repo_name | path | copies | size | content | license | var_hash | doc_hash | line_mean | line_max | alpha_frac | autogenerated |
---|---|---|---|---|---|---|---|---|---|---|---|
archetipo/account-invoicing | account_invoice_line_sort/models/account_invoice.py | 26 | 4940 | # -*- coding: utf-8 -*-
##############################################################################
# This file is part of account_invoice_line_sort, an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# account_invoice_line_sort is free software: you can redistribute it
# and/or modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# account_invoice_line_sort is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the
# GNU Affero General Public License
# along with account_invoice_line_sort.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
from operator import attrgetter
AVAILABLE_SORT_OPTIONS = [
('sequence', 'Sequence'),
('name', 'Description'),
('price_unit', 'Unit Price'),
('price_subtotal', 'Amount'),
]
AVAILABLE_ORDER_OPTIONS = [
('asc', 'Ascending'),
('desc', 'Descending')
]
class account_invoice(models.Model):
_inherit = "account.invoice"
_sort_trigger_fields = ('line_order',
'line_order_direction')
line_order = fields.Selection(AVAILABLE_SORT_OPTIONS,
"Sort Lines By",
default='sequence')
line_order_direction = fields.Selection(AVAILABLE_ORDER_OPTIONS,
"Sort Direction",
default='asc')
@api.model
def get_partner_sort_options(self, partner_id):
res = {}
if partner_id:
p = self.env['res.partner'].browse(partner_id)
res['line_order'] = p.line_order
res['line_order_direction'] = p.line_order_direction
return res
@api.multi
def onchange_partner_id(self, type, partner_id, date_invoice=False,
payment_term=False, partner_bank_id=False,
company_id=False):
res = super(account_invoice,
self).onchange_partner_id(type,
partner_id,
date_invoice=date_invoice,
payment_term=payment_term,
partner_bank_id=partner_bank_id,
company_id=company_id)
if partner_id:
res['value'].update(self.get_partner_sort_options(partner_id))
return res
@api.one
def _sort_account_invoice_line(self):
if self.invoice_line:
sequence = 0
key = attrgetter(self.line_order)
reverse = self.line_order_direction == 'desc'
for line in self.invoice_line.sorted(key=key, reverse=reverse):
sequence += 10
line.sequence = sequence
@api.multi
def write(self, vals):
sort = False
fields = [key for key in vals if key in self._sort_trigger_fields]
if fields:
if [key for key in fields if vals[key] != self[key]]:
sort = True
res = super(account_invoice, self).write(vals)
if sort or 'invoice_line' in vals:
self._sort_account_invoice_line()
return res
@api.model
@api.returns('self', lambda value: value.id)
def create(self, vals):
if not [key for key in vals if key in self._sort_trigger_fields]:
partner_id = vals.get('partner_id', False)
vals.update(self.get_partner_sort_options(partner_id))
invoice = super(account_invoice, self).create(vals)
invoice._sort_account_invoice_line()
return invoice
class account_invoice_line(models.Model):
_inherit = "account.invoice.line"
_sort_trigger_fields = ('name', 'quantity', 'price_unit', 'discount')
@api.multi
def write(self, vals):
sort = False
fields = [key for key in vals if key in self._sort_trigger_fields]
if fields:
if [key for key in fields if vals[key] != self[key]]:
sort = True
res = super(account_invoice_line, self).write(vals)
if sort:
self.invoice_id._sort_account_invoice_line()
return res
@api.model
@api.returns('self', lambda value: value.id)
def create(self, vals):
line = super(account_invoice_line, self).create(vals)
line.invoice_id._sort_account_invoice_line()
return line
| agpl-3.0 | -8,342,897,780,152,663,000 | 1,146,835,147,718,752,400 | 37 | 79 | 0.54919 | false |
vroyer/elasticassandra | dev-tools/smoke_test_rc.py | 56 | 11737 | # Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# Smoke-tests a release candidate
#
# 1. Downloads the tar.gz, deb, RPM and zip file from the staging URL
# 2. Verifies its sha1 hashes and GPG signatures against the release key
# 3. Installs all official plugins
# 4. Starts one node for tar.gz and zip packages and checks:
# -- if it runs with Java 1.8
# -- if the build hash given is the one that is returned by the status response
# -- if the build is a release version and not a snapshot version
# -- if all plugins are loaded
# -- if the status response returns the correct version
#
# USAGE:
#
# python3 -B ./dev-tools/smoke_test_rc.py --version 2.0.0-beta1 --hash bfa3e47
#
# to also test other plugins try run
#
# python3 -B ./dev-tools/smoke_test_rc.py --version 2.0.0-beta1 --hash bfa3e47 --plugins license,shield,watcher
#
# Note: Ensure the script is run from the elasticsearch top level directory
#
# For testing a release from sonatype try this:
#
# python3 -B dev-tools/smoke_test_rc.py --version 2.0.0-beta1 --hash bfa3e47 --fetch_url https://oss.sonatype.org/content/repositories/releases/
#
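# For reference, the verification in step 2 is roughly the manual equivalent of
# (illustrative shell commands; the script does the same work via hashlib and gpg):
#
#   sha1sum elasticsearch-<version>.tar.gz        # compare against the downloaded .sha1 file
#   gpg --verify elasticsearch-<version>.tar.gz.asc
#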
import argparse
import tempfile
import os
from os.path import basename, dirname, isdir, join
import signal
import shutil
import urllib
import urllib.request
import hashlib
import time
import socket
import json
import base64
from urllib.parse import urlparse
from http.client import HTTPConnection
def find_official_plugins():
plugins_dir = join(dirname(dirname(__file__)), 'plugins')
plugins = []
for plugin in os.listdir(plugins_dir):
if isdir(join(plugins_dir, plugin)):
plugins.append(plugin)
return plugins
DEFAULT_PLUGINS = find_official_plugins()
try:
JAVA_HOME = os.environ['JAVA_HOME']
except KeyError:
raise RuntimeError("""
Please set JAVA_HOME in the env before running release tool
On OSX use: export JAVA_HOME=`/usr/libexec/java_home -v '1.8*'`""")
# console colors
COLOR_OK = '\033[92m'
COLOR_END = '\033[0m'
def run(command, env_vars=None):
if env_vars:
for key, value in env_vars.items():
os.putenv(key, value)
print('*** Running: %s%s%s' % (COLOR_OK, command, COLOR_END))
if os.system(command):
raise RuntimeError(' FAILED: %s' % (command))
def java_exe():
path = JAVA_HOME
return 'export JAVA_HOME="%s" PATH="%s/bin:$PATH" JAVACMD="%s/bin/java"' % (path, path, path)
def verify_java_version(version):
s = os.popen('%s; java -version 2>&1' % java_exe()).read()
if ' version "%s.' % version not in s:
raise RuntimeError('got wrong version for java %s:\n%s' % (version, s))
def sha1(file):
with open(file, 'rb') as f:
return hashlib.sha1(f.read()).hexdigest()
def read_fully(file):
with open(file, encoding='utf-8') as f:
return f.read()
def wait_for_node_startup(es_dir, timeout=60, header={}):
print(' Waiting until node becomes available for at most %s seconds' % timeout)
for _ in range(timeout):
conn = None
try:
time.sleep(1)
host = get_host_from_ports_file(es_dir)
conn = HTTPConnection(host, timeout=1)
conn.request('GET', '/', headers=header)
res = conn.getresponse()
if res.status == 200:
return True
except IOError as e:
pass
#that is ok it might not be there yet
finally:
if conn:
conn.close()
return False
def download_and_verify(version, hash, files, base_url, plugins=DEFAULT_PLUGINS):
print('Downloading and verifying release %s from %s' % (version, base_url))
tmp_dir = tempfile.mkdtemp()
try:
downloaded_files = []
print(' ' + '*' * 80)
# here we create a temp gpg home where we download the release key as the only key into
# when we verify the signature it will fail if the signed key is not in the keystore and that
# way we keep the executing host unmodified since we don't have to import the key into the default keystore
gpg_home_dir = os.path.join(tmp_dir, "gpg_home_dir")
os.makedirs(gpg_home_dir, 0o700)
run('gpg --homedir %s --keyserver pool.sks-keyservers.net --recv-key D88E42B4' % gpg_home_dir)
for file in files:
name = os.path.basename(file)
print(' Smoketest file: %s' % name)
url = '%s/%s' % (base_url, file)
print(' Downloading %s' % (url))
artifact_path = os.path.join(tmp_dir, file)
downloaded_files.append(artifact_path)
current_artifact_dir = os.path.dirname(artifact_path)
urllib.request.urlretrieve(url, os.path.join(tmp_dir, file))
sha1_url = ''.join([url, '.sha1'])
checksum_file = artifact_path + ".sha1"
print(' Downloading %s' % (sha1_url))
urllib.request.urlretrieve(sha1_url, checksum_file)
print(' Verifying checksum %s' % (checksum_file))
expected = read_fully(checksum_file)
actual = sha1(artifact_path)
if expected != actual :
raise RuntimeError('sha1 hash for %s doesn\'t match %s != %s' % (name, expected, actual))
gpg_url = ''.join([url, '.asc'])
gpg_file = artifact_path + ".asc"
print(' Downloading %s' % (gpg_url))
urllib.request.urlretrieve(gpg_url, gpg_file)
print(' Verifying gpg signature %s' % (gpg_file))
run('cd %s && gpg --homedir %s --verify %s' % (current_artifact_dir, gpg_home_dir, os.path.basename(gpg_file)))
print(' ' + '*' * 80)
print()
smoke_test_release(version, downloaded_files, hash, plugins)
print(' SUCCESS')
finally:
shutil.rmtree(tmp_dir)
def get_host_from_ports_file(es_dir):
return read_fully(os.path.join(es_dir, 'logs/http.ports')).splitlines()[0]
def smoke_test_release(release, files, hash, plugins):
for release_file in files:
if not os.path.isfile(release_file):
raise RuntimeError('Smoketest failed missing file %s' % (release_file))
tmp_dir = tempfile.mkdtemp()
if release_file.endswith('tar.gz'):
run('tar -xzf %s -C %s' % (release_file, tmp_dir))
elif release_file.endswith('zip'):
run('unzip %s -d %s' % (release_file, tmp_dir))
else:
print(' Skip SmokeTest for [%s]' % release_file)
continue # nothing to do here
es_dir = os.path.join(tmp_dir, 'elasticsearch-%s' % (release))
es_run_path = os.path.join(es_dir, 'bin/elasticsearch')
print(' Smoke testing package [%s]' % release_file)
es_plugin_path = os.path.join(es_dir, 'bin/elasticsearch-plugin')
plugin_names = {}
for plugin in plugins:
print(' Install plugin [%s]' % (plugin))
run('%s; export ES_JAVA_OPTS="-Des.plugins.staging=%s"; %s %s %s' % (java_exe(), hash, es_plugin_path, 'install -b', plugin))
plugin_names[plugin] = True
if 'x-pack' in plugin_names:
headers = { 'Authorization' : 'Basic %s' % base64.b64encode(b"es_admin:foobar").decode("UTF-8") }
es_shield_path = os.path.join(es_dir, 'bin/x-pack/users')
print(" Install dummy shield user")
run('%s; %s useradd es_admin -r superuser -p foobar' % (java_exe(), es_shield_path))
else:
headers = {}
print(' Starting elasticsearch daemon from [%s]' % es_dir)
try:
run('%s; %s -Enode.name=smoke_tester -Ecluster.name=prepare_release -Erepositories.url.allowed_urls=http://snapshot.test* %s -Epidfile=%s -Enode.portsfile=true'
% (java_exe(), es_run_path, '-d', os.path.join(es_dir, 'es-smoke.pid')))
if not wait_for_node_startup(es_dir, header=headers):
print("elasticsearch logs:")
print('*' * 80)
logs = read_fully(os.path.join(es_dir, 'logs/prepare_release.log'))
print(logs)
print('*' * 80)
raise RuntimeError('server didn\'t start up')
try: # we now get / and /_nodes to fetch basic infos like hashes etc and the installed plugins
host = get_host_from_ports_file(es_dir)
conn = HTTPConnection(host, timeout=20)
conn.request('GET', '/', headers=headers)
res = conn.getresponse()
if res.status == 200:
version = json.loads(res.read().decode("utf-8"))['version']
if release != version['number']:
raise RuntimeError('Expected version [%s] but was [%s]' % (release, version['number']))
if version['build_snapshot']:
raise RuntimeError('Expected non snapshot version')
print(' Verify if plugins are listed in _nodes')
conn.request('GET', '/_nodes/plugins?pretty=true', headers=headers)
res = conn.getresponse()
if res.status == 200:
nodes = json.loads(res.read().decode("utf-8"))['nodes']
for _, node in nodes.items():
node_plugins = node['plugins']
for node_plugin in node_plugins:
if not plugin_names.get(node_plugin['name'].strip(), False):
raise RuntimeError('Unexpected plugin %s' % node_plugin['name'])
del plugin_names[node_plugin['name']]
if plugin_names:
raise RuntimeError('Plugins not loaded %s' % list(plugin_names.keys()))
else:
raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
else:
raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
finally:
conn.close()
finally:
pid_path = os.path.join(es_dir, 'es-smoke.pid')
if os.path.exists(pid_path): # try reading the pid and kill the node
pid = int(read_fully(pid_path))
os.kill(pid, signal.SIGKILL)
shutil.rmtree(tmp_dir)
print(' ' + '*' * 80)
print()
def parse_list(string):
return [x.strip() for x in string.split(',')]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='SmokeTests a Release Candidate from S3 staging repo')
parser.add_argument('--version', '-v', dest='version', default=None,
help='The Elasticsearch Version to smoke-tests', required=True)
parser.add_argument('--hash', '-s', dest='hash', default=None, required=True,
help='The hash of the unified release')
parser.add_argument('--plugins', '-p', dest='plugins', default=[], required=False, type=parse_list,
help='A list of additional plugins to smoketest')
parser.add_argument('--fetch_url', '-u', dest='url', default=None,
help='Fetched from the specified URL')
parser.set_defaults(hash=None)
parser.set_defaults(plugins=[])
parser.set_defaults(version=None)
parser.set_defaults(url=None)
args = parser.parse_args()
plugins = args.plugins
version = args.version
hash = args.hash
url = args.url
files = [ x % {'version': version} for x in [
'elasticsearch-%(version)s.tar.gz',
'elasticsearch-%(version)s.zip',
'elasticsearch-%(version)s.deb',
'elasticsearch-%(version)s.rpm'
]]
verify_java_version('1.8')
if url:
download_url = url
else:
download_url = 'https://staging.elastic.co/%s-%s/downloads/elasticsearch' % (version, hash)
download_and_verify(version, hash, files, download_url, plugins=DEFAULT_PLUGINS + plugins)
| apache-2.0 | -905,725,078,269,815,600 | 8,334,096,630,575,511,000 | 40.038462 | 166 | 0.645651 | false |
drawks/ansible | lib/ansible/modules/network/f5/bigip_firewall_rule.py | 14 | 42629 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_firewall_rule
short_description: Manage AFM Firewall rules
description:
- Manages firewall rules in an AFM firewall policy. New rules will always be added to the
end of the policy. Rules can be re-ordered using the C(bigip_security_policy) module.
Rules can also be pre-ordered using the C(bigip_security_policy) module and then later
updated using the C(bigip_firewall_rule) module.
version_added: 2.7
options:
name:
description:
- Specifies the name of the rule.
type: str
required: True
parent_policy:
description:
- The policy which contains the rule to be managed.
- One of either C(parent_policy) or C(parent_rule_list) is required.
type: str
parent_rule_list:
description:
- The rule list which contains the rule to be managed.
- One of either C(parent_policy) or C(parent_rule_list) is required.
type: str
action:
description:
- Specifies the action for the firewall rule.
- When C(accept), allows packets with the specified source, destination,
and protocol to pass through the firewall. Packets that match the rule,
and are accepted, traverse the system as if the firewall is not present.
- When C(drop), drops packets with the specified source, destination, and
protocol. Dropping a packet is a silent action with no notification to
the source or destination systems. Dropping the packet causes the connection
to be retried until the retry threshold is reached.
- When C(reject), rejects packets with the specified source, destination,
and protocol. When a packet is rejected the firewall sends a destination
unreachable message to the sender.
- When C(accept-decisively), allows packets with the specified source,
destination, and protocol to pass through the firewall, and does not require
any further processing by any of the further firewalls. Packets that match
the rule, and are accepted, traverse the system as if the firewall is not
present. If the Rule List is applied to a virtual server, management IP,
or self IP firewall rule, then Accept Decisively is equivalent to Accept.
- When creating a new rule, if this parameter is not provided, the default is
C(reject).
type: str
choices:
- accept
- drop
- reject
- accept-decisively
status:
description:
- Indicates the activity state of the rule or rule list.
- When C(disabled), specifies that the rule or rule list does not apply at all.
- When C(enabled), specifies that the system applies the firewall rule or rule
list to the given context and addresses.
- When C(scheduled), specifies that the system applies the rule or rule list
according to the specified schedule.
- When creating a new rule, if this parameter is not provided, the default
is C(enabled).
type: str
choices:
- enabled
- disabled
- scheduled
schedule:
description:
- Specifies a schedule for the firewall rule.
- You configure schedules to define days and times when the firewall rule is
made active.
type: str
description:
description:
- The rule description.
type: str
irule:
description:
- Specifies an iRule that is applied to the firewall rule.
- An iRule can be started when the firewall rule matches traffic.
type: str
protocol:
description:
- Specifies the protocol to which the rule applies.
- Protocols may be specified by either their name or numeric value.
- A special protocol value C(any) can be specified to match any protocol. The
numeric equivalent of this protocol is C(255).
type: str
source:
description:
- Specifies packet sources to which the rule applies.
- Leaving this field blank applies the rule to all addresses and all ports.
- You can specify the following source items. An IPv4 or IPv6 address, an IPv4
or IPv6 address range, geographic location, VLAN, address list, port,
port range, port list or address list.
- You can specify a mix of different types of items for the source address.
suboptions:
address:
description:
- Specifies a specific IP address.
type: str
address_list:
description:
- Specifies an existing address list.
type: str
address_range:
description:
- Specifies an address range.
type: str
country:
description:
- Specifies a country code.
type: str
port:
description:
- Specifies a single numeric port.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: int
port_list:
description:
- Specifies an existing port list.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: str
port_range:
description:
- Specifies a range of ports, which is two port values separated by
a hyphen. The port to the left of the hyphen should be less than the
port to the right.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: str
vlan:
description:
- Specifies VLANs to which the rule applies.
- The VLAN source refers to the packet's source.
type: str
type: list
destination:
description:
- Specifies packet destinations to which the rule applies.
- Leaving this field blank applies the rule to all addresses and all ports.
- You can specify the following destination items. An IPv4 or IPv6 address,
an IPv4 or IPv6 address range, geographic location, VLAN, address list, port,
port range, port list or address list.
- You can specify a mix of different types of items for the source address.
suboptions:
address:
description:
- Specifies a specific IP address.
type: str
address_list:
description:
- Specifies an existing address list.
type: str
address_range:
description:
- Specifies an address range.
type: str
country:
description:
- Specifies a country code.
type: str
port:
description:
- Specifies a single numeric port.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: int
port_list:
description:
- Specifies an existing port list.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: str
port_range:
description:
- Specifies a range of ports, which is two port values separated by
a hyphen. The port to the left of the hyphen should be less than the
port to the right.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: str
type: list
logging:
description:
- Specifies whether logging is enabled or disabled for the firewall rule.
- When creating a new rule, if this parameter is not specified, the default
is C(no).
type: bool
rule_list:
description:
- Specifies an existing rule list to use in the rule.
- This parameter is mutually exclusive with many of the other individual-rule
specific settings. This includes C(logging), C(action), C(source),
C(destination), C(irule), and C(protocol).
type: str
icmp_message:
description:
- Specifies the Internet Control Message Protocol (ICMP) or ICMPv6 message
C(type) and C(code) that the rule uses.
- This parameter is only relevant when C(protocol) is either C(icmp)(1) or
C(icmpv6)(58).
suboptions:
type:
description:
- Specifies the type of ICMP message.
- You can specify control messages, such as Echo Reply (0) and Destination
Unreachable (3), or you can specify C(any) to indicate that the system
applies the rule for all ICMP messages.
- You can also specify an arbitrary ICMP message.
- The ICMP protocol contains definitions for the existing message type and
number pairs.
type: str
code:
description:
- Specifies the code returned in response to the specified ICMP message type.
- You can specify codes, each set appropriate to the associated type, such
as No Code (0) (associated with Echo Reply (0)) and Host Unreachable (1)
(associated with Destination Unreachable (3)), or you can specify C(any)
to indicate that the system applies the rule for all codes in response to
that specific ICMP message.
- You can also specify an arbitrary code.
- The ICMP protocol contains definitions for the existing message code and
number pairs.
type: str
type: list
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
state:
description:
- When C(state) is C(present), ensures that the rule exists.
- When C(state) is C(absent), ensures that the rule is removed.
type: str
choices:
- present
- absent
default: present
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create a new rule in the foo firewall policy
bigip_firewall_rule:
name: foo
parent_policy: policy1
protocol: tcp
source:
- address: 1.2.3.4
- address: "::1"
- address_list: foo-list1
- address_range: 1.1.1.1-2.2.2.2
- vlan: vlan1
- country: US
- port: 22
- port_list: port-list1
- port_range: 80-443
destination:
- address: 1.2.3.4
- address: "::1"
- address_list: foo-list1
- address_range: 1.1.1.1-2.2.2.2
- country: US
- port: 22
- port_list: port-list1
- port_range: 80-443
irule: irule1
action: accept
logging: yes
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Create an ICMP specific rule
bigip_firewall_rule:
name: foo
protocol: icmp
icmp_message:
type: 0
source:
- country: US
action: drop
logging: yes
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Add a new rule that is uses an existing rule list
bigip_firewall_rule:
name: foo
rule_list: rule-list1
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
name:
description: Name of the rule.
returned: changed
type: str
sample: FooRule
parent_policy:
description: The policy which contains the rule to be managed.
returned: changed
type: str
sample: FooPolicy
parent_rule_list:
description: The rule list which contains the rule to be managed.
returned: changed
type: str
sample: FooRuleList
action:
description: The action for the firewall rule.
returned: changed
type: str
sample: drop
status:
description: The activity state of the rule or rule list.
returned: changed
type: str
sample: scheduled
schedule:
description: The schedule for the firewall rule.
returned: changed
type: str
sample: Foo_schedule
description:
description: The rule description.
returned: changed
type: str
sample: MyRule
irule:
description: The iRule that is applied to the firewall rule.
returned: changed
type: str
sample: _sys_auth_radius
protocol:
description: The protocol to which the rule applies.
returned: changed
type: str
sample: any
source:
description: The packet sources to which the rule applies
returned: changed
type: complex
contains:
address:
description: A specific IP address.
returned: changed
type: str
sample: 192.168.1.1
address_list:
description: An existing address list.
returned: changed
type: str
sample: foo-list1
address_range:
description: The address range.
returned: changed
type: str
sample: 1.1.1.1-2.2.2.2
country:
description: A country code.
returned: changed
type: str
sample: US
port:
description: Single numeric port.
returned: changed
type: int
sample: 8080
port_list:
description: An existing port list.
returned: changed
type: str
sample: port-list1
port_range:
description: The port range.
returned: changed
type: str
sample: 80-443
vlan:
description: Source VLANs for the packets.
returned: changed
type: str
sample: vlan1
sample: hash/dictionary of values
destination:
description: The packet destinations to which the rule applies.
returned: changed
type: complex
contains:
address:
description: A specific IP address.
returned: changed
type: str
sample: 192.168.1.1
address_list:
description: An existing address list.
returned: changed
type: str
sample: foo-list1
address_range:
description: The address range.
returned: changed
type: str
sample: 1.1.1.1-2.2.2.2
country:
description: A country code.
returned: changed
type: str
sample: US
port:
description: Single numeric port.
returned: changed
type: int
sample: 8080
port_list:
description: An existing port list.
returned: changed
type: str
sample: port-list1
port_range:
description: The port range.
returned: changed
type: str
sample: 80-443
sample: hash/dictionary of values
logging:
description: Enable or Disable logging for the firewall rule.
returned: changed
type: bool
sample: yes
rule_list:
description: An existing rule list to use in the rule.
returned: changed
type: str
sample: rule-list-1
icmp_message:
description: The ICMP or ICMPv6 message C(type) and C(code) that the rule uses.
returned: changed
type: complex
contains:
type:
description: The type of ICMP message.
returned: changed
type: str
sample: 0
code:
description: The code returned in response to the specified ICMP message type.
returned: changed
type: str
sample: 1
sample: hash/dictionary of values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
class Parameters(AnsibleF5Parameters):
api_map = {
'ipProtocol': 'protocol',
'log': 'logging',
'icmp': 'icmp_message',
}
api_attributes = [
'irule',
'ipProtocol',
'log',
'schedule',
'status',
'destination',
'source',
'icmp',
'action',
'description',
]
returnables = [
'logging',
'protocol',
'irule',
'source',
'destination',
'action',
'status',
'schedule',
'description',
'icmp_message',
]
updatables = [
'logging',
'protocol',
'irule',
'source',
'destination',
'action',
'status',
'schedule',
'description',
'icmp_message',
]
protocol_map = {
'1': 'icmp',
'6': 'tcp',
'17': 'udp',
'58': 'icmpv6',
'255': 'any',
}
class ApiParameters(Parameters):
@property
def logging(self):
if self._values['logging'] is None:
return None
if self._values['logging'] == 'yes':
return True
return False
@property
def protocol(self):
if self._values['protocol'] is None:
return None
if self._values['protocol'] in self.protocol_map:
return self.protocol_map[self._values['protocol']]
return self._values['protocol']
@property
def source(self):
result = []
if self._values['source'] is None:
return None
v = self._values['source']
if 'addressLists' in v:
result += [('address_list', x) for x in v['addressLists']]
if 'vlans' in v:
result += [('vlan', x) for x in v['vlans']]
if 'geo' in v:
result += [('geo', x['name']) for x in v['geo']]
if 'addresses' in v:
result += [('address', x['name']) for x in v['addresses']]
if 'ports' in v:
result += [('port', str(x['name'])) for x in v['ports']]
if 'portLists' in v:
result += [('port_list', x) for x in v['portLists']]
if result:
return result
return None
@property
def destination(self):
result = []
if self._values['destination'] is None:
return None
v = self._values['destination']
if 'addressLists' in v:
result += [('address_list', x) for x in v['addressLists']]
if 'geo' in v:
result += [('geo', x['name']) for x in v['geo']]
if 'addresses' in v:
result += [('address', x['name']) for x in v['addresses']]
if 'ports' in v:
result += [('port', x['name']) for x in v['ports']]
if 'portLists' in v:
result += [('port_list', x) for x in v['portLists']]
if result:
return result
return None
@property
def icmp_message(self):
if self._values['icmp_message'] is None:
return None
result = [x['name'] for x in self._values['icmp_message']]
return result
class ModuleParameters(Parameters):
@property
def irule(self):
if self._values['irule'] is None:
return None
if self._values['irule'] == '':
return ''
return fq_name(self.partition, self._values['irule'])
@property
def description(self):
if self._values['description'] is None:
return None
if self._values['description'] == '':
return ''
return self._values['description']
@property
def schedule(self):
if self._values['schedule'] is None:
return None
if self._values['schedule'] == '':
return ''
return fq_name(self.partition, self._values['schedule'])
@property
def source(self):
result = []
if self._values['source'] is None:
return None
for x in self._values['source']:
if 'address' in x and x['address'] is not None:
result += [('address', x['address'])]
elif 'address_range' in x and x['address_range'] is not None:
result += [('address', x['address_range'])]
elif 'address_list' in x and x['address_list'] is not None:
result += [('address_list', x['address_list'])]
elif 'country' in x and x['country'] is not None:
result += [('geo', x['country'])]
elif 'vlan' in x and x['vlan'] is not None:
result += [('vlan', fq_name(self.partition, x['vlan']))]
elif 'port' in x and x['port'] is not None:
result += [('port', str(x['port']))]
elif 'port_range' in x and x['port_range'] is not None:
result += [('port', x['port_range'])]
elif 'port_list' in x and x['port_list'] is not None:
result += [('port_list', fq_name(self.partition, x['port_list']))]
if result:
return result
return None
@property
def destination(self):
result = []
if self._values['destination'] is None:
return None
for x in self._values['destination']:
if 'address' in x and x['address'] is not None:
result += [('address', x['address'])]
elif 'address_range' in x and x['address_range'] is not None:
result += [('address', x['address_range'])]
elif 'address_list' in x and x['address_list'] is not None:
result += [('address_list', x['address_list'])]
elif 'country' in x and x['country'] is not None:
result += [('geo', x['country'])]
elif 'port' in x and x['port'] is not None:
result += [('port', str(x['port']))]
elif 'port_range' in x and x['port_range'] is not None:
result += [('port', x['port_range'])]
elif 'port_list' in x and x['port_list'] is not None:
result += [('port_list', fq_name(self.partition, x['port_list']))]
if result:
return result
return None
@property
def icmp_message(self):
if self._values['icmp_message'] is None:
return None
result = []
for x in self._values['icmp_message']:
type = x.get('type', '255')
code = x.get('code', '255')
if type is None or type == 'any':
type = '255'
if code is None or code == 'any':
code = '255'
if type == '255' and code == '255':
result.append("255")
elif type == '255' and code != '255':
raise F5ModuleError(
"A type of 'any' (255) requires a code of 'any'."
)
elif code == '255':
result.append(type)
else:
result.append('{0}:{1}'.format(type, code))
result = list(set(result))
return result
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
@property
def logging(self):
if self._values['logging'] is None:
return None
if self._values['logging'] is True:
return "yes"
return "no"
@property
def source(self):
if self._values['source'] is None:
return None
result = dict(
addresses=[],
addressLists=[],
vlans=[],
geo=[],
ports=[],
portLists=[]
)
for x in self._values['source']:
if x[0] == 'address':
result['addresses'].append({'name': x[1]})
elif x[0] == 'address_list':
result['addressLists'].append(x[1])
elif x[0] == 'vlan':
result['vlans'].append(x[1])
elif x[0] == 'geo':
result['geo'].append({'name': x[1]})
elif x[0] == 'port':
result['ports'].append({'name': str(x[1])})
elif x[0] == 'port_list':
result['portLists'].append(x[1])
return result
@property
def destination(self):
if self._values['destination'] is None:
return None
result = dict(
addresses=[],
addressLists=[],
vlans=[],
geo=[],
ports=[],
portLists=[]
)
for x in self._values['destination']:
if x[0] == 'address':
result['addresses'].append({'name': x[1]})
elif x[0] == 'address_list':
result['addressLists'].append(x[1])
elif x[0] == 'geo':
result['geo'].append({'name': x[1]})
elif x[0] == 'port':
result['ports'].append({'name': str(x[1])})
elif x[0] == 'port_list':
result['portLists'].append(x[1])
return result
@property
def icmp_message(self):
if self._values['icmp_message'] is None:
return None
result = []
for x in self._values['icmp_message']:
result.append({'name': x})
return result
class ReportableChanges(Changes):
@property
def source(self):
if self._values['source'] is None:
return None
result = []
v = self._values['source']
if v['addressLists']:
result += [('address_list', x) for x in v['addressLists']]
if v['vlans']:
result += [('vlan', x) for x in v['vlans']]
if v['geo']:
result += [('geo', x['name']) for x in v['geo']]
if v['addresses']:
result += [('address', x['name']) for x in v['addresses']]
if v['ports']:
result += [('port', str(x)) for x in v['ports']]
if v['portLists']:
result += [('port_list', x['name']) for x in v['portLists']]
if result:
return dict(result)
return None
@property
def destination(self):
if self._values['destination'] is None:
return None
result = []
v = self._values['destination']
if v['addressLists']:
result += [('address_list', x) for x in v['addressLists']]
if v['geo']:
result += [('geo', x['name']) for x in v['geo']]
if v['addresses']:
result += [('address', x['name']) for x in v['addresses']]
if v['ports']:
result += [('port', str(x)) for x in v['ports']]
if v['portLists']:
result += [('port_list', x['name']) for x in v['portLists']]
if result:
return dict(result)
return None
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def irule(self):
if self.want.irule is None:
return None
if self.have.irule is None and self.want.irule == '':
return None
if self.have.irule is None:
return self.want.irule
if self.want.irule != self.have.irule:
return self.want.irule
@property
def description(self):
if self.want.description is None:
return None
if self.have.description is None and self.want.description == '':
return None
if self.have.description is None:
return self.want.description
if self.want.description != self.have.description:
return self.want.description
@property
def source(self):
if self.want.source is None:
return None
if self.want.source is None and self.have.source is None:
return None
if self.have.source is None:
return self.want.source
if set(self.want.source) != set(self.have.source):
return self.want.source
@property
def destination(self):
if self.want.destination is None:
return None
if self.want.destination is None and self.have.destination is None:
return None
if self.have.destination is None:
return self.want.destination
if set(self.want.destination) != set(self.have.destination):
return self.want.destination
@property
def icmp_message(self):
if self.want.icmp_message is None:
return None
if self.want.icmp_message is None and self.have.icmp_message is None:
return None
if self.have.icmp_message is None:
return self.want.icmp_message
if set(self.want.icmp_message) != set(self.have.icmp_message):
return self.want.icmp_message
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def exists(self):
if self.want.parent_policy:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_policy),
self.want.name
)
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
self.want.name
)
resp = self.client.api.get(uri)
if resp.ok:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
if self.want.rule_list is None and self.want.parent_rule_list is None:
if self.want.action is None:
self.changes.update({'action': 'reject'})
if self.want.logging is None:
self.changes.update({'logging': False})
if self.want.status is None:
self.changes.update({'status': 'enabled'})
if self.want.status == 'scheduled' and self.want.schedule is None:
raise F5ModuleError(
"A 'schedule' must be specified when 'status' is 'scheduled'."
)
if self.module.check_mode:
return True
self.create_on_device()
return True
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
params['placeAfter'] = 'last'
if self.want.parent_policy:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_policy),
)
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
)
if self.changes.protocol not in ['icmp', 'icmpv6']:
if self.changes.icmp_message is not None:
raise F5ModuleError(
"The 'icmp_message' can only be specified when 'protocol' is 'icmp' or 'icmpv6'."
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def update_on_device(self):
if self.want.parent_policy:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_policy),
self.want.name
)
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
self.want.name
)
if self.have.protocol not in ['icmp', 'icmpv6'] and self.changes.protocol not in ['icmp', 'icmpv6']:
if self.changes.icmp_message is not None:
raise F5ModuleError(
"The 'icmp_message' can only be specified when 'protocol' is 'icmp' or 'icmpv6'."
)
if self.changes.protocol in ['icmp', 'icmpv6']:
self.changes.update({'source': {}})
self.changes.update({'destination': {}})
params = self.changes.api_params()
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def absent(self):
if self.exists():
return self.remove()
return False
def remove_from_device(self):
if self.want.parent_policy:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_policy),
self.want.name
)
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
self.want.name
)
resp = self.client.api.delete(uri)
if resp.status == 200:
return True
def read_current_from_device(self):
if self.want.parent_policy:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_policy),
self.want.name
)
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
self.want.name
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
parent_policy=dict(),
parent_rule_list=dict(),
logging=dict(type='bool'),
protocol=dict(),
irule=dict(),
description=dict(),
source=dict(
type='list',
elements='dict',
options=dict(
address=dict(),
address_list=dict(),
address_range=dict(),
country=dict(),
port=dict(type='int'),
port_list=dict(),
port_range=dict(),
vlan=dict(),
),
mutually_exclusive=[[
'address', 'address_list', 'address_range', 'country', 'vlan',
'port', 'port_range', 'port_list'
]]
),
destination=dict(
type='list',
elements='dict',
options=dict(
address=dict(),
address_list=dict(),
address_range=dict(),
country=dict(),
port=dict(type='int'),
port_list=dict(),
port_range=dict(),
),
mutually_exclusive=[[
'address', 'address_list', 'address_range', 'country',
'port', 'port_range', 'port_list'
]]
),
action=dict(
choices=['accept', 'drop', 'reject', 'accept-decisively']
),
status=dict(
choices=['enabled', 'disabled', 'scheduled']
),
schedule=dict(),
rule_list=dict(),
icmp_message=dict(
type='list',
elements='dict',
options=dict(
type=dict(),
code=dict(),
)
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
),
state=dict(
default='present',
choices=['present', 'absent']
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
self.mutually_exclusive = [
['rule_list', 'action'],
['rule_list', 'source'],
['rule_list', 'destination'],
['rule_list', 'irule'],
['rule_list', 'protocol'],
['rule_list', 'logging'],
['parent_policy', 'parent_rule_list']
]
self.required_one_of = [
['parent_policy', 'parent_rule_list']
]
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
mutually_exclusive=spec.mutually_exclusive,
required_one_of=spec.required_one_of
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 | -6,448,212,171,795,570,000 | 1,833,938,567,683,008,800 | 32.020139 | 108 | 0.56051 | false |
ArchiveTeam/spuf-grab | pipeline.py | 1 | 11245 | # encoding=utf8
import datetime
from distutils.version import StrictVersion
import hashlib
import os.path
import random
from seesaw.config import realize, NumberConfigValue
from seesaw.externalprocess import ExternalProcess
from seesaw.item import ItemInterpolation, ItemValue
from seesaw.task import SimpleTask, LimitConcurrent
from seesaw.tracker import GetItemFromTracker, PrepareStatsForTracker, \
UploadWithTracker, SendDoneToTracker
import shutil
import socket
import subprocess
import sys
import time
import string
import seesaw
from seesaw.externalprocess import WgetDownload
from seesaw.pipeline import Pipeline
from seesaw.project import Project
from seesaw.util import find_executable
# check the seesaw version
if StrictVersion(seesaw.__version__) < StrictVersion("0.8.5"):
raise Exception("This pipeline needs seesaw version 0.8.5 or higher.")
###########################################################################
# Find a useful Wget+Lua executable.
#
# WGET_LUA will be set to the first path that
# 1. does not crash with --version, and
# 2. prints the required version string
WGET_LUA = find_executable(
"Wget+Lua",
["GNU Wget 1.14.lua.20130523-9a5c", "GNU Wget 1.14.lua.20160530-955376b"],
[
"./wget-lua",
"./wget-lua-warrior",
"./wget-lua-local",
"../wget-lua",
"../../wget-lua",
"/home/warrior/wget-lua",
"/usr/bin/wget-lua"
]
)
if not WGET_LUA:
raise Exception("No usable Wget+Lua found.")
###########################################################################
# The version number of this pipeline definition.
#
# Update this each time you make a non-cosmetic change.
# It will be added to the WARC files and reported to the tracker.
VERSION = "20170615.01"
USER_AGENT = 'ArchiveTeam'
TRACKER_ID = 'spuf'
TRACKER_HOST = 'tracker.archiveteam.org'
###########################################################################
# This section defines project-specific tasks.
#
# Simple tasks (tasks that do not need any concurrency) are based on the
# SimpleTask class and have a process(item) method that is called for
# each item.
class CheckIP(SimpleTask):
def __init__(self):
SimpleTask.__init__(self, "CheckIP")
self._counter = 0
def process(self, item):
# NEW for 2014! Check if we are behind firewall/proxy
if self._counter <= 0:
item.log_output('Checking IP address.')
ip_set = set()
ip_set.add(socket.gethostbyname('twitter.com'))
ip_set.add(socket.gethostbyname('facebook.com'))
ip_set.add(socket.gethostbyname('youtube.com'))
ip_set.add(socket.gethostbyname('microsoft.com'))
ip_set.add(socket.gethostbyname('icanhas.cheezburger.com'))
ip_set.add(socket.gethostbyname('archiveteam.org'))
if len(ip_set) != 6:
item.log_output('Got IP addresses: {0}'.format(ip_set))
item.log_output(
'Are you behind a firewall/proxy? That is a big no-no!')
raise Exception(
'Are you behind a firewall/proxy? That is a big no-no!')
# Check only occasionally
if self._counter <= 0:
self._counter = 10
else:
self._counter -= 1
class PrepareDirectories(SimpleTask):
def __init__(self, warc_prefix):
SimpleTask.__init__(self, "PrepareDirectories")
self.warc_prefix = warc_prefix
def process(self, item):
item_name = item["item_name"]
escaped_item_name = item_name.replace(':', '_').replace('/', '_').replace('~', '_')
dirname = "/".join((item["data_dir"], escaped_item_name))
if os.path.isdir(dirname):
shutil.rmtree(dirname)
os.makedirs(dirname)
item["item_dir"] = dirname
item["warc_file_base"] = "%s-%s-%s" % (self.warc_prefix, escaped_item_name,
time.strftime("%Y%m%d-%H%M%S"))
open("%(item_dir)s/%(warc_file_base)s.warc.gz" % item, "w").close()
class MoveFiles(SimpleTask):
def __init__(self):
SimpleTask.__init__(self, "MoveFiles")
def process(self, item):
# NEW for 2014! Check if wget was compiled with zlib support
if os.path.exists("%(item_dir)s/%(warc_file_base)s.warc" % item):
raise Exception('Please compile wget with zlib support!')
os.rename("%(item_dir)s/%(warc_file_base)s.warc.gz" % item,
"%(data_dir)s/%(warc_file_base)s.warc.gz" % item)
shutil.rmtree("%(item_dir)s" % item)
def get_hash(filename):
with open(filename, 'rb') as in_file:
return hashlib.sha1(in_file.read()).hexdigest()
CWD = os.getcwd()
PIPELINE_SHA1 = get_hash(os.path.join(CWD, 'pipeline.py'))
LUA_SHA1 = get_hash(os.path.join(CWD, 'spuf.lua'))
def stats_id_function(item):
# NEW for 2014! Some accountability hashes and stats.
d = {
'pipeline_hash': PIPELINE_SHA1,
'lua_hash': LUA_SHA1,
'python_version': sys.version,
}
return d
class WgetArgs(object):
def realize(self, item):
wget_args = [
WGET_LUA,
"-U", USER_AGENT,
"-nv",
"--load-cookies", "cookies.txt",
#"--no-cookies",
"--lua-script", "spuf.lua",
"-o", ItemInterpolation("%(item_dir)s/wget.log"),
"--no-check-certificate",
"--output-document", ItemInterpolation("%(item_dir)s/wget.tmp"),
"--truncate-output",
"-e", "robots=off",
"--rotate-dns",
"--recursive", "--level=inf",
"--no-parent",
"--page-requisites",
"--timeout", "30",
"--tries", "inf",
"--domains", "steampowered.com",
"--span-hosts",
"--waitretry", "30",
"--warc-file", ItemInterpolation("%(item_dir)s/%(warc_file_base)s"),
"--warc-header", "operator: Archive Team",
"--warc-header", "steam-users-forum-dld-script-version: " + VERSION,
"--warc-header", ItemInterpolation("steam-users-forum-item: %(item_name)s"),
]
item_name = item['item_name']
assert ':' in item_name
item_type, item_value = item_name.split(':', 1)
item['item_type'] = item_type
item['item_value'] = item_value
tries = 0
while tries < 10:
if os.path.isfile('login.php?do=login'):
os.remove('login.php?do=login')
os.system("wget --save-cookies cookies.txt --user-agent 'ArchiveTeam' --keep-session-cookies --post-data 'vb_login_username=archiveTeam&cookieuser=1&vb_login_password=&s=&securitytoken=guest&do=login&vb_login_md5password=9aa65d84012ee50e456c4e6916089636&vb_login_md5password_utf=9aa65d84012ee50e456c4e6916089636' --referer http://forums.steampowered.com/forums/ http://forums.steampowered.com/forums/login.php?do=login")
if not os.path.isfile('login.php?do=login'):
continue
with open('login.php?do=login') as f:
if 'alt="Forum Database Error"' in f.read():
continue
break
else:
raise Exception('Could not log in.')
wget_args.append('http://forums.steampowered.com/forums/showthread.php')
if item_type == 'threads':
start, stop = item_value.split('-')
for i in range(int(start), int(stop)+1):
wget_args.extend(['--warc-header', 'steam-users-forum-thread: {i}'.format(i=i)])
wget_args.append('http://forums.steampowered.com/forums/showthread.php?t={i}'.format(i=i))
elif item_type == 'forums':
start, stop = item_value.split('-')
for i in range(int(start), int(stop)+1):
wget_args.extend(['--warc-header', 'steam-users-forum-forum: {i}'.format(i=i)])
wget_args.append('http://forums.steampowered.com/forums/forumdisplay.php?f={i}&daysprune=-1'.format(i=i))
wget_args.append('http://forums.steampowered.com/forums/forumdisplay.php?f={i}'.format(i=i))
elif item_type == 'members':
start, stop = item_value.split('-')
for i in range(int(start), int(stop)+1):
wget_args.extend(['--warc-header', 'steam-users-forum-member: {i}'.format(i=i)])
wget_args.append('http://forums.steampowered.com/forums/member.php?u={i}'.format(i=i))
else:
raise Exception('Unknown item')
if 'bind_address' in globals():
wget_args.extend(['--bind-address', globals()['bind_address']])
print('')
print('*** Wget will bind address at {0} ***'.format(
globals()['bind_address']))
print('')
return realize(wget_args, item)
###########################################################################
# Initialize the project.
#
# This will be shown in the warrior management panel. The logo should not
# be too big. The deadline is optional.
project = Project(
title = "Steam Users' Forum",
project_html = """
<img class="project-logo" alt="Steam Logo" src="http://archiveteam.org/images/thumb/4/48/Steam_Icon_2014.png/100px-Steam_Icon_2014.png" />
<h2>Steam Users' Forum <span class="links"><a href="http://forums.steampowered.com/forums">Website</a> · <a href="http://tracker.archiveteam.org/spuf/">Leaderboard</a></span></h2>
<p>Getting killed June 5th.</p>
""",
utc_deadline = datetime.datetime(2017, 6, 4, 23, 59, 0)
)
pipeline = Pipeline(
CheckIP(),
GetItemFromTracker("http://%s/%s" % (TRACKER_HOST, TRACKER_ID), downloader,
VERSION),
PrepareDirectories(warc_prefix="spuf"),
WgetDownload(
WgetArgs(),
max_tries=2,
accept_on_exit_code=[0, 4, 8],
env={
"item_dir": ItemValue("item_dir"),
"item_value": ItemValue("item_value"),
"item_type": ItemValue("item_type"),
"warc_file_base": ItemValue("warc_file_base"),
}
),
PrepareStatsForTracker(
defaults={"downloader": downloader, "version": VERSION},
file_groups={
"data": [
ItemInterpolation("%(item_dir)s/%(warc_file_base)s.warc.gz")
]
},
id_function=stats_id_function,
),
MoveFiles(),
LimitConcurrent(NumberConfigValue(min=1, max=4, default="1",
name="shared:rsync_threads", title="Rsync threads",
description="The maximum number of concurrent uploads."),
UploadWithTracker(
"http://%s/%s" % (TRACKER_HOST, TRACKER_ID),
downloader=downloader,
version=VERSION,
files=[
ItemInterpolation("%(data_dir)s/%(warc_file_base)s.warc.gz")
],
rsync_target_source_path=ItemInterpolation("%(data_dir)s/"),
rsync_extra_args=[
"--recursive",
"--partial",
"--partial-dir", ".rsync-tmp",
]
),
),
SendDoneToTracker(
tracker_url="http://%s/%s" % (TRACKER_HOST, TRACKER_ID),
stats=ItemValue("stats")
)
)
| unlicense | -1,258,583,010,367,898,400 | 5,322,126,359,212,980,000 | 35.868852 | 432 | 0.574033 | false |
m4ns0ur/grumpy | third_party/stdlib/getopt.py | 31 | 7319 | """Parser for command line options.
This module helps scripts to parse the command line arguments in
sys.argv. It supports the same conventions as the Unix getopt()
function (including the special meanings of arguments of the form `-'
and `--'). Long options similar to those supported by GNU software
may be used as well via an optional third argument. This module
provides two functions and an exception:
getopt() -- Parse command line options
gnu_getopt() -- Like getopt(), but allow option and non-option arguments
to be intermixed.
GetoptError -- exception (class) raised with 'opt' attribute, which is the
option involved with the exception.
"""
# Long option support added by Lars Wirzenius <[email protected]>.
#
# Gerrit Holl <[email protected]> moved the string-based exceptions
# to class-based exceptions.
#
# Peter Astrand <[email protected]> added gnu_getopt().
#
# TODO for gnu_getopt():
#
# - GNU getopt_long_only mechanism
# - allow the caller to specify ordering
# - RETURN_IN_ORDER option
# - GNU extension with '-' as first character of option string
# - optional arguments, specified by double colons
# - an option string with a W followed by semicolon should
# treat "-W foo" as "--foo"
__all__ = ["GetoptError","error","getopt","gnu_getopt"]
import os
class GetoptError(Exception):
opt = ''
msg = ''
def __init__(self, msg, opt=''):
self.msg = msg
self.opt = opt
Exception.__init__(self, msg, opt)
def __str__(self):
return self.msg
error = GetoptError # backward compatibility
def getopt(args, shortopts, longopts = []):
"""getopt(args, options[, long_options]) -> opts, args
Parses command line options and parameter list. args is the
argument list to be parsed, without the leading reference to the
running program. Typically, this means "sys.argv[1:]". shortopts
is the string of option letters that the script wants to
recognize, with options that require an argument followed by a
colon (i.e., the same format that Unix getopt() uses). If
specified, longopts is a list of strings with the names of the
long options which should be supported. The leading '--'
characters should not be included in the option name. Options
which require an argument should be followed by an equal sign
('=').
The return value consists of two elements: the first is a list of
(option, value) pairs; the second is the list of program arguments
left after the option list was stripped (this is a trailing slice
of the first argument). Each option-and-value pair returned has
the option as its first element, prefixed with a hyphen (e.g.,
'-x'), and the option argument as its second element, or an empty
string if the option has no argument. The options occur in the
list in the same order in which they were found, thus allowing
multiple occurrences. Long and short options may be mixed.
"""
opts = []
if type(longopts) == type(""):
longopts = [longopts]
else:
longopts = list(longopts)
while args and args[0].startswith('-') and args[0] != '-':
if args[0] == '--':
args = args[1:]
break
if args[0].startswith('--'):
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
else:
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
return opts, args
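# A minimal usage sketch for getopt(); the argument list and option strings
# below are assumed example values, not something defined by this module.
def _getopt_usage_example():
    # '-a' takes a value (note the ':' in "a:b"); '--beta' is a plain flag.
    opts, args = getopt(['-a', 'foo', '--beta', 'bar'], 'a:b', ['alpha=', 'beta'])
    # opts == [('-a', 'foo'), ('--beta', '')] and args == ['bar']
    return opts, args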
def gnu_getopt(args, shortopts, longopts = []):
"""getopt(args, options[, long_options]) -> opts, args
This function works like getopt(), except that GNU style scanning
mode is used by default. This means that option and non-option
arguments may be intermixed. The getopt() function stops
processing options as soon as a non-option argument is
encountered.
If the first character of the option string is `+', or if the
environment variable POSIXLY_CORRECT is set, then option
processing stops as soon as a non-option argument is encountered.
"""
opts = []
prog_args = []
if isinstance(longopts, str):
longopts = [longopts]
else:
longopts = list(longopts)
# Allow options after non-option arguments?
if shortopts.startswith('+'):
shortopts = shortopts[1:]
all_options_first = True
elif os.environ.get("POSIXLY_CORRECT"):
all_options_first = True
else:
all_options_first = False
while args:
if args[0] == '--':
prog_args += args[1:]
break
if args[0][:2] == '--':
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
elif args[0][:1] == '-' and args[0] != '-':
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
else:
if all_options_first:
prog_args += args
break
else:
prog_args.append(args[0])
args = args[1:]
return opts, prog_args
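# A short sketch contrasting gnu_getopt() with getopt(); the argument list is
# an assumed example and POSIXLY_CORRECT is assumed to be unset.  GNU-style
# scanning keeps going past the non-option 'file1', so '-b' is still parsed.
def _gnu_getopt_usage_example():
    opts, args = gnu_getopt(['-a', 'foo', 'file1', '-b'], 'a:b')
    # opts == [('-a', 'foo'), ('-b', '')] and args == ['file1']
    return opts, args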
def do_longs(opts, opt, longopts, args):
try:
i = opt.index('=')
except ValueError:
optarg = None
else:
opt, optarg = opt[:i], opt[i+1:]
has_arg, opt = long_has_args(opt, longopts)
if has_arg:
if optarg is None:
if not args:
raise GetoptError('option --%s requires argument' % opt, opt)
optarg, args = args[0], args[1:]
elif optarg is not None:
raise GetoptError('option --%s must not have an argument' % opt, opt)
opts.append(('--' + opt, optarg or ''))
return opts, args
# Return:
# has_arg?
# full option name
def long_has_args(opt, longopts):
possibilities = [o for o in longopts if o.startswith(opt)]
if not possibilities:
raise GetoptError('option --%s not recognized' % opt, opt)
# Is there an exact match?
if opt in possibilities:
return False, opt
elif opt + '=' in possibilities:
return True, opt
# No exact match, so better be unique.
if len(possibilities) > 1:
# XXX since possibilities contains all valid continuations, might be
# nice to work them into the error msg
raise GetoptError('option --%s not a unique prefix' % opt, opt)
assert len(possibilities) == 1
unique_match = possibilities[0]
has_arg = unique_match.endswith('=')
if has_arg:
unique_match = unique_match[:-1]
return has_arg, unique_match
def do_shorts(opts, optstring, shortopts, args):
while optstring != '':
opt, optstring = optstring[0], optstring[1:]
if short_has_arg(opt, shortopts):
if optstring == '':
if not args:
raise GetoptError('option -%s requires argument' % opt,
opt)
optstring, args = args[0], args[1:]
optarg, optstring = optstring, ''
else:
optarg = ''
opts.append(('-' + opt, optarg))
return opts, args
def short_has_arg(opt, shortopts):
for i in range(len(shortopts)):
if opt == shortopts[i] != ':':
return shortopts.startswith(':', i+1)
raise GetoptError('option -%s not recognized' % opt, opt)
if __name__ == '__main__':
import sys
print getopt(sys.argv[1:], "a:b", ["alpha=", "beta"])
| apache-2.0 | -1,231,871,196,633,095,400 | -3,269,498,299,063,881,000 | 33.852381 | 77 | 0.619757 | false |
geggo/pyface | pyface/tree/api.py | 1 | 1198 | #------------------------------------------------------------------------------
# Copyright (c) 2005-2011, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought pyface package component>
#------------------------------------------------------------------------------
from __future__ import absolute_import
from .node_event import NodeEvent
from .node_monitor import NodeMonitor
from .node_manager import NodeManager
from .node_tree import NodeTree
from .node_tree_model import NodeTreeModel
from .node_type import NodeType
from .trait_dict_node_type import TraitDictNodeType
from .trait_list_node_type import TraitListNodeType
from .tree_model import TreeModel
from traits.etsconfig.api import ETSConfig
if ETSConfig.toolkit == 'wx':
# Tree has not yet been ported to qt
from .tree import Tree
del ETSConfig
| bsd-3-clause | 1,632,696,340,230,929,700 | 1,082,092,186,087,932,500 | 36.4375 | 79 | 0.682805 | false |
Encrylize/flask-blogger | app/utils/helpers.py | 1 | 1218 | from urllib.parse import urljoin, urlparse
from flask import request
def get_or_create(model, **kwargs):
"""
Gets or creates an instance of model.
Args:
model: SQLAlchemy model
**kwargs: Model properties
Returns:
An instance of model and True if it was created, False if it was not.
"""
instance = model.query.filter_by(**kwargs).first()
if instance:
return instance, False
else:
instance = model(**kwargs)
return instance, True
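# Hedged usage sketch; 'User', its 'username' column and 'db' are assumed
# placeholder names for a SQLAlchemy model and session defined elsewhere:
#
#   user, created = get_or_create(User, username='alice')
#   if created:
#       db.session.add(user)
#       db.session.commit()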
def is_safe_url(target):
"""
Checks if a URL is safe.
Args:
target: The URL to check
Returns:
True if the URL is safe, False if it is not.
"""
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, target))
return test_url.scheme in ('http',
'https') and ref_url.netloc == test_url.netloc
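# Rough illustration of the check above, assuming the app is served from
# http://example.com/ (i.e. request.host_url == 'http://example.com/'):
#
#   is_safe_url('/posts/1')             -> True   (same host after urljoin)
#   is_safe_url('http://example.com/a') -> True
#   is_safe_url('http://evil.com/a')    -> False  (different netloc)
#   is_safe_url('javascript:alert(1)')  -> False  (scheme is not http/https)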
def get_redirect_target():
"""
Gets a safe redirect target.
Returns:
The first safe redirect target.
"""
for target in request.args.get('next'), request.referrer:
if not target:
continue
elif is_safe_url(target):
return target
| mit | 4,872,659,461,844,845,000 | 4,977,384,661,115,162,000 | 20 | 77 | 0.591954 | false |
coupdair/pyoptools | pyoptools/misc/GS/gs.py | 9 | 10699 | from mako.template import Template
from pyoptools.misc.resources import has_double_support, has_amd_double_support
### NOTE: this import needs to be handled properly when pyopencl is not installed
try:
from pyfft.cl import Plan
import pyopencl as cl
import pyopencl.array as cl_array
except:
pass
from numpy.fft import fft2,ifft2,fftshift,ifftshift
from numpy import angle,exp,pi, complex128, zeros, sqrt,int32, zeros_like,ones
from numpy.random import random
from pylab import imshow,colorbar
KERNEL= \
"""
//There are some operations that are not defined in the RV770 GPUs
// for doubles, so a cast to float is needed
% if double_support:
#pragma OPENCL EXTENSION cl_khr_fp64: enable
#define CAST (double)
% elif amd_double_support:
#pragma OPENCL EXTENSION cl_amd_fp64: enable
#define CAST (float)
% endif
__kernel void norm(__global double2 *data)
{
int nWidth = get_global_size(0);
int nHeight = get_global_size(1);
    int ox=get_global_id(0); // Gets the index in X
    int oy=get_global_id(1); // Gets the index in Y
int i= oy*nWidth+ox;
double norm=sqrt(CAST(data[i].x*data[i].x+data[i].y*data[i].y));
if (norm>0)
{
data[i].x=data[i].x/norm;
data[i].y=data[i].y/norm;
    }
else
{
data[i].x=1;
data[i].y=0;
}
}
__kernel void norm1(__global double2 *data, __global double2 *idata, __global double *error, int cut)
{
int nWidth = get_global_size(0);
int nHeight = get_global_size(1);
    int ox=get_global_id(0); // Gets the index in X
    int oy=get_global_id(1); // Gets the index in Y
int i;
double norm,intdata;
i= oy*nWidth+ox;
error[i]=0;
    /// NOTE: the matrices arrive here with fftshift applied
if( ((ox<cut) && (oy<cut) ) ||
((ox>(nWidth-cut)) && (oy<cut) ) ||
((ox<cut) && (oy>(nHeight-cut))) ||
((ox>(nWidth-cut)) && (oy>(nHeight-cut))) )
{
intdata=data[i].x*data[i].x+data[i].y*data[i].y;
intdata=sqrt((float)intdata);
error[i]=(intdata-idata[i].x)*(intdata-idata[i].x);
norm=sqrt(CAST(data[i].x*data[i].x+data[i].y*data[i].y));
if (norm>0)
{
data[i].x=(data[i].x/norm)*idata[i].x;
data[i].y=(data[i].y/norm)*idata[i].x;
}
else
{
data[i].x=idata[i].x;
data[i].y=0;
}
}
}
__kernel void norm2(__global double2 *data, __global double2 *idata)
{
int nWidth = get_global_size(0);
int nHeight = get_global_size(1);
    int ox=get_global_id(0); // Gets the index in X
    int oy=get_global_id(1); // Gets the index in Y
int i;
double norm;
i= oy*nWidth+ox;
norm=sqrt(CAST(data[i].x*data[i].x+data[i].y*data[i].y));
if (norm>0)
{
data[i].x=(data[i].x/norm)*idata[i].x;
data[i].y=(data[i].y/norm)*idata[i].x;
}
else
{
data[i].x=idata[i].x;
data[i].y=0;
}
}
"""
#TODO: The GS algorithm should also use a maximum error condition to stop,
# not only the iteration count
def gs(idata,itera=10, ia=None):
"""Gerchberg-Saxton algorithm to calculate DOEs
    Calculates the phase distribution in an object plane needed to obtain a
    specific amplitude distribution in the target plane. It uses an
    FFT to calculate the field propagation.
    The wavefront at the DOE plane is assumed to be a plane wave.
**ARGUMENTS:**
========== ======================================================
idata numpy array containing the target amplitude distribution
itera Maximum number of iterations
    ia         Illumination amplitude at the hologram plane. If not given,
               it is assumed to be a constant amplitude with a value
               of 1. If given, it should be an array with the same shape
               as idata.
========== ======================================================
"""
    if ia is None:
inpa=ones(idata.shape)
else:
inpa=ia
    assert idata.shape==inpa.shape, "ia and idata must have the same dimensions"
fdata=fftshift(fft2(ifftshift(idata)))
e=1000
ea=1000
for i in range (itera):
fdata=exp(1.j*angle(fdata))*inpa
rdata=ifftshift(ifft2(fftshift(fdata)))
e= (abs(rdata)-idata).std()
if e>ea:
break
ea=e
rdata=exp(1.j*angle(rdata))*(idata)
fdata=fftshift(fft2(ifftshift(rdata)))
fdata=exp(1.j*angle(fdata))
return fdata*inpa
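# Minimal usage sketch for gs(); the target pattern below is an assumed toy
# example (a bright square on a dark background), not data from the library.
def _gs_usage_example():
    target = zeros((256, 256))
    target[96:160, 96:160] = 1.0     # desired amplitude in the target plane
    hologram = gs(target, itera=20)  # complex field at the DOE plane
    doe_phase = angle(hologram)      # phase distribution to engrave on the DOE
    return doe_phase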
def gs_mod(idata,itera=10,osize=256):
    """Modified Gerchberg-Saxton algorithm to calculate DOEs
    Calculates the phase distribution in an object plane needed to obtain a
    specific amplitude distribution in the target plane. It uses an
    FFT to calculate the field propagation.
    The wavefront at the DOE plane is assumed to be a plane wave.
    This algorithm leaves a window around the image plane to allow the
noise to move there. It only optimises the center of the image.
**ARGUMENTS:**
========== ======================================================
idata numpy array containing the target amplitude distribution
itera Maximum number of iterations
osize Size of the center of the image to be optimized
It should be smaller than the image itself.
========== ======================================================
"""
M,N=idata.shape
cut=osize//2
zone=zeros_like(idata)
zone[M/2-cut:M/2+cut,N/2-cut:N/2+cut]=1
zone=zone.astype(bool)
mask=exp(2.j*pi*random(idata.shape))
mask[zone]=0
#~ imshow(abs(mask)),colorbar()
    fdata=fftshift(fft2(ifftshift(idata+mask))) # Note: adding this mask is very important, otherwise convergence is much slower
e=1000
ea=1000
for i in range (itera):
fdata=exp(1.j*angle(fdata))
rdata=ifftshift(ifft2(fftshift(fdata)))
#~ e= (abs(rdata[zone])-idata[zone]).std()
#~ if e>ea:
#~
#~ break
ea=e
rdata[zone]=exp(1.j*angle(rdata[zone]))*(idata[zone])
fdata=fftshift(fft2(ifftshift(rdata)))
fdata=exp(1.j*angle(fdata))
return fdata
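# Sketch of the windowed variant; the 1024x1024 target and osize=256 below are
# assumed example values.  Only the central osize x osize region is forced to
# the target amplitude, so the reconstruction error can migrate outside it.
def _gs_mod_usage_example():
    target = zeros((1024, 1024))
    target[480:544, 480:544] = 1.0
    hologram = gs_mod(target, itera=20, osize=256)
    return angle(hologram)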
def gs_gpu(idata,itera=100):
"""Gerchberg-Saxton algorithm to calculate DOEs using the GPU
    Calculates the phase distribution in an object plane needed to obtain a
    specific amplitude distribution in the target plane. It uses an
    FFT to calculate the field propagation.
    The wavefront at the DOE plane is assumed to be a plane wave.
**ARGUMENTS:**
========== ======================================================
idata numpy array containing the target amplitude distribution
itera Maximum number of iterations
========== ======================================================
"""
pl=cl.get_platforms()[0]
devices=pl.get_devices(device_type=cl.device_type.GPU)
ctx = cl.Context(devices=[devices[0]])
queue = cl.CommandQueue(ctx)
    plan = Plan(idata.shape, queue=queue,dtype=complex128) # does not work with the string "complex128"
src = str(Template(KERNEL).render(
double_support=all(
has_double_support(dev) for dev in devices),
amd_double_support=all(
has_amd_double_support(dev) for dev in devices)
))
prg = cl.Program(ctx,src).build()
idata_gpu=cl_array.to_device(queue, ifftshift(idata).astype("complex128"))
fdata_gpu=cl_array.empty_like(idata_gpu)
rdata_gpu=cl_array.empty_like(idata_gpu)
plan.execute(idata_gpu.data,fdata_gpu.data)
e=1000
ea=1000
for i in range (itera):
prg.norm(queue, fdata_gpu.shape, None,fdata_gpu.data)
plan.execute(fdata_gpu.data,rdata_gpu.data,inverse=True)
tr=rdata_gpu.get()
rdata=ifftshift(tr)
        #TODO: This calculation should be done on the GPU
e= (abs(rdata)-idata).std()
if e>ea:
break
ea=e
prg.norm2(queue, rdata_gpu.shape,None,rdata_gpu.data,idata_gpu.data)
plan.execute(rdata_gpu.data,fdata_gpu.data)
fdata=fdata_gpu.get()
#~ prg.norm(queue, fdata_gpu.shape, None,fdata_gpu.data)
fdata=ifftshift(fdata)
fdata=exp(1.j*angle(fdata))
#~ fdata=fdata_gpu.get()
return fdata
def gs_mod_gpu(idata,itera=10,osize=256):
cut=osize//2
pl=cl.get_platforms()[0]
devices=pl.get_devices(device_type=cl.device_type.GPU)
ctx = cl.Context(devices=[devices[0]])
queue = cl.CommandQueue(ctx)
    plan = Plan(idata.shape, queue=queue,dtype=complex128) # does not work with the string "complex128"
src = str(Template(KERNEL).render(
double_support=all(
has_double_support(dev) for dev in devices),
amd_double_support=all(
has_amd_double_support(dev) for dev in devices)
))
prg = cl.Program(ctx,src).build()
idata_gpu=cl_array.to_device(queue, ifftshift(idata).astype("complex128"))
fdata_gpu=cl_array.empty_like(idata_gpu)
rdata_gpu=cl_array.empty_like(idata_gpu)
plan.execute(idata_gpu.data,fdata_gpu.data)
mask=exp(2.j*pi*random(idata.shape))
mask[512-cut:512+cut,512-cut:512+cut]=0
idata_gpu=cl_array.to_device(queue, ifftshift(idata+mask).astype("complex128"))
fdata_gpu=cl_array.empty_like(idata_gpu)
rdata_gpu=cl_array.empty_like(idata_gpu)
    error_gpu=cl_array.to_device(queue, zeros(idata_gpu.shape).astype("double"))
plan.execute(idata_gpu.data,fdata_gpu.data)
e=1000
ea=1000
for i in range (itera):
prg.norm(queue, fdata_gpu.shape, None,fdata_gpu.data)
plan.execute(fdata_gpu.data,rdata_gpu.data,inverse=True)
#~ prg.norm1(queue, rdata_gpu.shape,None,rdata_gpu.data,idata_gpu.data,error_gpu.data, int32(cut))
norm1=prg.norm1
norm1.set_scalar_arg_dtypes([None, None, None, int32])
norm1(queue, rdata_gpu.shape,None,rdata_gpu.data,idata_gpu.data,error_gpu.data, int32(cut))
e= sqrt(cl_array.sum(error_gpu).get())/(2*cut)
#~ if e>ea:
#~
#~ break
#~ ea=e
plan.execute(rdata_gpu.data,fdata_gpu.data)
fdata=fdata_gpu.get()
fdata=ifftshift(fdata)
fdata=exp(1.j*angle(fdata))
return fdata
| bsd-3-clause | -3,010,212,739,262,650,400 | -4,477,593,873,000,879,000 | 30.19242 | 132 | 0.574446 | false |
thundernet8/WRGameVideos-API | venv/lib/python2.7/site-packages/sqlalchemy/util/compat.py | 11 | 6843 | # util/compat.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Handle Python version/platform incompatibilities."""
import sys
try:
import threading
except ImportError:
import dummy_threading as threading
py36 = sys.version_info >= (3, 6)
py33 = sys.version_info >= (3, 3)
py32 = sys.version_info >= (3, 2)
py3k = sys.version_info >= (3, 0)
py2k = sys.version_info < (3, 0)
py265 = sys.version_info >= (2, 6, 5)
jython = sys.platform.startswith('java')
pypy = hasattr(sys, 'pypy_version_info')
win32 = sys.platform.startswith('win')
cpython = not pypy and not jython # TODO: something better for this ?
import collections
next = next
if py3k:
import pickle
else:
try:
import cPickle as pickle
except ImportError:
import pickle
# work around http://bugs.python.org/issue2646
if py265:
safe_kwarg = lambda arg: arg
else:
safe_kwarg = str
ArgSpec = collections.namedtuple("ArgSpec",
["args", "varargs", "keywords", "defaults"])
if py3k:
import builtins
from inspect import getfullargspec as inspect_getfullargspec
from urllib.parse import (quote_plus, unquote_plus,
parse_qsl, quote, unquote)
import configparser
from io import StringIO
from io import BytesIO as byte_buffer
def inspect_getargspec(func):
return ArgSpec(
*inspect_getfullargspec(func)[0:4]
)
string_types = str,
binary_type = bytes
text_type = str
int_types = int,
iterbytes = iter
def u(s):
return s
def ue(s):
return s
def b(s):
return s.encode("latin-1")
if py32:
callable = callable
else:
def callable(fn):
return hasattr(fn, '__call__')
def cmp(a, b):
return (a > b) - (a < b)
from functools import reduce
print_ = getattr(builtins, "print")
import_ = getattr(builtins, '__import__')
import itertools
itertools_filterfalse = itertools.filterfalse
itertools_filter = filter
itertools_imap = map
from itertools import zip_longest
import base64
def b64encode(x):
return base64.b64encode(x).decode('ascii')
def b64decode(x):
return base64.b64decode(x.encode('ascii'))
else:
from inspect import getargspec as inspect_getfullargspec
inspect_getargspec = inspect_getfullargspec
from urllib import quote_plus, unquote_plus, quote, unquote
from urlparse import parse_qsl
import ConfigParser as configparser
from StringIO import StringIO
from cStringIO import StringIO as byte_buffer
string_types = basestring,
binary_type = str
text_type = unicode
int_types = int, long
def iterbytes(buf):
return (ord(byte) for byte in buf)
def u(s):
# this differs from what six does, which doesn't support non-ASCII
# strings - we only use u() with
# literal source strings, and all our source files with non-ascii
# in them (all are tests) are utf-8 encoded.
return unicode(s, "utf-8")
def ue(s):
return unicode(s, "unicode_escape")
def b(s):
return s
def import_(*args):
if len(args) == 4:
args = args[0:3] + ([str(arg) for arg in args[3]],)
return __import__(*args)
callable = callable
cmp = cmp
reduce = reduce
import base64
b64encode = base64.b64encode
b64decode = base64.b64decode
def print_(*args, **kwargs):
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
for arg in enumerate(args):
if not isinstance(arg, basestring):
arg = str(arg)
fp.write(arg)
import itertools
itertools_filterfalse = itertools.ifilterfalse
itertools_filter = itertools.ifilter
itertools_imap = itertools.imap
from itertools import izip_longest as zip_longest
import time
if win32 or jython:
time_func = time.clock
else:
time_func = time.time
from collections import namedtuple
from operator import attrgetter as dottedgetter
if py3k:
def reraise(tp, value, tb=None, cause=None):
if cause is not None:
value.__cause__ = cause
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
def raise_from_cause(exception, exc_info=None):
if exc_info is None:
exc_info = sys.exc_info()
exc_type, exc_value, exc_tb = exc_info
reraise(type(exception), exception, tb=exc_tb, cause=exc_value)
else:
exec("def reraise(tp, value, tb=None, cause=None):\n"
" raise tp, value, tb\n")
def raise_from_cause(exception, exc_info=None):
# not as nice as that of Py3K, but at least preserves
# the code line where the issue occurred
if exc_info is None:
exc_info = sys.exc_info()
exc_type, exc_value, exc_tb = exc_info
reraise(type(exception), exception, tb=exc_tb)
if py3k:
exec_ = getattr(builtins, 'exec')
else:
def exec_(func_text, globals_, lcl=None):
if lcl is None:
exec('exec func_text in globals_')
else:
exec('exec func_text in globals_, lcl')
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass.
Drops the middle class upon creation.
Source: http://lucumr.pocoo.org/2013/5/21/porting-to-python-3-redux/
"""
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
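# Small sketch of with_metaclass(); Meta and Base are assumed illustrative
# names.  The temporary class above only exists while the class body is being
# evaluated, so Final is effectively created as Meta('Final', (Base,), {...}):
#
#   class Meta(type):
#       pass
#   class Base(object):
#       pass
#   class Final(with_metaclass(Meta, Base)):
#       pass
#   assert type(Final) is Meta and Final.__bases__ == (Base,)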
from contextlib import contextmanager
try:
from contextlib import nested
except ImportError:
# removed in py3k, credit to mitsuhiko for
# workaround
@contextmanager
def nested(*managers):
exits = []
vars = []
exc = (None, None, None)
try:
for mgr in managers:
exit = mgr.__exit__
enter = mgr.__enter__
vars.append(enter())
exits.append(exit)
yield vars
except:
exc = sys.exc_info()
finally:
while exits:
exit = exits.pop()
try:
if exit(*exc):
exc = (None, None, None)
except:
exc = sys.exc_info()
if exc != (None, None, None):
reraise(exc[0], exc[1], exc[2])
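# Hedged usage sketch for the nested() fallback above; the file names are
# assumed examples:
#
#   with nested(open('a.txt'), open('b.txt')) as (fa, fb):
#       data = fa.read() + fb.read()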
| gpl-2.0 | 4,913,815,334,314,230,000 | 2,729,601,891,961,761,000 | 25.019011 | 77 | 0.595791 | false |
Azure/azure-sdk-for-python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_12_01/aio/operations/_images_operations.py | 1 | 29335 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ImagesOperations:
"""ImagesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2019_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
image_name: str,
parameters: "_models.Image",
**kwargs: Any
) -> "_models.Image":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Image"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'imageName': self._serialize.url("image_name", image_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'Image')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Image', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Image', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
image_name: str,
parameters: "_models.Image",
**kwargs: Any
) -> AsyncLROPoller["_models.Image"]:
"""Create or update an image.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param image_name: The name of the image.
:type image_name: str
:param parameters: Parameters supplied to the Create Image operation.
:type parameters: ~azure.mgmt.compute.v2019_12_01.models.Image
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Image or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2019_12_01.models.Image]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Image"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
image_name=image_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Image', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'imageName': self._serialize.url("image_name", image_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'} # type: ignore
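    # Hedged usage sketch; 'client', 'my_rg', 'my_image' and 'image_parameters'
    # are assumed names, not values defined by this SDK:
    #
    #   poller = await client.images.begin_create_or_update(
    #       'my_rg', 'my_image', image_parameters)
    #   image = await poller.result()  # waits for the long-running operation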
async def _update_initial(
self,
resource_group_name: str,
image_name: str,
parameters: "_models.ImageUpdate",
**kwargs: Any
) -> "_models.Image":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Image"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'imageName': self._serialize.url("image_name", image_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ImageUpdate')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Image', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Image', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'} # type: ignore
async def begin_update(
self,
resource_group_name: str,
image_name: str,
parameters: "_models.ImageUpdate",
**kwargs: Any
) -> AsyncLROPoller["_models.Image"]:
"""Update an image.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param image_name: The name of the image.
:type image_name: str
:param parameters: Parameters supplied to the Update Image operation.
:type parameters: ~azure.mgmt.compute.v2019_12_01.models.ImageUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Image or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2019_12_01.models.Image]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Image"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
image_name=image_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Image', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'imageName': self._serialize.url("image_name", image_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
image_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'imageName': self._serialize.url("image_name", image_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
image_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an Image.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param image_name: The name of the image.
:type image_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
image_name=image_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'imageName': self._serialize.url("image_name", image_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'} # type: ignore
async def get(
self,
resource_group_name: str,
image_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.Image":
"""Gets an image.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param image_name: The name of the image.
:type image_name: str
:param expand: The expand expression to apply on the operation.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Image, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2019_12_01.models.Image
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Image"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'imageName': self._serialize.url("image_name", image_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Image', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ImageListResult"]:
"""Gets the list of images under a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ImageListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2019_12_01.models.ImageListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ImageListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ImageListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images'} # type: ignore
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.ImageListResult"]:
"""Gets the list of Images in the subscription. Use nextLink property in the response to get the
next page of Images. Do this till nextLink is null to fetch all the Images.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ImageListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2019_12_01.models.ImageListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ImageListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ImageListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/images'} # type: ignore
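# Hedged usage sketch for the paged list operations above; 'client' and 'my_rg'
# are assumed names.  AsyncItemPaged follows the nextLink paging transparently:
#
#   async for image in client.images.list_by_resource_group('my_rg'):
#       print(image.name)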
| mit | 5,389,861,444,013,197,000 | 8,896,404,560,613,712,000 | 47.407591 | 181 | 0.634907 | false |
hollabaq86/haikuna-matata | env/lib/python2.7/site-packages/flask_script/cli.py | 66 | 2780 | # -*- coding: utf-8 -*-
import getpass
from ._compat import string_types, ascii_lowercase, input
def prompt(name, default=None):
"""
Grab user input from command line.
:param name: prompt text
:param default: default value if no input provided.
"""
prompt = name + (default and ' [%s]' % default or '')
prompt += name.endswith('?') and ' ' or ': '
while True:
rv = input(prompt)
if rv:
return rv
if default is not None:
return default
def prompt_pass(name, default=None):
"""
Grabs hidden (password) input from command line.
:param name: prompt text
:param default: default value if no input provided.
"""
prompt = name + (default and ' [%s]' % default or '')
prompt += name.endswith('?') and ' ' or ': '
while True:
rv = getpass.getpass(prompt)
if rv:
return rv
if default is not None:
return default
def prompt_bool(name, default=False, yes_choices=None, no_choices=None):
"""
Grabs user input from command line and converts to boolean
value.
:param name: prompt text
:param default: default value if no input provided.
:param yes_choices: default 'y', 'yes', '1', 'on', 'true', 't'
:param no_choices: default 'n', 'no', '0', 'off', 'false', 'f'
"""
yes_choices = yes_choices or ('y', 'yes', '1', 'on', 'true', 't')
no_choices = no_choices or ('n', 'no', '0', 'off', 'false', 'f')
while True:
rv = prompt(name, default and yes_choices[0] or no_choices[0])
if not rv:
return default
if rv.lower() in yes_choices:
return True
elif rv.lower() in no_choices:
return False
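# Illustrative interactive call; the prompt text is an assumed example:
#
#   answer = prompt_bool('Drop the database?', default=False)
#
# prints "Drop the database? [n]: " and returns True only for an answer in
# yes_choices (y, yes, 1, on, true, t), False for one in no_choices.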
def prompt_choices(name, choices, default=None, resolve=ascii_lowercase,
no_choice=('none',)):
"""
Grabs user input from command line from set of provided choices.
:param name: prompt text
:param choices: list or tuple of available choices. Choices may be
single strings or (key, value) tuples.
:param default: default value if no input provided.
:param no_choice: acceptable list of strings for "null choice"
"""
_choices = []
options = []
for choice in choices:
if isinstance(choice, string_types):
options.append(choice)
else:
options.append("%s [%s]" % (choice[1], choice[0]))
choice = choice[0]
_choices.append(choice)
while True:
rv = prompt(name + ' - (%s)' % ', '.join(options), default)
if not rv:
return default
rv = resolve(rv)
if rv in no_choice:
return None
if rv in _choices:
return rv
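# Illustrative interactive call; the choices are assumed example values.
# Tuple choices are displayed as "value [key]" and the key is returned:
#
#   env = prompt_choices('Environment',
#                        [('d', 'development'), ('p', 'production')],
#                        default='d')
#
# prints "Environment - (development [d], production [p]) [d]: "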
| mit | -5,422,064,722,289,634,000 | -4,027,207,678,847,830,000 | 27.659794 | 72 | 0.565468 | false |
wubr2000/googleads-python-lib | examples/dfp/v201411/activity_service/get_all_activities.py | 4 | 1934 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all activities.
To create activities, run create_activities.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
activity_service = client.GetService('ActivityService', version='v201411')
# Create statement object to select only all activities.
statement = dfp.FilterStatement()
# Get activities by statement.
while True:
response = activity_service.getActivitiesByStatement(
statement.ToStatement())
if 'results' in response:
# Display results.
for activity in response['results']:
print ('Activity with ID \'%s\', name \'%s\', and type \'%s\' was '
'found.' % (activity['id'], activity['name'], activity['type']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
| apache-2.0 | -3,918,406,561,486,001,700 | -9,062,674,030,784,991,000 | 32.344828 | 79 | 0.719752 | false |
nunezro2/cassandra_cs597 | pylib/cqlshlib/test/test_cqlsh_completion.py | 5 | 11789 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# to configure behavior, define $CQL_TEST_HOST to the destination address
# for Thrift connections, and $CQL_TEST_PORT to the associated port.
from __future__ import with_statement
import re
from .basecase import BaseTestCase, cqlsh
from .cassconnect import testrun_cqlsh
BEL = '\x07' # the terminal-bell character
CTRL_C = '\x03'
TAB = '\t'
# completions not printed out in this many seconds may not be acceptable.
# tune if needed for a slow system, etc, but be aware that the test will
# need to wait this long for each completion test, to make sure more info
# isn't coming
COMPLETION_RESPONSE_TIME = 0.5
completion_separation_re = re.compile(r'\s\s+')
class CqlshCompletionCase(BaseTestCase):
def setUp(self):
self.cqlsh_runner = testrun_cqlsh(cqlver=self.cqlver, env={'COLUMNS': '100000'})
self.cqlsh = self.cqlsh_runner.__enter__()
def tearDown(self):
self.cqlsh_runner.__exit__(None, None, None)
def _trycompletions_inner(self, inputstring, immediate='', choices=(), other_choices_ok=False):
"""
Test tab completion in cqlsh. Enters in the text in inputstring, then
simulates a tab keypress to see what is immediately completed (this
should only happen when there is only one completion possible). If
there is an immediate completion, the new text is expected to match
'immediate'. If there is no immediate completion, another tab keypress
is simulated in order to get a list of choices, which are expected to
match the items in 'choices' (order is not important, but case is).
"""
self.cqlsh.send(inputstring)
self.cqlsh.send(TAB)
completed = self.cqlsh.read_up_to_timeout(COMPLETION_RESPONSE_TIME)
self.assertEqual(completed[:len(inputstring)], inputstring)
completed = completed[len(inputstring):]
completed = completed.replace(BEL, '')
self.assertEqual(completed, immediate, 'cqlsh completed %r, but we expected %r'
% (completed, immediate))
if immediate:
return
self.cqlsh.send(TAB)
choice_output = self.cqlsh.read_up_to_timeout(COMPLETION_RESPONSE_TIME)
if choice_output == BEL:
lines = ()
else:
lines = choice_output.splitlines()
self.assertRegexpMatches(lines[-1], self.cqlsh.prompt.lstrip() + re.escape(inputstring))
choicesseen = set()
for line in lines[:-1]:
choicesseen.update(completion_separation_re.split(line.strip()))
choicesseen.discard('')
if other_choices_ok:
self.assertEqual(set(choices), choicesseen.intersection(choices))
else:
self.assertEqual(set(choices), choicesseen)
def trycompletions(self, inputstring, immediate='', choices=(), other_choices_ok=False):
try:
self._trycompletions_inner(inputstring, immediate, choices, other_choices_ok)
finally:
self.cqlsh.send(CTRL_C) # cancel any current line
self.cqlsh.read_to_next_prompt()
def strategies(self):
return self.module.CqlRuleSet.replication_strategies
class TestCqlshCompletion_CQL2(CqlshCompletionCase):
cqlver = 2
module = cqlsh.cqlhandling
def test_complete_on_empty_string(self):
self.trycompletions('', choices=('?', 'ALTER', 'BEGIN', 'CAPTURE', 'CONSISTENCY',
'COPY', 'CREATE', 'DEBUG', 'DELETE', 'DESC', 'DESCRIBE',
'DROP', 'HELP', 'INSERT', 'SELECT', 'SHOW', 'SOURCE',
'TRACING', 'TRUNCATE', 'UPDATE', 'USE', 'exit', 'quit'))
def test_complete_command_words(self):
self.trycompletions('alt', '\b\b\bALTER ')
self.trycompletions('I', 'NSERT INTO ')
self.trycompletions('exit', ' ')
def test_complete_in_string_literals(self):
# would be great if we could get a space after this sort of completion,
# but readline really wants to make things difficult for us
self.trycompletions("insert into system.'NodeId", "Info'")
self.trycompletions("USE '", choices=('system', self.cqlsh.keyspace), other_choices_ok=True)
self.trycompletions("create keyspace blah with strategy_class = 'Sim",
"pleStrategy'")
def test_complete_in_uuid(self):
pass
def test_complete_in_select(self):
pass
def test_complete_in_insert(self):
pass
def test_complete_in_update(self):
pass
def test_complete_in_delete(self):
pass
def test_complete_in_batch(self):
pass
def test_complete_in_create_keyspace(self):
self.trycompletions('create keyspace ', '', choices=('<new_keyspace_name>',))
self.trycompletions('create keyspace moo ', "WITH strategy_class = '")
self.trycompletions("create keyspace '12SomeName' with ", "strategy_class = '")
self.trycompletions("create keyspace moo with strategy_class", " = '")
self.trycompletions("create keyspace moo with strategy_class='",
choices=self.strategies())
self.trycompletions("create keySPACE 123 with strategy_class='SimpleStrategy' A",
"ND strategy_options:replication_factor = ")
self.trycompletions("create keyspace fish with strategy_class='SimpleStrategy'"
"and strategy_options:replication_factor = ", '',
choices=('<option_value>',))
self.trycompletions("create keyspace 'PB and J' with strategy_class="
"'NetworkTopologyStrategy' AND", ' ')
self.trycompletions("create keyspace 'PB and J' with strategy_class="
"'NetworkTopologyStrategy' AND ", '',
choices=('<strategy_option_name>',))
def test_complete_in_drop_keyspace(self):
pass
def test_complete_in_create_columnfamily(self):
pass
def test_complete_in_drop_columnfamily(self):
pass
def test_complete_in_truncate(self):
pass
def test_complete_in_alter_columnfamily(self):
pass
def test_complete_in_use(self):
pass
def test_complete_in_create_index(self):
pass
def test_complete_in_drop_index(self):
pass
class TestCqlshCompletion_CQL3final(TestCqlshCompletion_CQL2):
cqlver = '3.0.0'
module = cqlsh.cql3handling
def test_complete_on_empty_string(self):
self.trycompletions('', choices=('?', 'ALTER', 'BEGIN', 'CAPTURE', 'CONSISTENCY',
'COPY', 'CREATE', 'DEBUG', 'DELETE', 'DESC', 'DESCRIBE',
'DROP', 'GRANT', 'HELP', 'INSERT', 'LIST', 'REVOKE',
'SELECT', 'SHOW', 'SOURCE', 'TRACING', 'TRUNCATE', 'UPDATE',
'USE', 'exit', 'quit'))
def test_complete_in_create_keyspace(self):
self.trycompletions('create keyspace ', '', choices=('<identifier>', '<quotedName>'))
self.trycompletions('create keyspace moo ',
"WITH replication = {'class': '")
self.trycompletions('create keyspace "12SomeName" with ',
"replication = {'class': '")
self.trycompletions("create keyspace fjdkljf with foo=bar ", "",
choices=('AND', ';'))
self.trycompletions("create keyspace fjdkljf with foo=bar AND ",
"replication = {'class': '")
self.trycompletions("create keyspace moo with replication", " = {'class': '")
self.trycompletions("create keyspace moo with replication=", " {'class': '")
self.trycompletions("create keyspace moo with replication={", "'class':'")
self.trycompletions("create keyspace moo with replication={'class'", ":'")
self.trycompletions("create keyspace moo with replication={'class': ", "'")
self.trycompletions("create keyspace moo with replication={'class': '", "",
choices=self.strategies())
# ttl is an "unreserved keyword". should work
self.trycompletions("create keySPACE ttl with replication ="
"{ 'class' : 'SimpleStrategy'", ", 'replication_factor': ")
self.trycompletions("create keyspace ttl with replication ="
"{'class':'SimpleStrategy',", " 'replication_factor': ")
self.trycompletions("create keyspace \"ttl\" with replication ="
"{'class': 'SimpleStrategy', ", "'replication_factor': ")
self.trycompletions("create keyspace \"ttl\" with replication ="
"{'class': 'SimpleStrategy', 'repl", "ication_factor'")
self.trycompletions("create keyspace foo with replication ="
"{'class': 'SimpleStrategy', 'replication_factor': ", '',
choices=('<value>',))
self.trycompletions("create keyspace foo with replication ="
"{'class': 'SimpleStrategy', 'replication_factor': 1", '',
choices=('<value>',))
self.trycompletions("create keyspace foo with replication ="
"{'class': 'SimpleStrategy', 'replication_factor': 1 ", '}')
self.trycompletions("create keyspace foo with replication ="
"{'class': 'SimpleStrategy', 'replication_factor': 1, ",
'', choices=())
self.trycompletions("create keyspace foo with replication ="
"{'class': 'SimpleStrategy', 'replication_factor': 1} ",
'', choices=('AND', ';'))
self.trycompletions("create keyspace foo with replication ="
"{'class': 'NetworkTopologyStrategy', ", '',
choices=('<dc_name>',))
self.trycompletions("create keyspace \"PB and J\" with replication={"
"'class': 'NetworkTopologyStrategy'", ', ')
self.trycompletions("create keyspace PBJ with replication={"
"'class': 'NetworkTopologyStrategy'} and ",
"durable_writes = '")
def test_complete_in_string_literals(self):
# would be great if we could get a space after this sort of completion,
# but readline really wants to make things difficult for us
self.trycompletions('insert into system."NodeId', 'Info"')
self.trycompletions('USE "', choices=('system', self.cqlsh.keyspace),
other_choices_ok=True)
self.trycompletions("create keyspace blah with replication = {'class': 'Sim",
"pleStrategy'")
| apache-2.0 | -5,786,315,000,136,005,000 | -2,976,941,287,068,031,000 | 47.514403 | 101 | 0.595725 | false |
msiedlarek/qtwebkit | Tools/Scripts/webkitpy/common/config/ports_mock.py | 121 | 2482 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class MockPort(object):
def name(self):
return "MockPort"
def check_webkit_style_command(self):
return ["mock-check-webkit-style"]
def update_webkit_command(self, non_interactive=False):
return ["mock-update-webkit"]
def build_webkit_command(self, build_style=None):
return ["mock-build-webkit"]
def prepare_changelog_command(self):
return ['mock-prepare-ChangeLog']
def run_python_unittests_command(self):
return ['mock-test-webkitpy']
def run_perl_unittests_command(self):
return ['mock-test-webkitperl']
def run_javascriptcore_tests_command(self):
return ['mock-run-javacriptcore-tests']
def run_webkit_unit_tests_command(self):
return ['mock-run-webkit-unit-tests']
def run_webkit_tests_command(self):
return ['mock-run-webkit-tests']
def run_bindings_tests_command(self):
return ['mock-run-bindings-tests']
| lgpl-3.0 | 2,218,180,517,699,805,000 | -2,235,137,279,540,242,400 | 39.032258 | 72 | 0.734488 | false |
gigq/flasktodo | werkzeug/security.py | 25 | 3570 | # -*- coding: utf-8 -*-
"""
werkzeug.security
~~~~~~~~~~~~~~~~~
Security related helpers such as secure password hashing tools.
:copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import hmac
import string
from random import SystemRandom
# because the API of hmac changed with the introduction of the
# new hashlib module, we have to support both. This sets up a
# mapping to the digest factory functions and the digest modules
# (or factory functions with changed API)
try:
from hashlib import sha1, md5
_hash_funcs = _hash_mods = {'sha1': sha1, 'md5': md5}
_sha1_mod = sha1
_md5_mod = md5
except ImportError:
import sha as _sha1_mod, md5 as _md5_mod
_hash_mods = {'sha1': _sha1_mod, 'md5': _md5_mod}
_hash_funcs = {'sha1': _sha1_mod.new, 'md5': _md5_mod.new}
SALT_CHARS = string.letters + string.digits
_sys_rng = SystemRandom()
def gen_salt(length):
"""Generate a random string of SALT_CHARS with specified ``length``."""
if length <= 0:
raise ValueError('requested salt of length <= 0')
return ''.join(_sys_rng.choice(SALT_CHARS) for _ in xrange(length))
def _hash_internal(method, salt, password):
"""Internal password hash helper. Supports plaintext without salt,
    unsalted and salted passwords. In case salted passwords are used,
hmac is used.
"""
if method == 'plain':
return password
if salt:
if method not in _hash_mods:
return None
if isinstance(salt, unicode):
salt = salt.encode('utf-8')
h = hmac.new(salt, None, _hash_mods[method])
else:
if method not in _hash_funcs:
return None
h = _hash_funcs[method]()
if isinstance(password, unicode):
password = password.encode('utf-8')
h.update(password)
return h.hexdigest()
def generate_password_hash(password, method='sha1', salt_length=8):
"""Hash a password with the given method and salt with with a string of
the given length. The format of the string returned includes the method
that was used so that :func:`check_password_hash` can check the hash.
The format for the hashed string looks like this::
method$salt$hash
This method can **not** generate unsalted passwords but it is possible
to set the method to plain to enforce plaintext passwords. If a salt
is used, hmac is used internally to salt the password.
:param password: the password to hash
:param method: the hash method to use (``'md5'`` or ``'sha1'``)
    :param salt_length: the length of the salt in letters
"""
salt = method != 'plain' and gen_salt(salt_length) or ''
h = _hash_internal(method, salt, password)
if h is None:
raise TypeError('invalid method %r' % method)
return '%s$%s$%s' % (method, salt, h)
def check_password_hash(pwhash, password):
"""check a password against a given salted and hashed password value.
In order to support unsalted legacy passwords this method supports
plain text passwords, md5 and sha1 hashes (both salted and unsalted).
Returns `True` if the password matched, `False` otherwise.
:param pwhash: a hashed string like returned by
:func:`generate_password_hash`
:param password: the plaintext password to compare against the hash
"""
if pwhash.count('$') < 2:
return False
method, salt, hashval = pwhash.split('$', 2)
return _hash_internal(method, salt, password) == hashval
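# Minimal usage sketch appended for illustration (not part of the original
# module); it exercises only the two public helpers defined above and shows the
# "method$salt$hash" round trip described in their docstrings.
if __name__ == '__main__':
    pwhash = generate_password_hash('s3cret', method='sha1', salt_length=8)
    # pwhash now looks like 'sha1$<8 salt chars>$<40 hex digits>'
    assert check_password_hash(pwhash, 's3cret')
    assert not check_password_hash(pwhash, 'wrong-password')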
| mit | -9,186,579,703,016,513,000 | -6,532,991,791,856,061,000 | 33.326923 | 76 | 0.658263 | false |
pradyu1993/scikit-learn | sklearn/gaussian_process/gaussian_process.py | 1 | 34415 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <[email protected]>
# (mostly translation, see implementation details)
# License: BSD style
import numpy as np
from scipy import linalg, optimize, rand
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import array2d, check_random_state
from ..utils import deprecated
from . import regression_models as regression
from . import correlation_models as correlation
MACHINE_EPSILON = np.finfo(np.double).eps
if hasattr(linalg, 'solve_triangular'):
# only in scipy since 0.9
solve_triangular = linalg.solve_triangular
else:
# slower, but works
def solve_triangular(x, y, lower=True):
return linalg.solve(x, y)
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
distances in D: D[k] = np.abs(X[ij[k, 0]] - Y[ij[k, 1]]).
"""
X = array2d(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) / 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij.astype(np.int)
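# Illustrative note added for clarity (not in the original module): for a 1D
# design X = [[0.], [1.], [3.]], l1_cross_distances(X) returns
#     D  = [[1.], [3.], [2.]]         # |x_i - x_j| for the pairs (0,1), (0,2), (1,2)
#     ij = [[0, 1], [0, 2], [1, 2]]   # the corresponding index pairs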
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
        the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
`theta_`: array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
`reduced_likelihood_function_value_`: array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
The presentation implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
# Run input checks
self._check_params()
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) with the observations of the
scalar output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X = array2d(X)
y = np.asarray(y).ravel()[:, np.newaxis]
# Check shapes of DOE & observations
n_samples_X, n_features = X.shape
n_samples_y = y.shape[0]
if n_samples_X != n_samples_y:
raise ValueError("X and y must have the same number of rows.")
else:
n_samples = n_samples_X
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if np.min(np.sum(D, axis=1)) == 0. \
and self.corr != correlation.pure_nugget:
raise Exception("Multiple input features cannot have the same"
" value")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
+ "likely something is going wrong with the "
+ "regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
+ "n_samples=%d must be greater than the "
+ "regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
+ "autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
+ "Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
+ "Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
+ "Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes evalMSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
            evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like
An array with shape (n_eval, ) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) with the Mean Squared Error at x.
"""
# Check input shapes
X = array2d(X)
n_eval, n_features_X = X.shape
n_samples, n_features = self.X.shape
# Run input checks
self._check_params(n_samples)
if n_features_X != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
+ "should match the sample size used for fit() "
+ "which is %d.") % (n_features_X, n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
+ "at instanciation. Need to recompute "
+ "autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T)
else:
# Ordinary Kriging
u = np.zeros(y.shape)
MSE = self.sigma2 * (1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if np.min(np.sum(D, axis=1)) == 0. \
and self.corr != correlation.pure_nugget:
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
pass
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
+ "of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
@deprecated("to be removed in 0.14, access ``self.theta_`` etc. directly "
" after fit.")
def arg_max_reduced_likelihood_function(self):
return self._arg_max_reduced_likelihood_function()
@property
@deprecated('``theta`` is deprecated and will be removed in 0.14, '
'please use ``theta_`` instead.')
def theta(self):
return self.theta_
@property
@deprecated("``reduced_likelihood_function_value`` is deprecated and will"
"be removed in 0.14, please use "
"``reduced_likelihood_function_value_`` instead.")
def reduced_likelihood_function_value(self):
return self.reduced_likelihood_function_value_
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print "The chosen optimizer is: " + str(self.optimizer)
if self.random_start > 1:
print str(self.random_start) + " random starts are required."
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(theta=10.
** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t: \
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t: \
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = np.log10(self.thetaL) \
+ rand(self.theta0.size).reshape(self.theta0.shape) \
* np.log10(self.thetaU / self.thetaL)
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0), constraints, iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_minus_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
optimal_rlf_value = - optimal_minus_rlf_value
                # Compare the new optimum to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print "%s completed" % (5 * percent_completed)
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
            # Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = array2d(self.theta0.min())
self.thetaL = array2d(self.thetaL.min())
self.thetaU = array2d(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print "Proceeding along dimension %d..." % (i + 1)
self.theta0 = array2d(theta_iso)
self.thetaL = array2d(thetaL[0, i])
self.thetaU = array2d(thetaU[0, i])
def corr_cut(t, d):
return corr(array2d(np.hstack([
optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i + 1)::]])), d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
            # Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError(("This optimizer ('%s') is not "
+ "implemented yet. Please contribute!")
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError(("regr should be one of %s or callable, "
+ "%s was given.")
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = array2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError(("corr should be one of %s or callable, "
+ "%s was given.")
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
+ "'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = array2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = array2d(self.thetaL)
self.thetaU = array2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
+ "same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
+ "thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
+ "neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
if np.any(self.nugget) < 0.:
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if not self.optimizer in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
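if __name__ == '__main__':
    # Minimal usage sketch appended for illustration (not part of the original
    # module); it mirrors the class docstring example and additionally requests
    # the mean squared error from predict().
    X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
    y = (X * np.sin(X)).ravel()
    gp = GaussianProcess(theta0=1e-1, thetaL=1e-3, thetaU=1.)
    gp.fit(X, y)
    x = np.atleast_2d(np.linspace(0, 10, 20)).T
    y_pred, sigma2_pred = gp.predict(x, eval_MSE=True)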
| bsd-3-clause | -2,553,882,699,494,784,000 | 5,866,618,068,844,550,000 | 37.366778 | 79 | 0.555165 | false |
devendermishrajio/nova | nova/network/minidns.py | 59 | 7113 | # Copyright 2011 Andrew Bogott for the Wikimedia Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import tempfile
from oslo_config import cfg
from oslo_log import log as logging
from nova import exception
from nova.i18n import _, _LI, _LW
from nova.network import dns_driver
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class MiniDNS(dns_driver.DNSDriver):
"""Trivial DNS driver. This will read/write to a local, flat file
and have no effect on your actual DNS system. This class is
strictly for testing purposes, and should keep you out of dependency
hell.
Note that there is almost certainly a race condition here that
will manifest anytime instances are rapidly created and deleted.
A proper implementation will need some manner of locking.
"""
def __init__(self):
if CONF.log_dir:
self.filename = os.path.join(CONF.log_dir, "dnstest.txt")
self.tempdir = None
else:
self.tempdir = tempfile.mkdtemp()
self.filename = os.path.join(self.tempdir, "dnstest.txt")
LOG.debug('minidns file is |%s|', self.filename)
if not os.path.exists(self.filename):
f = open(self.filename, "w+")
f.write("# minidns\n\n\n")
f.close()
def get_domains(self):
entries = []
infile = open(self.filename, 'r')
for line in infile:
entry = self.parse_line(line)
if entry and entry['address'] == 'domain':
entries.append(entry['name'])
infile.close()
return entries
def qualify(self, name, domain):
if domain:
qualified = "%s.%s" % (name, domain)
else:
qualified = name
return qualified.lower()
def create_entry(self, name, address, type, domain):
if name is None:
raise exception.InvalidInput(_("Invalid name"))
if type.lower() != 'a':
raise exception.InvalidInput(_("This driver only supports "
"type 'a'"))
if self.get_entries_by_name(name, domain):
raise exception.FloatingIpDNSExists(name=name, domain=domain)
outfile = open(self.filename, 'a+')
outfile.write("%s %s %s\n" %
(address, self.qualify(name, domain), type))
outfile.close()
def parse_line(self, line):
vals = line.split()
if len(vals) < 3:
return None
else:
entry = {}
entry['address'] = vals[0].lower()
entry['name'] = vals[1].lower()
entry['type'] = vals[2].lower()
if entry['address'] == 'domain':
entry['domain'] = entry['name']
else:
entry['domain'] = entry['name'].partition('.')[2]
return entry
def delete_entry(self, name, domain):
if name is None:
raise exception.InvalidInput(_("Invalid name"))
deleted = False
infile = open(self.filename, 'r')
outfile = tempfile.NamedTemporaryFile('w', delete=False)
for line in infile:
entry = self.parse_line(line)
if ((not entry) or
entry['name'] != self.qualify(name, domain)):
outfile.write(line)
else:
deleted = True
infile.close()
outfile.close()
shutil.move(outfile.name, self.filename)
if not deleted:
LOG.warning(_LW('Cannot delete entry |%s|'),
self.qualify(name, domain))
raise exception.NotFound
def modify_address(self, name, address, domain):
if not self.get_entries_by_name(name, domain):
raise exception.NotFound
infile = open(self.filename, 'r')
outfile = tempfile.NamedTemporaryFile('w', delete=False)
for line in infile:
entry = self.parse_line(line)
if (entry and
entry['name'] == self.qualify(name, domain)):
outfile.write("%s %s %s\n" %
(address, self.qualify(name, domain), entry['type']))
else:
outfile.write(line)
infile.close()
outfile.close()
shutil.move(outfile.name, self.filename)
def get_entries_by_address(self, address, domain):
entries = []
infile = open(self.filename, 'r')
for line in infile:
entry = self.parse_line(line)
if entry and entry['address'] == address.lower():
if entry['name'].endswith(domain.lower()):
name = entry['name'].split(".")[0]
if name not in entries:
entries.append(name)
infile.close()
return entries
def get_entries_by_name(self, name, domain):
entries = []
infile = open(self.filename, 'r')
for line in infile:
entry = self.parse_line(line)
if (entry and
entry['name'] == self.qualify(name, domain)):
entries.append(entry['address'])
infile.close()
return entries
def delete_dns_file(self):
if os.path.exists(self.filename):
try:
os.remove(self.filename)
except OSError:
pass
if self.tempdir and os.path.exists(self.tempdir):
try:
shutil.rmtree(self.tempdir)
except OSError:
pass
def create_domain(self, fqdomain):
if self.get_entries_by_name(fqdomain, ''):
raise exception.FloatingIpDNSExists(name=fqdomain, domain='')
outfile = open(self.filename, 'a+')
outfile.write("%s %s %s\n" %
('domain', fqdomain, 'domain'))
outfile.close()
def delete_domain(self, fqdomain):
deleted = False
infile = open(self.filename, 'r')
outfile = tempfile.NamedTemporaryFile('w', delete=False)
for line in infile:
entry = self.parse_line(line)
if ((not entry) or
entry['domain'] != fqdomain.lower()):
outfile.write(line)
else:
LOG.info(_LI("deleted %s"), entry)
deleted = True
infile.close()
outfile.close()
shutil.move(outfile.name, self.filename)
if not deleted:
LOG.warning(_LW('Cannot delete domain |%s|'), fqdomain)
raise exception.NotFound
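# Illustrative sketch added as a comment (not part of the original driver).
# Assuming an oslo.config environment where CONF.log_dir is unset, the driver
# keeps its records in a temporary flat file:
#
#     driver = MiniDNS()
#     driver.create_domain('example.org')
#     driver.create_entry('web1', '192.0.2.10', 'A', 'example.org')
#     driver.get_entries_by_name('web1', 'example.org')    # ['192.0.2.10']
#     driver.delete_entry('web1', 'example.org')
#     driver.delete_dns_file()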
| apache-2.0 | -1,620,741,842,915,721,000 | 1,690,260,935,943,291,000 | 33.197115 | 78 | 0.556587 | false |
danielneis/osf.io | api/base/utils.py | 3 | 4028 | # -*- coding: utf-8 -*-
from modularodm import Q
from modularodm.exceptions import NoResultsFound
from rest_framework.exceptions import NotFound
from rest_framework.reverse import reverse
import furl
from website import util as website_util # noqa
from website import settings as website_settings
from framework.auth import Auth, User
from api.base.exceptions import Gone
# These values are copied from rest_framework.fields.BooleanField
# BooleanField cannot be imported here without raising an
# ImproperlyConfigured error
TRUTHY = set(('t', 'T', 'true', 'True', 'TRUE', '1', 1, True))
FALSY = set(('f', 'F', 'false', 'False', 'FALSE', '0', 0, 0.0, False))
UPDATE_METHODS = ['PUT', 'PATCH']
def is_bulk_request(request):
"""
Returns True if bulk request. Can be called as early as the parser.
"""
content_type = request.content_type
return 'ext=bulk' in content_type
def is_truthy(value):
return value in TRUTHY
def is_falsy(value):
return value in FALSY
def get_user_auth(request):
"""Given a Django request object, return an ``Auth`` object with the
authenticated user attached to it.
"""
user = request.user
if user.is_anonymous():
auth = Auth(None)
else:
auth = Auth(user)
return auth
def absolute_reverse(view_name, query_kwargs=None, args=None, kwargs=None):
"""Like django's `reverse`, except returns an absolute URL. Also add query parameters."""
relative_url = reverse(view_name, kwargs=kwargs)
url = website_util.api_v2_url(relative_url, params=query_kwargs, base_prefix='')
return url
def get_object_or_error(model_cls, query_or_pk, display_name=None):
display_name = display_name or None
if isinstance(query_or_pk, basestring):
query = Q('_id', 'eq', query_or_pk)
else:
query = query_or_pk
try:
obj = model_cls.find_one(query)
if getattr(obj, 'is_deleted', False) is True:
if display_name is None:
raise Gone
else:
raise Gone(detail='The requested {name} is no longer available.'.format(name=display_name))
# For objects that have been disabled (is_active is False), return a 410.
# The User model is an exception because we still want to allow
# users who are unconfirmed or unregistered, but not users who have been
# disabled.
if model_cls is User:
if obj.is_disabled:
raise Gone(detail='The requested user is no longer available.')
else:
if not getattr(obj, 'is_active', True) or getattr(obj, 'is_deleted', False):
if display_name is None:
raise Gone
else:
raise Gone(detail='The requested {name} is no longer available.'.format(name=display_name))
return obj
except NoResultsFound:
raise NotFound
def waterbutler_url_for(request_type, provider, path, node_id, token, obj_args=None, **query):
"""Reverse URL lookup for WaterButler routes
:param str request_type: data or metadata
:param str provider: The name of the requested provider
:param str path: The path of the requested file or folder
:param str node_id: The id of the node being accessed
:param str token: The cookie to be used or None
    :param dict **query: Additional query parameters to be appended
"""
url = furl.furl(website_settings.WATERBUTLER_URL)
url.path.segments.append(request_type)
url.args.update({
'path': path,
'nid': node_id,
'provider': provider,
})
if token is not None:
url.args['cookie'] = token
if 'view_only' in obj_args:
url.args['view_only'] = obj_args['view_only']
url.args.update(query)
return url.url
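# Illustrative note added for clarity (not part of the original module): with a
# hypothetical website_settings.WATERBUTLER_URL of 'http://localhost:7777', a
# call such as
#     waterbutler_url_for('data', 'osfstorage', '/foo.txt', 'abc12', None, obj_args={})
# would produce a URL whose query carries path, nid and provider, e.g.
#     http://localhost:7777/data?path=/foo.txt&nid=abc12&provider=osfstorage
# (exact encoding and parameter ordering depend on furl).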
def add_dev_only_items(items, dev_only_items):
"""Add some items to a dictionary if in ``DEV_MODE``.
"""
items = items.copy()
if website_settings.DEV_MODE:
items.update(dev_only_items)
return items
| apache-2.0 | 6,409,694,853,402,183,000 | -8,976,713,052,752,132,000 | 32.566667 | 111 | 0.647716 | false |
roadmapper/ansible | test/units/modules/storage/netapp/test_na_ontap_unix_group.py | 23 | 11651 | # (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit test template for ONTAP Ansible module '''
from __future__ import print_function
import json
import pytest
from units.compat import unittest
from units.compat.mock import patch, Mock
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible.module_utils.netapp as netapp_utils
from ansible.modules.storage.netapp.na_ontap_unix_group \
import NetAppOntapUnixGroup as group_module # module under test
if not netapp_utils.has_netapp_lib():
pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
"""prepare arguments so that they will be picked up during module creation"""
args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
class AnsibleExitJson(Exception):
"""Exception class to be raised by module.exit_json and caught by the test case"""
pass
class AnsibleFailJson(Exception):
"""Exception class to be raised by module.fail_json and caught by the test case"""
pass
def exit_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over exit_json; package return data into an exception"""
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over fail_json; package return data into an exception"""
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
class MockONTAPConnection(object):
''' mock server connection to ONTAP host '''
def __init__(self, kind=None, data=None):
''' save arguments '''
self.kind = kind
self.params = data
self.xml_in = None
self.xml_out = None
def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
''' mock invoke_successfully returning xml data '''
self.xml_in = xml
if self.kind == 'group':
xml = self.build_group_info(self.params)
elif self.kind == 'group-fail':
raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
self.xml_out = xml
return xml
@staticmethod
def build_group_info(data):
        ''' build xml data for unix-group-info '''
xml = netapp_utils.zapi.NaElement('xml')
attributes = \
{'attributes-list': {'unix-group-info': {'group-name': data['name'],
'group-id': data['id']}},
'num-records': 1}
xml.translate_struct(attributes)
return xml
class TestMyModule(unittest.TestCase):
''' a group of related Unit Tests '''
def setUp(self):
self.mock_module_helper = patch.multiple(basic.AnsibleModule,
exit_json=exit_json,
fail_json=fail_json)
self.mock_module_helper.start()
self.addCleanup(self.mock_module_helper.stop)
self.server = MockONTAPConnection()
self.mock_group = {
'name': 'test',
'id': '11',
'vserver': 'something',
}
def mock_args(self):
return {
'name': self.mock_group['name'],
'id': self.mock_group['id'],
'vserver': self.mock_group['vserver'],
'hostname': 'test',
'username': 'test_user',
'password': 'test_pass!'
}
def get_group_mock_object(self, kind=None, data=None):
"""
Helper method to return an na_ontap_unix_group object
:param kind: passes this param to MockONTAPConnection()
:return: na_ontap_unix_group object
"""
obj = group_module()
obj.autosupport_log = Mock(return_value=None)
if data is None:
data = self.mock_group
obj.server = MockONTAPConnection(kind=kind, data=data)
return obj
def test_module_fail_when_required_args_missing(self):
''' required arguments are reported as errors '''
with pytest.raises(AnsibleFailJson) as exc:
set_module_args({})
group_module()
def test_get_nonexistent_group(self):
''' Test if get_unix_group returns None for non-existent group '''
set_module_args(self.mock_args())
result = self.get_group_mock_object().get_unix_group()
assert result is None
def test_get_existing_group(self):
''' Test if get_unix_group returns details for existing group '''
set_module_args(self.mock_args())
result = self.get_group_mock_object('group').get_unix_group()
assert result['name'] == self.mock_group['name']
def test_get_xml(self):
set_module_args(self.mock_args())
obj = self.get_group_mock_object('group')
result = obj.get_unix_group()
assert obj.server.xml_in['query']
assert obj.server.xml_in['query']['unix-group-info']
group_info = obj.server.xml_in['query']['unix-group-info']
assert group_info['group-name'] == self.mock_group['name']
assert group_info['vserver'] == self.mock_group['vserver']
def test_create_error_missing_params(self):
data = self.mock_args()
del data['id']
set_module_args(data)
with pytest.raises(AnsibleFailJson) as exc:
self.get_group_mock_object('group').create_unix_group()
assert 'Error: Missing a required parameter for create: (id)' == exc.value.args[0]['msg']
@patch('ansible.modules.storage.netapp.na_ontap_unix_group.NetAppOntapUnixGroup.create_unix_group')
def test_create_called(self, create_group):
set_module_args(self.mock_args())
with pytest.raises(AnsibleExitJson) as exc:
self.get_group_mock_object().apply()
assert exc.value.args[0]['changed']
create_group.assert_called_with()
def test_create_xml(self):
'''Test create ZAPI element'''
set_module_args(self.mock_args())
create = self.get_group_mock_object()
with pytest.raises(AnsibleExitJson) as exc:
create.apply()
mock_key = {
'group-name': 'name',
'group-id': 'id',
}
for key in ['group-name', 'group-id']:
assert create.server.xml_in[key] == self.mock_group[mock_key[key]]
@patch('ansible.modules.storage.netapp.na_ontap_unix_group.NetAppOntapUnixGroup.modify_unix_group')
@patch('ansible.modules.storage.netapp.na_ontap_unix_group.NetAppOntapUnixGroup.delete_unix_group')
def test_delete_called(self, delete_group, modify_group):
''' Test delete existing group '''
data = self.mock_args()
data['state'] = 'absent'
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_group_mock_object('group').apply()
assert exc.value.args[0]['changed']
delete_group.assert_called_with()
assert modify_group.call_count == 0
@patch('ansible.modules.storage.netapp.na_ontap_unix_group.NetAppOntapUnixGroup.get_unix_group')
@patch('ansible.modules.storage.netapp.na_ontap_unix_group.NetAppOntapUnixGroup.modify_unix_group')
def test_modify_called(self, modify_group, get_group):
''' Test modify group group_id '''
data = self.mock_args()
data['id'] = 20
set_module_args(data)
get_group.return_value = {'id': 10}
obj = self.get_group_mock_object('group')
with pytest.raises(AnsibleExitJson) as exc:
obj.apply()
get_group.assert_called_with()
modify_group.assert_called_with({'id': 20})
def test_modify_only_id(self):
''' Test modify group id '''
set_module_args(self.mock_args())
modify = self.get_group_mock_object('group')
modify.modify_unix_group({'id': 123})
print(modify.server.xml_in.to_string())
assert modify.server.xml_in['group-id'] == '123'
with pytest.raises(KeyError):
modify.server.xml_in['id']
def test_modify_xml(self):
        ''' Test the XML elements set when modifying a group '''
set_module_args(self.mock_args())
modify = self.get_group_mock_object('group')
modify.modify_unix_group({'id': 25})
assert modify.server.xml_in['group-name'] == self.mock_group['name']
assert modify.server.xml_in['group-id'] == '25'
@patch('ansible.modules.storage.netapp.na_ontap_unix_group.NetAppOntapUnixGroup.create_unix_group')
@patch('ansible.modules.storage.netapp.na_ontap_unix_group.NetAppOntapUnixGroup.delete_unix_group')
@patch('ansible.modules.storage.netapp.na_ontap_unix_group.NetAppOntapUnixGroup.modify_unix_group')
def test_do_nothing(self, modify, delete, create):
        ''' changed is False and none of the operation methods are called'''
data = self.mock_args()
data['state'] = 'absent'
set_module_args(data)
obj = self.get_group_mock_object()
with pytest.raises(AnsibleExitJson) as exc:
obj.apply()
create.assert_not_called()
delete.assert_not_called()
modify.assert_not_called()
def test_get_exception(self):
set_module_args(self.mock_args())
with pytest.raises(AnsibleFailJson) as exc:
self.get_group_mock_object('group-fail').get_unix_group()
assert 'Error getting UNIX group' in exc.value.args[0]['msg']
def test_create_exception(self):
set_module_args(self.mock_args())
with pytest.raises(AnsibleFailJson) as exc:
self.get_group_mock_object('group-fail').create_unix_group()
assert 'Error creating UNIX group' in exc.value.args[0]['msg']
def test_modify_exception(self):
set_module_args(self.mock_args())
with pytest.raises(AnsibleFailJson) as exc:
self.get_group_mock_object('group-fail').modify_unix_group({'id': '123'})
assert 'Error modifying UNIX group' in exc.value.args[0]['msg']
def test_delete_exception(self):
set_module_args(self.mock_args())
with pytest.raises(AnsibleFailJson) as exc:
self.get_group_mock_object('group-fail').delete_unix_group()
assert 'Error removing UNIX group' in exc.value.args[0]['msg']
@patch('ansible.modules.storage.netapp.na_ontap_unix_group.NetAppOntapUnixGroup.get_unix_group')
def test_add_user_exception(self, get_unix_group):
data = self.mock_args()
data['users'] = 'test_user'
set_module_args(data)
get_unix_group.side_effect = [
{'users': []}
]
with pytest.raises(AnsibleFailJson) as exc:
self.get_group_mock_object('group-fail').modify_users_in_group()
print(exc.value.args[0]['msg'])
assert 'Error adding user' in exc.value.args[0]['msg']
@patch('ansible.modules.storage.netapp.na_ontap_unix_group.NetAppOntapUnixGroup.get_unix_group')
def test_delete_user_exception(self, get_unix_group):
data = self.mock_args()
data['users'] = ''
set_module_args(data)
get_unix_group.side_effect = [
{'users': ['test_user']}
]
with pytest.raises(AnsibleFailJson) as exc:
self.get_group_mock_object('group-fail').modify_users_in_group()
print(exc.value.args[0]['msg'])
assert 'Error deleting user' in exc.value.args[0]['msg']
| gpl-3.0 | -2,333,118,087,820,886,000 | -7,032,905,078,364,260,000 | 39.454861 | 107 | 0.621835 | false |
SteveHNH/ansible | lib/ansible/modules/utilities/logic/set_stats.py | 58 | 1935 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2016 Ansible RedHat, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
author: "Brian Coca (@bcoca)"
module: set_stats
short_description: Set stats for the current ansible run
description:
    - This module allows setting/accumulating stats on the current ansible run, either per host or for all hosts in the run.
    - This module is also supported for Windows targets.
options:
data:
description:
        - A dictionary in which each key represents a stat (or variable) you want to keep track of
required: true
per_host:
description:
        - boolean that indicates whether the stats are per host or for all hosts in the run.
required: no
default: no
aggregate:
description:
        - boolean that indicates whether the provided value is aggregated into the existing stat C(yes) or replaces it C(no)
required: no
default: yes
notes:
- This module is also supported for Windows targets.
- In order for custom stats to be displayed, you must set C(show_custom_stats) in C(ansible.cfg) or C(ANSIBLE_SHOW_CUSTOM_STATS) to C(true).
version_added: "2.3"
'''
EXAMPLES = '''
# Aggregating packages_installed stat per host
- set_stats:
data:
packages_installed: 31
# Aggregating random stats for all hosts using complex arguments
- set_stats:
data:
one_stat: 11
other_stat: "{{ local_var * 2 }}"
another_stat: "{{ some_registered_var.results | map(attribute='ansible_facts.some_fact') | list }}"
per_host: no
# setting stats (not aggregating)
- set_stats:
data:
the_answer: 42
aggregate: no
'''
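# Illustrative note (not part of the original module source): as the notes in
# DOCUMENTATION say, the custom stats set above only appear in the play recap
# when enabled, e.g. (hypothetical playbook name):
#   ANSIBLE_SHOW_CUSTOM_STATS=true ansible-playbook site.yml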
| gpl-3.0 | 1,841,889,556,320,110,800 | -1,039,950,033,308,049,300 | 29.234375 | 144 | 0.670801 | false |
anthraxx/pwndbg | pwndbg/regs.py | 2 | 14879 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Reads register values from the inferior, and provides a
standardized interface to registers like "sp" and "pc".
"""
import collections
import ctypes
import re
import sys
from types import ModuleType
import gdb
import pwndbg.arch
import pwndbg.events
import pwndbg.memoize
import pwndbg.proc
import pwndbg.remote
try:
long
except NameError:
long=int
class RegisterSet:
#: Program counter register
pc = None
#: Stack pointer register
stack = None
#: Frame pointer register
frame = None
#: Return address register
retaddr = None
#: Flags register (eflags, cpsr)
flags = None
    #: List of native-size general-purpose registers
gpr = None
#: List of miscellaneous, valid registers
misc = None
#: Register-based arguments for most common ABI
regs = None
#: Return value register
retval = None
#: Common registers which should be displayed in the register context
common = None
#: All valid registers
all = None
def __init__(self,
pc='pc',
stack='sp',
frame=None,
retaddr=tuple(),
flags=dict(),
gpr=tuple(),
misc=tuple(),
args=tuple(),
retval=None):
self.pc = pc
self.stack = stack
self.frame = frame
self.retaddr = retaddr
self.flags = flags
self.gpr = gpr
self.misc = misc
self.args = args
self.retval = retval
# In 'common', we don't want to lose the ordering of:
self.common = []
for reg in gpr + (frame, stack, pc) + tuple(flags):
if reg and reg not in self.common:
self.common.append(reg)
self.all = set(i for i in misc) | set(flags) | set(self.retaddr) | set(self.common)
self.all -= {None}
def __iter__(self):
for r in self.all:
yield r
arm_cpsr_flags = collections.OrderedDict([
('N', 31), ('Z', 30), ('C', 29), ('V', 28), ('Q', 27), ('J', 24), ('T', 5), ('E', 9), ('A', 8), ('I', 7), ('F', 6)])
arm_xpsr_flags = collections.OrderedDict([
('N', 31), ('Z', 30), ('C', 29), ('V', 28), ('Q', 27), ('T', 24)])
arm = RegisterSet( retaddr = ('lr',),
flags = {'cpsr': arm_cpsr_flags},
gpr = ('r0', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9', 'r10', 'r11', 'r12'),
args = ('r0','r1','r2','r3'),
retval = 'r0')
# ARM Cortex-M
armcm = RegisterSet( retaddr = ('lr',),
flags = {'xpsr': arm_xpsr_flags},
gpr = ('r0', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9', 'r10', 'r11', 'r12'),
args = ('r0','r1','r2','r3'),
retval = 'r0')
# FIXME AArch64 does not have a CPSR register
aarch64 = RegisterSet( retaddr = ('lr',),
flags = {'cpsr':{}},
frame = 'x29',
gpr = ('x0', 'x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7', 'x8', 'x9',
'x10', 'x11', 'x12', 'x13', 'x14', 'x15', 'x16', 'x17', 'x18', 'x19',
'x20', 'x21', 'x22', 'x23', 'x24', 'x25', 'x26', 'x27', 'x28'),
misc = ('w0', 'w1', 'w2', 'w3', 'w4', 'w5', 'w6', 'w7', 'w8', 'w9',
'w10', 'w11', 'w12', 'w13', 'w14', 'w15', 'w16', 'w17', 'w18', 'w19',
'w20', 'w21', 'w22', 'w23', 'w24', 'w25', 'w26', 'w27', 'w28'),
args = ('x0','x1','x2','x3'),
retval = 'x0')
x86flags = {'eflags': collections.OrderedDict([
('CF', 0),
('PF', 2),
('AF', 4),
('ZF', 6),
('SF', 7),
('IF', 9),
('DF', 10),
('OF', 11),
])}
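# Illustrative note (not in the original source): each entry maps a flag name to
# its bit position in the flags register, so e.g. ZF (bit 6) of a raw eflags
# value can be tested with (eflags >> 6) & 1.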
amd64 = RegisterSet(pc = 'rip',
stack = 'rsp',
frame = 'rbp',
flags = x86flags,
gpr = ('rax','rbx','rcx','rdx','rdi','rsi',
'r8', 'r9', 'r10','r11','r12',
'r13','r14','r15'),
misc = ('cs','ss','ds','es','fs','gs',
'fsbase', 'gsbase',
'ax','ah','al',
'bx','bh','bl',
'cx','ch','cl',
'dx','dh','dl',
'dil','sil','spl','bpl',
'di','si','bp','sp','ip'),
args = ('rdi','rsi','rdx','rcx','r8','r9'),
retval = 'rax')
i386 = RegisterSet( pc = 'eip',
stack = 'esp',
frame = 'ebp',
flags = x86flags,
gpr = ('eax','ebx','ecx','edx','edi','esi'),
misc = ('cs','ss','ds','es','fs','gs',
'fsbase', 'gsbase',
'ax','ah','al',
'bx','bh','bl',
'cx','ch','cl',
'dx','dh','dl',
'dil','sil','spl','bpl',
'di','si','bp','sp','ip'),
retval = 'eax')
# http://math-atlas.sourceforge.net/devel/assembly/elfspec_ppc.pdf
# r0 Volatile register which may be modified during function linkage
# r1 Stack frame pointer, always valid
# r2 System-reserved register (points at GOT)
# r3-r4 Volatile registers used for parameter passing and return values
# r5-r10 Volatile registers used for parameter passing
# r11-r12 Volatile registers which may be modified during function linkage
# r13 Small data area pointer register (points to TLS)
# r14-r30 Registers used for local variables
# r31 Used for local variables or "environment pointers"
powerpc = RegisterSet( retaddr = ('lr','r0'),
flags = {'msr':{},'xer':{}},
gpr = ('r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9',
'r10', 'r11', 'r12', 'r13', 'r14', 'r15', 'r16', 'r17', 'r18', 'r19',
'r20', 'r21', 'r22', 'r23', 'r24', 'r25', 'r26', 'r27', 'r28', 'r29',
'r30', 'r31'),
misc = ('cr','lr','r2'),
args = ('r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9', 'r10'),
retval = 'r3')
# http://people.cs.clemson.edu/~mark/sparc/sparc_arch_desc.txt
# http://people.cs.clemson.edu/~mark/subroutines/sparc.html
# https://www.utdallas.edu/~edsha/security/sparcoverflow.htm
#
# http://people.cs.clemson.edu/~mark/sparc/assembly.txt
# ____________________________________
# %g0 == %r0 (always zero) \
# %g1 == %r1 | g stands for global
# ... |
# %g7 == %r7 |
# ____________________________________/
# %o0 == %r8 \
# ... | o stands for output (note: not 0)
# %o6 == %r14 == %sp (stack ptr) |
# %o7 == %r15 == for return address |
# ____________________________________/
# %l0 == %r16 \
# ... | l stands for local (note: not 1)
# %l7 == %r23 |
# ____________________________________/
# %i0 == %r24 \
# ... | i stands for input
# %i6 == %r30 == %fp (frame ptr) |
# %i7 == %r31 == for return address |
# ____________________________________/
sparc = RegisterSet(stack = 'sp',
frame = 'fp',
retaddr = ('i7',),
flags = {'psr':{}},
gpr = ('g1', 'g2', 'g3', 'g4', 'g5', 'g6', 'g7',
'o0', 'o1', 'o2', 'o3', 'o4', 'o5', 'o7',
'l0', 'l1', 'l2', 'l3', 'l4', 'l5', 'l6', 'l7',
'i0', 'i1', 'i2', 'i3', 'i4', 'i5'),
args = ('i0','i1','i2','i3','i4','i5'),
retval = 'o0')
# http://logos.cs.uic.edu/366/notes/mips%20quick%20tutorial.htm
# r0 => zero
# r1 => temporary
# r2-r3 => values
# r4-r7 => arguments
# r8-r15 => temporary
# r16-r23 => saved values
# r24-r25 => temporary
# r26-r27 => interrupt/trap handler
# r28 => global pointer
# r29 => stack pointer
# r30 => frame pointer
# r31 => return address
mips = RegisterSet( frame = 'fp',
retaddr = ('ra',),
gpr = ('v0','v1','a0','a1','a2','a3',
't0', 't1', 't2', 't3', 't4', 't5', 't6', 't7', 't8', 't9',
's0', 's1', 's2', 's3', 's4', 's5', 's6', 's7', 's8'),
args = ('a0','a1','a2','a3'),
retval = 'v0')
arch_to_regs = {
'i386': i386,
'i8086': i386,
'x86-64': amd64,
'mips': mips,
'sparc': sparc,
'arm': arm,
'armcm': armcm,
'aarch64': aarch64,
'powerpc': powerpc,
}
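# Illustrative example (not in the original source): the register set for the
# current architecture is looked up by its gdb name, e.g.
#   arch_to_regs['x86-64'].retval  ->  'rax'
#   arch_to_regs['arm'].args       ->  ('r0', 'r1', 'r2', 'r3')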
@pwndbg.proc.OnlyWhenRunning
def gdb77_get_register(name):
return gdb.parse_and_eval('$' + name)
@pwndbg.proc.OnlyWhenRunning
def gdb79_get_register(name):
return gdb.selected_frame().read_register(name)
try:
gdb.Frame.read_register
get_register = gdb79_get_register
except AttributeError:
get_register = gdb77_get_register
# We need to manually make some ptrace calls to get fs/gs bases on Intel
PTRACE_ARCH_PRCTL = 30
ARCH_GET_FS = 0x1003
ARCH_GET_GS = 0x1004
class module(ModuleType):
last = {}
@pwndbg.memoize.reset_on_stop
@pwndbg.memoize.reset_on_prompt
def __getattr__(self, attr):
attr = attr.lstrip('$')
try:
# Seriously, gdb? Only accepts uint32.
if 'eflags' in attr or 'cpsr' in attr:
value = gdb77_get_register(attr)
value = value.cast(pwndbg.typeinfo.uint32)
else:
if attr.lower() == 'xpsr':
attr = 'xPSR'
value = get_register(attr)
size = pwndbg.typeinfo.unsigned.get(value.type.sizeof, pwndbg.typeinfo.ulong)
value = value.cast(size)
if attr.lower() == 'pc' and pwndbg.arch.current == 'i8086':
value += self.cs * 16
value = int(value)
return value & pwndbg.arch.ptrmask
except (ValueError, gdb.error):
return None
@pwndbg.memoize.reset_on_stop
@pwndbg.memoize.reset_on_prompt
def __getitem__(self, item):
if not isinstance(item, str):
print("Unknown register type: %r" % (item))
import pdb
import traceback
traceback.print_stack()
pdb.set_trace()
return None
# e.g. if we're looking for register "$rax", turn it into "rax"
item = item.lstrip('$')
item = getattr(self, item.lower())
if isinstance(item, int):
return int(item) & pwndbg.arch.ptrmask
return item
def __iter__(self):
regs = set(arch_to_regs[pwndbg.arch.current]) | {'pc', 'sp'}
for item in regs:
yield item
@property
def current(self):
return arch_to_regs[pwndbg.arch.current]
@property
def gpr(self):
return arch_to_regs[pwndbg.arch.current].gpr
@property
def common(self):
return arch_to_regs[pwndbg.arch.current].common
@property
def frame(self):
return arch_to_regs[pwndbg.arch.current].frame
@property
def retaddr(self):
return arch_to_regs[pwndbg.arch.current].retaddr
@property
def flags(self):
return arch_to_regs[pwndbg.arch.current].flags
@property
def stack(self):
return arch_to_regs[pwndbg.arch.current].stack
@property
def retval(self):
return arch_to_regs[pwndbg.arch.current].retval
@property
def all(self):
regs = arch_to_regs[pwndbg.arch.current]
retval = []
for regset in (regs.pc, regs.stack, regs.frame, regs.retaddr, regs.flags, regs.gpr, regs.misc):
if regset is None:
continue
elif isinstance(regset, (list, tuple)):
retval.extend(regset)
elif isinstance(regset, dict):
retval.extend(regset.keys())
else:
retval.append(regset)
return retval
def fix(self, expression):
for regname in set(self.all + ['sp','pc']):
expression = re.sub(r'\$?\b%s\b' % regname, r'$'+regname, expression)
return expression
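    # Illustrative example (not in the original source): fix() prefixes bare
    # register names with '$' so gdb can evaluate the expression, e.g. on x86-64
    #   regs.fix("rax + rbx*8")  ->  "$rax + $rbx*8"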
def items(self):
for regname in self.all:
yield regname, self[regname]
arch_to_regs = arch_to_regs
@property
def changed(self):
delta = []
for reg, value in self.previous.items():
if self[reg] != value:
delta.append(reg)
return delta
@property
@pwndbg.memoize.reset_on_stop
def fsbase(self):
return self._fs_gs_helper(ARCH_GET_FS)
@property
@pwndbg.memoize.reset_on_stop
def gsbase(self):
return self._fs_gs_helper(ARCH_GET_GS)
@pwndbg.memoize.reset_on_stop
def _fs_gs_helper(self, which):
"""Supports fetching based on segmented addressing, a la fs:[0x30].
Requires ptrace'ing the child directly."""
# We can't really do anything if the process is remote.
if pwndbg.remote.is_remote(): return 0
# Use the lightweight process ID
pid, lwpid, tid = gdb.selected_thread().ptid
# Get the register
ppvoid = ctypes.POINTER(ctypes.c_void_p)
value = ppvoid(ctypes.c_void_p())
value.contents.value = 0
libc = ctypes.CDLL('libc.so.6')
result = libc.ptrace(PTRACE_ARCH_PRCTL,
lwpid,
value,
which)
if result == 0:
return (value.contents.value or 0) & pwndbg.arch.ptrmask
return 0
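    # Illustrative note (not in the original source): fsbase/gsbase make
    # segment-relative addresses resolvable, e.g. on x86-64 glibc the stack
    # canary at fs:[0x28] can be read from regs.fsbase + 0x28.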
def __repr__(self):
return ('<module pwndbg.regs>')
# To prevent garbage collection
tether = sys.modules[__name__]
sys.modules[__name__] = module(__name__, '')
@pwndbg.events.cont
@pwndbg.events.stop
def update_last():
M = sys.modules[__name__]
M.previous = M.last
M.last = {k:M[k] for k in M.common}
if pwndbg.config.show_retaddr_reg:
M.last.update({k:M[k] for k in M.retaddr})
| mit | 3,118,295,357,238,623,000 | 7,979,315,046,571,085,000 | 32.138085 | 120 | 0.4587 | false |
bdh1011/wau | venv/lib/python2.7/site-packages/pandas/core/internals.py | 1 | 151884 | import copy
import itertools
import re
import operator
from datetime import datetime, timedelta
from collections import defaultdict
import numpy as np
from pandas.core.base import PandasObject
from pandas.core.common import (_possibly_downcast_to_dtype, isnull,
_NS_DTYPE, _TD_DTYPE, ABCSeries, is_list_like,
ABCSparseSeries, _infer_dtype_from_scalar,
is_null_datelike_scalar, _maybe_promote,
is_timedelta64_dtype, is_datetime64_dtype,
array_equivalent, _maybe_convert_string_to_object,
is_categorical)
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import maybe_convert_indices, length_of_indexer
from pandas.core.categorical import Categorical, maybe_to_categorical
import pandas.core.common as com
from pandas.sparse.array import _maybe_to_sparse, SparseArray
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.computation.expressions as expressions
from pandas.util.decorators import cache_readonly
from pandas.tslib import Timestamp, Timedelta
from pandas import compat
from pandas.compat import range, map, zip, u
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type
from pandas.lib import BlockPlacement
class Block(PandasObject):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a pandas
data structure
Index-ignorant; let the container take care of that
"""
__slots__ = ['_mgr_locs', 'values', 'ndim']
is_numeric = False
is_float = False
is_integer = False
is_complex = False
is_datetime = False
is_timedelta = False
is_bool = False
is_object = False
is_categorical = False
is_sparse = False
_can_hold_na = False
_downcast_dtype = None
_can_consolidate = True
_verify_integrity = True
_validate_ndim = True
_ftype = 'dense'
_holder = None
def __init__(self, values, placement, ndim=None, fastpath=False):
if ndim is None:
ndim = values.ndim
elif values.ndim != ndim:
raise ValueError('Wrong number of dimensions')
self.ndim = ndim
self.mgr_locs = placement
self.values = values
if len(self.mgr_locs) != len(self.values):
raise ValueError('Wrong number of items passed %d,'
' placement implies %d' % (
len(self.values), len(self.mgr_locs)))
@property
def _consolidate_key(self):
return (self._can_consolidate, self.dtype.name)
@property
def _is_single_block(self):
return self.ndim == 1
@property
def is_view(self):
""" return a boolean if I am possibly a view """
return self.values.base is not None
@property
def is_datelike(self):
""" return True if I am a non-datelike """
return self.is_datetime or self.is_timedelta
def is_categorical_astype(self, dtype):
"""
validate that we have a astypeable to categorical,
returns a boolean if we are a categorical
"""
if com.is_categorical_dtype(dtype):
if dtype == com.CategoricalDtype():
return True
# this is a pd.Categorical, but is not
# a valid type for astypeing
raise TypeError("invalid type {0} for astype".format(dtype))
return False
def to_dense(self):
return self.values.view()
@property
def fill_value(self):
return np.nan
@property
def mgr_locs(self):
return self._mgr_locs
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
return self.dtype
def make_block_same_class(self, values, placement, copy=False, fastpath=True,
**kwargs):
"""
Wrap given values in a block of same type as self.
`kwargs` are used in SparseBlock override.
"""
if copy:
values = values.copy()
return make_block(values, placement, klass=self.__class__,
fastpath=fastpath, **kwargs)
@mgr_locs.setter
def mgr_locs(self, new_mgr_locs):
if not isinstance(new_mgr_locs, BlockPlacement):
new_mgr_locs = BlockPlacement(new_mgr_locs)
self._mgr_locs = new_mgr_locs
def __unicode__(self):
# don't want to print out all of the items here
name = com.pprint_thing(self.__class__.__name__)
if self._is_single_block:
result = '%s: %s dtype: %s' % (
name, len(self), self.dtype)
else:
shape = ' x '.join([com.pprint_thing(s) for s in self.shape])
result = '%s: %s, %s, dtype: %s' % (
name, com.pprint_thing(self.mgr_locs.indexer), shape,
self.dtype)
return result
def __len__(self):
return len(self.values)
def __getstate__(self):
return self.mgr_locs.indexer, self.values
def __setstate__(self, state):
self.mgr_locs = BlockPlacement(state[0])
self.values = state[1]
self.ndim = self.values.ndim
def _slice(self, slicer):
""" return a slice of my values """
return self.values[slicer]
def reshape_nd(self, labels, shape, ref_items):
"""
Parameters
----------
labels : list of new axis labels
shape : new shape
ref_items : new ref_items
return a new block that is transformed to a nd block
"""
return _block2d_to_blocknd(
values=self.get_values().T,
placement=self.mgr_locs,
shape=shape,
labels=labels,
ref_items=ref_items)
def getitem_block(self, slicer, new_mgr_locs=None):
"""
Perform __getitem__-like, return result as block.
As of now, only supports slices that preserve dimensionality.
"""
if new_mgr_locs is None:
if isinstance(slicer, tuple):
axis0_slicer = slicer[0]
else:
axis0_slicer = slicer
new_mgr_locs = self.mgr_locs[axis0_slicer]
new_values = self._slice(slicer)
if self._validate_ndim and new_values.ndim != self.ndim:
raise ValueError("Only same dim slicing is allowed")
return self.make_block_same_class(new_values, new_mgr_locs)
@property
def shape(self):
return self.values.shape
@property
def itemsize(self):
return self.values.itemsize
@property
def dtype(self):
return self.values.dtype
@property
def ftype(self):
return "%s:%s" % (self.dtype, self._ftype)
def merge(self, other):
return _merge_blocks([self, other])
def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
if fill_value is None:
fill_value = self.fill_value
new_values = com.take_nd(self.values, indexer, axis,
fill_value=fill_value, mask_info=mask_info)
return make_block(new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)
def get(self, item):
loc = self.items.get_loc(item)
return self.values[loc]
def iget(self, i):
return self.values[i]
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
self.values[locs] = values
def delete(self, loc):
"""
Delete given loc(-s) from block in-place.
"""
self.values = np.delete(self.values, loc, 0)
self.mgr_locs = self.mgr_locs.delete(loc)
def apply(self, func, **kwargs):
""" apply the function to my values; return a block if we are not one """
result = func(self.values, **kwargs)
if not isinstance(result, Block):
result = make_block(values=_block_shape(result), placement=self.mgr_locs,)
return result
def fillna(self, value, limit=None, inplace=False, downcast=None):
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
mask = isnull(self.values)
if limit is not None:
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillna' "
"is currently limited to 2")
mask[mask.cumsum(self.ndim-1) > limit] = False
value = self._try_fill(value)
blocks = self.putmask(mask, value, inplace=inplace)
return self._maybe_downcast(blocks, downcast)
def _maybe_downcast(self, blocks, downcast=None):
# no need to downcast our float
# unless indicated
if downcast is None and self.is_float:
return blocks
elif downcast is None and (self.is_timedelta or self.is_datetime):
return blocks
result_blocks = []
for b in blocks:
result_blocks.extend(b.downcast(downcast))
return result_blocks
def downcast(self, dtypes=None):
""" try to downcast each item to the dict of dtypes if present """
# turn it off completely
if dtypes is False:
return [self]
values = self.values
# single block handling
if self._is_single_block:
# try to cast all non-floats here
if dtypes is None:
dtypes = 'infer'
nv = _possibly_downcast_to_dtype(values, dtypes)
return [make_block(nv, ndim=self.ndim,
fastpath=True, placement=self.mgr_locs)]
# ndim > 1
if dtypes is None:
return [self]
if not (dtypes == 'infer' or isinstance(dtypes, dict)):
raise ValueError("downcast must have a dictionary or 'infer' as "
"its argument")
# item-by-item
# this is expensive as it splits the blocks items-by-item
blocks = []
for i, rl in enumerate(self.mgr_locs):
if dtypes == 'infer':
dtype = 'infer'
else:
raise AssertionError("dtypes as dict is not supported yet")
dtype = dtypes.get(item, self._downcast_dtype)
if dtype is None:
nv = _block_shape(values[i], ndim=self.ndim)
else:
nv = _possibly_downcast_to_dtype(values[i], dtype)
nv = _block_shape(nv, ndim=self.ndim)
blocks.append(make_block(nv,
ndim=self.ndim, fastpath=True,
placement=[rl]))
return blocks
def astype(self, dtype, copy=False, raise_on_error=True, values=None, **kwargs):
return self._astype(dtype, copy=copy, raise_on_error=raise_on_error,
values=values, **kwargs)
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
klass=None, **kwargs):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on an except if raise == True
"""
# may need to convert to categorical
# this is only called for non-categoricals
if self.is_categorical_astype(dtype):
return make_block(Categorical(self.values, **kwargs),
ndim=self.ndim,
placement=self.mgr_locs)
# astype processing
dtype = np.dtype(dtype)
if self.dtype == dtype:
if copy:
return self.copy()
return self
if klass is None:
if dtype == np.object_:
klass = ObjectBlock
try:
# force the copy here
if values is None:
# _astype_nansafe works fine with 1-d only
values = com._astype_nansafe(self.values.ravel(), dtype, copy=True)
values = values.reshape(self.values.shape)
newb = make_block(values,
ndim=self.ndim, placement=self.mgr_locs,
fastpath=True, dtype=dtype, klass=klass)
except:
if raise_on_error is True:
raise
newb = self.copy() if copy else self
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
raise TypeError("cannot set astype for copy = [%s] for dtype "
"(%s [%s]) with smaller itemsize that current "
"(%s [%s])" % (copy, self.dtype.name,
self.itemsize, newb.dtype.name,
newb.itemsize))
return newb
def convert(self, copy=True, **kwargs):
""" attempt to coerce any object types to better types
return a copy of the block (if copy = True)
by definition we are not an ObjectBlock here! """
return [self.copy()] if copy else [self]
def _can_hold_element(self, value):
raise NotImplementedError()
def _try_cast(self, value):
raise NotImplementedError()
def _try_cast_result(self, result, dtype=None):
""" try to cast the result to our original type,
        we may have roundtripped through object in the meantime """
if dtype is None:
dtype = self.dtype
if self.is_integer or self.is_bool or self.is_datetime:
pass
elif self.is_float and result.dtype == self.dtype:
# protect against a bool/object showing up here
if isinstance(dtype, compat.string_types) and dtype == 'infer':
return result
if not isinstance(dtype, type):
dtype = dtype.type
if issubclass(dtype, (np.bool_, np.object_)):
if issubclass(dtype, np.bool_):
if isnull(result).all():
return result.astype(np.bool_)
else:
result = result.astype(np.object_)
result[result == 1] = True
result[result == 0] = False
return result
else:
return result.astype(np.object_)
return result
# may need to change the dtype here
return _possibly_downcast_to_dtype(result, dtype)
def _try_operate(self, values):
""" return a version to operate on as the input """
return values
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
return result
def _try_coerce_and_cast_result(self, result, dtype=None):
result = self._try_coerce_result(result)
result = self._try_cast_result(result, dtype=dtype)
return result
def _try_fill(self, value):
return value
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
if not self.is_object and not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
return values
# block actions ####
def copy(self, deep=True):
values = self.values
if deep:
values = values.copy()
return make_block(values, ndim=self.ndim,
klass=self.__class__, fastpath=True,
placement=self.mgr_locs)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False):
""" replace the to_replace value with value, possible to create new
blocks here this is just a call to putmask. regex is not used here.
It is used in ObjectBlocks. It is here for API
compatibility."""
mask = com.mask_missing(self.values, to_replace)
if filter is not None:
filtered_out = ~self.mgr_locs.isin(filter)
mask[filtered_out.nonzero()[0]] = False
if not mask.any():
if inplace:
return [self]
return [self.copy()]
return self.putmask(mask, value, inplace=inplace)
def setitem(self, indexer, value):
""" set the value inplace; return a new block (of a possibly different
dtype)
indexer is a direct slice/positional indexer; value must be a
compatible shape
"""
# coerce None values, if appropriate
if value is None:
if self.is_numeric:
value = np.nan
# coerce args
values, value = self._try_coerce_args(self.values, value)
arr_value = np.array(value)
# cast the values to a type that can hold nan (if necessary)
if not self._can_hold_element(value):
dtype, _ = com._maybe_promote(arr_value.dtype)
values = values.astype(dtype)
transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x)
values = transf(values)
l = len(values)
# length checking
        # a boolean indexer whose number of True values equals len(value) is ok too
if isinstance(indexer, (np.ndarray, list)):
if is_list_like(value) and len(indexer) != len(value):
if not (isinstance(indexer, np.ndarray) and
indexer.dtype == np.bool_ and
len(indexer[indexer]) == len(value)):
raise ValueError("cannot set using a list-like indexer "
"with a different length than the value")
# slice
elif isinstance(indexer, slice):
if is_list_like(value) and l:
if len(value) != length_of_indexer(indexer, values):
raise ValueError("cannot set using a slice indexer with a "
"different length than the value")
try:
def _is_scalar_indexer(indexer):
# return True if we are all scalar indexers
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return all([ np.isscalar(idx) for idx in indexer ])
return False
def _is_empty_indexer(indexer):
# return a boolean if we have an empty indexer
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)
return False
# empty indexers
# 8669 (empty)
if _is_empty_indexer(indexer):
pass
# setting a single element for each dim and with a rhs that could be say a list
# GH 6043
elif _is_scalar_indexer(indexer):
values[indexer] = value
# if we are an exact match (ex-broadcasting),
# then use the resultant dtype
elif len(arr_value.shape) and arr_value.shape[0] == values.shape[0] and np.prod(arr_value.shape) == np.prod(values.shape):
values[indexer] = value
values = values.astype(arr_value.dtype)
# set
else:
values[indexer] = value
# coerce and try to infer the dtypes of the result
if np.isscalar(value):
dtype, _ = _infer_dtype_from_scalar(value)
else:
dtype = 'infer'
values = self._try_coerce_and_cast_result(values, dtype)
block = make_block(transf(values),
ndim=self.ndim, placement=self.mgr_locs,
fastpath=True)
# may have to soft convert_objects here
if block.is_object and not self.is_object:
block = block.convert(convert_numeric=False)
return block
except (ValueError, TypeError) as detail:
raise
except Exception as detail:
pass
return [self]
def putmask(self, mask, new, align=True, inplace=False):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block(s), the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
# may need to align the new
if hasattr(new, 'reindex_axis'):
new = new.values.T
# may need to align the mask
if hasattr(mask, 'reindex_axis'):
mask = mask.values.T
# if we are passed a scalar None, convert it here
if not is_list_like(new) and isnull(new) and not self.is_object:
new = self.fill_value
if self._can_hold_element(new):
new = self._try_cast(new)
# pseudo-broadcast
if isinstance(new, np.ndarray) and new.ndim == self.ndim - 1:
new = np.repeat(new, self.shape[-1]).reshape(self.shape)
np.putmask(new_values, mask, new)
# maybe upcast me
elif mask.any():
# need to go column by column
new_blocks = []
if self.ndim > 1:
for i, ref_loc in enumerate(self.mgr_locs):
m = mask[i]
v = new_values[i]
# need a new block
if m.any():
n = new[i] if isinstance(
new, np.ndarray) else np.array(new)
# type of the new block
dtype, _ = com._maybe_promote(n.dtype)
                        # we need to explicitly astype here to make a copy
n = n.astype(dtype)
nv = _putmask_smart(v, m, n)
else:
nv = v if inplace else v.copy()
# Put back the dimension that was taken from it and make
# a block out of the result.
block = make_block(values=nv[np.newaxis],
placement=[ref_loc],
fastpath=True)
new_blocks.append(block)
else:
nv = _putmask_smart(new_values, mask, new)
new_blocks.append(make_block(values=nv,
placement=self.mgr_locs,
fastpath=True))
return new_blocks
if inplace:
return [self]
return [make_block(new_values,
placement=self.mgr_locs, fastpath=True)]
def interpolate(self, method='pad', axis=0, index=None,
values=None, inplace=False, limit=None,
fill_value=None, coerce=False, downcast=None, **kwargs):
def check_int_bool(self, inplace):
# Only FloatBlocks will contain NaNs.
# timedelta subclasses IntBlock
if (self.is_bool or self.is_integer) and not self.is_timedelta:
if inplace:
return self
else:
return self.copy()
# a fill na type method
try:
m = com._clean_fill_method(method)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate_with_fill(method=m,
axis=axis,
inplace=inplace,
limit=limit,
fill_value=fill_value,
coerce=coerce,
downcast=downcast)
# try an interp method
try:
m = com._clean_interp_method(method, **kwargs)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate(method=m,
index=index,
values=values,
axis=axis,
limit=limit,
fill_value=fill_value,
inplace=inplace,
downcast=downcast,
**kwargs)
raise ValueError("invalid method '{0}' to interpolate.".format(method))
def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, coerce=False,
downcast=None):
""" fillna but using the interpolate machinery """
# if we are coercing, then don't force the conversion
# if the block can't hold the type
if coerce:
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
fill_value = self._try_fill(fill_value)
values = self.values if inplace else self.values.copy()
values = self._try_operate(values)
values = com.interpolate_2d(values,
method=method,
axis=axis,
limit=limit,
fill_value=fill_value,
dtype=self.dtype)
values = self._try_coerce_result(values)
blocks = [make_block(values,
ndim=self.ndim, klass=self.__class__,
fastpath=True, placement=self.mgr_locs)]
return self._maybe_downcast(blocks, downcast)
def _interpolate(self, method=None, index=None, values=None,
fill_value=None, axis=0, limit=None,
inplace=False, downcast=None, **kwargs):
""" interpolate using scipy wrappers """
data = self.values if inplace else self.values.copy()
# only deal with floats
if not self.is_float:
if not self.is_integer:
return self
data = data.astype(np.float64)
if fill_value is None:
fill_value = self.fill_value
if method in ('krogh', 'piecewise_polynomial', 'pchip'):
if not index.is_monotonic:
raise ValueError("{0} interpolation requires that the "
"index be monotonic.".format(method))
# process 1-d slices in the axis direction
def func(x):
# process a 1-d slice, returning it
# should the axis argument be handled below in apply_along_axis?
# i.e. not an arg to com.interpolate_1d
return com.interpolate_1d(index, x, method=method, limit=limit,
fill_value=fill_value,
bounds_error=False, **kwargs)
# interp each column independently
interp_values = np.apply_along_axis(func, axis, data)
blocks = [make_block(interp_values,
ndim=self.ndim, klass=self.__class__,
fastpath=True, placement=self.mgr_locs)]
return self._maybe_downcast(blocks, downcast)
def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
"""
        Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = self.fill_value
new_values = com.take_nd(self.get_values(), indexer, axis=axis,
allow_fill=False)
else:
fill_value = fill_tuple[0]
new_values = com.take_nd(self.get_values(), indexer, axis=axis,
allow_fill=True, fill_value=fill_value)
if new_mgr_locs is None:
if axis == 0:
slc = lib.indexer_as_slice(indexer)
if slc is not None:
new_mgr_locs = self.mgr_locs[slc]
else:
new_mgr_locs = self.mgr_locs[indexer]
else:
new_mgr_locs = self.mgr_locs
if new_values.dtype != self.dtype:
return make_block(new_values, new_mgr_locs)
else:
return self.make_block_same_class(new_values, new_mgr_locs)
def get_values(self, dtype=None):
return self.values
def diff(self, n, axis=1):
""" return block for the diff of the values """
new_values = com.diff(self.values, n, axis=axis)
return [make_block(values=new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)]
def shift(self, periods, axis=0):
""" shift the block by periods, possibly upcast """
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(self.values)
# make sure array sent to np.roll is c_contiguous
f_ordered = new_values.flags.f_contiguous
if f_ordered:
new_values = new_values.T
axis = new_values.ndim - axis - 1
if np.prod(new_values.shape):
new_values = np.roll(new_values, com._ensure_platform_int(periods), axis=axis)
axis_indexer = [ slice(None) ] * self.ndim
if periods > 0:
axis_indexer[axis] = slice(None,periods)
else:
axis_indexer[axis] = slice(periods,None)
new_values[tuple(axis_indexer)] = fill_value
# restore original order
if f_ordered:
new_values = new_values.T
return [make_block(new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)]
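    # Illustrative note (not in the original source): shift() upcasts via
    # com._maybe_upcast above, so shifting an int64 block by one period yields a
    # float64 block whose vacated positions are filled with NaN.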
def eval(self, func, other, raise_on_error=True, try_cast=False):
"""
evaluate the block; return result block from the result
Parameters
----------
func : how to combine self, other
other : a ndarray/object
raise_on_error : if True, raise when I can't perform the function,
False by default (and just return the data that we had coming in)
Returns
-------
a new block, the result of the func
"""
values = self.values
if hasattr(other, 'reindex_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim:
is_transposed = True
else:
if values.shape == other.shape[::-1]:
is_transposed = True
elif values.shape[0] == other.shape[-1]:
is_transposed = True
else:
                    # this is a broadcast error here
raise ValueError("cannot broadcast shape [%s] with block "
"values [%s]" % (values.T.shape,
other.shape))
transf = (lambda x: x.T) if is_transposed else (lambda x: x)
# coerce/transpose the args if needed
values, other = self._try_coerce_args(transf(values), other)
# get the result, may need to transpose the other
def get_result(other):
return self._try_coerce_result(func(values, other))
# error handler if we have an issue operating with the function
def handle_error():
if raise_on_error:
raise TypeError('Could not operate %s with block values %s'
% (repr(other), str(detail)))
else:
# return the values
result = np.empty(values.shape, dtype='O')
result.fill(np.nan)
return result
# get the result
try:
result = get_result(other)
# if we have an invalid shape/broadcast error
# GH4576, so raise instead of allowing to pass through
except ValueError as detail:
raise
except Exception as detail:
result = handle_error()
# technically a broadcast error in numpy can 'work' by returning a
# boolean False
if not isinstance(result, np.ndarray):
if not isinstance(result, np.ndarray):
# differentiate between an invalid ndarray-ndarray comparison
# and an invalid type comparison
if isinstance(values, np.ndarray) and is_list_like(other):
raise ValueError('Invalid broadcasting comparison [%s] '
'with block values' % repr(other))
raise TypeError('Could not compare [%s] with block values'
% repr(other))
# transpose if needed
result = transf(result)
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return [make_block(result, ndim=self.ndim,
fastpath=True, placement=self.mgr_locs)]
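    # Illustrative note (not in the original source): eval() is the entry point
    # used by the BlockManager for arithmetic/comparison ops, e.g. df + 1
    # eventually calls each block's eval with func=operator.add.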
def where(self, other, cond, align=True, raise_on_error=True,
try_cast=False):
"""
evaluate the block; return result block(s) from the result
Parameters
----------
other : a ndarray/object
cond : the condition to respect
align : boolean, perform alignment on other/cond
raise_on_error : if True, raise when I can't perform the function,
False by default (and just return the data that we had coming in)
Returns
-------
a new block(s), the result of the func
"""
values = self.values
# see if we can align other
if hasattr(other, 'reindex_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim or values.shape == other.shape[::-1]:
                # if it's symmetric we are ok, no reshaping needed (GH 7506)
if (values.shape[0] == np.array(values.shape)).all():
pass
                # pseudo broadcast (it's a 2d vs 1d, say, and where needs it in a
# specific direction)
elif (other.ndim >= 1 and values.ndim - 1 == other.ndim and
values.shape[0] != other.shape[0]):
other = _block_shape(other).T
else:
values = values.T
is_transposed = True
# see if we can align cond
if not hasattr(cond, 'shape'):
raise ValueError(
"where must have a condition that is ndarray like")
if hasattr(cond, 'reindex_axis'):
cond = cond.values
# may need to undo transpose of values
if hasattr(values, 'ndim'):
if values.ndim != cond.ndim or values.shape == cond.shape[::-1]:
values = values.T
is_transposed = not is_transposed
other = _maybe_convert_string_to_object(other)
# our where function
def func(c, v, o):
if c.ravel().all():
return v
v, o = self._try_coerce_args(v, o)
try:
return self._try_coerce_result(
expressions.where(c, v, o, raise_on_error=True)
)
except Exception as detail:
if raise_on_error:
raise TypeError('Could not operate [%s] with block values '
'[%s]' % (repr(o), str(detail)))
else:
# return the values
result = np.empty(v.shape, dtype='float64')
result.fill(np.nan)
return result
# see if we can operate on the entire block, or need item-by-item
# or if we are a single block (ndim == 1)
result = func(cond, values, other)
if self._can_hold_na or self.ndim == 1:
if not isinstance(result, np.ndarray):
raise TypeError('Could not compare [%s] with block values'
% repr(other))
if is_transposed:
result = result.T
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return make_block(result,
ndim=self.ndim, placement=self.mgr_locs)
# might need to separate out blocks
axis = cond.ndim - 1
cond = cond.swapaxes(axis, 0)
mask = np.array([cond[i].all() for i in range(cond.shape[0])],
dtype=bool)
result_blocks = []
for m in [mask, ~mask]:
if m.any():
r = self._try_cast_result(
result.take(m.nonzero()[0], axis=axis))
result_blocks.append(make_block(r.T,
placement=self.mgr_locs[m]))
return result_blocks
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape: return False
return array_equivalent(self.values, other.values)
class NonConsolidatableMixIn(object):
""" hold methods for the nonconsolidatable blocks """
_can_consolidate = False
_verify_integrity = False
_validate_ndim = False
_holder = None
def __init__(self, values, placement,
ndim=None, fastpath=False,):
# Placement must be converted to BlockPlacement via property setter
# before ndim logic, because placement may be a slice which doesn't
# have a length.
self.mgr_locs = placement
# kludgetastic
if ndim is None:
if len(self.mgr_locs) != 1:
ndim = 1
else:
ndim = 2
self.ndim = ndim
if not isinstance(values, self._holder):
raise TypeError("values must be {0}".format(self._holder.__name__))
self.values = values
def get_values(self, dtype=None):
""" need to to_dense myself (and always return a ndim sized object) """
values = self.values.to_dense()
if values.ndim == self.ndim - 1:
values = values.reshape((1,) + values.shape)
return values
def iget(self, col):
if self.ndim == 2 and isinstance(col, tuple):
col, loc = col
if col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values[loc]
else:
if col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values
def should_store(self, value):
return isinstance(value, self._holder)
def set(self, locs, values, check=False):
assert locs.tolist() == [0]
self.values = values
def get(self, item):
if self.ndim == 1:
loc = self.items.get_loc(item)
return self.values[loc]
else:
return self.values
def _slice(self, slicer):
""" return a slice of my values (but densify first) """
return self.get_values()[slicer]
def _try_cast_result(self, result, dtype=None):
return result
class NumericBlock(Block):
__slots__ = ()
is_numeric = True
_can_hold_na = True
class FloatOrComplexBlock(NumericBlock):
__slots__ = ()
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape: return False
left, right = self.values, other.values
return ((left == right) | (np.isnan(left) & np.isnan(right))).all()
class FloatBlock(FloatOrComplexBlock):
__slots__ = ()
is_float = True
_downcast_dtype = 'int64'
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return issubclass(tipo, (np.floating, np.integer)) and not issubclass(
tipo, (np.datetime64, np.timedelta64))
return isinstance(element, (float, int, np.float_, np.int_)) and not isinstance(
element, (bool, np.bool_, datetime, timedelta, np.datetime64, np.timedelta64))
def _try_cast(self, element):
try:
return float(element)
except: # pragma: no cover
return element
def to_native_types(self, slicer=None, na_rep='', float_format=None, decimal='.',
quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
formatter = None
if float_format and decimal != '.':
formatter = lambda v : (float_format % v).replace('.',decimal,1)
elif decimal != '.':
formatter = lambda v : ('%g' % v).replace('.',decimal,1)
elif float_format:
formatter = lambda v : float_format % v
if formatter is None and not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
if formatter:
imask = (~mask).ravel()
values.flat[imask] = np.array(
[formatter(val) for val in values.ravel()[imask]])
return values
def should_store(self, value):
# when inserting a column should not coerce integers to floats
# unnecessarily
return (issubclass(value.dtype.type, np.floating) and
value.dtype == self.dtype)
class ComplexBlock(FloatOrComplexBlock):
__slots__ = ()
is_complex = True
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type, (np.floating, np.integer, np.complexfloating))
return (isinstance(element, (float, int, complex, np.float_, np.int_)) and
not isinstance(bool, np.bool_))
def _try_cast(self, element):
try:
return complex(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.complexfloating)
class IntBlock(NumericBlock):
__slots__ = ()
is_integer = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return issubclass(tipo, np.integer) and not issubclass(tipo, (np.datetime64, np.timedelta64))
return com.is_integer(element)
def _try_cast(self, element):
try:
return int(element)
except: # pragma: no cover
return element
def should_store(self, value):
return com.is_integer_dtype(value) and value.dtype == self.dtype
class TimeDeltaBlock(IntBlock):
__slots__ = ()
is_timedelta = True
_can_hold_na = True
is_numeric = False
@property
def fill_value(self):
return tslib.iNaT
def _try_fill(self, value):
""" if we are a NaT, return the actual fill value """
if isinstance(value, type(tslib.NaT)) or np.array(isnull(value)).all():
value = tslib.iNaT
elif isinstance(value, Timedelta):
value = value.value
elif isinstance(value, np.timedelta64):
pass
elif com.is_integer(value):
# coerce to seconds of timedelta
value = np.timedelta64(int(value * 1e9))
elif isinstance(value, timedelta):
value = np.timedelta64(value)
return value
def _try_coerce_args(self, values, other):
""" Coerce values and other to float64, with null values converted to
NaN. values is always ndarray-like, other may not be """
def masker(v):
mask = isnull(v)
v = v.astype('float64')
v[mask] = np.nan
return v
values = masker(values)
if is_null_datelike_scalar(other):
other = np.nan
elif isinstance(other, (np.timedelta64, Timedelta, timedelta)):
other = _coerce_scalar_to_timedelta_type(other, unit='s', box=False).item()
if other == tslib.iNaT:
other = np.nan
elif lib.isscalar(other):
other = np.float64(other)
else:
other = masker(other)
return values, other
def _try_operate(self, values):
""" return a version to operate on """
return values.view('i8')
def _try_coerce_result(self, result):
""" reverse of try_coerce_args / try_operate """
if isinstance(result, np.ndarray):
mask = isnull(result)
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('m8[ns]')
result[mask] = tslib.iNaT
elif isinstance(result, np.integer):
result = lib.Timedelta(result)
return result
def should_store(self, value):
return issubclass(value.dtype.type, np.timedelta64)
def to_native_types(self, slicer=None, na_rep=None, quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
rvalues = np.empty(values.shape, dtype=object)
if na_rep is None:
na_rep = 'NaT'
rvalues[mask] = na_rep
imask = (~mask).ravel()
#### FIXME ####
# should use the core.format.Timedelta64Formatter here
# to figure what format to pass to the Timedelta
# e.g. to not show the decimals say
rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(format='all')
for val in values.ravel()[imask]],
dtype=object)
return rvalues
def get_values(self, dtype=None):
# return object dtypes as Timedelta
if dtype == object:
return lib.map_infer(self.values.ravel(), lib.Timedelta
).reshape(self.values.shape)
return self.values
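    # Illustrative note (not in the original source): timedelta data is stored as
    # int64 nanoseconds (viewed as 'i8' in _try_operate) with tslib.iNaT as the
    # missing-value sentinel, and converted back to 'm8[ns]' in _try_coerce_result.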
class BoolBlock(NumericBlock):
__slots__ = ()
is_bool = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type, np.integer)
return isinstance(element, (int, bool))
def _try_cast(self, element):
try:
return bool(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.bool_)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False):
to_replace_values = np.atleast_1d(to_replace)
if not np.can_cast(to_replace_values, bool):
return self
return super(BoolBlock, self).replace(to_replace, value,
inplace=inplace, filter=filter,
regex=regex)
class ObjectBlock(Block):
__slots__ = ()
is_object = True
_can_hold_na = True
def __init__(self, values, ndim=2, fastpath=False,
placement=None):
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object)
super(ObjectBlock, self).__init__(values, ndim=ndim,
fastpath=fastpath,
placement=placement)
@property
def is_bool(self):
""" we can be a bool if we have only bool values but are of type
object
"""
return lib.is_bool_array(self.values.ravel())
def convert(self, convert_dates=True, convert_numeric=True, convert_timedeltas=True,
copy=True, by_item=True):
""" attempt to coerce any object types to better types
return a copy of the block (if copy = True)
by definition we ARE an ObjectBlock!!!!!
can return multiple blocks!
"""
# attempt to create new type blocks
blocks = []
if by_item and not self._is_single_block:
for i, rl in enumerate(self.mgr_locs):
values = self.iget(i)
values = com._possibly_convert_objects(
values.ravel(), convert_dates=convert_dates,
convert_numeric=convert_numeric,
convert_timedeltas=convert_timedeltas,
).reshape(values.shape)
values = _block_shape(values, ndim=self.ndim)
newb = make_block(values,
ndim=self.ndim, placement=[rl])
blocks.append(newb)
else:
values = com._possibly_convert_objects(
self.values.ravel(), convert_dates=convert_dates,
convert_numeric=convert_numeric
).reshape(self.values.shape)
blocks.append(make_block(values,
ndim=self.ndim, placement=self.mgr_locs))
return blocks
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
# GH6026
if check:
try:
if (self.values[locs] == values).all():
return
except:
pass
try:
self.values[locs] = values
except (ValueError):
# broadcasting error
# see GH6171
new_shape = list(values.shape)
new_shape[0] = len(self.items)
self.values = np.empty(tuple(new_shape),dtype=self.dtype)
self.values.fill(np.nan)
self.values[locs] = values
def _maybe_downcast(self, blocks, downcast=None):
if downcast is not None:
return blocks
# split and convert the blocks
result_blocks = []
for blk in blocks:
result_blocks.extend(blk.convert(convert_dates=True,
convert_numeric=False))
return result_blocks
def _can_hold_element(self, element):
return True
def _try_cast(self, element):
return element
def should_store(self, value):
return not (issubclass(value.dtype.type,
(np.integer, np.floating, np.complexfloating,
np.datetime64, np.bool_)) or com.is_categorical_dtype(value))
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False):
blk = [self]
to_rep_is_list = com.is_list_like(to_replace)
value_is_list = com.is_list_like(value)
both_lists = to_rep_is_list and value_is_list
either_list = to_rep_is_list or value_is_list
if not either_list and com.is_re(to_replace):
blk[0], = blk[0]._replace_single(to_replace, value,
inplace=inplace, filter=filter,
regex=True)
elif not (either_list or regex):
blk = super(ObjectBlock, self).replace(to_replace, value,
inplace=inplace,
filter=filter, regex=regex)
elif both_lists:
for to_rep, v in zip(to_replace, value):
blk[0], = blk[0]._replace_single(to_rep, v, inplace=inplace,
filter=filter, regex=regex)
elif to_rep_is_list and regex:
for to_rep in to_replace:
blk[0], = blk[0]._replace_single(to_rep, value,
inplace=inplace,
filter=filter, regex=regex)
else:
blk[0], = blk[0]._replace_single(to_replace, value,
inplace=inplace, filter=filter,
regex=regex)
return blk
def _replace_single(self, to_replace, value, inplace=False, filter=None,
regex=False):
# to_replace is regex compilable
to_rep_re = regex and com.is_re_compilable(to_replace)
# regex is regex compilable
regex_re = com.is_re_compilable(regex)
# only one will survive
if to_rep_re and regex_re:
raise AssertionError('only one of to_replace and regex can be '
'regex compilable')
# if regex was passed as something that can be a regex (rather than a
# boolean)
if regex_re:
to_replace = regex
regex = regex_re or to_rep_re
# try to get the pattern attribute (compiled re) or it's a string
try:
pattern = to_replace.pattern
except AttributeError:
pattern = to_replace
# if the pattern is not empty and to_replace is either a string or a
# regex
if regex and pattern:
rx = re.compile(to_replace)
else:
# if the thing to replace is not a string or compiled regex call
# the superclass method -> to_replace is some kind of object
result = super(ObjectBlock, self).replace(to_replace, value,
inplace=inplace,
filter=filter,
regex=regex)
if not isinstance(result, list):
result = [result]
return result
new_values = self.values if inplace else self.values.copy()
# deal with replacing values with objects (strings) that match but
# whose replacement is not a string (numeric, nan, object)
if isnull(value) or not isinstance(value, compat.string_types):
def re_replacer(s):
try:
return value if rx.search(s) is not None else s
except TypeError:
return s
else:
# value is guaranteed to be a string here, s can be either a string
            # or null; if it's null it gets returned unchanged
def re_replacer(s):
try:
return rx.sub(value, s)
except TypeError:
return s
f = np.vectorize(re_replacer, otypes=[self.dtype])
if filter is None:
filt = slice(None)
else:
filt = self.mgr_locs.isin(filter).nonzero()[0]
new_values[filt] = f(new_values[filt])
return [self if inplace else
make_block(new_values,
fastpath=True, placement=self.mgr_locs)]
class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock):
__slots__ = ()
is_categorical = True
_can_hold_na = True
_holder = Categorical
def __init__(self, values, placement,
fastpath=False, **kwargs):
# coerce to categorical if we can
super(CategoricalBlock, self).__init__(maybe_to_categorical(values),
fastpath=True, placement=placement,
**kwargs)
@property
def is_view(self):
""" I am never a view """
return False
def to_dense(self):
return self.values.to_dense().view()
@property
def shape(self):
return (len(self.mgr_locs), len(self.values))
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
return np.object_
def _slice(self, slicer):
""" return a slice of my values """
# slice the category
# return same dims as we currently have
return self.values._slice(slicer)
def fillna(self, value, limit=None, inplace=False, downcast=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
"not been implemented yet")
values = self.values if inplace else self.values.copy()
return [self.make_block_same_class(values=values.fillna(value=value,
limit=limit),
placement=self.mgr_locs)]
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, **kwargs):
values = self.values if inplace else self.values.copy()
return self.make_block_same_class(values=values.fillna(fill_value=fill_value,
method=method,
limit=limit),
placement=self.mgr_locs)
def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
"""
        Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = None
else:
fill_value = fill_tuple[0]
# axis doesn't matter; we are really a single-dim object
        # but are passed the axis depending on the calling routine
        # if it's REALLY axis 0, then this will be a reindex and not a take
new_values = self.values.take_nd(indexer, fill_value=fill_value)
# if we are a 1-dim object, then always place at 0
if self.ndim == 1:
new_mgr_locs = [0]
else:
if new_mgr_locs is None:
new_mgr_locs = self.mgr_locs
return self.make_block_same_class(new_values, new_mgr_locs)
def putmask(self, mask, new, align=True, inplace=False):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block(s), the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
new_values[mask] = new
return [self.make_block_same_class(values=new_values, placement=self.mgr_locs)]
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
klass=None):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on error if raise_on_error is True
"""
if self.is_categorical_astype(dtype):
values = self.values
else:
values = np.asarray(self.values).astype(dtype, copy=False)
if copy:
values = values.copy()
return make_block(values,
ndim=self.ndim,
placement=self.mgr_locs)
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
# Categorical is always one dimension
values = values[slicer]
mask = isnull(values)
values = np.array(values, dtype='object')
values[mask] = na_rep
# we are expected to return a 2-d ndarray
return values.reshape(1, len(values))
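# --- illustrative sketch (not part of the original module) ------------------
# What to_native_types amounts to for a categorical column, written with the
# public API only: materialize to an object ndarray, substitute na_rep for
# missing entries, and hand back a single 2-d row.  The helper is hypothetical
# and unused elsewhere.
def _example_categorical_to_native_types(na_rep=''):
    import numpy as np
    import pandas as pd
    cat = pd.Categorical(['a', 'b', None, 'a'])
    values = np.array(cat, dtype='object')
    values[pd.isnull(values)] = na_rep
    return values.reshape(1, len(values))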
class DatetimeBlock(Block):
__slots__ = ()
is_datetime = True
_can_hold_na = True
def __init__(self, values, placement,
fastpath=False, **kwargs):
if values.dtype != _NS_DTYPE:
values = tslib.cast_to_nanoseconds(values)
super(DatetimeBlock, self).__init__(values,
fastpath=True, placement=placement,
**kwargs)
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return element.dtype == _NS_DTYPE or element.dtype == np.int64
return (com.is_integer(element) or
isinstance(element, datetime) or
isnull(element))
def _try_cast(self, element):
try:
return int(element)
except:
return element
def _try_operate(self, values):
""" return a version to operate on """
return values.view('i8')
def _try_coerce_args(self, values, other):
""" Coerce values and other to dtype 'i8'. NaN and NaT convert to
the smallest i8, and will correctly round-trip to NaT if converted
back in _try_coerce_result. values is always ndarray-like, other
may not be """
values = values.view('i8')
if is_null_datelike_scalar(other):
other = tslib.iNaT
elif isinstance(other, datetime):
other = lib.Timestamp(other).asm8.view('i8')
elif hasattr(other, 'dtype') and com.is_integer_dtype(other):
other = other.view('i8')
else:
other = np.array(other, dtype='i8')
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
if isinstance(result, np.ndarray):
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('M8[ns]')
elif isinstance(result, (np.integer, np.datetime64)):
result = lib.Timestamp(result)
return result
@property
def fill_value(self):
return tslib.iNaT
def _try_fill(self, value):
""" if we are a NaT, return the actual fill value """
if isinstance(value, type(tslib.NaT)) or np.array(isnull(value)).all():
value = tslib.iNaT
return value
def fillna(self, value, limit=None,
inplace=False, downcast=None):
# straight putmask here
values = self.values if inplace else self.values.copy()
mask = isnull(self.values)
value = self._try_fill(value)
if limit is not None:
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillna' "
"is currently limited to 2")
mask[mask.cumsum(self.ndim - 1) > limit] = False
np.putmask(values, mask, value)
return [self if inplace else
make_block(values,
fastpath=True, placement=self.mgr_locs)]
def to_native_types(self, slicer=None, na_rep=None, date_format=None,
quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
from pandas.core.format import _get_format_datetime64_from_values
format = _get_format_datetime64_from_values(values, date_format)
result = tslib.format_array_from_datetime(values.view('i8').ravel(),
tz=None,
format=format,
na_rep=na_rep).reshape(values.shape)
return result
def should_store(self, value):
return issubclass(value.dtype.type, np.datetime64)
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
if values.dtype != _NS_DTYPE:
# Workaround for numpy 1.6 bug
values = tslib.cast_to_nanoseconds(values)
self.values[locs] = values
def get_values(self, dtype=None):
# return object dtype as Timestamps
if dtype == object:
return lib.map_infer(self.values.ravel(), lib.Timestamp)\
.reshape(self.values.shape)
return self.values
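# --- illustrative sketch (not part of the original module) ------------------
# The i8 coercion used by DatetimeBlock._try_coerce_args, spelled out with
# plain numpy: datetime64[ns] data is operated on as int64 nanoseconds, and
# NaT occupies the smallest int64, so it round-trips cleanly.  Hypothetical
# helper, not used by the library.
def _example_datetime_i8_roundtrip():
    import numpy as np
    values = np.array(['2014-01-01', 'NaT'], dtype='M8[ns]')
    as_i8 = values.view('i8')               # nanoseconds since epoch, NaT -> iNaT
    assert as_i8[1] == np.iinfo(np.int64).min
    return as_i8.view('M8[ns]')             # NaT comes back as NaT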
class SparseBlock(NonConsolidatableMixIn, Block):
""" implement as a list of sparse arrays of the same dtype """
__slots__ = ()
is_sparse = True
is_numeric = True
_can_hold_na = True
_ftype = 'sparse'
_holder = SparseArray
@property
def shape(self):
return (len(self.mgr_locs), self.sp_index.length)
@property
def itemsize(self):
return self.dtype.itemsize
@property
def fill_value(self):
#return np.nan
return self.values.fill_value
@fill_value.setter
def fill_value(self, v):
# we may need to upcast our fill to match our dtype
if issubclass(self.dtype.type, np.floating):
v = float(v)
self.values.fill_value = v
@property
def sp_values(self):
return self.values.sp_values
@sp_values.setter
def sp_values(self, v):
# reset the sparse values
self.values = SparseArray(v, sparse_index=self.sp_index,
kind=self.kind, dtype=v.dtype,
fill_value=self.values.fill_value,
copy=False)
@property
def sp_index(self):
return self.values.sp_index
@property
def kind(self):
return self.values.kind
def __len__(self):
try:
return self.sp_index.length
except:
return 0
def copy(self, deep=True):
return self.make_block_same_class(values=self.values,
sparse_index=self.sp_index,
kind=self.kind, copy=deep,
placement=self.mgr_locs)
def make_block_same_class(self, values, placement,
sparse_index=None, kind=None, dtype=None,
fill_value=None, copy=False, fastpath=True):
""" return a new block """
if dtype is None:
dtype = self.dtype
if fill_value is None:
fill_value = self.values.fill_value
# if not isinstance(values, SparseArray) and values.ndim != self.ndim:
# raise ValueError("ndim mismatch")
if values.ndim == 2:
nitems = values.shape[0]
if nitems == 0:
# kludgy, but SparseBlocks cannot handle slices, where the
# output is 0-item, so let's convert it to a dense block: it
# won't take space since there's 0 items, plus it will preserve
# the dtype.
return make_block(np.empty(values.shape, dtype=dtype),
placement, fastpath=True,)
elif nitems > 1:
raise ValueError("Only 1-item 2d sparse blocks are supported")
else:
values = values.reshape(values.shape[1])
new_values = SparseArray(values, sparse_index=sparse_index,
kind=kind or self.kind, dtype=dtype,
fill_value=fill_value, copy=copy)
return make_block(new_values, ndim=self.ndim,
fastpath=fastpath, placement=placement)
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, **kwargs):
values = com.interpolate_2d(
self.values.to_dense(), method, axis, limit, fill_value)
return self.make_block_same_class(values=values,
placement=self.mgr_locs)
def fillna(self, value, limit=None, inplace=False, downcast=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
"not been implemented yet")
if issubclass(self.dtype.type, np.floating):
value = float(value)
values = self.values if inplace else self.values.copy()
return [self.make_block_same_class(values=values.get_values(value),
fill_value=value,
placement=self.mgr_locs)]
def shift(self, periods, axis=0):
""" shift the block by periods """
N = len(self.values.T)
indexer = np.zeros(N, dtype=int)
if periods > 0:
indexer[periods:] = np.arange(N - periods)
else:
indexer[:periods] = np.arange(-periods, N)
new_values = self.values.to_dense().take(indexer)
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(new_values)
if periods > 0:
new_values[:periods] = fill_value
else:
new_values[periods:] = fill_value
return [self.make_block_same_class(new_values, placement=self.mgr_locs)]
def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
# taking on the 0th axis always here
if fill_value is None:
fill_value = self.fill_value
return self.make_block_same_class(self.values.take(indexer),
fill_value=fill_value,
placement=self.mgr_locs)
def sparse_reindex(self, new_index):
""" sparse reindex and return a new block
current reindex only works for float64 dtype! """
values = self.values
values = values.sp_index.to_int_index().reindex(
values.sp_values.astype('float64'), values.fill_value, new_index)
return self.make_block_same_class(values, sparse_index=new_index,
placement=self.mgr_locs)
def make_block(values, placement, klass=None, ndim=None,
dtype=None, fastpath=False):
if klass is None:
dtype = dtype or values.dtype
vtype = dtype.type
if isinstance(values, SparseArray):
klass = SparseBlock
elif issubclass(vtype, np.floating):
klass = FloatBlock
elif (issubclass(vtype, np.integer) and
issubclass(vtype, np.timedelta64)):
klass = TimeDeltaBlock
elif (issubclass(vtype, np.integer) and
not issubclass(vtype, np.datetime64)):
klass = IntBlock
elif dtype == np.bool_:
klass = BoolBlock
elif issubclass(vtype, np.datetime64):
klass = DatetimeBlock
elif issubclass(vtype, np.complexfloating):
klass = ComplexBlock
elif is_categorical(values):
klass = CategoricalBlock
else:
klass = ObjectBlock
return klass(values, ndim=ndim, fastpath=fastpath,
placement=placement)
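# --- illustrative sketch (not part of the original module) ------------------
# A condensed reading of the dispatch in make_block above: the block class is
# picked from the dtype (or the container type for sparse/categorical data).
# This mapping is a simplification for the reader, not the exact rule set.
def _example_block_class_for_dtype(dtype):
    import numpy as np
    dtype = np.dtype(dtype)
    vtype = dtype.type
    if issubclass(vtype, np.floating):
        return 'FloatBlock'
    elif issubclass(vtype, np.datetime64):
        return 'DatetimeBlock'
    elif issubclass(vtype, np.timedelta64):
        return 'TimeDeltaBlock'
    elif issubclass(vtype, np.integer):
        return 'IntBlock'
    elif dtype == np.bool_:
        return 'BoolBlock'
    elif issubclass(vtype, np.complexfloating):
        return 'ComplexBlock'
    return 'ObjectBlock'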
# TODO: flexible with index=None and/or items=None
class BlockManager(PandasObject):
"""
Core internal data structure to implement DataFrame
Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
lightweight blocked set of labeled data to be manipulated by the DataFrame
public API class
Attributes
----------
shape
ndim
axes
values
items
Methods
-------
set_axis(axis, new_labels)
copy(deep=True)
get_dtype_counts
get_ftype_counts
get_dtypes
get_ftypes
apply(func, axes, block_filter_fn)
get_bool_data
get_numeric_data
get_slice(slice_like, axis)
get(label)
iget(loc)
get_scalar(label_tup)
take(indexer, axis)
reindex_axis(new_labels, axis)
reindex_indexer(new_labels, indexer, axis)
delete(label)
insert(loc, label, value)
set(label, value)
Parameters
----------
Notes
-----
This is *not* a public API class
"""
__slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated',
'_is_consolidated', '_blknos', '_blklocs']
def __init__(self, blocks, axes, do_integrity_check=True, fastpath=True):
self.axes = [_ensure_index(ax) for ax in axes]
self.blocks = tuple(blocks)
for block in blocks:
if block.is_sparse:
if len(block.mgr_locs) != 1:
raise AssertionError("Sparse block refers to multiple items")
else:
if self.ndim != block.ndim:
raise AssertionError(('Number of Block dimensions (%d) must '
'equal number of axes (%d)')
% (block.ndim, self.ndim))
if do_integrity_check:
self._verify_integrity()
self._consolidate_check()
self._rebuild_blknos_and_blklocs()
def make_empty(self, axes=None):
""" return an empty BlockManager with the items axis of len 0 """
if axes is None:
axes = [_ensure_index([])] + [
_ensure_index(a) for a in self.axes[1:]
]
# preserve dtype if possible
if self.ndim == 1:
blocks = np.array([], dtype=self.array_dtype)
else:
blocks = []
return self.__class__(blocks, axes)
def __nonzero__(self):
return True
# Python3 compat
__bool__ = __nonzero__
@property
def shape(self):
return tuple(len(ax) for ax in self.axes)
@property
def ndim(self):
return len(self.axes)
def set_axis(self, axis, new_labels):
new_labels = _ensure_index(new_labels)
old_len = len(self.axes[axis])
new_len = len(new_labels)
if new_len != old_len:
raise ValueError('Length mismatch: Expected axis has %d elements, '
'new values have %d elements' % (old_len, new_len))
self.axes[axis] = new_labels
def rename_axis(self, mapper, axis, copy=True):
"""
Rename one of axes.
Parameters
----------
mapper : unary callable
axis : int
copy : boolean, default True
"""
obj = self.copy(deep=copy)
obj.set_axis(axis, _transform_index(self.axes[axis], mapper))
return obj
def add_prefix(self, prefix):
f = (str(prefix) + '%s').__mod__
return self.rename_axis(f, axis=0)
def add_suffix(self, suffix):
f = ('%s' + str(suffix)).__mod__
return self.rename_axis(f, axis=0)
@property
def _is_single_block(self):
if self.ndim == 1:
return True
if len(self.blocks) != 1:
return False
blk = self.blocks[0]
return (blk.mgr_locs.is_slice_like and
blk.mgr_locs.as_slice == slice(0, len(self), 1))
def _rebuild_blknos_and_blklocs(self):
"""
Update mgr._blknos / mgr._blklocs.
"""
new_blknos = np.empty(self.shape[0], dtype=np.int64)
new_blklocs = np.empty(self.shape[0], dtype=np.int64)
new_blknos.fill(-1)
new_blklocs.fill(-1)
for blkno, blk in enumerate(self.blocks):
rl = blk.mgr_locs
new_blknos[rl.indexer] = blkno
new_blklocs[rl.indexer] = np.arange(len(rl))
if (new_blknos == -1).any():
raise AssertionError("Gaps in blk ref_locs")
self._blknos = new_blknos
self._blklocs = new_blklocs
# make items read only for now
def _get_items(self):
return self.axes[0]
items = property(fget=_get_items)
def _get_counts(self, f):
""" return a dict of the counts of the function in BlockManager """
self._consolidate_inplace()
counts = dict()
for b in self.blocks:
v = f(b)
counts[v] = counts.get(v, 0) + b.shape[0]
return counts
def get_dtype_counts(self):
return self._get_counts(lambda b: b.dtype.name)
def get_ftype_counts(self):
return self._get_counts(lambda b: b.ftype)
def get_dtypes(self):
dtypes = np.array([blk.dtype for blk in self.blocks])
return com.take_1d(dtypes, self._blknos, allow_fill=False)
def get_ftypes(self):
ftypes = np.array([blk.ftype for blk in self.blocks])
return com.take_1d(ftypes, self._blknos, allow_fill=False)
def __getstate__(self):
block_values = [b.values for b in self.blocks]
block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks]
axes_array = [ax for ax in self.axes]
extra_state = {
'0.14.1': {
'axes': axes_array,
'blocks': [dict(values=b.values,
mgr_locs=b.mgr_locs.indexer)
for b in self.blocks]
}
}
# First three elements of the state are to maintain forward
# compatibility with 0.13.1.
return axes_array, block_values, block_items, extra_state
def __setstate__(self, state):
def unpickle_block(values, mgr_locs):
# numpy < 1.7 pickle compat
if values.dtype == 'M8[us]':
values = values.astype('M8[ns]')
return make_block(values, placement=mgr_locs)
if (isinstance(state, tuple) and len(state) >= 4
and '0.14.1' in state[3]):
state = state[3]['0.14.1']
self.axes = [_ensure_index(ax) for ax in state['axes']]
self.blocks = tuple(
unpickle_block(b['values'], b['mgr_locs'])
for b in state['blocks'])
else:
# discard anything after 3rd, support beta pickling format for a
# little while longer
ax_arrays, bvalues, bitems = state[:3]
self.axes = [_ensure_index(ax) for ax in ax_arrays]
if len(bitems) == 1 and self.axes[0].equals(bitems[0]):
# This is a workaround for pre-0.14.1 pickles that didn't
# support unpickling multi-block frames/panels with non-unique
# columns/items, because given a manager with items ["a", "b",
# "a"] there's no way of knowing which block's "a" is where.
#
# Single-block case can be supported under the assumption that
# block items corresponded to manager items 1-to-1.
all_mgr_locs = [slice(0, len(bitems[0]))]
else:
all_mgr_locs = [self.axes[0].get_indexer(blk_items)
for blk_items in bitems]
self.blocks = tuple(
unpickle_block(values, mgr_locs)
for values, mgr_locs in zip(bvalues, all_mgr_locs))
self._post_setstate()
def _post_setstate(self):
self._is_consolidated = False
self._known_consolidated = False
self._rebuild_blknos_and_blklocs()
def __len__(self):
return len(self.items)
def __unicode__(self):
output = com.pprint_thing(self.__class__.__name__)
for i, ax in enumerate(self.axes):
if i == 0:
output += u('\nItems: %s') % ax
else:
output += u('\nAxis %d: %s') % (i, ax)
for block in self.blocks:
output += u('\n%s') % com.pprint_thing(block)
return output
def _verify_integrity(self):
mgr_shape = self.shape
tot_items = sum(len(x.mgr_locs) for x in self.blocks)
for block in self.blocks:
if not block.is_sparse and block.shape[1:] != mgr_shape[1:]:
construction_error(tot_items, block.shape[1:], self.axes)
if len(self.items) != tot_items:
raise AssertionError('Number of manager items must equal union of '
'block items\n# manager items: {0}, # '
'tot_items: {1}'.format(len(self.items),
tot_items))
def apply(self, f, axes=None, filter=None, do_integrity_check=False, **kwargs):
"""
iterate over the blocks, collect and create a new block manager
Parameters
----------
f : the callable or function name to operate on at the block level
axes : optional (if not supplied, use self.axes)
filter : list, if supplied, only call the block if the filter is in
the block
do_integrity_check : boolean, default False. Do the block manager integrity check
Returns
-------
Block Manager (new object)
"""
result_blocks = []
# filter kwarg is used in replace-* family of methods
if filter is not None:
filter_locs = set(self.items.get_indexer_for(filter))
if len(filter_locs) == len(self.items):
# All items are included, as if there were no filtering
filter = None
else:
kwargs['filter'] = filter_locs
if f == 'where' and kwargs.get('align', True):
align_copy = True
align_keys = ['other', 'cond']
elif f == 'putmask' and kwargs.get('align', True):
align_copy = False
align_keys = ['new', 'mask']
elif f == 'eval':
align_copy = False
align_keys = ['other']
elif f == 'fillna':
# fillna internally does putmask, maybe it's better to do this
# at mgr, not block level?
align_copy = False
align_keys = ['value']
else:
align_keys = []
aligned_args = dict((k, kwargs[k]) for k in align_keys
if hasattr(kwargs[k], 'reindex_axis'))
for b in self.blocks:
if filter is not None:
if not b.mgr_locs.isin(filter_locs).any():
result_blocks.append(b)
continue
if aligned_args:
b_items = self.items[b.mgr_locs.indexer]
for k, obj in aligned_args.items():
axis = getattr(obj, '_info_axis_number', 0)
kwargs[k] = obj.reindex_axis(b_items, axis=axis,
copy=align_copy)
applied = getattr(b, f)(**kwargs)
if isinstance(applied, list):
result_blocks.extend(applied)
else:
result_blocks.append(applied)
if len(result_blocks) == 0:
return self.make_empty(axes or self.axes)
bm = self.__class__(result_blocks, axes or self.axes,
do_integrity_check=do_integrity_check)
bm._consolidate_inplace()
return bm
def isnull(self, **kwargs):
return self.apply('apply', **kwargs)
def where(self, **kwargs):
return self.apply('where', **kwargs)
def eval(self, **kwargs):
return self.apply('eval', **kwargs)
def setitem(self, **kwargs):
return self.apply('setitem', **kwargs)
def putmask(self, **kwargs):
return self.apply('putmask', **kwargs)
def diff(self, **kwargs):
return self.apply('diff', **kwargs)
def interpolate(self, **kwargs):
return self.apply('interpolate', **kwargs)
def shift(self, **kwargs):
return self.apply('shift', **kwargs)
def fillna(self, **kwargs):
return self.apply('fillna', **kwargs)
def downcast(self, **kwargs):
return self.apply('downcast', **kwargs)
def astype(self, dtype, **kwargs):
return self.apply('astype', dtype=dtype, **kwargs)
def convert(self, **kwargs):
return self.apply('convert', **kwargs)
def replace(self, **kwargs):
return self.apply('replace', **kwargs)
def replace_list(self, src_list, dest_list, inplace=False, regex=False):
""" do a list replace """
# figure out our mask a-priori to avoid repeated replacements
values = self.as_matrix()
def comp(s):
if isnull(s):
return isnull(values)
return _possibly_compare(values, getattr(s, 'asm8', s),
operator.eq)
masks = [comp(s) for i, s in enumerate(src_list)]
result_blocks = []
for blk in self.blocks:
# it's possible to get multiple result blocks here;
# replace always returns a list
rb = [blk if inplace else blk.copy()]
for i, (s, d) in enumerate(zip(src_list, dest_list)):
new_rb = []
for b in rb:
if b.dtype == np.object_:
result = b.replace(s, d, inplace=inplace,
regex=regex)
if isinstance(result, list):
new_rb.extend(result)
else:
new_rb.append(result)
else:
# get our mask for this element, sized to this
# particular block
m = masks[i][b.mgr_locs.indexer]
if m.any():
new_rb.extend(b.putmask(m, d, inplace=True))
else:
new_rb.append(b)
rb = new_rb
result_blocks.extend(rb)
bm = self.__class__(result_blocks, self.axes)
bm._consolidate_inplace()
return bm
def reshape_nd(self, axes, **kwargs):
""" a 2d-nd reshape operation on a BlockManager """
return self.apply('reshape_nd', axes=axes, **kwargs)
def is_consolidated(self):
"""
Return True if the blocks are consolidated, i.e. there is at most
one block per ftype
"""
if not self._known_consolidated:
self._consolidate_check()
return self._is_consolidated
def _consolidate_check(self):
ftypes = [blk.ftype for blk in self.blocks]
self._is_consolidated = len(ftypes) == len(set(ftypes))
self._known_consolidated = True
@property
def is_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return len(self.blocks) > 1
@property
def is_numeric_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return all([block.is_numeric for block in self.blocks])
@property
def is_datelike_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return any([block.is_datelike for block in self.blocks])
@property
def is_view(self):
""" return a boolean if we are a single block and are a view """
if len(self.blocks) == 1:
return self.blocks[0].is_view
# It is technically possible to figure out which blocks are views
# e.g. [ b.values.base is not None for b in self.blocks ]
# but then we have the case of possibly some blocks being a view
# and some blocks not. setting in theory is possible on the non-view
# blocks w/o causing a SettingWithCopy raise/warn. But this is a bit
# complicated
return False
def get_bool_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_bool], copy)
def get_numeric_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_numeric], copy)
def combine(self, blocks, copy=True):
""" return a new manager with the blocks """
if len(blocks) == 0:
return self.make_empty()
# FIXME: optimization potential
indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks]))
inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])
new_items = self.items.take(indexer)
new_blocks = []
for b in blocks:
b = b.copy(deep=copy)
b.mgr_locs = com.take_1d(inv_indexer, b.mgr_locs.as_array, axis=0,
allow_fill=False)
new_blocks.append(b)
new_axes = list(self.axes)
new_axes[0] = new_items
return self.__class__(new_blocks, new_axes, do_integrity_check=False)
def get_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(slobj)
else:
slicer = [slice(None)] * (axis + 1)
slicer[axis] = slobj
slicer = tuple(slicer)
new_blocks = [blk.getitem_block(slicer) for blk in self.blocks]
new_axes = list(self.axes)
new_axes[axis] = new_axes[axis][slobj]
bm = self.__class__(new_blocks, new_axes, do_integrity_check=False,
fastpath=True)
bm._consolidate_inplace()
return bm
def __contains__(self, item):
return item in self.items
@property
def nblocks(self):
return len(self.blocks)
def copy(self, deep=True):
"""
Make deep or shallow copy of BlockManager
Parameters
----------
deep : boolean or string, default True
If False, return shallow copy (do not copy data)
If 'all', copy data and a deep copy of the index
Returns
-------
copy : BlockManager
"""
# this preserves the notion of view copying of axes
if deep:
if deep == 'all':
copy = lambda ax: ax.copy(deep=True)
else:
copy = lambda ax: ax.view()
new_axes = [ copy(ax) for ax in self.axes]
else:
new_axes = list(self.axes)
return self.apply('copy', axes=new_axes, deep=deep,
do_integrity_check=False)
def as_matrix(self, items=None):
if len(self.blocks) == 0:
return np.empty(self.shape, dtype=float)
if items is not None:
mgr = self.reindex_axis(items, axis=0)
else:
mgr = self
if self._is_single_block or not self.is_mixed_type:
return mgr.blocks[0].get_values()
else:
return mgr._interleave()
def _interleave(self):
"""
Return ndarray from blocks with specified item order
Items must be contained in the blocks
"""
dtype = _interleaved_dtype(self.blocks)
result = np.empty(self.shape, dtype=dtype)
if result.shape[0] == 0:
# Workaround for numpy 1.7 bug:
#
# >>> a = np.empty((0,10))
# >>> a[slice(0,0)]
# array([], shape=(0, 10), dtype=float64)
# >>> a[[]]
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# IndexError: index 0 is out of bounds for axis 0 with size 0
return result
itemmask = np.zeros(self.shape[0])
for blk in self.blocks:
rl = blk.mgr_locs
result[rl.indexer] = blk.get_values(dtype)
itemmask[rl.indexer] = 1
if not itemmask.all():
raise AssertionError('Some items were not contained in blocks')
return result
def xs(self, key, axis=1, copy=True, takeable=False):
if axis < 1:
raise AssertionError('Can only take xs across axis >= 1, got %d'
% axis)
# take by position
if takeable:
loc = key
else:
loc = self.axes[axis].get_loc(key)
slicer = [slice(None, None) for _ in range(self.ndim)]
slicer[axis] = loc
slicer = tuple(slicer)
new_axes = list(self.axes)
# could be an array indexer!
if isinstance(loc, (slice, np.ndarray)):
new_axes[axis] = new_axes[axis][loc]
else:
new_axes.pop(axis)
new_blocks = []
if len(self.blocks) > 1:
# we must copy here as we are mixed type
for blk in self.blocks:
newb = make_block(values=blk.values[slicer],
klass=blk.__class__, fastpath=True,
placement=blk.mgr_locs)
new_blocks.append(newb)
elif len(self.blocks) == 1:
block = self.blocks[0]
vals = block.values[slicer]
if copy:
vals = vals.copy()
new_blocks = [make_block(values=vals, placement=block.mgr_locs,
klass=block.__class__, fastpath=True,)]
return self.__class__(new_blocks, new_axes)
def fast_xs(self, loc):
"""
get a cross sectional for a given location in the
items ; handle dups
return the result; it *could* be a view in the case of a
single block
"""
if len(self.blocks) == 1:
return self.blocks[0].values[:, loc]
items = self.items
# non-unique (GH4726)
if not items.is_unique:
result = self._interleave()
if self.ndim == 2:
result = result.T
return result[loc]
# unique
dtype = _interleaved_dtype(self.blocks)
n = len(items)
result = np.empty(n, dtype=dtype)
for blk in self.blocks:
# Such assignment may incorrectly coerce NaT to None
# result[blk.mgr_locs] = blk._slice((slice(None), loc))
for i, rl in enumerate(blk.mgr_locs):
result[rl] = blk._try_coerce_result(blk.iget((i, loc)))
return result
def consolidate(self):
"""
Join together blocks having same dtype
Returns
-------
y : BlockManager
"""
if self.is_consolidated():
return self
bm = self.__class__(self.blocks, self.axes)
bm._is_consolidated = False
bm._consolidate_inplace()
return bm
def _consolidate_inplace(self):
if not self.is_consolidated():
self.blocks = tuple(_consolidate(self.blocks))
self._is_consolidated = True
self._known_consolidated = True
self._rebuild_blknos_and_blklocs()
def get(self, item, fastpath=True):
"""
Return values for selected item (ndarray or BlockManager).
"""
if self.items.is_unique:
if not isnull(item):
loc = self.items.get_loc(item)
else:
indexer = np.arange(len(self.items))[isnull(self.items)]
# allow a single nan location indexer
if not np.isscalar(indexer):
if len(indexer) == 1:
loc = indexer.item()
else:
raise ValueError("cannot label index with a null key")
return self.iget(loc, fastpath=fastpath)
else:
if isnull(item):
raise ValueError("cannot label index with a null key")
indexer = self.items.get_indexer_for([item])
return self.reindex_indexer(new_axis=self.items[indexer],
indexer=indexer, axis=0, allow_dups=True)
def iget(self, i, fastpath=True):
"""
Return the data as a SingleBlockManager if fastpath=True and possible.
Otherwise return as an ndarray.
"""
block = self.blocks[self._blknos[i]]
values = block.iget(self._blklocs[i])
if not fastpath or block.is_sparse or values.ndim != 1:
return values
# fastpath shortcut for selecting a single dim from a 2-dim BM
return SingleBlockManager([ block.make_block_same_class(values,
placement=slice(0, len(values)),
ndim=1,
fastpath=True) ],
self.axes[1])
def get_scalar(self, tup):
"""
Retrieve single item
"""
full_loc = list(ax.get_loc(x)
for ax, x in zip(self.axes, tup))
blk = self.blocks[self._blknos[full_loc[0]]]
full_loc[0] = self._blklocs[full_loc[0]]
# FIXME: this may return non-upcasted types?
return blk.values[tuple(full_loc)]
def delete(self, item):
"""
Delete selected item (items if non-unique) in-place.
"""
indexer = self.items.get_loc(item)
is_deleted = np.zeros(self.shape[0], dtype=np.bool_)
is_deleted[indexer] = True
ref_loc_offset = -is_deleted.cumsum()
is_blk_deleted = [False] * len(self.blocks)
if isinstance(indexer, int):
affected_start = indexer
else:
affected_start = is_deleted.nonzero()[0][0]
for blkno, _ in _fast_count_smallints(self._blknos[affected_start:]):
blk = self.blocks[blkno]
bml = blk.mgr_locs
blk_del = is_deleted[bml.indexer].nonzero()[0]
if len(blk_del) == len(bml):
is_blk_deleted[blkno] = True
continue
elif len(blk_del) != 0:
blk.delete(blk_del)
bml = blk.mgr_locs
blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer])
# FIXME: use Index.delete as soon as it uses fastpath=True
self.axes[0] = self.items[~is_deleted]
self.blocks = tuple(b for blkno, b in enumerate(self.blocks)
if not is_blk_deleted[blkno])
self._shape = None
self._rebuild_blknos_and_blklocs()
def set(self, item, value, check=False):
"""
Set new item in-place. Does not consolidate. Adds new Block if not
contained in the current set of items
if check, then validate that we are not setting the same data in-place
"""
# FIXME: refactor, clearly separate broadcasting & zip-like assignment
# can prob also fix the various if tests for sparse/categorical
value_is_sparse = isinstance(value, SparseArray)
value_is_cat = is_categorical(value)
value_is_nonconsolidatable = value_is_sparse or value_is_cat
if value_is_sparse:
# sparse
assert self.ndim == 2
def value_getitem(placement):
return value
elif value_is_cat:
# categorical
def value_getitem(placement):
return value
else:
if value.ndim == self.ndim - 1:
value = value.reshape((1,) + value.shape)
def value_getitem(placement):
return value
else:
def value_getitem(placement):
return value[placement.indexer]
if value.shape[1:] != self.shape[1:]:
raise AssertionError('Shape of new values must be compatible '
'with manager shape')
try:
loc = self.items.get_loc(item)
except KeyError:
# This item wasn't present, just insert at end
self.insert(len(self.items), item, value)
return
if isinstance(loc, int):
loc = [loc]
blknos = self._blknos[loc]
blklocs = self._blklocs[loc].copy()
unfit_mgr_locs = []
unfit_val_locs = []
removed_blknos = []
for blkno, val_locs in _get_blkno_placements(blknos, len(self.blocks),
group=True):
blk = self.blocks[blkno]
blk_locs = blklocs[val_locs.indexer]
if blk.should_store(value):
blk.set(blk_locs, value_getitem(val_locs), check=check)
else:
unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs])
unfit_val_locs.append(val_locs)
# If all block items are unfit, schedule the block for removal.
if len(val_locs) == len(blk.mgr_locs):
removed_blknos.append(blkno)
else:
self._blklocs[blk.mgr_locs.indexer] = -1
blk.delete(blk_locs)
self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk))
if len(removed_blknos):
# Remove blocks & update blknos accordingly
is_deleted = np.zeros(self.nblocks, dtype=np.bool_)
is_deleted[removed_blknos] = True
new_blknos = np.empty(self.nblocks, dtype=np.int64)
new_blknos.fill(-1)
new_blknos[~is_deleted] = np.arange(self.nblocks -
len(removed_blknos))
self._blknos = com.take_1d(new_blknos, self._blknos, axis=0,
allow_fill=False)
self.blocks = tuple(blk for i, blk in enumerate(self.blocks)
if i not in set(removed_blknos))
if unfit_val_locs:
unfit_mgr_locs = np.concatenate(unfit_mgr_locs)
unfit_count = len(unfit_mgr_locs)
new_blocks = []
if value_is_nonconsolidatable:
# This code (ab-)uses the fact that sparse blocks contain only
# one item.
new_blocks.extend(
make_block(values=value.copy(), ndim=self.ndim,
placement=slice(mgr_loc, mgr_loc + 1))
for mgr_loc in unfit_mgr_locs)
self._blknos[unfit_mgr_locs] = (np.arange(unfit_count) +
len(self.blocks))
self._blklocs[unfit_mgr_locs] = 0
else:
# unfit_val_locs contains BlockPlacement objects
unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:])
new_blocks.append(
make_block(values=value_getitem(unfit_val_items),
ndim=self.ndim, placement=unfit_mgr_locs))
self._blknos[unfit_mgr_locs] = len(self.blocks)
self._blklocs[unfit_mgr_locs] = np.arange(unfit_count)
self.blocks += tuple(new_blocks)
# Newly created block's dtype may already be present.
self._known_consolidated = False
def insert(self, loc, item, value, allow_duplicates=False):
"""
Insert item at selected position.
Parameters
----------
loc : int
item : hashable
value : array_like
allow_duplicates: bool
If False, trying to insert non-unique item will raise
"""
if not allow_duplicates and item in self.items:
# Should this be a different kind of error??
raise ValueError('cannot insert %s, already exists' % item)
if not isinstance(loc, int):
raise TypeError("loc must be int")
block = make_block(values=value,
ndim=self.ndim,
placement=slice(loc, loc+1))
for blkno, count in _fast_count_smallints(self._blknos[loc:]):
blk = self.blocks[blkno]
if count == len(blk.mgr_locs):
blk.mgr_locs = blk.mgr_locs.add(1)
else:
new_mgr_locs = blk.mgr_locs.as_array.copy()
new_mgr_locs[new_mgr_locs >= loc] += 1
blk.mgr_locs = new_mgr_locs
if loc == self._blklocs.shape[0]:
# np.append is a lot faster (at least in numpy 1.7.1), let's use it
# if we can.
self._blklocs = np.append(self._blklocs, 0)
self._blknos = np.append(self._blknos, len(self.blocks))
else:
self._blklocs = np.insert(self._blklocs, loc, 0)
self._blknos = np.insert(self._blknos, loc, len(self.blocks))
self.axes[0] = self.items.insert(loc, item)
self.blocks += (block,)
self._shape = None
self._known_consolidated = False
if len(self.blocks) > 100:
self._consolidate_inplace()
def reindex_axis(self, new_index, axis, method=None, limit=None,
fill_value=None, copy=True):
"""
Conform block manager to new index.
"""
new_index = _ensure_index(new_index)
new_index, indexer = self.axes[axis].reindex(
new_index, method=method, limit=limit)
return self.reindex_indexer(new_index, indexer, axis=axis,
fill_value=fill_value, copy=copy)
def reindex_indexer(self, new_axis, indexer, axis, fill_value=None,
allow_dups=False, copy=True):
"""
Parameters
----------
new_axis : Index
indexer : ndarray of int64 or None
axis : int
fill_value : object
allow_dups : bool
The indexer is a pandas-style indexer, with -1's marking locations to fill.
"""
if indexer is None:
if new_axis is self.axes[axis] and not copy:
return self
result = self.copy(deep=copy)
result.axes = list(self.axes)
result.axes[axis] = new_axis
return result
self._consolidate_inplace()
# some axes don't allow reindexing with dups
if not allow_dups:
self.axes[axis]._can_reindex(indexer)
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(
indexer, fill_tuple=(fill_value,))
else:
new_blocks = [blk.take_nd(indexer, axis=axis,
fill_tuple=(fill_value if fill_value is not None else
blk.fill_value,))
for blk in self.blocks]
new_axes = list(self.axes)
new_axes[axis] = new_axis
return self.__class__(new_blocks, new_axes)
def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None):
"""
Slice/take blocks along axis=0.
Overloaded for SingleBlock
Returns
-------
new_blocks : list of Block
"""
allow_fill = fill_tuple is not None
sl_type, slobj, sllen = _preprocess_slice_or_indexer(
slice_or_indexer, self.shape[0], allow_fill=allow_fill)
if self._is_single_block:
blk = self.blocks[0]
if sl_type in ('slice', 'mask'):
return [blk.getitem_block(slobj,
new_mgr_locs=slice(0, sllen))]
elif not allow_fill or self.ndim == 1:
if allow_fill and fill_tuple[0] is None:
_, fill_value = com._maybe_promote(blk.dtype)
fill_tuple = (fill_value,)
return [blk.take_nd(slobj, axis=0,
new_mgr_locs=slice(0, sllen),
fill_tuple=fill_tuple)]
if sl_type in ('slice', 'mask'):
blknos = self._blknos[slobj]
blklocs = self._blklocs[slobj]
else:
blknos = com.take_1d(self._blknos, slobj, fill_value=-1,
allow_fill=allow_fill)
blklocs = com.take_1d(self._blklocs, slobj, fill_value=-1,
allow_fill=allow_fill)
# When filling blknos, make sure blknos is updated before appending to
# blocks list, that way new blkno is exactly len(blocks).
#
# FIXME: mgr_groupby_blknos must return mgr_locs in ascending order,
# pytables serialization will break otherwise.
blocks = []
for blkno, mgr_locs in _get_blkno_placements(blknos, len(self.blocks),
group=True):
if blkno == -1:
# If we've got here, fill_tuple was not None.
fill_value = fill_tuple[0]
blocks.append(self._make_na_block(
placement=mgr_locs, fill_value=fill_value))
else:
blk = self.blocks[blkno]
# Otherwise, slicing along items axis is necessary.
if not blk._can_consolidate:
# A non-consolidatable block, it's easy, because there's only one item
# and each mgr loc is a copy of that single item.
for mgr_loc in mgr_locs:
newblk = blk.copy(deep=True)
newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1)
blocks.append(newblk)
else:
blocks.append(blk.take_nd(
blklocs[mgr_locs.indexer], axis=0,
new_mgr_locs=mgr_locs, fill_tuple=None))
return blocks
def _make_na_block(self, placement, fill_value=None):
# TODO: infer dtypes other than float64 from fill_value
if fill_value is None:
fill_value = np.nan
block_shape = list(self.shape)
block_shape[0] = len(placement)
dtype, fill_value = com._infer_dtype_from_scalar(fill_value)
block_values = np.empty(block_shape, dtype=dtype)
block_values.fill(fill_value)
return make_block(block_values, placement=placement)
def take(self, indexer, axis=1, verify=True, convert=True):
"""
Take items along any axis.
"""
self._consolidate_inplace()
indexer = np.arange(indexer.start, indexer.stop, indexer.step,
dtype='int64') if isinstance(indexer, slice) \
else np.asanyarray(indexer, dtype='int64')
n = self.shape[axis]
if convert:
indexer = maybe_convert_indices(indexer, n)
if verify:
if ((indexer == -1) | (indexer >= n)).any():
raise Exception('Indices must be nonzero and less than '
'the axis length')
new_labels = self.axes[axis].take(indexer)
return self.reindex_indexer(new_axis=new_labels, indexer=indexer,
axis=axis, allow_dups=True)
def merge(self, other, lsuffix='', rsuffix=''):
if not self._is_indexed_like(other):
raise AssertionError('Must have same axes to merge managers')
l, r = items_overlap_with_suffix(left=self.items, lsuffix=lsuffix,
right=other.items, rsuffix=rsuffix)
new_items = _concat_indexes([l, r])
new_blocks = [blk.copy(deep=False)
for blk in self.blocks]
offset = self.shape[0]
for blk in other.blocks:
blk = blk.copy(deep=False)
blk.mgr_locs = blk.mgr_locs.add(offset)
new_blocks.append(blk)
new_axes = list(self.axes)
new_axes[0] = new_items
return self.__class__(_consolidate(new_blocks), new_axes)
def _is_indexed_like(self, other):
"""
Check all axes except items
"""
if self.ndim != other.ndim:
raise AssertionError(('Number of dimensions must agree '
'got %d and %d') % (self.ndim, other.ndim))
for ax, oax in zip(self.axes[1:], other.axes[1:]):
if not ax.equals(oax):
return False
return True
def equals(self, other):
self_axes, other_axes = self.axes, other.axes
if len(self_axes) != len(other_axes):
return False
if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)):
return False
self._consolidate_inplace()
other._consolidate_inplace()
if len(self.blocks) != len(other.blocks):
return False
# canonicalize block order, using a tuple combining the type
# name and then mgr_locs because there might be unconsolidated
# blocks (say, Categorical) which can only be distinguished by
# the iteration order
def canonicalize(block):
return (block.dtype.name, block.mgr_locs.as_array.tolist())
self_blocks = sorted(self.blocks, key=canonicalize)
other_blocks = sorted(other.blocks, key=canonicalize)
return all(block.equals(oblock) for block, oblock in
zip(self_blocks, other_blocks))
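# --- illustrative sketch (not part of the original module) ------------------
# What the BlockManager documented above looks like from the outside: a
# DataFrame's columns are grouped into one block per dtype.  `_data` is the
# internal handle to the manager, so this is for inspection only and the
# attribute name may change between pandas versions.
def _example_inspect_block_manager():
    import pandas as pd
    df = pd.DataFrame({'a': [1, 2], 'b': [3, 4],
                       'c': [1.5, 2.5], 'd': ['x', 'y']})
    mgr = df._data                       # BlockManager
    # e.g. one IntBlock holding 'a' and 'b', one FloatBlock, one ObjectBlock
    return [(type(blk).__name__, len(blk.mgr_locs)) for blk in mgr.blocks]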
class SingleBlockManager(BlockManager):
""" manage a single block with """
ndim = 1
_is_consolidated = True
_known_consolidated = True
__slots__ = ()
def __init__(self, block, axis, do_integrity_check=False, fastpath=False):
if isinstance(axis, list):
if len(axis) != 1:
raise ValueError(
"cannot create SingleBlockManager with more than 1 axis")
axis = axis[0]
# passed from constructor, single block, single axis
if fastpath:
self.axes = [axis]
if isinstance(block, list):
# empty block
if len(block) == 0:
block = [np.array([])]
elif len(block) != 1:
raise ValueError('Cannot create SingleBlockManager with '
'more than 1 block')
block = block[0]
else:
self.axes = [_ensure_index(axis)]
# create the block here
if isinstance(block, list):
# provide consolidation to the interleaved_dtype
if len(block) > 1:
dtype = _interleaved_dtype(block)
block = [b.astype(dtype) for b in block]
block = _consolidate(block)
if len(block) != 1:
raise ValueError('Cannot create SingleBlockManager with '
'more than 1 block')
block = block[0]
if not isinstance(block, Block):
block = make_block(block,
placement=slice(0, len(axis)),
ndim=1, fastpath=True)
self.blocks = [block]
def _post_setstate(self):
pass
@property
def _block(self):
return self.blocks[0]
@property
def _values(self):
return self._block.values
def reindex(self, new_axis, indexer=None, method=None, fill_value=None,
limit=None, copy=True):
# if we are the same and don't copy, just return
if self.index.equals(new_axis):
if copy:
return self.copy(deep=True)
else:
return self
values = self._block.get_values()
if indexer is None:
indexer = self.items.get_indexer_for(new_axis)
if fill_value is None:
# FIXME: is fill_value used correctly in sparse blocks?
if not self._block.is_sparse:
fill_value = self._block.fill_value
else:
fill_value = np.nan
new_values = com.take_1d(values, indexer,
fill_value=fill_value)
# fill if needed
if method is not None or limit is not None:
new_values = com.interpolate_2d(new_values, method=method,
limit=limit, fill_value=fill_value)
if self._block.is_sparse:
make_block = self._block.make_block_same_class
block = make_block(new_values, copy=copy,
placement=slice(0, len(new_axis)))
mgr = SingleBlockManager(block, new_axis)
mgr._consolidate_inplace()
return mgr
def get_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
return self.__class__(self._block._slice(slobj),
self.index[slobj], fastpath=True)
@property
def index(self):
return self.axes[0]
def convert(self, **kwargs):
""" convert the whole block as one """
kwargs['by_item'] = False
return self.apply('convert', **kwargs)
@property
def dtype(self):
return self._values.dtype
@property
def array_dtype(self):
return self._block.array_dtype
@property
def ftype(self):
return self._block.ftype
def get_dtype_counts(self):
return {self.dtype.name: 1}
def get_ftype_counts(self):
return {self.ftype: 1}
def get_dtypes(self):
return np.array([self._block.dtype])
def get_ftypes(self):
return np.array([self._block.ftype])
@property
def values(self):
return self._values.view()
def get_values(self):
""" return a dense type view """
return np.array(self._block.to_dense(), copy=False)
@property
def itemsize(self):
return self._values.itemsize
@property
def _can_hold_na(self):
return self._block._can_hold_na
def is_consolidated(self):
return True
def _consolidate_check(self):
pass
def _consolidate_inplace(self):
pass
def delete(self, item):
"""
Delete single item from SingleBlockManager.
Ensures that self.blocks doesn't become empty.
"""
loc = self.items.get_loc(item)
self._block.delete(loc)
self.axes[0] = self.axes[0].delete(loc)
def fast_xs(self, loc):
"""
fast path for getting a cross-section
return a view of the data
"""
return self._block.values[loc]
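# --- illustrative sketch (not part of the original module) ------------------
# A Series is backed by the SingleBlockManager defined above: one block, one
# axis (the index).  As before, `_data` is internal and shown purely for
# illustration.
def _example_single_block_manager():
    import pandas as pd
    s = pd.Series([1.0, 2.0, 3.0], index=['a', 'b', 'c'])
    mgr = s._data                        # SingleBlockManager
    return type(mgr).__name__, mgr.nblocks, list(mgr.index)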
def construction_error(tot_items, block_shape, axes, e=None):
""" raise a helpful message about our construction """
passed = tuple(map(int, [tot_items] + list(block_shape)))
implied = tuple(map(int, [len(ax) for ax in axes]))
if passed == implied and e is not None:
raise e
raise ValueError("Shape of passed values is {0}, indices imply {1}".format(
passed, implied))
def create_block_manager_from_blocks(blocks, axes):
try:
if len(blocks) == 1 and not isinstance(blocks[0], Block):
# if blocks[0] is of length 0, return empty blocks
if not len(blocks[0]):
blocks = []
else:
# It's OK if a single block is passed as values; its placement is
# basically "all items".  If there are many, don't bother
# converting, since it's an error anyway.
blocks = [make_block(values=blocks[0],
placement=slice(0, len(axes[0])))]
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except (ValueError) as e:
blocks = [getattr(b, 'values', b) for b in blocks]
tot_items = sum(b.shape[0] for b in blocks)
construction_error(tot_items, blocks[0].shape[1:], axes, e)
def create_block_manager_from_arrays(arrays, names, axes):
try:
blocks = form_blocks(arrays, names, axes)
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except (ValueError) as e:
construction_error(len(arrays), arrays[0].shape, axes, e)
def form_blocks(arrays, names, axes):
# put "leftover" items in float bucket, where else?
# generalize?
float_items = []
complex_items = []
int_items = []
bool_items = []
object_items = []
sparse_items = []
datetime_items = []
cat_items = []
extra_locs = []
names_idx = Index(names)
if names_idx.equals(axes[0]):
names_indexer = np.arange(len(names_idx))
else:
assert names_idx.intersection(axes[0]).is_unique
names_indexer = names_idx.get_indexer_for(axes[0])
for i, name_idx in enumerate(names_indexer):
if name_idx == -1:
extra_locs.append(i)
continue
k = names[name_idx]
v = arrays[name_idx]
if isinstance(v, (SparseArray, ABCSparseSeries)):
sparse_items.append((i, k, v))
elif issubclass(v.dtype.type, np.floating):
float_items.append((i, k, v))
elif issubclass(v.dtype.type, np.complexfloating):
complex_items.append((i, k, v))
elif issubclass(v.dtype.type, np.datetime64):
if v.dtype != _NS_DTYPE:
v = tslib.cast_to_nanoseconds(v)
if hasattr(v, 'tz') and v.tz is not None:
object_items.append((i, k, v))
else:
datetime_items.append((i, k, v))
elif issubclass(v.dtype.type, np.integer):
if v.dtype == np.uint64:
# HACK #2355 definite overflow
if (v > 2 ** 63 - 1).any():
object_items.append((i, k, v))
continue
int_items.append((i, k, v))
elif v.dtype == np.bool_:
bool_items.append((i, k, v))
elif is_categorical(v):
cat_items.append((i, k, v))
else:
object_items.append((i, k, v))
blocks = []
if len(float_items):
float_blocks = _multi_blockify(float_items)
blocks.extend(float_blocks)
if len(complex_items):
complex_blocks = _simple_blockify(
complex_items, np.complex128)
blocks.extend(complex_blocks)
if len(int_items):
int_blocks = _multi_blockify(int_items)
blocks.extend(int_blocks)
if len(datetime_items):
datetime_blocks = _simple_blockify(
datetime_items, _NS_DTYPE)
blocks.extend(datetime_blocks)
if len(bool_items):
bool_blocks = _simple_blockify(
bool_items, np.bool_)
blocks.extend(bool_blocks)
if len(object_items) > 0:
object_blocks = _simple_blockify(
object_items, np.object_)
blocks.extend(object_blocks)
if len(sparse_items) > 0:
sparse_blocks = _sparse_blockify(sparse_items)
blocks.extend(sparse_blocks)
if len(cat_items) > 0:
cat_blocks = [ make_block(array,
klass=CategoricalBlock,
fastpath=True,
placement=[i]
) for i, names, array in cat_items ]
blocks.extend(cat_blocks)
if len(extra_locs):
shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:])
# empty items -> dtype object
block_values = np.empty(shape, dtype=object)
block_values.fill(np.nan)
na_block = make_block(block_values, placement=extra_locs)
blocks.append(na_block)
return blocks
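# --- illustrative sketch (not part of the original module) ------------------
# The bucketing in form_blocks is why a mixed frame ends up with one block per
# dtype family, which is observable through the public API of this era of
# pandas: the four columns below land in the float, int, bool and object
# buckets respectively.
def _example_dtype_buckets():
    import pandas as pd
    df = pd.DataFrame({'f': [1.0, 2.0], 'i': [1, 2],
                       'b': [True, False], 'o': ['x', 'y']})
    return df.get_dtype_counts()         # bool/float64/int64/object -> 1 each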
def _simple_blockify(tuples, dtype):
""" return a single array of a block that has a single dtype; if dtype is
not None, coerce to this dtype
"""
values, placement = _stack_arrays(tuples, dtype)
# CHECK DTYPE?
if dtype is not None and values.dtype != dtype: # pragma: no cover
values = values.astype(dtype)
block = make_block(values, placement=placement)
return [block]
def _multi_blockify(tuples, dtype=None):
""" return an array of blocks that potentially have different dtypes """
# group by dtype
grouper = itertools.groupby(tuples, lambda x: x[2].dtype)
new_blocks = []
for dtype, tup_block in grouper:
values, placement = _stack_arrays(
list(tup_block), dtype)
block = make_block(values, placement=placement)
new_blocks.append(block)
return new_blocks
def _sparse_blockify(tuples, dtype=None):
""" return an array of blocks that potentially have different dtypes (and
are sparse)
"""
new_blocks = []
for i, names, array in tuples:
array = _maybe_to_sparse(array)
block = make_block(
array, klass=SparseBlock, fastpath=True,
placement=[i])
new_blocks.append(block)
return new_blocks
def _stack_arrays(tuples, dtype):
# stack the per-item arrays into a single 2-d array, handling both Series
# and plain ndarrays
def _asarray_compat(x):
if isinstance(x, ABCSeries):
return x.values
else:
return np.asarray(x)
def _shape_compat(x):
if isinstance(x, ABCSeries):
return len(x),
else:
return x.shape
placement, names, arrays = zip(*tuples)
first = arrays[0]
shape = (len(arrays),) + _shape_compat(first)
stacked = np.empty(shape, dtype=dtype)
for i, arr in enumerate(arrays):
stacked[i] = _asarray_compat(arr)
return stacked, placement
def _interleaved_dtype(blocks):
if not len(blocks):
return None
counts = defaultdict(lambda: [])
for x in blocks:
counts[type(x)].append(x)
def _lcd_dtype(l):
""" find the lowest dtype that can accomodate the given types """
m = l[0].dtype
for x in l[1:]:
if x.dtype.itemsize > m.itemsize:
m = x.dtype
return m
have_int = len(counts[IntBlock]) > 0
have_bool = len(counts[BoolBlock]) > 0
have_object = len(counts[ObjectBlock]) > 0
have_float = len(counts[FloatBlock]) > 0
have_complex = len(counts[ComplexBlock]) > 0
have_dt64 = len(counts[DatetimeBlock]) > 0
have_td64 = len(counts[TimeDeltaBlock]) > 0
have_cat = len(counts[CategoricalBlock]) > 0
have_sparse = len(counts[SparseBlock]) > 0
have_numeric = have_float or have_complex or have_int
has_non_numeric = have_dt64 or have_td64 or have_cat
if (have_object or
(have_bool and (have_numeric or have_dt64 or have_td64)) or
(have_numeric and has_non_numeric) or
have_cat or
have_dt64 or
have_td64):
return np.dtype(object)
elif have_bool:
return np.dtype(bool)
elif have_int and not have_float and not have_complex:
# if we are mixing unsigned and signed, then return
# the next biggest int type (if we can)
lcd = _lcd_dtype(counts[IntBlock])
kinds = set([i.dtype.kind for i in counts[IntBlock]])
if len(kinds) == 1:
return lcd
if lcd == 'uint64' or lcd == 'int64':
return np.dtype('int64')
# return the int dtype one itemsize bigger if unsigned
if lcd.kind == 'u':
return np.dtype('int%s' % (lcd.itemsize * 8 * 2))
return lcd
elif have_complex:
return np.dtype('c16')
else:
return _lcd_dtype(counts[FloatBlock] + counts[SparseBlock])
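# --- illustrative sketch (not part of the original module) ------------------
# The rules above decide the dtype of the interleaved 2-d array (what
# DataFrame.values returns) for a mixed frame: int + float interleave as
# float64, while bool + numeric or numeric + object fall back to object.
def _example_interleaved_dtypes():
    import pandas as pd
    int_float = pd.DataFrame({'a': [1, 2], 'b': [1.5, 2.5]}).values.dtype
    bool_int = pd.DataFrame({'a': [True, False], 'b': [1, 2]}).values.dtype
    with_obj = pd.DataFrame({'a': [1, 2], 'b': ['x', 'y']}).values.dtype
    return int_float, bool_int, with_obj  # float64, object, object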
def _consolidate(blocks):
"""
Merge blocks having same dtype, exclude non-consolidating blocks
"""
# sort by _can_consolidate, dtype
gkey = lambda x: x._consolidate_key
grouper = itertools.groupby(sorted(blocks, key=gkey), gkey)
new_blocks = []
for (_can_consolidate, dtype), group_blocks in grouper:
merged_blocks = _merge_blocks(list(group_blocks), dtype=dtype,
_can_consolidate=_can_consolidate)
if isinstance(merged_blocks, list):
new_blocks.extend(merged_blocks)
else:
new_blocks.append(merged_blocks)
return new_blocks
def _merge_blocks(blocks, dtype=None, _can_consolidate=True):
if len(blocks) == 1:
return blocks[0]
if _can_consolidate:
if dtype is None:
if len(set([b.dtype for b in blocks])) != 1:
raise AssertionError("_merge_blocks are invalid!")
dtype = blocks[0].dtype
# FIXME: optimization potential in case all mgrs contain slices and
# combination of those slices is a slice, too.
new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks])
new_values = _vstack([b.values for b in blocks], dtype)
argsort = np.argsort(new_mgr_locs)
new_values = new_values[argsort]
new_mgr_locs = new_mgr_locs[argsort]
return make_block(new_values,
fastpath=True, placement=new_mgr_locs)
# no merge
return blocks
def _block_shape(values, ndim=1, shape=None):
""" guarantee the shape of the values to be at least 1 d """
if values.ndim <= ndim:
if shape is None:
shape = values.shape
values = values.reshape(tuple((1,) + shape))
return values
def _vstack(to_stack, dtype):
# work around NumPy 1.6 bug
if dtype == _NS_DTYPE or dtype == _TD_DTYPE:
new_values = np.vstack([x.view('i8') for x in to_stack])
return new_values.view(dtype)
else:
return np.vstack(to_stack)
def _possibly_compare(a, b, op):
res = op(a, b)
is_a_array = isinstance(a, np.ndarray)
is_b_array = isinstance(b, np.ndarray)
if np.isscalar(res) and (is_a_array or is_b_array):
type_names = [type(a).__name__, type(b).__name__]
if is_a_array:
type_names[0] = 'ndarray(dtype=%s)' % a.dtype
if is_b_array:
type_names[1] = 'ndarray(dtype=%s)' % b.dtype
raise TypeError("Cannot compare types %r and %r" % tuple(type_names))
return res
def _concat_indexes(indexes):
return indexes[0].append(indexes[1:])
def _block2d_to_blocknd(values, placement, shape, labels, ref_items):
""" pivot to the labels shape """
from pandas.core.internals import make_block
panel_shape = (len(placement),) + shape
# TODO: lexsort depth needs to be 2!!
# Create observation selection vector using major and minor
# labels, for converting to panel format.
selector = _factor_indexer(shape[1:], labels)
mask = np.zeros(np.prod(shape), dtype=bool)
mask.put(selector, True)
if mask.all():
pvalues = np.empty(panel_shape, dtype=values.dtype)
else:
dtype, fill_value = _maybe_promote(values.dtype)
pvalues = np.empty(panel_shape, dtype=dtype)
pvalues.fill(fill_value)
values = values
for i in range(len(placement)):
pvalues[i].flat[mask] = values[:, i]
return make_block(pvalues, placement=placement)
def _factor_indexer(shape, labels):
"""
given a tuple of shape and a list of Categorical labels, return the
expanded label indexer
"""
mult = np.array(shape)[::-1].cumprod()[::-1]
return com._ensure_platform_int(
np.sum(np.array(labels).T * np.append(mult, [1]), axis=1).T)
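# --- illustrative sketch (not part of the original module) ------------------
# Worked example of the arithmetic in _factor_indexer: the reversed cumulative
# product of the trailing shape gives per-axis strides, so the flat positions
# agree with numpy's ravel_multi_index over the full shape.  Hypothetical
# helper, not called by the library.
def _example_factor_indexer():
    import numpy as np
    n_major, n_minor = 3, 4
    major = np.array([0, 0, 1, 2])
    minor = np.array([1, 3, 0, 2])
    flat = _factor_indexer((n_minor,), [major, minor])
    expected = np.ravel_multi_index((major, minor), (n_major, n_minor))
    assert (np.asarray(flat) == expected).all()
    return flat                           # [1, 3, 4, 10]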
def _get_blkno_placements(blknos, blk_count, group=True):
"""
Parameters
----------
blknos : array of int64
blk_count : int
group : bool
Returns
-------
iterator
yield (blkno, BlockPlacement)
"""
blknos = com._ensure_int64(blknos)
# FIXME: blk_count is unused, but it may avoid the use of dicts in cython
for blkno, indexer in lib.get_blkno_indexers(blknos, group):
yield blkno, BlockPlacement(indexer)
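# --- illustrative sketch (not part of the original module) ------------------
# A readable pure-Python stand-in for what the cython helper above produces
# with group=True: each distinct block number paired with all of the positions
# it occupies.  This is an approximation for exposition, not the actual
# implementation (which yields BlockPlacement objects).
def _example_group_blknos(blknos):
    from collections import defaultdict
    positions = defaultdict(list)
    for pos, blkno in enumerate(blknos):
        positions[blkno].append(pos)
    # e.g. [0, 0, 1, 0, 2] -> {0: [0, 1, 3], 1: [2], 2: [4]}
    return dict(positions)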
def items_overlap_with_suffix(left, lsuffix, right, rsuffix):
"""
If two indices overlap, add suffixes to overlapping entries.
If corresponding suffix is empty, the entry is simply converted to string.
"""
to_rename = left.intersection(right)
if len(to_rename) == 0:
return left, right
else:
if not lsuffix and not rsuffix:
raise ValueError('columns overlap but no suffix specified: %s' %
to_rename)
def lrenamer(x):
if x in to_rename:
return '%s%s' % (x, lsuffix)
return x
def rrenamer(x):
if x in to_rename:
return '%s%s' % (x, rsuffix)
return x
return (_transform_index(left, lrenamer),
_transform_index(right, rrenamer))
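# --- illustrative sketch (not part of the original module) ------------------
# The suffixing performed by items_overlap_with_suffix is what surfaces in the
# public API when joining frames with overlapping column names: without
# suffixes the overlap raises, with suffixes the shared names are renamed on
# each side.
def _example_overlap_suffixes():
    import pandas as pd
    left = pd.DataFrame({'key': [1, 2], 'val': [10, 20]})
    right = pd.DataFrame({'key': [1, 2], 'val': [30, 40]})
    joined = left.join(right, lsuffix='_l', rsuffix='_r')
    return list(joined.columns)          # ['key_l', 'val_l', 'key_r', 'val_r']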
def _transform_index(index, func):
"""
Apply function to all values found in index.
This includes transforming multiindex entries separately.
"""
if isinstance(index, MultiIndex):
items = [tuple(func(y) for y in x) for x in index]
return MultiIndex.from_tuples(items, names=index.names)
else:
items = [func(x) for x in index]
return Index(items, name=index.name)
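# --- illustrative sketch (not part of the original module) ------------------
# _transform_index applied to a flat Index and to a MultiIndex: the function
# is mapped over scalar labels in the flat case and over every element of
# every tuple in the MultiIndex case.  Hypothetical helper for exposition.
def _example_transform_index():
    import pandas as pd
    flat = _transform_index(pd.Index(['a', 'b']), str.upper)
    multi = _transform_index(pd.MultiIndex.from_tuples([('a', 1), ('b', 2)]),
                             str)
    return list(flat), list(multi)       # ['A', 'B'], [('a', '1'), ('b', '2')]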
def _putmask_smart(v, m, n):
"""
Return a new block, try to preserve dtype if possible.
Parameters
----------
v : `values`, updated in-place (array like)
m : `mask`, applies to both sides (array like)
n : `new values` either scalar or an array like aligned with `values`
"""
# n should be the length of the mask or a scalar here
if not is_list_like(n):
n = np.array([n] * len(m))
elif isinstance(n, np.ndarray) and n.ndim == 0: # numpy scalar
n = np.repeat(np.array(n, ndmin=1), len(m))
# see if we are only masking values that, if put,
# will fit in the current dtype
try:
nn = n[m]
nn_at = nn.astype(v.dtype)
comp = (nn == nn_at)
if is_list_like(comp) and comp.all():
nv = v.copy()
nv[m] = nn_at
return nv
except (ValueError, IndexError, TypeError):
pass
# change the dtype
dtype, _ = com._maybe_promote(n.dtype)
nv = v.astype(dtype)
try:
nv[m] = n[m]
except ValueError:
idx, = np.where(np.squeeze(m))
for mask_index, new_val in zip(idx, n[m]):
nv[mask_index] = new_val
return nv
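# --- illustrative sketch (not part of the original module) ------------------
# The dtype preservation described in the docstring of _putmask_smart: putting
# integers into an int64 array under a mask keeps int64, while putting floats
# forces an upcast (plain numpy assignment would silently truncate 2.5 to 2).
# Hypothetical helper, not called anywhere.
def _example_putmask_smart():
    import numpy as np
    v = np.array([1, 2, 3], dtype='int64')
    m = np.array([False, True, False])
    kept = _putmask_smart(v.copy(), m, np.array([10, 20, 30]))
    upcast = _putmask_smart(v.copy(), m, np.array([0.5, 2.5, 4.5]))
    return kept.dtype, upcast.dtype      # int64, float64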
def concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy):
"""
Concatenate block managers into one.
Parameters
----------
mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
axes : list of Index
concat_axis : int
copy : bool
"""
concat_plan = combine_concat_plans([get_mgr_concatenation_plan(mgr, indexers)
for mgr, indexers in mgrs_indexers],
concat_axis)
blocks = [make_block(concatenate_join_units(join_units, concat_axis,
copy=copy),
placement=placement)
for placement, join_units in concat_plan]
return BlockManager(blocks, axes)
def get_empty_dtype_and_na(join_units):
"""
Return dtype and N/A values to use when concatenating specified units.
Returned N/A value may be None which means there was no casting involved.
Returns
-------
dtype
na
"""
if len(join_units) == 1:
blk = join_units[0].block
if blk is None:
return np.float64, np.nan
has_none_blocks = False
dtypes = [None] * len(join_units)
for i, unit in enumerate(join_units):
if unit.block is None:
has_none_blocks = True
else:
dtypes[i] = unit.dtype
# dtypes = set()
upcast_classes = set()
null_upcast_classes = set()
for dtype, unit in zip(dtypes, join_units):
if dtype is None:
continue
if com.is_categorical_dtype(dtype):
upcast_cls = 'category'
elif issubclass(dtype.type, np.bool_):
upcast_cls = 'bool'
elif issubclass(dtype.type, np.object_):
upcast_cls = 'object'
elif is_datetime64_dtype(dtype):
upcast_cls = 'datetime'
elif is_timedelta64_dtype(dtype):
upcast_cls = 'timedelta'
else:
upcast_cls = 'float'
# Null blocks should not influence upcast class selection, unless there
# are only null blocks, when same upcasting rules must be applied to
# null upcast classes.
if unit.is_null:
null_upcast_classes.add(upcast_cls)
else:
upcast_classes.add(upcast_cls)
if not upcast_classes:
upcast_classes = null_upcast_classes
# create the result
if 'object' in upcast_classes:
return np.dtype(np.object_), np.nan
elif 'bool' in upcast_classes:
if has_none_blocks:
return np.dtype(np.object_), np.nan
else:
return np.dtype(np.bool_), None
elif 'category' in upcast_classes:
return com.CategoricalDtype(), np.nan
elif 'float' in upcast_classes:
return np.dtype(np.float64), np.nan
elif 'datetime' in upcast_classes:
return np.dtype('M8[ns]'), tslib.iNaT
elif 'timedelta' in upcast_classes:
return np.dtype('m8[ns]'), tslib.iNaT
else: # pragma
raise AssertionError("invalid dtype determination in get_concat_dtype")
def concatenate_join_units(join_units, concat_axis, copy):
"""
Concatenate values from several join units along selected axis.
"""
if concat_axis == 0 and len(join_units) > 1:
# Concatenating join units along ax0 is handled in _merge_blocks.
raise AssertionError("Concatenating join units along axis0")
empty_dtype, upcasted_na = get_empty_dtype_and_na(join_units)
to_concat = [ju.get_reindexed_values(empty_dtype=empty_dtype,
upcasted_na=upcasted_na)
for ju in join_units]
if len(to_concat) == 1:
# Only one block, nothing to concatenate.
concat_values = to_concat[0]
if copy and concat_values.base is not None:
concat_values = concat_values.copy()
else:
concat_values = com._concat_compat(to_concat, axis=concat_axis)
return concat_values
def get_mgr_concatenation_plan(mgr, indexers):
"""
Construct concatenation plan for given block manager and indexers.
Parameters
----------
mgr : BlockManager
indexers : dict of {axis: indexer}
Returns
-------
plan : list of (BlockPlacement, JoinUnit) tuples
"""
    # Calculate post-reindex shape, save for item axis which will be separate
    # for each block anyway.
mgr_shape = list(mgr.shape)
for ax, indexer in indexers.items():
mgr_shape[ax] = len(indexer)
mgr_shape = tuple(mgr_shape)
if 0 in indexers:
ax0_indexer = indexers.pop(0)
blknos = com.take_1d(mgr._blknos, ax0_indexer, fill_value=-1)
blklocs = com.take_1d(mgr._blklocs, ax0_indexer, fill_value=-1)
else:
if mgr._is_single_block:
blk = mgr.blocks[0]
return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))]
ax0_indexer = None
blknos = mgr._blknos
blklocs = mgr._blklocs
plan = []
for blkno, placements in _get_blkno_placements(blknos, len(mgr.blocks),
group=False):
assert placements.is_slice_like
join_unit_indexers = indexers.copy()
shape = list(mgr_shape)
shape[0] = len(placements)
shape = tuple(shape)
if blkno == -1:
unit = JoinUnit(None, shape)
else:
blk = mgr.blocks[blkno]
ax0_blk_indexer = blklocs[placements.indexer]
unit_no_ax0_reindexing = (
len(placements) == len(blk.mgr_locs) and
# Fastpath detection of join unit not needing to reindex its
# block: no ax0 reindexing took place and block placement was
# sequential before.
((ax0_indexer is None
and blk.mgr_locs.is_slice_like
and blk.mgr_locs.as_slice.step == 1) or
# Slow-ish detection: all indexer locs are sequential (and
# length match is checked above).
(np.diff(ax0_blk_indexer) == 1).all()))
# Omit indexer if no item reindexing is required.
if unit_no_ax0_reindexing:
join_unit_indexers.pop(0, None)
else:
join_unit_indexers[0] = ax0_blk_indexer
unit = JoinUnit(blk, shape, join_unit_indexers)
plan.append((placements, unit))
return plan
def combine_concat_plans(plans, concat_axis):
"""
Combine multiple concatenation plans into one.
    Existing join units may be trimmed in place while plans are combined.
"""
if len(plans) == 1:
for p in plans[0]:
yield p[0], [p[1]]
elif concat_axis == 0:
offset = 0
for plan in plans:
last_plc = None
for plc, unit in plan:
yield plc.add(offset), [unit]
last_plc = plc
if last_plc is not None:
offset += last_plc.as_slice.stop
else:
num_ended = [0]
def _next_or_none(seq):
retval = next(seq, None)
if retval is None:
num_ended[0] += 1
return retval
plans = list(map(iter, plans))
next_items = list(map(_next_or_none, plans))
while num_ended[0] != len(next_items):
if num_ended[0] > 0:
raise ValueError("Plan shapes are not aligned")
placements, units = zip(*next_items)
lengths = list(map(len, placements))
min_len, max_len = min(lengths), max(lengths)
if min_len == max_len:
yield placements[0], units
next_items[:] = map(_next_or_none, plans)
else:
yielded_placement = None
yielded_units = [None] * len(next_items)
for i, (plc, unit) in enumerate(next_items):
yielded_units[i] = unit
if len(plc) > min_len:
# trim_join_unit updates unit in place, so only
# placement needs to be sliced to skip min_len.
next_items[i] = (plc[min_len:],
trim_join_unit(unit, min_len))
else:
yielded_placement = plc
next_items[i] = _next_or_none(plans[i])
yield yielded_placement, yielded_units
def trim_join_unit(join_unit, length):
"""
Reduce join_unit's shape along item axis to length.
Extra items that didn't fit are returned as a separate block.
"""
if 0 not in join_unit.indexers:
extra_indexers = join_unit.indexers
if join_unit.block is None:
extra_block = None
else:
extra_block = join_unit.block.getitem_block(slice(length, None))
join_unit.block = join_unit.block.getitem_block(slice(length))
else:
extra_block = join_unit.block
extra_indexers = copy.copy(join_unit.indexers)
extra_indexers[0] = extra_indexers[0][length:]
join_unit.indexers[0] = join_unit.indexers[0][:length]
extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:]
join_unit.shape = (length,) + join_unit.shape[1:]
return JoinUnit(block=extra_block, indexers=extra_indexers,
shape=extra_shape)
class JoinUnit(object):
def __init__(self, block, shape, indexers={}):
# Passing shape explicitly is required for cases when block is None.
self.block = block
self.indexers = indexers
self.shape = shape
def __repr__(self):
return '%s(%r, %s)' % (self.__class__.__name__,
self.block, self.indexers)
@cache_readonly
def needs_filling(self):
for indexer in self.indexers.values():
# FIXME: cache results of indexer == -1 checks.
if (indexer == -1).any():
return True
return False
@cache_readonly
def dtype(self):
if self.block is None:
raise AssertionError("Block is None, no dtype")
if not self.needs_filling:
return self.block.dtype
else:
return com._get_dtype(com._maybe_promote(self.block.dtype,
self.block.fill_value)[0])
@cache_readonly
def is_null(self):
if self.block is None:
return True
if not self.block._can_hold_na:
return False
        # Usually it's enough to check only a small fraction of values to see
        # if a block is NOT null; chunks should help in such cases. The value
        # 1000 was chosen rather arbitrarily.
values_flat = self.block.values.ravel()
total_len = values_flat.shape[0]
chunk_len = max(total_len // 40, 1000)
for i in range(0, total_len, chunk_len):
if not isnull(values_flat[i: i + chunk_len]).all():
return False
return True
@cache_readonly
def needs_block_conversion(self):
""" we might need to convert the joined values to a suitable block repr """
block = self.block
return block is not None and (block.is_sparse or block.is_categorical)
def get_reindexed_values(self, empty_dtype, upcasted_na):
if upcasted_na is None:
# No upcasting is necessary
fill_value = self.block.fill_value
values = self.block.get_values()
else:
fill_value = upcasted_na
            if self.is_null and not getattr(self.block, 'is_categorical', None):
missing_arr = np.empty(self.shape, dtype=empty_dtype)
if np.prod(self.shape):
# NumPy 1.6 workaround: this statement gets strange if all
# blocks are of same dtype and some of them are empty:
                    # empty ones are considered "null" so they must be filled,
# but no dtype upcasting happens and the dtype may not
# allow NaNs.
#
# In general, no one should get hurt when one tries to put
# incorrect values into empty array, but numpy 1.6 is
# strict about that.
missing_arr.fill(fill_value)
return missing_arr
if not self.indexers:
if self.block.is_categorical:
# preserve the categoricals for validation in _concat_compat
return self.block.values
elif self.block.is_sparse:
# preserve the sparse array for validation in _concat_compat
return self.block.values
if self.block.is_bool:
# External code requested filling/upcasting, bool values must
# be upcasted to object to avoid being upcasted to numeric.
values = self.block.astype(np.object_).values
else:
# No dtype upcasting is done here, it will be performed during
# concatenation itself.
values = self.block.get_values()
if not self.indexers:
# If there's no indexing to be done, we want to signal outside
# code that this array must be copied explicitly. This is done
# by returning a view and checking `retval.base`.
values = values.view()
else:
for ax, indexer in self.indexers.items():
values = com.take_nd(values, indexer, axis=ax,
fill_value=fill_value)
return values
def _fast_count_smallints(arr):
"""Faster version of set(arr) for sequences of small numbers."""
if len(arr) == 0:
# Handle empty arr case separately: numpy 1.6 chokes on that.
return np.empty((0, 2), dtype=arr.dtype)
else:
counts = np.bincount(arr.astype(np.int_))
nz = counts.nonzero()[0]
return np.c_[nz, counts[nz]]
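# Illustrative example (hypothetical input): each row of the result pairs a
# value with its count, e.g.
#
#   >>> _fast_count_smallints(np.array([3, 1, 3, 3]))
#   array([[1, 1],
#          [3, 3]])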
def _preprocess_slice_or_indexer(slice_or_indexer, length, allow_fill):
if isinstance(slice_or_indexer, slice):
return 'slice', slice_or_indexer, lib.slice_len(slice_or_indexer,
length)
elif (isinstance(slice_or_indexer, np.ndarray) and
slice_or_indexer.dtype == np.bool_):
return 'mask', slice_or_indexer, slice_or_indexer.sum()
else:
indexer = np.asanyarray(slice_or_indexer, dtype=np.int64)
if not allow_fill:
indexer = maybe_convert_indices(indexer, length)
return 'fancy', indexer, len(indexer)
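# Illustrative examples (hypothetical inputs) of the three kinds returned above:
#
#   >>> _preprocess_slice_or_indexer(slice(0, 5), length=10, allow_fill=False)
#   ('slice', slice(0, 5, None), 5)
#   >>> _preprocess_slice_or_indexer(np.array([True, False, True]), 3, False)
#   ('mask', array([ True, False,  True], dtype=bool), 2)
#   >>> _preprocess_slice_or_indexer([4, 1, 3], length=10, allow_fill=True)
#   ('fancy', array([4, 1, 3]), 3)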
| mit | 2,819,906,828,694,936,600 | 9,092,507,160,208,442,000 | 32.963327 | 134 | 0.537957 | false |
promptworks/horizon | openstack_dashboard/dashboards/admin/hypervisors/tabs.py | 59 | 1512 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard.api import nova
from openstack_dashboard.dashboards.admin.hypervisors.compute \
import tabs as cmp_tabs
from openstack_dashboard.dashboards.admin.hypervisors import tables
class HypervisorTab(tabs.TableTab):
table_classes = (tables.AdminHypervisorsTable,)
name = _("Hypervisor")
slug = "hypervisor"
template_name = "horizon/common/_detail_table.html"
def get_hypervisors_data(self):
hypervisors = []
try:
hypervisors = nova.hypervisor_list(self.request)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve hypervisor information.'))
return hypervisors
class HypervisorHostTabs(tabs.TabGroup):
slug = "hypervisor_info"
tabs = (HypervisorTab, cmp_tabs.ComputeHostTab)
sticky = True
| apache-2.0 | 2,691,753,904,508,676,600 | -4,932,121,597,807,687,000 | 33.363636 | 78 | 0.722222 | false |
ravindrapanda/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/deterministic_test.py | 100 | 11789 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import deterministic as deterministic_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
rng = np.random.RandomState(0)
class DeterministicTest(test.TestCase):
def testShape(self):
with self.test_session():
loc = rng.rand(2, 3, 4)
deterministic = deterministic_lib.Deterministic(loc)
self.assertAllEqual(deterministic.batch_shape_tensor().eval(), (2, 3, 4))
self.assertAllEqual(deterministic.batch_shape, (2, 3, 4))
self.assertAllEqual(deterministic.event_shape_tensor().eval(), [])
self.assertEqual(deterministic.event_shape, tensor_shape.TensorShape([]))
def testInvalidTolRaises(self):
loc = rng.rand(2, 3, 4).astype(np.float32)
deterministic = deterministic_lib.Deterministic(
loc, atol=-1, validate_args=True)
with self.test_session():
with self.assertRaisesOpError("Condition x >= 0"):
deterministic.prob(0.).eval()
def testProbWithNoBatchDimsIntegerType(self):
deterministic = deterministic_lib.Deterministic(0)
with self.test_session():
self.assertAllClose(1, deterministic.prob(0).eval())
self.assertAllClose(0, deterministic.prob(2).eval())
self.assertAllClose([1, 0], deterministic.prob([0, 2]).eval())
def testProbWithNoBatchDims(self):
deterministic = deterministic_lib.Deterministic(0.)
with self.test_session():
self.assertAllClose(1., deterministic.prob(0.).eval())
self.assertAllClose(0., deterministic.prob(2.).eval())
self.assertAllClose([1., 0.], deterministic.prob([0., 2.]).eval())
def testProbWithDefaultTol(self):
loc = [[0., 1.], [2., 3.]]
x = [[0., 1.1], [1.99, 3.]]
deterministic = deterministic_lib.Deterministic(loc)
expected_prob = [[1., 0.], [0., 1.]]
with self.test_session():
prob = deterministic.prob(x)
self.assertAllEqual((2, 2), prob.get_shape())
self.assertAllEqual(expected_prob, prob.eval())
def testProbWithNonzeroATol(self):
loc = [[0., 1.], [2., 3.]]
x = [[0., 1.1], [1.99, 3.]]
deterministic = deterministic_lib.Deterministic(loc, atol=0.05)
expected_prob = [[1., 0.], [1., 1.]]
with self.test_session():
prob = deterministic.prob(x)
self.assertAllEqual((2, 2), prob.get_shape())
self.assertAllEqual(expected_prob, prob.eval())
def testProbWithNonzeroATolIntegerType(self):
loc = [[0, 1], [2, 3]]
x = [[0, 2], [4, 2]]
deterministic = deterministic_lib.Deterministic(loc, atol=1)
expected_prob = [[1, 1], [0, 1]]
with self.test_session():
prob = deterministic.prob(x)
self.assertAllEqual((2, 2), prob.get_shape())
self.assertAllEqual(expected_prob, prob.eval())
def testProbWithNonzeroRTol(self):
loc = [[0., 1.], [100., 100.]]
x = [[0., 1.1], [100.1, 103.]]
deterministic = deterministic_lib.Deterministic(loc, rtol=0.01)
expected_prob = [[1., 0.], [1., 0.]]
with self.test_session():
prob = deterministic.prob(x)
self.assertAllEqual((2, 2), prob.get_shape())
self.assertAllEqual(expected_prob, prob.eval())
def testProbWithNonzeroRTolIntegerType(self):
loc = [[10, 10, 10], [10, 10, 10]]
x = [[10, 20, 30], [10, 20, 30]]
# Batch 0 will have rtol = 0
# Batch 1 will have rtol = 1 (100% slack allowed)
deterministic = deterministic_lib.Deterministic(loc, rtol=[[0], [1]])
expected_prob = [[1, 0, 0], [1, 1, 0]]
with self.test_session():
prob = deterministic.prob(x)
self.assertAllEqual((2, 3), prob.get_shape())
self.assertAllEqual(expected_prob, prob.eval())
def testCdfWithDefaultTol(self):
loc = [[0., 0.], [0., 0.]]
x = [[-1., -0.1], [-0.01, 1.000001]]
deterministic = deterministic_lib.Deterministic(loc)
expected_cdf = [[0., 0.], [0., 1.]]
with self.test_session():
cdf = deterministic.cdf(x)
self.assertAllEqual((2, 2), cdf.get_shape())
self.assertAllEqual(expected_cdf, cdf.eval())
def testCdfWithNonzeroATol(self):
loc = [[0., 0.], [0., 0.]]
x = [[-1., -0.1], [-0.01, 1.000001]]
deterministic = deterministic_lib.Deterministic(loc, atol=0.05)
expected_cdf = [[0., 0.], [1., 1.]]
with self.test_session():
cdf = deterministic.cdf(x)
self.assertAllEqual((2, 2), cdf.get_shape())
self.assertAllEqual(expected_cdf, cdf.eval())
def testCdfWithNonzeroRTol(self):
loc = [[1., 1.], [100., 100.]]
x = [[0.9, 1.], [99.9, 97]]
deterministic = deterministic_lib.Deterministic(loc, rtol=0.01)
expected_cdf = [[0., 1.], [1., 0.]]
with self.test_session():
cdf = deterministic.cdf(x)
self.assertAllEqual((2, 2), cdf.get_shape())
self.assertAllEqual(expected_cdf, cdf.eval())
def testSampleNoBatchDims(self):
deterministic = deterministic_lib.Deterministic(0.)
for sample_shape in [(), (4,)]:
with self.test_session():
sample = deterministic.sample(sample_shape)
self.assertAllEqual(sample_shape, sample.get_shape())
self.assertAllClose(
np.zeros(sample_shape).astype(np.float32), sample.eval())
def testSampleWithBatchDims(self):
deterministic = deterministic_lib.Deterministic([0., 0.])
for sample_shape in [(), (4,)]:
with self.test_session():
sample = deterministic.sample(sample_shape)
self.assertAllEqual(sample_shape + (2,), sample.get_shape())
self.assertAllClose(
np.zeros(sample_shape + (2,)).astype(np.float32), sample.eval())
def testSampleDynamicWithBatchDims(self):
loc = array_ops.placeholder(np.float32)
sample_shape = array_ops.placeholder(np.int32)
deterministic = deterministic_lib.Deterministic(loc)
for sample_shape_ in [(), (4,)]:
with self.test_session():
sample_ = deterministic.sample(sample_shape).eval(
feed_dict={loc: [0., 0.],
sample_shape: sample_shape_})
self.assertAllClose(
np.zeros(sample_shape_ + (2,)).astype(np.float32), sample_)
class VectorDeterministicTest(test.TestCase):
def testShape(self):
with self.test_session():
loc = rng.rand(2, 3, 4)
deterministic = deterministic_lib.VectorDeterministic(loc)
self.assertAllEqual(deterministic.batch_shape_tensor().eval(), (2, 3))
self.assertAllEqual(deterministic.batch_shape, (2, 3))
self.assertAllEqual(deterministic.event_shape_tensor().eval(), [4])
self.assertEqual(deterministic.event_shape, tensor_shape.TensorShape([4]))
def testInvalidTolRaises(self):
loc = rng.rand(2, 3, 4).astype(np.float32)
deterministic = deterministic_lib.VectorDeterministic(
loc, atol=-1, validate_args=True)
with self.test_session():
with self.assertRaisesOpError("Condition x >= 0"):
deterministic.prob(loc).eval()
def testInvalidXRaises(self):
loc = rng.rand(2, 3, 4).astype(np.float32)
deterministic = deterministic_lib.VectorDeterministic(
loc, atol=-1, validate_args=True)
with self.test_session():
with self.assertRaisesRegexp(ValueError, "must have rank at least 1"):
deterministic.prob(0.).eval()
def testProbVectorDeterministicWithNoBatchDims(self):
# 0 batch of deterministics on R^1.
deterministic = deterministic_lib.VectorDeterministic([0.])
with self.test_session():
self.assertAllClose(1., deterministic.prob([0.]).eval())
self.assertAllClose(0., deterministic.prob([2.]).eval())
self.assertAllClose([1., 0.], deterministic.prob([[0.], [2.]]).eval())
def testProbWithDefaultTol(self):
# 3 batch of deterministics on R^2.
loc = [[0., 1.], [2., 3.], [4., 5.]]
x = [[0., 1.], [1.9, 3.], [3.99, 5.]]
deterministic = deterministic_lib.VectorDeterministic(loc)
expected_prob = [1., 0., 0.]
with self.test_session():
prob = deterministic.prob(x)
self.assertAllEqual((3,), prob.get_shape())
self.assertAllEqual(expected_prob, prob.eval())
def testProbWithNonzeroATol(self):
# 3 batch of deterministics on R^2.
loc = [[0., 1.], [2., 3.], [4., 5.]]
x = [[0., 1.], [1.9, 3.], [3.99, 5.]]
deterministic = deterministic_lib.VectorDeterministic(loc, atol=0.05)
expected_prob = [1., 0., 1.]
with self.test_session():
prob = deterministic.prob(x)
self.assertAllEqual((3,), prob.get_shape())
self.assertAllEqual(expected_prob, prob.eval())
def testProbWithNonzeroRTol(self):
# 3 batch of deterministics on R^2.
loc = [[0., 1.], [1., 1.], [100., 100.]]
x = [[0., 1.], [0.9, 1.], [99.9, 100.1]]
deterministic = deterministic_lib.VectorDeterministic(loc, rtol=0.01)
expected_prob = [1., 0., 1.]
with self.test_session():
prob = deterministic.prob(x)
self.assertAllEqual((3,), prob.get_shape())
self.assertAllEqual(expected_prob, prob.eval())
def testProbVectorDeterministicWithNoBatchDimsOnRZero(self):
# 0 batch of deterministics on R^0.
deterministic = deterministic_lib.VectorDeterministic(
[], validate_args=True)
with self.test_session():
self.assertAllClose(1., deterministic.prob([]).eval())
def testProbVectorDeterministicWithNoBatchDimsOnRZeroRaisesIfXNotInSameRk(
self):
# 0 batch of deterministics on R^0.
deterministic = deterministic_lib.VectorDeterministic(
[], validate_args=True)
with self.test_session():
with self.assertRaisesOpError("not defined in the same space"):
deterministic.prob([1.]).eval()
def testSampleNoBatchDims(self):
deterministic = deterministic_lib.VectorDeterministic([0.])
for sample_shape in [(), (4,)]:
with self.test_session():
sample = deterministic.sample(sample_shape)
self.assertAllEqual(sample_shape + (1,), sample.get_shape())
self.assertAllClose(
np.zeros(sample_shape + (1,)).astype(np.float32), sample.eval())
def testSampleWithBatchDims(self):
deterministic = deterministic_lib.VectorDeterministic([[0.], [0.]])
for sample_shape in [(), (4,)]:
with self.test_session():
sample = deterministic.sample(sample_shape)
self.assertAllEqual(sample_shape + (2, 1), sample.get_shape())
self.assertAllClose(
np.zeros(sample_shape + (2, 1)).astype(np.float32), sample.eval())
def testSampleDynamicWithBatchDims(self):
loc = array_ops.placeholder(np.float32)
sample_shape = array_ops.placeholder(np.int32)
deterministic = deterministic_lib.VectorDeterministic(loc)
for sample_shape_ in [(), (4,)]:
with self.test_session():
sample_ = deterministic.sample(sample_shape).eval(
feed_dict={loc: [[0.], [0.]],
sample_shape: sample_shape_})
self.assertAllClose(
np.zeros(sample_shape_ + (2, 1)).astype(np.float32), sample_)
if __name__ == "__main__":
test.main()
| apache-2.0 | 6,469,713,839,776,587,000 | -4,297,199,272,542,109,700 | 38.962712 | 90 | 0.643905 | false |
Stargrazer82301/CAAPR | CAAPR/CAAPR_AstroMagic/PTS/pts/core/simulation/units.py | 2 | 16178 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.core.simulation.units Working with SKIRT output units.
#
# An instance of the SkirtUnits class in this module provides support for working with SKIRT input/output units.
# The constructor arguments specify the name of a SKIRT unit system (SI, stellar, or extragalactic units)
# and the flux style (neutral, wavelength or frequency) to set the default units for physical quantities.
# The instance then offers functions to convert a physical quantity from its default unit to some specified unit
# (or between two specified units).
# -----------------------------------------------------------------
# Import standard modules
import types
import numpy as np
# -----------------------------------------------------------------
# SkirtUnits class
# -----------------------------------------------------------------
## An instance of the SkirtUnits class represents a particular SKIRT input/output unit system, specified at
# construction through the name of the unit system (SI, stellar, or extragalactic units) and the flux style
# (neutral, wavelength or frequency). Based on this information, the object knows the output units used by SKIRT
# for a series of supported physical quantities. This allows converting a value extracted from SKIRT output to
# some specified unit without having to know the actual output unit.
#
# The SkirtUnits class supports the following physical quantities and unit specifiers:
#
#| Physical Quantity | Flux Style | Units
#|-------------------|------------|-------
#| length, distance, wavelength | | A, nm, micron, mm, cm, m, km, AU, pc, kpc, Mpc
#| volume | | m3, AU3, pc3
#| mass | | g, kg, Msun
#| luminosity | | W, Lsun
#| luminositydensity | wavelength | W/m, W/micron, Lsun/micron
#| luminositydensity | frequency | W/Hz, erg/s/Hz, Lsun/Hz
#| fluxdensity | neutral | W/m2
#| fluxdensity | wavelength | W/m3, W/m2/micron
#| fluxdensity | frequency | W/m2/Hz, Jy, mJy, MJy, erg/s/cm2/Hz
#| surfacebrightness | neutral | W/m2/sr, W/m2/arcsec2
#| surfacebrightness | wavelength | W/m3/sr, W/m2/micron/sr, W/m2/micron/sr, W/m2/micron/arcsec2
#| surfacebrightness | frequency | W/m2/Hz/sr, W/m2/Hz/arcsec2, Jy/sr, Jy/arcsec2, MJy/sr, MJy/arcsec2
#
# Flux style 'neutral' indicates \f$\lambda F_\lambda = \nu F_\nu\f$; 'wavelength' indicates
# \f$F_\lambda\f$; and 'frequency' indicates \f$F_\nu\f$.
class SkirtUnits:
## The constructor accepts the name of the SKIRT unit system ('SI', 'stellar', or 'extragalactic')
# and the flux style ('neutral', 'wavelength' or 'frequency') to be represented by this instance.
# The specified strings are case-insensitive, and any portion beyond the recognized names is ignored.
# Based on this information, it initializes the default SKIRT units for a series of supported
# physical quantities.
#
def __init__(self, unitsystem, fluxstyle):
unitsystem = unitsystem.lower()
fluxstyle = fluxstyle.lower()
if unitsystem.startswith('si'):
self._defaultunit = { 'length': 'm', 'distance': 'm', 'wavelength': 'm',
'volume': 'm3', 'mass': 'kg',
'luminosity': 'W', 'luminositydensity': 'W/m' }
if fluxstyle.startswith('neutral'):
self._defaultunit.update(fluxdensity='W/m2', surfacebrightness='W/m2/sr')
elif fluxstyle.startswith('wavelength'):
self._defaultunit.update(fluxdensity='W/m3', surfacebrightness='W/m3/sr')
elif fluxstyle.startswith('frequency'):
self._defaultunit.update(fluxdensity='W/m2/Hz', surfacebrightness='W/m2/Hz/sr')
else:
raise ValueError("Unsupported flux style: " + fluxstyle)
elif unitsystem.startswith('stellar'):
self._defaultunit = { 'length': 'AU', 'distance': 'pc', 'wavelength': 'micron',
'volume': 'AU3', 'mass': 'Msun',
'luminosity': 'Lsun', 'luminositydensity': 'Lsun/micron' }
if fluxstyle.startswith('neutral'):
self._defaultunit.update(fluxdensity='W/m2', surfacebrightness='W/m2/arcsec2')
elif fluxstyle.startswith('wavelength'):
self._defaultunit.update(fluxdensity='W/m2/micron', surfacebrightness='W/m2/micron/arcsec2')
elif fluxstyle.startswith('frequency'):
self._defaultunit.update(fluxdensity='Jy', surfacebrightness='MJy/sr')
else:
raise ValueError("Unsupported flux style: " + fluxstyle)
elif unitsystem.startswith('extragalactic'):
self._defaultunit = { 'length': 'pc', 'distance': 'Mpc', 'wavelength': 'micron',
'volume': 'pc3', 'mass': 'Msun',
'luminosity': 'Lsun', 'luminositydensity': 'Lsun/micron' }
if fluxstyle.startswith('neutral'):
self._defaultunit.update(fluxdensity='W/m2', surfacebrightness='W/m2/arcsec2')
elif fluxstyle.startswith('wavelength'):
self._defaultunit.update(fluxdensity='W/m2/micron', surfacebrightness='W/m2/micron/arcsec2')
elif fluxstyle.startswith('frequency'):
self._defaultunit.update(fluxdensity='Jy', surfacebrightness='MJy/sr')
else:
raise ValueError("Unsupported flux style: " + fluxstyle)
else:
raise ValueError("Unsupported unit system: " + unitsystem)
## This function performs unit conversion for a specified value (or for a numpy array of values). The first
# argument specifies the value to be converted. This can be a number, a numpy array of numbers (in which case
# the conversion is performed for each array element), or a string representing a number optionally followed
# by a unit specifier (e.g. "0.76 micron"). The second argument specifies the target unit. In addition the
# function accepts the following optional arguments:
# - \em from_unit: specifies the units in which the incoming value is expressed.
# - \em quantity: specifies the physical quantity of the incoming value; this is used to determine the appropriate
# default unit in case the \em from_unit argument is missing (and the value is not a string including units).
# - \em wavelength: provides the wavelength, in micron, for converting between flux styles; this argument can be
# omitted for any other conversions; if \em value is a numpy array, \em wavelength can be an array of the same
# length (one wavelength per flux value), or it can be a single number (in which case all fluxes are considered
# to be at the same wavelength).
#
# The unit of the incoming value is determined using three mechanisms in the following order:
    # - if the value is a string with two segments, the second segment determines the unit.
# - otherwise, if \em from_unit is specified (and not None), its value determines the unit.
# - otherwise, the default SKIRT unit corresponding to the specified \em quantity is used.
#
def convert(self, value, to_unit, from_unit=None, quantity=None, wavelength=None):
# if the value is a string, it may include a unit specifier that overrides the from_unit argument
if isinstance(value, types.StringTypes):
segments = value.split()
if len(segments) == 2:
value = float(segments[0])
from_unit = segments[1]
elif len(segments) == 1:
value = float(segments[0])
else:
raise ValueError("Invalid value/unit string")
# if the from_unit has not been specified, use the default for the specified quantity
        if from_unit is None:
from_unit = self._defaultunit[quantity]
# skip the conversion if the units are identical
if from_unit == to_unit:
return value
# perform straightforward conversion between units of the same physical quantity
from_quantity = _quantity[from_unit]
to_quantity = _quantity[to_unit]
if from_quantity == to_quantity:
return value * (_conversion[from_unit]/_conversion[to_unit])
# perform conversion between styles of flux density or surface brightness
if ('fluxdensity' in from_quantity and 'fluxdensity' in to_quantity) or \
('luminositydensity' in from_quantity and 'luminositydensity' in to_quantity) or \
('surfacebrightness' in from_quantity and 'surfacebrightness' in to_quantity):
# convert to/from SI units within the respective flux styles
flux = value * (_conversion[from_unit]/_conversion[to_unit])
# convert between flux styles (convert specified wavelength from micron to m)
wave = wavelength * 1e-6
if 'wavelength' in from_quantity: flux *= wave
elif 'frequency' in from_quantity: flux *= _c/wave
if 'wavelength' in to_quantity: flux *= 1./wave
elif 'frequency' in to_quantity: flux *= wave/_c
return flux
else:
raise ValueError("Can't convert from " + from_unit + " to " + to_unit)
## This function returns the absolute AB magnitude corresponding to a given flux density and distance
# from the source. The units in which these values are expressed can be explicitly specified. If not,
# the default units for respectively flux density and distance are used instead. If the flux density
# is expressed per unit of frequency, the \em wavelength argument may be omitted. Otherwise, the
# wavelength is used to convert between flux styles.
#
# Given a flux density \f$F_\nu\f$, measured in ergs per second per square cm per Hz, the corresponding
# AB magnitude is defined as \f$\text{AB}=-2.5\log_{10} F_\nu -48.60\f$. The resulting apparent magnitude
# is converted to the absolute magnitude using the standard formula \f$M=m-5\log_{10}d^{(pc)}+5\f$.
def absolutemagnitude(self, fluxdensity, distance, fluxdensity_unit=None, distance_unit=None, wavelength=None):
fluxdensity = self.convert(fluxdensity, to_unit='erg/s/cm2/Hz', from_unit=fluxdensity_unit,
quantity='fluxdensity', wavelength=wavelength)
distance = self.convert(distance, to_unit='pc', from_unit=distance_unit, quantity='distance')
apparent = -2.5*np.log10(fluxdensity) - 48.60
absolute = apparent - 5*np.log10(distance) + 5
return absolute
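    # Illustrative check (hypothetical values): a flux density of 3631 Jy
    # corresponds to an apparent AB magnitude of about 0, so at a distance of
    # 10 pc, absolutemagnitude(3631, 10, 'Jy', 'pc') returns approximately 0.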
## This function returns the luminosity density corresponding to a given flux density and distance
# from the source. The units in which these values are expressed can be explicitly specified. If not,
# the default units for respectively flux density and distance are used instead. The units for the
# returned luminosity must be specified (there is no default). If both the flux density and the
# luminosity density are expressed in the same style (per unit of frequency or per unit of wavelength),
# the \em wavelength argument may be omitted. Otherwise, the wavelength is used to convert between styles.
def luminosityforflux(self, fluxdensity, distance, luminositydensity_unit,
fluxdensity_unit=None, distance_unit=None, wavelength=None):
if 'wavelength' in _quantity[luminositydensity_unit]:
flux_si = 'W/m3'
lumi_si = 'W/m'
else:
flux_si = 'W/m2/Hz'
lumi_si = 'W/Hz'
fluxdensity = self.convert(fluxdensity, to_unit=flux_si, from_unit=fluxdensity_unit,
quantity='fluxdensity', wavelength=wavelength)
distance = self.convert(distance, to_unit='m', from_unit=distance_unit, quantity='distance')
luminosity = 4.*np.pi * distance*distance * fluxdensity
return self.convert(luminosity, to_unit=luminositydensity_unit, from_unit=lumi_si)
# -----------------------------------------------------------------
# Private conversion facilities
# -----------------------------------------------------------------
# --- fundamental physical and astronomical constants ---
_c = 2.99792458e8 # light speed in m/s
_AU = 1.49597871e11 # astronomical unit in m
_pc = 3.08567758e16 # parsec in m
_Lsun = 3.839e26 # solar bolometric luminosity in W (without solar neutrino radiation)
_Msun = 1.9891e30 # solar mass in kg
_arcsec2 = 2.350443053909789e-11 # solid angle of 1 square arc second in steradian
# --- library providing the physical quantity corresponding to each supported unit ---
# key: unit; value: physical quantity for this unit
_quantity = { 'A': 'length', 'nm': 'length', 'micron': 'length', 'mm': 'length', 'cm': 'length',
'm': 'length', 'km': 'length', 'AU': 'length', 'kpc': 'length', 'pc': 'length', 'Mpc': 'length',
'm3': 'volume', 'AU3': 'volume', 'pc3': 'volume',
'g': 'mass', 'kg': 'mass', 'Msun': 'mass',
'W': 'luminosity', 'Lsun': 'luminosity',
'W/m': 'wavelengthluminositydensity',
'W/micron': 'wavelengthluminositydensity',
'Lsun/micron': 'wavelengthluminositydensity',
'W/Hz': 'frequencyluminositydensity',
'erg/s/Hz': 'frequencyluminositydensity',
'Lsun/Hz': 'frequencyluminositydensity',
'W/m2': 'neutralfluxdensity',
'W/m2/sr': 'neutralsurfacebrightness',
'W/m2/arcsec2': 'neutralsurfacebrightness',
'W/m3': 'wavelengthfluxdensity',
'W/m2/micron': 'wavelengthfluxdensity',
'W/m3/sr': 'wavelengthsurfacebrightness',
'W/m2/micron/sr': 'wavelengthsurfacebrightness',
'W/m2/micron/arcsec2': 'wavelengthsurfacebrightness',
'W/m2/Hz': 'frequencyfluxdensity',
'Jy': 'frequencyfluxdensity',
'mJy': 'frequencyfluxdensity',
'MJy': 'frequencyfluxdensity',
'erg/s/cm2/Hz': 'frequencyfluxdensity',
'W/m2/Hz/sr': 'frequencysurfacebrightness',
'W/m2/Hz/arcsec2': 'frequencysurfacebrightness',
'Jy/sr': 'frequencysurfacebrightness',
'Jy/arcsec2': 'frequencysurfacebrightness',
'MJy/sr': 'frequencysurfacebrightness',
'MJy/arcsec2': 'frequencysurfacebrightness'
}
# --- library providing the conversion factor to SI units for each supported unit ---
# key: unit; value: conversion factor to corresponding SI unit
_conversion = { 'A': 1e-10, 'nm': 1e-9, 'micron': 1e-6, 'mm': 1e-3, 'cm': 1e-2,
'm': 1., 'km': 1e3, 'AU': _AU, 'pc': _pc, 'kpc': 1e3*_pc, 'Mpc': 1e6*_pc,
'm3': 1., 'AU3': _AU**3, 'pc3': _pc**3,
'g': 1e-3, 'kg': 1., 'Msun': _Msun,
'W': 1., 'Lsun': _Lsun,
'W/m': 1., 'W/micron': 1e6, 'Lsun/micron': _Lsun*1e6,
'W/Hz': 1., 'Lsun/Hz': _Lsun, "erg/s/Hz": 1e-7,
'W/m2': 1.,
'W/m2/sr': 1.,
'W/m2/arcsec2': 1./_arcsec2,
'W/m3': 1.,
'W/m2/micron': 1e6,
'W/m3/sr': 1.,
'W/m2/micron/sr': 1e6,
'W/m2/micron/arcsec2': 1e6/_arcsec2,
'W/m2/Hz': 1.,
'Jy': 1e-26,
'mJy': 1e-29,
'MJy': 1e-20,
'erg/s/cm2/Hz': 1e-3,
'W/m2/Hz/sr': 1.,
'W/m2/Hz/arcsec2': 1./_arcsec2,
'Jy/sr': 1e-26,
'Jy/arcsec2': 1e-26/_arcsec2,
'MJy/sr': 1e-20,
'MJy/arcsec2': 1e-20/_arcsec2
}
# -----------------------------------------------------------------
| mit | 3,450,315,630,920,129,000 | -575,552,815,881,675,800 | 55.365854 | 119 | 0.601348 | false |
carolinux/QGIS | python/plugins/GdalTools/GdalTools.py | 4 | 20971 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : GdalTools
Description : Integrate gdal tools into qgis
Date : 17/Sep/09
copyright : (C) 2009 by Lorenzo Masini (Faunalia)
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
# Import the PyQt and QGIS libraries
from PyQt4.QtCore import QObject, QCoreApplication, QSettings, QLocale, QFileInfo, QTranslator, SIGNAL
from PyQt4.QtGui import QMessageBox, QMenu, QIcon, QAction
from qgis.core import QGis
import qgis.utils
# load icons for actions
import resources_rc
# are all dependencies satisfied?
valid = True
# Import required modules
req_mods = {"osgeo": "osgeo [python-gdal]"}
try:
from osgeo import gdal
from osgeo import ogr
except ImportError as e:
valid = False
# if the plugin is shipped with QGis catch the exception and
# display an error message
import os.path
qgisUserPluginPath = qgis.utils.home_plugin_path
if not os.path.dirname(__file__).startswith(qgisUserPluginPath):
title = QCoreApplication.translate("GdalTools", "Plugin error")
message = QCoreApplication.translate("GdalTools", u'Unable to load {0} plugin. \nThe required "{1}" module is missing. \nInstall it and try again.')
import qgis.utils
QMessageBox.warning(qgis.utils.iface.mainWindow(), title, message.format("GdalTools", req_mods["osgeo"]))
else:
# if a module is missing show a more friendly module's name
error_str = e.args[0]
error_mod = error_str.replace("No module named ", "")
if error_mod in req_mods:
error_str = error_str.replace(error_mod, req_mods[error_mod])
raise ImportError(error_str)
class GdalTools:
def __init__(self, iface):
if not valid:
return
# Save reference to the QGIS interface
self.iface = iface
try:
self.QgisVersion = unicode(QGis.QGIS_VERSION_INT)
except:
self.QgisVersion = unicode(QGis.qgisVersion)[0]
if QGis.QGIS_VERSION[0:3] < "1.5":
# For i18n support
userPluginPath = qgis.utils.home_plugin_path + "/GdalTools"
systemPluginPath = qgis.utils.sys_plugin_path + "/GdalTools"
overrideLocale = QSettings().value("locale/overrideFlag", False, type=bool)
if not overrideLocale:
localeFullName = QLocale.system().name()
else:
localeFullName = QSettings().value("locale/userLocale", "", type=str)
if QFileInfo(userPluginPath).exists():
translationPath = userPluginPath + "/i18n/GdalTools_" + localeFullName + ".qm"
else:
translationPath = systemPluginPath + "/i18n/GdalTools_" + localeFullName + ".qm"
self.localePath = translationPath
if QFileInfo(self.localePath).exists():
self.translator = QTranslator()
self.translator.load(self.localePath)
QCoreApplication.installTranslator(self.translator)
# The list of actions added to menus, so we can remove them when unloading the plugin
self._menuActions = []
def initGui(self):
if not valid:
return
if int(self.QgisVersion) < 1:
QMessageBox.warning(
self.iface.getMainWindow(), "Gdal Tools",
QCoreApplication.translate("GdalTools", "QGIS version detected: ") + unicode(self.QgisVersion) + ".xx\n"
+ QCoreApplication.translate("GdalTools", "This version of Gdal Tools requires at least QGIS version 1.0.0\nPlugin will not be enabled."))
return None
from tools.GdalTools_utils import GdalConfig, LayerRegistry
self.GdalVersionNum = GdalConfig.versionNum()
LayerRegistry.setIface(self.iface)
# find the Raster menu
rasterMenu = None
menu_bar = self.iface.mainWindow().menuBar()
actions = menu_bar.actions()
rasterText = QCoreApplication.translate("QgisApp", "&Raster")
for a in actions:
if a.menu() is not None and a.menu().title() == rasterText:
rasterMenu = a.menu()
break
if rasterMenu is None:
# no Raster menu, create and insert it before the Help menu
self.menu = QMenu(rasterText, self.iface.mainWindow())
lastAction = actions[len(actions) - 1]
menu_bar.insertMenu(lastAction, self.menu)
else:
self.menu = rasterMenu
self._menuActions.append(self.menu.addSeparator())
# projections menu (Warp (Reproject), Assign projection)
self.projectionsMenu = QMenu(QCoreApplication.translate("GdalTools", "Projections"), self.iface.mainWindow())
self.projectionsMenu.setObjectName("projectionsMenu")
self.warp = QAction(QIcon(":/icons/warp.png"), QCoreApplication.translate("GdalTools", "Warp (Reproject)..."), self.iface.mainWindow())
self.warp.setObjectName("warp")
self.warp.setStatusTip(QCoreApplication.translate("GdalTools", "Warp an image into a new coordinate system"))
QObject.connect(self.warp, SIGNAL("triggered()"), self.doWarp)
self.projection = QAction(QIcon(":icons/projection-add.png"), QCoreApplication.translate("GdalTools", "Assign Projection..."), self.iface.mainWindow())
self.projection.setObjectName("projection")
self.projection.setStatusTip(QCoreApplication.translate("GdalTools", "Add projection info to the raster"))
QObject.connect(self.projection, SIGNAL("triggered()"), self.doProjection)
self.extractProj = QAction(QIcon(":icons/projection-export.png"), QCoreApplication.translate("GdalTools", "Extract Projection..."), self.iface.mainWindow())
self.extractProj.setObjectName("extractProj")
self.extractProj.setStatusTip(QCoreApplication.translate("GdalTools", "Extract projection information from raster(s)"))
QObject.connect(self.extractProj, SIGNAL("triggered()"), self.doExtractProj)
self.projectionsMenu.addActions([self.warp, self.projection, self.extractProj])
# conversion menu (Rasterize (Vector to raster), Polygonize (Raster to vector), Translate, RGB to PCT, PCT to RGB)
self.conversionMenu = QMenu(QCoreApplication.translate("GdalTools", "Conversion"), self.iface.mainWindow())
self.conversionMenu.setObjectName("conversionMenu")
if self.GdalVersionNum >= 1300:
self.rasterize = QAction(QIcon(":/icons/rasterize.png"), QCoreApplication.translate("GdalTools", "Rasterize (Vector to Raster)..."), self.iface.mainWindow())
self.rasterize.setObjectName("rasterize")
self.rasterize.setStatusTip(QCoreApplication.translate("GdalTools", "Burns vector geometries into a raster"))
QObject.connect(self.rasterize, SIGNAL("triggered()"), self.doRasterize)
self.conversionMenu.addAction(self.rasterize)
if self.GdalVersionNum >= 1600:
self.polygonize = QAction(QIcon(":/icons/polygonize.png"), QCoreApplication.translate("GdalTools", "Polygonize (Raster to Vector)..."), self.iface.mainWindow())
self.polygonize.setObjectName("polygonize")
self.polygonize.setStatusTip(QCoreApplication.translate("GdalTools", "Produces a polygon feature layer from a raster"))
QObject.connect(self.polygonize, SIGNAL("triggered()"), self.doPolygonize)
self.conversionMenu.addAction(self.polygonize)
self.translate = QAction(QIcon(":/icons/translate.png"), QCoreApplication.translate("GdalTools", "Translate (Convert Format)..."), self.iface.mainWindow())
self.translate.setObjectName("translate")
self.translate.setStatusTip(QCoreApplication.translate("GdalTools", "Converts raster data between different formats"))
QObject.connect(self.translate, SIGNAL("triggered()"), self.doTranslate)
self.paletted = QAction(QIcon(":icons/24-to-8-bits.png"), QCoreApplication.translate("GdalTools", "RGB to PCT..."), self.iface.mainWindow())
self.paletted.setObjectName("paletted")
self.paletted.setStatusTip(QCoreApplication.translate("GdalTools", "Convert a 24bit RGB image to 8bit paletted"))
QObject.connect(self.paletted, SIGNAL("triggered()"), self.doPaletted)
self.rgb = QAction(QIcon(":icons/8-to-24-bits.png"), QCoreApplication.translate("GdalTools", "PCT to RGB..."), self.iface.mainWindow())
self.rgb.setObjectName("rgb")
self.rgb.setStatusTip(QCoreApplication.translate("GdalTools", "Convert an 8bit paletted image to 24bit RGB"))
QObject.connect(self.rgb, SIGNAL("triggered()"), self.doRGB)
self.conversionMenu.addActions([self.translate, self.paletted, self.rgb])
# extraction menu (Clipper, Contour)
self.extractionMenu = QMenu(QCoreApplication.translate("GdalTools", "Extraction"), self.iface.mainWindow())
self.extractionMenu.setObjectName("extractionMenu")
if self.GdalVersionNum >= 1600:
self.contour = QAction(QIcon(":/icons/contour.png"), QCoreApplication.translate("GdalTools", "Contour..."), self.iface.mainWindow())
self.contour.setObjectName("contour")
self.contour.setStatusTip(QCoreApplication.translate("GdalTools", "Builds vector contour lines from a DEM"))
QObject.connect(self.contour, SIGNAL("triggered()"), self.doContour)
self.extractionMenu.addAction(self.contour)
self.clipper = QAction(QIcon(":icons/raster-clip.png"), QCoreApplication.translate("GdalTools", "Clipper..."), self.iface.mainWindow())
self.clipper.setObjectName("clipper")
#self.clipper.setStatusTip( QCoreApplication.translate( "GdalTools", "Converts raster data between different formats") )
QObject.connect(self.clipper, SIGNAL("triggered()"), self.doClipper)
self.extractionMenu.addActions([self.clipper])
# analysis menu (DEM (Terrain model), Grid (Interpolation), Near black, Proximity (Raster distance), Sieve)
self.analysisMenu = QMenu(QCoreApplication.translate("GdalTools", "Analysis"), self.iface.mainWindow())
self.analysisMenu.setObjectName("analysisMenu")
if self.GdalVersionNum >= 1600:
self.sieve = QAction(QIcon(":/icons/sieve.png"), QCoreApplication.translate("GdalTools", "Sieve..."), self.iface.mainWindow())
self.sieve.setObjectName("sieve")
self.sieve.setStatusTip(QCoreApplication.translate("GdalTools", "Removes small raster polygons"))
QObject.connect(self.sieve, SIGNAL("triggered()"), self.doSieve)
self.analysisMenu.addAction(self.sieve)
if self.GdalVersionNum >= 1500:
self.nearBlack = QAction(QIcon(":/icons/nearblack.png"), QCoreApplication.translate("GdalTools", "Near Black..."), self.iface.mainWindow())
self.nearBlack.setObjectName("nearBlack")
self.nearBlack.setStatusTip(QCoreApplication.translate("GdalTools", "Convert nearly black/white borders to exact value"))
QObject.connect(self.nearBlack, SIGNAL("triggered()"), self.doNearBlack)
self.analysisMenu.addAction(self.nearBlack)
if self.GdalVersionNum >= 1700:
self.fillNodata = QAction(QIcon(":/icons/fillnodata.png"), QCoreApplication.translate("GdalTools", "Fill nodata..."), self.iface.mainWindow())
self.fillNodata.setObjectName("fillNodata")
self.fillNodata.setStatusTip(QCoreApplication.translate("GdalTools", "Fill raster regions by interpolation from edges"))
QObject.connect(self.fillNodata, SIGNAL("triggered()"), self.doFillNodata)
self.analysisMenu.addAction(self.fillNodata)
if self.GdalVersionNum >= 1600:
self.proximity = QAction(QIcon(":/icons/proximity.png"), QCoreApplication.translate("GdalTools", "Proximity (Raster Distance)..."), self.iface.mainWindow())
self.proximity.setObjectName("proximity")
self.proximity.setStatusTip(QCoreApplication.translate("GdalTools", "Produces a raster proximity map"))
QObject.connect(self.proximity, SIGNAL("triggered()"), self.doProximity)
self.analysisMenu.addAction(self.proximity)
if self.GdalVersionNum >= 1500:
self.grid = QAction(QIcon(":/icons/grid.png"), QCoreApplication.translate("GdalTools", "Grid (Interpolation)..."), self.iface.mainWindow())
self.grid.setObjectName("grid")
self.grid.setStatusTip(QCoreApplication.translate("GdalTools", "Create raster from the scattered data"))
QObject.connect(self.grid, SIGNAL("triggered()"), self.doGrid)
self.analysisMenu.addAction(self.grid)
if self.GdalVersionNum >= 1700:
self.dem = QAction(QIcon(":icons/dem.png"), QCoreApplication.translate("GdalTools", "DEM (Terrain Models)..."), self.iface.mainWindow())
self.dem.setObjectName("dem")
self.dem.setStatusTip(QCoreApplication.translate("GdalTools", "Tool to analyze and visualize DEMs"))
QObject.connect(self.dem, SIGNAL("triggered()"), self.doDEM)
self.analysisMenu.addAction(self.dem)
#self.analysisMenu.addActions( [ ] )
# miscellaneous menu (Build overviews (Pyramids), Tile index, Information, Merge, Build Virtual Raster (Catalog))
self.miscellaneousMenu = QMenu(QCoreApplication.translate("GdalTools", "Miscellaneous"), self.iface.mainWindow())
self.miscellaneousMenu.setObjectName("miscellaneousMenu")
if self.GdalVersionNum >= 1600:
self.buildVRT = QAction(QIcon(":/icons/vrt.png"), QCoreApplication.translate("GdalTools", "Build Virtual Raster (Catalog)..."), self.iface.mainWindow())
self.buildVRT.setObjectName("buildVRT")
self.buildVRT.setStatusTip(QCoreApplication.translate("GdalTools", "Builds a VRT from a list of datasets"))
QObject.connect(self.buildVRT, SIGNAL("triggered()"), self.doBuildVRT)
self.miscellaneousMenu.addAction(self.buildVRT)
self.merge = QAction(QIcon(":/icons/merge.png"), QCoreApplication.translate("GdalTools", "Merge..."), self.iface.mainWindow())
self.merge.setObjectName("merge")
self.merge.setStatusTip(QCoreApplication.translate("GdalTools", "Build a quick mosaic from a set of images"))
QObject.connect(self.merge, SIGNAL("triggered()"), self.doMerge)
self.info = QAction(QIcon(":/icons/raster-info.png"), QCoreApplication.translate("GdalTools", "Information..."), self.iface.mainWindow())
self.info.setObjectName("info")
self.info.setStatusTip(QCoreApplication.translate("GdalTools", "Lists information about raster dataset"))
QObject.connect(self.info, SIGNAL("triggered()"), self.doInfo)
self.overview = QAction(QIcon(":icons/raster-overview.png"), QCoreApplication.translate("GdalTools", "Build Overviews (Pyramids)..."), self.iface.mainWindow())
self.overview.setObjectName("overview")
self.overview.setStatusTip(QCoreApplication.translate("GdalTools", "Builds or rebuilds overview images"))
QObject.connect(self.overview, SIGNAL("triggered()"), self.doOverview)
self.tileindex = QAction(QIcon(":icons/tiles.png"), QCoreApplication.translate("GdalTools", "Tile Index..."), self.iface.mainWindow())
self.tileindex.setObjectName("tileindex")
self.tileindex.setStatusTip(QCoreApplication.translate("GdalTools", "Build a shapefile as a raster tileindex"))
QObject.connect(self.tileindex, SIGNAL("triggered()"), self.doTileIndex)
self.miscellaneousMenu.addActions([self.merge, self.info, self.overview, self.tileindex])
self._menuActions.append(self.menu.addMenu(self.projectionsMenu))
self._menuActions.append(self.menu.addMenu(self.conversionMenu))
self._menuActions.append(self.menu.addMenu(self.extractionMenu))
if not self.analysisMenu.isEmpty():
self._menuActions.append(self.menu.addMenu(self.analysisMenu))
self._menuActions.append(self.menu.addMenu(self.miscellaneousMenu))
self.settings = QAction(QCoreApplication.translate("GdalTools", "GdalTools Settings..."), self.iface.mainWindow())
self.settings.setObjectName("settings")
self.settings.setStatusTip(QCoreApplication.translate("GdalTools", "Various settings for Gdal Tools"))
QObject.connect(self.settings, SIGNAL("triggered()"), self.doSettings)
self.menu.addAction(self.settings)
self._menuActions.append(self.settings)
def unload(self):
if not valid:
return
for a in self._menuActions:
self.menu.removeAction(a)
def doBuildVRT(self):
from tools.doBuildVRT import GdalToolsDialog as BuildVRT
d = BuildVRT(self.iface)
self.runToolDialog(d)
def doContour(self):
from tools.doContour import GdalToolsDialog as Contour
d = Contour(self.iface)
self.runToolDialog(d)
def doRasterize(self):
from tools.doRasterize import GdalToolsDialog as Rasterize
d = Rasterize(self.iface)
self.runToolDialog(d)
def doPolygonize(self):
from tools.doPolygonize import GdalToolsDialog as Polygonize
d = Polygonize(self.iface)
self.runToolDialog(d)
def doMerge(self):
from tools.doMerge import GdalToolsDialog as Merge
d = Merge(self.iface)
self.runToolDialog(d)
def doSieve(self):
from tools.doSieve import GdalToolsDialog as Sieve
d = Sieve(self.iface)
self.runToolDialog(d)
def doProximity(self):
from tools.doProximity import GdalToolsDialog as Proximity
d = Proximity(self.iface)
self.runToolDialog(d)
def doNearBlack(self):
from tools.doNearBlack import GdalToolsDialog as NearBlack
d = NearBlack(self.iface)
self.runToolDialog(d)
def doFillNodata(self):
from tools.doFillNodata import GdalToolsDialog as FillNodata
d = FillNodata(self.iface)
self.runToolDialog(d)
def doWarp(self):
from tools.doWarp import GdalToolsDialog as Warp
d = Warp(self.iface)
self.runToolDialog(d)
def doGrid(self):
from tools.doGrid import GdalToolsDialog as Grid
d = Grid(self.iface)
self.runToolDialog(d)
def doTranslate(self):
from tools.doTranslate import GdalToolsDialog as Translate
d = Translate(self.iface)
self.runToolDialog(d)
def doInfo(self):
from tools.doInfo import GdalToolsDialog as Info
d = Info(self.iface)
self.runToolDialog(d)
def doProjection(self):
from tools.doProjection import GdalToolsDialog as Projection
d = Projection(self.iface)
self.runToolDialog(d)
def doOverview(self):
from tools.doOverview import GdalToolsDialog as Overview
d = Overview(self.iface)
self.runToolDialog(d)
def doClipper(self):
from tools.doClipper import GdalToolsDialog as Clipper
d = Clipper(self.iface)
self.runToolDialog(d)
def doPaletted(self):
from tools.doRgbPct import GdalToolsDialog as RgbPct
d = RgbPct(self.iface)
self.runToolDialog(d)
def doRGB(self):
from tools.doPctRgb import GdalToolsDialog as PctRgb
d = PctRgb(self.iface)
self.runToolDialog(d)
def doTileIndex(self):
from tools.doTileIndex import GdalToolsDialog as TileIndex
d = TileIndex(self.iface)
self.runToolDialog(d)
def doExtractProj(self):
from tools.doExtractProj import GdalToolsDialog as ExtractProj
d = ExtractProj(self.iface)
d.exec_()
def doDEM(self):
from tools.doDEM import GdalToolsDialog as DEM
d = DEM(self.iface)
self.runToolDialog(d)
def runToolDialog(self, dlg):
dlg.show_()
dlg.exec_()
del dlg
def doSettings(self):
from tools.doSettings import GdalToolsSettingsDialog as Settings
d = Settings(self.iface)
d.exec_()
| gpl-2.0 | 2,297,035,724,125,341,200 | 4,762,620,343,519,442,000 | 48.812352 | 172 | 0.657336 | false |
semonte/intellij-community | plugins/hg4idea/testData/bin/mercurial/hbisect.py | 92 | 9226 | # changelog bisection for mercurial
#
# Copyright 2007 Matt Mackall
# Copyright 2005, 2006 Benoit Boissinot <[email protected]>
#
# Inspired by git bisect, extension skeleton taken from mq.py.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import os, error
from i18n import _
from node import short, hex
import util
def bisect(changelog, state):
"""find the next node (if any) for testing during a bisect search.
returns a (nodes, number, good) tuple.
'nodes' is the final result of the bisect if 'number' is 0.
Otherwise 'number' indicates the remaining possible candidates for
the search and 'nodes' contains the next bisect target.
'good' is True if bisect is searching for a first good changeset, False
if searching for a first bad one.
"""
clparents = changelog.parentrevs
skip = set([changelog.rev(n) for n in state['skip']])
def buildancestors(bad, good):
# only the earliest bad revision matters
badrev = min([changelog.rev(n) for n in bad])
goodrevs = [changelog.rev(n) for n in good]
goodrev = min(goodrevs)
# build visit array
ancestors = [None] * (len(changelog) + 1) # an extra for [-1]
# set nodes descended from goodrevs
for rev in goodrevs:
ancestors[rev] = []
for rev in changelog.revs(goodrev + 1):
for prev in clparents(rev):
if ancestors[prev] == []:
ancestors[rev] = []
# clear good revs from array
for rev in goodrevs:
ancestors[rev] = None
for rev in changelog.revs(len(changelog), goodrev):
if ancestors[rev] is None:
for prev in clparents(rev):
ancestors[prev] = None
if ancestors[badrev] is None:
return badrev, None
return badrev, ancestors
good = False
badrev, ancestors = buildancestors(state['bad'], state['good'])
if not ancestors: # looking for bad to good transition?
good = True
badrev, ancestors = buildancestors(state['good'], state['bad'])
bad = changelog.node(badrev)
if not ancestors: # now we're confused
if len(state['bad']) == 1 and len(state['good']) == 1:
raise util.Abort(_("starting revisions are not directly related"))
raise util.Abort(_("inconsistent state, %s:%s is good and bad")
% (badrev, short(bad)))
# build children dict
children = {}
visit = util.deque([badrev])
candidates = []
while visit:
rev = visit.popleft()
if ancestors[rev] == []:
candidates.append(rev)
for prev in clparents(rev):
if prev != -1:
if prev in children:
children[prev].append(rev)
else:
children[prev] = [rev]
visit.append(prev)
candidates.sort()
# have we narrowed it down to one entry?
    # or have all other possible candidates besides 'bad' been skipped?
tot = len(candidates)
unskipped = [c for c in candidates if (c not in skip) and (c != badrev)]
if tot == 1 or not unskipped:
return ([changelog.node(rev) for rev in candidates], 0, good)
perfect = tot // 2
# find the best node to test
best_rev = None
best_len = -1
poison = set()
for rev in candidates:
if rev in poison:
# poison children
poison.update(children.get(rev, []))
continue
a = ancestors[rev] or [rev]
ancestors[rev] = None
x = len(a) # number of ancestors
y = tot - x # number of non-ancestors
value = min(x, y) # how good is this test?
if value > best_len and rev not in skip:
best_len = value
best_rev = rev
if value == perfect: # found a perfect candidate? quit early
break
if y < perfect and rev not in skip: # all downhill from here?
# poison children
poison.update(children.get(rev, []))
continue
for c in children.get(rev, []):
if ancestors[c]:
ancestors[c] = list(set(ancestors[c] + a))
else:
ancestors[c] = a + [c]
assert best_rev is not None
best_node = changelog.node(best_rev)
return ([best_node], tot, good)
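# Illustrative driver sketch (not part of Mercurial; the real driver lives in
# the bisect command).  It only shows how the (nodes, number, good) return
# value documented above is typically interpreted.
def _example_bisect_step(repo, state):
    # 'state' is the dict produced by load_state() below:
    # {'current': [...], 'good': [...], 'bad': [...], 'skip': [...]}
    nodes, remaining, searching_good = bisect(repo.changelog, state)
    if remaining == 0:
        return nodes        # bisection done; these are the culprit node(s)
    return nodes[0]         # next changeset to update to and test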
def load_state(repo):
state = {'current': [], 'good': [], 'bad': [], 'skip': []}
if os.path.exists(repo.join("bisect.state")):
for l in repo.opener("bisect.state"):
kind, node = l[:-1].split()
node = repo.lookup(node)
if kind not in state:
raise util.Abort(_("unknown bisect kind %s") % kind)
state[kind].append(node)
return state
def save_state(repo, state):
f = repo.opener("bisect.state", "w", atomictemp=True)
wlock = repo.wlock()
try:
for kind in sorted(state):
for node in state[kind]:
f.write("%s %s\n" % (kind, hex(node)))
f.close()
finally:
wlock.release()
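# The file written above is plain text, one "<kind> <hex node>" line per
# entry, e.g. (node hashes below are placeholders, not real ones):
#
#   good 1f0dee641bb7258c56bd60e93edfa2405381c41e
#   bad 868cc8fbb43b754ad09fa109885d243fc49adae7
#   skip 9d36f5a563fbd1688e0e0e44d89e6e0e2e040304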
def get(repo, status):
"""
Return a list of revision(s) that match the given status:
- ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
- ``goods``, ``bads`` : csets topologically good/bad
- ``range`` : csets taking part in the bisection
- ``pruned`` : csets that are goods, bads or skipped
- ``untested`` : csets whose fate is yet unknown
- ``ignored`` : csets ignored due to DAG topology
- ``current`` : the cset currently being bisected
"""
state = load_state(repo)
if status in ('good', 'bad', 'skip', 'current'):
return map(repo.changelog.rev, state[status])
else:
# In the following sets, we do *not* call 'bisect()' with more
# than one level of recursion, because that can be very, very
# time consuming. Instead, we always develop the expression as
# much as possible.
# 'range' is all csets that make the bisection:
# - have a good ancestor and a bad descendant, or conversely
# that's because the bisection can go either way
range = '( bisect(bad)::bisect(good) | bisect(good)::bisect(bad) )'
_t = repo.revs('bisect(good)::bisect(bad)')
# The sets of topologically good or bad csets
if len(_t) == 0:
# Goods are topologically after bads
goods = 'bisect(good)::' # Pruned good csets
bads = '::bisect(bad)' # Pruned bad csets
else:
# Goods are topologically before bads
goods = '::bisect(good)' # Pruned good csets
bads = 'bisect(bad)::' # Pruned bad csets
# 'pruned' is all csets whose fate is already known: good, bad, skip
skips = 'bisect(skip)' # Pruned skipped csets
pruned = '( (%s) | (%s) | (%s) )' % (goods, bads, skips)
        # 'untested' is all csets that are in 'range', but not in 'pruned'
untested = '( (%s) - (%s) )' % (range, pruned)
# 'ignored' is all csets that were not used during the bisection
# due to DAG topology, but may however have had an impact.
# E.g., a branch merged between bads and goods, but whose branch-
        # point is outside of the range.
iba = '::bisect(bad) - ::bisect(good)' # Ignored bads' ancestors
iga = '::bisect(good) - ::bisect(bad)' # Ignored goods' ancestors
ignored = '( ( (%s) | (%s) ) - (%s) )' % (iba, iga, range)
if status == 'range':
return repo.revs(range)
elif status == 'pruned':
return repo.revs(pruned)
elif status == 'untested':
return repo.revs(untested)
elif status == 'ignored':
return repo.revs(ignored)
elif status == "goods":
return repo.revs(goods)
elif status == "bads":
return repo.revs(bads)
else:
raise error.ParseError(_('invalid bisect state'))
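# Hypothetical helper (illustration only, not part of this module): summarize
# bisection progress using the status names documented in get() above.
def _example_bisect_summary(repo):
    return dict((status, len(get(repo, status)))
                for status in ('good', 'bad', 'skip', 'untested', 'ignored'))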
def label(repo, node):
rev = repo.changelog.rev(node)
# Try explicit sets
if rev in get(repo, 'good'):
# i18n: bisect changeset status
return _('good')
if rev in get(repo, 'bad'):
# i18n: bisect changeset status
return _('bad')
if rev in get(repo, 'skip'):
# i18n: bisect changeset status
return _('skipped')
if rev in get(repo, 'untested') or rev in get(repo, 'current'):
# i18n: bisect changeset status
return _('untested')
if rev in get(repo, 'ignored'):
# i18n: bisect changeset status
return _('ignored')
# Try implicit sets
if rev in get(repo, 'goods'):
# i18n: bisect changeset status
return _('good (implicit)')
if rev in get(repo, 'bads'):
# i18n: bisect changeset status
return _('bad (implicit)')
return None
def shortlabel(label):
if label:
return label[0].upper()
return None
| apache-2.0 | 2,303,832,471,518,643,700 | 4,313,668,374,131,919,000 | 34.75969 | 78 | 0.565142 | false |
pubnub/Zopkio | test/samples/sample_input.py | 4 | 1081 | # Copyright 2014 LinkedIn Corp.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
test = {
"deployment_code": "test/samples/sample_deployment.py",
"test_code": [
"test/samples/sample_test1.py",
"test/samples/sample_test2.py"
],
"perf_code": "test/samples/sample_perf.py",
"configs_directory": "test/samples/sample_configs"
}
| apache-2.0 | 5,736,512,213,290,893,000 | 5,705,598,462,672,772,000 | 37.607143 | 62 | 0.745606 | false |
dfdx2/django | django/core/files/images.py | 56 | 2377 | """
Utility functions for handling images.
Requires Pillow as you might imagine.
"""
import struct
import zlib
from django.core.files import File
class ImageFile(File):
"""
A mixin for use alongside django.core.files.base.File, which provides
additional features for dealing with images.
"""
@property
def width(self):
return self._get_image_dimensions()[0]
@property
def height(self):
return self._get_image_dimensions()[1]
def _get_image_dimensions(self):
if not hasattr(self, '_dimensions_cache'):
close = self.closed
self.open()
self._dimensions_cache = get_image_dimensions(self, close=close)
return self._dimensions_cache
def get_image_dimensions(file_or_path, close=False):
"""
Return the (width, height) of an image, given an open file or a path. Set
'close' to True to close the file at the end if it is initially in an open
state.
"""
from PIL import ImageFile as PillowImageFile
p = PillowImageFile.Parser()
if hasattr(file_or_path, 'read'):
file = file_or_path
file_pos = file.tell()
file.seek(0)
else:
file = open(file_or_path, 'rb')
close = True
try:
# Most of the time Pillow only needs a small chunk to parse the image
# and get the dimensions, but with some TIFF files Pillow needs to
# parse the whole file.
chunk_size = 1024
while 1:
data = file.read(chunk_size)
if not data:
break
try:
p.feed(data)
except zlib.error as e:
# ignore zlib complaining on truncated stream, just feed more
# data to parser (ticket #19457).
if e.args[0].startswith("Error -5"):
pass
else:
raise
except struct.error:
# Ignore PIL failing on a too short buffer when reads return
# less bytes than expected. Skip and feed more data to the
# parser (ticket #24544).
pass
if p.image:
return p.image.size
chunk_size *= 2
return (None, None)
finally:
if close:
file.close()
else:
file.seek(file_pos)
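# Usage sketch (illustrative only; the path below is a placeholder):
#
#     from django.core.files.images import get_image_dimensions
#     width, height = get_image_dimensions('/tmp/example.png')
#
#     # or with an already-open file, keeping it open afterwards:
#     with open('/tmp/example.png', 'rb') as f:
#         width, height = get_image_dimensions(f, close=False)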
| bsd-3-clause | 8,651,487,321,218,476,000 | 2,733,890,275,597,076,500 | 28.7125 | 78 | 0.561212 | false |
ludbb/secp256k1-py | tests/test_schnorr.py | 1 | 1732 | import pytest
import secp256k1
def test_schnorr_simple():
if not secp256k1.HAS_SCHNORR:
pytest.skip('secp256k1_schnorr not enabled, skipping')
return
inst = secp256k1.PrivateKey()
raw_sig = inst.schnorr_sign(b'hello')
assert inst.pubkey.schnorr_verify(b'hello', raw_sig)
key2 = secp256k1.PrivateKey()
assert not key2.pubkey.schnorr_verify(b'hello', raw_sig)
blank = secp256k1.PublicKey()
pubkey = blank.schnorr_recover(b'hello', raw_sig)
pub = secp256k1.PublicKey(pubkey)
assert pub.serialize() == inst.pubkey.serialize()
def test_schnorr_partial():
if not secp256k1.HAS_SCHNORR:
pytest.skip('secp256k1_schnorr not enabled, skipping')
return
signer1 = secp256k1.PrivateKey()
pubnonce1, privnonce1 = signer1.schnorr_generate_nonce_pair(b'hello')
signer2 = secp256k1.PrivateKey()
pubnonce2, privnonce2 = signer2.schnorr_generate_nonce_pair(b'hello')
# First test partial signatures with only two signers.
partial1 = signer1.schnorr_partial_sign(b'hello', privnonce1, pubnonce2)
partial2 = signer2.schnorr_partial_sign(b'hello', privnonce2, pubnonce1)
blank = secp256k1.PublicKey(flags=secp256k1.NO_FLAGS)
sig = blank.schnorr_partial_combine([partial1, partial2])
# Recover the public key from the combined signature.
pubkey = secp256k1.PublicKey().schnorr_recover(b'hello', sig)
assert blank.public_key is None
# Check that the combined public keys from signer1 and signer2
# match the recovered public key.
blank.combine(
[signer1.pubkey.public_key, signer2.pubkey.public_key])
assert blank.public_key
assert secp256k1.PublicKey(pubkey).serialize() == blank.serialize()
| mit | 2,502,636,973,299,470,000 | -1,223,224,611,708,282,400 | 35.083333 | 76 | 0.711894 | false |
zeurocoin-dev/zeurocoin | qa/rpc-tests/prioritise_transaction.py | 45 | 5993 | #!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test PrioritiseTransaction code
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import COIN, MAX_BLOCK_SIZE
class PrioritiseTransactionTest(BitcoinTestFramework):
def __init__(self):
self.txouts = gen_return_txouts()
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 1)
def setup_network(self):
self.nodes = []
self.is_network_split = False
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-printpriority=1"]))
self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']
def run_test(self):
utxo_count = 90
utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], utxo_count)
base_fee = self.relayfee*100 # our transactions are smaller than 100kb
txids = []
# Create 3 batches of transactions at 3 different fee rate levels
range_size = utxo_count // 3
for i in xrange(3):
txids.append([])
start_range = i * range_size
end_range = start_range + range_size
txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], (i+1)*base_fee)
# Make sure that the size of each group of transactions exceeds
# MAX_BLOCK_SIZE -- otherwise the test needs to be revised to create
# more transactions.
mempool = self.nodes[0].getrawmempool(True)
sizes = [0, 0, 0]
for i in xrange(3):
for j in txids[i]:
assert(j in mempool)
sizes[i] += mempool[j]['size']
assert(sizes[i] > MAX_BLOCK_SIZE) # Fail => raise utxo_count
# add a fee delta to something in the cheapest bucket and make sure it gets mined
# also check that a different entry in the cheapest bucket is NOT mined (lower
        # the priority to ensure it's not mined due to priority)
self.nodes[0].prioritisetransaction(txids[0][0], 0, int(3*base_fee*COIN))
self.nodes[0].prioritisetransaction(txids[0][1], -1e15, 0)
self.nodes[0].generate(1)
mempool = self.nodes[0].getrawmempool()
print "Assert that prioritised transaction was mined"
assert(txids[0][0] not in mempool)
assert(txids[0][1] in mempool)
high_fee_tx = None
for x in txids[2]:
if x not in mempool:
high_fee_tx = x
# Something high-fee should have been mined!
assert(high_fee_tx != None)
# Add a prioritisation before a tx is in the mempool (de-prioritising a
# high-fee transaction so that it's now low fee).
self.nodes[0].prioritisetransaction(high_fee_tx, -1e15, -int(2*base_fee*COIN))
# Add everything back to mempool
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Check to make sure our high fee rate tx is back in the mempool
mempool = self.nodes[0].getrawmempool()
assert(high_fee_tx in mempool)
# Now verify the modified-high feerate transaction isn't mined before
# the other high fee transactions. Keep mining until our mempool has
# decreased by all the high fee size that we calculated above.
while (self.nodes[0].getmempoolinfo()['bytes'] > sizes[0] + sizes[1]):
self.nodes[0].generate(1)
# High fee transaction should not have been mined, but other high fee rate
# transactions should have been.
mempool = self.nodes[0].getrawmempool()
print "Assert that de-prioritised transaction is still in mempool"
assert(high_fee_tx in mempool)
for x in txids[2]:
if (x != high_fee_tx):
assert(x not in mempool)
# Create a free, low priority transaction. Should be rejected.
utxo_list = self.nodes[0].listunspent()
assert(len(utxo_list) > 0)
utxo = utxo_list[0]
inputs = []
outputs = {}
inputs.append({"txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
tx_hex = self.nodes[0].signrawtransaction(raw_tx)["hex"]
txid = self.nodes[0].sendrawtransaction(tx_hex)
# A tx that spends an in-mempool tx has 0 priority, so we can use it to
# test the effect of using prioritise transaction for mempool acceptance
inputs = []
inputs.append({"txid": txid, "vout": 0})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee
raw_tx2 = self.nodes[0].createrawtransaction(inputs, outputs)
tx2_hex = self.nodes[0].signrawtransaction(raw_tx2)["hex"]
tx2_id = self.nodes[0].decoderawtransaction(tx2_hex)["txid"]
try:
self.nodes[0].sendrawtransaction(tx2_hex)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
assert(tx2_id not in self.nodes[0].getrawmempool())
else:
assert(False)
# This is a less than 1000-byte transaction, so just set the fee
# to be the minimum for a 1000 byte transaction and check that it is
# accepted.
self.nodes[0].prioritisetransaction(tx2_id, 0, int(self.relayfee*COIN))
print "Assert that prioritised free transaction is accepted to mempool"
assert_equal(self.nodes[0].sendrawtransaction(tx2_hex), tx2_id)
assert(tx2_id in self.nodes[0].getrawmempool())
if __name__ == '__main__':
PrioritiseTransactionTest().main()
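# Worked example of the fee-delta arithmetic above (numbers are assumptions
# for illustration): with a relay fee of 0.00001 BTC/kB, base_fee =
# relayfee * 100 = 0.001 BTC, so prioritisetransaction(txid, 0,
# int(3 * base_fee * COIN)) adds a delta of 300000 satoshis to the fee the
# mining code considers for that transaction, without any coins changing hands.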
| mit | 926,844,007,069,559,900 | 6,872,529,726,693,243,000 | 40.909091 | 128 | 0.632071 | false |
manran/django-allauth | allauth/socialaccount/models.py | 35 | 11682 | from __future__ import absolute_import
from django.core.exceptions import PermissionDenied
from django.db import models
from django.contrib.auth import authenticate
from django.contrib.sites.models import Site
from django.utils.encoding import python_2_unicode_compatible
from django.utils.crypto import get_random_string
from django.utils.translation import ugettext_lazy as _
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
import allauth.app_settings
from allauth.account.models import EmailAddress
from allauth.account.utils import get_next_redirect_url, setup_user_email
from allauth.utils import (get_user_model, get_current_site,
serialize_instance, deserialize_instance)
from . import app_settings
from . import providers
from .fields import JSONField
from ..utils import get_request_param
class SocialAppManager(models.Manager):
def get_current(self, provider, request=None):
site = get_current_site(request)
return self.get(sites__id=site.id,
provider=provider)
@python_2_unicode_compatible
class SocialApp(models.Model):
objects = SocialAppManager()
provider = models.CharField(verbose_name=_('provider'),
max_length=30,
choices=providers.registry.as_choices())
name = models.CharField(verbose_name=_('name'),
max_length=40)
client_id = models.CharField(verbose_name=_('client id'),
max_length=100,
help_text=_('App ID, or consumer key'))
secret = models.CharField(verbose_name=_('secret key'),
max_length=100,
help_text=_('API secret, client secret, or'
' consumer secret'))
key = models.CharField(verbose_name=_('key'),
max_length=100,
blank=True,
help_text=_('Key'))
# Most apps can be used across multiple domains, therefore we use
# a ManyToManyField. Note that Facebook requires an app per domain
# (unless the domains share a common base name).
# blank=True allows for disabling apps without removing them
sites = models.ManyToManyField(Site, blank=True)
class Meta:
verbose_name = _('social application')
verbose_name_plural = _('social applications')
def __str__(self):
return self.name
@python_2_unicode_compatible
class SocialAccount(models.Model):
user = models.ForeignKey(allauth.app_settings.USER_MODEL)
provider = models.CharField(verbose_name=_('provider'),
max_length=30,
choices=providers.registry.as_choices())
# Just in case you're wondering if an OpenID identity URL is going
# to fit in a 'uid':
#
# Ideally, URLField(max_length=1024, unique=True) would be used
# for identity. However, MySQL has a max_length limitation of 255
# for URLField. How about models.TextField(unique=True) then?
# Well, that won't work either for MySQL due to another bug[1]. So
# the only way out would be to drop the unique constraint, or
# switch to shorter identity URLs. Opted for the latter, as [2]
# suggests that identity URLs are supposed to be short anyway, at
# least for the old spec.
#
# [1] http://code.djangoproject.com/ticket/2495.
# [2] http://openid.net/specs/openid-authentication-1_1.html#limits
uid = models.CharField(verbose_name=_('uid'), max_length=255)
last_login = models.DateTimeField(verbose_name=_('last login'),
auto_now=True)
date_joined = models.DateTimeField(verbose_name=_('date joined'),
auto_now_add=True)
extra_data = JSONField(verbose_name=_('extra data'), default='{}')
class Meta:
unique_together = ('provider', 'uid')
verbose_name = _('social account')
verbose_name_plural = _('social accounts')
def authenticate(self):
return authenticate(account=self)
def __str__(self):
return force_text(self.user)
def get_profile_url(self):
return self.get_provider_account().get_profile_url()
def get_avatar_url(self):
return self.get_provider_account().get_avatar_url()
def get_provider(self):
return providers.registry.by_id(self.provider)
def get_provider_account(self):
return self.get_provider().wrap_account(self)
@python_2_unicode_compatible
class SocialToken(models.Model):
app = models.ForeignKey(SocialApp)
account = models.ForeignKey(SocialAccount)
token = models.TextField(
verbose_name=_('token'),
help_text=_(
'"oauth_token" (OAuth1) or access token (OAuth2)'))
token_secret = models.TextField(
blank=True,
verbose_name=_('token secret'),
help_text=_(
'"oauth_token_secret" (OAuth1) or refresh token (OAuth2)'))
expires_at = models.DateTimeField(blank=True, null=True,
verbose_name=_('expires at'))
class Meta:
unique_together = ('app', 'account')
verbose_name = _('social application token')
verbose_name_plural = _('social application tokens')
def __str__(self):
return self.token
class SocialLogin(object):
"""
Represents a social user that is in the process of being logged
in. This consists of the following information:
`account` (`SocialAccount` instance): The social account being
    logged in. Providers are not responsible for checking whether or
    not an account already exists. Therefore, a provider
typically creates a new (unsaved) `SocialAccount` instance. The
`User` instance pointed to by the account (`account.user`) may be
prefilled by the provider for use as a starting point later on
during the signup process.
    `token` (`SocialToken` instance): An optional access token
that results from performing a successful authentication
handshake.
`state` (`dict`): The state to be preserved during the
authentication handshake. Note that this state may end up in the
url -- do not put any secrets in here. It currently only contains
the url to redirect to after login.
`email_addresses` (list of `EmailAddress`): Optional list of
e-mail addresses retrieved from the provider.
"""
def __init__(self, user=None, account=None, token=None,
email_addresses=[]):
if token:
assert token.account is None or token.account == account
self.token = token
self.user = user
self.account = account
self.email_addresses = email_addresses
self.state = {}
def connect(self, request, user):
self.user = user
self.save(request, connect=True)
def serialize(self):
ret = dict(account=serialize_instance(self.account),
user=serialize_instance(self.user),
state=self.state,
email_addresses=[serialize_instance(ea)
for ea in self.email_addresses])
if self.token:
ret['token'] = serialize_instance(self.token)
return ret
@classmethod
def deserialize(cls, data):
account = deserialize_instance(SocialAccount, data['account'])
user = deserialize_instance(get_user_model(), data['user'])
if 'token' in data:
token = deserialize_instance(SocialToken, data['token'])
else:
token = None
email_addresses = []
for ea in data['email_addresses']:
email_address = deserialize_instance(EmailAddress, ea)
email_addresses.append(email_address)
ret = SocialLogin()
ret.token = token
ret.account = account
ret.user = user
ret.email_addresses = email_addresses
ret.state = data['state']
return ret
def save(self, request, connect=False):
"""
Saves a new account. Note that while the account is new,
the user may be an existing one (when connecting accounts)
"""
assert not self.is_existing
user = self.user
user.save()
self.account.user = user
self.account.save()
if app_settings.STORE_TOKENS and self.token:
self.token.account = self.account
self.token.save()
if connect:
# TODO: Add any new email addresses automatically?
pass
else:
setup_user_email(request, user, self.email_addresses)
@property
def is_existing(self):
"""
Account is temporary, not yet backed by a database record.
"""
return self.account.pk
def lookup(self):
"""
Lookup existing account, if any.
"""
assert not self.is_existing
try:
a = SocialAccount.objects.get(provider=self.account.provider,
uid=self.account.uid)
# Update account
a.extra_data = self.account.extra_data
self.account = a
self.user = self.account.user
a.save()
# Update token
if app_settings.STORE_TOKENS and self.token:
assert not self.token.pk
try:
t = SocialToken.objects.get(account=self.account,
app=self.token.app)
t.token = self.token.token
if self.token.token_secret:
# only update the refresh token if we got one
# many oauth2 providers do not resend the refresh token
t.token_secret = self.token.token_secret
t.expires_at = self.token.expires_at
t.save()
self.token = t
except SocialToken.DoesNotExist:
self.token.account = a
self.token.save()
except SocialAccount.DoesNotExist:
pass
def get_redirect_url(self, request):
url = self.state.get('next')
return url
@classmethod
def state_from_request(cls, request):
state = {}
next_url = get_next_redirect_url(request)
if next_url:
state['next'] = next_url
state['process'] = get_request_param(request, 'process', 'login')
state['scope'] = get_request_param(request, 'scope', '')
state['auth_params'] = get_request_param(request, 'auth_params', '')
return state
@classmethod
def stash_state(cls, request):
state = cls.state_from_request(request)
verifier = get_random_string()
request.session['socialaccount_state'] = (state, verifier)
return verifier
@classmethod
def unstash_state(cls, request):
if 'socialaccount_state' not in request.session:
raise PermissionDenied()
state, verifier = request.session.pop('socialaccount_state')
return state
@classmethod
def verify_and_unstash_state(cls, request, verifier):
if 'socialaccount_state' not in request.session:
raise PermissionDenied()
state, verifier2 = request.session.pop('socialaccount_state')
if verifier != verifier2:
raise PermissionDenied()
return state
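# Illustrative sketch (not part of django-allauth): how a provider's views
# might use the state helpers above around an OAuth round trip.  The variable
# names and the surrounding view code are assumptions.
#
#     # before redirecting the user to the provider:
#     verifier = SocialLogin.stash_state(request)
#     # ... include `verifier` in the provider's `state` parameter ...
#
#     # in the callback view, once the provider redirects back:
#     state = SocialLogin.verify_and_unstash_state(request, returned_verifier)
#     next_url = state.get('next')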
| mit | 8,024,126,152,662,901,000 | 5,904,124,247,760,913,000 | 36.683871 | 79 | 0.603921 | false |
aliyun/oss-ftp | python27/win32/Lib/pickle.py | 35 | 45163 | """Create portable serialized representations of Python objects.
See module cPickle for a (much) faster implementation.
See module copy_reg for a mechanism for registering custom picklers.
See module pickletools source for extensive comments.
Classes:
Pickler
Unpickler
Functions:
dump(object, file)
dumps(object) -> string
load(file) -> object
loads(string) -> object
Misc variables:
__version__
format_version
compatible_formats
"""
__version__ = "$Revision: 72223 $" # Code version
from types import *
from copy_reg import dispatch_table
from copy_reg import _extension_registry, _inverted_registry, _extension_cache
import marshal
import sys
import struct
import re
__all__ = ["PickleError", "PicklingError", "UnpicklingError", "Pickler",
"Unpickler", "dump", "dumps", "load", "loads"]
# These are purely informational; no code uses these.
format_version = "2.0" # File format version we write
compatible_formats = ["1.0", # Original protocol 0
"1.1", # Protocol 0 with INST added
"1.2", # Original protocol 1
"1.3", # Protocol 1 with BINFLOAT added
"2.0", # Protocol 2
] # Old format versions we can read
# Keep in synch with cPickle. This is the highest protocol number we
# know how to read.
HIGHEST_PROTOCOL = 2
# Why use struct.pack() for pickling but marshal.loads() for
# unpickling? struct.pack() is 40% faster than marshal.dumps(), but
# marshal.loads() is twice as fast as struct.unpack()!
mloads = marshal.loads
class PickleError(Exception):
"""A common base class for the other pickling exceptions."""
pass
class PicklingError(PickleError):
"""This exception is raised when an unpicklable object is passed to the
dump() method.
"""
pass
class UnpicklingError(PickleError):
"""This exception is raised when there is a problem unpickling an object,
such as a security violation.
Note that other exceptions may also be raised during unpickling, including
(but not necessarily limited to) AttributeError, EOFError, ImportError,
and IndexError.
"""
pass
# An instance of _Stop is raised by Unpickler.load_stop() in response to
# the STOP opcode, passing the object that is the result of unpickling.
class _Stop(Exception):
def __init__(self, value):
self.value = value
# Jython has PyStringMap; it's a dict subclass with string keys
try:
from org.python.core import PyStringMap
except ImportError:
PyStringMap = None
# UnicodeType may or may not be exported (normally imported from types)
try:
UnicodeType
except NameError:
UnicodeType = None
# Pickle opcodes. See pickletools.py for extensive docs. The listing
# here is in kind-of alphabetical order of 1-character pickle code.
# pickletools groups them by purpose.
MARK = '(' # push special markobject on stack
STOP = '.' # every pickle ends with STOP
POP = '0' # discard topmost stack item
POP_MARK = '1' # discard stack top through topmost markobject
DUP = '2' # duplicate top stack item
FLOAT = 'F' # push float object; decimal string argument
INT = 'I' # push integer or bool; decimal string argument
BININT = 'J' # push four-byte signed int
BININT1 = 'K' # push 1-byte unsigned int
LONG = 'L' # push long; decimal string argument
BININT2 = 'M' # push 2-byte unsigned int
NONE = 'N' # push None
PERSID = 'P' # push persistent object; id is taken from string arg
BINPERSID = 'Q' # " " " ; " " " " stack
REDUCE = 'R' # apply callable to argtuple, both on stack
STRING = 'S' # push string; NL-terminated string argument
BINSTRING = 'T' # push string; counted binary string argument
SHORT_BINSTRING = 'U' # " " ; " " " " < 256 bytes
UNICODE = 'V' # push Unicode string; raw-unicode-escaped'd argument
BINUNICODE = 'X' # " " " ; counted UTF-8 string argument
APPEND = 'a' # append stack top to list below it
BUILD = 'b' # call __setstate__ or __dict__.update()
GLOBAL = 'c' # push self.find_class(modname, name); 2 string args
DICT = 'd' # build a dict from stack items
EMPTY_DICT = '}' # push empty dict
APPENDS = 'e' # extend list on stack by topmost stack slice
GET = 'g' # push item from memo on stack; index is string arg
BINGET = 'h' # " " " " " " ; " " 1-byte arg
INST = 'i' # build & push class instance
LONG_BINGET = 'j' # push item from memo on stack; index is 4-byte arg
LIST = 'l' # build list from topmost stack items
EMPTY_LIST = ']' # push empty list
OBJ = 'o' # build & push class instance
PUT = 'p' # store stack top in memo; index is string arg
BINPUT = 'q' # " " " " " ; " " 1-byte arg
LONG_BINPUT = 'r' # " " " " " ; " " 4-byte arg
SETITEM = 's' # add key+value pair to dict
TUPLE = 't' # build tuple from topmost stack items
EMPTY_TUPLE = ')' # push empty tuple
SETITEMS = 'u' # modify dict by adding topmost key+value pairs
BINFLOAT = 'G' # push float; arg is 8-byte float encoding
TRUE = 'I01\n' # not an opcode; see INT docs in pickletools.py
FALSE = 'I00\n' # not an opcode; see INT docs in pickletools.py
# Protocol 2
PROTO = '\x80' # identify pickle protocol
NEWOBJ = '\x81' # build object by applying cls.__new__ to argtuple
EXT1 = '\x82' # push object from extension registry; 1-byte index
EXT2 = '\x83' # ditto, but 2-byte index
EXT4 = '\x84' # ditto, but 4-byte index
TUPLE1 = '\x85' # build 1-tuple from stack top
TUPLE2 = '\x86' # build 2-tuple from two topmost stack items
TUPLE3 = '\x87' # build 3-tuple from three topmost stack items
NEWTRUE = '\x88' # push True
NEWFALSE = '\x89' # push False
LONG1 = '\x8a' # push long from < 256 bytes
LONG4 = '\x8b' # push really big long
_tuplesize2code = [EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3]
__all__.extend([x for x in dir() if re.match("[A-Z][A-Z0-9_]+$",x)])
del x
# Pickling machinery
class Pickler:
def __init__(self, file, protocol=None):
"""This takes a file-like object for writing a pickle data stream.
The optional protocol argument tells the pickler to use the
given protocol; supported protocols are 0, 1, 2. The default
protocol is 0, to be backwards compatible. (Protocol 0 is the
only protocol that can be written to a file opened in text
mode and read back successfully. When using a protocol higher
than 0, make sure the file is opened in binary mode, both when
pickling and unpickling.)
Protocol 1 is more efficient than protocol 0; protocol 2 is
more efficient than protocol 1.
Specifying a negative protocol version selects the highest
protocol version supported. The higher the protocol used, the
more recent the version of Python needed to read the pickle
produced.
The file parameter must have a write() method that accepts a single
string argument. It can thus be an open file object, a StringIO
object, or any other custom object that meets this interface.
"""
if protocol is None:
protocol = 0
if protocol < 0:
protocol = HIGHEST_PROTOCOL
elif not 0 <= protocol <= HIGHEST_PROTOCOL:
raise ValueError("pickle protocol must be <= %d" % HIGHEST_PROTOCOL)
self.write = file.write
self.memo = {}
self.proto = int(protocol)
self.bin = protocol >= 1
self.fast = 0
def clear_memo(self):
"""Clears the pickler's "memo".
The memo is the data structure that remembers which objects the
pickler has already seen, so that shared or recursive objects are
pickled by reference and not by value. This method is useful when
re-using picklers.
"""
self.memo.clear()
def dump(self, obj):
"""Write a pickled representation of obj to the open file."""
if self.proto >= 2:
self.write(PROTO + chr(self.proto))
self.save(obj)
self.write(STOP)
def memoize(self, obj):
"""Store an object in the memo."""
# The Pickler memo is a dictionary mapping object ids to 2-tuples
# that contain the Unpickler memo key and the object being memoized.
# The memo key is written to the pickle and will become
# the key in the Unpickler's memo. The object is stored in the
# Pickler memo so that transient objects are kept alive during
# pickling.
# The use of the Unpickler memo length as the memo key is just a
# convention. The only requirement is that the memo values be unique.
# But there appears no advantage to any other scheme, and this
# scheme allows the Unpickler memo to be implemented as a plain (but
# growable) array, indexed by memo key.
if self.fast:
return
assert id(obj) not in self.memo
memo_len = len(self.memo)
self.write(self.put(memo_len))
self.memo[id(obj)] = memo_len, obj
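        # For example (illustration): once save(x) has stored a list x, the
        # memo holds self.memo[id(x)] == (n, x) for some key n, and any later
        # save(x) in the same pickle emits just a GET/BINGET of n instead of
        # re-pickling x.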
# Return a PUT (BINPUT, LONG_BINPUT) opcode string, with argument i.
def put(self, i, pack=struct.pack):
if self.bin:
if i < 256:
return BINPUT + chr(i)
else:
return LONG_BINPUT + pack("<i", i)
return PUT + repr(i) + '\n'
# Return a GET (BINGET, LONG_BINGET) opcode string, with argument i.
def get(self, i, pack=struct.pack):
if self.bin:
if i < 256:
return BINGET + chr(i)
else:
return LONG_BINGET + pack("<i", i)
return GET + repr(i) + '\n'
def save(self, obj):
# Check for persistent id (defined by a subclass)
pid = self.persistent_id(obj)
if pid is not None:
self.save_pers(pid)
return
# Check the memo
x = self.memo.get(id(obj))
if x:
self.write(self.get(x[0]))
return
# Check the type dispatch table
t = type(obj)
f = self.dispatch.get(t)
if f:
f(self, obj) # Call unbound method with explicit self
return
# Check copy_reg.dispatch_table
reduce = dispatch_table.get(t)
if reduce:
rv = reduce(obj)
else:
# Check for a class with a custom metaclass; treat as regular class
try:
issc = issubclass(t, TypeType)
except TypeError: # t is not a class (old Boost; see SF #502085)
issc = 0
if issc:
self.save_global(obj)
return
# Check for a __reduce_ex__ method, fall back to __reduce__
reduce = getattr(obj, "__reduce_ex__", None)
if reduce:
rv = reduce(self.proto)
else:
reduce = getattr(obj, "__reduce__", None)
if reduce:
rv = reduce()
else:
raise PicklingError("Can't pickle %r object: %r" %
(t.__name__, obj))
# Check for string returned by reduce(), meaning "save as global"
if type(rv) is StringType:
self.save_global(obj, rv)
return
# Assert that reduce() returned a tuple
if type(rv) is not TupleType:
raise PicklingError("%s must return string or tuple" % reduce)
# Assert that it returned an appropriately sized tuple
l = len(rv)
if not (2 <= l <= 5):
raise PicklingError("Tuple returned by %s must have "
"two to five elements" % reduce)
# Save the reduce() output and finally memoize the object
self.save_reduce(obj=obj, *rv)
def persistent_id(self, obj):
# This exists so a subclass can override it
return None
def save_pers(self, pid):
# Save a persistent id reference
if self.bin:
self.save(pid)
self.write(BINPERSID)
else:
self.write(PERSID + str(pid) + '\n')
def save_reduce(self, func, args, state=None,
listitems=None, dictitems=None, obj=None):
# This API is called by some subclasses
# Assert that args is a tuple or None
if not isinstance(args, TupleType):
raise PicklingError("args from reduce() should be a tuple")
# Assert that func is callable
if not hasattr(func, '__call__'):
raise PicklingError("func from reduce should be callable")
save = self.save
write = self.write
# Protocol 2 special case: if func's name is __newobj__, use NEWOBJ
if self.proto >= 2 and getattr(func, "__name__", "") == "__newobj__":
# A __reduce__ implementation can direct protocol 2 to
# use the more efficient NEWOBJ opcode, while still
# allowing protocol 0 and 1 to work normally. For this to
# work, the function returned by __reduce__ should be
# called __newobj__, and its first argument should be a
# new-style class. The implementation for __newobj__
# should be as follows, although pickle has no way to
# verify this:
#
# def __newobj__(cls, *args):
# return cls.__new__(cls, *args)
#
# Protocols 0 and 1 will pickle a reference to __newobj__,
# while protocol 2 (and above) will pickle a reference to
# cls, the remaining args tuple, and the NEWOBJ code,
# which calls cls.__new__(cls, *args) at unpickling time
# (see load_newobj below). If __reduce__ returns a
# three-tuple, the state from the third tuple item will be
# pickled regardless of the protocol, calling __setstate__
# at unpickling time (see load_build below).
#
# Note that no standard __newobj__ implementation exists;
# you have to provide your own. This is to enforce
# compatibility with Python 2.2 (pickles written using
# protocol 0 or 1 in Python 2.3 should be unpicklable by
# Python 2.2).
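            # Sketch (illustration only, not required by pickle): under
            # protocol 2 the default object.__reduce_ex__ already produces a
            # __newobj__-style tuple for new-style classes; a class that needs
            # arguments passed to __new__ at unpickling time supplies them via
            # __getnewargs__, e.g.
            #
            #     class Interned(object):
            #         def __new__(cls, name):
            #             self = object.__new__(cls)
            #             self.name = intern(name)
            #             return self
            #         def __getnewargs__(self):
            #             return (self.name,)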
cls = args[0]
if not hasattr(cls, "__new__"):
raise PicklingError(
"args[0] from __newobj__ args has no __new__")
if obj is not None and cls is not obj.__class__:
raise PicklingError(
"args[0] from __newobj__ args has the wrong class")
args = args[1:]
save(cls)
save(args)
write(NEWOBJ)
else:
save(func)
save(args)
write(REDUCE)
if obj is not None:
self.memoize(obj)
# More new special cases (that work with older protocols as
# well): when __reduce__ returns a tuple with 4 or 5 items,
# the 4th and 5th item should be iterators that provide list
# items and dict items (as (key, value) tuples), or None.
if listitems is not None:
self._batch_appends(listitems)
if dictitems is not None:
self._batch_setitems(dictitems)
if state is not None:
save(state)
write(BUILD)
# Methods below this point are dispatched through the dispatch table
dispatch = {}
def save_none(self, obj):
self.write(NONE)
dispatch[NoneType] = save_none
def save_bool(self, obj):
if self.proto >= 2:
self.write(obj and NEWTRUE or NEWFALSE)
else:
self.write(obj and TRUE or FALSE)
dispatch[bool] = save_bool
def save_int(self, obj, pack=struct.pack):
if self.bin:
# If the int is small enough to fit in a signed 4-byte 2's-comp
# format, we can store it more efficiently than the general
# case.
# First one- and two-byte unsigned ints:
if obj >= 0:
if obj <= 0xff:
self.write(BININT1 + chr(obj))
return
if obj <= 0xffff:
self.write("%c%c%c" % (BININT2, obj&0xff, obj>>8))
return
# Next check for 4-byte signed ints:
high_bits = obj >> 31 # note that Python shift sign-extends
if high_bits == 0 or high_bits == -1:
# All high bits are copies of bit 2**31, so the value
# fits in a 4-byte signed int.
self.write(BININT + pack("<i", obj))
return
# Text pickle, or int too big to fit in signed 4-byte format.
self.write(INT + repr(obj) + '\n')
dispatch[IntType] = save_int
def save_long(self, obj, pack=struct.pack):
if self.proto >= 2:
bytes = encode_long(obj)
n = len(bytes)
if n < 256:
self.write(LONG1 + chr(n) + bytes)
else:
self.write(LONG4 + pack("<i", n) + bytes)
return
self.write(LONG + repr(obj) + '\n')
dispatch[LongType] = save_long
def save_float(self, obj, pack=struct.pack):
if self.bin:
self.write(BINFLOAT + pack('>d', obj))
else:
self.write(FLOAT + repr(obj) + '\n')
dispatch[FloatType] = save_float
def save_string(self, obj, pack=struct.pack):
if self.bin:
n = len(obj)
if n < 256:
self.write(SHORT_BINSTRING + chr(n) + obj)
else:
self.write(BINSTRING + pack("<i", n) + obj)
else:
self.write(STRING + repr(obj) + '\n')
self.memoize(obj)
dispatch[StringType] = save_string
def save_unicode(self, obj, pack=struct.pack):
if self.bin:
encoding = obj.encode('utf-8')
n = len(encoding)
self.write(BINUNICODE + pack("<i", n) + encoding)
else:
obj = obj.replace("\\", "\\u005c")
obj = obj.replace("\n", "\\u000a")
self.write(UNICODE + obj.encode('raw-unicode-escape') + '\n')
self.memoize(obj)
dispatch[UnicodeType] = save_unicode
if StringType is UnicodeType:
# This is true for Jython
def save_string(self, obj, pack=struct.pack):
unicode = obj.isunicode()
if self.bin:
if unicode:
obj = obj.encode("utf-8")
l = len(obj)
if l < 256 and not unicode:
self.write(SHORT_BINSTRING + chr(l) + obj)
else:
s = pack("<i", l)
if unicode:
self.write(BINUNICODE + s + obj)
else:
self.write(BINSTRING + s + obj)
else:
if unicode:
obj = obj.replace("\\", "\\u005c")
obj = obj.replace("\n", "\\u000a")
obj = obj.encode('raw-unicode-escape')
self.write(UNICODE + obj + '\n')
else:
self.write(STRING + repr(obj) + '\n')
self.memoize(obj)
dispatch[StringType] = save_string
def save_tuple(self, obj):
write = self.write
proto = self.proto
n = len(obj)
if n == 0:
if proto:
write(EMPTY_TUPLE)
else:
write(MARK + TUPLE)
return
save = self.save
memo = self.memo
if n <= 3 and proto >= 2:
for element in obj:
save(element)
# Subtle. Same as in the big comment below.
if id(obj) in memo:
get = self.get(memo[id(obj)][0])
write(POP * n + get)
else:
write(_tuplesize2code[n])
self.memoize(obj)
return
# proto 0 or proto 1 and tuple isn't empty, or proto > 1 and tuple
# has more than 3 elements.
write(MARK)
for element in obj:
save(element)
if id(obj) in memo:
            # Subtle. obj was not in memo when we entered save_tuple(), so
# the process of saving the tuple's elements must have saved
# the tuple itself: the tuple is recursive. The proper action
# now is to throw away everything we put on the stack, and
# simply GET the tuple (it's already constructed). This check
# could have been done in the "for element" loop instead, but
# recursive tuples are a rare thing.
get = self.get(memo[id(obj)][0])
if proto:
write(POP_MARK + get)
else: # proto 0 -- POP_MARK not available
write(POP * (n+1) + get)
return
# No recursion.
self.write(TUPLE)
self.memoize(obj)
dispatch[TupleType] = save_tuple
# save_empty_tuple() isn't used by anything in Python 2.3. However, I
# found a Pickler subclass in Zope3 that calls it, so it's not harmless
# to remove it.
def save_empty_tuple(self, obj):
self.write(EMPTY_TUPLE)
def save_list(self, obj):
write = self.write
if self.bin:
write(EMPTY_LIST)
else: # proto 0 -- can't use EMPTY_LIST
write(MARK + LIST)
self.memoize(obj)
self._batch_appends(iter(obj))
dispatch[ListType] = save_list
# Keep in synch with cPickle's BATCHSIZE. Nothing will break if it gets
# out of synch, though.
_BATCHSIZE = 1000
def _batch_appends(self, items):
# Helper to batch up APPENDS sequences
save = self.save
write = self.write
if not self.bin:
for x in items:
save(x)
write(APPEND)
return
r = xrange(self._BATCHSIZE)
while items is not None:
tmp = []
for i in r:
try:
x = items.next()
tmp.append(x)
except StopIteration:
items = None
break
n = len(tmp)
if n > 1:
write(MARK)
for x in tmp:
save(x)
write(APPENDS)
elif n:
save(tmp[0])
write(APPEND)
# else tmp is empty, and we're done
def save_dict(self, obj):
write = self.write
if self.bin:
write(EMPTY_DICT)
else: # proto 0 -- can't use EMPTY_DICT
write(MARK + DICT)
self.memoize(obj)
self._batch_setitems(obj.iteritems())
dispatch[DictionaryType] = save_dict
if not PyStringMap is None:
dispatch[PyStringMap] = save_dict
def _batch_setitems(self, items):
# Helper to batch up SETITEMS sequences; proto >= 1 only
save = self.save
write = self.write
if not self.bin:
for k, v in items:
save(k)
save(v)
write(SETITEM)
return
r = xrange(self._BATCHSIZE)
while items is not None:
tmp = []
for i in r:
try:
tmp.append(items.next())
except StopIteration:
items = None
break
n = len(tmp)
if n > 1:
write(MARK)
for k, v in tmp:
save(k)
save(v)
write(SETITEMS)
elif n:
k, v = tmp[0]
save(k)
save(v)
write(SETITEM)
# else tmp is empty, and we're done
def save_inst(self, obj):
cls = obj.__class__
memo = self.memo
write = self.write
save = self.save
if hasattr(obj, '__getinitargs__'):
args = obj.__getinitargs__()
len(args) # XXX Assert it's a sequence
_keep_alive(args, memo)
else:
args = ()
write(MARK)
if self.bin:
save(cls)
for arg in args:
save(arg)
write(OBJ)
else:
for arg in args:
save(arg)
write(INST + cls.__module__ + '\n' + cls.__name__ + '\n')
self.memoize(obj)
try:
getstate = obj.__getstate__
except AttributeError:
stuff = obj.__dict__
else:
stuff = getstate()
_keep_alive(stuff, memo)
save(stuff)
write(BUILD)
dispatch[InstanceType] = save_inst
def save_global(self, obj, name=None, pack=struct.pack):
write = self.write
memo = self.memo
if name is None:
name = obj.__name__
module = getattr(obj, "__module__", None)
if module is None:
module = whichmodule(obj, name)
try:
__import__(module)
mod = sys.modules[module]
klass = getattr(mod, name)
except (ImportError, KeyError, AttributeError):
raise PicklingError(
"Can't pickle %r: it's not found as %s.%s" %
(obj, module, name))
else:
if klass is not obj:
raise PicklingError(
"Can't pickle %r: it's not the same object as %s.%s" %
(obj, module, name))
if self.proto >= 2:
code = _extension_registry.get((module, name))
if code:
assert code > 0
if code <= 0xff:
write(EXT1 + chr(code))
elif code <= 0xffff:
write("%c%c%c" % (EXT2, code&0xff, code>>8))
else:
write(EXT4 + pack("<i", code))
return
write(GLOBAL + module + '\n' + name + '\n')
self.memoize(obj)
dispatch[ClassType] = save_global
dispatch[FunctionType] = save_global
dispatch[BuiltinFunctionType] = save_global
dispatch[TypeType] = save_global
# Pickling helpers
def _keep_alive(x, memo):
"""Keeps a reference to the object x in the memo.
Because we remember objects by their id, we have
to assure that possibly temporary objects are kept
alive by referencing them.
We store a reference at the id of the memo, which should
normally not be used unless someone tries to deepcopy
the memo itself...
"""
try:
memo[id(memo)].append(x)
except KeyError:
# aha, this is the first one :-)
memo[id(memo)]=[x]
# A cache for whichmodule(), mapping a function object to the name of
# the module in which the function was found.
classmap = {} # called classmap for backwards compatibility
def whichmodule(func, funcname):
"""Figure out the module in which a function occurs.
Search sys.modules for the module.
Cache in classmap.
Return a module name.
If the function cannot be found, return "__main__".
"""
# Python functions should always get an __module__ from their globals.
mod = getattr(func, "__module__", None)
if mod is not None:
return mod
if func in classmap:
return classmap[func]
for name, module in sys.modules.items():
if module is None:
continue # skip dummy package entries
if name != '__main__' and getattr(module, funcname, None) is func:
break
else:
name = '__main__'
classmap[func] = name
return name
# Unpickling machinery
class Unpickler:
def __init__(self, file):
"""This takes a file-like object for reading a pickle data stream.
The protocol version of the pickle is detected automatically, so no
proto argument is needed.
The file-like object must have two methods, a read() method that
takes an integer argument, and a readline() method that requires no
arguments. Both methods should return a string. Thus file-like
object can be a file object opened for reading, a StringIO object,
or any other custom object that meets this interface.
"""
self.readline = file.readline
self.read = file.read
self.memo = {}
def load(self):
"""Read a pickled object representation from the open file.
Return the reconstituted object hierarchy specified in the file.
"""
self.mark = object() # any new unique object
self.stack = []
self.append = self.stack.append
read = self.read
dispatch = self.dispatch
try:
while 1:
key = read(1)
dispatch[key](self)
except _Stop, stopinst:
return stopinst.value
# Return largest index k such that self.stack[k] is self.mark.
# If the stack doesn't contain a mark, eventually raises IndexError.
    # This could be sped up by maintaining another stack of indices at which
# the mark appears. For that matter, the latter stack would suffice,
# and we wouldn't need to push mark objects on self.stack at all.
# Doing so is probably a good thing, though, since if the pickle is
# corrupt (or hostile) we may get a clue from finding self.mark embedded
# in unpickled objects.
def marker(self):
stack = self.stack
mark = self.mark
k = len(stack)-1
while stack[k] is not mark: k = k-1
return k
dispatch = {}
def load_eof(self):
raise EOFError
dispatch[''] = load_eof
def load_proto(self):
proto = ord(self.read(1))
if not 0 <= proto <= 2:
raise ValueError, "unsupported pickle protocol: %d" % proto
dispatch[PROTO] = load_proto
def load_persid(self):
pid = self.readline()[:-1]
self.append(self.persistent_load(pid))
dispatch[PERSID] = load_persid
def load_binpersid(self):
pid = self.stack.pop()
self.append(self.persistent_load(pid))
dispatch[BINPERSID] = load_binpersid
def load_none(self):
self.append(None)
dispatch[NONE] = load_none
def load_false(self):
self.append(False)
dispatch[NEWFALSE] = load_false
def load_true(self):
self.append(True)
dispatch[NEWTRUE] = load_true
def load_int(self):
data = self.readline()
if data == FALSE[1:]:
val = False
elif data == TRUE[1:]:
val = True
else:
try:
val = int(data)
except ValueError:
val = long(data)
self.append(val)
dispatch[INT] = load_int
def load_binint(self):
self.append(mloads('i' + self.read(4)))
dispatch[BININT] = load_binint
def load_binint1(self):
self.append(ord(self.read(1)))
dispatch[BININT1] = load_binint1
def load_binint2(self):
self.append(mloads('i' + self.read(2) + '\000\000'))
dispatch[BININT2] = load_binint2
def load_long(self):
self.append(long(self.readline()[:-1], 0))
dispatch[LONG] = load_long
def load_long1(self):
n = ord(self.read(1))
bytes = self.read(n)
self.append(decode_long(bytes))
dispatch[LONG1] = load_long1
def load_long4(self):
n = mloads('i' + self.read(4))
bytes = self.read(n)
self.append(decode_long(bytes))
dispatch[LONG4] = load_long4
def load_float(self):
self.append(float(self.readline()[:-1]))
dispatch[FLOAT] = load_float
def load_binfloat(self, unpack=struct.unpack):
self.append(unpack('>d', self.read(8))[0])
dispatch[BINFLOAT] = load_binfloat
def load_string(self):
rep = self.readline()[:-1]
for q in "\"'": # double or single quote
if rep.startswith(q):
if len(rep) < 2 or not rep.endswith(q):
raise ValueError, "insecure string pickle"
rep = rep[len(q):-len(q)]
break
else:
raise ValueError, "insecure string pickle"
self.append(rep.decode("string-escape"))
dispatch[STRING] = load_string
def load_binstring(self):
len = mloads('i' + self.read(4))
self.append(self.read(len))
dispatch[BINSTRING] = load_binstring
def load_unicode(self):
self.append(unicode(self.readline()[:-1],'raw-unicode-escape'))
dispatch[UNICODE] = load_unicode
def load_binunicode(self):
len = mloads('i' + self.read(4))
self.append(unicode(self.read(len),'utf-8'))
dispatch[BINUNICODE] = load_binunicode
def load_short_binstring(self):
len = ord(self.read(1))
self.append(self.read(len))
dispatch[SHORT_BINSTRING] = load_short_binstring
def load_tuple(self):
k = self.marker()
self.stack[k:] = [tuple(self.stack[k+1:])]
dispatch[TUPLE] = load_tuple
def load_empty_tuple(self):
self.stack.append(())
dispatch[EMPTY_TUPLE] = load_empty_tuple
def load_tuple1(self):
self.stack[-1] = (self.stack[-1],)
dispatch[TUPLE1] = load_tuple1
def load_tuple2(self):
self.stack[-2:] = [(self.stack[-2], self.stack[-1])]
dispatch[TUPLE2] = load_tuple2
def load_tuple3(self):
self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])]
dispatch[TUPLE3] = load_tuple3
def load_empty_list(self):
self.stack.append([])
dispatch[EMPTY_LIST] = load_empty_list
def load_empty_dictionary(self):
self.stack.append({})
dispatch[EMPTY_DICT] = load_empty_dictionary
def load_list(self):
k = self.marker()
self.stack[k:] = [self.stack[k+1:]]
dispatch[LIST] = load_list
def load_dict(self):
k = self.marker()
d = {}
items = self.stack[k+1:]
for i in range(0, len(items), 2):
key = items[i]
value = items[i+1]
d[key] = value
self.stack[k:] = [d]
dispatch[DICT] = load_dict
# INST and OBJ differ only in how they get a class object. It's not
# only sensible to do the rest in a common routine, the two routines
# previously diverged and grew different bugs.
# klass is the class to instantiate, and k points to the topmost mark
# object, following which are the arguments for klass.__init__.
def _instantiate(self, klass, k):
args = tuple(self.stack[k+1:])
del self.stack[k:]
instantiated = 0
if (not args and
type(klass) is ClassType and
not hasattr(klass, "__getinitargs__")):
try:
value = _EmptyClass()
value.__class__ = klass
instantiated = 1
except RuntimeError:
# In restricted execution, assignment to inst.__class__ is
# prohibited
pass
if not instantiated:
try:
value = klass(*args)
except TypeError, err:
raise TypeError, "in constructor for %s: %s" % (
klass.__name__, str(err)), sys.exc_info()[2]
self.append(value)
def load_inst(self):
module = self.readline()[:-1]
name = self.readline()[:-1]
klass = self.find_class(module, name)
self._instantiate(klass, self.marker())
dispatch[INST] = load_inst
def load_obj(self):
# Stack is ... markobject classobject arg1 arg2 ...
k = self.marker()
klass = self.stack.pop(k+1)
self._instantiate(klass, k)
dispatch[OBJ] = load_obj
def load_newobj(self):
args = self.stack.pop()
cls = self.stack[-1]
obj = cls.__new__(cls, *args)
self.stack[-1] = obj
dispatch[NEWOBJ] = load_newobj
def load_global(self):
module = self.readline()[:-1]
name = self.readline()[:-1]
klass = self.find_class(module, name)
self.append(klass)
dispatch[GLOBAL] = load_global
def load_ext1(self):
code = ord(self.read(1))
self.get_extension(code)
dispatch[EXT1] = load_ext1
def load_ext2(self):
code = mloads('i' + self.read(2) + '\000\000')
self.get_extension(code)
dispatch[EXT2] = load_ext2
def load_ext4(self):
code = mloads('i' + self.read(4))
self.get_extension(code)
dispatch[EXT4] = load_ext4
def get_extension(self, code):
nil = []
obj = _extension_cache.get(code, nil)
if obj is not nil:
self.append(obj)
return
key = _inverted_registry.get(code)
if not key:
raise ValueError("unregistered extension code %d" % code)
obj = self.find_class(*key)
_extension_cache[code] = obj
self.append(obj)
def find_class(self, module, name):
# Subclasses may override this
__import__(module)
mod = sys.modules[module]
klass = getattr(mod, name)
return klass
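    # Customary hardening sketch (illustration only): subclass Unpickler and
    # override find_class to whitelist what may be loaded.  The allowed set
    # below is an arbitrary example.
    #
    #     class SafeUnpickler(Unpickler):
    #         ALLOWED = {('__builtin__', 'set'), ('collections', 'OrderedDict')}
    #         def find_class(self, module, name):
    #             if (module, name) not in self.ALLOWED:
    #                 raise UnpicklingError("forbidden global %s.%s"
    #                                       % (module, name))
    #             return Unpickler.find_class(self, module, name)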
def load_reduce(self):
stack = self.stack
args = stack.pop()
func = stack[-1]
value = func(*args)
stack[-1] = value
dispatch[REDUCE] = load_reduce
def load_pop(self):
del self.stack[-1]
dispatch[POP] = load_pop
def load_pop_mark(self):
k = self.marker()
del self.stack[k:]
dispatch[POP_MARK] = load_pop_mark
def load_dup(self):
self.append(self.stack[-1])
dispatch[DUP] = load_dup
def load_get(self):
self.append(self.memo[self.readline()[:-1]])
dispatch[GET] = load_get
def load_binget(self):
i = ord(self.read(1))
self.append(self.memo[repr(i)])
dispatch[BINGET] = load_binget
def load_long_binget(self):
i = mloads('i' + self.read(4))
self.append(self.memo[repr(i)])
dispatch[LONG_BINGET] = load_long_binget
def load_put(self):
self.memo[self.readline()[:-1]] = self.stack[-1]
dispatch[PUT] = load_put
def load_binput(self):
i = ord(self.read(1))
self.memo[repr(i)] = self.stack[-1]
dispatch[BINPUT] = load_binput
def load_long_binput(self):
i = mloads('i' + self.read(4))
self.memo[repr(i)] = self.stack[-1]
dispatch[LONG_BINPUT] = load_long_binput
def load_append(self):
stack = self.stack
value = stack.pop()
list = stack[-1]
list.append(value)
dispatch[APPEND] = load_append
def load_appends(self):
stack = self.stack
mark = self.marker()
list = stack[mark - 1]
list.extend(stack[mark + 1:])
del stack[mark:]
dispatch[APPENDS] = load_appends
def load_setitem(self):
stack = self.stack
value = stack.pop()
key = stack.pop()
dict = stack[-1]
dict[key] = value
dispatch[SETITEM] = load_setitem
def load_setitems(self):
stack = self.stack
mark = self.marker()
dict = stack[mark - 1]
for i in range(mark + 1, len(stack), 2):
dict[stack[i]] = stack[i + 1]
del stack[mark:]
dispatch[SETITEMS] = load_setitems
def load_build(self):
stack = self.stack
state = stack.pop()
inst = stack[-1]
setstate = getattr(inst, "__setstate__", None)
if setstate:
setstate(state)
return
slotstate = None
if isinstance(state, tuple) and len(state) == 2:
state, slotstate = state
if state:
try:
d = inst.__dict__
try:
for k, v in state.iteritems():
d[intern(k)] = v
# keys in state don't have to be strings
# don't blow up, but don't go out of our way
except TypeError:
d.update(state)
except RuntimeError:
# XXX In restricted execution, the instance's __dict__
# is not accessible. Use the old way of unpickling
# the instance variables. This is a semantic
# difference when unpickling in restricted
# vs. unrestricted modes.
# Note, however, that cPickle has never tried to do the
# .update() business, and always uses
# PyObject_SetItem(inst.__dict__, key, value) in a
# loop over state.items().
for k, v in state.items():
setattr(inst, k, v)
if slotstate:
for k, v in slotstate.items():
setattr(inst, k, v)
dispatch[BUILD] = load_build
def load_mark(self):
self.append(self.mark)
dispatch[MARK] = load_mark
def load_stop(self):
value = self.stack.pop()
raise _Stop(value)
dispatch[STOP] = load_stop
# Helper class for load_inst/load_obj
class _EmptyClass:
pass
# Encode/decode longs in linear time.
import binascii as _binascii
def encode_long(x):
r"""Encode a long to a two's complement little-endian binary string.
Note that 0L is a special case, returning an empty string, to save a
byte in the LONG1 pickling context.
>>> encode_long(0L)
''
>>> encode_long(255L)
'\xff\x00'
>>> encode_long(32767L)
'\xff\x7f'
>>> encode_long(-256L)
'\x00\xff'
>>> encode_long(-32768L)
'\x00\x80'
>>> encode_long(-128L)
'\x80'
>>> encode_long(127L)
'\x7f'
>>>
"""
if x == 0:
return ''
if x > 0:
ashex = hex(x)
assert ashex.startswith("0x")
njunkchars = 2 + ashex.endswith('L')
nibbles = len(ashex) - njunkchars
if nibbles & 1:
# need an even # of nibbles for unhexlify
ashex = "0x0" + ashex[2:]
elif int(ashex[2], 16) >= 8:
# "looks negative", so need a byte of sign bits
ashex = "0x00" + ashex[2:]
else:
# Build the 256's-complement: (1L << nbytes) + x. The trick is
# to find the number of bytes in linear time (although that should
# really be a constant-time task).
ashex = hex(-x)
assert ashex.startswith("0x")
njunkchars = 2 + ashex.endswith('L')
nibbles = len(ashex) - njunkchars
if nibbles & 1:
# Extend to a full byte.
nibbles += 1
nbits = nibbles * 4
x += 1L << nbits
assert x > 0
ashex = hex(x)
njunkchars = 2 + ashex.endswith('L')
newnibbles = len(ashex) - njunkchars
if newnibbles < nibbles:
ashex = "0x" + "0" * (nibbles - newnibbles) + ashex[2:]
if int(ashex[2], 16) < 8:
# "looks positive", so need a byte of sign bits
ashex = "0xff" + ashex[2:]
if ashex.endswith('L'):
ashex = ashex[2:-1]
else:
ashex = ashex[2:]
assert len(ashex) & 1 == 0, (x, ashex)
binary = _binascii.unhexlify(ashex)
return binary[::-1]
def decode_long(data):
r"""Decode a long from a two's complement little-endian binary string.
>>> decode_long('')
0L
>>> decode_long("\xff\x00")
255L
>>> decode_long("\xff\x7f")
32767L
>>> decode_long("\x00\xff")
-256L
>>> decode_long("\x00\x80")
-32768L
>>> decode_long("\x80")
-128L
>>> decode_long("\x7f")
127L
"""
nbytes = len(data)
if nbytes == 0:
return 0L
ashex = _binascii.hexlify(data[::-1])
n = long(ashex, 16) # quadratic time before Python 2.3; linear now
if data[-1] >= '\x80':
n -= 1L << (nbytes * 8)
return n
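# Illustrative helper (not called anywhere in this module): shows that
# encode_long and decode_long above are inverses of each other.
# Python 2 only, since it relies on the long type.
def _check_long_roundtrip():
    for value in (0L, 1L, -1L, 127L, 128L, -128L, 255L, -256L, 32767L,
                  -32768L, 10L ** 40):
        assert decode_long(encode_long(value)) == value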
# Shorthands
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
def dump(obj, file, protocol=None):
Pickler(file, protocol).dump(obj)
def dumps(obj, protocol=None):
file = StringIO()
Pickler(file, protocol).dump(obj)
return file.getvalue()
def load(file):
return Unpickler(file).load()
def loads(str):
file = StringIO(str)
return Unpickler(file).load()
# Doctest
def _test():
import doctest
return doctest.testmod()
if __name__ == "__main__":
_test()
| mit | 1,041,480,889,807,584,800 | 5,903,231,207,270,386,000 | 31.468009 | 80 | 0.546089 | false |
Youwotma/portia | slybot/slybot/pageactions.py | 1 | 1528 | import json
import re
LUA_SOURCE = """
function main(splash)
assert(splash:go(splash.args.url))
assert(splash:runjs(splash.args.js_source))
assert(splash:wait_for_resume(splash.args.slybot_actions_source))
splash:set_result_content_type("text/html")
return splash.html()
end
"""
JS_SOURCE = """
function main(splash) {
var events = (%s);
try{
__slybot__performEvents(events, function(){
splash.resume();
});
}catch(e){
splash.error(e);
}
}
"""
def filter_for_url(url):
def _filter(page_action):
accept = page_action.get('accept')
reject = page_action.get('reject')
if reject and re.search(reject, url):
return False
if accept and not re.search(accept, url):
return False
return True
return _filter
class PageActionsMiddleware(object):
def process_request(self, request, spider):
splash_options = request.meta.get('splash', None)
if not splash_options: # Already processed or JS disabled
return
splash_args = splash_options.get('args', {})
events = spider.page_actions
url = splash_args['url']
events = filter(filter_for_url(url), events)
if len(events):
splash_options['endpoint'] = 'execute'
splash_args.update({
"lua_source": LUA_SOURCE,
"slybot_actions_source": (JS_SOURCE % json.dumps(events)),
})
__all__ = ['PageActionsMiddleware']
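# Illustrative sketch (not used by the middleware): shows how filter_for_url
# decides which page actions apply to a URL.  The action dicts and the URL
# below are made-up examples.
def _filter_for_url_example():
    actions = [
        {'accept': 'products'},   # only runs on URLs matching 'products'
        {'reject': 'login'},      # runs everywhere except URLs matching 'login'
        {},                       # no accept/reject: always runs
    ]
    url = 'http://example.com/products/123'
    return [action for action in actions if filter_for_url(url)(action)]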
| bsd-3-clause | -1,383,190,662,352,601,300 | -5,893,044,056,192,630,000 | 26.781818 | 74 | 0.590314 | false |
prospwro/odoo | addons/mrp_byproduct/mrp_byproduct.py | 108 | 8932 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields
from openerp.osv import osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
class mrp_subproduct(osv.osv):
_name = 'mrp.subproduct'
_description = 'Byproduct'
_columns={
'product_id': fields.many2one('product.product', 'Product', required=True),
'product_qty': fields.float('Product Qty', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'subproduct_type': fields.selection([('fixed','Fixed'),('variable','Variable')], 'Quantity Type', required=True, help="Define how the quantity of byproducts will be set on the production orders using this BoM.\
'Fixed' depicts a situation where the quantity of created byproduct is always equal to the quantity set on the BoM, regardless of how many are created in the production order.\
By opposition, 'Variable' means that the quantity will be computed as\
'(quantity of byproduct set on the BoM / quantity of manufactured product set on the BoM * quantity of manufactured product in the production order.)'"),
'bom_id': fields.many2one('mrp.bom', 'BoM', ondelete='cascade'),
}
_defaults={
'subproduct_type': 'variable',
'product_qty': lambda *a: 1.0,
}
def onchange_product_id(self, cr, uid, ids, product_id, context=None):
""" Changes UoM if product_id changes.
@param product_id: Changed product_id
@return: Dictionary of changed values
"""
if product_id:
prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
v = {'product_uom': prod.uom_id.id}
return {'value': v}
return {}
def onchange_uom(self, cr, uid, ids, product_id, product_uom, context=None):
res = {'value':{}}
if not product_uom or not product_id:
return res
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
uom = self.pool.get('product.uom').browse(cr, uid, product_uom, context=context)
if uom.category_id.id != product.uom_id.category_id.id:
res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')}
res['value'].update({'product_uom': product.uom_id.id})
return res
class mrp_bom(osv.osv):
_name = 'mrp.bom'
_description = 'Bill of Material'
_inherit='mrp.bom'
_columns={
'sub_products':fields.one2many('mrp.subproduct', 'bom_id', 'Byproducts', copy=True),
}
class mrp_production(osv.osv):
_description = 'Production'
_inherit= 'mrp.production'
def action_confirm(self, cr, uid, ids, context=None):
""" Confirms production order and calculates quantity based on subproduct_type.
@return: Newly generated picking Id.
"""
move_obj = self.pool.get('stock.move')
picking_id = super(mrp_production,self).action_confirm(cr, uid, ids, context=context)
product_uom_obj = self.pool.get('product.uom')
for production in self.browse(cr, uid, ids):
source = production.product_id.property_stock_production.id
if not production.bom_id:
continue
for sub_product in production.bom_id.sub_products:
product_uom_factor = product_uom_obj._compute_qty(cr, uid, production.product_uom.id, production.product_qty, production.bom_id.product_uom.id)
qty1 = sub_product.product_qty
qty2 = production.product_uos and production.product_uos_qty or False
product_uos_factor = 0.0
if qty2 and production.bom_id.product_uos.id:
product_uos_factor = product_uom_obj._compute_qty(cr, uid, production.product_uos.id, production.product_uos_qty, production.bom_id.product_uos.id)
if sub_product.subproduct_type == 'variable':
if production.product_qty:
qty1 *= product_uom_factor / (production.bom_id.product_qty or 1.0)
if production.product_uos_qty:
qty2 *= product_uos_factor / (production.bom_id.product_uos_qty or 1.0)
data = {
'name': 'PROD:'+production.name,
'date': production.date_planned,
'product_id': sub_product.product_id.id,
'product_uom_qty': qty1,
'product_uom': sub_product.product_uom.id,
'product_uos_qty': qty2,
'product_uos': production.product_uos and production.product_uos.id or False,
'location_id': source,
'location_dest_id': production.location_dest_id.id,
'move_dest_id': production.move_prod_id.id,
'production_id': production.id
}
move_id = move_obj.create(cr, uid, data, context=context)
move_obj.action_confirm(cr, uid, [move_id], context=context)
return picking_id
def _get_subproduct_factor(self, cr, uid, production_id, move_id=None, context=None):
"""Compute the factor to compute the qty of procucts to produce for the given production_id. By default,
it's always equal to the quantity encoded in the production order or the production wizard, but with
the module mrp_byproduct installed it can differ for byproducts having type 'variable'.
:param production_id: ID of the mrp.order
:param move_id: ID of the stock move that needs to be produced. Identify the product to produce.
:return: The factor to apply to the quantity that we should produce for the given production order and stock move.
"""
sub_obj = self.pool.get('mrp.subproduct')
move_obj = self.pool.get('stock.move')
production_obj = self.pool.get('mrp.production')
production_browse = production_obj.browse(cr, uid, production_id, context=context)
move_browse = move_obj.browse(cr, uid, move_id, context=context)
subproduct_factor = 1
sub_id = sub_obj.search(cr, uid,[('product_id', '=', move_browse.product_id.id),('bom_id', '=', production_browse.bom_id.id), ('subproduct_type', '=', 'variable')], context=context)
if sub_id:
subproduct_record = sub_obj.browse(cr ,uid, sub_id[0], context=context)
if subproduct_record.bom_id.product_qty:
subproduct_factor = subproduct_record.product_qty / subproduct_record.bom_id.product_qty
return subproduct_factor
return super(mrp_production, self)._get_subproduct_factor(cr, uid, production_id, move_id, context=context)
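# Worked example for the 'variable' byproduct factor above (numbers are
# illustrative only): if the BoM produces 10 units of the main product and
# lists 3 units of a byproduct with subproduct_type 'variable', then
# subproduct_factor = 3.0 / 10.0 = 0.3, so a production order for 20 units
# yields 20 * 0.3 = 6 units of the byproduct.  A 'fixed' byproduct would stay
# at 3 units regardless of the ordered quantity.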
class change_production_qty(osv.osv_memory):
_inherit = 'change.production.qty'
def _update_product_to_produce(self, cr, uid, prod, qty, context=None):
bom_obj = self.pool.get('mrp.bom')
move_lines_obj = self.pool.get('stock.move')
prod_obj = self.pool.get('mrp.production')
for m in prod.move_created_ids:
if m.product_id.id == prod.product_id.id:
move_lines_obj.write(cr, uid, [m.id], {'product_uom_qty': qty})
else:
for sub_product_line in prod.bom_id.sub_products:
if sub_product_line.product_id.id == m.product_id.id:
factor = prod_obj._get_subproduct_factor(cr, uid, prod.id, m.id, context=context)
subproduct_qty = sub_product_line.subproduct_type == 'variable' and qty * factor or sub_product_line.product_qty
move_lines_obj.write(cr, uid, [m.id], {'product_uom_qty': subproduct_qty})
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 2,477,407,298,728,196,000 | -5,482,568,783,677,347,000 | 53.463415 | 218 | 0.620242 | false |
Erotemic/utool | utool/util_decor.py | 1 | 35459 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from six.moves import builtins
import inspect
import textwrap
import six
import sys
import functools
import os
from utool import util_print
from utool import util_time
from utool import util_iter
from utool import util_dbg
from utool import util_arg
from utool import util_type
from utool import util_inject
from utool._internal import meta_util_six
(print, rrr, profile) = util_inject.inject2(__name__, '[decor]')
if util_type.HAVE_NUMPY:
import numpy as np
# Commandline to toggle certain convinience decorators
SIG_PRESERVE = util_arg.get_argflag('--sigpreserve')
#SIG_PRESERVE = not util_arg.SAFE or util_arg.get_argflag('--sigpreserve')
ONEX_REPORT_INPUT = '--onex-report-input' in sys.argv
#IGNORE_TRACEBACK = '--smalltb' in sys.argv or '--ignoretb' in sys.argv
# FIXME: dupliated in _internal/py2_syntax_funcs
IGNORE_TRACEBACK = not ('--nosmalltb' in sys.argv or '--noignoretb' in sys.argv)
#if util_arg.STRICT:
# IGNORE_TRACEBACK = False
# do not ignore traceback when profiling
PROFILING = hasattr(builtins, 'profile')
UNIQUE_NUMPY = True
NOINDENT_DECOR = False
#os.environ.get('UTOOL_AUTOGEN_SPHINX_RUNNING', 'OFF')
#def composed(*decs):
# """ combines multiple decorators """
# def deco(f):
# for dec in reversed(decs):
# f = dec(f)
# return f
# return deco
def test_ignore_exec_traceback():
r"""
CommandLine:
python -m utool.util_decor --test-test_ignore_exec_traceback
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_decor import * # NOQA
>>> result = test_ignore_exec_traceback()
>>> print(result)
"""
import utool as ut
@ut.indent_func
def foobar():
print('foobar')
        raise AssertionError('This error is expected')
try:
print('printing foobar')
foobar()
except AssertionError as ex:
#import sys
#exc_type, exc_value, exc_traceback = sys.exc_info()
#print(exc_traceback)
# TODO: ensure decorators are not printed in stack trace
ut.printex(ex, 'There is no error. This is a test', tb=True)
if six.PY2:
# Use version that has special python2 only syntax.
# can not include it here for that reason
from utool._internal import py2_syntax_funcs
ignores_exc_tb = py2_syntax_funcs.ignores_exc_tb
else:
def ignores_exc_tb(*args, **kwargs):
"""
PYTHON 3 VERSION
        ignore_exc_tb decorates a function and removes both itself
and the function from any exception traceback that occurs.
This is useful to decorate other trivial decorators
which are polluting your stacktrace.
if IGNORE_TRACEBACK is False then this decorator does nothing
(and it should do nothing in production code!)
References:
https://github.com/jcrocholl/pep8/issues/34 # NOQA
http://legacy.python.org/dev/peps/pep-3109/
"""
outer_wrapper = kwargs.get('outer_wrapper', True)
def ignores_exc_tb_closure(func):
# HACK JUST TURN THIS OFF
return func
if not IGNORE_TRACEBACK:
            # if the global enforces that we should not ignore any tracebacks
            # then just return the original function without any modification
return func
#@wraps(func)
def wrp_noexectb(*args, **kwargs):
try:
#import utool
#if utool.DEBUG:
# print('[IN IGNORETB] args=%r' % (args,))
# print('[IN IGNORETB] kwargs=%r' % (kwargs,))
return func(*args, **kwargs)
except Exception:
# PYTHON 3.3 NEW METHODS
exc_type, exc_value, exc_traceback = sys.exc_info()
# Code to remove this decorator from traceback
# Remove two levels to remove this one as well
exc_type, exc_value, exc_traceback = sys.exc_info()
try:
exc_traceback = exc_traceback.tb_next
exc_traceback = exc_traceback.tb_next
except Exception:
pass
ex = exc_type(exc_value)
ex.__traceback__ = exc_traceback
raise ex
if outer_wrapper:
wrp_noexectb = preserve_sig(wrp_noexectb, func)
return wrp_noexectb
if len(args) == 1:
# called with one arg means its a function call
func = args[0]
return ignores_exc_tb_closure(func)
else:
# called with no args means kwargs as specified
return ignores_exc_tb_closure
# NEW PYTHON 2.7/3 VERSION
#def ignores_exc_tb(*args, **kwargs):
# """
    # ignore_exc_tb decorates a function and removes both itself
# and the function from any exception traceback that occurs.
# This is useful to decorate other trivial decorators
# which are polluting your stacktrace.
# if IGNORE_TRACEBACK is False then this decorator does nothing
# (and it should do nothing in production code!)
# References:
# https://github.com/jcrocholl/pep8/issues/34 # NOQA
# http://legacy.python.org/dev/peps/pep-3109/
# """
# outer_wrapper = kwargs.get('outer_wrapper', True)
# def ignores_exc_tb_closure(func):
# if not IGNORE_TRACEBACK:
    # # if the global enforces that we should not ignore any tracebacks
    # # then just return the original function without any modification
# return func
# if six.PY2:
# #python2_func = """
# common_wrp_noexcept_tb = """
# def wrp_noexectb(*args, **kwargs):
# try:
# return func(*args, **kwargs)
# except Exception:
# exc_type, exc_value, exc_traceback = sys.exc_info()
# # Code to remove this decorator from traceback
# # Remove two levels to remove this one as well
# exc_type, exc_value, exc_traceback = sys.exc_info()
# try:
# exc_traceback = exc_traceback.tb_next
# exc_traceback = exc_traceback.tb_next
# except Exception:
# pass
# """
# if six.PY2:
# python2_reraise = """
# raise exc_type, exc_value, exc_traceback
# """
# six_reraise = python2_reraise
# elif six.PY3:
# python3_reraise = """
# ex = exc_type(exc_value)
# ex.__traceback__ = exc_traceback
# raise ex
# """
# six_reraise = python3_reraise
# wrp_noexcept_tb_codeblock = common_wrp_noexcept_tb + six_reraise
# globals_ = globals()
# locals_ = locals()
# six.exec_(wrp_noexcept_tb_codeblock, globals_, locals_)
# wrp_noexectb = locals_['wrp_noexectb']
# if outer_wrapper:
# wrp_noexectb = preserve_sig(wrp_noexectb, func)
# return wrp_noexectb
# if len(args) == 1:
# # called with one arg means its a function call
# func = args[0]
# return ignores_exc_tb_closure(func)
# else:
# # called with no args means kwargs as specified
# return ignores_exc_tb_closure
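# Minimal usage sketch (illustrative, never called): ignores_exc_tb can be
# applied bare or with keyword arguments such as outer_wrapper.
def _ignores_exc_tb_example():
    @ignores_exc_tb
    def plain(x):
        return x + 1

    @ignores_exc_tb(outer_wrapper=False)
    def unwrapped(x):
        return x + 1

    return plain(1), unwrapped(1)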
def on_exception_report_input(func_=None, force=False, keys=None):
"""
If an error is thrown in the scope of this function's stack frame then the
decorated function name and the arguments passed to it will be printed to
the utool print function.
"""
def _closure_onexceptreport(func):
if not ONEX_REPORT_INPUT and not force:
return func
@ignores_exc_tb(outer_wrapper=False)
#@wraps(func)
def wrp_onexceptreport(*args, **kwargs):
try:
#import utool
#if utool.DEBUG:
# print('[IN EXCPRPT] args=%r' % (args,))
# print('[IN EXCPRPT] kwargs=%r' % (kwargs,))
return func(*args, **kwargs)
except Exception as ex:
from utool import util_str
print('ERROR occured! Reporting input to function')
if keys is not None:
from utool import util_inspect
from utool import util_list
from utool import util_dict
argspec = util_inspect.get_func_argspec(func)
in_kwargs_flags = [key in kwargs for key in keys]
kwarg_keys = util_list.compress(keys, in_kwargs_flags)
kwarg_vals = [kwargs.get(key) for key in kwarg_keys]
flags = util_list.not_list(in_kwargs_flags)
arg_keys = util_list.compress(keys, flags)
arg_idxs = [argspec.args.index(key) for key in arg_keys]
num_nodefault = len(argspec.args) - len(argspec.defaults)
default_vals = (([None] * (num_nodefault)) +
list(argspec.defaults))
args_ = list(args) + default_vals[len(args) + 1:]
arg_vals = util_list.take(args_, arg_idxs)
requested_dict = dict(util_list.flatten(
[zip(kwarg_keys, kwarg_vals), zip(arg_keys, arg_vals)]))
print('input dict = ' + util_str.repr4(
util_dict.dict_subset(requested_dict, keys)))
# (print out specific keys only)
pass
arg_strs = ', '.join([repr(util_str.truncate_str(str(arg)))
for arg in args])
kwarg_strs = ', '.join([
util_str.truncate_str('%s=%r' % (key, val))
for key, val in six.iteritems(kwargs)])
msg = ('\nERROR: funcname=%r,\n * args=%s,\n * kwargs=%r\n' % (
meta_util_six.get_funcname(func), arg_strs, kwarg_strs))
msg += ' * len(args) = %r\n' % len(args)
msg += ' * len(kwargs) = %r\n' % len(kwargs)
util_dbg.printex(ex, msg, pad_stdout=True)
raise
wrp_onexceptreport = preserve_sig(wrp_onexceptreport, func)
return wrp_onexceptreport
if func_ is None:
return _closure_onexceptreport
else:
return _closure_onexceptreport(func_)
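# Usage sketch (illustrative, never called): force=True bypasses the
# --onex-report-input flag, and keys=['y'] limits the reported inputs to the
# named argument if the call raises.
def _on_exception_report_input_example():
    @on_exception_report_input(force=True, keys=['y'])
    def divide(x, y):
        return x / y

    return divide(6, y=2)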
def debug_function_exceptions(func):
def _wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as ex:
import utool as ut
ut.printex(ex)
import inspect # NOQA
trace = inspect.trace()
locals_ = trace[-1][0].f_locals
print('-- <TRACE LOCALS> --')
for level, t in enumerate(trace[1:]):
frame = t[0]
locals_ = frame.f_locals
local_repr_dict = {key: ut.trunc_repr(val)
for key, val in locals_.items()}
print('LOCALS LEVEL %d' % (level,))
print(ut.repr3(local_repr_dict, strvals=True, nl=1))
print('-- </TRACE LOCALS> --')
#import utool
#utool.embed()
raise
return _wrapper
#class DebugContext(object):
# def __enter__():
# pass
# def __exit__(self, exc_type, exc_value, exc_traceback):
# pass
def _indent_decor(lbl):
"""
does the actual work of indent_func
"""
def closure_indent(func):
if util_arg.TRACE:
@ignores_exc_tb(outer_wrapper=False)
#@wraps(func)
def wrp_indent(*args, **kwargs):
with util_print.Indenter(lbl):
print(' ...trace[in]')
ret = func(*args, **kwargs)
print(' ...trace[out]')
return ret
else:
@ignores_exc_tb(outer_wrapper=False)
#@wraps(func)
def wrp_indent(*args, **kwargs):
with util_print.Indenter(lbl):
ret = func(*args, **kwargs)
return ret
wrp_indent_ = ignores_exc_tb(wrp_indent)
wrp_indent_ = preserve_sig(wrp_indent, func)
return wrp_indent_
return closure_indent
def indent_func(input_):
"""
Takes either no arguments or an alias label
"""
if isinstance(input_, six.string_types):
# A label was specified
lbl = input_
return _indent_decor(lbl)
elif isinstance(input_, (bool, tuple)):
        # Allow individually turning off this decorator
func = input_
return func
else:
# Use the function name as the label
func = input_
lbl = '[' + meta_util_six.get_funcname(func) + ']'
return _indent_decor(lbl)(func)
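# Usage sketch (illustrative, never called): indent_func works bare (the label
# defaults to the function name) or with an explicit label string; anything
# printed inside the call is indented by util_print.Indenter.
def _indent_func_example():
    @indent_func
    def step_one():
        print('inside step_one')

    @indent_func('[custom-label]')
    def step_two():
        print('inside step_two')

    step_one()
    step_two()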
def tracefunc_xml(func):
"""
Causes output of function to be printed in an XML style block
"""
funcname = meta_util_six.get_funcname(func)
def wrp_tracefunc2(*args, **kwargs):
verbose = kwargs.get('verbose', True)
if verbose:
print('<%s>' % (funcname,))
with util_print.Indenter(' '):
ret = func(*args, **kwargs)
if verbose:
print('</%s>' % (funcname,))
return ret
wrp_tracefunc2_ = ignores_exc_tb(wrp_tracefunc2)
wrp_tracefunc2_ = preserve_sig(wrp_tracefunc2_, func)
return wrp_tracefunc2_
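# Usage sketch (illustrative, never called): the call is wrapped in
# <funcname> ... </funcname> tags and anything printed inside is indented.
def _tracefunc_xml_example():
    @tracefunc_xml
    def compute(x):
        print('computing')
        return x * 2

    return compute(3)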
#----------
def accepts_scalar_input(func):
"""
    DEPRECATE in favor of accepts_scalar_input2
only accepts one input as vector
accepts_scalar_input is a decorator which expects to be used on class
methods. It lets the user pass either a vector or a scalar to a function,
as long as the function treats everything like a vector. Input and output
is sanitized to the user expected format on return.
Args:
func (func):
Returns:
func: wrp_asi
CommandLine:
python -m utool.util_decor --test-accepts_scalar_input
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_decor import * # NOQA
>>> @accepts_scalar_input
... def foobar(self, list_):
... return [x + 1 for x in list_]
>>> self = None # dummy self because this decorator is for classes
>>> assert 2 == foobar(self, 1)
>>> assert [2, 3] == foobar(self, [1, 2])
"""
#@on_exception_report_input
@ignores_exc_tb(outer_wrapper=False)
#@wraps(func)
def wrp_asi(self, input_, *args, **kwargs):
#if HAVE_PANDAS:
# if isinstance(input_, (pd.DataFrame, pd.Series)):
# input_ = input_.values
if util_iter.isiterable(input_):
# If input is already iterable do default behavior
return func(self, input_, *args, **kwargs)
else:
# If input is scalar, wrap input, execute, and unpack result
#ret = func(self, (input_,), *args, **kwargs)
ret = func(self, [input_], *args, **kwargs)
if ret is not None:
return ret[0]
wrp_asi = preserve_sig(wrp_asi, func)
return wrp_asi
def accepts_scalar_input2(argx_list=[0], outer_wrapper=True):
r"""
FIXME: change to better name. Complete implementation.
used in IBEIS setters
accepts_scalar_input2 is a decorator which expects to be used on class
methods. It lets the user pass either a vector or a scalar to a function,
as long as the function treats everything like a vector. Input and output
is sanitized to the user expected format on return.
Args:
argx_list (list): indexes of args that could be passed in as scalars to
code that operates on lists. Ensures that decorated function gets
the argument as an iterable.
"""
assert isinstance(argx_list, (list, tuple)), (
'accepts_scalar_input2 must be called with argument positions')
def closure_asi2(func):
#@on_exception_report_input
@ignores_exc_tb(outer_wrapper=False)
def wrp_asi2(self, *args, **kwargs):
# Hack in case wrapping a function with varargs
argx_list_ = [argx for argx in argx_list if argx < len(args)]
__assert_param_consistency(args, argx_list_)
if all([util_iter.isiterable(args[ix]) for ix in argx_list_]):
# If input is already iterable do default behavior
return func(self, *args, **kwargs)
else:
# If input is scalar, wrap input, execute, and unpack result
args_wrapped = [(arg,) if ix in argx_list_ else arg
for ix, arg in enumerate(args)]
ret = func(self, *args_wrapped, **kwargs)
if ret is not None:
return ret[0]
if outer_wrapper:
wrp_asi2 = on_exception_report_input(preserve_sig(wrp_asi2, func))
return wrp_asi2
return closure_asi2
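# Usage sketch (illustrative): with argx_list=[0], the first argument after
# self may be a scalar or a list; the wrapped method always receives a list.
class _AcceptsScalarInput2Demo(object):
    @accepts_scalar_input2(argx_list=[0])
    def increment(self, id_list):
        return [id_ + 1 for id_ in id_list]

# _AcceptsScalarInput2Demo().increment(3) == 4
# _AcceptsScalarInput2Demo().increment([3, 4]) == [4, 5]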
def __assert_param_consistency(args, argx_list_):
"""
debugging function for accepts_scalar_input2
checks to make sure all the iterable inputs are of the same length
"""
if util_arg.NO_ASSERTS:
return
if len(argx_list_) == 0:
return True
argx_flags = [util_iter.isiterable(args[argx]) for argx in argx_list_]
try:
assert all([argx_flags[0] == flag for flag in argx_flags]), (
'invalid mixing of iterable and scalar inputs')
except AssertionError as ex:
print('!!! ASSERTION ERROR IN UTIL_DECOR !!!')
for argx in argx_list_:
print('[util_decor] args[%d] = %r' % (argx, args[argx]))
raise ex
def accepts_scalar_input_vector_output(func):
"""
    DEPRECATE IN FAVOR OF accepts_scalar_input2
accepts_scalar_input_vector_output
Notes:
        Input: Expected Output 1to1 Expected Output 1toM
scalar : 1 x [X]
n element list : [1, 2, 3] [x, y, z] [[X], [Y], [Z]]
1 element list : [1] [x] [[X]]
0 element list : [] [] []
        There seems to be no real issue here. I believe the thing that tripped
        me up was when using SQL and getting multiple columns: the values come
        back inside of the N-tuple, whereas when you get one column you get
        one element inside of a 1-tuple, so that still makes sense. There was
        something where you couldn't unpack it because it was already
        empty...
"""
@ignores_exc_tb(outer_wrapper=False)
#@wraps(func)
def wrp_asivo(self, input_, *args, **kwargs):
#import utool
#if utool.DEBUG:
# print('[IN SIVO] args=%r' % (args,))
# print('[IN SIVO] kwargs=%r' % (kwargs,))
if util_iter.isiterable(input_):
# If input is already iterable do default behavior
return func(self, input_, *args, **kwargs)
else:
# If input is scalar, wrap input, execute, and unpack result
result = func(self, (input_,), *args, **kwargs)
# The output length could be 0 on a scalar input
if len(result) == 0:
return []
else:
assert len(result) == 1, 'error in asivo'
return result[0]
return wrp_asivo
# TODO: Rename to listget_1to1 1toM etc...
getter_1to1 = accepts_scalar_input
getter_1toM = accepts_scalar_input_vector_output
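# Usage sketch (illustrative): getter_1toM unpacks the single result for a
# scalar input but keeps the list-of-lists shape for list input.
class _Getter1toMDemo(object):
    @getter_1toM
    def get_digits(self, number_list):
        return [[int(c) for c in str(n)] for n in number_list]

# _Getter1toMDemo().get_digits(42) == [4, 2]
# _Getter1toMDemo().get_digits([42, 7]) == [[4, 2], [7]]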
#----------
def accepts_numpy(func):
""" Allows the first input to be a numpy array and get result in numpy form """
#@ignores_exc_tb
#@wraps(func)
def wrp_accepts_numpy(self, input_, *args, **kwargs):
if not (util_type.HAVE_NUMPY and isinstance(input_, np.ndarray)):
# If the input is not numpy, just call the function
return func(self, input_, *args, **kwargs)
else:
# TODO: use a variant of util_list.unflat_unique_rowid_map
            # If the input is a numpy array, return the output with the same
# shape as the input
if UNIQUE_NUMPY:
# Remove redundant input (because we are passing it to SQL)
input_list, inverse_unique = np.unique(input_, return_inverse=True)
else:
input_list = input_.flatten()
# Call the function in list format
# TODO: is this necessary?
input_list = input_list.tolist()
output_list = func(self, input_list, *args, **kwargs)
# Put the output back into numpy
if UNIQUE_NUMPY:
# Reconstruct redundant queries
output_arr = np.array(output_list)[inverse_unique]
output_shape = tuple(list(input_.shape) + list(output_arr.shape[1:]))
return np.array(output_arr).reshape(output_shape)
else:
return np.array(output_list).reshape(input_.shape)
wrp_accepts_numpy = preserve_sig(wrp_accepts_numpy, func)
return wrp_accepts_numpy
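# Usage sketch (illustrative): a list-based getter can be fed a numpy array
# and the result comes back with the same shape as the input array.
class _AcceptsNumpyDemo(object):
    @accepts_numpy
    def double(self, id_list):
        return [id_ * 2 for id_ in id_list]

# With numpy available:
#   _AcceptsNumpyDemo().double(np.array([[1, 2], [3, 4]]))
#   -> array([[2, 4], [6, 8]])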
def memoize_nonzero(func):
"""
Memoization decorator for functions taking a nonzero number of arguments.
References:
http://code.activestate.com/recipes/578231-fastest-memoization-decorator
"""
class _memorizer(dict):
def __init__(self, func):
self.func = func
def __call__(self, *args):
return self[args]
def __missing__(self, key):
ret = self[key] = self.func(*key)
return ret
return _memorizer(func)
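# Usage sketch (illustrative, never called): the second call with the same
# positional arguments is served from the cache, so the body runs only once.
def _memoize_nonzero_example():
    calls = []

    @memoize_nonzero
    def slow_add(a, b):
        calls.append((a, b))
        return a + b

    slow_add(1, 2)
    slow_add(1, 2)
    return len(calls)  # 1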
def memoize_single(func):
""" Memoization decorator for a function taking a single argument
References:
http://code.activestate.com/recipes/578231-fastest-memoization-decorator
"""
class memodict_single(dict):
def __missing__(self, key):
ret = self[key] = func(key)
return ret
return memodict_single().__getitem__
def memoize_zero(func):
""" Memoization decorator for a function taking no arguments """
wrp_memoize_single = memoize_single(func)
def wrp_memoize_zero():
return wrp_memoize_single(None)
return wrp_memoize_zero
def memoize(func):
"""
simple memoization decorator
References:
https://wiki.python.org/moin/PythonDecoratorLibrary#Memoize
Args:
func (function): live python function
Returns:
func:
CommandLine:
python -m utool.util_decor memoize
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_decor import * # NOQA
>>> import utool as ut
>>> closure = {'a': 'b', 'c': 'd'}
>>> incr = [0]
>>> def foo(key):
>>> value = closure[key]
>>> incr[0] += 1
>>> return value
>>> foo_memo = memoize(foo)
>>> assert foo('a') == 'b' and foo('c') == 'd'
>>> assert incr[0] == 2
>>> print('Call memoized version')
>>> assert foo_memo('a') == 'b' and foo_memo('c') == 'd'
>>> assert incr[0] == 4
>>> assert foo_memo('a') == 'b' and foo_memo('c') == 'd'
>>> print('Counter should no longer increase')
>>> assert incr[0] == 4
>>> print('Closure changes result without memoization')
>>> closure = {'a': 0, 'c': 1}
>>> assert foo('a') == 0 and foo('c') == 1
>>> assert incr[0] == 6
>>> assert foo_memo('a') == 'b' and foo_memo('c') == 'd'
"""
cache = func._util_decor_memoize_cache = {}
# @functools.wraps(func)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = func(*args, **kwargs)
return cache[key]
memoizer = preserve_sig(memoizer, func)
memoizer.cache = cache
return memoizer
def interested(func):
@indent_func
#@ignores_exc_tb
#@wraps(func)
def wrp_interested(*args, **kwargs):
sys.stdout.write('#\n')
sys.stdout.write('#\n')
sys.stdout.write(
'<!INTERESTED>: ' + meta_util_six.get_funcname(func) + '\n')
print('INTERESTING... ' + (' ' * 30) + ' <----')
return func(*args, **kwargs)
return wrp_interested
def tracefunc(func):
lbl = '[trace.' + meta_util_six.get_funcname(func) + ']'
def wrp_tracefunc(*args, **kwargs):
print(lbl + ' +--- ENTER ---')
with util_print.Indenter(lbl + ' |'):
ret = func(*args, **kwargs)
print(lbl + ' L___ EXIT ____')
return ret
return wrp_tracefunc
def show_return_value(func):
from utool.util_str import func_str
#@wraps(func)
def wrp_show_return_value(*args, **kwargs):
ret = func(*args, **kwargs)
#print('%s(*%r, **%r) returns %r' % (meta_util_six.get_funcname(func), args, kwargs, rv))
print(func_str(func, args, kwargs) + ' -> ret=%r' % (ret,))
return ret
return wrp_show_return_value
def time_func(func):
#@wraps(func)
def wrp_time(*args, **kwargs):
with util_time.Timer(meta_util_six.get_funcname(func)):
return func(*args, **kwargs)
wrp_time = preserve_sig(wrp_time, func)
return wrp_time
#def rename_func(newname):
# import utool as ut
# return ut.partial(ut.set_funcname, newname=newname)
#class copy_argspec(object):
# """
# copy_argspec is a signature modifying decorator.
# Specifically, it copies the signature from `source_func` to the wrapper, and
# the wrapper will call the original function (which should be using *args,
# **kwds). The argspec, docstring, and default values are copied from
# src_func, and __module__ and __dict__ from tgt_func.
# .. References
# http://stackoverflow.com/questions/18625510/how-can-i-programmatically-change-the-argspec-of-a-function-not-in-a-python-de
# """
# def __init__(self, src_func):
# self.argspec = inspect.getargspec(src_func)
# self.src_doc = src_func.__doc__
# self.src_defaults = src_func.func_defaults
# def __call__(self, tgt_func):
# try:
# tgt_argspec = inspect.getargspec(tgt_func)
# need_self = False
# if len(tgt_argspec) > 0 and len(tgt_argspec[0]) > 0 and tgt_argspec[0][0] == 'self':
# need_self = True
# name = tgt_func.__name__
# argspec = self.argspec
# if len(argspec) > 0 and len(argspec[0]) > 0 and argspec[0][0] == 'self':
# need_self = False
# if need_self:
# newargspec = (['self'] + argspec[0],) + argspec[1:]
# else:
# newargspec = argspec
# signature = inspect.formatargspec(formatvalue=lambda val: "",
# *newargspec)[1:-1]
# new_func = (
# 'def _wrapper_({signature}):\n'
# ' return {tgt_func}({signature})'
# ).format(signature=signature, tgt_func='tgt_func')
# evaldict = {'tgt_func' : tgt_func}
# exec new_func in evaldict
# wrapped = evaldict['_wrapper_']
# wrapped.__name__ = name
# wrapped.__doc__ = self.src_doc
# wrapped.func_defaults = self.src_defaults
# wrapped.__module__ = tgt_func.__module__
# wrapped.__dict__ = tgt_func.__dict__
# return wrapped
# except Exception as ex:
# util_dbg.printex(ex, 'error wrapping: %r' % (tgt_func,))
# raise
def lazyfunc(func):
"""
Returns a memcached version of a function
"""
closuremem_ = [{}]
def wrapper(*args, **kwargs):
mem = closuremem_[0]
key = (repr(args), repr(kwargs))
try:
return mem[key]
except KeyError:
mem[key] = func(*args, **kwargs)
return mem[key]
return wrapper
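# Usage sketch (illustrative, never called): results are cached per
# (repr(args), repr(kwargs)) key, so equal calls reuse the first result.
def _lazyfunc_example():
    calls = []

    @lazyfunc
    def expensive(x, scale=1):
        calls.append(x)
        return x * scale

    expensive(3, scale=2)
    expensive(3, scale=2)
    return len(calls)  # 1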
def apply_docstr(docstr_func):
"""
    Changes the docstring of one function to that of another
"""
def docstr_applier(func):
#docstr = meta_util_six.get_funcdoc(docstr_func)
#meta_util_six.set_funcdoc(func, docstr)
if isinstance(docstr_func, six.string_types):
olddoc = meta_util_six.get_funcdoc(func)
if olddoc is None:
olddoc = ''
newdoc = olddoc + docstr_func
meta_util_six.set_funcdoc(func, newdoc)
return func
else:
preserved_func = preserve_sig(func, docstr_func)
return preserved_func
return docstr_applier
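# Usage sketch (illustrative, never called): passing a string appends it to
# the decorated function's docstring; passing a function copies its signature
# information via preserve_sig instead.
def _apply_docstr_example():
    @apply_docstr('Extra note appended to the original docstring.')
    def documented():
        """ Original docstring. """
        return True

    return documented.__doc__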
def preserve_sig(wrapper, orig_func, force=False):
"""
Decorates a wrapper function.
    It seems impossible to preserve signatures in python 2 without eval
(Maybe another option is to write to a temporary module?)
Args:
wrapper: the function wrapping orig_func to change the signature of
orig_func: the original function to take the signature from
References:
http://emptysqua.re/blog/copying-a-python-functions-signature/
https://code.google.com/p/micheles/source/browse/decorator/src/decorator.py
TODO:
checkout funcsigs
https://funcsigs.readthedocs.org/en/latest/
CommandLine:
python -m utool.util_decor --test-preserve_sig
Example:
>>> # ENABLE_DOCTEST
>>> import utool as ut
>>> #ut.rrrr(False)
>>> def myfunction(self, listinput_, arg1, *args, **kwargs):
>>> " just a test function "
>>> return [x + 1 for x in listinput_]
>>> #orig_func = ut.take
>>> orig_func = myfunction
>>> wrapper = ut.accepts_scalar_input2([0])(orig_func)
>>> _wrp_preserve1 = ut.preserve_sig(wrapper, orig_func, True)
>>> _wrp_preserve2 = ut.preserve_sig(wrapper, orig_func, False)
>>> print('_wrp_preserve2 = %r' % (_wrp_preserve1,))
>>> print('_wrp_preserve2 = %r' % (_wrp_preserve2,))
>>> #print('source _wrp_preserve1 = %s' % (ut.get_func_sourcecode(_wrp_preserve1),))
>>> #print('source _wrp_preserve2 = %s' % (ut.get_func_sourcecode(_wrp_preserve2)),)
>>> result = str(_wrp_preserve1)
>>> print(result)
"""
#if True:
# import functools
# return functools.wraps(orig_func)(wrapper)
from utool._internal import meta_util_six
from utool import util_str
from utool import util_inspect
if wrapper is orig_func:
# nothing to do
return orig_func
orig_docstr = meta_util_six.get_funcdoc(orig_func)
orig_docstr = '' if orig_docstr is None else orig_docstr
orig_argspec = util_inspect.get_func_argspec(orig_func)
wrap_name = meta_util_six.get_funccode(wrapper).co_name
orig_name = meta_util_six.get_funcname(orig_func)
# At the very least preserve info in a dictionary
_utinfo = {}
_utinfo['orig_func'] = orig_func
_utinfo['wrap_name'] = wrap_name
_utinfo['orig_name'] = orig_name
_utinfo['orig_argspec'] = orig_argspec
if hasattr(wrapper, '_utinfo'):
parent_wrapper_utinfo = wrapper._utinfo
_utinfo['parent_wrapper_utinfo'] = parent_wrapper_utinfo
if hasattr(orig_func, '_utinfo'):
parent_orig_utinfo = orig_func._utinfo
_utinfo['parent_orig_utinfo'] = parent_orig_utinfo
# environment variable is set if you are building documentation
# preserve sig if building docs
building_docs = os.environ.get('UTOOL_AUTOGEN_SPHINX_RUNNING', 'OFF') == 'ON'
if force or SIG_PRESERVE or building_docs:
# PRESERVES ALL SIGNATURES WITH EXECS
src_fmt = r'''
def _wrp_preserve{defsig}:
""" {orig_docstr} """
try:
return wrapper{callsig}
except Exception as ex:
import utool as ut
msg = ('Failure in signature preserving wrapper:\n')
ut.printex(ex, msg)
raise
'''
# Put wrapped function into a scope
globals_ = {'wrapper': wrapper}
locals_ = {}
# argspec is :ArgSpec(args=['bar', 'baz'], varargs=None, keywords=None,
# defaults=(True,))
# get orig functions argspec
# get functions signature
# Get function call signature (no defaults)
# Define an exec function
argspec = inspect.getargspec(orig_func)
(args, varargs, varkw, defaults) = argspec
defsig = inspect.formatargspec(*argspec)
callsig = inspect.formatargspec(*argspec[0:3])
# TODO:
# ut.func_defsig
# ut.func_callsig
src_fmtdict = dict(defsig=defsig, callsig=callsig, orig_docstr=orig_docstr)
src = textwrap.dedent(src_fmt).format(**src_fmtdict)
# Define the new function on the fly
# (I wish there was a non exec / eval way to do this)
#print(src)
code = compile(src, '<string>', 'exec')
six.exec_(code, globals_, locals_)
#six.exec_(src, globals_, locals_)
        # Use functools.update_wrapper to complete preservation
_wrp_preserve = functools.update_wrapper(locals_['_wrp_preserve'], orig_func)
# Keep debug info
_utinfo['src'] = src
# Set an internal sig variable that we may use
#_wrp_preserve.__sig__ = defsig
else:
# PRESERVES SOME SIGNATURES NO EXEC
# signature preservation is turned off. just preserve the name.
        # Does not use any exec or eval statements.
_wrp_preserve = functools.update_wrapper(wrapper, orig_func)
# Just do something to preserve signature
DEBUG_WRAPPED_DOCSTRING = False
if DEBUG_WRAPPED_DOCSTRING:
new_docstr_fmtstr = util_str.codeblock(
'''
Wrapped function {wrap_name}({orig_name})
orig_argspec = {orig_argspec}
orig_docstr = {orig_docstr}
'''
)
else:
new_docstr_fmtstr = util_str.codeblock(
'''
{orig_docstr}
'''
)
new_docstr = new_docstr_fmtstr.format(
wrap_name=wrap_name, orig_name=orig_name, orig_docstr=orig_docstr,
orig_argspec=orig_argspec)
meta_util_six.set_funcdoc(_wrp_preserve, new_docstr)
_wrp_preserve._utinfo = _utinfo
return _wrp_preserve
def dummy_args_decor(*args, **kwargs):
def dummy_args_closure(func):
return func
return dummy_args_closure
class classproperty(property):
"""
    Decorates a method, turning it into a class attribute
References:
https://stackoverflow.com/questions/1697501/python-staticmethod-with-property
"""
def __get__(self, cls, owner):
return classmethod(self.fget).__get__(None, owner)()
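# Usage sketch (illustrative): the decorated method can be read directly on
# the class, like a property that needs no instance.
class _ClassPropertyDemo(object):
    _name = 'demo'

    @classproperty
    def name(cls):
        return cls._name

# _ClassPropertyDemo.name == 'demo'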
if __name__ == '__main__':
"""
CommandLine:
python -c "import utool, utool.util_decor; utool.doctest_funcs(utool.util_decor)"
python -m utool.util_decor
python -m utool.util_decor --allexamples
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
| apache-2.0 | -2,126,047,882,031,010,800 | 6,217,000,917,167,990,000 | 35.745078 | 127 | 0.564145 | false |
phlax/pootle | pootle/apps/pootle_store/migrations/0001_initial.py | 5 | 7063 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
import translate.storage.base
import pootle_store.fields
import pootle.core.mixins.treeitem
from django.conf import settings
import django.db.models.fields.files
class Migration(migrations.Migration):
dependencies = [
('pootle_translationproject', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('pootle_app', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Store',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('file', django.db.models.fields.files.FileField(upload_to='', max_length=255, editable=False, db_index=True)),
('pootle_path', models.CharField(unique=True, max_length=255, verbose_name='Path', db_index=True)),
('name', models.CharField(max_length=128, editable=False)),
('file_mtime', models.DateTimeField(default=datetime.datetime(1, 1, 1, 0, 0, tzinfo=utc))),
('state', models.IntegerField(default=0, editable=False, db_index=True)),
('creation_time', models.DateTimeField(db_index=True, auto_now_add=True, null=True)),
('last_sync_revision', models.IntegerField(null=True, db_index=True)),
('obsolete', models.BooleanField(default=False)),
('parent', models.ForeignKey(related_name='child_stores', editable=False, to='pootle_app.Directory', on_delete=models.CASCADE)),
('translation_project', models.ForeignKey(related_name='stores', editable=False, to='pootle_translationproject.TranslationProject', on_delete=models.CASCADE)),
],
options={
'ordering': ['pootle_path'],
},
bases=(models.Model, pootle.core.mixins.treeitem.CachedTreeItem, translate.storage.base.TranslationStore),
),
migrations.CreateModel(
name='Unit',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('index', models.IntegerField(db_index=True)),
('unitid', models.TextField(editable=False)),
('unitid_hash', models.CharField(max_length=32, editable=False, db_index=True)),
('source_f', pootle_store.fields.MultiStringField(null=True)),
('source_hash', models.CharField(max_length=32, editable=False, db_index=True)),
('source_wordcount', models.SmallIntegerField(default=0, editable=False)),
('source_length', models.SmallIntegerField(default=0, editable=False, db_index=True)),
('target_f', pootle_store.fields.MultiStringField(null=True, blank=True)),
('target_wordcount', models.SmallIntegerField(default=0, editable=False)),
('target_length', models.SmallIntegerField(default=0, editable=False, db_index=True)),
('developer_comment', models.TextField(null=True, blank=True)),
('translator_comment', models.TextField(null=True, blank=True)),
('locations', models.TextField(null=True, editable=False)),
('context', models.TextField(null=True, editable=False)),
('state', models.IntegerField(default=0, db_index=True)),
('revision', models.IntegerField(default=0, db_index=True, blank=True)),
('creation_time', models.DateTimeField(db_index=True, auto_now_add=True, null=True)),
('mtime', models.DateTimeField(auto_now=True, auto_now_add=True, db_index=True)),
('submitted_on', models.DateTimeField(null=True, db_index=True)),
('commented_on', models.DateTimeField(null=True, db_index=True)),
('reviewed_on', models.DateTimeField(null=True, db_index=True)),
('commented_by', models.ForeignKey(related_name='commented', to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE)),
('reviewed_by', models.ForeignKey(related_name='reviewed', to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE)),
('store', models.ForeignKey(to='pootle_store.Store', on_delete=models.CASCADE)),
('submitted_by', models.ForeignKey(related_name='submitted', to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE)),
],
options={
'ordering': ['store', 'index'],
'get_latest_by': 'mtime',
},
bases=(models.Model, translate.storage.base.TranslationUnit),
),
migrations.CreateModel(
name='Suggestion',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('target_f', pootle_store.fields.MultiStringField()),
('target_hash', models.CharField(max_length=32, db_index=True)),
('translator_comment_f', models.TextField(null=True, blank=True)),
('state', models.CharField(default='pending', max_length=16, db_index=True, choices=[('pending', 'Pending'), ('accepted', 'Accepted'), ('rejected', 'Rejected')])),
('creation_time', models.DateTimeField(null=True, db_index=True)),
('review_time', models.DateTimeField(null=True, db_index=True)),
('unit', models.ForeignKey(to='pootle_store.Unit', on_delete=models.CASCADE)),
('reviewer', models.ForeignKey(related_name='reviews', to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE)),
('user', models.ForeignKey(related_name='suggestions', to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE)),
],
options={
},
bases=(models.Model, translate.storage.base.TranslationUnit),
),
migrations.CreateModel(
name='QualityCheck',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=64, db_index=True)),
('category', models.IntegerField(default=0)),
('message', models.TextField()),
('false_positive', models.BooleanField(default=False, db_index=True)),
('unit', models.ForeignKey(to='pootle_store.Unit', on_delete=models.CASCADE)),
],
options={
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='unit',
unique_together=set([('store', 'unitid_hash')]),
),
migrations.AlterUniqueTogether(
name='store',
unique_together=set([('parent', 'name')]),
),
]
| gpl-3.0 | -4,475,233,713,678,447,000 | -3,396,580,734,807,227,000 | 57.858333 | 179 | 0.60017 | false |
jimmycallin/master-thesis | architectures/nn_discourse_parser/nets/data_reader.py | 1 | 6857 | import json
import codecs
class DRelation(object):
"""Implicit discourse relation object
The object is created from the CoNLL-json formatted data.
    The format can be a bit clunky for getting certain information,
    so convenience methods should be implemented here, mostly for use
    by the feature functions.
"""
def __init__(self, relation_dict, parse):
self.relation_dict = relation_dict
self.parse = parse
self._arg_tokens = {}
self._arg_tokens[1] = None
self._arg_tokens[2] = None
self._arg_words = {}
self._arg_words[1] = None
self._arg_words[2] = None
self._arg_tree = {}
self._arg_tree[1] = None
self._arg_tree[2] = None
self._arg1_tree = None
self._arg1_tree_token_indices = None
self._arg2_tree = None
self._arg2_tree_token_indices = None
@property
def senses(self):
return self.relation_dict['Sense']
def arg_words(self, arg_pos):
"""Returns a list of Word objects"""
assert(arg_pos == 1 or arg_pos == 2)
if self._arg_words[arg_pos] is None:
key = 'Arg%s' % arg_pos
word_list = self.relation_dict[key]['TokenList']
self._arg_words[arg_pos] = [Word(x, self.parse[self.doc_id]) for x in word_list]
return self._arg_words[arg_pos]
def arg_tree(self, arg_pos):
"""Extract the tree for the argument
One tree only. Truncated as needed
Returns:
1) tree string
2) token indices (not address tuples) of that tree.
"""
assert(arg_pos == 1 or arg_pos == 2)
if self._arg_tree[arg_pos] is None:
trees, sentence_indices = self.arg_trees(arg_pos)
if arg_pos == 1:
tree = trees[-1]
sentence_index = sentence_indices[-1]
elif arg_pos == 2:
tree = trees[0]
sentence_index = sentence_indices[0]
key = 'Arg%s' % arg_pos
token_indices = [x[4] for x in self.relation_dict[key]['TokenList'] if x[3] == sentence_index]
self._arg_tree[arg_pos] = (tree, token_indices)
return self._arg_tree[arg_pos]
def arg_dtree_rule_list(self, arg_pos):
"""Returns a list of arcs in the dependency tree(s) for the arg """
assert(arg_pos == 1 or arg_pos == 2)
token_list = self.arg_token_addresses(arg_pos)
sentence_indices = set([x[3] for x in token_list])
sentence_index_to_dependency_tree = {}
for sentence_index in sentence_indices:
dependencies = \
self.parse[self.doc_id]['sentences'][sentence_index]['dependencies']
index_to_dependency = {}
# a dependency looks like this [u'prep', u'reported-8', u'In-1']
for dep in dependencies:
rel_type = dep[0]
head, _ = dep[1].rsplit('-', 1)
dependent, index = dep[2].rsplit('-', 1)
index_to_dependency[int(index)] = [rel_type, head, dependent]
sentence_index_to_dependency_tree[sentence_index] = index_to_dependency
rule_list = []
for token_address in token_list:
_, _, _, sentence_index, token_index = token_address
dtree = sentence_index_to_dependency_tree[sentence_index]
if token_index in dtree:
rule_list.append('_'.join(dtree[token_index]))
return rule_list
def arg_token_addresses(self, arg_pos):
assert(arg_pos == 1 or arg_pos == 2)
key = 'Arg%s' % arg_pos
return self.relation_dict[key]['TokenList']
@property
def doc_id(self):
return self.relation_dict['DocID']
@property
def relation_id(self):
return self.relation_dict['ID']
@property
def relation_type(self):
return self.relation_dict['Type']
@property
def doc_relation_id(self):
return '%s_%s' % (self.doc_id, self.relation_id)
def arg_tokens(self, arg_pos):
"""Returns a list of raw tokens"""
assert(arg_pos == 1 or arg_pos == 2)
if self._arg_tokens[arg_pos] is None:
key = 'Arg%s' % arg_pos
token_list = self.relation_dict[key]['TokenList']
self._arg_tokens[arg_pos] = [self.parse[self.doc_id]['sentences'][x[3]]['words'][x[4]][0] for x in token_list]
return self._arg_tokens[arg_pos]
def arg_trees(self, arg_pos):
key = 'Arg%s' % arg_pos
token_list = self.relation_dict[key]['TokenList']
sentence_indices = set([x[3] for x in token_list])
return [self.parse[self.doc_id]['sentences'][x]['parsetree'] for x in sentence_indices], list(sentence_indices)
def __repr__(self):
return self.relation_dict.__repr__()
def __str__(self):
return self.relation_dict.__str__()
class Word(object):
"""Word class wrapper
[u"'ve",
{u'CharacterOffsetBegin':2449,
u'CharacterOffsetEnd':2452,
u'Linkers':[u'arg2_15006',u'arg1_15008'],
u'PartOfSpeech':u'VBP'}]
"""
def __init__(self, word_address, parse):
self.word_address = word_address
self.word_token, self.word_info = parse['sentences'][word_address[3]]['words'][word_address[4]]
@property
def pos(self):
return self.word_info['PartOfSpeech']
@property
def lemma(self):
return self.word_info['Lemma']
@property
def sentence_index(self):
return self.word_address[3]
def extract_implicit_relations(data_folder, label_function=None):
#parse_file = '%s/pdtb-parses-plus.json' % data_folder
#parse_file = '%s/pdtb-parses.json' % data_folder
parse_file = '%s/parses.json' % data_folder
parse = json.load(codecs.open(parse_file, encoding='utf8'))
#relation_file = '%s/pdtb-data-plus.json' % data_folder
#relation_file = '%s/pdtb-data.json' % data_folder
relation_file = '%s/relations.json' % data_folder
relation_dicts = [json.loads(x) for x in open(relation_file)]
relations = [DRelation(x, parse) for x in relation_dicts if x['Type'] == 'Implicit']
if label_function is not None:
relations = [x for x in relations if label_function.label(x) is not None]
return relations
def extract_non_explicit_relations(data_folder, label_function=None):
parse_file = '%s/pdtb-parses.json' % data_folder
parse = json.load(codecs.open(parse_file, encoding='utf8'))
relation_file = '%s/pdtb-data.json' % data_folder
relation_dicts = [json.loads(x) for x in open(relation_file)]
relations = [DRelation(x, parse) for x in relation_dicts if x['Type'] != 'Explicit']
if label_function is not None:
relations = [x for x in relations if label_function.label(x) is not None]
return relations
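# Usage sketch (illustrative, never called): the folder name below is an
# assumption, not part of this module; it only needs to contain parses.json
# and relations.json in the CoNLL shared task format.
def _example_usage():
    relations = extract_implicit_relations('conll15st-train')
    first = relations[0]
    return first.senses, first.arg_tokens(1), first.arg_tokens(2)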
| mit | 5,645,178,244,180,681,000 | 3,801,957,238,516,310,000 | 35.473404 | 122 | 0.589616 | false |
m0re4u/LeRoT-SCLP | lerot/tests/test_utils.py | 1 | 4440 | # This file is part of Lerot.
#
# Lerot is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lerot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Lerot. If not, see <http://www.gnu.org/licenses/>.
import unittest
import cStringIO
import numpy as np
import lerot.query as query
import lerot.utils as utils
class TestUtils(unittest.TestCase):
def setUp(self):
pass
def testSplitArgStr(self):
split = utils.split_arg_str("--a 10 --b foo --c \"--d bar --e 42\"")
self.assertEqual(split, ["--a", "10", "--b", "foo", "--c",
"--d bar --e 42"], "wrong split (1): %s" % ", ".join(split))
split = utils.split_arg_str("\"--a\" 10 --b foo --c --d bar --e 42")
self.assertEqual(split, ["--a", "10", "--b", "foo", "--c", "--d",
"bar", "--e", "42"], "wrong split (2): %s" % ", ".join(split))
split = utils.split_arg_str("\"--a\"\" 10\"--b foo --c --d bar --e 42")
self.assertEqual(split, ["--a", " 10", "--b", "foo", "--c", "--d",
"bar", "--e", "42"], "wrong split (2): %s" % ", ".join(split))
def testRank(self):
scores = [2.1, 2.9, 2.3, 2.3, 5.5]
self.assertIn(utils.rank(scores, ties="random"),
[[0, 3, 1, 2, 4], [0, 3, 2, 1, 4]])
self.assertIn(utils.rank(scores, reverse=True, ties="random"),
[[4, 1, 3, 2, 0], [4, 1, 2, 3, 0]])
self.assertEqual(utils.rank(scores, reverse=True, ties="first"),
[4, 1, 2, 3, 0])
self.assertEqual(utils.rank(scores, reverse=True, ties="last"),
[4, 1, 3, 2, 0])
scores = [2.1, 2.9, 2.3, 2.3, 5.5, 2.9]
self.assertIn(utils.rank(scores, ties="random"),
[[0, 4, 2, 1, 5, 3],
[0, 3, 2, 1, 5, 4],
[0, 4, 1, 2, 5, 3],
[0, 3, 1, 2, 5, 4]])
self.assertIn(utils.rank(scores, reverse=True, ties="random"),
[[5, 1, 3, 4, 0, 2],
[5, 2, 3, 4, 0, 1],
[5, 1, 4, 3, 0, 2],
[5, 2, 4, 3, 0, 1]])
self.assertEqual(utils.rank(scores, reverse=True, ties="first"),
[5, 1, 3, 4, 0, 2])
self.assertEqual(utils.rank(scores, reverse=True, ties="last"),
[5, 2, 4, 3, 0, 1])
def test_create_ranking_vector(self):
feature_count = 5
# Create queries to test with
test_queries = """
1 qid:373 1:0.080000 2:0.500000 3:0.500000 4:0.500000 5:0.160000
0 qid:373 1:0.070000 2:0.180000 3:0.000000 4:0.250000 5:0.080000
0 qid:373 1:0.150000 2:0.016000 3:0.250000 4:0.250000 5:0.150000
0 qid:373 1:0.100000 2:0.250000 3:0.500000 4:0.750000 5:0.130000
0 qid:373 1:0.050000 2:0.080000 3:0.250000 4:0.250000 5:0.060000
0 qid:373 1:0.050000 2:1.000000 3:0.250000 4:0.250000 5:0.160000
"""
hard_gamma = [1, 0.63092975357, 0.5, 0.43067655807, 0.38685280723,
0.3562071871]
hard_ranking_vector = [0.27938574, 1.11639191, 1.02610328, 1.29150486,
0.42166665]
query_fh = cStringIO.StringIO(test_queries)
this_query = query.Queries(query_fh, feature_count)['373']
query_fh.close()
fake_ranking = sorted(this_query.get_docids())
# gamma, ranking_vector = utils.create_ranking_vector(
ranking_vector = utils.create_ranking_vector(
this_query, fake_ranking)
# self.assertEqual(len(gamma), len(hard_gamma))
self.assertEqual(feature_count, len(ranking_vector))
# for i in xrange(0, len(gamma)):
# self.assertAlmostEqual(gamma[i], hard_gamma[i])
for j in xrange(0, feature_count):
self.assertAlmostEqual(ranking_vector[j], hard_ranking_vector[j])
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 1,450,430,597,426,258,200 | -3,944,899,879,153,267,000 | 43.848485 | 79 | 0.537838 | false |
mclois/iteexe | twisted/python/compat.py | 17 | 5524 | # -*- test-case-name: twisted.test.test_compat -*-
#
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Compatibility module to provide backwards compatibility
for useful Python features.
This is mainly for use of internal Twisted code. We encourage you to use
the latest version of Python directly from your code, if possible.
"""
import sys, string, socket, struct
def inet_pton(af, addr):
if af == socket.AF_INET:
return socket.inet_aton(addr)
elif af == getattr(socket, 'AF_INET6', 'AF_INET6'):
        bad_characters = [x for x in addr if x not in string.hexdigits + ':.']
        if bad_characters:
            raise ValueError("Illegal characters: %r" % (''.join(bad_characters),))
parts = addr.split(':')
elided = parts.count('')
ipv4Component = '.' in parts[-1]
if len(parts) > (8 - ipv4Component) or elided > 3:
raise ValueError("Syntactically invalid address")
if elided == 3:
return '\x00' * 16
if elided:
zeros = ['0'] * (8 - len(parts) - ipv4Component + elided)
if addr.startswith('::'):
parts[:2] = zeros
elif addr.endswith('::'):
parts[-2:] = zeros
else:
idx = parts.index('')
parts[idx:idx+1] = zeros
if len(parts) != 8 - ipv4Component:
raise ValueError("Syntactically invalid address")
else:
if len(parts) != (8 - ipv4Component):
raise ValueError("Syntactically invalid address")
if ipv4Component:
if parts[-1].count('.') != 3:
raise ValueError("Syntactically invalid address")
rawipv4 = socket.inet_aton(parts[-1])
unpackedipv4 = struct.unpack('!HH', rawipv4)
parts[-1:] = [hex(x)[2:] for x in unpackedipv4]
parts = [int(x, 16) for x in parts]
return struct.pack('!8H', *parts)
else:
raise socket.error(97, 'Address family not supported by protocol')
def inet_ntop(af, addr):
if af == socket.AF_INET:
return socket.inet_ntoa(addr)
elif af == socket.AF_INET6:
if len(addr) != 16:
raise ValueError("address length incorrect")
parts = struct.unpack('!8H', addr)
curBase = bestBase = None
for i in range(8):
if not parts[i]:
if curBase is None:
curBase = i
curLen = 0
curLen += 1
else:
if curBase is not None:
if bestBase is None or curLen > bestLen:
bestBase = curBase
bestLen = curLen
curBase = None
if curBase is not None and (bestBase is None or curLen > bestLen):
bestBase = curBase
bestLen = curLen
parts = [hex(x)[2:] for x in parts]
if bestBase is not None:
parts[bestBase:bestBase + bestLen] = ['']
if parts[0] == '':
parts.insert(0, '')
if parts[-1] == '':
parts.insert(len(parts) - 1, '')
return ':'.join(parts)
else:
raise socket.error(97, 'Address family not supported by protocol')
try:
socket.inet_pton(socket.AF_INET6, "::")
except (AttributeError, NameError, socket.error):
socket.inet_pton = inet_pton
socket.inet_ntop = inet_ntop
socket.AF_INET6 = 'AF_INET6'
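def _example_inet_round_trip():
    """Hypothetical usage sketch (not part of the original Twisted module):
    round-trip an IPv6 address through the pure-Python fallbacks defined above.
    The address used here is an arbitrary placeholder.
    """
    packed = inet_pton(socket.AF_INET6, 'fe80::1')
    assert len(packed) == 16          # 8 groups of 16 bits, network byte order
    assert inet_ntop(socket.AF_INET6, packed) == 'fe80::1'
    return packed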
adict = dict
# OpenSSL/__init__.py imports OpenSSL.tsafe. OpenSSL/tsafe.py imports
# threading. threading imports thread. All to make this stupid threadsafe
# version of its Connection class. We don't even care about threadsafe
# Connections. In the interest of not screwing over some crazy person
# calling into OpenSSL from another thread and trying to use Twisted's SSL
# support, we don't totally destroy OpenSSL.tsafe, but we will replace it
# with our own version which imports threading as late as possible.
class tsafe(object):
class Connection:
"""
        OpenSSL.tsafe.Connection, defined in such a way as to not blow up.
"""
__module__ = 'OpenSSL.tsafe'
def __init__(self, *args):
from OpenSSL import SSL as _ssl
self._ssl_conn = apply(_ssl.Connection, args)
from threading import _RLock
self._lock = _RLock()
for f in ('get_context', 'pending', 'send', 'write', 'recv',
'read', 'renegotiate', 'bind', 'listen', 'connect',
'accept', 'setblocking', 'fileno', 'shutdown',
'close', 'get_cipher_list', 'getpeername',
'getsockname', 'getsockopt', 'setsockopt',
'makefile', 'get_app_data', 'set_app_data',
'state_string', 'sock_shutdown',
'get_peer_certificate', 'want_read', 'want_write',
'set_connect_state', 'set_accept_state',
'connect_ex', 'sendall'):
exec """def %s(self, *args):
self._lock.acquire()
try:
return apply(self._ssl_conn.%s, args)
finally:
self._lock.release()\n""" % (f, f)
sys.modules['OpenSSL.tsafe'] = tsafe
import operator
try:
operator.attrgetter
except AttributeError:
class attrgetter(object):
def __init__(self, name):
self.name = name
def __call__(self, obj):
return getattr(obj, self.name)
operator.attrgetter = attrgetter
| gpl-2.0 | -7,240,802,351,406,492,000 | -5,733,924,610,428,556,000 | 34.63871 | 75 | 0.552136 | false |
edx-solutions/edx-platform | common/test/acceptance/pages/common/auto_auth.py | 4 | 3779 | """
Auto-auth page (used to automatically log in during testing).
"""
import json
import os
from six.moves import urllib
from bok_choy.page_object import PageObject, unguarded
# The URL used for user auth in testing
HOSTNAME = os.environ.get('BOK_CHOY_HOSTNAME', 'localhost')
CMS_PORT = os.environ.get('BOK_CHOY_CMS_PORT', 8031)
AUTH_BASE_URL = os.environ.get('test_url', 'http://{}:{}'.format(HOSTNAME, CMS_PORT))
FULL_NAME = 'Test'
class AutoAuthPage(PageObject):
"""
The automatic authorization page.
When enabled via the Django settings file, visiting this url will create a user and log them in.
"""
# Internal cache for parsed user info.
_user_info = None
def __init__(self, browser, username=None, email=None, password=None, full_name=FULL_NAME, staff=False,
superuser=None, course_id=None, enrollment_mode=None, roles=None, no_login=False, is_active=True,
course_access_roles=None, should_manually_verify=False):
"""
Auto-auth is an end-point for HTTP GET requests.
By default, it will create accounts with random user credentials,
but you can also specify credentials using querystring parameters.
        `username`, `email`, and `password` are the user's credentials (strings).
        `full_name` is the value used for the profile's full name.
`staff` is a boolean indicating whether the user is global staff.
`superuser` is a boolean indicating whether the user is a super user.
`course_id` is the ID of the course to enroll the student in.
Currently, this has the form "org/number/run"
`should_manually_verify` is a boolean indicating whether the
created user should have their identification verified
Note that "global staff" is NOT the same as course staff.
"""
super(AutoAuthPage, self).__init__(browser)
# This will eventually hold the details about the user account
self._user_info = None
course_access_roles = course_access_roles or []
course_access_roles = ','.join(course_access_roles)
self._params = {
'full_name': full_name,
'staff': staff,
'superuser': superuser,
'is_active': is_active,
'course_access_roles': course_access_roles,
}
if username:
self._params['username'] = username
if email:
self._params['email'] = email
if password:
self._params['password'] = password
if superuser is not None:
self._params['superuser'] = "true" if superuser else "false"
if course_id:
self._params['course_id'] = course_id
if enrollment_mode:
self._params['enrollment_mode'] = enrollment_mode
if roles:
self._params['roles'] = roles
if no_login:
self._params['no_login'] = True
if should_manually_verify:
self._params['should_manually_verify'] = True
@property
def url(self):
"""
Construct the URL.
"""
url = AUTH_BASE_URL + "/auto_auth"
query_str = urllib.parse.urlencode(self._params)
if query_str:
url += "?" + query_str
return url
def is_browser_on_page(self):
return bool(self.user_info)
@property
@unguarded
def user_info(self):
"""A dictionary containing details about the user account."""
if not self._user_info:
body = self.q(css='BODY').text[0]
self._user_info = json.loads(body)
return self._user_info
def get_user_id(self):
"""
Finds and returns the user_id
"""
return self.user_info['user_id']
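def _example_auto_auth_usage(browser):
    """Hypothetical usage sketch (not part of the original module), assuming a
    bok-choy ``browser`` fixture: create a staff user enrolled in a course and
    return the generated user id. The username and course id are placeholders.
    """
    page = AutoAuthPage(browser, username='staff_user', staff=True,
                        course_id='course-v1:edX+DemoX+Demo_Course')
    page.visit()  # navigates to page.url and waits for is_browser_on_page()
    return page.get_user_id()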
| agpl-3.0 | -2,733,069,861,667,591,000 | 8,129,782,375,656,812,000 | 30.231405 | 114 | 0.605187 | false |
rwl/openpowersystem | cdpsm/iec61970/core/voltage_level.py | 1 | 2591 | #------------------------------------------------------------------------------
# Copyright (C) 2009 Richard Lincoln
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation; version 2 dated June, 1991.
#
# This software is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANDABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#------------------------------------------------------------------------------
""" A collection of equipment at one common system voltage forming a switchgear. The equipment typically consist of breakers, busbars, instrumentation, control, regulation and protection devices as well as assemblies of all these.
"""
# <<< imports
# @generated
from cdpsm.iec61970.core.equipment_container import EquipmentContainer
from cdpsm.iec61970.core.base_voltage import BaseVoltage
from cdpsm.iec61970.core.substation import Substation
from cdpsm.iec61970.domain import Voltage
from google.appengine.ext import db
# >>> imports
class VoltageLevel(EquipmentContainer):
""" A collection of equipment at one common system voltage forming a switchgear. The equipment typically consist of breakers, busbars, instrumentation, control, regulation and protection devices as well as assemblies of all these.
"""
# <<< voltage_level.attributes
# @generated
# The bus bar's low voltage limit
low_voltage_limit = Voltage
# The bus bar's high voltage limit
high_voltage_limit = Voltage
# >>> voltage_level.attributes
# <<< voltage_level.references
# @generated
# The base voltage used for all equipment within the VoltageLevel.
base_voltage = db.ReferenceProperty(BaseVoltage,
collection_name="voltage_level")
# Virtual property. The association is used in the naming hierarchy.
pass # bays
# The association is used in the naming hierarchy.
substation = db.ReferenceProperty(Substation,
collection_name="voltage_levels")
# >>> voltage_level.references
# <<< voltage_level.operations
# @generated
# >>> voltage_level.operations
# EOF -------------------------------------------------------------------------
| agpl-3.0 | -9,025,326,508,152,414,000 | -8,994,552,250,510,935,000 | 38.861538 | 235 | 0.677345 | false |
armab/st2contrib | packs/typeform/sensors/registration_sensor.py | 7 | 5690 | # Requirements:
# See ../requirements.txt
import eventlet
import httplib
import MySQLdb
import MySQLdb.cursors
import requests
from six.moves import urllib_parse
from st2reactor.sensor.base import PollingSensor
BASE_URL = 'https://api.typeform.com/v0/form/'
EMAIL_FIELD = "email_7723200"
FIRST_NAME_FIELD = "textfield_7723291"
LAST_NAME_FIELD = "textfield_7723236"
SOURCE_FIELD = "textarea_7723206"
NEWSLETTER_FIELD = "yesno_7723486"
REFERER_FIELD = "referer"
DATE_LAND_FIELD = "date_land"
DATE_SUBMIT_FIELD = "date_submit"
# pylint: disable=no-member
class TypeformRegistrationSensor(PollingSensor):
def __init__(self, sensor_service, config=None, poll_interval=180):
super(TypeformRegistrationSensor, self).__init__(
sensor_service=sensor_service,
config=config,
poll_interval=poll_interval)
self.logger = self._sensor_service.get_logger(
name=self.__class__.__name__)
self._trigger_pack = 'typeform'
self._trigger_ref = '.'.join([self._trigger_pack, 'registration'])
db_config = self._config.get('mysql', False)
self.db = self._conn_db(host=db_config.get('host', None),
user=db_config.get('user', None),
passwd=db_config.get('pass', None),
db=db_config.get('name', None))
self.request_data = {"key": self._config.get('api_key', None),
"completed": str(self._config.get('completed',
True)).lower()}
self.url = self._get_url(self._config.get('form_id', None))
# sensor specific config.
self.sensor_config = self._config.get('sensor', {})
self.retries = int(self.sensor_config.get('retries', 3))
if self.retries < 0:
self.retries = 0
self.retry_delay = int(self.sensor_config.get('retry_delay', 30))
if self.retry_delay < 0:
self.retry_delay = 30
self.timeout = int(self.sensor_config.get('timeout', 20))
if self.timeout < 0:
self.timeout = 20
def setup(self):
pass
def poll(self):
registration = {}
api_registration_list = self._get_api_registrations(self.request_data)
        for r in api_registration_list.get('responses') or []:
user = r.get('answers', None)
meta = r.get('metadata', None)
if self._check_new_registration(user.get(EMAIL_FIELD, False)):
registration['email'] = user.get(EMAIL_FIELD, None)
registration['first_name'] = user.get(FIRST_NAME_FIELD, None)
registration['last_name'] = user.get(LAST_NAME_FIELD, None)
registration['source'] = user.get(SOURCE_FIELD, None)
registration['newsletter'] = user.get(NEWSLETTER_FIELD, None)
registration['referer'] = meta.get(REFERER_FIELD, None)
registration['date_land'] = meta.get(DATE_LAND_FIELD, None)
registration['date_submit'] = meta.get(DATE_SUBMIT_FIELD, None)
self._dispatch_trigger(self._trigger_ref, data=registration)
def cleanup(self):
pass
def add_trigger(self, trigger):
pass
def update_trigger(self, trigger):
pass
def remove_trigger(self, trigger):
pass
def _dispatch_trigger(self, trigger, data):
self._sensor_service.dispatch(trigger, data)
def _get_url(self, endpoint):
url = urllib_parse.urljoin(BASE_URL, endpoint)
return url
def _get_api_registrations(self, params):
data = urllib_parse.urlencode(params)
headers = {}
headers['Content-Type'] = 'application/x-www-form-urlencoded'
response = None
attempts = 0
while attempts < self.retries:
try:
response = requests.request(
method='GET',
url=self.url,
headers=headers,
timeout=self.timeout,
params=data)
self.logger.debug('Got repsonse: %s.', response.json())
break
except Exception:
msg = 'Unable to connect to registrations API.'
self.logger.exception(msg)
attempts += 1
eventlet.sleep(self.retry_delay)
if not response:
raise Exception('Failed to connect to TypeForm API.')
if response.status_code != httplib.OK:
            failure_reason = ('Failed to retrieve registrations: %s '
                              '(status code: %s)'
                              % (response.text, response.status_code))
self.logger.error(failure_reason)
raise Exception(failure_reason)
return response.json()
def _check_new_registration(self, email):
email = MySQLdb.escape_string(email)
c = self.db.cursor()
query = 'SELECT * FROM user_registration WHERE email="%s"' % email
try:
c.execute(query)
self.db.commit()
except MySQLdb.Error, e:
self.logger.info(str(e))
return False
row = c.fetchone()
c.close()
if row:
return False
self.logger.info("%s is not a currently registered user." % email)
return True
def _conn_db(self, host, user, passwd, db):
return MySQLdb.connect(host=host,
user=user,
passwd=passwd,
db=db,
cursorclass=MySQLdb.cursors.DictCursor)
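# Hypothetical usage sketch (not part of the original pack): a config dict with
# the keys this sensor reads in __init__ above. All values are placeholders,
# and note that instantiating the sensor opens a real MySQL connection.
EXAMPLE_SENSOR_CONFIG = {
    'api_key': 'TYPEFORM_API_KEY',
    'form_id': 'AbC123',
    'completed': True,
    'mysql': {'host': 'localhost', 'user': 'st2', 'pass': 'secret',
              'name': 'registrations'},
    'sensor': {'retries': 3, 'retry_delay': 30, 'timeout': 20},
}
# sensor = TypeformRegistrationSensor(sensor_service, config=EXAMPLE_SENSOR_CONFIG)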
| apache-2.0 | -7,564,416,176,812,837,000 | -6,270,012,442,126,686,000 | 33.484848 | 79 | 0.560984 | false |
camilonova/sentry | src/sentry/utils/runner.py | 1 | 11831 | #!/usr/bin/env python
"""
sentry.utils.runner
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from logan.runner import run_app, configure_app
import base64
import os
import pkg_resources
import warnings
USE_GEVENT = os.environ.get('USE_GEVENT')
KEY_LENGTH = 40
CONFIG_TEMPLATE = """
# This file is just Python, with a touch of Django, which means you
# can inherit and tweak settings to your heart's content.
from sentry.conf.server import *
import os.path
CONF_ROOT = os.path.dirname(__file__)
DATABASES = {
'default': {
# You can swap out the engine for MySQL easily by changing this value
# to ``django.db.backends.mysql`` or to PostgreSQL with
# ``django.db.backends.postgresql_psycopg2``
# If you change this, you'll also need to install the appropriate python
# package: psycopg2 (Postgres) or mysql-python
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(CONF_ROOT, 'sentry.db'),
'USER': 'postgres',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
# You should not change this setting after your database has been created
# unless you have altered all schemas first
SENTRY_USE_BIG_INTS = True
# If you're expecting any kind of real traffic on Sentry, we highly recommend
# configuring the CACHES and Redis settings
###########
## Redis ##
###########
# Generic Redis configuration used as defaults for various things including:
# Buffers, Quotas, TSDB
SENTRY_REDIS_OPTIONS = {
'hosts': {
0: {
'host': '127.0.0.1',
'port': 6379,
}
}
}
###########
## Cache ##
###########
# If you wish to use memcached, install the dependencies and adjust the config
# as shown:
#
# pip install python-memcached
#
# CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
# 'LOCATION': ['127.0.0.1:11211'],
# }
# }
#
# SENTRY_CACHE = 'sentry.cache.django.DjangoCache'
SENTRY_CACHE = 'sentry.cache.redis.RedisCache'
###########
## Queue ##
###########
# See http://sentry.readthedocs.org/en/latest/queue/index.html for more
# information on configuring your queue broker and workers. Sentry relies
# on a Python framework called Celery to manage queues.
CELERY_ALWAYS_EAGER = False
BROKER_URL = 'redis://localhost:6379'
#################
## Rate Limits ##
#################
SENTRY_RATELIMITER = 'sentry.ratelimits.redis.RedisRateLimiter'
####################
## Update Buffers ##
####################
# Buffers (combined with queueing) act as an intermediate layer between the
# database and the storage API. They will greatly improve efficiency on large
# numbers of the same events being sent to the API in a short amount of time.
# (read: if you send any kind of real data to Sentry, you should enable buffers)
SENTRY_BUFFER = 'sentry.buffer.redis.RedisBuffer'
############
## Quotas ##
############
# Quotas allow you to rate limit individual projects or the Sentry install as
# a whole.
SENTRY_QUOTAS = 'sentry.quotas.redis.RedisQuota'
##########
## TSDB ##
##########
# The TSDB is used for building charts as well as making things like per-rate
# alerts possible.
SENTRY_TSDB = 'sentry.tsdb.redis.RedisTSDB'
##################
## File storage ##
##################
# Any Django storage backend is compatible with Sentry. For more solutions see
# the django-storages package: https://django-storages.readthedocs.org/en/latest/
SENTRY_FILESTORE = 'django.core.files.storage.FileSystemStorage'
SENTRY_FILESTORE_OPTIONS = {
'location': '/tmp/sentry-files',
}
################
## Web Server ##
################
# You MUST configure the absolute URI root for Sentry:
SENTRY_URL_PREFIX = 'http://sentry.example.com' # No trailing slash!
# If you're using a reverse proxy, you should enable the X-Forwarded-Proto
# and X-Forwarded-Host headers, and uncomment the following settings
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# USE_X_FORWARDED_HOST = True
SENTRY_WEB_HOST = '0.0.0.0'
SENTRY_WEB_PORT = 9000
SENTRY_WEB_OPTIONS = {
'workers': 3, # the number of gunicorn workers
'limit_request_line': 0, # required for raven-js
'secure_scheme_headers': {'X-FORWARDED-PROTO': 'https'},
}
#################
## Mail Server ##
#################
# For more information check Django's documentation:
# https://docs.djangoproject.com/en/1.3/topics/email/?from=olddocs#e-mail-backends
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'localhost'
EMAIL_HOST_PASSWORD = ''
EMAIL_HOST_USER = ''
EMAIL_PORT = 25
EMAIL_USE_TLS = False
# The email address to send on behalf of
SERVER_EMAIL = 'root@localhost'
# If you're using mailgun for inbound mail, set your API key and configure a
# route to forward to /api/hooks/mailgun/inbound/
MAILGUN_API_KEY = ''
###########
## etc. ##
###########
# If this file ever becomes compromised, it's important to regenerate your SECRET_KEY
# Changing this value will result in all current sessions being invalidated
SECRET_KEY = %(default_key)r
# http://twitter.com/apps/new
# It's important that you input a callback URL, even if it's useless. We have no idea why; consult Twitter.
TWITTER_CONSUMER_KEY = ''
TWITTER_CONSUMER_SECRET = ''
# http://developers.facebook.com/setup/
FACEBOOK_APP_ID = ''
FACEBOOK_API_SECRET = ''
# http://code.google.com/apis/accounts/docs/OAuth2.html#Registering
GOOGLE_OAUTH2_CLIENT_ID = ''
GOOGLE_OAUTH2_CLIENT_SECRET = ''
# https://github.com/settings/applications/new
GITHUB_APP_ID = ''
GITHUB_API_SECRET = ''
# https://trello.com/1/appKey/generate
TRELLO_API_KEY = ''
TRELLO_API_SECRET = ''
# https://confluence.atlassian.com/display/BITBUCKET/OAuth+Consumers
BITBUCKET_CONSUMER_KEY = ''
BITBUCKET_CONSUMER_SECRET = ''
"""
def generate_settings():
"""
This command is run when ``default_path`` doesn't exist, or ``init`` is
run and returns a string representing the default data to put into their
settings file.
"""
output = CONFIG_TEMPLATE % dict(
default_key=base64.b64encode(os.urandom(KEY_LENGTH)),
)
return output
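def _write_example_config(path='~/.sentry/sentry.conf.py'):
    """Hypothetical sketch (not part of the original module): write the
    generated default settings to ``path``, roughly what logan does when the
    config file does not exist yet. The default path mirrors configure() below.
    """
    path = os.path.expanduser(path)
    with open(path, 'w') as fp:
        fp.write(generate_settings())
    return path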
def install_plugins(settings):
from sentry.plugins import register
# entry_points={
# 'sentry.plugins': [
# 'phabricator = sentry_phabricator.plugins:PhabricatorPlugin'
# ],
# },
installed_apps = list(settings.INSTALLED_APPS)
for ep in pkg_resources.iter_entry_points('sentry.apps'):
try:
plugin = ep.load()
except Exception:
import sys
import traceback
sys.stderr.write("Failed to load app %r:\n%s\n" % (ep.name, traceback.format_exc()))
else:
installed_apps.append(ep.module_name)
settings.INSTALLED_APPS = tuple(installed_apps)
for ep in pkg_resources.iter_entry_points('sentry.plugins'):
try:
plugin = ep.load()
except Exception:
import sys
import traceback
sys.stderr.write("Failed to load plugin %r:\n%s\n" % (ep.name, traceback.format_exc()))
else:
register(plugin)
def initialize_receivers():
# force signal registration
import sentry.receivers # NOQA
def initialize_gevent():
from gevent import monkey
monkey.patch_all()
try:
import psycopg2 # NOQA
except ImportError:
pass
else:
from sentry.utils.gevent import make_psycopg_green
make_psycopg_green()
def initialize_app(config):
from django.utils import timezone
from sentry.app import env
if USE_GEVENT:
from django.db import connections
connections['default'].allow_thread_sharing = True
env.data['config'] = config.get('config_path')
env.data['start_date'] = timezone.now()
settings = config['settings']
install_plugins(settings)
skip_migration_if_applied(
settings, 'kombu.contrib.django', 'djkombu_queue')
skip_migration_if_applied(
settings, 'social_auth', 'social_auth_association')
apply_legacy_settings(config)
    # Common setups often don't configure themselves correctly for production
    # environments, so let's try to provide a bit more guidance.
if settings.CELERY_ALWAYS_EAGER and not settings.DEBUG:
warnings.warn('Sentry is configured to run asynchronous tasks in-process. '
'This is not recommended within production environments. '
'See http://sentry.readthedocs.org/en/latest/queue/index.html for more information.')
initialize_receivers()
def apply_legacy_settings(config):
settings = config['settings']
# SENTRY_USE_QUEUE used to determine if Celery was eager or not
if hasattr(settings, 'SENTRY_USE_QUEUE'):
warnings.warn('SENTRY_USE_QUEUE is deprecated. Please use CELERY_ALWAYS_EAGER instead. '
'See http://sentry.readthedocs.org/en/latest/queue/index.html for more information.', DeprecationWarning)
settings.CELERY_ALWAYS_EAGER = (not settings.SENTRY_USE_QUEUE)
if settings.SENTRY_URL_PREFIX in ('', 'http://sentry.example.com'):
# Maybe also point to a piece of documentation for more information?
# This directly coincides with users getting the awkward
# `ALLOWED_HOSTS` exception.
print('')
print('\033[91m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\033[0m')
print('\033[91m!! SENTRY_URL_PREFIX is not configured !!\033[0m')
print('\033[91m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\033[0m')
print('')
# Set `ALLOWED_HOSTS` to the catch-all so it works
settings.ALLOWED_HOSTS = ['*']
# Set ALLOWED_HOSTS if it's not already available
if not settings.ALLOWED_HOSTS:
from urlparse import urlparse
urlbits = urlparse(settings.SENTRY_URL_PREFIX)
if urlbits.hostname:
settings.ALLOWED_HOSTS = (urlbits.hostname,)
if not settings.SERVER_EMAIL and hasattr(settings, 'SENTRY_SERVER_EMAIL'):
warnings.warn('SENTRY_SERVER_EMAIL is deprecated. Please use SERVER_EMAIL instead.', DeprecationWarning)
settings.SERVER_EMAIL = settings.SENTRY_SERVER_EMAIL
def skip_migration_if_applied(settings, app_name, table_name,
name='0001_initial'):
from south.migration import Migrations
from sentry.utils.db import table_exists
import types
migration = Migrations(app_name)[name]
def skip_if_table_exists(original):
def wrapped(self):
# TODO: look into why we're having to return some ridiculous
# lambda
if table_exists(table_name):
return lambda x=None: None
return original()
wrapped.__name__ = original.__name__
return wrapped
migration.forwards = types.MethodType(
skip_if_table_exists(migration.forwards), migration)
def configure(config_path=None):
configure_app(
project='sentry',
config_path=config_path,
default_config_path='~/.sentry/sentry.conf.py',
default_settings='sentry.conf.server',
settings_initializer=generate_settings,
settings_envvar='SENTRY_CONF',
initializer=initialize_app,
)
def main():
if USE_GEVENT:
print("Configuring Sentry with gevent bindings")
initialize_gevent()
run_app(
project='sentry',
default_config_path='~/.sentry/sentry.conf.py',
default_settings='sentry.conf.server',
settings_initializer=generate_settings,
settings_envvar='SENTRY_CONF',
initializer=initialize_app,
)
if __name__ == '__main__':
main()
| bsd-3-clause | 1,507,784,926,802,360 | 5,501,725,654,327,456,000 | 28.284653 | 127 | 0.646944 | false |
biswajitsahu/kuma | vendor/packages/translate/misc/dictutils.py | 24 | 5941 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Implements a case-insensitive (on keys) dictionary and
order-sensitive dictionary"""
# Copyright 2002, 2003 St James Software
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
class cidict(dict):
def __init__(self, fromdict=None):
"""constructs the cidict, optionally using another dict to do so"""
if fromdict is not None:
self.update(fromdict)
def __getitem__(self, key):
if type(key) != str and type(key) != unicode:
raise TypeError("cidict can only have str or unicode as key (got %r)" %
type(key))
for akey in self.keys():
if akey.lower() == key.lower():
return dict.__getitem__(self, akey)
raise IndexError
def __setitem__(self, key, value):
if type(key) != str and type(key) != unicode:
raise TypeError("cidict can only have str or unicode as key (got %r)" %
type(key))
for akey in self.keys():
if akey.lower() == key.lower():
return dict.__setitem__(self, akey, value)
return dict.__setitem__(self, key, value)
def update(self, updatedict):
"""D.update(E) -> None.
Update D from E: for k in E.keys(): D[k] = E[k]"""
for key, value in updatedict.iteritems():
self[key] = value
def __delitem__(self, key):
if type(key) != str and type(key) != unicode:
raise TypeError("cidict can only have str or unicode as key (got %r)" %
type(key))
for akey in self.keys():
if akey.lower() == key.lower():
return dict.__delitem__(self, akey)
raise IndexError
def __contains__(self, key):
if type(key) != str and type(key) != unicode:
raise TypeError("cidict can only have str or unicode as key (got %r)" %
type(key))
for akey in self.keys():
if akey.lower() == key.lower():
return 1
return 0
def has_key(self, key):
return self.__contains__(key)
def get(self, key, default=None):
if key in self:
return self[key]
else:
return default
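def _example_cidict_usage():
    """Hypothetical sketch (not part of the original module): keys are matched
    case-insensitively while the first spelling used is preserved.
    """
    headers = cidict({"Content-Type": "text/plain"})
    assert headers["content-type"] == "text/plain"
    headers["CONTENT-TYPE"] = "text/html"  # updates the existing key in place
    assert headers.keys() == ["Content-Type"]
    return headers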
class ordereddict(dict):
"""a dictionary which remembers its keys in the order in which they
were given"""
def __init__(self, *args):
if len(args) == 0:
super(ordereddict, self).__init__()
self.order = []
elif len(args) > 1:
raise TypeError("ordereddict() takes at most 1 argument (%d given)" %
len(args))
else:
initarg = args[0]
apply(super(ordereddict, self).__init__, args)
if hasattr(initarg, "keys"):
self.order = initarg.keys()
else:
# danger: could have duplicate keys...
self.order = []
checkduplicates = {}
for key, value in initarg:
if not key in checkduplicates:
self.order.append(key)
checkduplicates[key] = None
def __setitem__(self, key, value):
alreadypresent = key in self
result = dict.__setitem__(self, key, value)
if not alreadypresent:
self.order.append(key)
return result
def update(self, updatedict):
"""D.update(E) -> None.
Update D from E: for k in E.keys(): D[k] = E[k]"""
for key, value in updatedict.iteritems():
self[key] = value
def __delitem__(self, key):
alreadypresent = key in self
result = dict.__delitem__(self, key)
if alreadypresent:
del self.order[self.order.index(key)]
return result
def copy(self):
"""D.copy() -> a shallow copy of D"""
thecopy = ordereddict(super(ordereddict, self).copy())
thecopy.order = self.order[:]
return thecopy
def items(self):
"""D.items() -> list of D's (key, value) pairs, as 2-tuples"""
return [(key, self[key]) for key in self.order]
def iteritems(self):
"""D.iteritems() -> an iterator over the (key, value) items of D"""
for key in self.order:
yield (key, self[key])
def iterkeys(self):
"""D.iterkeys() -> an iterator over the keys of D"""
for key in self.order:
yield key
__iter__ = iterkeys
def itervalues(self):
"""D.itervalues() -> an iterator over the values of D"""
for key in self.order:
yield self[key]
def keys(self):
"""D.keys() -> list of D's keys"""
return self.order[:]
def popitem(self):
"""D.popitem() -> (k, v), remove and return some (key, value) pair
as a 2-tuple; but raise KeyError if D is empty"""
if len(self.order) == 0:
raise KeyError("popitem(): ordered dictionary is empty")
k = self.order.pop()
v = self[k]
del self[k]
return (k, v)
def pop(self, key):
"""remove entry from dict and internal list"""
value = super(ordereddict, self).pop(key)
del self.order[self.order.index(key)]
return value
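def _example_ordereddict_usage():
    """Hypothetical sketch (not part of the original module): insertion order
    is preserved across updates and deletions.
    """
    d = ordereddict()
    d["zulu"] = 1
    d["alpha"] = 2
    d["mike"] = 3
    del d["alpha"]
    assert d.keys() == ["zulu", "mike"]
    assert d.items() == [("zulu", 1), ("mike", 3)]
    return d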
| mpl-2.0 | 1,772,330,614,409,591,800 | 2,977,687,046,658,033,700 | 33.143678 | 83 | 0.550749 | false |
wuhengzhi/chromium-crosswalk | tools/perf/core/trybot_command.py | 3 | 19574 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import logging
import platform
import re
import subprocess
import urllib2
import json
from core import path_util
from telemetry import benchmark
from telemetry.core import discover
from telemetry.util import command_line
from telemetry.util import matching
CHROMIUM_CONFIG_FILENAME = 'tools/run-perf-test.cfg'
BLINK_CONFIG_FILENAME = 'Tools/run-perf-test.cfg'
SUCCESS, NO_CHANGES, ERROR = range(3)
# Unsupported Perf bisect bots.
EXCLUDED_BOTS = {
'win_xp_perf_bisect', # Goma issues: crbug.com/330900
'win_perf_bisect_builder',
'win64_nv_tester',
'winx64_bisect_builder',
'linux_perf_bisect_builder',
'mac_perf_bisect_builder',
'android_perf_bisect_builder',
'android_arm64_perf_bisect_builder',
# Bisect FYI bots are not meant for testing actual perf regressions.
# Hardware configuration on these bots is different from actual bisect bot
# and these bots runs E2E integration tests for auto-bisect
# using dummy benchmarks.
'linux_fyi_perf_bisect',
'mac_fyi_perf_bisect',
'win_fyi_perf_bisect',
# CQ bots on tryserver.chromium.perf
'android_s5_perf_cq',
'winx64_10_perf_cq',
'mac_retina_perf_cq',
'linux_perf_cq',
}
INCLUDE_BOTS = [
'all',
'all-win',
'all-mac',
'all-linux',
'all-android'
]
# Default try bot to use incase builbot is unreachable.
DEFAULT_TRYBOTS = [
'linux_perf_bisect',
'mac_10_11_perf_bisect',
'winx64_10_perf_bisect',
'android_s5_perf_bisect',
]
assert not set(DEFAULT_TRYBOTS) & set(EXCLUDED_BOTS), (
    'A trybot cannot be present in both the default and the excluded bot lists.')
class TrybotError(Exception):
def __str__(self):
return '%s\nError running tryjob.' % self.args[0]
def _GetTrybotList(builders):
builders = ['%s' % bot.replace('_perf_bisect', '').replace('_', '-')
for bot in builders]
builders.extend(INCLUDE_BOTS)
return sorted(builders)
def _GetBotPlatformFromTrybotName(trybot_name):
os_names = ['linux', 'android', 'mac', 'win']
try:
return next(b for b in os_names if b in trybot_name)
except StopIteration:
raise TrybotError('Trybot "%s" unsupported for tryjobs.' % trybot_name)
def _GetBuilderNames(trybot_name, builders):
""" Return platform and its available bot name as dictionary."""
os_names = ['linux', 'android', 'mac', 'win']
if 'all' not in trybot_name:
bot = ['%s_perf_bisect' % trybot_name.replace('-', '_')]
bot_platform = _GetBotPlatformFromTrybotName(trybot_name)
if 'x64' in trybot_name:
bot_platform += '-x64'
return {bot_platform: bot}
platform_and_bots = {}
for os_name in os_names:
platform_and_bots[os_name] = [bot for bot in builders if os_name in bot]
  # Special case for Windows x64: treat it as a separate platform whose
  # config should contain target_arch=x64 and --browser=release_x64.
win_x64_bots = [
win_bot for win_bot in platform_and_bots['win']
if 'x64' in win_bot]
# Separate out non x64 bits win bots
platform_and_bots['win'] = list(
set(platform_and_bots['win']) - set(win_x64_bots))
platform_and_bots['win-x64'] = win_x64_bots
if 'all-win' in trybot_name:
return {'win': platform_and_bots['win'],
'win-x64': platform_and_bots['win-x64']}
if 'all-mac' in trybot_name:
return {'mac': platform_and_bots['mac']}
if 'all-android' in trybot_name:
return {'android': platform_and_bots['android']}
if 'all-linux' in trybot_name:
return {'linux': platform_and_bots['linux']}
return platform_and_bots
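# Hypothetical sketch (not part of the original module) of the mapping returned
# above, using the DEFAULT_TRYBOTS list defined at the top of this file:
#
#   _GetBuilderNames('linux', DEFAULT_TRYBOTS)
#       -> {'linux': ['linux_perf_bisect']}
#   _GetBuilderNames('winx64-10', DEFAULT_TRYBOTS)
#       -> {'win-x64': ['winx64_10_perf_bisect']}
#   _GetBuilderNames('all-android', DEFAULT_TRYBOTS)
#       -> {'android': ['android_s5_perf_bisect']}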
def _RunProcess(cmd):
logging.debug('Running process: "%s"', ' '.join(cmd))
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
returncode = proc.poll()
return (returncode, out, err)
_GIT_CMD = 'git'
if platform.system() == 'Windows':
# On windows, the git command is installed as 'git.bat'
_GIT_CMD = 'git.bat'
class Trybot(command_line.ArgParseCommand):
""" Run telemetry perf benchmark on trybot """
usage = 'botname benchmark_name [<benchmark run options>]'
_builders = None
def __init__(self):
self._builder_names = None
@classmethod
def _GetBuilderList(cls):
if not cls._builders:
try:
f = urllib2.urlopen(
('https://build.chromium.org/p/tryserver.chromium.perf/json/'
'builders'),
timeout=5)
# In case of any kind of exception, allow tryjobs to use default trybots.
      # Possible exceptions are ssl.SSLError, urllib2.URLError,
# socket.timeout, socket.error.
except Exception:
        # In case of any exception, return the default trybots.
        print ('WARNING: Unable to reach buildbot to retrieve trybot '
'information, tryjob will use default trybots.')
cls._builders = DEFAULT_TRYBOTS
else:
builders = json.loads(f.read()).keys()
# Exclude unsupported bots like win xp and some dummy bots.
cls._builders = [bot for bot in builders if bot not in EXCLUDED_BOTS]
return cls._builders
def _InitializeBuilderNames(self, trybot):
self._builder_names = _GetBuilderNames(trybot, self._GetBuilderList())
@classmethod
def CreateParser(cls):
parser = argparse.ArgumentParser(
('Run telemetry benchmarks on trybot. You can add all the benchmark '
'options available except the --browser option'),
formatter_class=argparse.RawTextHelpFormatter)
return parser
@classmethod
def ProcessCommandLineArgs(cls, parser, options, extra_args, environment):
del environment # unused
for arg in extra_args:
if arg == '--browser' or arg.startswith('--browser='):
parser.error('--browser=... is not allowed when running trybot.')
all_benchmarks = discover.DiscoverClasses(
start_dir=path_util.GetPerfBenchmarksDir(),
top_level_dir=path_util.GetPerfDir(),
base_class=benchmark.Benchmark).values()
all_benchmark_names = [b.Name() for b in all_benchmarks]
all_benchmarks_by_names = {b.Name(): b for b in all_benchmarks}
benchmark_class = all_benchmarks_by_names.get(options.benchmark_name, None)
if not benchmark_class:
possible_benchmark_names = matching.GetMostLikelyMatchedObject(
all_benchmark_names, options.benchmark_name)
parser.error(
'No benchmark named "%s". Do you mean any of those benchmarks '
'below?\n%s' %
(options.benchmark_name, '\n'.join(possible_benchmark_names)))
is_benchmark_disabled, reason = cls.IsBenchmarkDisabledOnTrybotPlatform(
benchmark_class, options.trybot)
also_run_disabled_option = '--also-run-disabled-tests'
if is_benchmark_disabled and also_run_disabled_option not in extra_args:
parser.error('%s To run the benchmark on trybot anyway, add '
'%s option.' % (reason, also_run_disabled_option))
@classmethod
def IsBenchmarkDisabledOnTrybotPlatform(cls, benchmark_class, trybot_name):
""" Return whether benchmark will be disabled on trybot platform.
Note that we cannot tell with certainty whether the benchmark will be
disabled on the trybot platform since the disable logic in ShouldDisable()
can be very dynamic and can only be verified on the trybot server platform.
    We err on the side of enabling the benchmark, and make a best-effort
    attempt to discover early whether it will be disabled.
It should never be the case that the benchmark will be enabled on the test
platform but this method returns True.
Returns:
A tuple (is_benchmark_disabled, reason) whereas |is_benchmark_disabled| is
a boolean that tells whether we are sure that the benchmark will be
disabled, and |reason| is a string that shows the reason why we think the
benchmark is disabled for sure.
"""
benchmark_name = benchmark_class.Name()
benchmark_disabled_strings = set()
if hasattr(benchmark_class, '_disabled_strings'):
# pylint: disable=protected-access
benchmark_disabled_strings = benchmark_class._disabled_strings
# pylint: enable=protected-access
if 'all' in benchmark_disabled_strings:
      return True, 'Benchmark %s is disabled on all platforms.' % benchmark_name
if trybot_name == 'all':
return False, ''
trybot_platform = _GetBotPlatformFromTrybotName(trybot_name)
if trybot_platform in benchmark_disabled_strings:
return True, (
"Benchmark %s is disabled on %s, and trybot's platform is %s." %
(benchmark_name, ', '.join(benchmark_disabled_strings),
trybot_platform))
benchmark_enabled_strings = None
if hasattr(benchmark_class, '_enabled_strings'):
# pylint: disable=protected-access
benchmark_enabled_strings = benchmark_class._enabled_strings
# pylint: enable=protected-access
if (benchmark_enabled_strings and
trybot_platform not in benchmark_enabled_strings and
'all' not in benchmark_enabled_strings):
return True, (
"Benchmark %s is only enabled on %s, and trybot's platform is %s." %
(benchmark_name, ', '.join(benchmark_enabled_strings),
trybot_platform))
if benchmark_class.ShouldDisable != benchmark.Benchmark.ShouldDisable:
logging.warning(
'Benchmark %s has ShouldDisable() method defined. If your trybot run '
'does not produce any results, it is possible that the benchmark '
'is disabled on the target trybot platform.', benchmark_name)
return False, ''
@classmethod
def AddCommandLineArgs(cls, parser, environment):
del environment # unused
available_bots = _GetTrybotList(cls._GetBuilderList())
parser.add_argument(
'trybot', choices=available_bots,
help=('specify which bots to run telemetry benchmarks on. '
' Allowed values are:\n' + '\n'.join(available_bots)),
metavar='<trybot name>')
parser.add_argument(
'benchmark_name', type=str,
help=('specify which benchmark to run. To see all available benchmarks,'
' run `run_benchmark list`'),
metavar='<benchmark name>')
def Run(self, options, extra_args=None):
"""Sends a tryjob to a perf trybot.
This creates a branch, telemetry-tryjob, switches to that branch, edits
the bisect config, commits it, uploads the CL to rietveld, and runs a
tryjob on the given bot.
"""
if extra_args is None:
extra_args = []
self._InitializeBuilderNames(options.trybot)
arguments = [options.benchmark_name] + extra_args
# First check if there are chromium changes to upload.
status = self._AttemptTryjob(CHROMIUM_CONFIG_FILENAME, arguments)
if status not in [SUCCESS, ERROR]:
# If we got here, there are no chromium changes to upload. Try blink.
os.chdir('third_party/WebKit/')
status = self._AttemptTryjob(BLINK_CONFIG_FILENAME, arguments)
os.chdir('../..')
if status not in [SUCCESS, ERROR]:
logging.error('No local changes found in chromium or blink trees. '
'browser=%s argument sends local changes to the '
'perf trybot(s): %s.', options.trybot,
self._builder_names.values())
return 1
return 0
def _UpdateConfigAndRunTryjob(self, bot_platform, cfg_file_path, arguments):
"""Updates perf config file, uploads changes and excutes perf try job.
Args:
bot_platform: Name of the platform to be generated.
cfg_file_path: Perf config file path.
Returns:
(result, msg) where result is one of:
SUCCESS if a tryjob was sent
NO_CHANGES if there was nothing to try,
ERROR if a tryjob was attempted but an error encountered
and msg is an error message if an error was encountered, or rietveld
url if success, otherwise throws TrybotError exception.
"""
config = self._GetPerfConfig(bot_platform, arguments)
config_to_write = 'config = %s' % json.dumps(
config, sort_keys=True, indent=2, separators=(',', ': '))
try:
with open(cfg_file_path, 'r') as config_file:
if config_to_write == config_file.read():
return NO_CHANGES, ''
except IOError:
msg = 'Cannot find %s. Please run from src dir.' % cfg_file_path
return (ERROR, msg)
with open(cfg_file_path, 'w') as config_file:
config_file.write(config_to_write)
# Commit the config changes locally.
returncode, out, err = _RunProcess(
[_GIT_CMD, 'commit', '-a', '-m', 'bisect config: %s' % bot_platform])
if returncode:
raise TrybotError('Could not commit bisect config change for %s,'
' error %s' % (bot_platform, err))
# Upload the CL to rietveld and run a try job.
returncode, out, err = _RunProcess([
_GIT_CMD, 'cl', 'upload', '-f', '--bypass-hooks', '-m',
'CL for perf tryjob on %s' % bot_platform
])
if returncode:
raise TrybotError('Could not upload to rietveld for %s, error %s' %
(bot_platform, err))
match = re.search(r'https://codereview.chromium.org/[\d]+', out)
if not match:
raise TrybotError('Could not upload CL to rietveld for %s! Output %s' %
(bot_platform, out))
rietveld_url = match.group(0)
# Generate git try command for available bots.
git_try_command = [_GIT_CMD, 'cl', 'try', '-m', 'tryserver.chromium.perf']
for bot in self._builder_names[bot_platform]:
git_try_command.extend(['-b', bot])
returncode, out, err = _RunProcess(git_try_command)
if returncode:
raise TrybotError('Could not try CL for %s, error %s' %
(bot_platform, err))
return (SUCCESS, rietveld_url)
def _GetPerfConfig(self, bot_platform, arguments):
"""Generates the perf config for try job.
Args:
bot_platform: Name of the platform to be generated.
Returns:
A dictionary with perf config parameters.
"""
# To make sure that we don't mutate the original args
arguments = arguments[:]
# Always set verbose logging for later debugging
if '-v' not in arguments and '--verbose' not in arguments:
arguments.append('--verbose')
# Generate the command line for the perf trybots
target_arch = 'ia32'
if any(arg == '--chrome-root' or arg.startswith('--chrome-root=') for arg
in arguments):
raise ValueError(
          'Trybot does not support --chrome-root option set directly '
'through command line since it may contain references to your local '
'directory')
if bot_platform in ['win', 'win-x64']:
arguments.insert(0, 'python tools\\perf\\run_benchmark')
else:
arguments.insert(0, './tools/perf/run_benchmark')
if bot_platform == 'android':
arguments.insert(1, '--browser=android-chromium')
elif any('x64' in bot for bot in self._builder_names[bot_platform]):
arguments.insert(1, '--browser=release_x64')
target_arch = 'x64'
else:
arguments.insert(1, '--browser=release')
command = ' '.join(arguments)
return {
'command': command,
'repeat_count': '1',
'max_time_minutes': '120',
'truncate_percent': '0',
'target_arch': target_arch,
}
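  # Hypothetical sketch (not part of the original module): for an android
  # trybot and the arguments ['smoothness.top_25'], the dict built above is
  # roughly:
  #   {'command': './tools/perf/run_benchmark --browser=android-chromium '
  #               'smoothness.top_25 --verbose',
  #    'repeat_count': '1', 'max_time_minutes': '120',
  #    'truncate_percent': '0', 'target_arch': 'ia32'}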
def _AttemptTryjob(self, cfg_file_path, arguments):
"""Attempts to run a tryjob from the current directory.
This is run once for chromium, and if it returns NO_CHANGES, once for blink.
Args:
cfg_file_path: Path to the config file for the try job.
Returns:
Returns SUCCESS if a tryjob was sent, NO_CHANGES if there was nothing to
try, ERROR if a tryjob was attempted but an error encountered.
"""
source_repo = 'chromium'
if cfg_file_path == BLINK_CONFIG_FILENAME:
source_repo = 'blink'
# TODO(prasadv): This method is quite long, we should consider refactor
# this by extracting to helper methods.
returncode, original_branchname, err = _RunProcess(
[_GIT_CMD, 'rev-parse', '--abbrev-ref', 'HEAD'])
if returncode:
msg = 'Must be in a git repository to send changes to trybots.'
if err:
msg += '\nGit error: %s' % err
logging.error(msg)
return ERROR
original_branchname = original_branchname.strip()
# Check if the tree is dirty: make sure the index is up to date and then
# run diff-index
_RunProcess([_GIT_CMD, 'update-index', '--refresh', '-q'])
returncode, out, err = _RunProcess([_GIT_CMD, 'diff-index', 'HEAD'])
if out:
logging.error(
'Cannot send a try job with a dirty tree. Commit locally first.')
return ERROR
# Make sure the tree does have local commits.
returncode, out, err = _RunProcess(
[_GIT_CMD, 'log', 'origin/master..HEAD'])
if not out:
return NO_CHANGES
# Create/check out the telemetry-tryjob branch, and edit the configs
# for the tryjob there.
returncode, out, err = _RunProcess(
[_GIT_CMD, 'checkout', '-b', 'telemetry-tryjob'])
if returncode:
logging.error('Error creating branch telemetry-tryjob. '
'Please delete it if it exists.\n%s', err)
return ERROR
try:
returncode, out, err = _RunProcess(
[_GIT_CMD, 'branch', '--set-upstream-to', 'origin/master'])
if returncode:
logging.error('Error in git branch --set-upstream-to: %s', err)
return ERROR
for bot_platform in self._builder_names:
if not self._builder_names[bot_platform]:
logging.warning('No builder is found for %s', bot_platform)
continue
try:
results, output = self._UpdateConfigAndRunTryjob(
bot_platform, cfg_file_path, arguments)
if results == ERROR:
logging.error(output)
return ERROR
elif results == NO_CHANGES:
print ('Skip the try job run on %s because it has been tried in '
'previous try job run. ' % bot_platform)
else:
print ('Uploaded %s try job to rietveld for %s platform. '
'View progress at %s' % (source_repo, bot_platform, output))
except TrybotError, err:
print err
logging.error(err)
finally:
# Checkout original branch and delete telemetry-tryjob branch.
# TODO(prasadv): This finally block could be extracted out to be a
# separate function called _CleanupBranch.
returncode, out, err = _RunProcess(
[_GIT_CMD, 'checkout', original_branchname])
if returncode:
logging.error('Could not check out %s. Please check it out and '
'manually delete the telemetry-tryjob branch. '
': %s', original_branchname, err)
return ERROR # pylint: disable=lost-exception
logging.info('Checked out original branch: %s', original_branchname)
returncode, out, err = _RunProcess(
[_GIT_CMD, 'branch', '-D', 'telemetry-tryjob'])
if returncode:
logging.error('Could not delete telemetry-tryjob branch. '
'Please delete it manually: %s', err)
return ERROR # pylint: disable=lost-exception
logging.info('Deleted temp branch: telemetry-tryjob')
return SUCCESS
| bsd-3-clause | 614,718,633,362,522,200 | 2,261,917,274,430,905,900 | 37.305284 | 80 | 0.648922 | false |
ml-lab/neon | neon/optimizers/__init__.py | 4 | 1067 | # ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
# import shortcuts
from neon.optimizers.gradient_descent import (GradientDescent, # noqa
GradientDescentPretrain,
GradientDescentMomentum,
GradientDescentMomentumWeightDecay)
from neon.optimizers.adadelta import AdaDelta # noqa
| apache-2.0 | -7,150,247,418,907,760,000 | 7,423,051,668,218,006,000 | 52.35 | 78 | 0.602624 | false |
jetskijoe/SickGear | tests/name_parser_tests.py | 1 | 28709 | from __future__ import print_function
import datetime
import os.path
import test_lib as test
import sys
import unittest
sys.path.insert(1, os.path.abspath('..'))
sys.path.insert(1, os.path.abspath('../lib'))
from sickbeard.name_parser import parser
import sickbeard
sickbeard.SYS_ENCODING = 'UTF-8'
DEBUG = VERBOSE = False
simple_test_cases = {
'standard': {
'Mr.Show.Name.S01E02.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Mr Show Name', 1, [2], 'Source.Quality.Etc', 'Group'),
'Show.Name.S01E02': parser.ParseResult(None, 'Show Name', 1, [2]),
'Show Name - S01E02 - My Ep Name': parser.ParseResult(None, 'Show Name', 1, [2], 'My Ep Name'),
'Show.1.0.Name.S01.E03.My.Ep.Name-Group':
parser.ParseResult(None, 'Show 1.0 Name', 1, [3], 'My.Ep.Name', 'Group'),
'Show.Name.S01E02E03.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Show Name', 1, [2, 3], 'Source.Quality.Etc', 'Group'),
'Mr. Show Name - S01E02-03 - My Ep Name': parser.ParseResult(None, 'Mr. Show Name', 1, [2, 3], 'My Ep Name'),
'Show.Name.S01.E02.E03': parser.ParseResult(None, 'Show Name', 1, [2, 3]),
'Show.Name-0.2010.S01E02.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Show Name-0 2010', 1, [2], 'Source.Quality.Etc', 'Group'),
'S01E02 Ep Name': parser.ParseResult(None, None, 1, [2], 'Ep Name'),
'Show Name - S06E01 - 2009-12-20 - Ep Name':
parser.ParseResult(None, 'Show Name', 6, [1], '2009-12-20 - Ep Name'),
'Show Name - S06E01 - -30-': parser.ParseResult(None, 'Show Name', 6, [1], '30-'),
'Show-Name-S06E01-720p': parser.ParseResult(None, 'Show-Name', 6, [1], '720p'),
'Show-Name-S06E01-1080i': parser.ParseResult(None, 'Show-Name', 6, [1], '1080i'),
'Show.Name.S06E01.Other.WEB-DL': parser.ParseResult(None, 'Show Name', 6, [1], 'Other.WEB-DL'),
'Show.Name.S06E01 Some-Stuff Here': parser.ParseResult(None, 'Show Name', 6, [1], 'Some-Stuff Here'),
'Show.Name.S01E15-11001001': parser.ParseResult(None, 'Show Name', 1, [15], None),
'Show.Name.S01E02.Source.Quality.Etc-Group - [stuff]':
parser.ParseResult(None, 'Show Name', 1, [2], 'Source.Quality.Etc', 'Group'),
},
'fov': {
'Show_Name.1x02.Source_Quality_Etc-Group':
parser.ParseResult(None, 'Show Name', 1, [2], 'Source_Quality_Etc', 'Group'),
'Show Name 1x02': parser.ParseResult(None, 'Show Name', 1, [2]),
'Show Name 1x02 x264 Test': parser.ParseResult(None, 'Show Name', 1, [2], 'x264 Test'),
'Show Name - 1x02 - My Ep Name': parser.ParseResult(None, 'Show Name', 1, [2], 'My Ep Name'),
'Show_Name.1x02x03x04.Source_Quality_Etc-Group':
parser.ParseResult(None, 'Show Name', 1, [2, 3, 4], 'Source_Quality_Etc', 'Group'),
'Show Name - 1x02-03-04 - My Ep Name': parser.ParseResult(None, 'Show Name', 1, [2, 3, 4], 'My Ep Name'),
'1x02 Ep Name': parser.ParseResult(None, None, 1, [2], 'Ep Name'),
'Show-Name-1x02-720p': parser.ParseResult(None, 'Show-Name', 1, [2], '720p'),
'Show-Name-1x02-1080i': parser.ParseResult(None, 'Show-Name', 1, [2], '1080i'),
'Show Name [05x12] Ep Name': parser.ParseResult(None, 'Show Name', 5, [12], 'Ep Name'),
'Show.Name.1x02.WEB-DL': parser.ParseResult(None, 'Show Name', 1, [2], 'WEB-DL'),
},
'standard_repeat': {
'Show.Name.S01E02.S01E03.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Show Name', 1, [2, 3], 'Source.Quality.Etc', 'Group'),
'Show.Name.S01E02.S01E03': parser.ParseResult(None, 'Show Name', 1, [2, 3]),
'Show Name - S01E02 - S01E03 - S01E04 - Ep Name':
parser.ParseResult(None, 'Show Name', 1, [2, 3, 4], 'Ep Name'),
'Show.Name.S01E02.S01E03.WEB-DL': parser.ParseResult(None, 'Show Name', 1, [2, 3], 'WEB-DL'),
},
'fov_repeat': {
'Show.Name.1x02.1x03.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Show Name', 1, [2, 3], 'Source.Quality.Etc', 'Group'),
'Show.Name.1x02.1x03': parser.ParseResult(None, 'Show Name', 1, [2, 3]),
'Show Name - 1x02 - 1x03 - 1x04 - Ep Name': parser.ParseResult(None, 'Show Name', 1, [2, 3, 4], 'Ep Name'),
'Show.Name.1x02.1x03.WEB-DL': parser.ParseResult(None, 'Show Name', 1, [2, 3], 'WEB-DL'),
},
'bare': {
'Show.Name.102.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Show Name', 1, [2], 'Source.Quality.Etc', 'Group'),
'show.name.2010.123.source.quality.etc-group':
parser.ParseResult(None, 'show name 2010', 1, [23], 'source.quality.etc', 'group'),
'show.name.2010.222.123.source.quality.etc-group':
parser.ParseResult(None, 'show name 2010.222', 1, [23], 'source.quality.etc', 'group'),
'Show.Name.102': parser.ParseResult(None, 'Show Name', 1, [2]),
'the.event.401.hdtv-lol': parser.ParseResult(None, 'the event', 4, [1], 'hdtv', 'lol'),
# 'show.name.2010.special.hdtv-blah': None,
},
'stupid': {
'tpz-abc102': parser.ParseResult(None, None, 1, [2], None, 'tpz'),
'tpz-abc.102': parser.ParseResult(None, None, 1, [2], None, 'tpz'),
},
'no_season': {
'Show Name - 01 - Ep Name': parser.ParseResult(None, 'Show Name', None, [1], 'Ep Name'),
'01 - Ep Name': parser.ParseResult(None, None, None, [1], 'Ep Name'),
'Show Name - 01 - Ep Name - WEB-DL': parser.ParseResult(None, 'Show Name', None, [1], 'Ep Name - WEB-DL'),
'Show.Name.2015.04.19.Ep.Name.Part.2.PROPER.PDTV.x264-GROUP':
parser.ParseResult(None, 'Show Name', release_group='GROUP', extra_info='Ep.Name.Part.2.PROPER.PDTV.x264',
air_date=datetime.date(2015, 4, 19)),
},
'no_season_general': {
'Show.Name.E23.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Show Name', None, [23], 'Source.Quality.Etc', 'Group'),
'Show Name - Episode 01 - Ep Name': parser.ParseResult(None, 'Show Name', None, [1], 'Ep Name'),
'Show.Name.Part.3.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Show Name', 1, [3], 'Source.Quality.Etc', 'Group'),
'Show.Name.Part.1.and.Part.2.Blah-Group': parser.ParseResult(None, 'Show Name', 1, [1, 2], 'Blah', 'Group'),
'Show.Name.Part.IV.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Show Name', None, [4], 'Source.Quality.Etc', 'Group'),
'Deconstructed.E07.1080i.HDTV.DD5.1.MPEG2-TrollHD':
parser.ParseResult(None, 'Deconstructed', None, [7], '1080i.HDTV.DD5.1.MPEG2', 'TrollHD'),
'Show.Name.E23.WEB-DL': parser.ParseResult(None, 'Show Name', None, [23], 'WEB-DL'),
},
'no_season_multi_ep': {
'Show.Name.E23-24.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Show Name', None, [23, 24], 'Source.Quality.Etc', 'Group'),
'Show Name - Episode 01-02 - Ep Name': parser.ParseResult(None, 'Show Name', None, [1, 2], 'Ep Name'),
'Show.Name.E23-24.WEB-DL': parser.ParseResult(None, 'Show Name', None, [23, 24], 'WEB-DL'),
},
'season_only': {
'Show.Name.S02.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Show Name', 2, [], 'Source.Quality.Etc', 'Group'),
'Show Name Season 2': parser.ParseResult(None, 'Show Name', 2),
'Season 02': parser.ParseResult(None, None, 2),
},
'scene_date_format': {
'Show.Name.2010.11.23.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Show Name', None, [], 'Source.Quality.Etc', 'Group', datetime.date(2010, 11, 23)),
'Show Name - 2010.11.23': parser.ParseResult(None, 'Show Name', air_date=datetime.date(2010, 11, 23)),
'Show.Name.2010.23.11.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Show Name', None, [], 'Source.Quality.Etc', 'Group', datetime.date(2010, 11, 23)),
'Show Name - 2010-11-23 - Ep Name':
parser.ParseResult(None, 'Show Name', extra_info='Ep Name', air_date=datetime.date(2010, 11, 23)),
'2010-11-23 - Ep Name': parser.ParseResult(None, extra_info='Ep Name', air_date=datetime.date(2010, 11, 23)),
'Show.Name.2010.11.23.WEB-DL':
parser.ParseResult(None, 'Show Name', None, [], 'WEB-DL', None, datetime.date(2010, 11, 23)),
},
'uk_date_format': {
'Show.Name.23.11.2010.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Show Name', None, [], 'Source.Quality.Etc', 'Group', datetime.date(2010, 11, 23)),
'Show Name - 23.11.2010': parser.ParseResult(None, 'Show Name', air_date=datetime.date(2010, 11, 23)),
'Show.Name.11.23.2010.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Show Name', None, [], 'Source.Quality.Etc', 'Group', datetime.date(2010, 11, 23)),
'Show Name - 23-11-2010 - Ep Name':
parser.ParseResult(None, 'Show Name', extra_info='Ep Name', air_date=datetime.date(2010, 11, 23)),
'23-11-2010 - Ep Name': parser.ParseResult(None, extra_info='Ep Name', air_date=datetime.date(2010, 11, 23)),
'Show.Name.23.11.2010.WEB-DL':
parser.ParseResult(None, 'Show Name', None, [], 'WEB-DL', None, datetime.date(2010, 11, 23)),
},
'anime_ultimate': {
'[Tsuki] Bleach - 301 [1280x720][61D1D4EE]':
parser.ParseResult(None, 'Bleach', None, [], '1280x720', 'Tsuki', None, [301]),
'[Tsuki] Fairy Tail - 70 [1280x720][C4807111]':
parser.ParseResult(None, 'Fairy Tail', None, [], '1280x720', 'Tsuki', None, [70]),
'[SGKK] Bleach 312v2 [720p MKV]':
parser.ParseResult(None, 'Bleach', None, [], '720p MKV', 'SGKK', None, [312]),
'[BSS-Anon] Tengen Toppa Gurren Lagann - 22-23 [1280x720][h264][6039D9AF]':
parser.ParseResult(None, 'Tengen Toppa Gurren Lagann', None, [], '1280x720', 'BSS-Anon', None, [22, 23]),
'[SJSUBS]_Naruto_Shippuden_-_02_[480p AAC]':
parser.ParseResult(None, 'Naruto Shippuden', None, [], '480p AAC', 'SJSUBS', None, [2]),
'[SFW-Chihiro] Dance in the Vampire Bund - 12 [1920x1080 Blu-ray FLAC][2F6DBC66].mkv':
parser.ParseResult(
None, 'Dance in the Vampire Bund', None, [], '1920x1080 Blu-ray FLAC', 'SFW-Chihiro', None, [12]),
'[SHiN-gx] Hanasaku Iroha - 01 [1280x720 h.264 AAC][BDC36683]':
parser.ParseResult(None, 'Hanasaku Iroha', None, [], '1280x720 h.264 AAC', 'SHiN-gx', None, [1]),
'[SFW-Chihiro] Dance in the Vampire Bund - 02 [1920x1080 Blu-ray FLAC][C1FA0A09]':
parser.ParseResult(
None, 'Dance in the Vampire Bund', None, [], '1920x1080 Blu-ray FLAC', 'SFW-Chihiro', None, [2]),
'[HorribleSubs] No. 6 - 11 [720p]':
parser.ParseResult(None, 'No. 6', None, [], '720p', 'HorribleSubs', None, [11]),
'[HorribleSubs] D Gray-Man - 312 (480p) [F501C9BE]':
parser.ParseResult(None, 'D Gray-Man', None, [], '480p', 'HorribleSubs', None, [312]),
'[SGKK] Tengen Toppa Gurren Lagann - 45-46 (720p h264) [F501C9BE]':
parser.ParseResult(None, 'Tengen Toppa Gurren Lagann', None, [], '720p h264', 'SGKK', None, [45, 46]),
'[Stratos-Subs]_Infinite_Stratos_-_12_(1280x720_H.264_AAC)_[379759DB]':
parser.ParseResult(None, 'Infinite Stratos', None, [], '1280x720_H.264_AAC', 'Stratos-Subs', None, [12]),
'[ShinBunBu-Subs] Bleach - 02-03 (CX 1280x720 x264 AAC)':
parser.ParseResult(None, 'Bleach', None, [], 'CX 1280x720 x264 AAC', 'ShinBunBu-Subs', None, [2, 3]),
'[Doki] Hanasaku Iroha - 03 (848x480 h264 AAC) [CB1AA73B]':
parser.ParseResult(None, 'Hanasaku Iroha', None, [], '848x480 h264 AAC', 'Doki', None, [3]),
'[UTW]_Fractal_-_01_[h264-720p][96D3F1BF]':
parser.ParseResult(None, 'Fractal', None, [], 'h264-720p', 'UTW', None, [1]),
'[a-s]_inuyasha_-_028_rs2_[BFDDF9F2]':
parser.ParseResult(None, 'inuyasha', None, [], 'BFDDF9F2', 'a-s', None, [28]),
'[HorribleSubs] Fairy Tail S2 - 37 [1080p]':
parser.ParseResult(None, 'Fairy Tail S2', None, [], '1080p', 'HorribleSubs', None, [37]),
'[HorribleSubs] Sword Art Online II - 23 [720p]':
parser.ParseResult(None, 'Sword Art Online II', None, [], '720p', 'HorribleSubs', None, [23]),
},
'anime_standard': {
'[Cthuko] Shirobako - 05v2 [720p H264 AAC][80C9B09B]':
parser.ParseResult(None, 'Shirobako', None, [], '720p H264 AAC', 'Cthuko', None, [5]),
'[Ayako]_Minami-ke_Okaeri_-_01v2_[1024x576 H264+AAC][B1912CD8]':
parser.ParseResult(None, 'Minami-ke Okaeri', None, [], '1024x576 H264+AAC', 'Ayako', None, [1]),
'Show.Name.123-11001001': parser.ParseResult(None, 'Show Name', None, [], None, None, None, [123]),
},
'anime_ep_name': {
'[TzaTziki]_One_Piece_279_Chopper_Man_1_[720p][8AE5F25D]':
parser.ParseResult(None, 'One Piece', None, [], '720p', 'TzaTziki', None, [279]),
"[ACX]Wolf's_Rain_-_04_-_Scars_in_the_Wasteland_[octavarium]_[82B7E357]":
parser.ParseResult(None, "Wolf's Rain", None, [], 'octavarium', 'ACX', None, [4]),
'[ACX]Black Lagoon - 02v2 - Mangrove Heaven [SaintDeath] [7481F875]':
parser.ParseResult(None, 'Black Lagoon', None, [], 'SaintDeath', 'ACX', None, [2]),
},
'anime_standard_round': {
'[SGKK] Bleach - 312v2 (1280x720 h264 AAC) [F501C9BE]':
parser.ParseResult(None, 'Bleach', None, [], '1280x720 h264 AAC', 'SGKK', None, [312]),
},
'anime_slash': {
'[SGKK] Bleach 312v1 [720p/MKV]': parser.ParseResult(None, 'Bleach', None, [], '720p', 'SGKK', None, [312]),
'[SGKK] Bleach 312 [480p/MKV]': parser.ParseResult(None, 'Bleach', None, [], '480p', 'SGKK', None, [312])
},
'anime_standard_codec': {
'[Ayako]_Infinite_Stratos_-_IS_-_07_[H264][720p][EB7838FC]':
parser.ParseResult(None, 'Infinite Stratos', None, [], '720p', 'Ayako', None, [7]),
'[Ayako] Infinite Stratos - IS - 07v2 [H264][720p][44419534]':
parser.ParseResult(None, 'Infinite Stratos', None, [], '720p', 'Ayako', None, [7]),
'[Ayako-Shikkaku] Oniichan no Koto Nanka Zenzen Suki Janain Dakara ne - 10 [LQ][h264][720p] [8853B21C]':
parser.ParseResult(None, 'Oniichan no Koto Nanka Zenzen Suki Janain Dakara ne', None, [],
'720p', 'Ayako-Shikkaku', None, [10]),
'[Tsuki] Fairy Tail - 72 [XviD][C4807111]':
parser.ParseResult(None, 'Fairy Tail', None, [], 'C4807111', 'Tsuki', None, [72]),
'Bubblegum Crisis Tokyo 2040 - 25 [aX] [F4E2E558]':
parser.ParseResult(None, 'Bubblegum Crisis Tokyo 2040', None, [], 'aX', None, None, [25]),
},
'anime_and_normal': {
'Bleach - s02e03 - 012 - Name & Name': parser.ParseResult(None, 'Bleach', 2, [3], None, None, None, [12]),
'Bleach - s02e03e04 - 012-013 - Name & Name':
parser.ParseResult(None, 'Bleach', 2, [3, 4], None, None, None, [12, 13]),
'Bleach - s16e03-04 - 313-314': parser.ParseResult(None, 'Bleach', 16, [3, 4], None, None, None, [313, 314]),
'Blue Submarine No. 6 s16e03e04 313-314':
parser.ParseResult(None, 'Blue Submarine No. 6', 16, [3, 4], None, None, None, [313, 314]),
'Bleach.s16e03-04.313-314': parser.ParseResult(None, 'Bleach', 16, [3, 4], None, None, None, [313, 314]),
'.hack roots s01e01 001.mkv': parser.ParseResult(None, 'hack roots', 1, [1], None, None, None, [1]),
'.hack sign s01e01 001.mkv': parser.ParseResult(None, 'hack sign', 1, [1], None, None, None, [1])
},
'anime_and_normal_reverse': {
'Bleach - 012 - s02e03 - Name & Name': parser.ParseResult(None, 'Bleach', 2, [3], None, None, None, [12]),
'Blue Submarine No. 6 - 012-013 - s02e03e04 - Name & Name':
parser.ParseResult(None, 'Blue Submarine No. 6', 2, [3, 4], None, None, None, [12, 13]),
'07-GHOST - 012-013 - s02e03e04 - Name & Name':
parser.ParseResult(None, '07-GHOST', 2, [3, 4], None, None, None, [12, 13]),
'3x3 Eyes - 012-013 - s02e03-04 - Name & Name':
parser.ParseResult(None, '3x3 Eyes', 2, [3, 4], None, None, None, [12, 13]),
},
'anime_and_normal_front': {
'165.Naruto Shippuuden.s08e014':
parser.ParseResult(None, 'Naruto Shippuuden', 8, [14], None, None, None, [165]),
'165-166.Naruto Shippuuden.s08e014e015':
parser.ParseResult(None, 'Naruto Shippuuden', 8, [14, 15], None, None, None, [165, 166]),
'165-166.07-GHOST.s08e014-015': parser.ParseResult(None, '07-GHOST', 8, [14, 15], None, None, None, [165, 166]),
'165-166.3x3 Eyes.S08E014E015': parser.ParseResult(None, '3x3 Eyes', 8, [14, 15], None, None, None, [165, 166]),
},
'anime_bare': {
'One Piece 102': parser.ParseResult(None, 'One Piece', None, [], None, None, None, [102]),
'bleach - 010': parser.ParseResult(None, 'bleach', None, [], None, None, None, [10]),
'Naruto Shippuden - 314v2': parser.ParseResult(None, 'Naruto Shippuden', None, [], None, None, None, [314]),
'Blue Submarine No. 6 104-105':
parser.ParseResult(None, 'Blue Submarine No. 6', None, [], None, None, None, [104, 105]),
'Samurai X: Trust & Betrayal (OVA) 001-002':
parser.ParseResult(None, 'Samurai X: Trust & Betrayal (OVA)', None, [], None, None, None, [1, 2]),
"[ACX]_Wolf's_Spirit_001.mkv": parser.ParseResult(None, "Wolf's Spirit", None, [], None, 'ACX', None, [1])
}
}
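# Hedged sketch (not part of the original fixtures) of how the dictionary
# above is consumed by BasicTests._test_names further down; the section name
# is real but the loop itself is illustrative only:
#
#   np = parser.NameParser(False, testing=True)
#   for name, expected in simple_test_cases['season_only'].items():
#       assert np.parse(name) == expected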
combination_test_cases = [
('/test/path/to/Season 02/03 - Ep Name.avi',
parser.ParseResult(None, None, 2, [3], 'Ep Name'),
['no_season', 'season_only']),
('Show.Name.S02.Source.Quality.Etc-Group/tpz-sn203.avi',
parser.ParseResult(None, 'Show Name', 2, [3], 'Source.Quality.Etc', 'Group'),
['stupid', 'season_only']),
('MythBusters.S08E16.720p.HDTV.x264-aAF/aaf-mb.s08e16.720p.mkv',
parser.ParseResult(None, 'MythBusters', 8, [16], '720p.HDTV.x264', 'aAF'),
['standard']),
('/home/drop/storage/TV/Terminator The Sarah Connor Chronicles' +
'/Season 2/S02E06 The Tower is Tall, But the Fall is Short.mkv',
parser.ParseResult(None, None, 2, [6], 'The Tower is Tall, But the Fall is Short'),
['standard']),
(r'/Test/TV/Jimmy Fallon/Season 2/Jimmy Fallon - 2010-12-15 - blah.avi',
parser.ParseResult(None, 'Jimmy Fallon', extra_info='blah', air_date=datetime.date(2010, 12, 15)),
['scene_date_format']),
(r'/X/30 Rock/Season 4/30 Rock - 4x22 -.avi',
parser.ParseResult(None, '30 Rock', 4, [22]),
['fov']),
('Season 2\\Show Name - 03-04 - Ep Name.ext',
parser.ParseResult(None, 'Show Name', 2, [3, 4], extra_info='Ep Name'),
['no_season', 'season_only']),
('Season 02\\03-04-05 - Ep Name.ext',
parser.ParseResult(None, None, 2, [3, 4, 5], extra_info='Ep Name'),
['no_season', 'season_only']),
]
unicode_test_cases = [
(u'The.Big.Bang.Theory.2x07.The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv',
parser.ParseResult(
u'The.Big.Bang.Theory.2x07.The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv',
u'The Big Bang Theory', 2, [7], u'The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3', u'SHELDON',
version=-1)
),
('The.Big.Bang.Theory.2x07.The.Panty.Pi\xc3\xb1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv',
parser.ParseResult(
u'The.Big.Bang.Theory.2x07.The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv',
u'The Big Bang Theory', 2, [7], u'The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3', u'SHELDON',
version=-1)
),
]
failure_cases = ['7sins-jfcs01e09-720p-bluray-x264']
class UnicodeTests(test.SickbeardTestDBCase):
def _test_unicode(self, name, result):
result.which_regex = ['fov']
parse_result = parser.NameParser(True, testing=True).parse(name)
self.assertEqual(parse_result, result)
# this shouldn't raise an exception
void = repr(str(parse_result))
void += ''
def test_unicode(self):
for (name, result) in unicode_test_cases:
self._test_unicode(name, result)
class FailureCaseTests(test.SickbeardTestDBCase):
@staticmethod
def _test_name(name):
np = parser.NameParser(True)
try:
parse_result = np.parse(name)
except (parser.InvalidNameException, parser.InvalidShowException):
return True
if VERBOSE:
print('Actual: ', parse_result.which_regex, parse_result)
return False
def test_failures(self):
for name in failure_cases:
self.assertTrue(self._test_name(name))
class ComboTests(test.SickbeardTestDBCase):
def _test_combo(self, name, result, which_regexes):
if VERBOSE:
print()
print('Testing', name)
np = parser.NameParser(True)
try:
test_result = np.parse(name)
except parser.InvalidShowException:
return False
if DEBUG:
print(test_result, test_result.which_regex)
print(result, which_regexes)
self.assertEqual(test_result, result)
for cur_regex in which_regexes:
self.assertTrue(cur_regex in test_result.which_regex)
self.assertEqual(len(which_regexes), len(test_result.which_regex))
def test_combos(self):
for (name, result, which_regexes) in combination_test_cases:
# Normalise the paths. Converts UNIX-style paths into Windows-style
# paths when test is run on Windows.
self._test_combo(os.path.normpath(name), result, which_regexes)
class BasicTests(test.SickbeardTestDBCase):
def _test_names(self, np, section, transform=None, verbose=False):
if VERBOSE or verbose:
print('Running', section, 'tests')
for cur_test_base in simple_test_cases[section]:
if transform:
cur_test = transform(cur_test_base)
else:
cur_test = cur_test_base
if VERBOSE or verbose:
print('Testing', cur_test)
result = simple_test_cases[section][cur_test_base]
if not result:
self.assertRaises(parser.InvalidNameException, np.parse, cur_test)
                continue
else:
test_result = np.parse(cur_test)
try:
# self.assertEqual(test_result.which_regex, [section])
self.assertEqual(test_result, result)
except:
print('air_by_date:', test_result.is_air_by_date, 'air_date:', test_result.air_date)
print('anime:', test_result.is_anime, 'ab_episode_numbers:', test_result.ab_episode_numbers)
print(test_result)
print(result)
raise
def test_standard_names(self):
np = parser.NameParser(False, testing=True)
self._test_names(np, 'standard')
def test_standard_repeat_names(self):
np = parser.NameParser(False, testing=True)
self._test_names(np, 'standard_repeat')
def test_fov_names(self):
np = parser.NameParser(False, testing=True)
self._test_names(np, 'fov')
def test_fov_repeat_names(self):
np = parser.NameParser(False, testing=True)
self._test_names(np, 'fov_repeat')
def test_bare_names(self):
np = parser.NameParser(False, testing=True)
self._test_names(np, 'bare')
def test_stupid_names(self):
np = parser.NameParser(False, testing=True)
self._test_names(np, 'stupid')
def test_no_season_names(self):
np = parser.NameParser(False, testing=True)
self._test_names(np, 'no_season')
def test_no_season_general_names(self):
np = parser.NameParser(False, testing=True)
self._test_names(np, 'no_season_general')
def test_no_season_multi_ep_names(self):
np = parser.NameParser(False, testing=True)
self._test_names(np, 'no_season_multi_ep')
def test_season_only_names(self):
np = parser.NameParser(False, testing=True)
self._test_names(np, 'season_only')
def test_scene_date_format_names(self):
np = parser.NameParser(False, testing=True)
self._test_names(np, 'scene_date_format')
def test_uk_date_format_names(self):
np = parser.NameParser(False, testing=True)
self._test_names(np, 'uk_date_format')
def test_standard_file_names(self):
np = parser.NameParser(testing=True)
self._test_names(np, 'standard', lambda x: x + '.avi')
def test_standard_repeat_file_names(self):
np = parser.NameParser(testing=True)
self._test_names(np, 'standard_repeat', lambda x: x + '.avi')
def test_fov_file_names(self):
np = parser.NameParser(testing=True)
self._test_names(np, 'fov', lambda x: x + '.avi')
def test_fov_repeat_file_names(self):
np = parser.NameParser(testing=True)
self._test_names(np, 'fov_repeat', lambda x: x + '.avi')
def test_bare_file_names(self):
np = parser.NameParser(testing=True)
self._test_names(np, 'bare', lambda x: x + '.avi')
def test_stupid_file_names(self):
np = parser.NameParser(testing=True)
self._test_names(np, 'stupid', lambda x: x + '.avi')
def test_no_season_file_names(self):
np = parser.NameParser(testing=True)
self._test_names(np, 'no_season', lambda x: x + '.avi')
def test_no_season_general_file_names(self):
np = parser.NameParser(testing=True)
self._test_names(np, 'no_season_general', lambda x: x + '.avi')
def test_no_season_multi_ep_file_names(self):
np = parser.NameParser(testing=True)
self._test_names(np, 'no_season_multi_ep', lambda x: x + '.avi')
def test_season_only_file_names(self):
np = parser.NameParser(testing=True)
self._test_names(np, 'season_only', lambda x: x + '.avi')
def test_scene_date_format_file_names(self):
np = parser.NameParser(testing=True)
self._test_names(np, 'scene_date_format', lambda x: x + '.avi')
def test_combination_names(self):
pass
def test_anime_ultimate(self):
np = parser.NameParser(False, TVShow(is_anime=True), testing=True)
self._test_names(np, 'anime_ultimate')
def test_anime_standard(self):
np = parser.NameParser(False, TVShow(is_anime=True), testing=True)
self._test_names(np, 'anime_standard')
def test_anime_ep_name(self):
np = parser.NameParser(False, TVShow(is_anime=True), testing=True)
self._test_names(np, 'anime_ep_name')
def test_anime_slash(self):
np = parser.NameParser(False, TVShow(is_anime=True), testing=True)
self._test_names(np, 'anime_slash')
def test_anime_codec(self):
np = parser.NameParser(False, TVShow(is_anime=True), testing=True)
self._test_names(np, 'anime_standard_codec')
def test_anime_and_normal(self):
np = parser.NameParser(False, TVShow(is_anime=True), testing=True)
self._test_names(np, 'anime_and_normal')
def test_anime_and_normal_reverse(self):
np = parser.NameParser(False, TVShow(is_anime=True), testing=True)
self._test_names(np, 'anime_and_normal_reverse')
def test_anime_and_normal_front(self):
np = parser.NameParser(False, TVShow(is_anime=True), testing=True)
self._test_names(np, 'anime_and_normal_front')
def test_anime_bare(self):
np = parser.NameParser(False, TVShow(is_anime=True), testing=True)
self._test_names(np, 'anime_bare')
class TVShow(object):
def __init__(self, is_anime=False):
self.is_anime = is_anime
if __name__ == '__main__':
if len(sys.argv) > 1:
suite = unittest.TestLoader().loadTestsFromName('name_parser_tests.BasicTests.test_' + sys.argv[1])
else:
suite = unittest.TestLoader().loadTestsFromTestCase(BasicTests)
unittest.TextTestRunner(verbosity=2).run(suite)
suite = unittest.TestLoader().loadTestsFromTestCase(ComboTests)
unittest.TextTestRunner(verbosity=2).run(suite)
suite = unittest.TestLoader().loadTestsFromTestCase(UnicodeTests)
unittest.TextTestRunner(verbosity=2).run(suite)
suite = unittest.TestLoader().loadTestsFromTestCase(FailureCaseTests)
unittest.TextTestRunner(verbosity=2).run(suite)
| gpl-3.0 | -7,490,338,629,385,833,000 | 2,978,720,282,582,932,000 | 47.991468 | 120 | 0.600718 | false |
fbsder/openthread | tests/scripts/thread-cert/Cert_5_2_07_REEDSynchronization.py | 2 | 4471 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import time
import unittest
import ipv6
import node
import mle
import config
import command
LEADER = 1
DUT_ROUTER1 = 2
DUT_REED = 17
class Cert_5_2_7_REEDSynchronization(unittest.TestCase):
def setUp(self):
self.nodes = {}
for i in range(1, 18):
self.nodes[i] = node.Node(i)
self.nodes[i].set_panid(0xface)
self.nodes[i].set_mode('rsdn')
self.nodes[i].set_router_selection_jitter(1)
self.sniffer = config.create_default_thread_sniffer()
self.sniffer.start()
def tearDown(self):
self.sniffer.stop()
del self.sniffer
for node in list(self.nodes.values()):
node.stop()
del self.nodes
def test(self):
# 1. Ensure topology is formed correctly without DUT_ROUTER1.
self.nodes[LEADER].start()
self.nodes[LEADER].set_state('leader')
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
for i in range(2, 17):
self.nodes[i].start()
time.sleep(5)
for i in range(2, 17):
self.assertEqual(self.nodes[i].get_state(), 'router')
# 2. DUT_REED: Attach to network. Verify it didn't send an Address Solicit Request.
# Avoid DUT_REED attach to DUT_ROUTER1.
self.nodes[DUT_REED].add_whitelist(self.nodes[DUT_ROUTER1].get_addr64(), config.RSSI['LINK_QULITY_1'])
self.nodes[DUT_REED].start()
time.sleep(5)
self.assertEqual(self.nodes[DUT_REED].get_state(), 'child')
# The DUT_REED must not send a coap message here.
reed_messages = self.sniffer.get_messages_sent_by(DUT_REED)
msg = reed_messages.does_not_contain_coap_message()
assert msg is True, "Error: The DUT_REED sent an Address Solicit Request"
# 3. DUT_REED: Verify sent a Link Request to at least 3 neighboring Routers.
for i in range(0, 3):
msg = reed_messages.next_mle_message(mle.CommandType.LINK_REQUEST)
command.check_link_request(msg)
# 4. DUT_ROUTER1: Verify sent a Link Accept to DUT_REED.
time.sleep(30)
dut_messages = self.sniffer.get_messages_sent_by(DUT_ROUTER1)
flag_link_accept = False
while True:
msg = dut_messages.next_mle_message(mle.CommandType.LINK_ACCEPT, False)
            if msg is None:
break
destination_link_local = self.nodes[DUT_REED].get_ip6_address(config.ADDRESS_TYPE.LINK_LOCAL)
if ipv6.ip_address(destination_link_local) == msg.ipv6_packet.ipv6_header.destination_address:
flag_link_accept = True
break
assert flag_link_accept is True, "Error: DUT_ROUTER1 didn't send a Link Accept to DUT_REED"
command.check_link_accept(msg, self.nodes[DUT_REED])
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 1,490,963,509,928,216,600 | 7,431,917,430,433,434,000 | 38.566372 | 110 | 0.671438 | false |
ganga-devs/ganga | ganga/GangaDirac/Lib/Server/DiracCommands.py | 1 | 18300 | # Dirac commands
#/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
@diracCommand
def getJobGroupJobs(jg):
''' Return jobs in a group'''
return dirac.selectJobs(jobGroup=jg)
@diracCommand
def kill(id):
''' Kill a given DIRAC Job ID within DIRAC '''
return dirac.deleteJob(id)
@diracCommand
def peek(id):
''' Peek at the DIRAC Job id and return what we saw '''
return dirac.peekJob(id)
@diracCommand
def getJobCPUTime(id):
''' Get the amount of CPU time taken by the DIRAC Job id'''
return dirac.getJobCPUTime(id)
@diracCommand
def reschedule(id):
''' Reschedule within DIRAC a given DIRAC Job id'''
return dirac.reschedule(id)
@diracCommand
def submit(djob, mode='wms'):
''' Submit a DIRAC job given by the jdl:djob with a given mode '''
return dirac.submitJob(djob, mode=mode)
@diracCommand
def ping(system, service):
''' Ping a given service on a given system running DIRAC '''
return dirac.ping(system, service)
@diracCommand
def removeFile(lfn):
''' Remove a given LFN from the DFC'''
ret = {}
    if isinstance(lfn, list):
for l in lfn:
ret.update(dirac.removeFile(l))
else:
ret.update(dirac.removeFile(lfn))
return ret
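# Hedged usage sketch (not part of the original module): removeFile accepts
# either a single LFN string or a list of LFNs; the paths below are
# hypothetical, not real grid files.
#
#   removeFile('/some/vo/user/a/auser/old_output.root')
#   removeFile(['/some/vo/user/a/auser/f1.root',
#               '/some/vo/user/a/auser/f2.root'])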
@diracCommand
def getMetadata(lfn):
    ''' Return the metadata associated with a given LFN'''
return dirac.getLfnMetadata(lfn)
@diracCommand
def getReplicas(lfns):
''' Return the locations of the replicas of a given LFN in a dict format, SE: location '''
return dirac.getReplicas(lfns, active=True, preferDisk = True)
@diracCommand
def getReplicasForJobs(lfns):
''' Return the locations of the replicas of a given LFN in a dict format, SE: location.
        This is for use in the splitter so that replicas at SEs which are not to be used for user jobs are ignored '''
return dirac.getReplicasForJobs(lfns)
@diracCommand
def getAccessURL(lfn, SE, protocol=False):
''' Return the access URL for the given LFN, storage element and protocol. The protocol should be in the form of a list '''
return dirac.getAccessURL(lfn, SE, False, protocol)
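# Hedged usage sketch: the docstring above asks for the protocol as a list;
# the LFN, SE name and protocols below are assumptions for illustration only.
#
#   url = getAccessURL('/some/vo/user/a/auser/f1.root', 'SOME-SE',
#                      protocol=['xroot', 'root'])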
@diracCommand
def getFile(lfns, destDir=''):
''' Put the physical file behind the LFN in the destDir path'''
return dirac.getFile(lfns, destDir=destDir)
@diracCommand
def replicateFile(lfn, destSE, srcSE='', locCache=''):
''' Replicate a given LFN from a srcSE to a destSE'''
res = dirac.replicateFile(lfn, destSE, srcSE, locCache)
return res
@diracCommand
def removeReplica(lfn, sE):
''' Remove the physical files and LFN from the DFC'''
return dirac.removeReplica(lfn, sE)
@diracCommand
def getOutputData(id, outputFiles='', destinationDir=''):
    ''' Return the output data of a requested DIRAC Job id, placing outputFiles in a given destinationDir '''
return dirac.getJobOutputData(id, outputFiles, destinationDir)
@diracCommand
def splitInputData(files, files_per_job):
    ''' Split a list of files into a list of lists of smaller files (each below files_per_job in length) and return the list of lists'''
return dirac.splitInputData(files, files_per_job)
@diracCommand
def getInputDataCatalog(lfns, site, xml_file):
''' Get the XML describing the given LFNs at a given site'''
return dirac.getInputDataCatalog(lfns, site, xml_file)
@diracCommand
def uploadFile(lfn, file, diracSEs, guid=None):
    ''' Upload a local file to an LFN, trying each SE in diracSEs in turn until the upload succeeds. Use the given guid if provided'''
outerr = {}
for se in diracSEs:
result = dirac.addFile(lfn, file, se, guid)
if result.get('OK', False) and lfn in result.get('Value', {'Successful': {}})['Successful']:
result['Value']['Successful'][lfn].update({'DiracSE': se})
md = dirac.getLfnMetadata(lfn)
if md.get('OK', False) and lfn in md.get('Value', {'Successful': {}})['Successful']:
guid = md['Value']['Successful'][lfn]['GUID']
result['Value']['Successful'][lfn].update({'GUID': guid})
return result
outerr.update({se: result})
return outerr
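# Hedged usage sketch: uploadFile tries each SE in turn and returns on the
# first successful registration, otherwise it returns the per-SE errors.
# The LFN, local path and SE names below are hypothetical.
#
#   result = uploadFile('/some/vo/user/a/auser/f1.root', '/tmp/f1.root',
#                       ['SOME-SE-A', 'SOME-SE-B'])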
@diracCommand
def addFile(lfn, file, diracSE, guid):
''' Upload a given file to an lfn with 1 replica places at each element in diracSEs. Use a given guid if given'''
return dirac.addFile(lfn, file, diracSE, guid)
@diracCommand
def getOutputSandbox(id, outputDir=os.getcwd(), unpack=True, oversized=True, noJobDir=True, pipe_out=True):
'''
    Get the output sandbox and return the output from Dirac to the calling function
    id: the DIRAC jobid of interest
    outputDir: local output directory on disk to use
    unpack: should the sandbox be untarred when downloaded
    oversized: is this output sandbox oversized; this will be modified
    noJobDir: should we create a folder with the DIRAC job ID?
    pipe_out: should the output be piped back (True) or returned as a python object (False)'''
result = dirac.getOutputSandbox(id, outputDir, oversized, noJobDir, unpack)
if result is not None and result.get('OK', False):
if not noJobDir:
tmpdir = os.path.join(outputDir, str(id))
os.system('mv -f %s/* %s/. ; rm -rf %s' % (tmpdir, outputDir, tmpdir))
os.system('for file in $(ls %s/*Ganga_*.log); do ln -s ${file} %s/stdout; break; done' % (outputDir, outputDir))
#So the download failed. Maybe the sandbox was oversized and stored on the grid. Check in the job parameters and download it
else:
parameters = dirac.getJobParameters(id)
if parameters is not None and parameters.get('OK', False):
parameters = parameters['Value']
if 'OutputSandboxLFN' in parameters:
result = dirac.getFile(parameters['OutputSandboxLFN'], destDir=outputDir)
dirac.removeFile(parameters['OutputSandboxLFN'])
return result
@diracCommand
def getOutputDataInfo(id, pipe_out=True):
''' Get information on the output data generated by a job of ID and pipe it out or return it'''
ret = {}
result = getOutputDataLFNs(id, pipe_out=False)
if result.get('OK', False) and 'Value' in result:
for lfn in result.get('Value', []):
file_name = os.path.basename(lfn)
ret[file_name] = {}
ret[file_name]['LFN'] = lfn
md = dirac.getLfnMetadata(lfn)
if md.get('OK', False) and lfn in md.get('Value', {'Successful': {}})['Successful']:
ret[file_name]['GUID'] = md['Value']['Successful'][lfn]['GUID']
# this catches if fail upload, note lfn still exists in list as
# dirac tried it
elif md.get('OK', False) and lfn in md.get('Value', {'Failed': {}})['Failed']:
ret[file_name]['LFN'] = '###FAILED###'
ret[file_name]['LOCATIONS'] = md['Value']['Failed'][lfn]
ret[file_name]['GUID'] = 'NotAvailable'
continue
rp = dirac.getReplicas(lfn)
if rp.get('OK', False) and lfn in rp.get('Value', {'Successful': {}})['Successful']:
ret[file_name]['LOCATIONS'] = rp['Value']['Successful'][lfn].keys()
return ret
# could shrink this with dirac.getJobOutputLFNs from ##dirac
@diracCommand
def getOutputDataLFNs(id, pipe_out=True):
''' Get the outputDataLFN which have been generated by a Dirac job of ID and pipe it out or return it'''
parameters = dirac.getJobParameters(id)
lfns = []
ok = False
message = 'The outputdata LFNs could not be found.'
if parameters is not None and parameters.get('OK', False):
parameters = parameters['Value']
# remove the sandbox if it has been uploaded
sandbox = None
if 'OutputSandboxLFN' in parameters:
sandbox = parameters['OutputSandboxLFN']
# now find out about the outputdata
if 'UploadedOutputData' in parameters:
lfn_list = parameters['UploadedOutputData']
import re
lfns = re.split(',\s*', lfn_list)
if sandbox is not None and sandbox in lfns:
lfns.remove(sandbox)
ok = True
elif parameters is not None and 'Message' in parameters:
message = parameters['Message']
result = {'OK': ok}
if ok:
result['Value'] = lfns
else:
result['Message'] = message
return result
@diracCommand
def normCPUTime(id, pipe_out=True):
    ''' Get the normalised CPU time that has been used by a DIRAC job of ID and pipe it out or return it'''
parameters = dirac.getJobParameters(id)
ncput = None
if parameters is not None and parameters.get('OK', False):
parameters = parameters['Value']
if 'NormCPUTime(s)' in parameters:
ncput = parameters['NormCPUTime(s)']
return ncput
@diracCommand
def finished_job(id, outputDir=os.getcwd(), unpack=True, oversized=True, noJobDir=True, downloadSandbox = True):
    ''' Nested function to reduce the number of calls made against DIRAC when finalising a job; takes the same arguments as getOutputSandbox.
        Returns a tuple of the normalised CPU time, the output sandbox result, the output data information and a dict of state times'''
out_cpuTime = normCPUTime(id, pipe_out=False)
if downloadSandbox:
out_sandbox = getOutputSandbox(id, outputDir, unpack, oversized, noJobDir, pipe_out=False)
else:
out_sandbox = None
out_dataInfo = getOutputDataInfo(id, pipe_out=False)
outStateTime = {'completed' : getStateTime(id, 'completed', pipe_out=False)}
return (out_cpuTime, out_sandbox, out_dataInfo, outStateTime)
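# Hedged usage sketch: finished_job returns a 4-tuple which the caller is
# expected to unpack; the job id and output directory below are hypothetical.
#
#   cpu_time, sandbox, data_info, state_times = finished_job(12345678,
#                                                            outputDir='/tmp/j1')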
@diracCommand
def finaliseJobs(inputDict, statusmapping, downloadSandbox=True, oversized=True, noJobDir=True):
    ''' A function to gather everything needed to finalise a whole batch of jobs. Returns a dict of per-job information and the bulk job status result.'''
returnDict = {}
statusList = dirac.getJobStatus(list(inputDict))
for diracID in inputDict:
returnDict[diracID] = {}
returnDict[diracID]['cpuTime'] = normCPUTime(diracID, pipe_out=False)
if downloadSandbox:
            # use keyword arguments so the optional parameters of getOutputSandbox line up correctly
            returnDict[diracID]['outSandbox'] = getOutputSandbox(diracID, inputDict[diracID], oversized=oversized, noJobDir=noJobDir, pipe_out=False)
else:
returnDict[diracID]['outSandbox'] = None
returnDict[diracID]['outDataInfo'] = getOutputDataInfo(diracID, pipe_out=False)
returnDict[diracID]['outStateTime'] = {'completed' : getStateTime(diracID, 'completed', pipe_out=False)}
return returnDict, statusList
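# Hedged usage sketch: the first return value is keyed by DIRAC job id, the
# second is whatever dirac.getJobStatus returned for the bulk query. The job
# id, path and status_map mapping below are made up for illustration.
#
#   per_job, bulk_status = finaliseJobs({12345678: '/tmp/j1'}, status_map)
#   sandbox_result = per_job[12345678]['outSandbox']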
@diracCommand
def status(job_ids, statusmapping, pipe_out=True):
    '''Function to check the statuses and return the Ganga status of a job after mapping its DIRAC status to a Ganga one'''
# Translate between the many statuses in DIRAC and the few in Ganga
#return {'OK':True, 'Value':[['WIP', 'WIP', 'WIP', 'WIP', 'WIP']]}
result = dirac.getJobStatus(job_ids)
if not result['OK']:
return result
status_list = []
bulk_status = result['Value']
for _id in job_ids:
job_status = bulk_status.get(_id, {})
minor_status = job_status.get('MinorStatus', None)
dirac_status = job_status.get('Status', None)
dirac_site = job_status.get('Site', None)
ganga_status = statusmapping.get(dirac_status, None)
if ganga_status is None:
ganga_status = 'failed'
dirac_status = 'Unknown: No status for Job'
#if dirac_status == 'Completed' and (minor_status not in ['Pending Requests']):
# ganga_status = 'running'
if minor_status in ['Uploading Output Data']:
ganga_status = 'running'
try:
from DIRAC.Core.DISET.RPCClient import RPCClient
monitoring = RPCClient('WorkloadManagement/JobMonitoring')
app_status = monitoring.getJobAttributes(_id)['Value']['ApplicationStatus']
except:
app_status = "unknown ApplicationStatus"
status_list.append([minor_status, dirac_status, dirac_site, ganga_status, app_status])
return status_list
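# Hedged usage sketch: each entry of the returned list is
# [minor_status, dirac_status, dirac_site, ganga_status, app_status], in the
# same order as job_ids; the mapping below is illustrative, not the real one.
#
#   mapping = {'Done': 'completed', 'Failed': 'failed', 'Running': 'running'}
#   for minor, dirac_stat, site, ganga_stat, app in status([123, 456], mapping):
#       print(ganga_stat, site)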
@diracCommand
def getStateTime(id, status, pipe_out=True):
    ''' Return the state time from DIRAC corresponding to DIRACJob transitions'''
log = dirac.getJobLoggingInfo(id)
if 'Value' not in log:
return None
L = log['Value']
checkstr = ''
if status == 'running':
checkstr = 'Running'
elif status == 'completed':
checkstr = 'Done'
elif status == 'completing':
checkstr = 'Completed'
elif status == 'failed':
checkstr = 'Failed'
else:
checkstr = ''
if checkstr == '':
print("%s" % None)
return
for l in L:
if checkstr in l[0]:
T = datetime.datetime(*(time.strptime(l[3], "%Y-%m-%d %H:%M:%S")[0:6]))
return T
return None
@diracCommand
def getBulkStateTime(job_ids, status, pipe_out=True):
    ''' Function to repeatedly call getStateTime for multiple Dirac Job ids and return the results in a dictionary '''
result = {}
for this_id in job_ids:
result[this_id] = getStateTime(this_id, status, pipe_out=False)
return result
@diracCommand
def monitorJobs(job_ids, status_mapping, pipe_out=True):
''' This combines 'status' and 'getBulkStateTime' into 1 function call for monitoring
'''
status_info = status(job_ids, status_mapping, pipe_out=False)
state_job_status = {}
for job_id, this_stat_info in zip(job_ids, status_info):
if this_stat_info:
update_status = this_stat_info[3]
if update_status not in state_job_status:
state_job_status[update_status] = []
state_job_status[update_status].append(job_id)
state_info = {}
for this_status, these_jobs in state_job_status.items():
state_info[this_status] = getBulkStateTime(these_jobs, this_status, pipe_out=False)
return (status_info, state_info)
@diracCommand
def timedetails(id):
''' Function to return the getJobLoggingInfo for a DIRAC Job of id'''
log = dirac.getJobLoggingInfo(id)
d = {}
for i in range(0, len(log['Value'])):
d[i] = log['Value'][i]
return d
# DiracAdmin commands
#/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
@diracCommand
def getJobPilotOutput(id, dir):
''' Get the output of the DIRAC pilot that this job was running on and place it in dir'''
pwd = os.getcwd()
try:
os.chdir(dir)
os.system('rm -f pilot_%d/std.out && rmdir pilot_%d ' % (id, id))
result = DiracAdmin().getJobPilotOutput(id)
finally:
os.chdir(pwd)
return result
@diracCommand
def getServicePorts():
''' Get the service ports from the DiracAdmin based upon the Dirac config'''
return DiracAdmin().getServicePorts()
@diracCommand
def isSEArchive(se):
''' Ask if the specified SE is for archive '''
from DIRAC.DataManagementSystem.Utilities.DMSHelpers import DMSHelpers
return DMSHelpers().isSEArchive(se)
@diracCommand
def getSitesForSE(se):
''' Get the Sites associated with this SE'''
from DIRAC.Core.Utilities.SiteSEMapping import getSitesForSE
result = getSitesForSE(storageElement=se)
return result
@diracCommand
def getSEsForSite(site):
''' Get the list of SE associated with this site'''
from DIRAC.Core.Utilities.SiteSEMapping import getSEsForSite
result = getSEsForSite(site)
return result
@diracCommand
def getSESiteMapping():
'''Get the mapping of SEs and sites'''
from DIRAC.Core.Utilities.SiteSEMapping import getSESiteMapping
result = getSESiteMapping()
return result
@diracCommand
def checkSEStatus(se, access = 'Write'):
''' returns the value of a certain SE status flag (access or other)
param se: Storage Element name
type se: string
param access: type of access
type access: string in ('Read', 'Write', 'Remove', 'Check')
returns: True or False
'''
result = dirac.checkSEAccess(se, access)
return result
@diracCommand
def listFiles(baseDir, minAge = None):
''' Return a list of LFNs for files stored on the grid in the argument
directory and its subdirectories
param baseDir: Top directory to begin search
type baseDir: string
param minAge: minimum age of files to be returned
type minAge: string format: "W:D:H"
'''
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
fc = FileCatalog()
from datetime import datetime, timedelta
withMetaData = False
cutoffTime = datetime.utcnow()
import re
    r = re.compile(r'\d:\d:\d')
    if minAge and r.match(minAge):
withMetaData = True
timeList = minAge.split(':')
timeLimit = timedelta(weeks = int(timeList[0]), days = int(timeList[1]), hours = int(timeList[2]))
cutoffTime = datetime.utcnow() - timeLimit
baseDir = baseDir.rstrip('/')
activeDirs = [baseDir]
allFiles = []
emptyDirs = []
while len(activeDirs) > 0:
currentDir = activeDirs.pop()
res = fc.listDirectory(currentDir, withMetaData, timeout = 360)
if not res['OK']:
return "Error retrieving directory contents", "%s %s" % ( currentDir, res['Message'] )
elif currentDir in res['Value']['Failed']:
return "Error retrieving directory contents", "%s %s" % ( currentDir, res['Value']['Failed'][currentDir] )
else:
dirContents = res['Value']['Successful'][currentDir]
subdirs = dirContents['SubDirs']
files = dirContents['Files']
if not subdirs and not files:
emptyDirs.append( currentDir )
else:
for subdir in sorted( subdirs, reverse=True):
if (not withMetaData) or subdirs[subdir]['CreationDate'] < cutoffTime:
activeDirs.append(subdir)
for filename in sorted(files):
fileOK = False
if (not withMetaData) or files[filename]['MetaData']['CreationDate'] < cutoffTime:
fileOK = True
if not fileOK:
files.pop(filename)
allFiles += sorted(files)
return allFiles
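# Hedged usage sketch: minAge uses the "W:D:H" format described in the
# docstring; the base directory below is hypothetical.
#
#   old_files = listFiles('/some/vo/user/a/auser', minAge='2:0:0')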
| gpl-2.0 | -3,379,689,280,473,649,000 | -6,635,477,259,329,841,000 | 35.094675 | 139 | 0.640109 | false |
abimannans/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 206 | 1800 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision tree.
The :ref:`decision trees <tree>`
are used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, they learn local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
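# A small, hedged extension of the example above (not part of the original
# script): each prediction is a 2-element vector, one value per output.
# The query point x=0.5 is arbitrary.
sample = np.array([[0.5]])
print("max_depth=2 prediction for x=0.5:", regr_1.predict(sample)[0])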
| bsd-3-clause | 8,087,290,840,421,110,000 | -6,161,845,567,439,300,000 | 31.727273 | 76 | 0.637778 | false |
hazelnusse/sympy-old | sympy/thirdparty/pyglet/pyglet/window/carbon/quartzkey.py | 5 | 6154 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2007 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = ''
from pyglet.window import key
# From SDL: src/video/quartz/SDL_QuartzKeys.h
# These are the Macintosh key scancode constants -- from Inside Macintosh
QZ_ESCAPE = 0x35
QZ_F1 = 0x7A
QZ_F2 = 0x78
QZ_F3 = 0x63
QZ_F4 = 0x76
QZ_F5 = 0x60
QZ_F6 = 0x61
QZ_F7 = 0x62
QZ_F8 = 0x64
QZ_F9 = 0x65
QZ_F10 = 0x6D
QZ_F11 = 0x67
QZ_F12 = 0x6F
QZ_PRINT = 0x69
QZ_SCROLLOCK = 0x6B
QZ_PAUSE = 0x71
QZ_POWER = 0x7F
QZ_BACKQUOTE = 0x32
QZ_1 = 0x12
QZ_2 = 0x13
QZ_3 = 0x14
QZ_4 = 0x15
QZ_5 = 0x17
QZ_6 = 0x16
QZ_7 = 0x1A
QZ_8 = 0x1C
QZ_9 = 0x19
QZ_0 = 0x1D
QZ_MINUS = 0x1B
QZ_EQUALS = 0x18
QZ_BACKSPACE = 0x33
QZ_INSERT = 0x72
QZ_HOME = 0x73
QZ_PAGEUP = 0x74
QZ_NUMLOCK = 0x47
QZ_KP_EQUALS = 0x51
QZ_KP_DIVIDE = 0x4B
QZ_KP_MULTIPLY = 0x43
QZ_TAB = 0x30
QZ_q = 0x0C
QZ_w = 0x0D
QZ_e = 0x0E
QZ_r = 0x0F
QZ_t = 0x11
QZ_y = 0x10
QZ_u = 0x20
QZ_i = 0x22
QZ_o = 0x1F
QZ_p = 0x23
QZ_LEFTBRACKET = 0x21
QZ_RIGHTBRACKET = 0x1E
QZ_BACKSLASH = 0x2A
QZ_DELETE = 0x75
QZ_END = 0x77
QZ_PAGEDOWN = 0x79
QZ_KP7 = 0x59
QZ_KP8 = 0x5B
QZ_KP9 = 0x5C
QZ_KP_MINUS = 0x4E
QZ_CAPSLOCK = 0x39
QZ_a = 0x00
QZ_s = 0x01
QZ_d = 0x02
QZ_f = 0x03
QZ_g = 0x05
QZ_h = 0x04
QZ_j = 0x26
QZ_k = 0x28
QZ_l = 0x25
QZ_SEMICOLON = 0x29
QZ_QUOTE = 0x27
QZ_RETURN = 0x24
QZ_KP4 = 0x56
QZ_KP5 = 0x57
QZ_KP6 = 0x58
QZ_KP_PLUS = 0x45
QZ_LSHIFT = 0x38
QZ_z = 0x06
QZ_x = 0x07
QZ_c = 0x08
QZ_v = 0x09
QZ_b = 0x0B
QZ_n = 0x2D
QZ_m = 0x2E
QZ_COMMA = 0x2B
QZ_PERIOD = 0x2F
QZ_SLASH = 0x2C
QZ_RSHIFT = 0x3C
QZ_UP = 0x7E
QZ_KP1 = 0x53
QZ_KP2 = 0x54
QZ_KP3 = 0x55
QZ_KP_ENTER = 0x4C
QZ_LCTRL = 0x3B
QZ_LALT = 0x3A
QZ_LMETA = 0x37
QZ_SPACE = 0x31
QZ_RMETA = 0x36
QZ_RALT = 0x3D
QZ_RCTRL = 0x3E
QZ_LEFT = 0x7B
QZ_DOWN = 0x7D
QZ_RIGHT = 0x7C
QZ_KP0 = 0x52
QZ_KP_PERIOD = 0x41
QZ_IBOOK_ENTER = 0x34
QZ_IBOOK_LEFT = 0x3B
QZ_IBOOK_RIGHT = 0x3C
QZ_IBOOK_DOWN = 0x3D
QZ_IBOOK_UP = 0x3E
keymap = {
QZ_ESCAPE: key.ESCAPE,
QZ_F1: key.F1,
QZ_F2: key.F2,
QZ_F3: key.F3,
QZ_F4: key.F4,
QZ_F5: key.F5,
QZ_F6: key.F6,
QZ_F7: key.F7,
QZ_F8: key.F8,
QZ_F9: key.F9,
QZ_F10: key.F10,
QZ_F11: key.F11,
QZ_F12: key.F12,
QZ_PRINT: key.PRINT,
QZ_SCROLLOCK: key.SCROLLLOCK,
QZ_PAUSE: key.PAUSE,
#QZ_POWER: key.POWER,
QZ_BACKQUOTE: key.QUOTELEFT,
QZ_1: key._1,
QZ_2: key._2,
QZ_3: key._3,
QZ_4: key._4,
QZ_5: key._5,
QZ_6: key._6,
QZ_7: key._7,
QZ_8: key._8,
QZ_9: key._9,
QZ_0: key._0,
QZ_MINUS: key.MINUS,
QZ_EQUALS: key.EQUAL,
QZ_BACKSPACE: key.BACKSPACE,
QZ_INSERT: key.INSERT,
QZ_HOME: key.HOME,
QZ_PAGEUP: key.PAGEUP,
QZ_NUMLOCK: key.NUMLOCK,
QZ_KP_EQUALS: key.NUM_EQUAL,
QZ_KP_DIVIDE: key.NUM_DIVIDE,
QZ_KP_MULTIPLY: key.NUM_MULTIPLY,
QZ_TAB: key.TAB,
QZ_q: key.Q,
QZ_w: key.W,
QZ_e: key.E,
QZ_r: key.R,
QZ_t: key.T,
QZ_y: key.Y,
QZ_u: key.U,
QZ_i: key.I,
QZ_o: key.O,
QZ_p: key.P,
QZ_LEFTBRACKET: key.BRACKETLEFT,
QZ_RIGHTBRACKET: key.BRACKETRIGHT,
QZ_BACKSLASH: key.BACKSLASH,
QZ_DELETE: key.DELETE,
QZ_END: key.END,
QZ_PAGEDOWN: key.PAGEDOWN,
QZ_KP7: key.NUM_7,
QZ_KP8: key.NUM_8,
QZ_KP9: key.NUM_9,
QZ_KP_MINUS: key.NUM_SUBTRACT,
QZ_CAPSLOCK: key.CAPSLOCK,
QZ_a: key.A,
QZ_s: key.S,
QZ_d: key.D,
QZ_f: key.F,
QZ_g: key.G,
QZ_h: key.H,
QZ_j: key.J,
QZ_k: key.K,
QZ_l: key.L,
QZ_SEMICOLON: key.SEMICOLON,
QZ_QUOTE: key.APOSTROPHE,
QZ_RETURN: key.RETURN,
QZ_KP4: key.NUM_4,
QZ_KP5: key.NUM_5,
QZ_KP6: key.NUM_6,
QZ_KP_PLUS: key.NUM_ADD,
QZ_LSHIFT: key.LSHIFT,
QZ_z: key.Z,
QZ_x: key.X,
QZ_c: key.C,
QZ_v: key.V,
QZ_b: key.B,
QZ_n: key.N,
QZ_m: key.M,
QZ_COMMA: key.COMMA,
QZ_PERIOD: key.PERIOD,
QZ_SLASH: key.SLASH,
QZ_RSHIFT: key.RSHIFT,
QZ_UP: key.UP,
QZ_KP1: key.NUM_1,
QZ_KP2: key.NUM_2,
QZ_KP3: key.NUM_3,
QZ_KP_ENTER: key.NUM_ENTER,
QZ_LCTRL: key.LCTRL,
QZ_LALT: key.LALT,
QZ_LMETA: key.LMETA,
QZ_SPACE: key.SPACE,
QZ_RMETA: key.RMETA,
QZ_RALT: key.RALT,
QZ_RCTRL: key.RCTRL,
QZ_LEFT: key.LEFT,
QZ_DOWN: key.DOWN,
QZ_RIGHT: key.RIGHT,
QZ_KP0: key.NUM_0,
QZ_KP_PERIOD: key.NUM_DECIMAL,
QZ_IBOOK_ENTER: key.ENTER,
QZ_IBOOK_LEFT: key.LEFT,
QZ_IBOOK_RIGHT: key.RIGHT,
QZ_IBOOK_DOWN: key.DOWN,
QZ_IBOOK_UP: key.UP,
}
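# Hedged usage sketch (not part of the original mapping): translating a
# Quartz scancode into a pyglet key symbol; unmapped scancodes simply miss.
#
#   symbol = keymap.get(QZ_SPACE)   # -> key.SPACE
#   symbol = keymap.get(0xFF)       # unmapped -> None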
| bsd-3-clause | 6,467,358,570,544,595,000 | 8,948,642,909,081,324,000 | 21.792593 | 78 | 0.628047 | false |
argriffing/scipy | scipy/sparse/csgraph/tests/test_traversal.py | 92 | 2390 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_almost_equal
from scipy.sparse.csgraph import breadth_first_tree, depth_first_tree,\
csgraph_to_dense, csgraph_from_dense
def test_graph_breadth_first():
csgraph = np.array([[0, 1, 2, 0, 0],
[1, 0, 0, 0, 3],
[2, 0, 0, 7, 0],
[0, 0, 7, 0, 1],
[0, 3, 0, 1, 0]])
csgraph = csgraph_from_dense(csgraph, null_value=0)
bfirst = np.array([[0, 1, 2, 0, 0],
[0, 0, 0, 0, 3],
[0, 0, 0, 7, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
for directed in [True, False]:
bfirst_test = breadth_first_tree(csgraph, 0, directed)
assert_array_almost_equal(csgraph_to_dense(bfirst_test),
bfirst)
def test_graph_depth_first():
csgraph = np.array([[0, 1, 2, 0, 0],
[1, 0, 0, 0, 3],
[2, 0, 0, 7, 0],
[0, 0, 7, 0, 1],
[0, 3, 0, 1, 0]])
csgraph = csgraph_from_dense(csgraph, null_value=0)
dfirst = np.array([[0, 1, 0, 0, 0],
[0, 0, 0, 0, 3],
[0, 0, 0, 0, 0],
[0, 0, 7, 0, 0],
[0, 0, 0, 1, 0]])
for directed in [True, False]:
dfirst_test = depth_first_tree(csgraph, 0, directed)
assert_array_almost_equal(csgraph_to_dense(dfirst_test),
dfirst)
def test_graph_breadth_first_trivial_graph():
csgraph = np.array([[0]])
csgraph = csgraph_from_dense(csgraph, null_value=0)
bfirst = np.array([[0]])
for directed in [True, False]:
bfirst_test = breadth_first_tree(csgraph, 0, directed)
assert_array_almost_equal(csgraph_to_dense(bfirst_test),
bfirst)
def test_graph_depth_first_trivial_graph():
csgraph = np.array([[0]])
csgraph = csgraph_from_dense(csgraph, null_value=0)
bfirst = np.array([[0]])
for directed in [True, False]:
bfirst_test = depth_first_tree(csgraph, 0, directed)
assert_array_almost_equal(csgraph_to_dense(bfirst_test),
bfirst)
| bsd-3-clause | 5,894,444,608,854,490,000 | 6,268,456,792,695,937,000 | 33.142857 | 71 | 0.479498 | false |
timthelion/FreeCAD_sf_master | src/Mod/Cam/Init.py | 55 | 1871 | # FreeCAD init script of the Cam module
# (c) 2007 Juergen Riegel
#***************************************************************************
#* (c) Juergen Riegel ([email protected]) 2002 *
#* *
#*   This file is part of the FreeCAD CAx development system.             *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* FreeCAD is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Lesser General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with FreeCAD; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#* Juergen Riegel 2007 *
#***************************************************************************/
| lgpl-2.1 | 245,229,341,045,622,400 | -2,485,833,041,052,618,000 | 69.961538 | 79 | 0.390166 | false |
sursum/buckanjaren | buckanjaren/lib/python3.5/site-packages/django/contrib/admin/tests.py | 113 | 7451 | from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.test import modify_settings
from django.test.selenium import SeleniumTestCase
from django.utils.deprecation import MiddlewareMixin
from django.utils.translation import ugettext as _
class CSPMiddleware(MiddlewareMixin):
"""The admin's JavaScript should be compatible with CSP."""
def process_response(self, request, response):
response['Content-Security-Policy'] = "default-src 'self'"
return response
@modify_settings(MIDDLEWARE={'append': 'django.contrib.admin.tests.CSPMiddleware'})
class AdminSeleniumTestCase(SeleniumTestCase, StaticLiveServerTestCase):
available_apps = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
]
def wait_until(self, callback, timeout=10):
"""
Helper function that blocks the execution of the tests until the
specified callback returns a value that is not falsy. This function can
be called, for example, after clicking a link or submitting a form.
See the other public methods that call this function for more details.
"""
from selenium.webdriver.support.wait import WebDriverWait
WebDriverWait(self.selenium, timeout).until(callback)
def wait_for_popup(self, num_windows=2, timeout=10):
"""
Block until `num_windows` are present (usually 2, but can be
overridden in the case of pop-ups opening other pop-ups).
"""
self.wait_until(lambda d: len(d.window_handles) == num_windows, timeout)
def wait_for(self, css_selector, timeout=10):
"""
Helper function that blocks until a CSS selector is found on the page.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
self.wait_until(
ec.presence_of_element_located((By.CSS_SELECTOR, css_selector)),
timeout
)
def wait_for_text(self, css_selector, text, timeout=10):
"""
Helper function that blocks until the text is found in the CSS selector.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
self.wait_until(
ec.text_to_be_present_in_element(
(By.CSS_SELECTOR, css_selector), text),
timeout
)
def wait_for_value(self, css_selector, text, timeout=10):
"""
Helper function that blocks until the value is found in the CSS selector.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
self.wait_until(
ec.text_to_be_present_in_element_value(
(By.CSS_SELECTOR, css_selector), text),
timeout
)
def wait_until_visible(self, css_selector, timeout=10):
"""
Block until the element described by the CSS selector is visible.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
self.wait_until(
ec.visibility_of_element_located((By.CSS_SELECTOR, css_selector)),
timeout
)
def wait_until_invisible(self, css_selector, timeout=10):
"""
Block until the element described by the CSS selector is invisible.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
self.wait_until(
ec.invisibility_of_element_located((By.CSS_SELECTOR, css_selector)),
timeout
)
def wait_page_loaded(self):
"""
Block until page has started to load.
"""
from selenium.common.exceptions import TimeoutException
try:
# Wait for the next page to be loaded
self.wait_for('body')
except TimeoutException:
# IE7 occasionally returns an error "Internet Explorer cannot
# display the webpage" and doesn't load the next page. We just
# ignore it.
pass
def admin_login(self, username, password, login_url='/admin/'):
"""
Helper function to log into the admin.
"""
self.selenium.get('%s%s' % (self.live_server_url, login_url))
username_input = self.selenium.find_element_by_name('username')
username_input.send_keys(username)
password_input = self.selenium.find_element_by_name('password')
password_input.send_keys(password)
login_text = _('Log in')
self.selenium.find_element_by_xpath(
'//input[@value="%s"]' % login_text).click()
self.wait_page_loaded()
def get_css_value(self, selector, attribute):
"""
Helper function that returns the value for the CSS attribute of an
DOM element specified by the given selector. Uses the jQuery that ships
with Django.
"""
return self.selenium.execute_script(
'return django.jQuery("%s").css("%s")' % (selector, attribute))
def get_select_option(self, selector, value):
"""
Returns the <OPTION> with the value `value` inside the <SELECT> widget
identified by the CSS selector `selector`.
"""
from selenium.common.exceptions import NoSuchElementException
options = self.selenium.find_elements_by_css_selector('%s > option' % selector)
for option in options:
if option.get_attribute('value') == value:
return option
raise NoSuchElementException('Option "%s" not found in "%s"' % (value, selector))
def _assertOptionsValues(self, options_selector, values):
if values:
options = self.selenium.find_elements_by_css_selector(options_selector)
actual_values = []
for option in options:
actual_values.append(option.get_attribute('value'))
self.assertEqual(values, actual_values)
else:
# Prevent the `find_elements_by_css_selector` call from blocking
# if the selector doesn't match any options as we expect it
# to be the case.
with self.disable_implicit_wait():
self.wait_until(
lambda driver: len(driver.find_elements_by_css_selector(options_selector)) == 0
)
def assertSelectOptions(self, selector, values):
"""
Asserts that the <SELECT> widget identified by `selector` has the
options with the given `values`.
"""
self._assertOptionsValues("%s > option" % selector, values)
def assertSelectedOptions(self, selector, values):
"""
Asserts that the <SELECT> widget identified by `selector` has the
selected options with the given `values`.
"""
self._assertOptionsValues("%s > option:checked" % selector, values)
def has_css_class(self, selector, klass):
"""
Returns True if the element identified by `selector` has the CSS class
`klass`.
"""
return (self.selenium.find_element_by_css_selector(selector)
.get_attribute('class').find(klass) != -1)
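# Hedged sketch (not part of Django itself): how a concrete test case might
# combine the helpers above. The credentials and the selector/text pair are
# assumptions for illustration.
#
# class ExampleAdminSeleniumTests(AdminSeleniumTestCase):
#     def test_login_flow(self):
#         self.admin_login('admin', 'secret', login_url='/admin/')
#         self.wait_for_text('#site-name', 'Django administration')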
| mit | -7,052,203,056,519,596,000 | -6,671,723,029,296,977,000 | 39.275676 | 99 | 0.628238 | false |
mkheirkhah/ns-3.23 | wutils.py | 47 | 8681 | import os
import os.path
import re
import sys
import subprocess
import shlex
# WAF modules
from waflib import Options, Utils, Logs, TaskGen, Build, Context
from waflib.Errors import WafError
# these are set from the main wscript file
APPNAME=None
VERSION=None
bld=None
def get_command_template(env, arguments=()):
cmd = Options.options.command_template or '%s'
for arg in arguments:
cmd = cmd + " " + arg
return cmd
if hasattr(os.path, "relpath"):
relpath = os.path.relpath # since Python 2.6
else:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = os.path.abspath(start).split(os.path.sep)
path_list = os.path.abspath(path).split(os.path.sep)
# Work out how much of the filepath is shared by start and path.
i = len(os.path.commonprefix([start_list, path_list]))
rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return os.path.curdir
return os.path.join(*rel_list)
def find_program(program_name, env):
launch_dir = os.path.abspath(Context.launch_dir)
#top_dir = os.path.abspath(Options.cwd_launch)
found_programs = []
for obj in bld.all_task_gen:
if not getattr(obj, 'is_ns3_program', False):
continue
## filter out programs not in the subtree starting at the launch dir
if not (obj.path.abspath().startswith(launch_dir)
or obj.path.get_bld().abspath().startswith(launch_dir)):
continue
name1 = obj.name
name2 = os.path.join(relpath(obj.path.abspath(), launch_dir), obj.name)
names = [name1, name2]
found_programs.extend(names)
if program_name in names:
return obj
raise ValueError("program '%s' not found; available programs are: %r"
% (program_name, found_programs))
def get_proc_env(os_env=None):
env = bld.env
if sys.platform == 'linux2':
pathvar = 'LD_LIBRARY_PATH'
elif sys.platform == 'darwin':
pathvar = 'DYLD_LIBRARY_PATH'
elif sys.platform == 'win32':
pathvar = 'PATH'
elif sys.platform == 'cygwin':
pathvar = 'PATH'
elif sys.platform.startswith('freebsd'):
pathvar = 'LD_LIBRARY_PATH'
else:
Logs.warn(("Don't know how to configure "
"dynamic library path for the platform %r;"
" assuming it's LD_LIBRARY_PATH.") % (sys.platform,))
pathvar = 'LD_LIBRARY_PATH'
proc_env = dict(os.environ)
if os_env is not None:
proc_env.update(os_env)
if pathvar is not None:
if pathvar in proc_env:
proc_env[pathvar] = os.pathsep.join(list(env['NS3_MODULE_PATH']) + [proc_env[pathvar]])
else:
proc_env[pathvar] = os.pathsep.join(list(env['NS3_MODULE_PATH']))
pymoddir = bld.path.find_dir('bindings/python').get_bld().abspath()
pyvizdir = bld.path.find_dir('src/visualizer').abspath()
if 'PYTHONPATH' in proc_env:
proc_env['PYTHONPATH'] = os.pathsep.join([pymoddir, pyvizdir] + [proc_env['PYTHONPATH']])
else:
proc_env['PYTHONPATH'] = os.pathsep.join([pymoddir, pyvizdir])
if 'PATH' in proc_env:
proc_env['PATH'] = os.pathsep.join(list(env['NS3_EXECUTABLE_PATH']) + [proc_env['PATH']])
else:
proc_env['PATH'] = os.pathsep.join(list(env['NS3_EXECUTABLE_PATH']))
return proc_env
def run_argv(argv, env, os_env=None, cwd=None, force_no_valgrind=False):
proc_env = get_proc_env(os_env)
if Options.options.valgrind and not force_no_valgrind:
if Options.options.command_template:
raise WafError("Options --command-template and --valgrind are conflicting")
if not env['VALGRIND']:
raise WafError("valgrind is not installed")
argv = [env['VALGRIND'], "--leak-check=full", "--show-reachable=yes", "--error-exitcode=1"] + argv
proc = subprocess.Popen(argv, env=proc_env, cwd=cwd, stderr=subprocess.PIPE)
error = False
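        # Valgrind's exit status does not necessarily reflect memory leaks, so
        # scan stderr for a leak summary and force a non-zero return code below.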
for line in proc.stderr:
sys.stderr.write(line)
if "== LEAK SUMMARY" in line:
error = True
retval = proc.wait()
if retval == 0 and error:
retval = 1
else:
try:
WindowsError
except NameError:
retval = subprocess.Popen(argv, env=proc_env, cwd=cwd).wait()
else:
try:
retval = subprocess.Popen(argv, env=proc_env, cwd=cwd).wait()
except WindowsError, ex:
raise WafError("Command %s raised exception %s" % (argv, ex))
if retval:
signame = None
if retval < 0: # signal?
import signal
for name, val in vars(signal).iteritems():
if len(name) > 3 and name[:3] == 'SIG' and name[3] != '_':
if val == -retval:
signame = name
break
if signame:
raise WafError("Command %s terminated with signal %s."
" Run it under a debugger to get more information "
"(./waf --run <program> --command-template=\"gdb --args %%s <args>\")." % (argv, signame))
else:
raise WafError("Command %s exited with code %i" % (argv, retval))
return retval
def get_run_program(program_string, command_template=None):
"""
Return the program name and argv of the process that would be executed by
run_program(program_string, command_template).
"""
#print "get_run_program_argv(program_string=%r, command_template=%r)" % (program_string, command_template)
env = bld.env
if command_template in (None, '%s'):
argv = shlex.split(program_string)
#print "%r ==shlex.split==> %r" % (program_string, argv)
program_name = argv[0]
try:
program_obj = find_program(program_name, env)
except ValueError, ex:
raise WafError(str(ex))
program_node = program_obj.path.find_or_declare(program_obj.target)
#try:
# program_node = program_obj.path.find_build(ccroot.get_target_name(program_obj))
#except AttributeError:
# raise Utils.WafError("%s does not appear to be a program" % (program_name,))
execvec = [program_node.abspath()] + argv[1:]
else:
program_name = program_string
try:
program_obj = find_program(program_name, env)
except ValueError, ex:
raise WafError(str(ex))
program_node = program_obj.path.find_or_declare(program_obj.target)
#try:
# program_node = program_obj.path.find_build(ccroot.get_target_name(program_obj))
#except AttributeError:
# raise Utils.WafError("%s does not appear to be a program" % (program_name,))
tmpl = command_template % (program_node.abspath(),)
execvec = shlex.split(tmpl.replace('\\', '\\\\'))
#print "%r ==shlex.split==> %r" % (command_template % (program_node.abspath(env),), execvec)
return program_name, execvec
def run_program(program_string, env, command_template=None, cwd=None, visualize=False):
"""
if command_template is not None, then program_string == program
name and argv is given by command_template with %s replaced by the
full path to the program. Else, program_string is interpreted as
a shell command with first name being the program name.
"""
dummy_program_name, execvec = get_run_program(program_string, command_template)
if cwd is None:
if (Options.options.cwd_launch):
cwd = Options.options.cwd_launch
else:
cwd = Options.cwd_launch
if visualize:
execvec.append("--SimulatorImplementationType=ns3::VisualSimulatorImpl")
return run_argv(execvec, env, cwd=cwd)
def run_python_program(program_string, env, visualize=False):
env = bld.env
execvec = shlex.split(program_string)
if (Options.options.cwd_launch):
cwd = Options.options.cwd_launch
else:
cwd = Options.cwd_launch
if visualize:
execvec.append("--SimulatorImplementationType=ns3::VisualSimulatorImpl")
return run_argv([env['PYTHON'][0]] + execvec, env, cwd=cwd)
def uniquify_list(seq):
"""Remove duplicates while preserving order
From Dave Kirby http://www.peterbe.com/plog/uniqifiers-benchmark
"""
seen = set()
return [ x for x in seq if x not in seen and not seen.add(x)]
| gpl-2.0 | 4,669,993,001,268,959,000 | -2,582,665,591,702,383,000 | 35.628692 | 123 | 0.603272 | false |
rafaelvieiras/script.pseudotv.live | resources/lib/ChannelListThread.py | 1 | 9795 | # Copyright (C) 2011 Jason Anderson
#
#
# This file is part of PseudoTV.
#
# PseudoTV is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PseudoTV is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PseudoTV. If not, see <http://www.gnu.org/licenses/>.
import xbmc, xbmcgui, xbmcaddon
import subprocess, os
import time, threading
import datetime
import sys, re
import random, traceback
from ChannelList import ChannelList
from Channel import Channel
from Globals import *
from Artdownloader import *
class ChannelListThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.myOverlay = None
sys.setcheckinterval(25)
self.chanlist = ChannelList()
self.paused = False
self.fullUpdating = True
self.Artdownloader = Artdownloader()
def log(self, msg, level = xbmc.LOGDEBUG):
log('ChannelListThread: ' + msg, level)
def run(self):
self.log("Starting")
self.chanlist.exitThread = False
self.chanlist.readConfig()
self.chanlist.sleepTime = 0.1
if self.myOverlay == None:
self.log("Overlay not defined. Exiting.")
return
self.chanlist.myOverlay = self.myOverlay
self.fullUpdating = (self.myOverlay.backgroundUpdating == 0)
validchannels = 0
for i in range(self.myOverlay.maxChannels):
self.chanlist.channels.append(Channel())
if self.myOverlay.channels[i].isValid:
validchannels += 1
# Don't load invalid channels if minimum threading mode is on
if self.fullUpdating and self.myOverlay.isMaster:
if validchannels < self.chanlist.enteredChannelCount:
title = 'PseudoTV Live, Background Loading...'
xbmc.executebuiltin('XBMC.Notification(%s, %s, %s)' % (title, 4000 , THUMB))
for i in range(self.myOverlay.maxChannels):
if self.myOverlay.channels[i].isValid == False:
while True:
if self.myOverlay.isExiting:
self.log("Closing thread")
return
time.sleep(1)
if self.paused == False:
break
self.chanlist.channels[i].setAccessTime(self.myOverlay.channels[i].lastAccessTime)
try:
if self.chanlist.setupChannel(i + 1, True, True, False) == True:
while self.paused:
if self.myOverlay.isExiting:
self.log("IsExiting")
return
time.sleep(1)
self.myOverlay.channels[i] = self.chanlist.channels[i]
if self.myOverlay.channels[i].isValid == True:
title = "PseudoTV Live, Channel " + str(i + 1) + " Added"
xbmc.executebuiltin('XBMC.Notification(%s, %s, %s)' % (title, 4000, THUMB))
except Exception,e:
self.log("Unknown Channel Creation Exception", xbmc.LOGERROR)
self.log(traceback.format_exc(), xbmc.LOGERROR)
return
REAL_SETTINGS.setSetting('ForceChannelReset', 'false')
self.chanlist.sleepTime = 0.3
if REAL_SETTINGS.getSetting("ArtService_Enabled") == "true":
InfoTimer = INFOBAR_TIMER[int(REAL_SETTINGS.getSetting('InfoTimer'))]
self.ArtServiceThread = threading.Timer(float(InfoTimer), self.Artdownloader.ArtService)
self.ArtServiceThread.name = "ArtServiceThread"
self.ArtServiceThread.start()
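        # Main maintenance loop: keep appending shows to each channel until it
        # holds at least PREP_CHANNEL_TIME of content (or the playlist cap is
        # reached), then sleep roughly 30 minutes as master or 5 minutes otherwise.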
while True:
for i in range(self.myOverlay.maxChannels):
modified = True
while modified == True and self.myOverlay.channels[i].getTotalDuration() < PREP_CHANNEL_TIME and self.myOverlay.channels[i].Playlist.size() < 16288:
# If minimum updating is on, don't attempt to load invalid channels
if self.fullUpdating == False and self.myOverlay.channels[i].isValid == False and self.myOverlay.isMaster:
break
modified = False
if self.myOverlay.isExiting:
self.log("Closing thread")
return
time.sleep(2)
curtotal = self.myOverlay.channels[i].getTotalDuration()
if self.myOverlay.isMaster:
if curtotal > 0:
# When appending, many of the channel variables aren't set, so copy them over.
# This needs to be done before setup since a rule may use one of the values.
# It also needs to be done after since one of them may have changed while being setup.
self.chanlist.channels[i].playlistPosition = self.myOverlay.channels[i].playlistPosition
self.chanlist.channels[i].showTimeOffset = self.myOverlay.channels[i].showTimeOffset
self.chanlist.channels[i].lastAccessTime = self.myOverlay.channels[i].lastAccessTime
self.chanlist.channels[i].totalTimePlayed = self.myOverlay.channels[i].totalTimePlayed
self.chanlist.channels[i].isPaused = self.myOverlay.channels[i].isPaused
self.chanlist.channels[i].mode = self.myOverlay.channels[i].mode
# Only allow appending valid channels, don't allow erasing them
try:
self.chanlist.setupChannel(i + 1, True, False, True)
except Exception,e:
self.log("Unknown Channel Appending Exception", xbmc.LOGERROR)
self.log(traceback.format_exc(), xbmc.LOGERROR)
return
self.chanlist.channels[i].playlistPosition = self.myOverlay.channels[i].playlistPosition
self.chanlist.channels[i].showTimeOffset = self.myOverlay.channels[i].showTimeOffset
self.chanlist.channels[i].lastAccessTime = self.myOverlay.channels[i].lastAccessTime
self.chanlist.channels[i].totalTimePlayed = self.myOverlay.channels[i].totalTimePlayed
self.chanlist.channels[i].isPaused = self.myOverlay.channels[i].isPaused
self.chanlist.channels[i].mode = self.myOverlay.channels[i].mode
else:
try:
self.chanlist.setupChannel(i + 1, True, True, False)
except Exception,e:
self.log("Unknown Channel Modification Exception", xbmc.LOGERROR)
self.log(traceback.format_exc(), xbmc.LOGERROR)
return
else:
try:
# We're not master, so no modifications...just try and load the channel
self.chanlist.setupChannel(i + 1, True, False, False)
except Exception,e:
self.log("Unknown Channel Loading Exception", xbmc.LOGERROR)
self.log(traceback.format_exc(), xbmc.LOGERROR)
return
self.myOverlay.channels[i] = self.chanlist.channels[i]
if self.myOverlay.isMaster:
ADDON_SETTINGS.setSetting('Channel_' + str(i + 1) + '_time', str(self.myOverlay.channels[i].totalTimePlayed))
if self.myOverlay.channels[i].getTotalDuration() > curtotal and self.myOverlay.isMaster:
modified = True
# A do-while loop for the paused state
while True:
if self.myOverlay.isExiting:
self.log("Closing thread")
return
time.sleep(2)
if self.paused == False:
break
timeslept = 0
if self.fullUpdating == False and self.myOverlay.isMaster:
return
# If we're master, wait 30 minutes in between checks. If not, wait 5 minutes.
while (timeslept < 1800 and self.myOverlay.isMaster == True) or (timeslept < 300 and self.myOverlay.isMaster == False):
if self.myOverlay.isExiting:
self.log("IsExiting")
return
time.sleep(2)
timeslept += 2
self.log("All channels up to date. Exiting thread.")
def pause(self):
self.paused = True
self.chanlist.threadPaused = True
def unpause(self):
self.paused = False
self.chanlist.threadPaused = False
| gpl-3.0 | 7,808,499,291,245,188,000 | 1,904,897,008,411,793,400 | 44.347222 | 164 | 0.54099 | false |
fresskarma/tinyos-1.x | contrib/GGB/tools/java/net/tinyos/sentri/prepare_app.py | 2 | 2628 | #!/usr/bin/python
# $Id: prepare_app.py,v 1.1 2006/12/01 00:57:00 binetude Exp $
# "Copyright (c) 2000-2003 The Regents of the University of California.
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose, without fee, and without written agreement is
# hereby granted, provided that the above copyright notice, the following
# two paragraphs and the author appear in all copies of this software.
#
# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
# DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
# OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF
# CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
# ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION TO
# PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS."
#
# Copyright (c) 2002-2003 Intel Corporation
# All rights reserved.
#
# This file is distributed under the terms in the attached INTEL-LICENSE
# file. If you do not find these files, copies can be found by writing to
# Intel Research Berkeley, 2150 Shattuck Avenue, Suite 1300, Berkeley, CA,
# 94704. Attention: Intel License Inquiry.
#
# @author Sukun Kim <[email protected]>
#
import os
motes = [1, \
2]
MAX_RETRY = 10
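# Note: only the date/sleep calls below are actually executed via os.system();
# the java DataCenter commands are printed as a dry run and `result` stays 0.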
for k in range(2):
os.system('date')
os.system('sleep 1')
print 'java net.tinyos.sentri.DataCenter releaseRoute'
os.system('sleep 1')
print 'java net.tinyos.sentri.DataCenter eraseFlash'
for i in range(6):
os.system('sleep 2')
print 'java net.tinyos.sentri.DataCenter ledOff'
for i in motes:
mote = str(i)
os.system('sleep 1')
result = 0
print 'java net.tinyos.sentri.DataCenter networkInfo ' + mote
if result != 0:
os.system('sleep 10')
os.system('sleep 12')
print 'java net.tinyos.sentri.DataCenter fixRoute'
os.system('sleep 6')
print 'java net.tinyos.sentri.DataCenter startSensing 48000 1000 -chnlSelect 31 -samplesToAvg 5'
for i in motes:
mote = str(i)
for j in range(MAX_RETRY):
os.system('sleep 1')
result = 0
print 'java net.tinyos.sentri.DataCenter readData -dest ' + mote
if result == 0:
break
os.system('sleep 1')
print 'java net.tinyos.sentri.DataCenter releaseRoute'
| bsd-3-clause | 2,077,485,740,988,163,000 | -36,159,491,255,722,990 | 29.55814 | 98 | 0.710807 | false |
ralscha/extdirectspring | addsettings.py | 5 | 1330 | #!/usr/bin/env python
import sys
import os
import os.path
import xml.dom.minidom
if os.environ["TRAVIS_SECURE_ENV_VARS"] == "false":
print "no secure env vars available, skipping deployment"
sys.exit()
homedir = os.path.expanduser("~")
m2 = xml.dom.minidom.parse(homedir + '/.m2/settings.xml')
settings = m2.getElementsByTagName("settings")[0]
serversNodes = settings.getElementsByTagName("servers")
if not serversNodes:
serversNode = m2.createElement("servers")
settings.appendChild(serversNode)
else:
serversNode = serversNodes[0]
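# The following builds a <server> entry equivalent to this XML (credentials
# come from the Travis environment variables):
#
#   <server>
#     <id>sonatype-nexus-snapshots</id>
#     <username>$SONATYPE_USERNAME</username>
#     <password>$SONATYPE_PASSWORD</password>
#   </server>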
sonatypeServerNode = m2.createElement("server")
sonatypeServerId = m2.createElement("id")
sonatypeServerUser = m2.createElement("username")
sonatypeServerPass = m2.createElement("password")
idNode = m2.createTextNode("sonatype-nexus-snapshots")
userNode = m2.createTextNode(os.environ["SONATYPE_USERNAME"])
passNode = m2.createTextNode(os.environ["SONATYPE_PASSWORD"])
sonatypeServerId.appendChild(idNode)
sonatypeServerUser.appendChild(userNode)
sonatypeServerPass.appendChild(passNode)
sonatypeServerNode.appendChild(sonatypeServerId)
sonatypeServerNode.appendChild(sonatypeServerUser)
sonatypeServerNode.appendChild(sonatypeServerPass)
serversNode.appendChild(sonatypeServerNode)
m2Str = m2.toxml()
f = open(homedir + '/.m2/mySettings.xml', 'w')
f.write(m2Str)
f.close() | apache-2.0 | 8,803,663,540,610,176,000 | -4,827,382,385,394,934,000 | 28.577778 | 61 | 0.78797 | false |
sosreport/sos | sos/presets/redhat/__init__.py | 4 | 2870 | # Copyright (C) 2020 Red Hat, Inc., Jake Hunsaker <[email protected]>
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.options import SoSOptions
from sos.presets import PresetDefaults
RHEL_RELEASE_STR = "Red Hat Enterprise Linux"
_opts_verify = SoSOptions(verify=True)
_opts_all_logs = SoSOptions(all_logs=True)
_opts_all_logs_verify = SoSOptions(all_logs=True, verify=True)
_cb_profiles = ['boot', 'storage', 'system']
_cb_plugopts = ['boot.all-images=on', 'rpm.rpmva=on', 'rpm.rpmdb=on']
RHV = "rhv"
RHV_DESC = "Red Hat Virtualization"
RHEL = "rhel"
RHEL_DESC = RHEL_RELEASE_STR
RHOSP = "rhosp"
RHOSP_DESC = "Red Hat OpenStack Platform"
RHOCP = "ocp"
RHOCP_DESC = "OpenShift Container Platform by Red Hat"
RHOSP_OPTS = SoSOptions(plugopts=[
'process.lsof=off',
'networking.ethtool_namespaces=False',
'networking.namespaces=200'])
RH_CFME = "cfme"
RH_CFME_DESC = "Red Hat CloudForms"
RH_SATELLITE = "satellite"
RH_SATELLITE_DESC = "Red Hat Satellite"
SAT_OPTS = SoSOptions(log_size=100, plugopts=['apache.log=on'])
CB = "cantboot"
CB_DESC = "For use when normal system startup fails"
CB_OPTS = SoSOptions(
verify=True, all_logs=True, profiles=_cb_profiles,
plugopts=_cb_plugopts
)
CB_NOTE = ("Data collection will be limited to a boot-affecting scope")
NOTE_SIZE = "This preset may increase report size"
NOTE_TIME = "This preset may increase report run time"
NOTE_SIZE_TIME = "This preset may increase report size and run time"
RHEL_PRESETS = {
RHV: PresetDefaults(name=RHV, desc=RHV_DESC, note=NOTE_TIME,
opts=_opts_verify),
RHEL: PresetDefaults(name=RHEL, desc=RHEL_DESC),
RHOSP: PresetDefaults(name=RHOSP, desc=RHOSP_DESC, opts=RHOSP_OPTS),
RHOCP: PresetDefaults(name=RHOCP, desc=RHOCP_DESC, note=NOTE_SIZE_TIME,
opts=_opts_all_logs_verify),
RH_CFME: PresetDefaults(name=RH_CFME, desc=RH_CFME_DESC, note=NOTE_TIME,
opts=_opts_verify),
RH_SATELLITE: PresetDefaults(name=RH_SATELLITE, desc=RH_SATELLITE_DESC,
note=NOTE_TIME, opts=SAT_OPTS),
CB: PresetDefaults(name=CB, desc=CB_DESC, note=CB_NOTE, opts=CB_OPTS)
}
ATOMIC = "atomic"
ATOMIC_RELEASE_STR = "Atomic"
ATOMIC_DESC = "Red Hat Enterprise Linux Atomic Host"
ATOMIC_PRESETS = {
ATOMIC: PresetDefaults(name=ATOMIC, desc=ATOMIC_DESC, note=NOTE_TIME,
opts=_opts_verify)
}
# vim: set et ts=4 sw=4 :
| gpl-2.0 | -5,842,784,423,077,918,000 | 4,298,844,999,158,808,600 | 33.166667 | 76 | 0.666899 | false |
syed/PerfKitBenchmarker | perfkitbenchmarker/static_virtual_machine.py | 2 | 10858 | # Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to represent a Static Virtual Machine object.
All static VMs provided in a given group will be used before any non-static
VMs are provisioned. For example, in a test that uses 4 VMs, if 3 static VMs
are provided, all of them will be used and one additional non-static VM
will be provisioned. The VMs should be set up with passwordless ssh and
passwordless sudo (neither sshing nor running a sudo command should prompt
the user for a password).
All VM specifics are self-contained and the class provides methods to
operate on the VM: boot, shutdown, etc.
"""
import collections
import json
import logging
import threading
from perfkitbenchmarker import disk
from perfkitbenchmarker import flags
from perfkitbenchmarker import linux_virtual_machine
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import windows_virtual_machine
WINDOWS = 'windows'
DEBIAN = 'debian'
RHEL = 'rhel'
UBUNTU_CONTAINER = 'ubuntu_container'
FLAGS = flags.FLAGS
class StaticVmSpec(virtual_machine.BaseVmSpec):
"""Object containing all info needed to create a Static VM."""
def __init__(self, ip_address=None, user_name=None, ssh_private_key=None,
internal_ip=None, ssh_port=22, install_packages=True,
password=None, disk_specs=None, os_type=None, **kwargs):
"""Initialize the StaticVmSpec object.
Args:
ip_address: The public ip address of the VM.
user_name: The username of the VM that the keyfile corresponds to.
ssh_private_key: The absolute path to the private keyfile to use to ssh
to the VM.
internal_ip: The internal ip address of the VM.
ssh_port: The port number to use for SSH and SCP commands.
install_packages: If false, no packages will be installed. This is
useful if benchmark dependencies have already been installed.
password: The password used to log into the VM (Windows Only).
disk_specs: A list of dictionaries containing kwargs used to create
disk.BaseDiskSpecs.
os_type: The OS type of the VM. See the flag of the same name for more
information.
"""
super(StaticVmSpec, self).__init__(**kwargs)
self.ip_address = ip_address
self.user_name = user_name
self.ssh_private_key = ssh_private_key
self.internal_ip = internal_ip
self.ssh_port = ssh_port
self.install_packages = install_packages
self.password = password
self.os_type = os_type
self.disk_specs = disk_specs
class StaticDisk(disk.BaseDisk):
"""Object representing a static Disk."""
def _Create(self):
"""StaticDisks don't implement _Create()."""
pass
def _Delete(self):
"""StaticDisks don't implement _Delete()."""
pass
def Attach(self):
"""StaticDisks don't implement Attach()."""
pass
def Detach(self):
"""StaticDisks don't implement Detach()."""
pass
class StaticVirtualMachine(virtual_machine.BaseVirtualMachine):
"""Object representing a Static Virtual Machine."""
is_static = True
vm_pool = collections.deque()
vm_pool_lock = threading.Lock()
def __init__(self, vm_spec):
"""Initialize a static virtual machine.
Args:
vm_spec: A StaticVmSpec object containing arguments.
"""
super(StaticVirtualMachine, self).__init__(vm_spec, None, None)
self.ip_address = vm_spec.ip_address
self.user_name = vm_spec.user_name
self.ssh_private_key = vm_spec.ssh_private_key
self.internal_ip = vm_spec.internal_ip
self.zone = self.zone or ('Static - %s@%s' % (self.user_name,
self.ip_address))
self.ssh_port = vm_spec.ssh_port
self.install_packages = vm_spec.install_packages
self.password = vm_spec.password
if vm_spec.disk_specs:
for spec in vm_spec.disk_specs:
self.disk_specs.append(disk.BaseDiskSpec(**spec))
self.from_pool = False
def _Create(self):
"""StaticVirtualMachines do not implement _Create()."""
pass
def _Delete(self):
"""Returns the virtual machine to the pool."""
if self.from_pool:
with self.vm_pool_lock:
self.vm_pool.appendleft(self)
def CreateScratchDisk(self, disk_spec):
"""Create a VM's scratch disk.
Args:
disk_spec: virtual_machine.BaseDiskSpec object of the disk.
"""
spec = self.disk_specs[len(self.scratch_disks)]
self.scratch_disks.append(StaticDisk(spec))
def DeleteScratchDisks(self):
"""StaticVirtualMachines do not delete scratch disks."""
pass
def GetLocalDisks(self):
"""Returns a list of local disks on the VM."""
return [disk_spec.device_path
for disk_spec in self.disk_specs if disk_spec.device_path]
@classmethod
def ReadStaticVirtualMachineFile(cls, file_obj):
"""Read a file describing the static VMs to use.
This function will read the static VM information from the provided file,
instantiate VMs corresponding to the info, and add the VMs to the static
    VM pool. The provided file should contain a single array in JSON format.
Each element in the array must be an object with required format:
ip_address: string.
user_name: string.
keyfile_path: string.
ssh_port: integer, optional. Default 22
internal_ip: string, optional.
zone: string, optional.
local_disks: array of strings, optional.
scratch_disk_mountpoints: array of strings, optional
os_type: string, optional (see package_managers)
install_packages: bool, optional
Args:
file_obj: An open handle to a file containing the static VM info.
Raises:
ValueError: On missing required keys, or invalid keys.
"""
vm_arr = json.load(file_obj)
if not isinstance(vm_arr, list):
raise ValueError('Invalid static VM file. Expected array, got: %s.' %
type(vm_arr))
required_keys = frozenset(['ip_address', 'user_name'])
linux_required_keys = required_keys | frozenset(['keyfile_path'])
required_keys_by_os = {
WINDOWS: required_keys | frozenset(['password']),
DEBIAN: linux_required_keys,
RHEL: linux_required_keys,
UBUNTU_CONTAINER: linux_required_keys,
}
required_keys = required_keys_by_os[FLAGS.os_type]
optional_keys = frozenset(['internal_ip', 'zone', 'local_disks',
'scratch_disk_mountpoints', 'os_type',
'ssh_port', 'install_packages'])
allowed_keys = required_keys | optional_keys
def VerifyItemFormat(item):
"""Verify that the decoded JSON object matches the required schema."""
item_keys = frozenset(item)
extra_keys = sorted(item_keys - allowed_keys)
missing_keys = required_keys - item_keys
if extra_keys:
raise ValueError('Unexpected keys: {0}'.format(', '.join(extra_keys)))
elif missing_keys:
raise ValueError('Missing required keys: {0}'.format(
', '.join(missing_keys)))
for item in vm_arr:
VerifyItemFormat(item)
ip_address = item['ip_address']
user_name = item['user_name']
keyfile_path = item.get('keyfile_path')
internal_ip = item.get('internal_ip')
zone = item.get('zone')
local_disks = item.get('local_disks', [])
password = item.get('password')
if not isinstance(local_disks, list):
raise ValueError('Expected a list of local disks, got: {0}'.format(
local_disks))
scratch_disk_mountpoints = item.get('scratch_disk_mountpoints', [])
if not isinstance(scratch_disk_mountpoints, list):
raise ValueError(
'Expected a list of disk mount points, got: {0}'.format(
scratch_disk_mountpoints))
ssh_port = item.get('ssh_port', 22)
os_type = item.get('os_type')
install_packages = item.get('install_packages', True)
if ((os_type == WINDOWS and FLAGS.os_type != WINDOWS) or
(os_type != WINDOWS and FLAGS.os_type == WINDOWS)):
raise ValueError('Please only use Windows VMs when using '
'--os_type=windows and vice versa.')
disk_kwargs_list = []
for path in scratch_disk_mountpoints:
disk_kwargs_list.append({'mount_point': path})
for local_disk in local_disks:
disk_kwargs_list.append({'device_path': local_disk})
vm_spec = StaticVmSpec(
ip_address=ip_address, user_name=user_name, ssh_port=ssh_port,
install_packages=install_packages, ssh_private_key=keyfile_path,
internal_ip=internal_ip, zone=zone, disk_specs=disk_kwargs_list,
password=password)
vm_class = GetStaticVmClass(os_type)
vm = vm_class(vm_spec)
cls.vm_pool.append(vm)
@classmethod
def GetStaticVirtualMachine(cls):
"""Pull a Static VM from the pool of static VMs.
If there are no VMs left in the pool, the method will return None.
Returns:
A static VM from the pool, or None if there are no static VMs left.
"""
with cls.vm_pool_lock:
if cls.vm_pool:
vm = cls.vm_pool.popleft()
vm.from_pool = True
return vm
else:
return None
def GetStaticVmClass(os_type):
"""Returns the static VM class that corresponds to the os_type."""
class_dict = {
DEBIAN: DebianBasedStaticVirtualMachine,
RHEL: RhelBasedStaticVirtualMachine,
WINDOWS: WindowsBasedStaticVirtualMachine,
UBUNTU_CONTAINER: ContainerizedStaticVirtualMachine,
}
if os_type in class_dict:
return class_dict[os_type]
else:
logging.warning('Could not find os type for VM. Defaulting to debian.')
return DebianBasedStaticVirtualMachine
class ContainerizedStaticVirtualMachine(
StaticVirtualMachine, linux_virtual_machine.ContainerizedDebianMixin):
pass
class DebianBasedStaticVirtualMachine(StaticVirtualMachine,
linux_virtual_machine.DebianMixin):
pass
class RhelBasedStaticVirtualMachine(StaticVirtualMachine,
linux_virtual_machine.RhelMixin):
pass
class WindowsBasedStaticVirtualMachine(StaticVirtualMachine,
windows_virtual_machine.WindowsMixin):
pass
| apache-2.0 | -4,731,604,683,091,890,000 | 6,341,220,588,759,171,000 | 33.469841 | 78 | 0.668355 | false |
janebeckman/gpdb | gpMgmt/bin/gppylib/commands/unix.py | 9 | 33309 | #!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
#
"""
Set of Classes for executing unix commands.
"""
import os
import platform
import psutil
import socket
import signal
import uuid
from gppylib.gplog import get_default_logger
from gppylib.commands.base import *
logger = get_default_logger()
# ---------------platforms--------------------
# global variable for our platform
SYSTEM = "unknown"
SUNOS = "sunos"
LINUX = "linux"
DARWIN = "darwin"
FREEBSD = "freebsd"
platform_list = [SUNOS, LINUX, DARWIN, FREEBSD]
curr_platform = platform.uname()[0].lower()
GPHOME = os.environ.get('GPHOME', None)
# ---------------command path--------------------
CMDPATH = ['/usr/kerberos/bin', '/usr/sfw/bin', '/opt/sfw/bin', '/bin', '/usr/local/bin',
'/usr/bin', '/sbin', '/usr/sbin', '/usr/ucb', '/sw/bin', '/opt/Navisphere/bin']
if GPHOME:
CMDPATH.append(GPHOME)
CMD_CACHE = {}
# ----------------------------------
class CommandNotFoundException(Exception):
def __init__(self, cmd, paths):
self.cmd = cmd
self.paths = paths
def __str__(self):
return "Could not locate command: '%s' in this set of paths: %s" % (self.cmd, repr(self.paths))
def findCmdInPath(cmd):
global CMD_CACHE
if cmd not in CMD_CACHE:
for p in CMDPATH:
f = os.path.join(p, cmd)
if os.path.exists(f):
CMD_CACHE[cmd] = f
return f
logger.critical('Command %s not found' % cmd)
search_path = CMDPATH[:]
raise CommandNotFoundException(cmd, search_path)
else:
return CMD_CACHE[cmd]
# For now we'll leave some generic functions outside of the Platform framework
def getLocalHostname():
return socket.gethostname().split('.')[0]
def getUserName():
return os.environ.get('LOGNAME') or os.environ.get('USER')
def check_pid_on_remotehost(pid, host):
""" Check For the existence of a unix pid on remote host. """
if pid == 0:
return False
cmd = Command(name='check pid on remote host', cmdStr='kill -0 %d' % pid, ctxt=REMOTE, remoteHost=host)
cmd.run()
if cmd.get_results().rc == 0:
return True
return False
def check_pid(pid):
""" Check For the existence of a unix pid. """
if pid == 0:
return False
try:
os.kill(int(pid), signal.SIG_DFL)
except OSError:
return False
else:
return True
"""
Given the data directory, port and pid for a segment,
kill -9 all the processes associated with that segment.
If pid is -1, then the postmaster is already stopped,
so we check for any leftover processes for that segment
and kill -9 those processes
E.g. postgres: port 45002, logger process
postgres: port 45002, sweeper process
postgres: port 45002, checkpoint process
"""
def kill_9_segment_processes(datadir, port, pid):
logger.info('Terminating processes for segment %s' % datadir)
pid_list = []
# pid is the pid of the postgres process.
# pid can be -1 if the process is down already
if pid != -1:
pid_list = [pid]
cmd = Command('get a list of processes to kill -9',
cmdStr='ps ux | grep "[p]ostgres:\s*port\s*%s" | awk \'{print $2}\'' % (port))
try:
cmd.run(validateAfter=True)
except Exception as e:
logger.warning('Unable to get the pid list of processes for segment %s: (%s)' % (datadir, str(e)))
return
results = cmd.get_results()
results = results.stdout.strip().split('\n')
for result in results:
if result:
pid_list.append(int(result))
for pid in pid_list:
# Try to kill -9 the process.
# We ignore any errors
try:
os.kill(pid, signal.SIGKILL)
except Exception as e:
logger.error('Failed to kill processes for segment %s: (%s)' % (datadir, str(e)))
def logandkill(pid, sig):
msgs = {
signal.SIGCONT: "Sending SIGSCONT to %d",
signal.SIGTERM: "Sending SIGTERM to %d (smart shutdown)",
signal.SIGINT: "Sending SIGINT to %d (fast shutdown)",
signal.SIGQUIT: "Sending SIGQUIT to %d (immediate shutdown)",
signal.SIGABRT: "Sending SIGABRT to %d"
}
logger.info(msgs[sig] % pid)
os.kill(pid, sig)
def kill_sequence(pid):
if not check_pid(pid): return
# first send SIGCONT in case the process is stopped
logandkill(pid, signal.SIGCONT)
# next try SIGTERM (smart shutdown)
logandkill(pid, signal.SIGTERM)
# give process a few seconds to exit
for i in range(0, 3):
time.sleep(1)
if not check_pid(pid):
return
# next try SIGINT (fast shutdown)
logandkill(pid, signal.SIGINT)
# give process a few more seconds to exit
for i in range(0, 3):
time.sleep(1)
if not check_pid(pid):
return
# next try SIGQUIT (immediate shutdown)
logandkill(pid, signal.SIGQUIT)
# give process a final few seconds to exit
for i in range(0, 5):
time.sleep(1)
if not check_pid(pid):
return
# all else failed - try SIGABRT
logandkill(pid, signal.SIGABRT)
# ---------------Platform Framework--------------------
""" The following platform framework is used to handle any differences between
    the platforms we support.  The GenericPlatform class is the base class
that a supported platform extends from and overrides any of the methods
as necessary.
TODO: should the platform stuff be broken out to separate module?
"""
class GenericPlatform():
def getName(self):
"unsupported"
def getDefaultLocale(self):
return 'en_US.utf-8'
def get_machine_arch_cmd(self):
return 'uname -i'
def getPingOnceCmd(self):
pass
def getDiskFreeCmd(self):
return findCmdInPath('df') + " -k"
def getTarCmd(self):
return findCmdInPath('tar')
def getCpCmd(self):
return findCmdInPath('cp')
def getSadcCmd(self, interval, outfilename):
return None
def getIfconfigCmd(self):
return findCmdInPath('ifconfig')
def getMountDevFirst(self):
return True
class LinuxPlatform(GenericPlatform):
def __init__(self):
pass
def getName(self):
return "linux"
def getDefaultLocale(self):
return 'en_US.utf8'
def getDiskFreeCmd(self):
# -P is for POSIX formatting. Prevents error
# on lines that would wrap
return findCmdInPath('df') + " -Pk"
def getSadcCmd(self, interval, outfilename):
cmd = "/usr/lib64/sa/sadc -F -d " + str(interval) + " " + outfilename
return cmd
def getMountDevFirst(self):
return True
def getPing6(self):
return findCmdInPath('ping6')
class SolarisPlatform(GenericPlatform):
def __init__(self):
pass
def getName(self):
return "sunos"
def getDefaultLocale(self):
return 'en_US.UTF-8'
def getDiskFreeCmd(self):
return findCmdInPath('df') + " -bk"
def getTarCmd(self):
return findCmdInPath('gtar')
def getSadcCmd(self, interval, outfilename):
cmd = "/usr/lib/sa/sadc " + str(interval) + " 100000 " + outfilename
return cmd
def getIfconfigCmd(self):
return findCmdInPath('ifconfig') + ' -a inet'
def getMountDevFirst(self):
return False
class DarwinPlatform(GenericPlatform):
def __init__(self):
pass
def getName(self):
return "darwin"
def get_machine_arch_cmd(self):
return 'uname -m'
def getMountDevFirst(self):
return True
def getPing6(self):
return findCmdInPath('ping6')
class FreeBsdPlatform(GenericPlatform):
def __init__(self):
pass
def getName(self):
return "freebsd"
def get_machine_arch_cmd(self):
return 'uname -m'
def getMountDevFirst(self):
return True
""" if self.SYSTEM == 'sunos':
self.PS_TXT='ef'
self.LIB_TYPE='LD_LIBRARY_PATH'
self.ZCAT='gzcat'
self.PG_METHOD='trust'
self.NOLINE_ECHO='/usr/bin/echo'
self.MAIL='/bin/mailx'
self.PING_TIME='1'
self.DF=findCmdInPath('df')
self.DU_TXT='-s'
elif self.SYSTEM == 'linux':
self.PS_TXT='ax'
self.LIB_TYPE='LD_LIBRARY_PATH'
self.PG_METHOD='ident sameuser'
self.NOLINE_ECHO='%s -e' % self.ECHO
self.PING_TIME='-c 1'
self.DF='%s -P' % findCmdInPath('df')
self.DU_TXT='c'
elif self.SYSTEM == 'darwin':
self.PS_TXT='ax'
self.LIB_TYPE='DYLD_LIBRARY_PATH'
self.PG_METHOD='ident sameuser'
self.NOLINE_ECHO= self.ECHO
self.PING_TIME='-c 1'
self.DF='%s -P' % findCmdInPath('df')
self.DU_TXT='-c'
elif self.SYSTEM == 'freebsd':
self.PS_TXT='ax'
self.LIB_TYPE='LD_LIBRARY_PATH'
self.PG_METHOD='ident sameuser'
self.NOLINE_ECHO='%s -e' % self.ECHO
self.PING_TIME='-c 1'
self.DF='%s -P' % findCmdInPath('df')
self.DU_TXT='-c'
"""
# ---------------ping--------------------
class Ping(Command):
def __init__(self, name, hostToPing, ctxt=LOCAL, remoteHost=None, obj=None):
self.hostToPing = hostToPing
self.obj = obj
pingToUse = findCmdInPath('ping')
if curr_platform == LINUX or curr_platform == DARWIN:
# Get the family of the address we need to ping. If it's AF_INET6
# we must use ping6 to ping it.
addrinfo = socket.getaddrinfo(hostToPing, None)
if addrinfo and addrinfo[0] and addrinfo[0][0] == socket.AF_INET6:
pingToUse = SYSTEM.getPing6()
cmdStr = "%s -c 1 %s" % (pingToUse, hostToPing)
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
@staticmethod
def ping_list(host_list):
for host in host_list:
yield Ping("ping", host, ctxt=LOCAL, remoteHost=None)
@staticmethod
def local(name, hostToPing):
p = Ping(name, hostToPing)
p.run(validateAfter=True)
@staticmethod
def remote(name, hostToPing, hostToPingFrom):
p = Ping(name, hostToPing, ctxt=REMOTE, remoteHost=hostToPingFrom)
p.run(validateAfter=True)
# ---------------du--------------------
class DiskUsage(Command):
def __init__(self, name, directory, ctxt=LOCAL, remoteHost=None):
self.directory = directory
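        # When running remotely, the awk field reference is escaped (\$5) so that
        # it survives the extra layer of shell interpretation on the remote side.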
if remoteHost:
cmdStr = "ls -l -R %s | %s ^- | %s '{t+=\$5;} END{print t}'" % (
directory, findCmdInPath('grep'), findCmdInPath('awk'))
else:
cmdStr = "ls -l -R %s | %s ^- | %s '{t+=$5;} END{print t}'" % (
directory, findCmdInPath('grep'), findCmdInPath('awk'))
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
@staticmethod
def get_size(name, remote_host, directory):
duCmd = DiskUsage(name, directory, ctxt=REMOTE, remoteHost=remote_host)
duCmd.run(validateAfter=True)
return duCmd.get_bytes_used()
def get_bytes_used(self):
rawIn = self.results.stdout.split('\t')[0].strip()
# TODO: revisit this idea of parsing '' and making it a 0. seems dangerous.
if rawIn == '':
return 0
if rawIn[0] == 'd':
raise ExecutionError("du command could not find directory: cmd: %s"
"resulted in stdout: '%s' stderr: '%s'" %
(self.cmdStr, self.results.stdout,
self.results.stderr),
self)
else:
dirBytes = int(rawIn)
return dirBytes
# -------------df----------------------
class DiskFree(Command):
def __init__(self, name, directory, ctxt=LOCAL, remoteHost=None):
self.directory = directory
cmdStr = "%s %s" % (SYSTEM.getDiskFreeCmd(), directory)
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
@staticmethod
def get_size(name, remote_host, directory):
dfCmd = DiskFree(name, directory, ctxt=REMOTE, remoteHost=remote_host)
dfCmd.run(validateAfter=True)
return dfCmd.get_bytes_free()
@staticmethod
def get_size_local(name, directory):
dfCmd = DiskFree(name, directory)
dfCmd.run(validateAfter=True)
return dfCmd.get_bytes_free()
@staticmethod
def get_disk_free_info_local(name, directory):
dfCmd = DiskFree(name, directory)
dfCmd.run(validateAfter=True)
return dfCmd.get_disk_free_output()
def get_disk_free_output(self):
'''expected output of the form:
Filesystem 512-blocks Used Available Capacity Mounted on
/dev/disk0s2 194699744 158681544 35506200 82% /
Returns data in list format:
['/dev/disk0s2', '194699744', '158681544', '35506200', '82%', '/']
'''
rawIn = self.results.stdout.split('\n')[1]
return rawIn.split()
def get_bytes_free(self):
disk_free = self.get_disk_free_output()
bytesFree = int(disk_free[3]) * 1024
return bytesFree
# -------------mkdir------------------
class MakeDirectory(Command):
def __init__(self, name, directory, ctxt=LOCAL, remoteHost=None):
self.directory = directory
cmdStr = "%s -p %s" % (findCmdInPath('mkdir'), directory)
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
@staticmethod
def local(name, directory):
mkdirCmd = MakeDirectory(name, directory)
mkdirCmd.run(validateAfter=True)
@staticmethod
def remote(name, remote_host, directory):
mkdirCmd = MakeDirectory(name, directory, ctxt=REMOTE, remoteHost=remote_host)
mkdirCmd.run(validateAfter=True)
# -------------mv------------------
class MoveDirectory(Command):
def __init__(self, name, srcDirectory, dstDirectory, ctxt=LOCAL, remoteHost=None):
self.srcDirectory = srcDirectory
self.dstDirectory = dstDirectory
cmdStr = "%s -f %s %s" % (findCmdInPath('mv'), srcDirectory, dstDirectory)
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
# -------------append------------------
class AppendTextToFile(Command):
def __init__(self, name, file, text, ctxt=LOCAL, remoteHost=None):
cmdStr = "echo '%s' >> %s" % (text, file)
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
# -------------inline perl replace------
class InlinePerlReplace(Command):
def __init__(self, name, fromStr, toStr, file, ctxt=LOCAL, remoteHost=None):
cmdStr = "%s -pi.bak -e's/%s/%s/g' %s" % (findCmdInPath('perl'), fromStr, toStr, file)
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
# ------------- remove a directory recursively ------------------
class RemoveDirectory(Command):
"""
remove a directory recursively, including the directory itself.
Uses rsync for efficiency.
"""
def __init__(self, name, directory, ctxt=LOCAL, remoteHost=None):
unique_dir = "/tmp/emptyForRemove%s" % uuid.uuid4()
cmd_str = "if [ -d {target_dir} ]; then " \
"mkdir -p {unique_dir} && " \
"{cmd} -a --delete {unique_dir}/ {target_dir}/ && " \
"rmdir {target_dir} {unique_dir} ; fi".format(
unique_dir=unique_dir,
cmd=findCmdInPath('rsync'),
target_dir=directory
)
Command.__init__(self, name, cmd_str, ctxt, remoteHost)
@staticmethod
def remote(name, remote_host, directory):
rm_cmd = RemoveDirectory(name, directory, ctxt=REMOTE, remoteHost=remote_host)
rm_cmd.run(validateAfter=True)
@staticmethod
def local(name, directory):
rm_cmd = RemoveDirectory(name, directory)
rm_cmd.run(validateAfter=True)
# -------------rm -rf ------------------
class RemoveFile(Command):
def __init__(self, name, filepath, ctxt=LOCAL, remoteHost=None):
cmdStr = "%s -f %s" % (findCmdInPath('rm'), filepath)
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
@staticmethod
def remote(name, remote_host, filepath):
rmCmd = RemoveFile(name, filepath, ctxt=REMOTE, remoteHost=remote_host)
rmCmd.run(validateAfter=True)
@staticmethod
def local(name, filepath):
rmCmd = RemoveFile(name, filepath)
rmCmd.run(validateAfter=True)
class RemoveDirectoryContents(Command):
"""
remove contents of a directory recursively, excluding the parent directory.
Uses rsync for efficiency.
"""
def __init__(self, name, directory, ctxt=LOCAL, remoteHost=None):
unique_dir = "/tmp/emptyForRemove%s" % uuid.uuid4()
cmd_str = "if [ -d {target_dir} ]; then " \
"mkdir -p {unique_dir} && " \
"{cmd} -a --delete {unique_dir}/ {target_dir}/ && " \
"rmdir {unique_dir} ; fi".format(
unique_dir=unique_dir,
cmd=findCmdInPath('rsync'),
target_dir=directory
)
Command.__init__(self, name, cmd_str, ctxt, remoteHost)
@staticmethod
def remote(name, remote_host, directory):
rm_cmd = RemoveDirectoryContents(name, directory, ctxt=REMOTE, remoteHost=remote_host)
rm_cmd.run(validateAfter=True)
@staticmethod
def local(name, directory):
rm_cmd = RemoveDirectoryContents(name, directory)
rm_cmd.run(validateAfter=True)
class RemoveGlob(Command):
"""
    This glob removal tool uses rm -rf, so it can fail (e.g. run out of memory) if too many files match.
"""
def __init__(self, name, glob, ctxt=LOCAL, remoteHost=None):
cmd_str = "%s -rf %s" % (findCmdInPath('rm'), glob)
Command.__init__(self, name, cmd_str, ctxt, remoteHost)
@staticmethod
def remote(name, remote_host, directory):
rm_cmd = RemoveGlob(name, directory, ctxt=REMOTE, remoteHost=remote_host)
rm_cmd.run(validateAfter=True)
@staticmethod
def local(name, directory):
rm_cmd = RemoveGlob(name, directory)
rm_cmd.run(validateAfter=True)
# -------------file and dir existence -------------
class PathIsDirectory(Command):
def __init__(self, name, directory, ctxt=LOCAL, remoteHost=None):
self.directory = directory
cmdStr = """python -c "import os; print os.path.isdir('%s')" """ % directory
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
@staticmethod
def remote(name, remote_host, directory):
cmd = PathIsDirectory(name, directory, ctxt=REMOTE, remoteHost=remote_host)
cmd.run(validateAfter=True)
return cmd.isDir()
def isDir(self):
return bool(self.results.stdout.strip())
# --------------------------
class FileDirExists(Command):
def __init__(self, name, directory, ctxt=LOCAL, remoteHost=None):
self.directory = directory
cmdStr = """python -c "import os; print os.path.exists('%s')" """ % directory
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
@staticmethod
def remote(name, remote_host, directory):
cmd = FileDirExists(name, directory, ctxt=REMOTE, remoteHost=remote_host)
cmd.run(validateAfter=True)
return cmd.filedir_exists()
def filedir_exists(self):
return self.results.stdout.strip().upper() == 'TRUE'
class CreateDirIfNecessary(Command):
def __init__(self, name, directory, ctxt=LOCAL, remoteHost=None):
self.directory = directory
cmdStr = """python -c "import sys, os, errno;
try:
os.mkdir('%s')
except OSError, ex:
if ex.errno != errno.EEXIST:
raise
" """ % (directory)
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
@staticmethod
def remote(name, remote_host, directory):
cmd = CreateDirIfNecessary(name, directory, ctxt=REMOTE, remoteHost=remote_host)
cmd.run(validateAfter=True)
class DirectoryIsEmpty(Command):
def __init__(self, name, directory, ctxt=LOCAL, remoteHost=None):
self.directory = directory
cmdStr = """python -c "import os;
for root, dirs, files in os.walk('%s'):
print (len(dirs) != 0 or len(files) != 0)
" """ % self.directory
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
@staticmethod
def remote(name, remote_host, directory):
cmd = DirectoryIsEmpty(name, directory, ctxt=REMOTE, remoteHost=remote_host)
cmd.run(validateAfter=True)
return cmd.isEmpty()
def isEmpty(self):
return bool(self.results.stdout.strip())
# -------------scp------------------
# MPP-13617
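# canonicalize() wraps a bare IPv6 address in brackets so it can be used in an
# scp host:path argument, e.g. '2001:db8::1' -> '[2001:db8::1]'; hostnames and
# IPv4 addresses are returned unchanged.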
def canonicalize(addr):
if ':' not in addr: return addr
if '[' in addr: return addr
return '[' + addr + ']'
class RemoteCopy(Command):
def __init__(self, name, srcDirectory, dstHost, dstDirectory, ctxt=LOCAL, remoteHost=None):
self.srcDirectory = srcDirectory
self.dstHost = dstHost
self.dstDirectory = dstDirectory
cmdStr = "%s -o 'StrictHostKeyChecking no' -r %s %s:%s" % (
findCmdInPath('scp'), srcDirectory, canonicalize(dstHost), dstDirectory)
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
class Scp(Command):
def __init__(self, name, srcFile, dstFile, srcHost=None, dstHost=None, recursive=False, ctxt=LOCAL,
remoteHost=None):
cmdStr = findCmdInPath('scp') + " "
if recursive:
cmdStr = cmdStr + "-r "
if srcHost:
cmdStr = cmdStr + canonicalize(srcHost) + ":"
cmdStr = cmdStr + srcFile + " "
if dstHost:
cmdStr = cmdStr + canonicalize(dstHost) + ":"
cmdStr = cmdStr + dstFile
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
# -------------local copy------------------
class LocalDirCopy(Command):
def __init__(self, name, srcDirectory, dstDirectory):
# tar is much faster than cp for directories with lots of files
self.srcDirectory = srcDirectory
self.dstDirectory = dstDirectory
tarCmd = SYSTEM.getTarCmd()
cmdStr = "%s -cf - -C %s . | %s -xf - -C %s" % (tarCmd, srcDirectory, tarCmd, dstDirectory)
Command.__init__(self, name, cmdStr, LOCAL, None)
# -------------local copy------------------
class LocalCopy(Command):
def __init__(self, name, srcFile, dstFile):
# tar is much faster than cp for directories with lots of files
cpCmd = SYSTEM.getCpCmd()
cmdStr = "%s %s %s" % (cpCmd, srcFile, dstFile)
Command.__init__(self, name, cmdStr, LOCAL, None)
# ------------ ssh + tar ------------------
# TODO: impl this.
# tar czf - srcDir/ | ssh user@dstHost tar xzf - -C dstDir
# -------------create tar------------------
class CreateTar(Command):
def __init__(self, name, srcDirectory, dstTarFile, ctxt=LOCAL, remoteHost=None):
self.srcDirectory = srcDirectory
self.dstTarFile = dstTarFile
tarCmd = SYSTEM.getTarCmd()
cmdStr = "%s cvPf %s -C %s ." % (tarCmd, self.dstTarFile, srcDirectory)
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
# -------------extract tar---------------------
class ExtractTar(Command):
def __init__(self, name, srcTarFile, dstDirectory, ctxt=LOCAL, remoteHost=None):
self.srcTarFile = srcTarFile
self.dstDirectory = dstDirectory
tarCmd = SYSTEM.getTarCmd()
cmdStr = "%s -C %s -xf %s" % (tarCmd, dstDirectory, srcTarFile)
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
# --------------kill ----------------------
class Kill(Command):
def __init__(self, name, pid, signal, ctxt=LOCAL, remoteHost=None):
self.pid = pid
self.signal = signal
cmdStr = "%s -s %s %s" % (findCmdInPath('kill'), signal, pid)
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
@staticmethod
def local(name, pid, signal):
cmd = Kill(name, pid, signal)
cmd.run(validateAfter=True)
@staticmethod
def remote(name, pid, signal, remote_host):
cmd = Kill(name, pid, signal, ctxt=REMOTE, remoteHost=remote_host)
cmd.run(validateAfter=True)
# --------------kill children--------------
class KillChildren(Command):
def __init__(self, name, pid, signal, ctxt=LOCAL, remoteHost=None):
self.pid = pid
self.signal = signal
cmdStr = "%s -%s -P %s" % (findCmdInPath('pkill'), signal, pid)
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
@staticmethod
def local(name, pid, signal):
cmd = KillChildren(name, pid, signal)
cmd.run(validateAfter=True)
@staticmethod
def remote(name, pid, signal, remote_host):
cmd = KillChildren(name, pid, signal, ctxt=REMOTE, remoteHost=remote_host)
cmd.run(validateAfter=True)
# --------------pkill----------------------
class Pkill(Command):
def __init__(self, name, processname, signal=signal.SIGTERM, ctxt=LOCAL, remoteHost=None):
cmdStr = "%s -%s %s" % (findCmdInPath('pkill'), signal, processname)
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
# --------------sadc-----------------------
class Sadc(Command):
def __init__(self, name, outfilename, interval=5, background=False, ctxt=LOCAL, remoteHost=None):
cmdStr = SYSTEM.getSadcCmd(interval, outfilename)
if background:
cmdStr = "rm " + outfilename + "; nohup " + cmdStr + " < /dev/null > /dev/null 2>&1 &"
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
@staticmethod
def local(name, outfilename, interval, background):
cmd = Sadc(name, outfilename, interval, background)
cmd.run(validateAfter=True)
@staticmethod
def remote(name, outfilename, interval, background, remote_host):
cmd = Sadc(name, outfilename, interval, background, ctxt=REMOTE, remoteHost=remote_host)
cmd.run(validateAfter=True)
# --------------hostname ----------------------
class Hostname(Command):
def __init__(self, name, ctxt=LOCAL, remoteHost=None):
self.remotehost = remoteHost
Command.__init__(self, name, findCmdInPath('hostname'), ctxt, remoteHost)
def get_hostname(self):
if not self.results:
raise Exception, 'Command not yet executed'
return self.results.stdout.strip()
class InterfaceAddrs(Command):
"""Returns list of interface IP Addresses. List does not include loopback."""
def __init__(self, name, ctxt=LOCAL, remoteHost=None):
ifconfig = SYSTEM.getIfconfigCmd()
grep = findCmdInPath('grep')
awk = findCmdInPath('awk')
cut = findCmdInPath('cut')
cmdStr = '%s|%s "inet "|%s -v "127.0.0"|%s \'{print \$2}\'|%s -d: -f2' % (ifconfig, grep, grep, awk, cut)
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
@staticmethod
def local(name):
cmd = InterfaceAddrs(name)
cmd.run(validateAfter=True)
return cmd.get_results().stdout.split()
@staticmethod
def remote(name, remoteHost):
cmd = InterfaceAddrs(name, ctxt=REMOTE, remoteHost=remoteHost)
cmd.run(validateAfter=True)
return cmd.get_results().stdout.split()
class FileContainsTerm(Command):
def __init__(self, name, search_term, file, ctxt=LOCAL, remoteHost=None):
self.search_term = search_term
self.file = file
cmdStr = "%s -c %s %s" % (findCmdInPath('grep'), search_term, file)
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
def contains_term(self):
if not self.results:
raise Exception, 'Command not yet executed'
count = int(self.results.stdout.strip())
if count == 0:
return False
else:
return True
# --------------tcp port is active -----------------------
class PgPortIsActive(Command):
def __init__(self, name, port, file, ctxt=LOCAL, remoteHost=None):
self.port = port
cmdStr = "%s -an 2>/dev/null | %s %s | %s '{print $NF}'" % \
(findCmdInPath('netstat'), findCmdInPath('grep'), file, findCmdInPath('awk'))
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
def contains_port(self):
rows = self.results.stdout.strip().split()
if len(rows) == 0:
return False
for r in rows:
val = r.split('.')
netstatport = int(val[len(val) - 1])
if netstatport == self.port:
return True
return False
@staticmethod
def local(name, file, port):
cmd = PgPortIsActive(name, port, file)
cmd.run(validateAfter=True)
return cmd.contains_port()
@staticmethod
def remote(name, file, port, remoteHost):
cmd = PgPortIsActive(name, port, file, ctxt=REMOTE, remoteHost=remoteHost)
cmd.run(validateAfter=True)
return cmd.contains_port()
# --------------chmod ----------------------
class Chmod(Command):
def __init__(self, name, dir, perm, ctxt=LOCAL, remoteHost=None):
cmdStr = '%s %s %s' % (findCmdInPath('chmod'), perm, dir)
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
@staticmethod
def local(name, dir, perm):
cmd = Chmod(name, dir, perm)
cmd.run(validateAfter=True)
@staticmethod
def remote(name, hostname, dir, perm):
cmd = Chmod(name, dir, perm, ctxt=REMOTE, remoteHost=hostname)
cmd.run(validateAfter=True)
# --------------echo ----------------------
class Echo(Command):
def __init__(self, name, echoString, ctxt=LOCAL, remoteHost=None):
cmdStr = '%s "%s"' % (findCmdInPath('echo'), echoString)
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
@staticmethod
def remote(name, echoString, hostname):
cmd = Echo(name, echoString, ctxt=REMOTE, remoteHost=hostname)
cmd.run(validateAfter=True)
# --------------touch ----------------------
class Touch(Command):
def __init__(self, name, file, ctxt=LOCAL, remoteHost=None):
cmdStr = '%s %s' % (findCmdInPath('touch'), file)
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
@staticmethod
def remote(name, file, hostname):
cmd = Touch(name, file, ctxt=REMOTE, remoteHost=hostname)
cmd.run(validateAfter=True)
# --------------get user id ----------------------
class UserId(Command):
def __init__(self, name, ctxt=LOCAL, remoteHost=None):
idCmd = findCmdInPath('id')
trCmd = findCmdInPath('tr')
awkCmd = findCmdInPath('awk')
cmdStr = "%s|%s '(' ' '|%s ')' ' '|%s '{print $2}'" % (idCmd, trCmd, trCmd, awkCmd)
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
@staticmethod
def local(name):
cmd = UserId(name)
cmd.run(validateAfter=True)
return cmd.results.stdout.strip()
# -------------- test file for setuid bit ----------------------
class FileTestSuid(Command):
def __init__(self, name, filename, ctxt=LOCAL, remoteHost=None):
cmdStr = """python -c "import os; import stat; testRes = os.stat('%s'); print (testRes.st_mode & stat.S_ISUID) == stat.S_ISUID" """ % filename
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
@staticmethod
def remote(name, remote_host, filename):
cmd = FileTestSuid(name, filename, ctxt=REMOTE, remoteHost=remote_host)
cmd.run(validateAfter=True)
return cmd.file_is_suid()
def file_is_suid(self):
return self.results.stdout.strip().upper() == 'TRUE'
# -------------- get file owner ----------------------
class FileGetOwnerUid(Command):
def __init__(self, name, filename, ctxt=LOCAL, remoteHost=None):
cmdStr = """python -c "import os; import stat; testRes = os.stat('%s'); print testRes.st_uid " """ % filename
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
@staticmethod
def remote(name, remote_host, filename):
cmd = FileGetOwnerUid(name, filename, ctxt=REMOTE, remoteHost=remote_host)
cmd.run(validateAfter=True)
return cmd.file_uid()
def file_uid(self):
return int(self.results.stdout.strip().upper())
# --------------get list of descendant processes -------------------
def getDescendentProcesses(pid):
    ''' return all process pids which are descendants of the given process id '''
children_pids = []
for p in psutil.Process(pid).children(recursive=True):
if p.is_running():
children_pids.append(p.pid)
return children_pids
# --------------global variable initialization ----------------------
if curr_platform == SUNOS:
SYSTEM = SolarisPlatform()
elif curr_platform == LINUX:
SYSTEM = LinuxPlatform()
elif curr_platform == DARWIN:
SYSTEM = DarwinPlatform()
elif curr_platform == FREEBSD:
SYSTEM = FreeBsdPlatform()
else:
raise Exception("Platform %s is not supported. Supported platforms are: %s", SYSTEM, str(platform_list))
| apache-2.0 | -7,633,183,659,227,661,000 | -777,556,197,278,916,500 | 30.905172 | 150 | 0.58951 | false |
prune998/ansible | test/units/modules/network/eos/test_eos_user.py | 41 | 3952 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.eos import eos_user
from .eos_module import TestEosModule, load_fixture, set_module_args
class TestEosUserModule(TestEosModule):
module = eos_user
def setUp(self):
self.mock_get_config = patch('ansible.modules.network.eos.eos_user.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.eos.eos_user.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
self.mock_get_config.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None, transport='cli'):
self.get_config.return_value = load_fixture('eos_user_config.cfg')
self.load_config.return_value = dict(diff=None, session='session')
def test_eos_user_create(self):
set_module_args(dict(username='test', nopassword=True))
commands = ['username test nopassword']
self.execute_module(changed=True, commands=commands)
def test_eos_user_delete(self):
set_module_args(dict(username='ansible', state='absent'))
commands = ['no username ansible']
self.execute_module(changed=True, commands=commands)
def test_eos_user_password(self):
set_module_args(dict(username='ansible', password='test'))
commands = ['username ansible secret test']
self.execute_module(changed=True, commands=commands)
def test_eos_user_privilege(self):
set_module_args(dict(username='ansible', privilege=15))
commands = ['username ansible privilege 15']
self.execute_module(changed=True, commands=commands)
def test_eos_user_privilege_invalid(self):
set_module_args(dict(username='ansible', privilege=25))
self.execute_module(failed=True)
def test_eos_user_purge(self):
set_module_args(dict(purge=True))
commands = ['no username ansible']
self.execute_module(changed=True, commands=commands)
def test_eos_user_role(self):
set_module_args(dict(username='ansible', role='test'))
commands = ['username ansible role test']
self.execute_module(changed=True, commands=commands)
def test_eos_user_sshkey(self):
set_module_args(dict(username='ansible', sshkey='test'))
commands = ['username ansible sshkey test']
self.execute_module(changed=True, commands=commands)
def test_eos_user_update_password_changed(self):
set_module_args(dict(username='test', password='test', update_password='on_create'))
commands = ['username test secret test']
self.execute_module(changed=True, commands=commands)
def test_eos_user_update_password_on_create_ok(self):
set_module_args(dict(username='ansible', password='test', update_password='on_create'))
self.execute_module()
def test_eos_user_update_password_always(self):
set_module_args(dict(username='ansible', password='test', update_password='always'))
commands = ['username ansible secret test']
self.execute_module(changed=True, commands=commands)
| gpl-3.0 | 2,634,565,877,003,511,000 | 1,102,836,681,472,384,500 | 38.919192 | 95 | 0.694332 | false |
bzero/statsmodels | examples/incomplete/ols_table.py | 34 | 1999 | """Example: statsmodels.OLS
"""
from statsmodels.datasets.longley import load
import statsmodels.api as sm
from statsmodels.iolib.table import SimpleTable, default_txt_fmt
import numpy as np
data = load()
data_orig = (data.endog.copy(), data.exog.copy())
#.. Note: In this example using zscored/standardized variables has no effect on
#.. regression estimates. Are there no numerical problems?
rescale = 0
#0: no rescaling, 1:demean, 2:standardize, 3:standardize and transform back
rescale_ratio = data.endog.std() / data.exog.std(0)
if rescale > 0:
# rescaling
data.endog -= data.endog.mean()
data.exog -= data.exog.mean(0)
if rescale > 1:
data.endog /= data.endog.std()
data.exog /= data.exog.std(0)
#skip because mean has been removed, but dimension is hardcoded in table
data.exog = sm.tools.add_constant(data.exog, prepend=False)
ols_model = sm.OLS(data.endog, data.exog)
ols_results = ols_model.fit()
# the Longley dataset is well known to have high multicollinearity
# one way to find the condition number is as follows
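# The line below is an added illustration (not part of the original example
# script); it uses the standard NumPy routine np.linalg.cond on the design
# matrix built above.
print('condition number of exog: %g' % np.linalg.cond(data.exog))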
#Find OLS parameters for model with one explanatory variable dropped
resparams = np.nan * np.ones((7, 7))
res = sm.OLS(data.endog, data.exog).fit()
resparams[:, 0] = res.params
indall = list(range(7))  # list() so a column index can be deleted from a copy below
for i in range(6):
ind = indall[:]
del ind[i]
res = sm.OLS(data.endog, data.exog[:, ind]).fit()
resparams[ind, i + 1] = res.params
if rescale == 1:
pass
if rescale == 3:
resparams[:-1, :] *= rescale_ratio[:, None]
txt_fmt1 = default_txt_fmt
numformat = '%10.4f'
txt_fmt1 = dict(data_fmts=[numformat])
rowstubs = data.names[1:] + ['const']
headers = ['all'] + ['drop %s' % name for name in data.names[1:]]
tabl = SimpleTable(resparams, headers, rowstubs, txt_fmt=txt_fmt1)
nanstring = numformat % np.nan
nn = len(nanstring)
nanrep = ' ' * (nn - 1)
nanrep = nanrep[:nn // 2] + '-' + nanrep[nn // 2:]
print('Longley data - sensitivity to dropping an explanatory variable')
print(str(tabl).replace(nanstring, nanrep))
| bsd-3-clause | 3,325,461,163,740,931,000 | -1,387,442,604,971,325,200 | 27.971014 | 79 | 0.690345 | false |
AnoopAlias/nDeploy | scripts/update_cluster_ipmap.py | 1 | 1898 | #!/usr/bin/env python
import yaml
import argparse
import os
__author__ = "Anoop P Alias"
__copyright__ = "Copyright 2014, PiServe Technologies Pvt Ltd , India"
__license__ = "GPL"
__email__ = "[email protected]"
installation_path = "/opt/nDeploy" # Absolute Installation Path
cluster_config_file = installation_path+"/conf/ndeploy_cluster.yaml"
# Function defs
def update_ip_map(server, iphere, ipthere):
cluster_data_yaml = open(cluster_config_file, 'r')
cluster_data_yaml_parsed = yaml.safe_load(cluster_data_yaml)
cluster_data_yaml.close()
if cluster_data_yaml_parsed:
if server in cluster_data_yaml_parsed.keys():
connect_server_dict = cluster_data_yaml_parsed.get(server)
ipmap_dict = connect_server_dict.get("ipmap")
ipmap_dict[iphere] = ipthere
with open(cluster_config_file, 'w') as yaml_file:
yaml_file.write(yaml.dump(cluster_data_yaml_parsed, default_flow_style=False))
else:
mydict = {server: {'ipmap': {iphere: ipthere}}}
cluster_data_yaml_parsed.update(mydict)
with open(cluster_config_file, 'w') as yaml_file:
yaml_file.write(yaml.dump(cluster_data_yaml_parsed, default_flow_style=False))
else:
print("Invalid cluster data")
parser = argparse.ArgumentParser(description="create/update nDeploy-cluster ipmap")
parser.add_argument("slave_hostname")
parser.add_argument("ip_here")
parser.add_argument("remote_ip")
args = parser.parse_args()
server_key = args.slave_hostname
ip_here = args.ip_here
remote_ip = args.remote_ip
if os.path.isfile(cluster_config_file):
update_ip_map(server_key, ip_here, remote_ip)
else:
mydict = {server_key: {'ipmap': {ip_here: remote_ip}}}
with open(cluster_config_file, 'w') as cluster_conf:
cluster_conf.write(yaml.dump(mydict, default_flow_style=False))
| gpl-3.0 | -689,779,598,004,869,100 | 853,583,012,990,762,800 | 34.148148 | 94 | 0.674921 | false |
letouriste001/SmartForest_2.0 | python3.4Smartforest/lib/python3.4/site-packages/django/db/migrations/recorder.py | 1 | 2868 | from __future__ import unicode_literals
from django.apps.registry import Apps
from django.db import models
from django.db.utils import DatabaseError
from django.utils.encoding import python_2_unicode_compatible
from django.utils.timezone import now
from .exceptions import MigrationSchemaMissing
class MigrationRecorder(object):
"""
Deals with storing migration records in the database.
Because this table is actually itself used for dealing with model
creation, it's the one thing we can't do normally via migrations.
We manually handle table creation/schema updating (using schema backend)
and then have a floating model to do queries with.
If a migration is unapplied its row is removed from the table. Having
a row in the table always means a migration is applied.
"""
@python_2_unicode_compatible
class Migration(models.Model):
app = models.CharField(max_length=255)
name = models.CharField(max_length=255)
applied = models.DateTimeField(default=now)
class Meta:
apps = Apps()
app_label = "migrations"
db_table = "django_migrations"
def __str__(self):
return "Migration %s for %s" % (self.name, self.app)
def __init__(self, connection):
self.connection = connection
@property
def migration_qs(self):
return self.Migration.objects.using(self.connection.alias)
def ensure_schema(self):
"""
Ensures the table exists and has the correct schema.
"""
# If the table's there, that's fine - we've never changed its schema
# in the codebase.
if self.Migration._meta.db_table in self.connection.introspection.table_names(self.connection.cursor()):
return
# Make the table
try:
with self.connection.schema_editor() as editor:
editor.create_model(self.Migration)
except DatabaseError as exc:
raise MigrationSchemaMissing("Unable to create the django_migrations table (%s)" % exc)
def applied_migrations(self):
"""
Returns a set of (app, name) of applied migrations.
"""
self.ensure_schema()
return set(tuple(x) for x in self.migration_qs.values_list("app", "name"))
def record_applied(self, app, name):
"""
Records that a migration was applied.
"""
self.ensure_schema()
self.migration_qs.create(app=app, name=name)
def record_unapplied(self, app, name):
"""
Records that a migration was unapplied.
"""
self.ensure_schema()
self.migration_qs.filter(app=app, name=name).delete()
def flush(self):
"""
Deletes all migration records. Useful if you're testing migrations.
"""
self.migration_qs.all().delete()
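# Illustrative usage sketch (added commentary, not part of Django itself);
# "myapp" and "0001_initial" are placeholder names and the default database
# connection is assumed to be configured:
#
#     from django.db import connection
#     recorder = MigrationRecorder(connection)
#     recorder.ensure_schema()
#     recorder.record_applied("myapp", "0001_initial")
#     ("myapp", "0001_initial") in recorder.applied_migrations()  # True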
| mit | -6,098,164,054,142,605,000 | -4,537,825,044,610,460,700 | 32.348837 | 112 | 0.642957 | false |
staffanm/layeredconfig | layeredconfig/dictsource.py | 1 | 1625 | # this should possibly be a abstract class as well
from . import ConfigSource
class DictSource(ConfigSource):
def __init__(self, **kwargs):
"""If your backend data is exposable as a python dict, you can
subclass from this class to avoid implementing :py:meth:`has`,
:py:meth:`get`, :py:meth:`keys`, :py:meth:`subsection` and
:py:meth:`subsections`. You only need to write
:py:meth:`__init__` (which should set ``self.source`` to that
exposed dict), and possibly :py:meth:`typed` and
:py:meth:`save`.
"""
super(DictSource, self).__init__(**kwargs)
self.source = {}
def subsections(self):
for (k, v) in self.source.items():
if isinstance(v, dict):
yield k
def keys(self):
for (k, v) in self.source.items():
if not isinstance(v, dict) and not isinstance(v, type):
yield k
def subsection(self, key):
# Make an object of the correct type
return self.__class__(defaults=self.source[key],
parent=self,
identifier=self.identifier)
def typed(self, key):
# if we have it, we can type it
return key in self.source and self.source[key] is not None
def has(self, key):
# should return true for real values only, not type placeholders or sub-dicts
return key in self.source and not isinstance(self.source[key], (type, dict))
def get(self, key):
return self.source[key]
def set(self, key, value):
self.source[key] = value
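# Minimal illustrative subclass (an added sketch, not part of the library):
# it exposes a plain in-memory dict as self.source, as the DictSource
# docstring above describes; the keyword handling mirrors how subsection()
# constructs child objects and is otherwise an assumption about the base class.
class _InMemorySource(DictSource):
    def __init__(self, defaults=None, **kwargs):
        super(_InMemorySource, self).__init__(**kwargs)
        # expose the supplied mapping (if any) as the backend dict
        self.source = dict(defaults) if defaults else {}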
| bsd-3-clause | -1,464,869,563,306,499,300 | 2,231,021,591,537,312,500 | 33.574468 | 85 | 0.580923 | false |
backenklee/RIOT | dist/tools/cc2538-bsl/cc2538-bsl.py | 48 | 44987 | #!/usr/bin/env python
# Copyright (c) 2014, Jelmer Tiete <[email protected]>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Implementation based on stm32loader by Ivan A-R <[email protected]>
# Serial boot loader over UART for CC13xx / CC2538 / CC26xx
# Based on the info found in TI's swru333a.pdf (spma029.pdf)
#
# Bootloader only starts if no valid image is found or if boot loader
# backdoor is enabled.
# Make sure you don't lock yourself out!! (enable backdoor in your firmware)
# More info at https://github.com/JelmerT/cc2538-bsl
from __future__ import print_function
from subprocess import Popen, PIPE
import sys
import getopt
import glob
import time
import os
import struct
import binascii
import traceback
try:
import magic
have_magic = True
except ImportError:
have_magic = False
try:
from intelhex import IntelHex
have_hex_support = True
except ImportError:
have_hex_support = False
# version
VERSION_STRING = "2.1"
# Verbose level
QUIET = 5
# Check which version of Python is running
PY3 = sys.version_info >= (3, 0)
try:
import serial
except ImportError:
print('{} requires the Python serial library'.format(sys.argv[0]))
print('Please install it with one of the following:')
print('')
if PY3:
print(' Ubuntu: sudo apt-get install python3-serial')
print(' Mac: sudo port install py34-serial')
else:
print(' Ubuntu: sudo apt-get install python-serial')
print(' Mac: sudo port install py-serial')
sys.exit(1)
def mdebug(level, message, attr='\n'):
if QUIET >= level:
print(message, end=attr, file=sys.stderr)
# Takes chip IDs (obtained via Get ID command) to human-readable names
CHIP_ID_STRS = {0xb964: 'CC2538',
0xb965: 'CC2538'
}
RETURN_CMD_STRS = {0x40: 'Success',
0x41: 'Unknown command',
0x42: 'Invalid command',
0x43: 'Invalid address',
0x44: 'Flash fail'
}
COMMAND_RET_SUCCESS = 0x40
COMMAND_RET_UNKNOWN_CMD = 0x41
COMMAND_RET_INVALID_CMD = 0x42
COMMAND_RET_INVALID_ADR = 0x43
COMMAND_RET_FLASH_FAIL = 0x44
class CmdException(Exception):
pass
class FirmwareFile(object):
HEX_FILE_EXTENSIONS = ('hex', 'ihx', 'ihex')
def __init__(self, path):
"""
Read a firmware file and store its data ready for device programming.
This class will try to guess the file type if python-magic is available.
If python-magic indicates a plain text file, and if IntelHex is
available, then the file will be treated as one of Intel HEX format.
In all other cases, the file will be treated as a raw binary file.
In both cases, the file's contents are stored in bytes for subsequent
usage to program a device or to perform a crc check.
Parameters:
path -- A str with the path to the firmware file.
Attributes:
bytes: A bytearray with firmware contents ready to send to the
device
"""
self._crc32 = None
firmware_is_hex = False
if have_magic:
file_type = bytearray(magic.from_file(path, True))
# from_file() returns bytes with PY3, str with PY2. This comparison
            # will be True in both cases
if file_type == b'text/plain':
firmware_is_hex = True
mdebug(5, "Firmware file: Intel Hex")
elif file_type == b'application/octet-stream':
mdebug(5, "Firmware file: Raw Binary")
else:
error_str = "Could not determine firmware type. Magic " \
"indicates '%s'" % (file_type)
raise CmdException(error_str)
else:
if os.path.splitext(path)[1][1:] in self.HEX_FILE_EXTENSIONS:
firmware_is_hex = True
mdebug(5, "Your firmware looks like an Intel Hex file")
else:
mdebug(5, "Cannot auto-detect firmware filetype: Assuming .bin")
mdebug(10, "For more solid firmware type auto-detection, install "
"python-magic.")
mdebug(10, "Please see the readme for more details.")
if firmware_is_hex:
if have_hex_support:
self.bytes = bytearray(IntelHex(path).tobinarray())
return
else:
error_str = "Firmware is Intel Hex, but the IntelHex library " \
"could not be imported.\n" \
"Install IntelHex in site-packages or program " \
"your device with a raw binary (.bin) file.\n" \
"Please see the readme for more details."
raise CmdException(error_str)
with open(path, 'rb') as f:
self.bytes = bytearray(f.read())
def crc32(self):
"""
Return the crc32 checksum of the firmware image
Return:
The firmware's CRC32, ready for comparison with the CRC
returned by the ROM bootloader's COMMAND_CRC32
"""
if self._crc32 is None:
self._crc32 = binascii.crc32(bytearray(self.bytes)) & 0xffffffff
return self._crc32
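# Illustrative usage sketch (added commentary; 'example/main.bin' is the
# placeholder path also used in usage() below, not a file shipped here):
#
#     firmware = FirmwareFile('example/main.bin')
#     print(len(firmware.bytes), hex(firmware.crc32()))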
class CommandInterface(object):
ACK_BYTE = 0xCC
NACK_BYTE = 0x33
def open(self, aport='/dev/tty.usbserial-000013FAB', abaudrate=500000):
self.sp = serial.Serial(
port=aport,
baudrate=abaudrate, # baudrate
bytesize=8, # number of databits
parity=serial.PARITY_NONE,
stopbits=1,
xonxoff=0, # enable software flow control
rtscts=0, # disable RTS/CTS flow control
timeout=0.5 # set a timeout value, None for waiting
# forever
)
def invoke_bootloader(self, dtr_active_high=False, inverted=False):
# Use the DTR and RTS lines to control bootloader and the !RESET pin.
# This can automatically invoke the bootloader without the user
# having to toggle any pins.
#
# If inverted is False (default):
# DTR: connected to the bootloader pin
# RTS: connected to !RESET
# If inverted is True, pin connections are the other way round
if inverted:
set_bootloader_pin = self.sp.setRTS
set_reset_pin = self.sp.setDTR
else:
set_bootloader_pin = self.sp.setDTR
set_reset_pin = self.sp.setRTS
set_bootloader_pin(1 if not dtr_active_high else 0)
set_reset_pin(0)
set_reset_pin(1)
set_reset_pin(0)
# Make sure the pin is still asserted when the chip
# comes out of reset. This fixes an issue where
# there wasn't enough delay here on Mac.
time.sleep(0.002)
set_bootloader_pin(0 if not dtr_active_high else 1)
# Some boards have a co-processor that detects this sequence here and
# then drives the main chip's BSL enable and !RESET pins. Depending on
# board design and co-processor behaviour, the !RESET pin may get
# asserted after we have finished the sequence here. In this case, we
# need a small delay so as to avoid trying to talk to main chip before
# it has actually entered its bootloader mode.
#
# See contiki-os/contiki#1533
time.sleep(0.1)
def close(self):
self.sp.close()
def _wait_for_ack(self, info="", timeout=1):
stop = time.time() + timeout
got = bytearray(2)
while got[-2] != 00 or got[-1] not in (CommandInterface.ACK_BYTE,
CommandInterface.NACK_BYTE):
got += self._read(1)
if time.time() > stop:
raise CmdException("Timeout waiting for ACK/NACK after '%s'"
% (info,))
# Our bytearray's length is: 2 initial bytes + 2 bytes for the ACK/NACK
# plus a possible N-4 additional (buffered) bytes
mdebug(10, "Got %d additional bytes before ACK/NACK" % (len(got) - 4,))
# wait for ask
ask = got[-1]
if ask == CommandInterface.ACK_BYTE:
# ACK
return 1
elif ask == CommandInterface.NACK_BYTE:
# NACK
mdebug(10, "Target replied with a NACK during %s" % info)
return 0
# Unknown response
mdebug(10, "Unrecognised response 0x%x to %s" % (ask, info))
return 0
def _encode_addr(self, addr):
byte3 = (addr >> 0) & 0xFF
byte2 = (addr >> 8) & 0xFF
byte1 = (addr >> 16) & 0xFF
byte0 = (addr >> 24) & 0xFF
if PY3:
return bytes([byte0, byte1, byte2, byte3])
else:
return (chr(byte0) + chr(byte1) + chr(byte2) + chr(byte3))
def _decode_addr(self, byte0, byte1, byte2, byte3):
return ((byte3 << 24) | (byte2 << 16) | (byte1 << 8) | (byte0 << 0))
def _calc_checks(self, cmd, addr, size):
return ((sum(bytearray(self._encode_addr(addr))) +
sum(bytearray(self._encode_addr(size))) +
cmd) & 0xFF)
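    # Worked example (added commentary): for cmd=0x21, addr=0x00200000 and
    # size=0x10 the address bytes sum to 0x20 and the size bytes sum to 0x10,
    # so the checksum is (0x20 + 0x10 + 0x21) & 0xFF = 0x51.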
def _write(self, data, is_retry=False):
if PY3:
if type(data) == int:
assert data < 256
goal = 1
written = self.sp.write(bytes([data]))
elif type(data) == bytes or type(data) == bytearray:
goal = len(data)
written = self.sp.write(data)
else:
raise CmdException("Internal Error. Bad data type: {}"
.format(type(data)))
else:
if type(data) == int:
assert data < 256
goal = 1
written = self.sp.write(chr(data))
else:
goal = len(data)
written = self.sp.write(data)
if written < goal:
mdebug(10, "*** Only wrote {} of target {} bytes"
.format(written, goal))
if is_retry and written == 0:
raise CmdException("Failed to write data on the serial bus")
mdebug(10, "*** Retrying write for remainder")
if type(data) == int:
return self._write(data, is_retry=True)
else:
return self._write(data[written:], is_retry=True)
def _read(self, length):
return bytearray(self.sp.read(length))
def sendAck(self):
self._write(0x00)
self._write(0xCC)
return
def sendNAck(self):
self._write(0x00)
self._write(0x33)
return
def receivePacket(self):
# stop = time.time() + 5
# got = None
# while not got:
got = self._read(2)
# if time.time() > stop:
# break
# if not got:
# raise CmdException("No response to %s" % info)
size = got[0] # rcv size
chks = got[1] # rcv checksum
data = bytearray(self._read(size - 2)) # rcv data
mdebug(10, "*** received %x bytes" % size)
if chks == sum(data) & 0xFF:
self.sendAck()
return data
else:
self.sendNAck()
# TODO: retry receiving!
raise CmdException("Received packet checksum error")
return 0
def sendSynch(self):
cmd = 0x55
# flush serial input buffer for first ACK reception
self.sp.flushInput()
mdebug(10, "*** sending synch sequence")
self._write(cmd) # send U
self._write(cmd) # send U
return self._wait_for_ack("Synch (0x55 0x55)", 2)
def checkLastCmd(self):
stat = self.cmdGetStatus()
if not (stat):
raise CmdException("No response from target on status request. "
"(Did you disable the bootloader?)")
if stat[0] == COMMAND_RET_SUCCESS:
mdebug(10, "Command Successful")
return 1
else:
stat_str = RETURN_CMD_STRS.get(stat[0], None)
if stat_str is None:
mdebug(0, "Warning: unrecognized status returned "
"0x%x" % stat[0])
else:
mdebug(0, "Target returned: 0x%x, %s" % (stat[0], stat_str))
return 0
def cmdPing(self):
cmd = 0x20
lng = 3
self._write(lng) # send size
self._write(cmd) # send checksum
self._write(cmd) # send data
mdebug(10, "*** Ping command (0x20)")
if self._wait_for_ack("Ping (0x20)"):
return self.checkLastCmd()
def cmdReset(self):
cmd = 0x25
lng = 3
self._write(lng) # send size
self._write(cmd) # send checksum
self._write(cmd) # send data
mdebug(10, "*** Reset command (0x25)")
if self._wait_for_ack("Reset (0x25)"):
return 1
def cmdGetChipId(self):
cmd = 0x28
lng = 3
self._write(lng) # send size
self._write(cmd) # send checksum
self._write(cmd) # send data
mdebug(10, "*** GetChipId command (0x28)")
if self._wait_for_ack("Get ChipID (0x28)"):
# 4 byte answ, the 2 LSB hold chip ID
version = self.receivePacket()
if self.checkLastCmd():
assert len(version) == 4, ("Unreasonable chip "
"id: %s" % repr(version))
mdebug(10, " Version 0x%02X%02X%02X%02X" % tuple(version))
chip_id = (version[2] << 8) | version[3]
return chip_id
else:
raise CmdException("GetChipID (0x28) failed")
def cmdGetStatus(self):
cmd = 0x23
lng = 3
self._write(lng) # send size
self._write(cmd) # send checksum
self._write(cmd) # send data
mdebug(10, "*** GetStatus command (0x23)")
if self._wait_for_ack("Get Status (0x23)"):
stat = self.receivePacket()
return stat
def cmdSetXOsc(self):
cmd = 0x29
lng = 3
self._write(lng) # send size
self._write(cmd) # send checksum
self._write(cmd) # send data
mdebug(10, "*** SetXOsc command (0x29)")
if self._wait_for_ack("SetXOsc (0x29)"):
return 1
# UART speed (needs) to be changed!
def cmdRun(self, addr):
cmd = 0x22
lng = 7
self._write(lng) # send length
self._write(self._calc_checks(cmd, addr, 0)) # send checksum
self._write(cmd) # send cmd
self._write(self._encode_addr(addr)) # send addr
mdebug(10, "*** Run command(0x22)")
return 1
def cmdEraseMemory(self, addr, size):
cmd = 0x26
lng = 11
self._write(lng) # send length
self._write(self._calc_checks(cmd, addr, size)) # send checksum
self._write(cmd) # send cmd
self._write(self._encode_addr(addr)) # send addr
self._write(self._encode_addr(size)) # send size
mdebug(10, "*** Erase command(0x26)")
if self._wait_for_ack("Erase memory (0x26)", 10):
return self.checkLastCmd()
def cmdBankErase(self):
cmd = 0x2C
lng = 3
self._write(lng) # send length
self._write(cmd) # send checksum
self._write(cmd) # send cmd
mdebug(10, "*** Bank Erase command(0x2C)")
if self._wait_for_ack("Bank Erase (0x2C)", 10):
return self.checkLastCmd()
def cmdCRC32(self, addr, size):
cmd = 0x27
lng = 11
self._write(lng) # send length
self._write(self._calc_checks(cmd, addr, size)) # send checksum
self._write(cmd) # send cmd
self._write(self._encode_addr(addr)) # send addr
self._write(self._encode_addr(size)) # send size
mdebug(10, "*** CRC32 command(0x27)")
if self._wait_for_ack("Get CRC32 (0x27)", 1):
crc = self.receivePacket()
if self.checkLastCmd():
return self._decode_addr(crc[3], crc[2], crc[1], crc[0])
def cmdCRC32CC26xx(self, addr, size):
cmd = 0x27
lng = 15
self._write(lng) # send length
self._write(self._calc_checks(cmd, addr, size)) # send checksum
self._write(cmd) # send cmd
self._write(self._encode_addr(addr)) # send addr
self._write(self._encode_addr(size)) # send size
self._write(self._encode_addr(0x00000000)) # send number of reads
mdebug(10, "*** CRC32 command(0x27)")
if self._wait_for_ack("Get CRC32 (0x27)", 1):
crc = self.receivePacket()
if self.checkLastCmd():
return self._decode_addr(crc[3], crc[2], crc[1], crc[0])
def cmdDownload(self, addr, size):
cmd = 0x21
lng = 11
if (size % 4) != 0: # check for invalid data lengths
raise Exception('Invalid data size: %i. '
'Size must be a multiple of 4.' % size)
self._write(lng) # send length
self._write(self._calc_checks(cmd, addr, size)) # send checksum
self._write(cmd) # send cmd
self._write(self._encode_addr(addr)) # send addr
self._write(self._encode_addr(size)) # send size
mdebug(10, "*** Download command (0x21)")
if self._wait_for_ack("Download (0x21)", 2):
return self.checkLastCmd()
def cmdSendData(self, data):
cmd = 0x24
lng = len(data)+3
# TODO: check total size of data!! max 252 bytes!
self._write(lng) # send size
self._write((sum(bytearray(data))+cmd) & 0xFF) # send checksum
self._write(cmd) # send cmd
self._write(bytearray(data)) # send data
mdebug(10, "*** Send Data (0x24)")
if self._wait_for_ack("Send data (0x24)", 10):
return self.checkLastCmd()
def cmdMemRead(self, addr): # untested
cmd = 0x2A
lng = 8
self._write(lng) # send length
self._write(self._calc_checks(cmd, addr, 4)) # send checksum
self._write(cmd) # send cmd
self._write(self._encode_addr(addr)) # send addr
self._write(4) # send width, 4 bytes
mdebug(10, "*** Mem Read (0x2A)")
if self._wait_for_ack("Mem Read (0x2A)", 1):
data = self.receivePacket()
if self.checkLastCmd():
# self._decode_addr(ord(data[3]),
# ord(data[2]),ord(data[1]),ord(data[0]))
return data
def cmdMemReadCC26xx(self, addr):
cmd = 0x2A
lng = 9
self._write(lng) # send length
self._write(self._calc_checks(cmd, addr, 2)) # send checksum
self._write(cmd) # send cmd
self._write(self._encode_addr(addr)) # send addr
self._write(1) # send width, 4 bytes
self._write(1) # send number of reads
mdebug(10, "*** Mem Read (0x2A)")
if self._wait_for_ack("Mem Read (0x2A)", 1):
data = self.receivePacket()
if self.checkLastCmd():
return data
def cmdMemWrite(self, addr, data, width): # untested
# TODO: check width for 1 or 4 and data size
cmd = 0x2B
lng = 10
self._write(lng) # send length
self._write(self._calc_checks(cmd, addr, 0)) # send checksum
self._write(cmd) # send cmd
self._write(self._encode_addr(addr)) # send addr
self._write(bytearray(data)) # send data
self._write(width) # send width, 4 bytes
mdebug(10, "*** Mem write (0x2B)")
if self._wait_for_ack("Mem Write (0x2B)", 2):
return self.checkLastCmd()
# Complex commands section
def writeMemory(self, addr, data):
lng = len(data)
# amount of data bytes transferred per packet (theory: max 252 + 3)
trsf_size = 248
empty_packet = bytearray((0xFF,) * trsf_size)
# Boot loader enable check
# TODO: implement check for all chip sizes & take into account partial
# firmware uploads
if (lng == 524288): # check if file is for 512K model
# check the boot loader enable bit (only for 512K model)
if not ((data[524247] & (1 << 4)) >> 4):
if not (conf['force'] or
query_yes_no("The boot loader backdoor is not enabled "
"in the firmware you are about to write "
"to the target. You will NOT be able to "
"reprogram the target using this tool if "
"you continue! "
"Do you want to continue?", "no")):
raise Exception('Aborted by user.')
mdebug(5, "Writing %(lng)d bytes starting at address 0x%(addr)08X" %
{'lng': lng, 'addr': addr})
offs = 0
addr_set = 0
# check if amount of remaining data is less then packet size
while lng > trsf_size:
# skip packets filled with 0xFF
if data[offs:offs+trsf_size] != empty_packet:
if addr_set != 1:
# set starting address if not set
self.cmdDownload(addr, lng)
addr_set = 1
mdebug(5, " Write %(len)d bytes at 0x%(addr)08X"
% {'addr': addr, 'len': trsf_size}, '\r')
sys.stdout.flush()
# send next data packet
self.cmdSendData(data[offs:offs+trsf_size])
else: # skipped packet, address needs to be set
addr_set = 0
offs = offs + trsf_size
addr = addr + trsf_size
lng = lng - trsf_size
mdebug(5, "Write %(len)d bytes at 0x%(addr)08X" % {'addr': addr,
'len': lng})
self.cmdDownload(addr, lng)
return self.cmdSendData(data[offs:offs+lng]) # send last data packet
class Chip(object):
def __init__(self, command_interface):
self.command_interface = command_interface
# Some defaults. The child can override.
self.flash_start_addr = 0x00000000
self.has_cmd_set_xosc = False
def crc(self, address, size):
return getattr(self.command_interface, self.crc_cmd)(address, size)
def disable_bootloader(self):
if not (conf['force'] or
query_yes_no("Disabling the bootloader will prevent you from "
"using this script until you re-enable the "
"bootloader using JTAG. Do you want to continue?",
"no")):
raise Exception('Aborted by user.')
if PY3:
pattern = struct.pack('<L', self.bootloader_dis_val)
else:
pattern = [ord(b) for b in struct.pack('<L',
self.bootloader_dis_val)]
if cmd.writeMemory(self.bootloader_address, pattern):
mdebug(5, " Set bootloader closed done ")
else:
raise CmdException("Set bootloader closed failed ")
class CC2538(Chip):
def __init__(self, command_interface):
super(CC2538, self).__init__(command_interface)
self.flash_start_addr = 0x00200000
self.addr_ieee_address_secondary = 0x0027ffcc
self.has_cmd_set_xosc = True
self.bootloader_dis_val = 0xefffffff
self.crc_cmd = "cmdCRC32"
FLASH_CTRL_DIECFG0 = 0x400D3014
FLASH_CTRL_DIECFG2 = 0x400D301C
addr_ieee_address_primary = 0x00280028
ccfg_len = 44
# Read out primary IEEE address, flash and RAM size
model = self.command_interface.cmdMemRead(FLASH_CTRL_DIECFG0)
self.size = (model[3] & 0x70) >> 4
if 0 < self.size <= 4:
self.size *= 0x20000 # in bytes
else:
self.size = 0x10000 # in bytes
self.bootloader_address = self.flash_start_addr + self.size - ccfg_len
sram = (((model[2] << 8) | model[3]) & 0x380) >> 7
sram = (2 - sram) << 3 if sram <= 1 else 32 # in KB
pg = self.command_interface.cmdMemRead(FLASH_CTRL_DIECFG2)
pg_major = (pg[2] & 0xF0) >> 4
if pg_major == 0:
pg_major = 1
pg_minor = pg[2] & 0x0F
ti_oui = bytearray([0x00, 0x12, 0x4B])
ieee_addr = self.command_interface.cmdMemRead(
addr_ieee_address_primary)
ieee_addr_end = self.command_interface.cmdMemRead(
addr_ieee_address_primary + 4)
if ieee_addr[:3] == ti_oui:
ieee_addr += ieee_addr_end
else:
ieee_addr = ieee_addr_end + ieee_addr
mdebug(5, "CC2538 PG%d.%d: %dKB Flash, %dKB SRAM, CCFG at 0x%08X"
% (pg_major, pg_minor, self.size >> 10, sram,
self.bootloader_address))
mdebug(5, "Primary IEEE Address: %s"
% (':'.join('%02X' % x for x in ieee_addr)))
def erase(self):
mdebug(5, "Erasing %s bytes starting at address 0x%08X"
% (self.size, self.flash_start_addr))
return self.command_interface.cmdEraseMemory(self.flash_start_addr,
self.size)
def read_memory(self, addr):
# CC2538's COMMAND_MEMORY_READ sends each 4-byte number in inverted
# byte order compared to what's written on the device
data = self.command_interface.cmdMemRead(addr)
return bytearray([data[x] for x in range(3, -1, -1)])
class CC26xx(Chip):
# Class constants
MISC_CONF_1 = 0x500010A0
PROTO_MASK_BLE = 0x01
PROTO_MASK_IEEE = 0x04
PROTO_MASK_BOTH = 0x05
def __init__(self, command_interface):
super(CC26xx, self).__init__(command_interface)
self.bootloader_dis_val = 0x00000000
self.crc_cmd = "cmdCRC32CC26xx"
ICEPICK_DEVICE_ID = 0x50001318
FCFG_USER_ID = 0x50001294
PRCM_RAMHWOPT = 0x40082250
FLASH_SIZE = 0x4003002C
addr_ieee_address_primary = 0x500012F0
ccfg_len = 88
ieee_address_secondary_offset = 0x20
bootloader_dis_offset = 0x30
sram = "Unknown"
# Determine CC13xx vs CC26xx via ICEPICK_DEVICE_ID::WAFER_ID and store
# PG revision
device_id = self.command_interface.cmdMemReadCC26xx(ICEPICK_DEVICE_ID)
wafer_id = (((device_id[3] & 0x0F) << 16) +
(device_id[2] << 8) +
(device_id[1] & 0xF0)) >> 4
pg_rev = (device_id[3] & 0xF0) >> 4
# Read FCFG1_USER_ID to get the package and supported protocols
user_id = self.command_interface.cmdMemReadCC26xx(FCFG_USER_ID)
package = {0x00: '4x4mm',
0x01: '5x5mm',
0x02: '7x7mm'}.get(user_id[2] & 0x03, "Unknown")
protocols = user_id[1] >> 4
# We can now detect the exact device
        if wafer_id == 0xB99A:
            chip = self._identify_cc26xx(pg_rev, protocols)
        elif wafer_id == 0xB9BE:
            chip = self._identify_cc13xx(pg_rev, protocols)
        else:
            chip = "Unknown (wafer id 0x%04X)" % wafer_id
# Read flash size, calculate and store bootloader disable address
self.size = self.command_interface.cmdMemReadCC26xx(
FLASH_SIZE)[0] * 4096
self.bootloader_address = self.size - ccfg_len + bootloader_dis_offset
self.addr_ieee_address_secondary = (self.size - ccfg_len +
ieee_address_secondary_offset)
# RAM size
ramhwopt_size = self.command_interface.cmdMemReadCC26xx(
PRCM_RAMHWOPT)[0] & 3
if ramhwopt_size == 3:
sram = "20KB"
elif ramhwopt_size == 2:
sram = "16KB"
else:
sram = "Unknown"
# Primary IEEE address. Stored with the MSB at the high address
ieee_addr = self.command_interface.cmdMemReadCC26xx(
addr_ieee_address_primary + 4)[::-1]
ieee_addr += self.command_interface.cmdMemReadCC26xx(
addr_ieee_address_primary)[::-1]
mdebug(5, "%s (%s): %dKB Flash, %s SRAM, CCFG.BL_CONFIG at 0x%08X"
% (chip, package, self.size >> 10, sram,
self.bootloader_address))
mdebug(5, "Primary IEEE Address: %s"
% (':'.join('%02X' % x for x in ieee_addr)))
def _identify_cc26xx(self, pg, protocols):
chips_dict = {
CC26xx.PROTO_MASK_IEEE: 'CC2630',
CC26xx.PROTO_MASK_BLE: 'CC2640',
CC26xx.PROTO_MASK_BOTH: 'CC2650',
}
chip_str = chips_dict.get(protocols & CC26xx.PROTO_MASK_BOTH, "Unknown")
if pg == 1:
pg_str = "PG1.0"
elif pg == 3:
pg_str = "PG2.0"
elif pg == 7:
pg_str = "PG2.1"
elif pg == 8:
rev_minor = self.command_interface.cmdMemReadCC26xx(
CC26xx.MISC_CONF_1)[0]
if rev_minor == 0xFF:
rev_minor = 0x00
pg_str = "PG2.%d" % (2 + rev_minor,)
return "%s %s" % (chip_str, pg_str)
def _identify_cc13xx(self, pg, protocols):
chip_str = "CC1310"
if protocols & CC26xx.PROTO_MASK_IEEE == CC26xx.PROTO_MASK_IEEE:
chip_str = "CC1350"
if pg == 0:
pg_str = "PG1.0"
elif pg == 2:
rev_minor = self.command_interface.cmdMemReadCC26xx(
CC26xx.MISC_CONF_1)[0]
if rev_minor == 0xFF:
rev_minor = 0x00
pg_str = "PG2.%d" % (rev_minor,)
return "%s %s" % (chip_str, pg_str)
def erase(self):
mdebug(5, "Erasing all main bank flash sectors")
return self.command_interface.cmdBankErase()
def read_memory(self, addr):
# CC26xx COMMAND_MEMORY_READ returns contents in the same order as
# they are stored on the device
return self.command_interface.cmdMemReadCC26xx(addr)
def query_yes_no(question, default="yes"):
valid = {"yes": True,
"y": True,
"ye": True,
"no": False,
"n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
if PY3:
choice = input().lower()
else:
choice = raw_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
# Convert the entered IEEE address into an integer
def parse_ieee_address(inaddr):
try:
return int(inaddr, 16)
except ValueError:
# inaddr is not a hex string, look for other formats
        if ':' in inaddr:
            bytes = inaddr.split(':')
        elif '-' in inaddr:
            bytes = inaddr.split('-')
        else:
            raise ValueError("Unrecognised IEEE address format: %s" % inaddr)
if len(bytes) != 8:
raise ValueError("Supplied IEEE address does not contain 8 bytes")
addr = 0
for i, b in zip(range(8), bytes):
try:
addr += int(b, 16) << (56-(i*8))
except ValueError:
raise ValueError("IEEE address contains invalid bytes")
return addr
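# Illustrative examples (added commentary): parse_ieee_address('00124baabbccddee'),
# parse_ieee_address('00:12:4b:aa:bb:cc:dd:ee') and
# parse_ieee_address('00-12-4b-aa-bb-cc-dd-ee') all return 0x00124baabbccddee.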
def print_version():
# Get the version using "git describe".
try:
p = Popen(['git', 'describe', '--tags', '--match', '[0-9]*'],
stdout=PIPE, stderr=PIPE)
p.stderr.close()
line = p.stdout.readlines()[0]
version = line.strip()
except:
# We're not in a git repo, or git failed, use fixed version string.
version = VERSION_STRING
print('%s %s' % (sys.argv[0], version))
def usage():
print("""Usage: %s [-DhqVfewvr] [-l length] [-p port] [-b baud] [-a addr] \
[-i addr] [--bootloader-active-high] [--bootloader-invert-lines] [file.bin]
-h, --help This help
-q Quiet
-V Verbose
-f Force operation(s) without asking any questions
-e Erase (full)
-w Write
-v Verify (CRC32 check)
-r Read
-l length Length of read
-p port Serial port (default: first USB-like port in /dev)
-b baud Baud speed (default: 500000)
-a addr Target address
-i, --ieee-address addr Set the secondary 64 bit IEEE address
--bootloader-active-high Use active high signals to enter bootloader
--bootloader-invert-lines Inverts the use of RTS and DTR to enter bootloader
-D, --disable-bootloader After finishing, disable the bootloader
--version Print script version
Examples:
./%s -e -w -v example/main.bin
./%s -e -w -v --ieee-address 00:12:4b:aa:bb:cc:dd:ee example/main.bin
""" % (sys.argv[0], sys.argv[0], sys.argv[0]))
if __name__ == "__main__":
conf = {
'port': 'auto',
'baud': 500000,
'force_speed': 0,
'address': None,
'force': 0,
'erase': 0,
'write': 0,
'verify': 0,
'read': 0,
'len': 0x80000,
'fname': '',
'ieee_address': 0,
'bootloader_active_high': False,
'bootloader_invert_lines': False,
'disable-bootloader': 0
}
# http://www.python.org/doc/2.5.2/lib/module-getopt.html
try:
opts, args = getopt.getopt(sys.argv[1:],
"DhqVfewvrp:b:a:l:i:",
['help', 'ieee-address=',
'disable-bootloader',
'bootloader-active-high',
'bootloader-invert-lines', 'version'])
except getopt.GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
usage()
sys.exit(2)
for o, a in opts:
if o == '-V':
QUIET = 10
elif o == '-q':
QUIET = 0
elif o == '-h' or o == '--help':
usage()
sys.exit(0)
elif o == '-f':
conf['force'] = 1
elif o == '-e':
conf['erase'] = 1
elif o == '-w':
conf['write'] = 1
elif o == '-v':
conf['verify'] = 1
elif o == '-r':
conf['read'] = 1
elif o == '-p':
conf['port'] = a
elif o == '-b':
conf['baud'] = eval(a)
conf['force_speed'] = 1
elif o == '-a':
conf['address'] = eval(a)
elif o == '-l':
conf['len'] = eval(a)
elif o == '-i' or o == '--ieee-address':
conf['ieee_address'] = str(a)
elif o == '--bootloader-active-high':
conf['bootloader_active_high'] = True
elif o == '--bootloader-invert-lines':
conf['bootloader_invert_lines'] = True
elif o == '-D' or o == '--disable-bootloader':
conf['disable-bootloader'] = 1
elif o == '--version':
print_version()
sys.exit(0)
else:
assert False, "Unhandled option"
try:
# Sanity checks
# check for input/output file
if conf['write'] or conf['read'] or conf['verify']:
try:
args[0]
except:
raise Exception('No file path given.')
if conf['write'] and conf['read']:
if not (conf['force'] or
query_yes_no("You are reading and writing to the same "
"file. This will overwrite your input file. "
"Do you want to continue?", "no")):
raise Exception('Aborted by user.')
if conf['erase'] and conf['read'] and not conf['write']:
if not (conf['force'] or
query_yes_no("You are about to erase your target before "
"reading. Do you want to continue?", "no")):
raise Exception('Aborted by user.')
if conf['read'] and not conf['write'] and conf['verify']:
raise Exception('Verify after read not implemented.')
if conf['len'] < 0:
raise Exception('Length must be positive but %d was provided'
% (conf['len'],))
# Try and find the port automatically
if conf['port'] == 'auto':
ports = []
# Get a list of all USB-like names in /dev
for name in ['tty.usbserial',
'ttyUSB',
'tty.usbmodem',
'tty.SLAB_USBtoUART']:
ports.extend(glob.glob('/dev/%s*' % name))
ports = sorted(ports)
if ports:
# Found something - take it
conf['port'] = ports[0]
else:
raise Exception('No serial port found.')
cmd = CommandInterface()
cmd.open(conf['port'], conf['baud'])
cmd.invoke_bootloader(conf['bootloader_active_high'],
conf['bootloader_invert_lines'])
mdebug(5, "Opening port %(port)s, baud %(baud)d"
% {'port': conf['port'], 'baud': conf['baud']})
if conf['write'] or conf['verify']:
mdebug(5, "Reading data from %s" % args[0])
firmware = FirmwareFile(args[0])
mdebug(5, "Connecting to target...")
if not cmd.sendSynch():
raise CmdException("Can't connect to target. Ensure boot loader "
"is started. (no answer on synch sequence)")
# if (cmd.cmdPing() != 1):
# raise CmdException("Can't connect to target. Ensure boot loader "
# "is started. (no answer on ping command)")
chip_id = cmd.cmdGetChipId()
chip_id_str = CHIP_ID_STRS.get(chip_id, None)
if chip_id_str is None:
mdebug(10, ' Unrecognized chip ID. Trying CC13xx/CC26xx')
device = CC26xx(cmd)
else:
mdebug(10, " Target id 0x%x, %s" % (chip_id, chip_id_str))
device = CC2538(cmd)
# Choose a good default address unless the user specified -a
if conf['address'] is None:
conf['address'] = device.flash_start_addr
if conf['force_speed'] != 1 and device.has_cmd_set_xosc:
if cmd.cmdSetXOsc(): # switch to external clock source
cmd.close()
conf['baud'] = 1000000
cmd.open(conf['port'], conf['baud'])
mdebug(6, "Opening port %(port)s, baud %(baud)d"
% {'port': conf['port'], 'baud': conf['baud']})
mdebug(6, "Reconnecting to target at higher speed...")
if (cmd.sendSynch() != 1):
raise CmdException("Can't connect to target after clock "
"source switch. (Check external "
"crystal)")
else:
raise CmdException("Can't switch target to external clock "
"source. (Try forcing speed)")
if conf['erase']:
# we only do full erase for now
if device.erase():
mdebug(5, " Erase done")
else:
raise CmdException("Erase failed")
if conf['write']:
# TODO: check if boot loader back-door is open, need to read
# flash size first to get address
if cmd.writeMemory(conf['address'], firmware.bytes):
mdebug(5, " Write done ")
else:
raise CmdException("Write failed ")
if conf['verify']:
mdebug(5, "Verifying by comparing CRC32 calculations.")
crc_local = firmware.crc32()
# CRC of target will change according to length input file
crc_target = device.crc(conf['address'], len(firmware.bytes))
if crc_local == crc_target:
mdebug(5, " Verified (match: 0x%08x)" % crc_local)
else:
cmd.cmdReset()
raise Exception("NO CRC32 match: Local = 0x%x, "
"Target = 0x%x" % (crc_local, crc_target))
if conf['ieee_address'] != 0:
ieee_addr = parse_ieee_address(conf['ieee_address'])
if PY3:
mdebug(5, "Setting IEEE address to %s"
% (':'.join(['%02x' % b
for b in struct.pack('>Q', ieee_addr)])))
ieee_addr_bytes = struct.pack('<Q', ieee_addr)
else:
mdebug(5, "Setting IEEE address to %s"
% (':'.join(['%02x' % ord(b)
for b in struct.pack('>Q', ieee_addr)])))
ieee_addr_bytes = [ord(b)
for b in struct.pack('<Q', ieee_addr)]
if cmd.writeMemory(device.addr_ieee_address_secondary,
ieee_addr_bytes):
mdebug(5, " "
"Set address done ")
else:
raise CmdException("Set address failed ")
if conf['read']:
length = conf['len']
# Round up to a 4-byte boundary
length = (length + 3) & ~0x03
mdebug(5, "Reading %s bytes starting at address 0x%x"
% (length, conf['address']))
with open(args[0], 'wb') as f:
for i in range(0, length >> 2):
# reading 4 bytes at a time
rdata = device.read_memory(conf['address'] + (i * 4))
mdebug(5, " 0x%x: 0x%02x%02x%02x%02x"
% (conf['address'] + (i * 4), rdata[0], rdata[1],
rdata[2], rdata[3]), '\r')
f.write(rdata)
f.close()
mdebug(5, " Read done ")
if conf['disable-bootloader']:
device.disable_bootloader()
cmd.cmdReset()
except Exception as err:
if QUIET >= 10:
traceback.print_exc()
exit('ERROR: %s' % str(err))
| lgpl-2.1 | -8,150,061,551,502,049,000 | -6,871,460,619,747,156,000 | 35.397249 | 80 | 0.52184 | false |
tgbugs/hypush | test/memex/models/user_identity_test.py | 1 | 6800 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
import sqlalchemy.exc
from hyputils.memex import models
from hyputils.memex._compat import PY2
class TestUserIdentity(object):
def test_you_can_save_and_then_retrieve_field_values(
self, db_session, matchers, user
):
user_identity_1 = models.UserIdentity(
provider="provider_1", provider_unique_id="1", user=user
)
user_identity_2 = models.UserIdentity(
provider="provider_1", provider_unique_id="2", user=user
)
user_identity_3 = models.UserIdentity(
provider="provider_2", provider_unique_id="3", user=user
)
db_session.add_all([user_identity_1, user_identity_2, user_identity_3])
db_session.flush()
user_identities = (
db_session.query(models.UserIdentity)
.order_by(models.UserIdentity.provider_unique_id)
.all()
)
# Auto incrementing unique IDs should have been generated for us.
assert type(user_identities[0].id) is int
assert type(user_identities[1].id) is int
assert type(user_identities[2].id) is int
# The provider strings that we gave should have been saved.
assert user_identities[0].provider == "provider_1"
assert user_identities[1].provider == "provider_1"
assert user_identities[2].provider == "provider_2"
# The provider_unique_id strings that we gave should have been saved.
assert user_identities[0].provider_unique_id == "1"
assert user_identities[1].provider_unique_id == "2"
assert user_identities[2].provider_unique_id == "3"
def test_provider_cant_be_null(self, db_session, user):
db_session.add(models.UserIdentity(provider_unique_id="1", user=user))
with pytest.raises(
sqlalchemy.exc.IntegrityError,
match='null value in column "provider" violates not-null constraint',
):
db_session.flush()
def test_provider_id_cant_be_null(self, db_session, user):
db_session.add(models.UserIdentity(provider="provider", user=user))
with pytest.raises(
sqlalchemy.exc.IntegrityError,
match='null value in column "provider_unique_id" violates not-null constraint',
):
db_session.flush()
def test_user_cant_be_null(self, db_session):
db_session.add(models.UserIdentity(provider="provider", provider_unique_id="1"))
with pytest.raises(
sqlalchemy.exc.IntegrityError,
match='null value in column "user_id" violates not-null constraint',
):
db_session.flush()
def test_two_cant_have_the_same_provider_and_provider_id(
self, db_session, factories
):
db_session.add_all(
[
models.UserIdentity(
provider="provider", provider_unique_id="id", user=factories.User()
),
models.UserIdentity(
provider="provider", provider_unique_id="id", user=factories.User()
),
]
)
with pytest.raises(
sqlalchemy.exc.IntegrityError,
match='duplicate key value violates unique constraint "uq__user_identity__provider"',
):
db_session.flush()
def test_one_user_can_have_the_same_provider_id_from_different_providers(
self, db_session, user
):
db_session.add_all(
[
models.UserIdentity(
provider="provider_1", provider_unique_id="id", user=user
),
models.UserIdentity(
provider="provider_2", provider_unique_id="id", user=user
),
]
)
db_session.flush()
def test_different_users_can_have_the_same_provider_id_from_different_providers(
self, db_session, factories
):
db_session.add_all(
[
models.UserIdentity(
provider="provider_1",
provider_unique_id="id",
user=factories.User(),
),
models.UserIdentity(
provider="provider_2",
provider_unique_id="id",
user=factories.User(),
),
]
)
db_session.flush()
def test_removing_a_user_identity_from_a_user_deletes_the_user_identity_from_the_db(
self, db_session, user
):
# Add a couple of noise UserIdentity's. These should not be removed
# from the DB.
models.UserIdentity(provider="provider", provider_unique_id="1", user=user)
models.UserIdentity(provider="provider", provider_unique_id="2", user=user)
# The UserIdentity that we are going to remove.
user_identity = models.UserIdentity(
provider="provider", provider_unique_id="3", user=user
)
user.identities.remove(user_identity)
assert user_identity not in db_session.query(models.UserIdentity).all()
def test_deleting_a_user_identity_removes_it_from_its_user(self, db_session, user):
# Add a couple of noise UserIdentity's. These should not be removed
# from user.identities.
models.UserIdentity(provider="provider", provider_unique_id="1", user=user)
models.UserIdentity(provider="provider", provider_unique_id="2", user=user)
# The UserIdentity that we are going to remove.
user_identity = models.UserIdentity(
provider="provider", provider_unique_id="3", user=user
)
db_session.commit()
db_session.delete(user_identity)
db_session.refresh(user) # Make sure user.identities is up to date.
assert user_identity not in user.identities
def test_deleting_a_user_deletes_all_its_user_identities(self, db_session, user):
models.UserIdentity(provider="provider", provider_unique_id="1", user=user)
models.UserIdentity(provider="provider", provider_unique_id="2", user=user)
db_session.commit()
db_session.delete(user)
assert db_session.query(models.UserIdentity).count() == 0
def test_repr(self):
user_identity = models.UserIdentity(
provider="provider_1", provider_unique_id="1"
)
expected_repr = "UserIdentity(provider='provider_1', provider_unique_id='1')"
if PY2:
expected_repr = (
"UserIdentity(provider=u'provider_1', " "provider_unique_id=u'1')"
)
assert repr(user_identity) == expected_repr
@pytest.fixture
def user(self, factories):
return factories.User()
| mit | -1,412,275,098,699,684,000 | 735,674,342,901,651,000 | 34.978836 | 97 | 0.596176 | false |
Kongsea/tensorflow | tensorflow/contrib/distributions/python/ops/shape.py | 41 | 19747 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A helper class for inferring Distribution shape."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import util as distribution_util
class _DistributionShape(object):
"""Manage and manipulate `Distribution` shape.
Terminology:
Recall that a `Tensor` has:
- `shape`: size of `Tensor` dimensions,
- `ndims`: size of `shape`; number of `Tensor` dimensions,
- `dims`: indexes into `shape`; useful for transpose, reduce.
`Tensor`s sampled from a `Distribution` can be partitioned by `sample_dims`,
`batch_dims`, and `event_dims`. To understand the semantics of these
dimensions, consider when two of the three are fixed and the remaining
is varied:
- `sample_dims`: indexes independent draws from identical
parameterizations of the `Distribution`.
- `batch_dims`: indexes independent draws from non-identical
parameterizations of the `Distribution`.
- `event_dims`: indexes event coordinates from one sample.
The `sample`, `batch`, and `event` dimensions constitute the entirety of a
`Distribution` `Tensor`'s shape.
The dimensions are always in `sample`, `batch`, `event` order.
Purpose:
This class partitions `Tensor` notions of `shape`, `ndims`, and `dims` into
`Distribution` notions of `sample,` `batch,` and `event` dimensions. That
is, it computes any of:
```
sample_shape batch_shape event_shape
sample_dims batch_dims event_dims
sample_ndims batch_ndims event_ndims
```
for a given `Tensor`, e.g., the result of
`Distribution.sample(sample_shape=...)`.
For a given `Tensor`, this class computes the above table using minimal
information: `batch_ndims` and `event_ndims`.
Examples of `Distribution` `shape` semantics:
- Sample dimensions:
Computing summary statistics, i.e., the average is a reduction over sample
dimensions.
```python
sample_dims = [0]
tf.reduce_mean(Normal(loc=1.3, scale=1.).sample_n(1000),
axis=sample_dims) # ~= 1.3
```
- Batch dimensions:
Monte Carlo estimation of a marginal probability:
Average over batch dimensions where batch dimensions are associated with
random draws from a prior.
E.g., suppose we want to find the Monte Carlo estimate of the marginal
distribution of a `Normal` with a random `Laplace` location:
```
P(X=x) = integral P(X=x|y) P(Y=y) dy
~= 1/n sum_{i=1}^n P(X=x|y_i), y_i ~iid Laplace(0,1)
= tf.reduce_mean(Normal(loc=Laplace(0., 1.).sample_n(n=1000),
scale=tf.ones(1000)).prob(x),
axis=batch_dims)
```
The `Laplace` distribution generates a `Tensor` of shape `[1000]`. When
fed to a `Normal`, this is interpreted as 1000 different locations, i.e.,
1000 non-identical Normals. Therefore a single call to `prob(x)` yields
1000 probabilities, one for every location. The average over this batch
yields the marginal.
- Event dimensions:
Computing the determinant of the Jacobian of a function of a random
variable involves a reduction over event dimensions.
E.g., Jacobian of the transform `Y = g(X) = exp(X)`:
```python
tf.div(1., tf.reduce_prod(x, event_dims))
```
Examples using this class:
Write `S, B, E` for `sample_shape`, `batch_shape`, and `event_shape`.
```python
# 150 iid samples from one multivariate Normal with two degrees of freedom.
mu = [0., 0]
sigma = [[1., 0],
[0, 1]]
mvn = MultivariateNormal(mu, sigma)
rand_mvn = mvn.sample(sample_shape=[3, 50])
shaper = DistributionShape(batch_ndims=0, event_ndims=1)
S, B, E = shaper.get_shape(rand_mvn)
# S = [3, 50]
# B = []
# E = [2]
# 12 iid samples from one Wishart with 2x2 events.
sigma = [[1., 0],
[2, 1]]
wishart = Wishart(df=5, scale=sigma)
rand_wishart = wishart.sample(sample_shape=[3, 4])
shaper = DistributionShape(batch_ndims=0, event_ndims=2)
S, B, E = shaper.get_shape(rand_wishart)
# S = [3, 4]
# B = []
# E = [2, 2]
# 100 iid samples from two, non-identical trivariate Normal distributions.
mu = ... # shape(2, 3)
sigma = ... # shape(2, 3, 3)
X = MultivariateNormal(mu, sigma).sample(shape=[4, 25])
# S = [4, 25]
# B = [2]
# E = [3]
```
Argument Validation:
When `validate_args=False`, checks that cannot be done during
graph construction are performed at graph execution. This may result in a
performance degradation because data must be switched from GPU to CPU.
For example, when `validate_args=False` and `event_ndims` is a
non-constant `Tensor`, it is checked to be a non-negative integer at graph
execution. (Same for `batch_ndims`). Constant `Tensor`s and non-`Tensor`
arguments are always checked for correctness since this can be done for
"free," i.e., during graph construction.
"""
def __init__(self,
batch_ndims=None,
event_ndims=None,
validate_args=False,
name="DistributionShape"):
"""Construct `DistributionShape` with fixed `batch_ndims`, `event_ndims`.
`batch_ndims` and `event_ndims` are fixed throughout the lifetime of a
`Distribution`. They may only be known at graph execution.
If both `batch_ndims` and `event_ndims` are python scalars (rather than
either being a `Tensor`), functions in this class automatically perform
sanity checks during graph construction.
Args:
batch_ndims: `Tensor`. Number of `dims` (`rank`) of the batch portion of
        indexes of a `Tensor`. A "batch" is a non-identical distribution, i.e.,
Normal with different parameters.
event_ndims: `Tensor`. Number of `dims` (`rank`) of the event portion of
indexes of a `Tensor`. An "event" is what is sampled from a
distribution, i.e., a trivariate Normal has an event shape of [3] and a
4 dimensional Wishart has an event shape of [4, 4].
validate_args: Python `bool`, default `False`. When `True`,
non-`tf.constant` `Tensor` arguments are checked for correctness.
(`tf.constant` arguments are always checked.)
name: Python `str`. The name prepended to Ops created by this class.
Raises:
ValueError: if either `batch_ndims` or `event_ndims` are: `None`,
negative, not `int32`.
"""
if batch_ndims is None: raise ValueError("batch_ndims cannot be None")
if event_ndims is None: raise ValueError("event_ndims cannot be None")
self._batch_ndims = batch_ndims
self._event_ndims = event_ndims
self._validate_args = validate_args
with ops.name_scope(name):
self._name = name
with ops.name_scope("init"):
self._batch_ndims = self._assert_non_negative_int32_scalar(
ops.convert_to_tensor(
batch_ndims, name="batch_ndims"))
self._batch_ndims_static, self._batch_ndims_is_0 = (
self._introspect_ndims(self._batch_ndims))
self._event_ndims = self._assert_non_negative_int32_scalar(
ops.convert_to_tensor(
event_ndims, name="event_ndims"))
self._event_ndims_static, self._event_ndims_is_0 = (
self._introspect_ndims(self._event_ndims))
@property
def name(self):
"""Name given to ops created by this class."""
return self._name
@property
def batch_ndims(self):
"""Returns number of dimensions corresponding to non-identical draws."""
return self._batch_ndims
@property
def event_ndims(self):
"""Returns number of dimensions needed to index a sample's coordinates."""
return self._event_ndims
@property
def validate_args(self):
"""Returns True if graph-runtime `Tensor` checks are enabled."""
return self._validate_args
def get_ndims(self, x, name="get_ndims"):
"""Get `Tensor` number of dimensions (rank).
Args:
x: `Tensor`.
name: Python `str`. The name to give this op.
Returns:
ndims: Scalar number of dimensions associated with a `Tensor`.
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
ndims = x.get_shape().ndims
if ndims is None:
return array_ops.rank(x, name="ndims")
return ops.convert_to_tensor(ndims, dtype=dtypes.int32, name="ndims")
def get_sample_ndims(self, x, name="get_sample_ndims"):
"""Returns number of dimensions corresponding to iid draws ("sample").
Args:
x: `Tensor`.
name: Python `str`. The name to give this op.
Returns:
sample_ndims: `Tensor` (0D, `int32`).
Raises:
ValueError: if `sample_ndims` is calculated to be negative.
"""
with self._name_scope(name, values=[x]):
ndims = self.get_ndims(x, name=name)
if self._is_all_constant_helper(ndims, self.batch_ndims,
self.event_ndims):
ndims = tensor_util.constant_value(ndims)
sample_ndims = (ndims - self._batch_ndims_static -
self._event_ndims_static)
if sample_ndims < 0:
raise ValueError(
"expected batch_ndims(%d) + event_ndims(%d) <= ndims(%d)" %
(self._batch_ndims_static, self._event_ndims_static, ndims))
return ops.convert_to_tensor(sample_ndims, name="sample_ndims")
else:
with ops.name_scope(name="sample_ndims"):
sample_ndims = ndims - self.batch_ndims - self.event_ndims
if self.validate_args:
sample_ndims = control_flow_ops.with_dependencies(
[check_ops.assert_non_negative(sample_ndims)], sample_ndims)
return sample_ndims
def get_dims(self, x, name="get_dims"):
"""Returns dimensions indexing `sample_shape`, `batch_shape`, `event_shape`.
Example:
```python
x = ... # Tensor with shape [4, 3, 2, 1]
sample_dims, batch_dims, event_dims = _DistributionShape(
batch_ndims=2, event_ndims=1).get_dims(x)
# sample_dims == [0]
# batch_dims == [1, 2]
# event_dims == [3]
# Note that these are not the shape parts, but rather indexes into shape.
```
Args:
x: `Tensor`.
name: Python `str`. The name to give this op.
Returns:
sample_dims: `Tensor` (1D, `int32`).
batch_dims: `Tensor` (1D, `int32`).
event_dims: `Tensor` (1D, `int32`).
"""
with self._name_scope(name, values=[x]):
def make_dims(start_sum, size, name):
"""Closure to make dims range."""
start_sum = start_sum if start_sum else [
array_ops.zeros([], dtype=dtypes.int32, name="zero")]
if self._is_all_constant_helper(size, *start_sum):
start = sum(tensor_util.constant_value(s) for s in start_sum)
stop = start + tensor_util.constant_value(size)
return ops.convert_to_tensor(
list(range(start, stop)), dtype=dtypes.int32, name=name)
else:
start = sum(start_sum)
return math_ops.range(start, start + size)
sample_ndims = self.get_sample_ndims(x, name=name)
return (make_dims([], sample_ndims, name="sample_dims"),
make_dims([sample_ndims], self.batch_ndims, name="batch_dims"),
make_dims([sample_ndims, self.batch_ndims],
self.event_ndims, name="event_dims"))
def get_shape(self, x, name="get_shape"):
"""Returns `Tensor`'s shape partitioned into `sample`, `batch`, `event`.
Args:
x: `Tensor`.
name: Python `str`. The name to give this op.
Returns:
sample_shape: `Tensor` (1D, `int32`).
batch_shape: `Tensor` (1D, `int32`).
event_shape: `Tensor` (1D, `int32`).
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
def slice_shape(start_sum, size, name):
"""Closure to slice out shape."""
start_sum = start_sum if start_sum else [
array_ops.zeros([], dtype=dtypes.int32, name="zero")]
if (x.get_shape().ndims is not None and
self._is_all_constant_helper(size, *start_sum)):
start = sum(tensor_util.constant_value(s) for s in start_sum)
stop = start + tensor_util.constant_value(size)
slice_ = x.get_shape()[start:stop].as_list()
if all(s is not None for s in slice_):
return ops.convert_to_tensor(slice_, dtype=dtypes.int32, name=name)
return array_ops.slice(array_ops.shape(x), [sum(start_sum)], [size])
sample_ndims = self.get_sample_ndims(x, name=name)
return (slice_shape([], sample_ndims,
name="sample_shape"),
slice_shape([sample_ndims], self.batch_ndims,
name="batch_shape"),
slice_shape([sample_ndims, self.batch_ndims], self.event_ndims,
name="event_shape"))
  # TODO(jvdillon): Remove expand_batch_dim and make expand_batch_dim=False
  # the default behavior.
def make_batch_of_event_sample_matrices(
self, x, expand_batch_dim=True,
name="make_batch_of_event_sample_matrices"):
"""Reshapes/transposes `Distribution` `Tensor` from S+B+E to B_+E_+S_.
Where:
- `B_ = B if B or not expand_batch_dim else [1]`,
- `E_ = E if E else [1]`,
- `S_ = [tf.reduce_prod(S)]`.
Args:
x: `Tensor`.
expand_batch_dim: Python `bool`. If `True` the batch dims will be expanded
such that `batch_ndims >= 1`.
name: Python `str`. The name to give this op.
Returns:
x: `Tensor`. Input transposed/reshaped to `B_+E_+S_`.
sample_shape: `Tensor` (1D, `int32`).
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
# x.shape: S+B+E
sample_shape, batch_shape, event_shape = self.get_shape(x)
event_shape = distribution_util.pick_vector(
self._event_ndims_is_0, [1], event_shape)
if expand_batch_dim:
batch_shape = distribution_util.pick_vector(
self._batch_ndims_is_0, [1], batch_shape)
new_shape = array_ops.concat([[-1], batch_shape, event_shape], 0)
x = array_ops.reshape(x, shape=new_shape)
# x.shape: [prod(S)]+B_+E_
x = distribution_util.rotate_transpose(x, shift=-1)
# x.shape: B_+E_+[prod(S)]
return x, sample_shape
  # TODO(jvdillon): Remove expand_batch_dim and make expand_batch_dim=False
  # the default behavior.
def undo_make_batch_of_event_sample_matrices(
self, x, sample_shape, expand_batch_dim=True,
name="undo_make_batch_of_event_sample_matrices"):
"""Reshapes/transposes `Distribution` `Tensor` from B_+E_+S_ to S+B+E.
Where:
- `B_ = B if B or not expand_batch_dim else [1]`,
- `E_ = E if E else [1]`,
- `S_ = [tf.reduce_prod(S)]`.
This function "reverses" `make_batch_of_event_sample_matrices`.
Args:
x: `Tensor` of shape `B_+E_+S_`.
sample_shape: `Tensor` (1D, `int32`).
expand_batch_dim: Python `bool`. If `True` the batch dims will be expanded
such that `batch_ndims>=1`.
name: Python `str`. The name to give this op.
Returns:
x: `Tensor`. Input transposed/reshaped to `S+B+E`.
"""
with self._name_scope(name, values=[x, sample_shape]):
x = ops.convert_to_tensor(x, name="x")
# x.shape: _B+_E+[prod(S)]
sample_shape = ops.convert_to_tensor(sample_shape, name="sample_shape")
x = distribution_util.rotate_transpose(x, shift=1)
# x.shape: [prod(S)]+_B+_E
if self._is_all_constant_helper(self.batch_ndims, self.event_ndims):
if self._batch_ndims_is_0 or self._event_ndims_is_0:
squeeze_dims = []
if self._event_ndims_is_0:
squeeze_dims += [-1]
if self._batch_ndims_is_0 and expand_batch_dim:
squeeze_dims += [1]
if squeeze_dims:
x = array_ops.squeeze(x, squeeze_dims=squeeze_dims)
# x.shape: [prod(S)]+B+E
_, batch_shape, event_shape = self.get_shape(x)
else:
s = (x.get_shape().as_list() if x.get_shape().is_fully_defined()
else array_ops.shape(x))
batch_shape = s[1:1+self.batch_ndims]
# Since sample_dims=1 and is left-most, we add 1 to the number of
# batch_ndims to get the event start dim.
event_start = array_ops.where(
math_ops.logical_and(expand_batch_dim, self._batch_ndims_is_0),
2, 1 + self.batch_ndims)
event_shape = s[event_start:event_start+self.event_ndims]
new_shape = array_ops.concat([sample_shape, batch_shape, event_shape], 0)
x = array_ops.reshape(x, shape=new_shape)
# x.shape: S+B+E
return x
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=(
(values or []) + [self.batch_ndims, self.event_ndims])) as scope:
yield scope
def _is_all_constant_helper(self, *args):
"""Helper which returns True if all inputs are constant_value."""
return all(tensor_util.constant_value(x) is not None for x in args)
def _assert_non_negative_int32_scalar(self, x):
"""Helper which ensures that input is a non-negative, int32, scalar."""
x = ops.convert_to_tensor(x, name="x")
if x.dtype.base_dtype != dtypes.int32.base_dtype:
raise TypeError("%s.dtype=%s is not %s" % (x.name, x.dtype, dtypes.int32))
x_value_static = tensor_util.constant_value(x)
if x.get_shape().ndims is not None and x_value_static is not None:
if x.get_shape().ndims != 0:
raise ValueError("%s.ndims=%d is not 0 (scalar)" %
(x.name, x.get_shape().ndims))
if x_value_static < 0:
raise ValueError("%s.value=%d cannot be negative" %
(x.name, x_value_static))
return x
if self.validate_args:
x = control_flow_ops.with_dependencies([
check_ops.assert_rank(x, 0),
check_ops.assert_non_negative(x)], x)
return x
def _introspect_ndims(self, ndims):
"""Helper to establish some properties of input ndims args."""
if self._is_all_constant_helper(ndims):
return (tensor_util.constant_value(ndims),
tensor_util.constant_value(ndims) == 0)
return None, math_ops.equal(ndims, 0)
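# Illustrative usage sketch appended for exposition; it is not part of the
# original TensorFlow module. It assumes the class defined above is available
# as `_DistributionShape` (the name its own docstring uses) and that `x` is a
# `Tensor` laid out as sample + batch + event dimensions.
def _example_shape_partition(x):
  shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
  # Partition x's shape into its sample, batch and event parts.
  sample_shape, batch_shape, event_shape = shaper.get_shape(x)
  # Collapse to B_+E_+[prod(S)] and then restore the original S+B+E layout;
  # the round trip leaves the values of x unchanged.
  y, flat_sample_shape = shaper.make_batch_of_event_sample_matrices(x)
  x_restored = shaper.undo_make_batch_of_event_sample_matrices(
      y, flat_sample_shape)
  return sample_shape, batch_shape, event_shape, x_restored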
| apache-2.0 | -9,039,554,486,782,103,000 | -8,950,360,807,875,730,000 | 39.3 | 80 | 0.616752 | false |
ecsark/storm-static | storm-core/src/dev/resources/tester_spout.py | 37 | 1524 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This Python file uses the following encoding: utf-8
from storm import Spout, emit, log
from random import choice
from time import sleep
from uuid import uuid4
words = [u"nathan", u"mike", u"jackson", u"golda", u"bertels人"]
class TesterSpout(Spout):
def initialize(self, conf, context):
emit(['spout initializing'])
self.pending = {}
def nextTuple(self):
sleep(1.0/2)
word = choice(words)
id = str(uuid4())
self.pending[id] = word
emit([word], id=id)
def ack(self, id):
del self.pending[id]
def fail(self, id):
log("emitting " + self.pending[id] + " on fail")
emit([self.pending[id]], id=id)
TesterSpout().run()
| apache-2.0 | 28,101,605,743,424,508 | 2,460,946,407,858,206,000 | 31.382979 | 74 | 0.690539 | false |
razvanphp/arangodb | 3rdParty/V8-3.31.74.1/third_party/python_26/Lib/site-packages/win32comext/shell/demos/servers/empty_volume_cache.py | 37 | 7597 | # A sample implementation of IEmptyVolumeCache - see
# http://msdn2.microsoft.com/en-us/library/aa969271.aspx for an overview.
#
# * Execute this script to register the handler
# * Start the "disk cleanup" tool - look for "pywin32 compiled files"
import sys, os, stat, time
import pythoncom
from win32com.shell import shell, shellcon
from win32com.server.exception import COMException
import win32gui
import win32con
import winerror
# Our shell extension.
IEmptyVolumeCache_Methods = "Initialize GetSpaceUsed Purge ShowProperties Deactivate".split()
IEmptyVolumeCache2_Methods = "InitializeEx".split()
ico = os.path.join(sys.prefix, "py.ico")
if not os.path.isfile(ico):
ico = os.path.join(sys.prefix, "PC", "py.ico")
if not os.path.isfile(ico):
ico = None
print "Can't find python.ico - no icon will be installed"
class EmptyVolumeCache:
_reg_progid_ = "Python.ShellExtension.EmptyVolumeCache"
_reg_desc_ = "Python Sample Shell Extension (disk cleanup)"
_reg_clsid_ = "{EADD0777-2968-4c72-A999-2BF5F756259C}"
_reg_icon_ = ico
_com_interfaces_ = [shell.IID_IEmptyVolumeCache, shell.IID_IEmptyVolumeCache2]
_public_methods_ = IEmptyVolumeCache_Methods + IEmptyVolumeCache2_Methods
def Initialize(self, hkey, volume, flags):
# This should never be called, except on win98.
print "Unless we are on 98, Initialize call is unexpected!"
raise COMException(hresult=winerror.E_NOTIMPL)
def InitializeEx(self, hkey, volume, key_name, flags):
# Must return a tuple of:
# (display_name, description, button_name, flags)
print "InitializeEx called with", hkey, volume, key_name, flags
self.volume = volume
if flags & shellcon.EVCF_SETTINGSMODE:
print "We are being run on a schedule"
# In this case, "because there is no opportunity for user
# feedback, only those files that are extremely safe to clean up
# should be touched. You should ignore the initialization
# method's pcwszVolume parameter and clean unneeded files
# regardless of what drive they are on."
self.volume = None # flag as 'any disk will do'
elif flags & shellcon.EVCF_OUTOFDISKSPACE:
# In this case, "the handler should be aggressive about deleting
# files, even if it results in a performance loss. However, the
# handler obviously should not delete files that would cause an
# application to fail or the user to lose data."
print "We are being run as we are out of disk-space"
else:
# This case is not documented - we are guessing :)
print "We are being run because the user asked"
# For the sake of demo etc, we tell the shell to only show us when
# there are > 0 bytes available. Our GetSpaceUsed will check the
# volume, so will return 0 when we are on a different disk
flags = shellcon.EVCF_DONTSHOWIFZERO | shellcon.EVCF_ENABLEBYDEFAULT
return ("pywin32 compiled files",
"Removes all .pyc and .pyo files in the pywin32 directories",
"click me!",
flags
)
def _GetDirectories(self):
root_dir = os.path.abspath(os.path.dirname(os.path.dirname(win32gui.__file__)))
if self.volume is not None and \
not root_dir.lower().startswith(self.volume.lower()):
return []
return [os.path.join(root_dir, p)
for p in ('win32', 'win32com', 'win32comext', 'isapi')]
def _WalkCallback(self, arg, directory, files):
# callback function for os.path.walk - no need to be member, but its
# close to the callers :)
callback, total_list = arg
for file in files:
fqn = os.path.join(directory, file).lower()
if file.endswith(".pyc") or file.endswith(".pyo"):
# See below - total_list == None means delete files,
# otherwise it is a list where the result is stored. Its a
# list simply due to the way os.walk works - only [0] is
# referenced
if total_list is None:
print "Deleting file", fqn
# Should do callback.PurgeProcess - left as an exercise :)
os.remove(fqn)
else:
total_list[0] += os.stat(fqn)[stat.ST_SIZE]
# and callback to the tool
if callback:
# for the sake of seeing the progress bar do its thing,
# we take longer than we need to...
# ACK - for some bizarre reason this screws up the XP
# cleanup manager - clues welcome!! :)
## print "Looking in", directory, ", but waiting a while..."
## time.sleep(3)
# now do it
used = total_list[0]
callback.ScanProgress(used, 0, "Looking at " + fqn)
def GetSpaceUsed(self, callback):
total = [0] # See _WalkCallback above
try:
for d in self._GetDirectories():
os.path.walk(d, self._WalkCallback, (callback, total))
print "After looking in", d, "we have", total[0], "bytes"
except pythoncom.error, (hr, msg, exc, arg):
# This will be raised by the callback when the user selects 'cancel'.
if hr != winerror.E_ABORT:
raise # that's the documented error code!
print "User cancelled the operation"
return total[0]
def Purge(self, amt_to_free, callback):
print "Purging", amt_to_free, "bytes..."
# we ignore amt_to_free - it is generally what we returned for
# GetSpaceUsed
try:
for d in self._GetDirectories():
os.path.walk(d, self._WalkCallback, (callback, None))
except pythoncom.error, (hr, msg, exc, arg):
# This will be raised by the callback when the user selects 'cancel'.
if hr != winerror.E_ABORT:
raise # that's the documented error code!
print "User cancelled the operation"
def ShowProperties(self, hwnd):
raise COMException(hresult=winerror.E_NOTIMPL)
def Deactivate(self):
print "Deactivate called"
return 0
def DllRegisterServer():
# Also need to register specially in:
# HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches
# See link at top of file.
import _winreg
kn = r"Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\%s" \
% (EmptyVolumeCache._reg_desc_,)
key = _winreg.CreateKey(_winreg.HKEY_LOCAL_MACHINE, kn)
_winreg.SetValueEx(key, None, 0, _winreg.REG_SZ, EmptyVolumeCache._reg_clsid_)
def DllUnregisterServer():
import _winreg
kn = r"Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\%s" \
% (EmptyVolumeCache._reg_desc_,)
try:
key = _winreg.DeleteKey(_winreg.HKEY_LOCAL_MACHINE, kn)
except WindowsError, details:
import errno
if details.errno != errno.ENOENT:
raise
print EmptyVolumeCache._reg_desc_, "unregistration complete."
if __name__=='__main__':
from win32com.server import register
register.UseCommandLine(EmptyVolumeCache,
finalize_register = DllRegisterServer,
finalize_unregister = DllUnregisterServer)
| apache-2.0 | 8,425,162,957,736,107,000 | 3,452,948,124,818,815,500 | 44.220238 | 93 | 0.613137 | false |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/dask/array/ghost.py | 2 | 9953 | from warnings import warn
from . import overlap
def fractional_slice(task, axes):
"""
>>> fractional_slice(('x', 5.1), {0: 2}) # doctest: +SKIP
(getitem, ('x', 6), (slice(0, 2),))
>>> fractional_slice(('x', 3, 5.1), {0: 2, 1: 3}) # doctest: +SKIP
(getitem, ('x', 3, 5), (slice(None, None, None), slice(-3, None)))
>>> fractional_slice(('x', 2.9, 5.1), {0: 2, 1: 3}) # doctest: +SKIP
(getitem, ('x', 3, 5), (slice(0, 2), slice(-3, None)))
"""
warn('DeprecationWarning: the dask.array.ghost module has '
'been renamed to dask.array.overlap, '
'use dask.array.overlap.fractional_slice.',
Warning)
return overlap.fractional_slice(task, axes)
def expand_key(k, dims, name=None, axes=None):
warn('DeprecationWarning: the dask.array.ghost module has '
'been renamed to dask.array.overlap, '
'use dask.array.overlap.expand_keys.',
Warning)
return overlap.expand_key(k, dims, name, axes)
def ghost_internal(x, axes):
""" Share boundaries between neighboring blocks
Parameters
----------
x: da.Array
A dask array
axes: dict
The size of the shared boundary per axis
    The axes input informs how many cells to share between neighboring blocks:
    {0: 2, 2: 5} means share two cells in 0 axis, 5 cells in 2 axis.
"""
warn('DeprecationWarning: the dask.array.ghost module has '
'been renamed to dask.array.overlap, '
'use dask.array.overlap.ghost_internal.',
Warning)
return overlap.overlap_internal(x, axes)
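# Illustrative sketch (not part of the original module), assuming the usual
# dask.array API: with a depth of one cell along axis 0, every chunk is
# extended by the cells it shares with its interior neighbours.
#
#   import dask.array as da
#   d = da.arange(8, chunks=4)      # chunks: ((4, 4),)
#   g = ghost_internal(d, {0: 1})   # chunks: ((5, 5),)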
def trim_internal(x, axes):
""" Trim sides from each block
This couples well with the ghost operation, which may leave excess data on
each block
See also
--------
dask.array.chunk.trim
dask.array.map_blocks
"""
warn('DeprecationWarning: the dask.array.ghost module has '
'been renamed to dask.array.overlap, '
'use dask.array.overlap.trim_internal.',
Warning)
return overlap.trim_internal(x, axes)
def periodic(x, axis, depth):
""" Copy a slice of an array around to its other side
Useful to create periodic boundary conditions for ghost
"""
warn('DeprecationWarning: the dask.array.ghost module has '
'been renamed to dask.array.overlap, '
'use dask.array.overlap.periodic.',
Warning)
return overlap.periodic(x, axis, depth)
def reflect(x, axis, depth):
""" Reflect boundaries of array on the same side
This is the converse of ``periodic``
"""
warn('DeprecationWarning: the dask.array.ghost module has '
'been renamed to dask.array.overlap, '
'use dask.array.overlap.reflect.',
Warning)
return overlap.reflect(x, axis, depth)
def nearest(x, axis, depth):
""" Each reflect each boundary value outwards
This mimics what the skimage.filters.gaussian_filter(... mode="nearest")
does.
"""
warn('DeprecationWarning: the dask.array.ghost module has '
'been renamed to dask.array.overlap, '
'use dask.array.overlap.nearest.',
Warning)
return overlap.nearest(x, axis, depth)
def constant(x, axis, depth, value):
""" Add constant slice to either side of array """
warn('DeprecationWarning: the dask.array.ghost module has '
'been renamed to dask.array.overlap, '
'use dask.array.overlap.constant.',
Warning)
return overlap.constant(x, axis, depth, value)
def _remove_ghost_boundaries(l, r, axis, depth):
lchunks = list(l.chunks)
lchunks[axis] = (depth,)
rchunks = list(r.chunks)
rchunks[axis] = (depth,)
l = l.rechunk(tuple(lchunks))
r = r.rechunk(tuple(rchunks))
return l, r
def boundaries(x, depth=None, kind=None):
""" Add boundary conditions to an array before ghosting
See Also
--------
periodic
constant
"""
warn('DeprecationWarning: the dask.array.ghost module has '
'been renamed to dask.array.overlap, '
'use dask.array.overlap.boundaries.',
Warning)
return overlap.boundaries(x, depth, kind)
def ghost(x, depth, boundary):
""" Share boundaries between neighboring blocks
Parameters
----------
x: da.Array
A dask array
depth: dict
The size of the shared boundary per axis
boundary: dict
The boundary condition on each axis. Options are 'reflect', 'periodic',
'nearest', 'none', or an array value. Such a value will fill the
boundary with that value.
    The depth input informs how many cells to share between neighboring
    blocks: ``{0: 2, 2: 5}`` means share two cells in 0 axis, 5 cells in 2 axis.
Axes missing from this input will not be overlapped.
Examples
--------
>>> import numpy as np
>>> import dask.array as da
>>> x = np.arange(64).reshape((8, 8))
>>> d = da.from_array(x, chunks=(4, 4))
>>> d.chunks
((4, 4), (4, 4))
>>> g = da.ghost.ghost(d, depth={0: 2, 1: 1},
... boundary={0: 100, 1: 'reflect'})
>>> g.chunks
((8, 8), (6, 6))
>>> np.array(g)
array([[100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],
[100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],
[ 0, 0, 1, 2, 3, 4, 3, 4, 5, 6, 7, 7],
[ 8, 8, 9, 10, 11, 12, 11, 12, 13, 14, 15, 15],
[ 16, 16, 17, 18, 19, 20, 19, 20, 21, 22, 23, 23],
[ 24, 24, 25, 26, 27, 28, 27, 28, 29, 30, 31, 31],
[ 32, 32, 33, 34, 35, 36, 35, 36, 37, 38, 39, 39],
[ 40, 40, 41, 42, 43, 44, 43, 44, 45, 46, 47, 47],
[ 16, 16, 17, 18, 19, 20, 19, 20, 21, 22, 23, 23],
[ 24, 24, 25, 26, 27, 28, 27, 28, 29, 30, 31, 31],
[ 32, 32, 33, 34, 35, 36, 35, 36, 37, 38, 39, 39],
[ 40, 40, 41, 42, 43, 44, 43, 44, 45, 46, 47, 47],
[ 48, 48, 49, 50, 51, 52, 51, 52, 53, 54, 55, 55],
[ 56, 56, 57, 58, 59, 60, 59, 60, 61, 62, 63, 63],
[100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],
[100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100]])
"""
warn('DeprecationWarning: the dask.array.ghost module has '
'been renamed to dask.array.overlap, '
'use dask.array.overlap.ghost.',
Warning)
return overlap.overlap(x, depth, boundary)
def add_dummy_padding(x, depth, boundary):
"""
Pads an array which has 'none' as the boundary type.
Used to simplify trimming arrays which use 'none'.
>>> import dask.array as da
>>> x = da.arange(6, chunks=3)
>>> add_dummy_padding(x, {0: 1}, {0: 'none'}).compute() # doctest: +NORMALIZE_WHITESPACE
array([..., 0, 1, 2, 3, 4, 5, ...])
"""
warn('DeprecationWarning: the dask.array.ghost module has '
'been renamed to dask.array.overlap, '
'use dask.array.overlap.add_dummy_padding.',
Warning)
return overlap.add_dummy_padding(x, depth, boundary)
def map_overlap(x, func, depth, boundary=None, trim=True, **kwargs):
""" Map a function over blocks of the array with some overlap
We share neighboring zones between blocks of the array, then map a
function, then trim away the neighboring strips.
Parameters
----------
func: function
The function to apply to each extended block
depth: int, tuple, or dict
The number of elements that each block should share with its neighbors
If a tuple or dict then this can be different per axis
boundary: str, tuple, dict
How to handle the boundaries.
Values include 'reflect', 'periodic', 'nearest', 'none',
or any constant value like 0 or np.nan
trim: bool
Whether or not to trim ``depth`` elements from each block after
calling the map function.
Set this to False if your mapping function already does this for you
**kwargs:
Other keyword arguments valid in ``map_blocks``
Examples
--------
>>> import numpy as np
>>> import dask.array as da
>>> x = np.array([1, 1, 2, 3, 3, 3, 2, 1, 1])
>>> x = da.from_array(x, chunks=5)
>>> def derivative(x):
... return x - np.roll(x, 1)
>>> y = x.map_overlap(derivative, depth=1, boundary=0)
>>> y.compute()
array([ 1, 0, 1, 1, 0, 0, -1, -1, 0])
>>> x = np.arange(16).reshape((4, 4))
>>> d = da.from_array(x, chunks=(2, 2))
>>> d.map_overlap(lambda x: x + x.size, depth=1).compute()
array([[16, 17, 18, 19],
[20, 21, 22, 23],
[24, 25, 26, 27],
[28, 29, 30, 31]])
>>> func = lambda x: x + x.size
>>> depth = {0: 1, 1: 1}
>>> boundary = {0: 'reflect', 1: 'none'}
>>> d.map_overlap(func, depth, boundary).compute() # doctest: +NORMALIZE_WHITESPACE
array([[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23],
[24, 25, 26, 27]])
"""
warn('DeprecationWarning: the dask.array.ghost module has '
'been renamed to dask.array.overlap, '
'use dask.array.overlap.map_overlap.',
Warning)
return overlap.map_overlap(x, func, depth, boundary, trim, **kwargs)
def coerce_depth(ndim, depth):
warn('DeprecationWarning: the dask.array.ghost module has '
'been renamed to dask.array.overlap, '
'use dask.array.overlap.coerce_depth.',
Warning)
return overlap.coerce_depth(ndim, depth)
def coerce_boundary(ndim, boundary):
warn('DeprecationWarning: the dask.array.ghost module has '
'been renamed to dask.array.overlap, '
'use dask.array.overlap.coerce_boundary.',
Warning)
return overlap.coerce_boundary(ndim, boundary)
| gpl-3.0 | 4,026,610,490,633,103,400 | 1,280,058,313,859,570,200 | 31.106452 | 93 | 0.576007 | false |
devendermishrajio/nova_test_latest | nova/scheduler/utils.py | 29 | 13035 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for scheduling."""
import collections
import functools
import sys
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from nova.compute import flavors
from nova.compute import utils as compute_utils
from nova import exception
from nova.i18n import _, _LE, _LW
from nova import objects
from nova.objects import base as obj_base
from nova import rpc
LOG = logging.getLogger(__name__)
scheduler_opts = [
cfg.IntOpt('scheduler_max_attempts',
default=3,
help='Maximum number of attempts to schedule an instance'),
]
CONF = cfg.CONF
CONF.register_opts(scheduler_opts)
CONF.import_opt('scheduler_default_filters', 'nova.scheduler.host_manager')
GroupDetails = collections.namedtuple('GroupDetails', ['hosts', 'policies'])
def build_request_spec(ctxt, image, instances, instance_type=None):
"""Build a request_spec for the scheduler.
The request_spec assumes that all instances to be scheduled are the same
type.
"""
instance = instances[0]
if instance_type is None:
if isinstance(instance, objects.Instance):
instance_type = instance.get_flavor()
else:
instance_type = flavors.extract_flavor(instance)
if isinstance(instance, objects.Instance):
instance = obj_base.obj_to_primitive(instance)
# obj_to_primitive doesn't copy this enough, so be sure
# to detach our metadata blob because we modify it below.
instance['system_metadata'] = dict(instance.get('system_metadata', {}))
if isinstance(instance_type, objects.Flavor):
instance_type = obj_base.obj_to_primitive(instance_type)
# NOTE(danms): Replicate this old behavior because the
# scheduler RPC interface technically expects it to be
# there. Remove this when we bump the scheduler RPC API to
# v5.0
try:
flavors.save_flavor_info(instance.get('system_metadata', {}),
instance_type)
except KeyError:
# If the flavor isn't complete (which is legit with a
# flavor object, just don't put it in the request spec
pass
request_spec = {
'image': image or {},
'instance_properties': instance,
'instance_type': instance_type,
'num_instances': len(instances)}
return jsonutils.to_primitive(request_spec)
def set_vm_state_and_notify(context, instance_uuid, service, method, updates,
ex, request_spec, db):
"""changes VM state and notifies."""
LOG.warning(_LW("Failed to %(service)s_%(method)s: %(ex)s"),
{'service': service, 'method': method, 'ex': ex})
vm_state = updates['vm_state']
properties = request_spec.get('instance_properties', {})
# NOTE(vish): We shouldn't get here unless we have a catastrophic
# failure, so just set the instance to its internal state
notifier = rpc.get_notifier(service)
state = vm_state.upper()
LOG.warning(_LW('Setting instance to %s state.'), state,
instance_uuid=instance_uuid)
instance = objects.Instance(context=context, uuid=instance_uuid,
**updates)
instance.obj_reset_changes(['uuid'])
instance.save()
compute_utils.add_instance_fault_from_exc(context,
instance, ex, sys.exc_info())
payload = dict(request_spec=request_spec,
instance_properties=properties,
instance_id=instance_uuid,
state=vm_state,
method=method,
reason=ex)
event_type = '%s.%s' % (service, method)
notifier.error(context, event_type, payload)
def populate_filter_properties(filter_properties, host_state):
"""Add additional information to the filter properties after a node has
been selected by the scheduling process.
"""
if isinstance(host_state, dict):
host = host_state['host']
nodename = host_state['nodename']
limits = host_state['limits']
else:
host = host_state.host
nodename = host_state.nodename
limits = host_state.limits
# Adds a retry entry for the selected compute host and node:
_add_retry_host(filter_properties, host, nodename)
# Adds oversubscription policy
if not filter_properties.get('force_hosts'):
filter_properties['limits'] = limits
def populate_retry(filter_properties, instance_uuid):
max_attempts = _max_attempts()
force_hosts = filter_properties.get('force_hosts', [])
force_nodes = filter_properties.get('force_nodes', [])
# In the case of multiple force hosts/nodes, scheduler should not
# disable retry filter but traverse all force hosts/nodes one by
# one till scheduler gets a valid target host.
if (max_attempts == 1 or len(force_hosts) == 1
or len(force_nodes) == 1):
# re-scheduling is disabled.
return
# retry is enabled, update attempt count:
retry = filter_properties.setdefault(
'retry', {
'num_attempts': 0,
'hosts': [] # list of compute hosts tried
})
retry['num_attempts'] += 1
_log_compute_error(instance_uuid, retry)
exc = retry.pop('exc', None)
if retry['num_attempts'] > max_attempts:
msg = (_('Exceeded max scheduling attempts %(max_attempts)d '
'for instance %(instance_uuid)s. '
'Last exception: %(exc)s')
% {'max_attempts': max_attempts,
'instance_uuid': instance_uuid,
'exc': exc})
raise exception.MaxRetriesExceeded(reason=msg)
def _log_compute_error(instance_uuid, retry):
"""If the request contained an exception from a previous compute
build/resize operation, log it to aid debugging
"""
exc = retry.get('exc') # string-ified exception from compute
if not exc:
return # no exception info from a previous attempt, skip
hosts = retry.get('hosts', None)
if not hosts:
return # no previously attempted hosts, skip
last_host, last_node = hosts[-1]
LOG.error(_LE('Error from last host: %(last_host)s (node %(last_node)s):'
' %(exc)s'),
{'last_host': last_host,
'last_node': last_node,
'exc': exc},
instance_uuid=instance_uuid)
def _max_attempts():
max_attempts = CONF.scheduler_max_attempts
if max_attempts < 1:
raise exception.NovaException(_("Invalid value for "
"'scheduler_max_attempts', must be >= 1"))
return max_attempts
def _add_retry_host(filter_properties, host, node):
"""Add a retry entry for the selected compute node. In the event that
the request gets re-scheduled, this entry will signal that the given
node has already been tried.
"""
retry = filter_properties.get('retry', None)
if not retry:
return
hosts = retry['hosts']
hosts.append([host, node])
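# Illustrative note (not part of the original module): after two failed
# scheduling attempts the retry entry built above would look roughly like
#   filter_properties['retry'] == {
#       'num_attempts': 2,
#       'hosts': [['host1', 'nodea'], ['host2', 'nodeb']],  # hypothetical names
#       'exc': '<string-ified exception from the last compute attempt>'}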
def parse_options(opts, sep='=', converter=str, name=""):
"""Parse a list of options, each in the format of <key><sep><value>. Also
use the converter to convert the value into desired type.
:params opts: list of options, e.g. from oslo_config.cfg.ListOpt
:params sep: the separator
:params converter: callable object to convert the value, should raise
ValueError for conversion failure
:params name: name of the option
:returns: a lists of tuple of values (key, converted_value)
"""
good = []
bad = []
for opt in opts:
try:
key, seen_sep, value = opt.partition(sep)
value = converter(value)
except ValueError:
key = None
value = None
if key and seen_sep and value is not None:
good.append((key, value))
else:
bad.append(opt)
if bad:
LOG.warning(_LW("Ignoring the invalid elements of the option "
"%(name)s: %(options)s"),
{'name': name,
'options': ", ".join(bad)})
return good
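# Illustrative sketch (not part of the original module): parsing a list of
# "metric=ratio" strings, e.g. values taken from a hypothetical ListOpt.
#
#   parse_options(['cpu=1.0', 'ram=1.5', 'bogus'],
#                 sep='=', converter=float, name='weight_setting')
#   # -> [('cpu', 1.0), ('ram', 1.5)]; 'bogus' is logged as invalid and dropped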
def validate_filter(filter):
"""Validates that the filter is configured in the default filters."""
return filter in CONF.scheduler_default_filters
_SUPPORTS_AFFINITY = None
_SUPPORTS_ANTI_AFFINITY = None
def _get_group_details(context, instance_uuid, user_group_hosts=None):
"""Provide group_hosts and group_policies sets related to instances if
those instances are belonging to a group and if corresponding filters are
enabled.
:param instance_uuid: UUID of the instance to check
:param user_group_hosts: Hosts from the group or empty set
:returns: None or namedtuple GroupDetails
"""
global _SUPPORTS_AFFINITY
if _SUPPORTS_AFFINITY is None:
_SUPPORTS_AFFINITY = validate_filter(
'ServerGroupAffinityFilter')
global _SUPPORTS_ANTI_AFFINITY
if _SUPPORTS_ANTI_AFFINITY is None:
_SUPPORTS_ANTI_AFFINITY = validate_filter(
'ServerGroupAntiAffinityFilter')
_supports_server_groups = any((_SUPPORTS_AFFINITY,
_SUPPORTS_ANTI_AFFINITY))
if not _supports_server_groups or not instance_uuid:
return
try:
group = objects.InstanceGroup.get_by_instance_uuid(context,
instance_uuid)
except exception.InstanceGroupNotFound:
return
policies = set(('anti-affinity', 'affinity'))
if any((policy in policies) for policy in group.policies):
if (not _SUPPORTS_AFFINITY and 'affinity' in group.policies):
msg = _("ServerGroupAffinityFilter not configured")
LOG.error(msg)
raise exception.UnsupportedPolicyException(reason=msg)
if (not _SUPPORTS_ANTI_AFFINITY and 'anti-affinity' in group.policies):
msg = _("ServerGroupAntiAffinityFilter not configured")
LOG.error(msg)
raise exception.UnsupportedPolicyException(reason=msg)
group_hosts = set(group.get_hosts())
user_hosts = set(user_group_hosts) if user_group_hosts else set()
return GroupDetails(hosts=user_hosts | group_hosts,
policies=group.policies)
def setup_instance_group(context, request_spec, filter_properties):
"""Add group_hosts and group_policies fields to filter_properties dict
based on instance uuids provided in request_spec, if those instances are
belonging to a group.
:param request_spec: Request spec
:param filter_properties: Filter properties
"""
group_hosts = filter_properties.get('group_hosts')
# NOTE(sbauza) If there are multiple instance UUIDs, it's a boot
# request and they will all be in the same group, so it's safe to
# only check the first one.
instance_uuid = request_spec.get('instance_properties', {}).get('uuid')
group_info = _get_group_details(context, instance_uuid, group_hosts)
if group_info is not None:
filter_properties['group_updated'] = True
filter_properties['group_hosts'] = group_info.hosts
filter_properties['group_policies'] = group_info.policies
def retry_on_timeout(retries=1):
"""Retry the call in case a MessagingTimeout is raised.
A decorator for retrying calls when a service dies mid-request.
:param retries: Number of retries
:returns: Decorator
"""
def outer(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
attempt = 0
while True:
try:
return func(*args, **kwargs)
except messaging.MessagingTimeout:
attempt += 1
if attempt <= retries:
LOG.warning(_LW(
"Retrying %(name)s after a MessagingTimeout, "
"attempt %(attempt)s of %(retries)s."),
{'attempt': attempt, 'retries': retries,
'name': func.__name__})
else:
raise
return wrapped
return outer
retry_select_destinations = retry_on_timeout(_max_attempts() - 1)
| apache-2.0 | -8,131,639,664,913,673,000 | 2,109,531,036,289,867,800 | 35.615169 | 79 | 0.622862 | false |
laszlocsomor/tensorflow | tensorflow/examples/learn/text_classification_character_rnn.py | 8 | 4104 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of recurrent neural networks over characters for DBpedia dataset.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
HIDDEN_SIZE = 20
MAX_LABEL = 15
CHARS_FEATURE = 'chars' # Name of the input character feature.
def char_rnn_model(features, labels, mode):
"""Character level recurrent neural network model to predict classes."""
byte_vectors = tf.one_hot(features[CHARS_FEATURE], 256, 1., 0.)
byte_list = tf.unstack(byte_vectors, axis=1)
cell = tf.nn.rnn_cell.GRUCell(HIDDEN_SIZE)
_, encoding = tf.nn.static_rnn(cell, byte_list, dtype=tf.float32)
logits = tf.layers.dense(encoding, MAX_LABEL, activation=None)
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
})
onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
# Prepare training and testing data
dbpedia = tf.contrib.learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = tf.contrib.learn.preprocessing.ByteProcessor(
MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
# Build model
classifier = tf.estimator.Estimator(model_fn=char_rnn_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={CHARS_FEATURE: x_train},
y=y_train,
batch_size=128,
num_epochs=None,
shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Eval.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={CHARS_FEATURE: x_test},
y=y_test,
num_epochs=1,
shuffle=False)
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy: {0:f}'.format(scores['accuracy']))
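  # Illustrative addition (not part of the original example): scoring raw
  # strings with the same byte-level processor. classifier.predict yields the
  # dicts built in char_rnn_model's PREDICT branch ('class' and 'prob').
  # predict_input_fn = tf.estimator.inputs.numpy_input_fn(
  #     x={CHARS_FEATURE: np.array(list(char_processor.transform(
  #         ['A new document to classify'])))},
  #     num_epochs=1,
  #     shuffle=False)
  # for prediction in classifier.predict(input_fn=predict_input_fn):
  #   print(prediction['class'], prediction['prob'].max())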
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 | 634,690,106,548,582,900 | 1,746,309,668,485,848,300 | 32.096774 | 79 | 0.70614 | false |
samervin/arctic-scavengers-randomizer | arctic_cards/leaders.py | 1 | 3619 | # Fields
NAME = 'name'
SET = 'set'
USES_REFUGEES = 'uses-refugees'
TEXT = 'text'
# Set values
HQ_EXP = 'hq'
RECON_EXP = 'recon'
# Information not strictly contained on the card
COMMENT = 'comment'
class Leaders:
ALL_LEADERS = [
{
NAME: 'The Peacemaker',
SET: HQ_EXP,
USES_REFUGEES: True,
            TEXT: 'Each round you may play 1 Refugee to increase the power of another tribe member\'s hunt or dig actions by +2.'
},
{
NAME: 'The Gangster',
SET: HQ_EXP,
USES_REFUGEES: True,
TEXT: 'Your Refugees have a fight of 0 and they count as 2 people for the purpose of breaking tied skirmishes.'
},
{
NAME: 'The Butcher',
SET: HQ_EXP,
TEXT: 'Each round you may kill 1 of your tribe members (remove the card permanently from play) and sell his/her internal organs for 1 food and 1 med.'
},
{
NAME: 'The Fanatic',
SET: HQ_EXP,
USES_REFUGEES: True,
TEXT: 'Each round you may use 1 Refugee from your hand as a suicide bomber against an opponent. '
'Discard 1 of your opponent\'s revealed cards (your choice), the Refugee dies in the process (remove card from play).'
},
{
NAME: 'The Organizer',
SET: HQ_EXP,
USES_REFUGEES: True,
TEXT: 'Each round you may play 1 Refugee to perform a draw of 2, but only keep 1. '
'No other cards may be played to modify this draw and you may not perform another draw this round.'
},
{
NAME: 'The Cannibal',
SET: HQ_EXP,
TEXT: 'Each round you may cannibalize 1 tribe member for 3 food (and subsequently remove that card from play). '
'You may not combine food from hunting or a garden when hiring with cannibalized food.'
},
{
NAME: 'The Sergent at Arms',
SET: HQ_EXP,
TEXT: 'You are immune to the disarm action, preventing saboteurs from discarding your tools. '
'When hiring saboteurs, you pay no food (cost for you is 1 med).',
COMMENT: 'This card is misspelled as printed: the correct spelling is Sergeant.'
},
{
NAME: 'The Mentor',
SET: HQ_EXP,
USES_REFUGEES: True,
TEXT: 'Each round you may play 1 Refugee card to grant another tribe member a +1 to any action.'
},
{
NAME: 'The Excavator',
SET: HQ_EXP,
USES_REFUGEES: True,
TEXT: 'All of your Refugees have a dig of 1. '
'If a Refugee uses a digging tool (i.e. shovel or a pick axe), ignore the tool\'s normal bonus and add +1 to the score.'
},
{
NAME: 'The Ranger',
SET: HQ_EXP,
USES_REFUGEES: True,
TEXT: 'All of your Refugees and Tribe Families have a hunt of 1.'
},
{
NAME: 'The Swindler',
SET: RECON_EXP,
USES_REFUGEES: True,
TEXT: 'Once per turn, you may discard 1 Refugee to persuade a mercenary into joining your tribe for 1 less food '
'or discard two Refugees to reduce the price by 1 med.'
},
{
NAME: 'The Yardmaster',
SET: RECON_EXP,
TEXT: 'Once per turn, you may peek at the top 2 cards of the Junkyard. '
'Return both of them to the top or bottom of the Junkyard.'
}
]
| mit | 6,403,809,527,277,146,000 | -1,596,644,726,595,652,400 | 37.913978 | 162 | 0.546284 | false |
Samuel789/MediPi | MedManagementWeb/env/lib/python3.5/site-packages/Crypto/Cipher/DES.py | 1 | 7100 | # -*- coding: utf-8 -*-
#
# Cipher/DES.py : DES
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""DES symmetric cipher
DES `(Data Encryption Standard)`__ is a symmetric block cipher standardized
by NIST_ . It has a fixed data block size of 8 bytes.
Its keys are 64 bits long, even though 8 bits were used for integrity (now they
are ignored) and do not contribute to security. The effective key length is
therefore 56 bits only.
DES is cryptographically secure, but its key length is too short by nowadays
standards and it could be brute forced with some effort.
**Use AES, not DES. This module is provided only for legacy purposes.**
As an example, encryption can be done as follows:
>>> from Crypto.Cipher import DES
>>>
>>> key = b'-8B key-'
>>> cipher = DES.new(key, DES.MODE_OFB)
>>> plaintext = b'sona si latine loqueris '
>>> msg = cipher.iv + cipher.encrypt(plaintext)
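A decryption sketch (added here for illustration; the receiver side is not part
of the original snippet) splits the transmitted IV back off the message:
>>> iv, ciphertext = msg[:8], msg[8:]
>>> decipher = DES.new(key, DES.MODE_OFB, iv=iv)
>>> assert decipher.decrypt(ciphertext) == plaintext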
.. __: http://en.wikipedia.org/wiki/Data_Encryption_Standard
.. _NIST: http://csrc.nist.gov/publications/fips/fips46-3/fips46-3.pdf
:undocumented: __package__
"""
import sys
from Crypto.Cipher import _create_cipher
from Crypto.Util.py3compat import byte_string
from Crypto.Util._raw_api import (load_pycryptodome_raw_lib,
VoidPointer, SmartPointer,
c_size_t, expect_byte_string)
_raw_des_lib = load_pycryptodome_raw_lib(
"Crypto.Cipher._raw_des",
"""
int DES_start_operation(const uint8_t key[],
size_t key_len,
void **pResult);
int DES_encrypt(const void *state,
const uint8_t *in,
uint8_t *out,
size_t data_len);
int DES_decrypt(const void *state,
const uint8_t *in,
uint8_t *out,
size_t data_len);
int DES_stop_operation(void *state);
""")
def _create_base_cipher(dict_parameters):
"""This method instantiates and returns a handle to a low-level
base cipher. It will absorb named parameters in the process."""
try:
key = dict_parameters.pop("key")
except KeyError:
raise TypeError("Missing 'key' parameter")
expect_byte_string(key)
if len(key) != key_size:
raise ValueError("Incorrect DES key length (%d bytes)" % len(key))
start_operation = _raw_des_lib.DES_start_operation
stop_operation = _raw_des_lib.DES_stop_operation
cipher = VoidPointer()
result = start_operation(key,
c_size_t(len(key)),
cipher.address_of())
if result:
raise ValueError("Error %X while instantiating the DES cipher"
% result)
return SmartPointer(cipher.get(), stop_operation)
def new(key, mode, *args, **kwargs):
"""Create a new DES cipher
:Parameters:
key : byte string
The secret key to use in the symmetric cipher.
It must be 8 byte long. The parity bits will be ignored.
:Keywords:
mode : a *MODE_** constant
The chaining mode to use for encryption or decryption.
iv : byte string
(*Only* `MODE_CBC`, `MODE_CFB`, `MODE_OFB`, `MODE_OPENPGP`).
The initialization vector to use for encryption or decryption.
For `MODE_OPENPGP`, IV must be 8 bytes long for encryption
and 10 bytes for decryption (in the latter case, it is
actually the *encrypted* IV which was prefixed to the ciphertext).
For all other modes, it must be 8 bytes long.
If not provided, a random byte string is generated (you can read it
back via the ``iv`` attribute).
nonce : byte string
(*Only* `MODE_EAX` and `MODE_CTR`).
A mandatory value that must never be reused for any other encryption.
For `MODE_CTR`, its length must be in the range ``[0..7]``.
For `MODE_EAX`, there are no restrictions, but it is recommended to
use at least 16 bytes.
If not provided for `MODE_EAX`, a random byte string is generated (you
can read it back via the ``nonce`` attribute).
mac_len : integer
(*Only* `MODE_EAX`). Length of the authentication tag, in bytes.
It must be no larger than 8 (which is the default).
segment_size : integer
(*Only* `MODE_CFB`).The number of **bits** the plaintext and ciphertext
are segmented in. It must be a multiple of 8.
If not specified, it will be assumed to be 8.
initial_value : integer
(*Only* `MODE_CTR`). The initial value for the counter within
the counter block. By default it is 0.
:Return: a DES cipher, of the applicable mode:
- CBC_ mode
- CFB_ mode
- CTR_ mode
- EAX_ mode
- ECB_ mode
- OFB_ mode
- OpenPgp_ mode
.. _CBC: Crypto.Cipher._mode_cbc.CbcMode-class.html
.. _CFB: Crypto.Cipher._mode_cfb.CfbMode-class.html
.. _CTR: Crypto.Cipher._mode_ctr.CtrMode-class.html
.. _EAX: Crypto.Cipher._mode_eax.EaxMode-class.html
.. _ECB: Crypto.Cipher._mode_ecb.EcbMode-class.html
.. _OFB: Crypto.Cipher._mode_ofb.OfbMode-class.html
.. _OpenPgp: Crypto.Cipher._mode_openpgp.OpenPgpMode-class.html
"""
return _create_cipher(sys.modules[__name__], key, mode, *args, **kwargs)
#: Electronic Code Book (ECB). See `Crypto.Cipher._mode_ecb.EcbMode`.
MODE_ECB = 1
#: Cipher-Block Chaining (CBC). See `Crypto.Cipher._mode_cbc.CbcMode`.
MODE_CBC = 2
#: Cipher FeedBack (CFB). See `Crypto.Cipher._mode_cfb.CfbMode`.
MODE_CFB = 3
#: Output FeedBack (OFB). See `Crypto.Cipher._mode_ofb.OfbMode`.
MODE_OFB = 5
#: CounTer Mode (CTR). See `Crypto.Cipher._mode_ctr.CtrMode`.
MODE_CTR = 6
#: OpenPGP Mode. See `Crypto.Cipher._mode_openpgp.OpenPgpMode`.
MODE_OPENPGP = 7
#: EAX Mode. See `Crypto.Cipher._mode_eax.EaxMode`.
MODE_EAX = 9
#: Size of a data block (in bytes)
block_size = 8
#: Size of a key (in bytes)
key_size = 8
| apache-2.0 | 5,130,900,167,295,430,000 | -6,553,601,840,176,936,000 | 35.787565 | 79 | 0.613521 | false |
BehavioralInsightsTeam/edx-platform | openedx/core/djangoapps/api_admin/migrations/0003_auto_20160404_1618.py | 13 | 2058 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('api_admin', '0002_auto_20160325_1604'),
]
operations = [
migrations.CreateModel(
name='ApiAccessConfig',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')),
],
options={
'ordering': ('-change_date',),
'abstract': False,
},
),
migrations.AddField(
model_name='apiaccessrequest',
name='company_address',
field=models.CharField(default=b'', max_length=255),
),
migrations.AddField(
model_name='apiaccessrequest',
name='company_name',
field=models.CharField(default=b'', max_length=255),
),
migrations.AddField(
model_name='historicalapiaccessrequest',
name='company_address',
field=models.CharField(default=b'', max_length=255),
),
migrations.AddField(
model_name='historicalapiaccessrequest',
name='company_name',
field=models.CharField(default=b'', max_length=255),
),
migrations.AlterField(
model_name='apiaccessrequest',
name='user',
field=models.OneToOneField(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE),
),
]
| agpl-3.0 | -7,283,335,574,286,082,000 | 3,622,479,907,706,299,400 | 36.418182 | 178 | 0.58552 | false |
cogmission/nupic.research | projects/sdr_paper/plot_effect_of_n_bami.py | 2 | 14728 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# This uses plotly to create a nice looking graph of average false positive
# error rates as a function of N, the dimensionality of the vectors. I'm sorry
# this code is so ugly.
import plotly.plotly as py
from plotly.graph_objs import *
import os
plotlyUser = os.environ['PLOTLY_USERNAME']
plotlyAPIKey = os.environ['PLOTLY_API_KEY']
py.sign_in(plotlyUser, plotlyAPIKey)
# Calculated error values
# w=64, s=24 synapses on segment, dendritic threshold is theta=12
errorsW64 = [0.00109461662333690, 5.69571108769533e-6, 1.41253230930730e-7,
8.30107183322324e-9, 8.36246969414003e-10, 1.21653747887184e-10,
2.30980246348674e-11, 5.36606800342786e-12, 1.46020443491340e-12,
4.51268292560082e-13, 1.54840085336688e-13, 5.79872960230082e-14,
2.33904818374099e-14, 1.00570025123595e-14, 4.57067109837325e-15,
2.18074665975532e-15, 1.08615761649705e-15, 5.62075747510851e-16,
3.01011222991216e-16, 1.66258638217391e-16, 9.44355122475050e-17,
5.50227860973758e-17, 3.28135862312369e-17, 1.99909942419741e-17,
1.24208005365401e-17, 7.85865625150437e-18, 5.05651456769333e-18,
3.30476684404715e-18, 2.19155538525467e-18, 1.47322040125054e-18,
1.00301732527126e-18, 6.91085187713756e-19, 4.81531998098323e-19,
3.39081789745300e-19, 2.41162129140343e-19, 1.73141122432297e-19,
1.25417524780710e-19, 9.16179854408830e-20, 6.74653665117861e-20,
5.00594093406879e-20, 3.74140647385797e-20, 2.81565500298724e-20,
2.13295032637269e-20, 1.62595896290270e-20, 1.24693930357791e-20,
9.61778646245879e-21, 7.45921945784706e-21, 5.81568673720061e-21,
4.55727209094172e-21, 3.58853982726974e-21, 2.83894572073852e-21,
2.25603220132537e-21, 1.80056639958786e-21, 1.44304355442520e-21,
1.16115649370912e-21, 9.37953155574248e-22, 7.60487232154203e-22,
6.18824388817498e-22, 5.05306382976791e-22, 4.14003297073025e-22,
3.40303733585598e-22, 2.80606724840170e-22, 2.32089016319463e-22,
1.92528479384963e-22, 1.60169522159935e-22, 1.33620070237719e-22,
1.11772384488816e-22, 9.37419553557710e-23, 7.88201628070397e-23,
6.64374619109495e-23, 5.61346484579118e-23, 4.75403511103980e-23,
4.03533396572869e-23, 3.43285719468893e-23, 2.92661533398441e-23,
2.50025728639646e-23, 2.14037249918740e-23, 1.83593364333338e-23,
1.57785019555348e-23, 1.35860982932100e-23, 1.17198953848347e-23,
1.01282230018514e-23, 8.76808098727516e-24, 7.60360480333303e-24,
6.60481643546819e-24, 5.74660507849120e-24, 5.00789333176087e-24,
4.37095353819125e-24, 3.82084594311781e-24, 3.34495593004021e-24,
2.93261202567532e-24, 2.57476990096962e-24, 2.26375041804855e-24,
1.99302203415240e-24, 1.75701968882530e-24, 1.55099376138908e-24,
1.37088386401156e-24, 1.21321318827475e-24, 1.07499989501613e-24]
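# Note added for exposition (an interpretation, not taken from the file): each
# entry above appears to be the false-positive probability that a random SDR
# with w active bits out of n matches a segment subsampling s=24 of a stored
# pattern's bits with threshold theta=12, presumably
#   sum_{b=theta}^{s} C(s, b) * C(n - s, w - b) / C(n, w),
# evaluated for increasing n (the x-axis of the plot this script produces).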
# w=128, s=24 synapses on segment, dendritic threshold is theta=12
errorsW128 = [0.292078213737764, 0.00736788303358289, 0.000320106080889471,
2.50255519815378e-5, 2.99642102590114e-6, 4.89399786076359e-7,
1.00958512780931e-7, 2.49639031779358e-8, 7.13143762262004e-9,
2.29143708340810e-9, 8.11722283609541e-10, 3.12183638427824e-10,
1.28795248562774e-10, 5.64573534731427e-11, 2.60920666735517e-11,
1.26329222640928e-11, 6.37403647747254e-12, 3.33669667244209e-12,
1.80542698201560e-12, 1.00649239071800e-12, 5.76511433714795e-13,
3.38478276365079e-13, 2.03268423835688e-13, 1.24631220425762e-13,
7.78926809872514e-14, 4.95511644935965e-14, 3.20435767306233e-14,
2.10406420101461e-14, 1.40139130251568e-14, 9.45883828128567e-15,
6.46439769450458e-15, 4.46990041341270e-15, 3.12495999111406e-15,
2.20745309613471e-15, 1.57465638743741e-15, 1.13369191106350e-15,
8.23389688886499e-16, 6.03003384235568e-16, 4.45098155251971e-16,
3.31012812127460e-16, 2.47930640620987e-16, 1.86967641684828e-16,
1.41911643042882e-16, 1.08382344694871e-16, 8.32664249878792e-17,
6.43341686630739e-17, 4.99770213701060e-17, 3.90264314839685e-17,
3.06278060677719e-17, 2.41521444354171e-17, 1.91336334608186e-17,
1.52252678771373e-17, 1.21670765082745e-17, 9.76322639206603e-18,
7.86542142828590e-18, 6.36079286593057e-18, 5.16301523572075e-18,
4.20575231020497e-18, 3.43779601881797e-18, 2.81944231990508e-18,
2.31977574047117e-18, 1.91462490842612e-18, 1.58501607064100e-18,
1.31599800204033e-18, 1.09574520166929e-18, 9.14870565820523e-19,
7.65896441163367e-19, 6.42845939069147e-19, 5.40925947054799e-19,
4.56280340219040e-19, 3.85797146007365e-19, 3.26957333574643e-19,
2.77715835010627e-19, 2.36407614922344e-19, 2.01673273889071e-19,
1.72399937123611e-19, 1.47674143329993e-19, 1.26744185074274e-19,
1.08989916627431e-19, 9.38984797396823e-20, 8.10447332969045e-20,
7.00754327115433e-20, 6.06964068938479e-20, 5.26621381311802e-20,
4.57672733591788e-20, 3.98396919091576e-20, 3.47348308088003e-20,
3.03310286648971e-20, 2.65256965853905e-20, 2.32321622214084e-20,
2.03770629346944e-20, 1.78981879590753e-20, 1.57426885025016e-20,
1.38655900262325e-20, 1.22285532217681e-20, 1.07988400985754e-20,
9.54844958066234e-21, 8.45339347007471e-21, 7.49308887332261e-21]
# w=256 s=24 synapses on segment, dendritic threshold is theta=12
errorsW256 = [0.999997973443107, 0.629372754740777, 0.121087724790945,
0.0193597645959856, 0.00350549721741729, 0.000748965962032781,
0.000186510373919969, 5.30069204544174e-5, 1.68542688790000e-5,
5.89560747849969e-6, 2.23767020178735e-6, 9.11225564771580e-7,
3.94475072403605e-7, 1.80169987461924e-7, 8.62734957588259e-8,
4.30835081022293e-8, 2.23380881095835e-8, 1.19793311140766e-8,
6.62301584036177e-9, 3.76438169312996e-9, 2.19423953869126e-9,
1.30887557403056e-9, 7.97480990380968e-10, 4.95482969325862e-10,
3.13460830324406e-10, 2.01656908833009e-10, 1.31767135541276e-10,
8.73586539716713e-11, 5.87077297245969e-11, 3.99576761200323e-11,
2.75220232248960e-11, 1.91701608847159e-11, 1.34943954043346e-11,
9.59410134279997e-12, 6.88558106762690e-12, 4.98590018053347e-12,
3.64092373686549e-12, 2.68014488783288e-12, 1.98797603387229e-12,
1.48528633835993e-12, 1.11739495331362e-12, 8.46179085322245e-13,
6.44833912395788e-13, 4.94359544385977e-13, 3.81184046390743e-13,
2.95540942533515e-13, 2.30352375229645e-13, 1.80454125570680e-13,
1.42053695445942e-13, 1.12348554361008e-13, 8.92553023993497e-14,
7.12162118182915e-14, 5.70601336962939e-14, 4.59018613132802e-14,
3.70688756443847e-14, 3.00477108050374e-14, 2.44444632746040e-14,
1.99555570507925e-14, 1.63459876978165e-14, 1.34330500162347e-14,
1.10741076071588e-14, 9.15735686079334e-15, 7.59482030375183e-15,
6.31700763775213e-15, 5.26883007721797e-15, 4.40646078058260e-15,
3.69491257084125e-15, 3.10616176350258e-15, 2.61768946987837e-15,
2.21134330625883e-15, 1.87244595538993e-15, 1.58909462235613e-15,
1.35160864769231e-15, 1.15209251425918e-15, 9.84089038159454e-16,
8.42303276784589e-16, 7.22382069351279e-16, 6.20737481445016e-16,
5.34405004455211e-16, 4.60929349954820e-16, 3.98272218221725e-16,
3.44737614911305e-16, 2.98911220348303e-16, 2.59611042743928e-16,
2.25847156136861e-16, 1.96788771381788e-16, 1.71737241200100e-16,
1.50103879041435e-16, 1.31391692394609e-16, 1.15180306705465e-16,
1.01113495891954e-16, 8.88888471340935e-17, 7.82491770468619e-17,
6.89753881281890e-17, 6.08805121319100e-17, 5.38047335965072e-17,
4.76112244136112e-17, 4.21826508250283e-17, 3.74182390049037e-17]
# a=n/2 cells active, s=24 synapses on segment, dendritic threshold is theta=12
errorsWHalfOfN = [0.00518604306750049, 0.00595902789913702, 0.00630387009654985,
0.00649883841432922, 0.00662414645898081, 0.00671145554136860,
0.00677576979476038, 0.00682511455944402, 0.00686417048273405,
0.00689585128896232, 0.00692206553525732, 0.00694411560202313,
0.00696292062841680, 0.00697914780884254, 0.00699329317658955,
0.00700573317947932, 0.00701675866709042, 0.00702659791060005,
0.00703543257326555, 0.00704340902766207, 0.00705064652812678,
0.00705724321275902, 0.00706328057895142, 0.00706882686694759,
0.00707393965010535, 0.00707866784069150, 0.00708305325948833,
0.00708713187600340, 0.00709093479720398, 0.00709448906232020,
0.00709781828668885, 0.00710094318706191, 0.00710388201308149,
0.00710665090391040, 0.00710926418473885, 0.00711173461466950,
0.00711407359503532, 0.00711629134532740, 0.00711839705245984,
0.00712039899796979, 0.00712230466686664, 0.00712412084114628,
0.00712585368043317, 0.00712750879177102, 0.00712909129022819,
0.00713060585169798, 0.00713205675904178, 0.00713344794253425,
0.00713478301541479, 0.00713606530522246, 0.00713729788148649,
0.00713848358025748, 0.00713962502589200, 0.00714072465044275,
0.00714178471095577, 0.00714280730493375, 0.00714379438418811,
0.00714474776727266, 0.00714566915066510, 0.00714656011884143,
0.00714742215336873, 0.00714825664112637, 0.00714906488175141,
0.00714984809439242, 0.00715060742384539, 0.00715134394613683,
0.00715205867361116, 0.00715275255957311, 0.00715342650252986,
0.00715408135007252, 0.00715471790243238, 0.00715533691574314,
0.00715593910503720, 0.00715652514700088, 0.00715709568251095,
0.00715765131897250, 0.00715819263247588, 0.00715872016978916,
0.00715923445020018, 0.00715973596722158, 0.00716022519017042,
0.00716070256563302, 0.00716116851882463, 0.00716162345485272,
0.00716206775989168, 0.00716250180227608, 0.00716292593351907,
0.00716334048926185, 0.00716374579015949, 0.00716414214270805,
0.00716452984001762, 0.00716490916253519, 0.00716528037872114,
0.00716564374568296, 0.00716599950976899, 0.00716634790712550,
0.00716668916421930, 0.00716702349832872, 0.00716735111800491]
listofNValues = [300, 500, 700, 900, 1100, 1300, 1500, 1700, 1900, 2100, 2300,
2500, 2700, 2900, 3100, 3300, 3500, 3700, 3900, 4100, 4300, 4500, 4700, 4900,
5100, 5300, 5500, 5700, 5900, 6100, 6300, 6500, 6700, 6900, 7100, 7300, 7500,
7700, 7900, 8100, 8300, 8500, 8700, 8900, 9100, 9300, 9500, 9700, 9900, 10100,
10300, 10500, 10700, 10900, 11100, 11300, 11500, 11700, 11900, 12100, 12300,
12500, 12700, 12900, 13100, 13300, 13500, 13700, 13900, 14100, 14300, 14500,
14700, 14900, 15100, 15300, 15500, 15700, 15900, 16100, 16300, 16500, 16700,
16900, 17100, 17300, 17500, 17700, 17900, 18100, 18300, 18500, 18700, 18900,
19100, 19300, 19500, 19700, 19900]
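# The arrays above are precomputed.  The helper below is a minimal sketch of how
# values like these could be derived; it assumes the standard SDR false-match
# calculation (probability that a random pattern with w active bits out of n
# overlaps a fixed set of s synapses in at least theta places).  It is
# illustrative only and is not used by the plotting code below.
from fractions import Fraction
from math import factorial

def binomialCoefficient(n, k):
  # Exact integer binomial coefficient; returns 0 outside the valid range.
  if k < 0 or k > n:
    return 0
  return factorial(n) // (factorial(k) * factorial(n - k))

def falseMatchProbability(n, w, s=24, theta=12):
  # Sum over every overlap size b that reaches the dendritic threshold theta.
  numerator = sum(binomialCoefficient(s, b) * binomialCoefficient(n - s, w - b)
                  for b in range(theta, s + 1))
  return float(Fraction(numerator, binomialCoefficient(n, w)))

# Under this assumption, falseMatchProbability(300, 64) is expected to be close
# to errorsW64[0] above.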
trace1 = Scatter(
y=errorsW64,
x=listofNValues,
line=Line(
color='rgb(0, 0, 0)',
width=3,
shape='spline'
),
name="w=64"
)
trace2 = Scatter(
y=errorsW128,
x=listofNValues[1:],
line=Line(
color='rgb(0, 0, 0)',
width=3,
shape='spline'
),
name="w=128"
)
trace3 = Scatter(
y=errorsW256,
x=listofNValues[1:],
line=Line(
color='rgb(0, 0, 0)',
width=3,
shape='spline'
),
name="w=256"
)
trace4 = Scatter(
y=errorsWHalfOfN,
x=listofNValues[1:],
line=Line(
color='rgb(0, 0, 0)',
width=3,
dash='dash',
shape='spline',
),
name="w=0.5*n"
)
data = Data([trace1, trace2, trace3, trace4])
layout = Layout(
title='',
showlegend=False,
autosize=False,
width=855,
height=700,
xaxis=XAxis(
title='SDR size (n)',
titlefont=Font(
family='',
size=26,
color=''
),
tickfont=Font(
family='',
size=16,
color=''
),
exponentformat="none",
dtick=2000,
showline=True,
range=[0,20000],
),
yaxis=YAxis(
title='Probability of false positives',
type='log',
exponentformat='power',
autorange=True,
titlefont=Font(
family='',
size=26,
color=''
),
tickfont=Font(
family='',
size=12,
color=''
),
showline=True,
),
annotations=Annotations([
Annotation(
x=16988,
y=0.1143,
xref='x',
yref='paper',
text='$w = 64$',
showarrow=False,
font=Font(
family='',
size=16,
color=''
),
align='center',
textangle=0,
bordercolor='',
borderwidth=1,
borderpad=1,
bgcolor='rgba(0, 0, 0, 0)',
opacity=1
),
Annotation(
x=17103,
y=0.259,
xref='x',
yref='paper',
text='$w = 128$',
showarrow=False,
font=Font(
family='',
size=16,
color=''
),
align='center',
textangle=0,
bordercolor='',
borderwidth=1,
borderpad=1,
bgcolor='rgba(0, 0, 0, 0)',
opacity=1
),
Annotation(
x=17132,
y=0.411,
xref='x',
yref='paper',
text='$w = 256$',
showarrow=False,
font=Font(
family='',
size=16,
color=''
),
align='center',
textangle=0,
bordercolor='',
borderwidth=1,
borderpad=1,
bgcolor='rgba(0, 0, 0, 0)',
opacity=1
),
Annotation(
x=16845,
y=0.933,
xref='x',
yref='paper',
text='$w = \\frac{n}{2}$',
showarrow=False,
font=Font(
family='',
size=16,
color=''
),
align='center',
textangle=0,
bordercolor='',
borderwidth=1,
borderpad=1,
bgcolor='rgba(0, 0, 0, 0)',
opacity=1
),
]),)
fig = Figure(data=data, layout=layout)
plot_url = py.plot(fig)
print "url=",plot_url
figure = py.get_figure(plot_url)
py.image.save_as(figure, 'images/effect_of_n_bami.png', scale=4)
| agpl-3.0 | -7,064,931,466,343,576,000 | 3,208,807,888,919,230,000 | 39.573003 | 80 | 0.697108 | false |
jhonnyam123/hangoutsbot | hangupsbot/event.py | 3 | 2777 | import logging
import hangups
logger = logging.getLogger(__name__)
class GenericEvent:
bot = None
emit_log = logging.INFO
def __init__(self, bot):
self.bot = bot
class StatusEvent(GenericEvent):
"""base class for all non-ConversationEvent"""
def __init__(self, bot, state_update_event):
super().__init__(bot)
self.conv_event = state_update_event
self.conv_id = state_update_event.conversation_id.id_
self.conv = None
self.event_id = None
self.user_id = None
self.user = None
self.timestamp = None
self.text = ''
self.from_bot = False
class TypingEvent(StatusEvent):
"""user starts/pauses/stops typing"""
def __init__(self, bot, state_update_event):
super().__init__(bot, state_update_event)
self.user_id = state_update_event.user_id
self.timestamp = state_update_event.timestamp
self.user = self.bot.get_hangups_user(state_update_event.user_id)
if self.user.is_self:
self.from_bot = True
self.text = "typing"
class WatermarkEvent(StatusEvent):
"""user reads up to a certain point in the conversation"""
def __init__(self, bot, state_update_event):
super().__init__(bot, state_update_event)
self.user_id = state_update_event.participant_id
self.timestamp = state_update_event.latest_read_timestamp
self.user = self.bot.get_hangups_user(state_update_event.participant_id)
if self.user.is_self:
self.from_bot = True
self.text = "watermark"
class ConversationEvent(GenericEvent):
"""user joins, leaves, renames or messages a conversation"""
def __init__(self, bot, conv_event):
super().__init__(bot)
self.conv_event = conv_event
self.conv_id = conv_event.conversation_id
self.conv = self.bot._conv_list.get(self.conv_id)
self.event_id = conv_event.id_
self.user_id = conv_event.user_id
self.user = self.conv.get_user(self.user_id)
self.timestamp = conv_event.timestamp
self.text = conv_event.text.strip() if isinstance(conv_event, hangups.ChatMessageEvent) else ''
self.log()
def log(self):
if logger.isEnabledFor(self.emit_log):
logger.log(self.emit_log, 'eid/dt: {}/{}'.format(self.event_id, self.timestamp.astimezone(tz=None).strftime('%Y-%m-%d %H:%M:%S')))
logger.log(self.emit_log, 'cid/cn: {}/{}'.format(self.conv_id, self.bot.conversations.get_name(self.conv)))
logger.log(self.emit_log, 'c/g/un: {}/{}/{}'.format(self.user_id.chat_id, self.user_id.gaia_id, self.user.full_name))
logger.log(self.emit_log, 'len/tx: {}/{}'.format(len(self.text), self.text))
| agpl-3.0 | 7,927,655,933,129,028,000 | -8,119,813,794,408,401,000 | 31.670588 | 142 | 0.618293 | false |
MridulS/BinPy | BinPy/examples/source/Combinational/DEMUX.py | 1 | 1066 |
# coding: utf-8
# Example for DEMUX class.
# In[1]:
from __future__ import print_function
from BinPy.Combinational.combinational import *
# In[2]:
# Initializing the DEMUX class
# A DEMUX takes exactly one input
demux = DEMUX(1)
# Set the select lines (here a single select line, initially 0)
# n select lines address 2**n output lines
demux.selectLines(0)
# Output of demux
print (demux.output())
# In[3]:
# Input changes
# Input at index 0 is changed to 0
demux.setInput(0, 0)
# New Output of the demux
print (demux.output())
# In[4]:
# Get Input States
print (demux.getInputStates())
# In[5]:
# Using a Connector to wire the DEMUX output into another gate
# Take a Connector
conn = Connector()
# Set Output of demux to Connector conn
# sets conn as the output at index 0
demux.setOutput(0, conn)
# Put this connector as the input to gate1
gate1 = AND(conn, 0)
# Output of the gate1
print (gate1.output())
# In[6]:
# Changing the select line value
# select line 0 is set to 1, routing the input to output index 1
demux.selectLine(0, 1)
# New output of demux
print (demux.output())
# In[7]:
# Information about demux instance can be found by
print (demux)
| bsd-3-clause | 681,864,813,154,022,400 | -3,067,083,119,941,865,000 | 10.714286 | 50 | 0.687617 | false |
IT-Department-Projects/OOAD-Project | Flask_App/oakcrest/lib/python2.7/site-packages/click/testing.py | 136 | 11002 | import os
import sys
import shutil
import tempfile
import contextlib
from ._compat import iteritems, PY2
# If someone wants to vendor click, we want to ensure the
# correct package is discovered. Ideally we could use a
# relative import here but unfortunately Python does not
# support that.
clickpkg = sys.modules[__name__.rsplit('.', 1)[0]]
if PY2:
from cStringIO import StringIO
else:
import io
from ._compat import _find_binary_reader
class EchoingStdin(object):
def __init__(self, input, output):
self._input = input
self._output = output
def __getattr__(self, x):
return getattr(self._input, x)
def _echo(self, rv):
self._output.write(rv)
return rv
def read(self, n=-1):
return self._echo(self._input.read(n))
def readline(self, n=-1):
return self._echo(self._input.readline(n))
def readlines(self):
return [self._echo(x) for x in self._input.readlines()]
def __iter__(self):
return iter(self._echo(x) for x in self._input)
def __repr__(self):
return repr(self._input)
def make_input_stream(input, charset):
# Is already an input stream.
if hasattr(input, 'read'):
if PY2:
return input
rv = _find_binary_reader(input)
if rv is not None:
return rv
raise TypeError('Could not find binary reader for input stream.')
if input is None:
input = b''
elif not isinstance(input, bytes):
input = input.encode(charset)
if PY2:
return StringIO(input)
return io.BytesIO(input)
class Result(object):
"""Holds the captured result of an invoked CLI script."""
def __init__(self, runner, output_bytes, exit_code, exception,
exc_info=None):
#: The runner that created the result
self.runner = runner
#: The output as bytes.
self.output_bytes = output_bytes
#: The exit code as integer.
self.exit_code = exit_code
        #: The exception that happened, if one did.
self.exception = exception
#: The traceback
self.exc_info = exc_info
@property
def output(self):
"""The output as unicode string."""
return self.output_bytes.decode(self.runner.charset, 'replace') \
.replace('\r\n', '\n')
def __repr__(self):
return '<Result %s>' % (
self.exception and repr(self.exception) or 'okay',
)
class CliRunner(object):
"""The CLI runner provides functionality to invoke a Click command line
    script for unit-testing purposes in an isolated environment. This only
    works in single-threaded systems without any concurrency, as it changes the
global interpreter state.
:param charset: the character set for the input and output data. This is
UTF-8 by default and should not be changed currently as
                    the reporting to Click only works properly in Python 2.
:param env: a dictionary with environment variables for overriding.
:param echo_stdin: if this is set to `True`, then reading from stdin writes
to stdout. This is useful for showing examples in
some circumstances. Note that regular prompts
will automatically echo the input.
"""
def __init__(self, charset=None, env=None, echo_stdin=False):
if charset is None:
charset = 'utf-8'
self.charset = charset
self.env = env or {}
self.echo_stdin = echo_stdin
def get_default_prog_name(self, cli):
"""Given a command object it will return the default program name
for it. The default is the `name` attribute or ``"root"`` if not
set.
"""
return cli.name or 'root'
def make_env(self, overrides=None):
"""Returns the environment overrides for invoking a script."""
rv = dict(self.env)
if overrides:
rv.update(overrides)
return rv
@contextlib.contextmanager
def isolation(self, input=None, env=None, color=False):
"""A context manager that sets up the isolation for invoking of a
command line tool. This sets up stdin with the given input data
and `os.environ` with the overrides from the given dictionary.
This also rebinds some internals in Click to be mocked (like the
prompt functionality).
This is automatically done in the :meth:`invoke` method.
.. versionadded:: 4.0
The ``color`` parameter was added.
:param input: the input stream to put into sys.stdin.
:param env: the environment overrides as dictionary.
:param color: whether the output should contain color codes. The
application can still override this explicitly.
"""
input = make_input_stream(input, self.charset)
old_stdin = sys.stdin
old_stdout = sys.stdout
old_stderr = sys.stderr
old_forced_width = clickpkg.formatting.FORCED_WIDTH
clickpkg.formatting.FORCED_WIDTH = 80
env = self.make_env(env)
if PY2:
sys.stdout = sys.stderr = bytes_output = StringIO()
if self.echo_stdin:
input = EchoingStdin(input, bytes_output)
else:
bytes_output = io.BytesIO()
if self.echo_stdin:
input = EchoingStdin(input, bytes_output)
input = io.TextIOWrapper(input, encoding=self.charset)
sys.stdout = sys.stderr = io.TextIOWrapper(
bytes_output, encoding=self.charset)
sys.stdin = input
def visible_input(prompt=None):
sys.stdout.write(prompt or '')
val = input.readline().rstrip('\r\n')
sys.stdout.write(val + '\n')
sys.stdout.flush()
return val
def hidden_input(prompt=None):
sys.stdout.write((prompt or '') + '\n')
sys.stdout.flush()
return input.readline().rstrip('\r\n')
def _getchar(echo):
char = sys.stdin.read(1)
if echo:
sys.stdout.write(char)
sys.stdout.flush()
return char
default_color = color
def should_strip_ansi(stream=None, color=None):
if color is None:
return not default_color
return not color
old_visible_prompt_func = clickpkg.termui.visible_prompt_func
old_hidden_prompt_func = clickpkg.termui.hidden_prompt_func
old__getchar_func = clickpkg.termui._getchar
old_should_strip_ansi = clickpkg.utils.should_strip_ansi
clickpkg.termui.visible_prompt_func = visible_input
clickpkg.termui.hidden_prompt_func = hidden_input
clickpkg.termui._getchar = _getchar
clickpkg.utils.should_strip_ansi = should_strip_ansi
old_env = {}
try:
for key, value in iteritems(env):
old_env[key] = os.environ.get(key)
if value is None:
try:
del os.environ[key]
except Exception:
pass
else:
os.environ[key] = value
yield bytes_output
finally:
for key, value in iteritems(old_env):
if value is None:
try:
del os.environ[key]
except Exception:
pass
else:
os.environ[key] = value
sys.stdout = old_stdout
sys.stderr = old_stderr
sys.stdin = old_stdin
clickpkg.termui.visible_prompt_func = old_visible_prompt_func
clickpkg.termui.hidden_prompt_func = old_hidden_prompt_func
clickpkg.termui._getchar = old__getchar_func
clickpkg.utils.should_strip_ansi = old_should_strip_ansi
clickpkg.formatting.FORCED_WIDTH = old_forced_width
def invoke(self, cli, args=None, input=None, env=None,
catch_exceptions=True, color=False, **extra):
"""Invokes a command in an isolated environment. The arguments are
        forwarded directly to the command line script; the `extra` keyword
arguments are passed to the :meth:`~clickpkg.Command.main` function of
the command.
This returns a :class:`Result` object.
.. versionadded:: 3.0
The ``catch_exceptions`` parameter was added.
.. versionchanged:: 3.0
The result object now has an `exc_info` attribute with the
traceback if available.
.. versionadded:: 4.0
The ``color`` parameter was added.
:param cli: the command to invoke
:param args: the arguments to invoke
:param input: the input data for `sys.stdin`.
:param env: the environment overrides.
:param catch_exceptions: Whether to catch any other exceptions than
``SystemExit``.
:param extra: the keyword arguments to pass to :meth:`main`.
:param color: whether the output should contain color codes. The
application can still override this explicitly.
"""
exc_info = None
with self.isolation(input=input, env=env, color=color) as out:
exception = None
exit_code = 0
try:
cli.main(args=args or (),
prog_name=self.get_default_prog_name(cli), **extra)
except SystemExit as e:
if e.code != 0:
exception = e
exc_info = sys.exc_info()
exit_code = e.code
if not isinstance(exit_code, int):
sys.stdout.write(str(exit_code))
sys.stdout.write('\n')
exit_code = 1
except Exception as e:
if not catch_exceptions:
raise
exception = e
exit_code = -1
exc_info = sys.exc_info()
finally:
sys.stdout.flush()
output = out.getvalue()
return Result(runner=self,
output_bytes=output,
exit_code=exit_code,
exception=exception,
exc_info=exc_info)
@contextlib.contextmanager
def isolated_filesystem(self):
"""A context manager that creates a temporary folder and changes
the current working directory to it for isolated filesystem tests.
"""
cwd = os.getcwd()
t = tempfile.mkdtemp()
os.chdir(t)
try:
yield t
finally:
os.chdir(cwd)
try:
shutil.rmtree(t)
except (OSError, IOError):
pass
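# Illustrative usage sketch (not part of the original module): a minimal example
# of how CliRunner is typically exercised in a test.  The `hello` command below
# is a made-up example command, not something click itself provides.
if __name__ == '__main__':
    import click

    @click.command()
    @click.argument('name')
    def hello(name):
        click.echo('Hello %s!' % name)

    runner = CliRunner()
    result = runner.invoke(hello, ['Peter'])
    assert result.exit_code == 0
    assert result.output == 'Hello Peter!\n'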
| mit | -5,304,775,288,766,831,000 | -6,004,887,410,381,367,000 | 33.167702 | 79 | 0.567624 | false |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/tests/test_panelnd.py | 2 | 3445 | # -*- coding: utf-8 -*-
import nose
from pandas.core import panelnd
from pandas.core.panel import Panel
from pandas.util.testing import assert_panel_equal
import pandas.util.testing as tm
class TestPanelnd(tm.TestCase):
def setUp(self):
pass
def test_4d_construction(self):
# create a 4D
Panel4D = panelnd.create_nd_panel_factory(
klass_name='Panel4D',
orders=['labels', 'items', 'major_axis', 'minor_axis'],
slices={'items': 'items', 'major_axis': 'major_axis',
'minor_axis': 'minor_axis'},
slicer=Panel,
aliases={'major': 'major_axis', 'minor': 'minor_axis'},
stat_axis=2)
p4d = Panel4D(dict(L1=tm.makePanel(), L2=tm.makePanel())) # noqa
def test_4d_construction_alt(self):
# create a 4D
Panel4D = panelnd.create_nd_panel_factory(
klass_name='Panel4D',
orders=['labels', 'items', 'major_axis', 'minor_axis'],
slices={'items': 'items', 'major_axis': 'major_axis',
'minor_axis': 'minor_axis'},
slicer='Panel',
aliases={'major': 'major_axis', 'minor': 'minor_axis'},
stat_axis=2)
p4d = Panel4D(dict(L1=tm.makePanel(), L2=tm.makePanel())) # noqa
def test_4d_construction_error(self):
# create a 4D
self.assertRaises(Exception,
panelnd.create_nd_panel_factory,
klass_name='Panel4D',
orders=['labels', 'items', 'major_axis',
'minor_axis'],
slices={'items': 'items',
'major_axis': 'major_axis',
'minor_axis': 'minor_axis'},
slicer='foo',
aliases={'major': 'major_axis',
'minor': 'minor_axis'},
stat_axis=2)
def test_5d_construction(self):
# create a 4D
Panel4D = panelnd.create_nd_panel_factory(
klass_name='Panel4D',
orders=['labels1', 'items', 'major_axis', 'minor_axis'],
slices={'items': 'items', 'major_axis': 'major_axis',
'minor_axis': 'minor_axis'},
slicer=Panel,
aliases={'major': 'major_axis', 'minor': 'minor_axis'},
stat_axis=2)
p4d = Panel4D(dict(L1=tm.makePanel(), L2=tm.makePanel()))
# create a 5D
Panel5D = panelnd.create_nd_panel_factory(
klass_name='Panel5D',
orders=['cool1', 'labels1', 'items', 'major_axis',
'minor_axis'],
slices={'labels1': 'labels1', 'items': 'items',
'major_axis': 'major_axis',
'minor_axis': 'minor_axis'},
slicer=Panel4D,
aliases={'major': 'major_axis', 'minor': 'minor_axis'},
stat_axis=2)
p5d = Panel5D(dict(C1=p4d))
# slice back to 4d
results = p5d.ix['C1', :, :, 0:3, :]
expected = p4d.ix[:, :, 0:3, :]
assert_panel_equal(results['L1'], expected['L1'])
# test a transpose
# results = p5d.transpose(1,2,3,4,0)
# expected =
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit | 2,054,343,477,366,958,000 | 140,750,800,191,442,610 | 33.79798 | 75 | 0.484761 | false |
harikishen/addons-server | src/olympia/amo/tasks.py | 1 | 2584 | import datetime
from django.core.mail import EmailMessage, EmailMultiAlternatives
import olympia.core.logger
from olympia import amo
from olympia.activity.models import ActivityLog
from olympia.amo.celery import task
from olympia.amo.utils import get_email_backend
from olympia.bandwagon.models import Collection
from olympia.stats.models import Contribution
log = olympia.core.logger.getLogger('z.task')
@task
def send_email(recipient, subject, message, from_email=None,
html_message=None, attachments=None, real_email=False,
cc=None, headers=None, fail_silently=False, async=False,
max_retries=None, reply_to=None, **kwargs):
backend = EmailMultiAlternatives if html_message else EmailMessage
connection = get_email_backend(real_email)
result = backend(subject, message, from_email, to=recipient, cc=cc,
connection=connection, headers=headers,
attachments=attachments, reply_to=reply_to)
if html_message:
result.attach_alternative(html_message, 'text/html')
try:
result.send(fail_silently=False)
return True
except Exception as e:
log.error('send_mail failed with error: %s' % e)
if async:
return send_email.retry(exc=e, max_retries=max_retries)
elif not fail_silently:
raise
else:
return False
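# Illustrative only (not part of the original module): send_email is a Celery
# task, so callers normally queue it rather than call it inline, e.g.
#   send_email.delay(['[email protected]'], 'Subject', 'Body text',
#                    from_email='[email protected]', async=True, max_retries=3)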
@task
def set_modified_on_object(obj, **kw):
"""Sets modified on one object at a time."""
try:
log.info('Setting modified on object: %s, %s' %
(obj.__class__.__name__, obj.pk))
obj.update(modified=datetime.datetime.now())
except Exception, e:
log.error('Failed to set modified on: %s, %s - %s' %
(obj.__class__.__name__, obj.pk, e))
@task
def delete_logs(items, **kw):
log.info('[%s@%s] Deleting logs' % (len(items), delete_logs.rate_limit))
ActivityLog.objects.filter(pk__in=items).exclude(
action__in=amo.LOG_KEEP).delete()
@task
def delete_stale_contributions(items, **kw):
log.info('[%s@%s] Deleting stale contributions' %
(len(items), delete_stale_contributions.rate_limit))
Contribution.objects.filter(
transaction_id__isnull=True, pk__in=items).delete()
@task
def delete_anonymous_collections(items, **kw):
log.info('[%s@%s] Deleting anonymous collections' %
(len(items), delete_anonymous_collections.rate_limit))
Collection.objects.filter(type=amo.COLLECTION_ANONYMOUS,
pk__in=items).delete()
| bsd-3-clause | 3,015,933,902,747,499,500 | -6,793,557,486,660,076,000 | 33 | 76 | 0.64822 | false |
sctjkc01/ofCourse | ofcourse/participants.py | 1 | 3800 | import os
from datetime import datetime, date, timedelta
from urlparse import urlparse
import yaml
from flask import Blueprint, redirect
from flask.ext.mako import render_template
import ofcourse
from ofcourse.util import app_path, get_hw_keys
participants_bp = Blueprint('participants_bp',
__name__,
template_folder=app_path('templates'))
currentYear = str(date.today().year)
currentTerm = "fall" if date.today().month > 7 else "spring"
@participants_bp.route('/')
def participants_blank():
"""
This is the default landing
for the participants listing page.
It will list all of the participants
in the current term for HFOSS
"""
return participants_year_term(currentYear, currentTerm)
@participants_bp.route('/<year_or_nick>')
def participants_year(year_or_nick):
"""
    This will get all the participants within a given year,
    or redirect to an individual participant's page if a nickname is given
"""
p_url = find_participant(year_or_nick)
if p_url is not None:
# render individual page
return redirect(p_url)
# otherwise render as a year
return participants(year_or_nick + '/')
@participants_bp.route('/<year>/<term>')
def participants_year_term(year, term):
"""
This will get all the participants
within a given year and term
"""
return participants(year + '/' + term + '/')
@participants_bp.route('/all')
def participants_all():
    """
    This will get all the participants
    who have taken HFOSS
    """
    return participants('')
def participants(root_dir):
"""
Render the participants page,
which shows a directory of all
the students with their forge
    links, blog posts, assignment
    links, etc.
"""
yaml_dir = app_path('people', root_dir)
student_data = []
for dirpath, dirnames, files in os.walk(yaml_dir):
dirpath = dirpath.rstrip("/")
for fname in sorted(files):
if fname.endswith('.yaml'):
with open(dirpath + '/' + fname) as students:
contents = yaml.safe_load(students)
contents['yaml'] = dirpath + '/' + fname
year_term_data = dirpath.split('/')
contents['participant_page'] = "{y}/{t}/{u}".format(
y=year_term_data[-2],
t=year_term_data[-1],
u=os.path.splitext(fname)[0]
)
for forge in contents['forges']:
url = urlparse(forge)
if "github.com" in url.netloc:
contents['github'] = url.path[1:]
contents['isActive'] = (currentYear in year_term_data and
currentTerm in year_term_data)
student_data.append(contents)
assignments = get_hw_keys()
elapsed = (datetime.today() - ofcourse.site.COURSE_START).total_seconds()
target_number = int(elapsed / timedelta(weeks=1).total_seconds() + 1 +
len(assignments))
return render_template(
'blogs.mak', name='mako',
student_data=student_data,
gravatar=ofcourse.site.gravatar,
target_number=target_number,
hw_keys=assignments
)
def find_participant(nick):
yaml_dir = app_path('people')
for dirpath, dirnames, files in os.walk(yaml_dir):
for fname in files:
if (fname.lower().startswith(nick.lower()) and
fname.endswith('.yaml')):
participant = os.path.join(
dirpath,
fname
).replace(yaml_dir, '')
participant = participant.replace('.yaml', '')
return 'participants' + participant
| apache-2.0 | 4,032,689,492,452,114,400 | -2,583,085,619,316,633,000 | 28.6875 | 77 | 0.569737 | false |
doduytrung/odoo-8.0 | addons/account/wizard/account_validate_account_move.py | 381 | 3203 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class validate_account_move(osv.osv_memory):
_name = "validate.account.move"
_description = "Validate Account Move"
_columns = {
'journal_ids': fields.many2many('account.journal', 'wizard_validate_account_move_journal', 'wizard_id', 'journal_id', 'Journal', required=True),
'period_ids': fields.many2many('account.period', 'wizard_validate_account_move_period', 'wizard_id', 'period_id', 'Period', required=True, domain=[('state','<>','done')]),
}
def validate_move(self, cr, uid, ids, context=None):
obj_move = self.pool.get('account.move')
if context is None:
context = {}
data = self.read(cr, uid, ids[0], context=context)
ids_move = obj_move.search(cr, uid, [('state','=','draft'),('journal_id','in',tuple(data['journal_ids'])),('period_id','in',tuple(data['period_ids']))], order='date')
if not ids_move:
raise osv.except_osv(_('Warning!'), _('Specified journals do not have any account move entries in draft state for the specified periods.'))
obj_move.button_validate(cr, uid, ids_move, context=context)
return {'type': 'ir.actions.act_window_close'}
class validate_account_move_lines(osv.osv_memory):
_name = "validate.account.move.lines"
_description = "Validate Account Move Lines"
def validate_move_lines(self, cr, uid, ids, context=None):
obj_move_line = self.pool.get('account.move.line')
obj_move = self.pool.get('account.move')
move_ids = []
if context is None:
context = {}
data_line = obj_move_line.browse(cr, uid, context['active_ids'], context)
for line in data_line:
if line.move_id.state=='draft':
move_ids.append(line.move_id.id)
move_ids = list(set(move_ids))
if not move_ids:
raise osv.except_osv(_('Warning!'), _('Selected Entry Lines does not have any account move entries in draft state.'))
obj_move.button_validate(cr, uid, move_ids, context)
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 915,810,944,456,144,400 | 2,032,567,980,143,422,700 | 48.276923 | 179 | 0.622229 | false |
akarki15/mozillians | vendor-local/lib/python/unidecode/x07e.py | 252 | 4682 | data = (
'Xia ', # 0x00
'Yuan ', # 0x01
'Zong ', # 0x02
'Xu ', # 0x03
'Nawa ', # 0x04
'Odoshi ', # 0x05
'Geng ', # 0x06
'Sen ', # 0x07
'Ying ', # 0x08
'Jin ', # 0x09
'Yi ', # 0x0a
'Zhui ', # 0x0b
'Ni ', # 0x0c
'Bang ', # 0x0d
'Gu ', # 0x0e
'Pan ', # 0x0f
'Zhou ', # 0x10
'Jian ', # 0x11
'Cuo ', # 0x12
'Quan ', # 0x13
'Shuang ', # 0x14
'Yun ', # 0x15
'Xia ', # 0x16
'Shuai ', # 0x17
'Xi ', # 0x18
'Rong ', # 0x19
'Tao ', # 0x1a
'Fu ', # 0x1b
'Yun ', # 0x1c
'Zhen ', # 0x1d
'Gao ', # 0x1e
'Ru ', # 0x1f
'Hu ', # 0x20
'Zai ', # 0x21
'Teng ', # 0x22
'Xian ', # 0x23
'Su ', # 0x24
'Zhen ', # 0x25
'Zong ', # 0x26
'Tao ', # 0x27
'Horo ', # 0x28
'Cai ', # 0x29
'Bi ', # 0x2a
'Feng ', # 0x2b
'Cu ', # 0x2c
'Li ', # 0x2d
'Suo ', # 0x2e
'Yin ', # 0x2f
'Xi ', # 0x30
'Zong ', # 0x31
'Lei ', # 0x32
'Zhuan ', # 0x33
'Qian ', # 0x34
'Man ', # 0x35
'Zhi ', # 0x36
'Lu ', # 0x37
'Mo ', # 0x38
'Piao ', # 0x39
'Lian ', # 0x3a
'Mi ', # 0x3b
'Xuan ', # 0x3c
'Zong ', # 0x3d
'Ji ', # 0x3e
'Shan ', # 0x3f
'Sui ', # 0x40
'Fan ', # 0x41
'Shuai ', # 0x42
'Beng ', # 0x43
'Yi ', # 0x44
'Sao ', # 0x45
'Mou ', # 0x46
'Zhou ', # 0x47
'Qiang ', # 0x48
'Hun ', # 0x49
'Sem ', # 0x4a
'Xi ', # 0x4b
'Jung ', # 0x4c
'Xiu ', # 0x4d
'Ran ', # 0x4e
'Xuan ', # 0x4f
'Hui ', # 0x50
'Qiao ', # 0x51
'Zeng ', # 0x52
'Zuo ', # 0x53
'Zhi ', # 0x54
'Shan ', # 0x55
'San ', # 0x56
'Lin ', # 0x57
'Yu ', # 0x58
'Fan ', # 0x59
'Liao ', # 0x5a
'Chuo ', # 0x5b
'Zun ', # 0x5c
'Jian ', # 0x5d
'Rao ', # 0x5e
'Chan ', # 0x5f
'Rui ', # 0x60
'Xiu ', # 0x61
'Hui ', # 0x62
'Hua ', # 0x63
'Zuan ', # 0x64
'Xi ', # 0x65
'Qiang ', # 0x66
'Un ', # 0x67
'Da ', # 0x68
'Sheng ', # 0x69
'Hui ', # 0x6a
'Xi ', # 0x6b
'Se ', # 0x6c
'Jian ', # 0x6d
'Jiang ', # 0x6e
'Huan ', # 0x6f
'Zao ', # 0x70
'Cong ', # 0x71
'Jie ', # 0x72
'Jiao ', # 0x73
'Bo ', # 0x74
'Chan ', # 0x75
'Yi ', # 0x76
'Nao ', # 0x77
'Sui ', # 0x78
'Yi ', # 0x79
'Shai ', # 0x7a
'Xu ', # 0x7b
'Ji ', # 0x7c
'Bin ', # 0x7d
'Qian ', # 0x7e
'Lan ', # 0x7f
'Pu ', # 0x80
'Xun ', # 0x81
'Zuan ', # 0x82
'Qi ', # 0x83
'Peng ', # 0x84
'Li ', # 0x85
'Mo ', # 0x86
'Lei ', # 0x87
'Xie ', # 0x88
'Zuan ', # 0x89
'Kuang ', # 0x8a
'You ', # 0x8b
'Xu ', # 0x8c
'Lei ', # 0x8d
'Xian ', # 0x8e
'Chan ', # 0x8f
'Kou ', # 0x90
'Lu ', # 0x91
'Chan ', # 0x92
'Ying ', # 0x93
'Cai ', # 0x94
'Xiang ', # 0x95
'Xian ', # 0x96
'Zui ', # 0x97
'Zuan ', # 0x98
'Luo ', # 0x99
'Xi ', # 0x9a
'Dao ', # 0x9b
'Lan ', # 0x9c
'Lei ', # 0x9d
'Lian ', # 0x9e
'Si ', # 0x9f
'Jiu ', # 0xa0
'Yu ', # 0xa1
'Hong ', # 0xa2
'Zhou ', # 0xa3
'Xian ', # 0xa4
'He ', # 0xa5
'Yue ', # 0xa6
'Ji ', # 0xa7
'Wan ', # 0xa8
'Kuang ', # 0xa9
'Ji ', # 0xaa
'Ren ', # 0xab
'Wei ', # 0xac
'Yun ', # 0xad
'Hong ', # 0xae
'Chun ', # 0xaf
'Pi ', # 0xb0
'Sha ', # 0xb1
'Gang ', # 0xb2
'Na ', # 0xb3
'Ren ', # 0xb4
'Zong ', # 0xb5
'Lun ', # 0xb6
'Fen ', # 0xb7
'Zhi ', # 0xb8
'Wen ', # 0xb9
'Fang ', # 0xba
'Zhu ', # 0xbb
'Yin ', # 0xbc
'Niu ', # 0xbd
'Shu ', # 0xbe
'Xian ', # 0xbf
'Gan ', # 0xc0
'Xie ', # 0xc1
'Fu ', # 0xc2
'Lian ', # 0xc3
'Zu ', # 0xc4
'Shen ', # 0xc5
'Xi ', # 0xc6
'Zhi ', # 0xc7
'Zhong ', # 0xc8
'Zhou ', # 0xc9
'Ban ', # 0xca
'Fu ', # 0xcb
'Zhuo ', # 0xcc
'Shao ', # 0xcd
'Yi ', # 0xce
'Jing ', # 0xcf
'Dai ', # 0xd0
'Bang ', # 0xd1
'Rong ', # 0xd2
'Jie ', # 0xd3
'Ku ', # 0xd4
'Rao ', # 0xd5
'Die ', # 0xd6
'Heng ', # 0xd7
'Hui ', # 0xd8
'Gei ', # 0xd9
'Xuan ', # 0xda
'Jiang ', # 0xdb
'Luo ', # 0xdc
'Jue ', # 0xdd
'Jiao ', # 0xde
'Tong ', # 0xdf
'Geng ', # 0xe0
'Xiao ', # 0xe1
'Juan ', # 0xe2
'Xiu ', # 0xe3
'Xi ', # 0xe4
'Sui ', # 0xe5
'Tao ', # 0xe6
'Ji ', # 0xe7
'Ti ', # 0xe8
'Ji ', # 0xe9
'Xu ', # 0xea
'Ling ', # 0xeb
'[?] ', # 0xec
'Xu ', # 0xed
'Qi ', # 0xee
'Fei ', # 0xef
'Chuo ', # 0xf0
'Zhang ', # 0xf1
'Gun ', # 0xf2
'Sheng ', # 0xf3
'Wei ', # 0xf4
'Mian ', # 0xf5
'Shou ', # 0xf6
'Beng ', # 0xf7
'Chou ', # 0xf8
'Tao ', # 0xf9
'Liu ', # 0xfa
'Quan ', # 0xfb
'Zong ', # 0xfc
'Zhan ', # 0xfd
'Wan ', # 0xfe
'Lu ', # 0xff
)
| bsd-3-clause | 3,596,095,255,921,814,000 | -6,499,740,551,490,300,000 | 17.147287 | 20 | 0.396412 | false |
MartinSavc/scikit-learn | sklearn/neighbors/classification.py | 132 | 14388 | """Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default = 'minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[ 0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances
       but different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_samples, classes_k.size))
# a simple ':' index doesn't work right
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDtree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
outlier_label : int, optional (default = None)
        Label assigned to outlier samples (samples with no neighbors
        within the given radius).
        If set to None, a ValueError is raised when an outlier is detected.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
neigh_dist, neigh_ind = self.radius_neighbors(X)
inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
if self.outlier_label is not None:
neigh_dist[outliers] = 1e-6
elif outliers:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'give a label for outliers, '
'or consider removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
dtype=object)
if weights is None:
mode = np.array([stats.mode(pl)[0]
for pl in pred_labels[inliers]], dtype=np.int)
else:
mode = np.array([weighted_mode(pl, w)[0]
for (pl, w)
in zip(pred_labels[inliers], weights)],
dtype=np.int)
mode = mode.ravel()
y_pred[inliers, k] = classes_k.take(mode)
if outliers:
y_pred[outliers, :] = self.outlier_label
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause | -5,012,723,891,370,346,000 | 7,628,285,398,166,743,000 | 35.704082 | 79 | 0.590492 | false |
lumig242/Hue-Integration-with-CDAP | desktop/core/ext-py/openpyxl-2.3.0-b2/openpyxl/drawing/fill.py | 10 | 10642 | from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Alias,
Bool,
Integer,
Set,
NoneSet,
Typed,
MinMax,
Sequence,
)
from openpyxl.descriptors.excel import Relation
from openpyxl.descriptors.nested import (
NestedNoneSet,
NestedSequence,
)
from openpyxl.xml.constants import DRAWING_NS
from .colors import ColorChoice
from openpyxl.descriptors.excel import ExtensionList as OfficeArtExtensionList
from .effect import *
"""
Fill elements from drawing main schema
"""
class PatternFillProperties(Serialisable):
tagname = "pattFill"
prst = NoneSet(values=(['pct5', 'pct10', 'pct20', 'pct25', 'pct30', 'pct40',
'pct50', 'pct60', 'pct70', 'pct75', 'pct80', 'pct90', 'horz', 'vert',
'ltHorz', 'ltVert', 'dkHorz', 'dkVert', 'narHorz', 'narVert', 'dashHorz',
'dashVert', 'cross', 'dnDiag', 'upDiag', 'ltDnDiag', 'ltUpDiag',
'dkDnDiag', 'dkUpDiag', 'wdDnDiag', 'wdUpDiag', 'dashDnDiag',
'dashUpDiag', 'diagCross', 'smCheck', 'lgCheck', 'smGrid', 'lgGrid',
'dotGrid', 'smConfetti', 'lgConfetti', 'horzBrick', 'diagBrick',
'solidDmnd', 'openDmnd', 'dotDmnd', 'plaid', 'sphere', 'weave', 'divot',
'shingle', 'wave', 'trellis', 'zigZag']))
preset = Alias("prst")
fgClr = Typed(expected_type=ColorChoice, allow_none=True)
foreground = Alias("fgClr")
bgClr = Typed(expected_type=ColorChoice, allow_none=True)
background = Alias("bgClr")
__elements__ = ("fgClr", "bgClr")
def __init__(self,
prst=None,
fgClr=None,
bgClr=None,
):
self.prst = prst
self.fgClr = fgClr
self.bgClr = bgClr
class RelativeRect(Serialisable):
tagname = "rect"
namespace = DRAWING_NS
l = MinMax(min=0, max=100, allow_none=True)
left = Alias('l')
t = MinMax(min=0, max=100, allow_none=True)
top = Alias('t')
r = MinMax(min=0, max=100, allow_none=True)
right = Alias('r')
b = MinMax(min=0, max=100, allow_none=True)
bottom = Alias('b')
def __init__(self,
l=None,
t=None,
r=None,
b=None,
):
self.l = l
self.t = t
self.r = r
self.b = b
class StretchInfoProperties(Serialisable):
tagname = "stretch"
namespace = DRAWING_NS
fillRect = Typed(expected_type=RelativeRect, allow_none=True)
def __init__(self,
fillRect=None,
):
self.fillRect = fillRect
class GradientStop(Serialisable):
tagname = "gradStop"
pos = MinMax(min=0, max=100, allow_none=True)
# Color Choice Group
def __init__(self,
pos=None,
):
self.pos = pos
class GradientStopList(Serialisable):
tagname = "gradStopLst"
gs = Sequence(expected_type=GradientStop)
def __init__(self,
gs=None,
):
if gs is None:
gs = [GradientStop(), GradientStop()]
self.gs = gs
class LinearShadeProperties(Serialisable):
ang = Integer()
scaled = Bool(allow_none=True)
def __init__(self,
ang=None,
scaled=None,
):
self.ang = ang
self.scaled = scaled
class PathShadeProperties(Serialisable):
path = Set(values=(['shape', 'circle', 'rect']))
fillToRect = Typed(expected_type=RelativeRect, allow_none=True)
def __init__(self,
path=None,
fillToRect=None,
):
self.path = path
self.fillToRect = fillToRect
class GradientFillProperties(Serialisable):
tagname = "gradFill"
flip = NoneSet(values=(['x', 'y', 'xy']))
rotWithShape = Bool(allow_none=True)
gsLst = Typed(expected_type=GradientStopList, allow_none=True)
stop_list = Alias("gsLst")
lin = Typed(expected_type=LinearShadeProperties, allow_none=True)
linear = Alias("lin")
path = Typed(expected_type=PathShadeProperties, allow_none=True)
tileRect = Typed(expected_type=RelativeRect, allow_none=True)
__elements__ = ('gsLst', 'lin', 'path', 'tileRect')
def __init__(self,
flip=None,
rotWithShape=None,
gsLst=None,
lin=None,
path=None,
tileRect=None,
):
self.flip = flip
self.rotWithShape = rotWithShape
self.gsLst = gsLst
self.lin = lin
self.path = path
self.tileRect = tileRect
class Blip(Serialisable):
tagname = "blip"
namespace = DRAWING_NS
    # Using attribute group AG_Blob
cstate = NoneSet(values=(['email', 'screen', 'print', 'hqprint']))
embed = Relation() #rId
link = Relation() #hyperlink
noGrp = Bool(allow_none=True)
noSelect = Bool(allow_none=True)
noRot = Bool(allow_none=True)
noChangeAspect = Bool(allow_none=True)
noMove = Bool(allow_none=True)
noResize = Bool(allow_none=True)
noEditPoints = Bool(allow_none=True)
noAdjustHandles = Bool(allow_none=True)
noChangeArrowheads = Bool(allow_none=True)
noChangeShapeType = Bool(allow_none=True)
# some elements are choice
extLst = Typed(expected_type=OfficeArtExtensionList, allow_none=True)
alphaBiLevel = Typed(expected_type=AlphaBiLevelEffect, allow_none=True)
alphaCeiling = Typed(expected_type=AlphaCeilingEffect, allow_none=True)
alphaFloor = Typed(expected_type=AlphaFloorEffect, allow_none=True)
alphaInv = Typed(expected_type=AlphaInverseEffect, allow_none=True)
alphaMod = Typed(expected_type=AlphaModulateEffect, allow_none=True)
alphaModFix = Typed(expected_type=AlphaModulateFixedEffect, allow_none=True)
alphaRepl = Typed(expected_type=AlphaReplaceEffect, allow_none=True)
biLevel = Typed(expected_type=BiLevelEffect, allow_none=True)
blur = Typed(expected_type=BlurEffect, allow_none=True)
clrChange = Typed(expected_type=ColorChangeEffect, allow_none=True)
clrRepl = Typed(expected_type=ColorReplaceEffect, allow_none=True)
duotone = Typed(expected_type=DuotoneEffect, allow_none=True)
fillOverlay = Typed(expected_type=FillOverlayEffect, allow_none=True)
grayscl = Typed(expected_type=GrayscaleEffect, allow_none=True)
hsl = Typed(expected_type=HSLEffect, allow_none=True)
lum = Typed(expected_type=LuminanceEffect, allow_none=True)
tint = Typed(expected_type=TintEffect, allow_none=True)
__elements__ = ('alphaBiLevel', 'alphaCeiling', 'alphaFloor', 'alphaInv',
'alphaMod', 'alphaModFix', 'alphaRepl', 'biLevel', 'blur', 'clrChange',
'clrRepl', 'duotone', 'fillOverlay', 'grayscl', 'hsl', 'lum', 'tint')
def __init__(self,
cstate=None,
embed=None,
link=None,
noGrp=None,
noSelect=None,
noRot=None,
noChangeAspect=None,
noMove=None,
noResize=None,
noEditPoints=None,
noAdjustHandles=None,
noChangeArrowheads=None,
noChangeShapeType=None,
extLst=None,
alphaBiLevel=None,
alphaCeiling=None,
alphaFloor=None,
alphaInv=None,
alphaMod=None,
alphaModFix=None,
alphaRepl=None,
biLevel=None,
blur=None,
clrChange=None,
clrRepl=None,
duotone=None,
fillOverlay=None,
grayscl=None,
hsl=None,
lum=None,
tint=None,
):
self.cstate = cstate
self.embed = embed
self.link = link
self.noGrp = noGrp
self.noSelect = noSelect
self.noRot = noRot
self.noChangeAspect = noChangeAspect
self.noMove = noMove
self.noResize = noResize
self.noEditPoints = noEditPoints
self.noAdjustHandles = noAdjustHandles
self.noChangeArrowheads = noChangeArrowheads
self.noChangeShapeType = noChangeShapeType
self.extLst = extLst
self.alphaBiLevel = alphaBiLevel
self.alphaCeiling = alphaCeiling
self.alphaFloor = alphaFloor
self.alphaInv = alphaInv
self.alphaMod = alphaMod
self.alphaModFix = alphaModFix
self.alphaRepl = alphaRepl
self.biLevel = biLevel
self.blur = blur
self.clrChange = clrChange
self.clrRepl = clrRepl
self.duotone = duotone
self.fillOverlay = fillOverlay
self.grayscl = grayscl
self.hsl = hsl
self.lum = lum
self.tint = tint
class TileInfoProperties(Serialisable):
tx = Integer(allow_none=True)
ty = Integer(allow_none=True)
sx = Integer(allow_none=True)
sy = Integer(allow_none=True)
flip = NoneSet(values=(['x', 'y', 'xy']))
algn = Set(values=(['tl', 't', 'tr', 'l', 'ctr', 'r', 'bl', 'b', 'br']))
def __init__(self,
tx=None,
ty=None,
sx=None,
sy=None,
flip=None,
algn=None,
):
self.tx = tx
self.ty = ty
self.sx = sx
self.sy = sy
self.flip = flip
self.algn = algn
class BlipFillProperties(Serialisable):
tagname = "blipFill"
dpi = Integer(allow_none=True)
rotWithShape = Bool(allow_none=True)
blip = Typed(expected_type=Blip, allow_none=True)
srcRect = Typed(expected_type=RelativeRect, allow_none=True)
tile = Typed(expected_type=TileInfoProperties, allow_none=True)
stretch = Typed(expected_type=StretchInfoProperties, allow_none=True)
__elements__ = ("blip", "srcRect", "tile", "stretch")
def __init__(self,
dpi=None,
rotWithShape=None,
blip=None,
tile=None,
stretch=None,
srcRect=None,
):
self.dpi = dpi
self.rotWithShape = rotWithShape
self.blip = blip
self.tile = tile
self.stretch = stretch
self.srcRect = srcRect
| apache-2.0 | -8,142,554,502,804,581,000 | 5,564,260,886,740,702,000 | 29.492837 | 97 | 0.570757 | false |
django-danceschool/django-danceschool | danceschool/discounts/tests.py | 1 | 20249 | from django.urls import reverse
from django.utils import timezone
from datetime import timedelta
from danceschool.core.constants import REG_VALIDATION_STR, updateConstant
from danceschool.core.utils.tests import DefaultSchoolTestCase
from danceschool.core.models import Invoice, Registration
from .models import (
PointGroup, PricingTierGroup, DiscountCategory, DiscountCombo, DiscountComboComponent
)
class BaseDiscountsTest(DefaultSchoolTestCase):
def create_discount(self, **kwargs):
'''
This method just creates the necessary objects to create a simple discount
with a single required component.
'''
test_group, created = PointGroup.objects.get_or_create(
name=kwargs.get('pointGroupName', 'Test points')
)
pt_group, created = PricingTierGroup.objects.get_or_create(
group=test_group,
pricingTier=self.defaultPricing,
points=kwargs.get('pricingTierGroupPoints', 5),
)
# Create a flat price combo that just knocks $5 off the regular price
test_combo = DiscountCombo(
name=kwargs.get('name', 'Test Discount'),
category=kwargs.get('category', DiscountCategory.objects.get(id=1)),
discountType=kwargs.get('discountType', DiscountCombo.DiscountType.flatPrice),
onlinePrice=kwargs.get('onlinePrice', self.defaultPricing.onlinePrice - 5),
doorPrice=kwargs.get('doorPrice', self.defaultPricing.doorPrice - 5),
dollarDiscount=kwargs.get('dollarDiscount', 10),
percentDiscount=kwargs.get('percentDiscount', 50),
percentUniversallyApplied=kwargs.get('percentUniversallyApplied', False),
active=kwargs.get('active', True),
newCustomersOnly=kwargs.get('newCustomersOnly', False),
daysInAdvanceRequired=kwargs.get('daysInAdvanceRequired', None),
expirationDate=kwargs.get('expirationDate', None),
)
test_combo.save()
test_component = DiscountComboComponent.objects.create(
discountCombo=test_combo,
pointGroup=test_group,
quantity=kwargs.get('quantity', 5),
allWithinPointGroup=kwargs.get('allWithinPointGroup', False),
)
return (test_combo, test_component)
def register_to_check_discount(self, series, expected_amount=None):
'''
This method makes it easy to determine whether discounts are working
correctly for a single class registration
'''
s = series
response = self.client.get(reverse('registration'))
self.assertEqual(response.status_code, 200)
self.assertIn(s, response.context_data.get('regOpenSeries'))
# Sign up for the series, and check that we proceed to the student information page.
# Because of the way that roles are encoded on this form, we just grab the value to pass
# from the form itself.
post_data = {'series_%s_%s' % (
s.id, response.context_data['form'].fields['series_%s' % s.id].field_choices[0].get('value')
): [1,]}
response = self.client.post(reverse('registration'), post_data, follow=True)
self.assertEqual(response.redirect_chain, [(reverse('getStudentInfo'), 302)])
invoice = Invoice.objects.get(
id=self.client.session[REG_VALIDATION_STR].get('invoiceId')
)
tr = Registration.objects.filter(invoice=invoice).first()
self.assertTrue(tr.eventregistration_set.filter(event__id=s.id).exists())
self.assertFalse(tr.final)
# Check that the student info page lists the correct subtotal with
# the discount applied
self.assertEqual(invoice.grossTotal, s.getBasePrice())
if expected_amount is not None:
self.assertEqual(response.context_data.get('invoice').total, expected_amount)
# Continue to the summary page
post_data = {
'firstName': 'Discounted',
'lastName': 'Customer',
'email': '[email protected]',
'agreeToPolicies': True,
}
return self.client.post(reverse('getStudentInfo'), post_data, follow=True)
class DiscountsConditionsTest(BaseDiscountsTest):
def test_inactive_discount(self):
'''
Make a discount inactive and make sure that it doesn't work
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount(active=False)
s = self.create_series(pricingTier=self.defaultPricing)
response = self.register_to_check_discount(s, s.getBasePrice())
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 0)
self.assertFalse(response.context_data.get('addonItems'))
self.assertFalse(response.context_data.get('discount_codes'))
def test_expired_discount(self):
'''
Create an expired discount and make sure that it doesn't work.
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount(
expirationDate=timezone.now() + timedelta(days=-1)
)
s = self.create_series(pricingTier=self.defaultPricing)
response = self.register_to_check_discount(s, s.getBasePrice())
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 0)
self.assertFalse(response.context_data.get('addonItems'))
self.assertFalse(response.context_data.get('discount_codes'))
def test_discounts_disabled(self):
''' Disable discounts and check that they don't work anymore '''
updateConstant('general__discountsEnabled', False)
test_combo, test_component = self.create_discount()
s = self.create_series(pricingTier=self.defaultPricing)
response = self.register_to_check_discount(s, s.getBasePrice())
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 0)
self.assertFalse(response.context_data.get('addonItems'))
self.assertFalse(response.context_data.get('discount_codes'))
def test_notenoughpoints(self):
'''
        Set the discount component's required point quantity above what the
        registration earns, and check that the discount doesn't get applied.
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount(quantity=10)
s = self.create_series(pricingTier=self.defaultPricing)
response = self.register_to_check_discount(s, s.getBasePrice())
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 0)
self.assertFalse(response.context_data.get('addonItems'))
self.assertFalse(response.context_data.get('discount_codes'))
def test_noearlybird(self):
'''
Create an early registration discount that requires three day
advance registration and ensure that it does not work less than
three days in advance.
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount(daysInAdvanceRequired=3)
s = self.create_series(
pricingTier=self.defaultPricing,
startTime=timezone.now() + timedelta(days=1)
)
response = self.register_to_check_discount(s, s.getBasePrice())
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 0)
self.assertFalse(response.context_data.get('addonItems'))
self.assertFalse(response.context_data.get('discount_codes'))
class DiscountsTypesTest(BaseDiscountsTest):
def test_discount_applies(self):
'''
Create a flat $5 discount and test that it applies
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount()
s = self.create_series(pricingTier=self.defaultPricing)
response = self.register_to_check_discount(s, s.getBasePrice() - 5)
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal - 5
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 5)
self.assertFalse(response.context_data.get('addonItems'))
discount_codes = response.context_data.get('discount_codes')
self.assertEqual([x[0] for x in discount_codes], [test_combo.name, ])
def test_earlybird(self):
'''
Create an early registration discount that requires three day
advance registration and ensure that it works more than
three days in advance.
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount(daysInAdvanceRequired=3)
s = self.create_series(
pricingTier=self.defaultPricing,
startTime=timezone.now() + timedelta(days=4)
)
response = self.register_to_check_discount(s, s.getBasePrice() - 5)
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal - 5
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 5)
self.assertFalse(response.context_data.get('addonItems'))
discount_codes = response.context_data.get('discount_codes')
self.assertEqual([x[0] for x in discount_codes], [test_combo.name, ])
def test_allwithinpointgroup(self):
'''
Set a discount to apply to an entire point group and check that the price
is still the flat price
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount(quantity=1, allWithinPointGroup=True)
s = self.create_series(pricingTier=self.defaultPricing)
response = self.register_to_check_discount(s, s.getBasePrice() - 5)
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal - 5
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 5)
self.assertFalse(response.context_data.get('addonItems'))
discount_codes = response.context_data.get('discount_codes')
self.assertEqual([x[0] for x in discount_codes], [test_combo.name, ])
def test_dollarDiscount(self):
'''
Create a $10 off discount and check that it applies appropriately
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount(
discountType=DiscountCombo.DiscountType.dollarDiscount,
dollarDiscount=10
)
s = self.create_series(pricingTier=self.defaultPricing)
response = self.register_to_check_discount(s, s.getBasePrice() - 10)
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal - 10
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 10)
self.assertFalse(response.context_data.get('addonItems'))
discount_codes = response.context_data.get('discount_codes')
self.assertEqual([x[0] for x in discount_codes], [test_combo.name, ])
def test_percentDiscount(self):
'''
Create a 50% off discount and check that it applies correctly.
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount(
discountType=DiscountCombo.DiscountType.percentDiscount,
percentDiscount=50,
percentUniversallyApplied=False
)
s = self.create_series(pricingTier=self.defaultPricing)
response = self.register_to_check_discount(s, s.getBasePrice()*0.5)
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, 0.5 * invoice.grossTotal
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(
response.context_data.get('total_discount_amount'),
0.5 * invoice.grossTotal
)
self.assertFalse(response.context_data.get('addonItems'))
discount_codes = response.context_data.get('discount_codes')
self.assertEqual([x[0] for x in discount_codes], [test_combo.name, ])
def test_addOnItem(self):
'''
Create a free add-on item and ensure that it is applied correctly.
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount(
discountType=DiscountCombo.DiscountType.addOn,
name='Test Free Add-On',
)
s = self.create_series(pricingTier=self.defaultPricing)
response = self.register_to_check_discount(s, s.getBasePrice())
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 0)
self.assertTrue(response.context_data.get('addonItems'))
self.assertFalse(response.context_data.get('discount_codes'))
def test_discountmakesitfree(self):
'''
Make the dollar discount larger than the base price and check that
the registration is free, that the registration is processed and that
a $0 invoice is created.
'''
updateConstant('general__discountsEnabled', True)
s = self.create_series(pricingTier=self.defaultPricing)
test_combo, test_component = self.create_discount(
discountType=DiscountCombo.DiscountType.dollarDiscount,
dollarDiscount=s.getBasePrice() + 10
)
response = self.register_to_check_discount(s, 0)
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(invoice.total, 0)
self.assertEqual(response.context_data.get('zero_balance'), True)
self.assertEqual(response.context_data.get('total_discount_amount'), s.getBasePrice())
self.assertFalse(response.context_data.get('addonItems'))
discount_codes = response.context_data.get('discount_codes')
self.assertEqual([x[0] for x in discount_codes], [test_combo.name, ])
# Since the above registration was free, check that the registration actually
# processed, and that there exists a paid Invoice for $0
finalReg = response.context_data.get('registration')
invoice = response.context_data.get('invoice')
self.assertTrue(finalReg)
self.assertTrue(finalReg.invoice)
self.assertEqual(finalReg.invoice, invoice)
self.assertTrue(invoice.status == Invoice.PaymentStatus.paid)
self.assertEqual(invoice.outstandingBalance, 0)
self.assertEqual(invoice.total, 0)
self.assertTrue(finalReg.final)
# Check that the invoice no longer has an expiration date
self.assertIsNone(invoice.expirationDate)
# Check that the RegistrationDiscount associated with this registration
# has been applied.
self.assertTrue(finalReg.registrationdiscount_set.first().applied)
# Show that multiple registrations by the same customer are not permitted
response = self.register_to_check_discount(s)
self.assertIn(
'You are already registered for',
' '.join(response.context_data['form'].errors.get('__all__'))
)
def test_largerdiscountapplies(self):
'''
Create both a $10 discount and a $20 discount, and ensure that the
larger discount applies
'''
updateConstant('general__discountsEnabled', True)
s = self.create_series(pricingTier=self.defaultPricing)
test_combo, test_component = self.create_discount(
discountType=DiscountCombo.DiscountType.dollarDiscount,
dollarDiscount=10
)
bigger_combo, bigger_component = self.create_discount(
discountType=DiscountCombo.DiscountType.dollarDiscount,
dollarDiscount=20,
name='Bigger Discount'
)
response = self.register_to_check_discount(s, s.getBasePrice() - 20)
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal - 20
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 20)
self.assertFalse(response.context_data.get('addonItems'))
discount_codes = response.context_data.get('discount_codes')
self.assertEqual([x[0] for x in discount_codes], [bigger_combo.name, ])
| bsd-3-clause | -6,661,962,433,318,377,000 | -559,663,101,872,050,300 | 43.503297 | 104 | 0.661909 | false |
smurfix/DaBroker | dabroker/base/transport/__init__.py | 1 | 4226 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division, unicode_literals
##
## This file is part of DaBroker, a distributed data access manager.
##
## DaBroker is Copyright © 2014 by Matthias Urlichs <[email protected]>,
## it is licensed under the GPLv3. See the file `README.rst` for details,
## including optimistic statements by the author.
##
## This paragraph is auto-generated and may self-destruct at any time,
## courtesy of "make update". The original is in ‘utils/_boilerplate.py’.
## Thus, please do not remove the next line, or insert any blank lines.
##BP
from gevent import GreenletExit
from dabroker.util.thread import prep_spawned
import logging
logger = logging.getLogger("dabroker.base.transport")
class ConnectionError(RuntimeError):
pass
class BaseCallbacks(object):
def recv(self,msg):
"""Incoming message from the other side. NOT used for receiving replies!"""
raise NotImplementedError("You need to override {}.recv()".format(self.__class__.__name__))
def send(self,msg):
"""Outgoing message to the other side. NOT used for sending replies!"""
raise NotImplementedError("You need to override {}.send()".format(self.__class__.__name__))
def ended(self,err=None):
"""Called on receiver error. Do not reconnect here!"""
pass
def reconnect(self,err=None):
"""Called after a closed connection has been cleaned up"""
pass
def register_codec(self,codec):
raise NotImplementedError("You need to override {}.register_codec()".format(self.__class__.__name__))
class RelayedError(Exception):
"""An encapsulation for a server error (with traceback)"""
def __init__(self,err,tb):
self.err = str(err)
self.tb = tb
def __repr__(self):
return "{}({})".format(self.__class__.__name__,self.err)
def __str__(self):
r = repr(self)
if self.tb is None: return r
return r+"\n"+self.tb
class BaseTransport(object):
_job = None
defaults = {}
connection = None
last_msgid = 0
def __init__(self,callbacks, cfg={}):
self.cfg = self.defaults.copy()
self.cfg.update(cfg)
self.callbacks = callbacks
self.trace = cfg.get('trace',0)
def connect(self, purge=False):
"""Connect. (Synchronously.)
Do not override!
Override .connect1() (setup) and .connect2() (initial tasks)"""
assert self.callbacks is not None
assert self.connection is None
self.connect1()
if purge:
self.purge_all()
self.connect2()
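    # Sketch of the expected override pattern for a concrete transport
    # (assumed example, not part of the original code): connect1() opens the
    # channel, run() is the receive loop started via connect2(), send() writes.
    #
    #   class DummyTransport(BaseTransport):
    #       def connect1(self):
    #           super(DummyTransport, self).connect1()
    #           self.connection = object()   # stand-in for a real channel/socket
    #       def send(self, msg):
    #           pass                         # write `msg` to the channel here
    #       def run(self):
    #           pass                         # read messages and call self.callbacks.recv(msg)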
def connect1(self):
"""Set up a connection.
Call super() before your code."""
if self._job is not None:
raise RuntimeError("Already connected")
logger.debug("connecting: %r",self)
def connect2(self):
"""Add initial tasks after a connection has been established.
Call super() after your code."""
assert self._job is None
self._job = self._run_job()
self._job.start()
def disconnect(self):
"""Sever the connection; do not auto-reconnect."""
logger.debug("disconnecting: %r",self)
j,self._job = self._job,None
if j:
j.stop()
def disconnected(self, err=None):
"""Clear connection objects.
This will be called by the reader task as it exits.
Do not reconnect from here; do that in your .reconnect"""
logger.debug("disconnected: %r",self)
def purge_all(self):
"""
Clear this transport's message queue.
This should only be called when client and server are known to
be idle AND when you suspect an unprocessable message might
clog the queue.
"""
pass
def send(self,msg):
raise NotImplementedError("You need to override {}.send()".format(self.__class__.__name__))
def run(self):
raise NotImplementedError("You need to override {}.run()".format(self.__class__.__name__))
@prep_spawned
def _run_job(self):
try:
logger.debug("Running receiver loop: %r",self)
self.run()
except GreenletExit:
err=None
logger.debug("Receiver loop ends: %r",self)
self.callbacks.ended(None)
except BaseException as e:
err = e
logger.exception("Receiver loop error: %r",self)
self.callbacks.ended(e)
else:
err=None
logger.debug("Receiver loop ends: %r",self)
self.callbacks.ended(None)
finally:
self.disconnected()
if self._job is not None:
self._job = None
self.callbacks.reconnect(err)
| gpl-3.0 | -363,831,245,947,413,400 | 5,208,197,145,718,905,000 | 26.769737 | 103 | 0.689647 | false |
Tennyson53/SUR | magnum/tests/unit/common/cert_manager/test_local.py | 3 | 5314 | # Copyright 2014 Rackspace US, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from magnum.common.cert_manager import cert_manager
from magnum.common.cert_manager import local_cert_manager
from magnum.tests import base
class TestLocalCert(base.BaseTestCase):
def setUp(self):
self.certificate = "My Certificate"
self.intermediates = "My Intermediates"
self.private_key = "My Private Key"
self.private_key_passphrase = "My Private Key Passphrase"
super(TestLocalCert, self).setUp()
def test_local_cert(self):
# Create a cert
cert = local_cert_manager.Cert(
certificate=self.certificate,
intermediates=self.intermediates,
private_key=self.private_key,
private_key_passphrase=self.private_key_passphrase
)
# Validate the cert functions
self.assertEqual(cert.get_certificate(), self.certificate)
self.assertEqual(cert.get_intermediates(), self.intermediates)
self.assertEqual(cert.get_private_key(), self.private_key)
self.assertEqual(cert.get_private_key_passphrase(),
self.private_key_passphrase)
class TestLocalManager(base.BaseTestCase):
def setUp(self):
self.certificate = "My Certificate"
self.intermediates = "My Intermediates"
self.private_key = "My Private Key"
self.private_key_passphrase = "My Private Key Passphrase"
conf = oslo_fixture.Config(cfg.CONF)
conf.config(group="certificates", storage_path="/tmp/")
super(TestLocalManager, self).setUp()
def _store_cert(self):
file_mock = mock.mock_open()
# Attempt to store the cert
with mock.patch('__builtin__.open', file_mock, create=True):
cert_id = local_cert_manager.CertManager.store_cert(
certificate=self.certificate,
intermediates=self.intermediates,
private_key=self.private_key,
private_key_passphrase=self.private_key_passphrase
)
# Check that something came back
self.assertIsNotNone(cert_id)
# Verify the correct files were opened
file_mock.assert_has_calls([
mock.call(os.path.join('/tmp/{0}.crt'.format(cert_id)), 'w'),
mock.call(os.path.join('/tmp/{0}.key'.format(cert_id)), 'w'),
mock.call(os.path.join('/tmp/{0}.int'.format(cert_id)), 'w'),
mock.call(os.path.join('/tmp/{0}.pass'.format(cert_id)), 'w')
], any_order=True)
# Verify the writes were made
file_mock().write.assert_has_calls([
mock.call(self.certificate),
mock.call(self.intermediates),
mock.call(self.private_key),
mock.call(self.private_key_passphrase)
], any_order=True)
return cert_id
def _get_cert(self, cert_id):
file_mock = mock.mock_open()
# Attempt to retrieve the cert
with mock.patch('__builtin__.open', file_mock, create=True):
data = local_cert_manager.CertManager.get_cert(cert_id)
# Verify the correct files were opened
file_mock.assert_has_calls([
mock.call(os.path.join('/tmp/{0}.crt'.format(cert_id)), 'r'),
mock.call(os.path.join('/tmp/{0}.key'.format(cert_id)), 'r'),
mock.call(os.path.join('/tmp/{0}.int'.format(cert_id)), 'r'),
mock.call(os.path.join('/tmp/{0}.pass'.format(cert_id)), 'r')
], any_order=True)
# The returned data should be a Cert object
self.assertIsInstance(data, cert_manager.Cert)
return data
def _delete_cert(self, cert_id):
remove_mock = mock.Mock()
# Delete the cert
with mock.patch('os.remove', remove_mock):
local_cert_manager.CertManager.delete_cert(cert_id)
# Verify the correct files were removed
remove_mock.assert_has_calls([
mock.call(os.path.join('/tmp/{0}.crt'.format(cert_id))),
mock.call(os.path.join('/tmp/{0}.key'.format(cert_id))),
mock.call(os.path.join('/tmp/{0}.int'.format(cert_id))),
mock.call(os.path.join('/tmp/{0}.pass'.format(cert_id)))
], any_order=True)
def test_store_cert(self):
self._store_cert()
def test_get_cert(self):
# Store a cert
cert_id = self._store_cert()
# Get the cert
self._get_cert(cert_id)
def test_delete_cert(self):
# Store a cert
cert_id = self._store_cert()
# Verify the cert exists
self._get_cert(cert_id)
# Delete the cert
self._delete_cert(cert_id)
| apache-2.0 | -1,888,023,148,428,827,400 | -1,387,779,027,706,338,300 | 34.905405 | 78 | 0.616108 | false |
iocast/vectorformats | vectorformats/formats/dxf.py | 2 | 1362 | from dxfwrite import DXFEngine as dxf
from .format import Format
class DXF(Format):
_drawing = None
def encode(self, features, **kwargs):
tmpFile = kwargs["tmpFile"]
if len(features) > 0:
self._drawing = dxf.drawing(tmpFile)
self._drawing.add_layer("featureserver")
for feature in features:
self.encode_feature(feature)
self._drawing.save()
return self._drawing
def encode_feature(self, feature):
if feature["geometry"]["type"] == "Point":
self._drawing.add(dxf.point(point=(feature["geometry"]["coordinates"][0],feature["geometry"]["coordinates"][1])))
elif feature["geometry"]["type"] == "LineString":
polyline= dxf.polyline()
coords = feature["geometry"]["coordinates"]
for coord in coords:
polyline.add_vertex((coord[0], coord[1]))
self._drawing.add(polyline)
elif feature["geometry"]["type"] == "Polygon":
polygon = dxf.polyline()
coords = feature["geometry"]["coordinates"]
for coord in coords:
for point in coord:
polygon.add_vertex((point[0], point[1]))
polygon.close()
self._drawing.add(polygon)
| mit | -5,840,325,021,177,553,000 | -7,445,329,932,258,449,000 | 29.266667 | 125 | 0.538179 | false |
joelsmith/openshift-tools | ansible/roles/lib_openshift_3.2/build/src/oc_user.py | 13 | 4702 | # vim: expandtab:tabstop=4:shiftwidth=4
# pylint: skip-file
# pylint: disable=too-many-instance-attributes
class OCUser(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
kind = 'users'
# pylint allows 5
# pylint: disable=too-many-arguments
def __init__(self,
config,
groups=None,
verbose=False):
        ''' Constructor for OCUser '''
super(OCUser, self).__init__(config.namespace, config.kubeconfig)
self.config = config
self.groups = groups
self._user = None
@property
def user(self):
        ''' property function for user '''
if not self._user:
self.get()
return self._user
@user.setter
def user(self, data):
        ''' setter function for user var '''
self._user = data
def exists(self):
''' return whether a user exists '''
if self.user:
return True
return False
def get(self):
'''return user information '''
result = self._get(self.kind, self.config.username)
if result['returncode'] == 0:
self.user = User(content=result['results'][0])
elif 'users \"%s\" not found' % self.config.username in result['stderr']:
result['returncode'] = 0
result['results'] = [{}]
return result
def delete(self):
'''delete the object'''
return self._delete(self.kind, self.config.username)
    def create_group_entries(self):
        ''' make entries for user to the provided group list '''
        rval = {'returncode': 0}
        if self.groups:
            for group in self.groups:
                cmd = ['groups', 'add-users', group, self.config.username]
                rval = self.openshift_cmd(cmd, oadm=True)
                if rval['returncode'] != 0:
                    return rval
        return rval
def create(self):
'''create the object'''
rval = self.create_group_entries()
if rval['returncode'] != 0:
return rval
return self._create_from_content(self.config.username, self.config.data)
def group_update(self):
''' update group membership '''
rval = {'returncode': 0}
cmd = ['get', 'groups', '-n', self.namespace, '-o', 'json']
all_groups = self.openshift_cmd(cmd, output=True)
for group in all_groups['results']['items']:
# If we're supposed to be in this group
if group['metadata']['name'] in self.groups \
and ( group['users'] == None or self.config.username not in group['users']):
cmd = ['groups', 'add-users', group['metadata']['name'],
self.config.username]
rval = self.openshift_cmd(cmd, oadm=True)
if rval['returncode'] != 0:
return rval
# else if we're in the group, but aren't supposed to be
            elif group['users'] is not None and self.config.username in group['users'] \
and group['metadata']['name'] not in self.groups:
cmd = ['groups', 'remove-users', group['metadata']['name'],
self.config.username]
rval = self.openshift_cmd(cmd, oadm=True)
if rval['returncode'] != 0:
return rval
return rval
def update(self):
'''update the object'''
rval = self.group_update()
if rval['returncode'] != 0:
return rval
# need to update the user's info
return self._replace_content(self.kind, self.config.username, self.config.data, force=True)
def needs_group_update(self):
''' check if there are group membership changes '''
cmd = ['get', 'groups', '-n', self.namespace, '-o', 'json']
all_groups = self.openshift_cmd(cmd, output=True)
for group in all_groups['results']['items']:
# If we're supposed to be in this group
if group['metadata']['name'] in self.groups \
and ( group['users'] == None or self.config.username not in group['users']):
return True
# else if we're in the group, but aren't supposed to be
            elif group['users'] is not None and self.config.username in group['users'] \
and group['metadata']['name'] not in self.groups:
return True
return False
def needs_update(self):
''' verify an update is needed '''
skip = []
if self.needs_group_update() == True:
return True
return not Utils.check_def_equal(self.config.data, self.user.yaml_dict, skip_keys=skip, debug=True)
| apache-2.0 | -7,607,204,467,323,236,000 | -6,667,390,456,354,896,000 | 34.089552 | 107 | 0.54211 | false |
vaidap/zulip | zerver/webhooks/slack/view.py | 3 | 1608 | from __future__ import absolute_import
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from zerver.lib.actions import check_send_message, create_stream_if_needed
from zerver.lib.response import json_success, json_error
from zerver.lib.validator import check_string, check_int
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from zerver.models import UserProfile
ZULIP_MESSAGE_TEMPLATE = u"**{message_sender}**: `{text}`"
VALID_OPTIONS = {'SHOULD_NOT_BE_MAPPED': '0', 'SHOULD_BE_MAPPED': '1'}
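# Routing summary (derived from the view below): when channels_map_to_topics
# is '1', messages stay in the `stream` parameter (default 'slack') and the
# Slack channel name becomes the topic ("channel: <name>"); when it is '0',
# each Slack channel maps to a Zulip stream of the same name under a fixed topic.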
@api_key_only_webhook_view('Slack')
@has_request_variables
def api_slack_webhook(request, user_profile,
user_name=REQ(),
text=REQ(),
channel_name=REQ(),
stream=REQ(default='slack'),
channels_map_to_topics=REQ(default='1')):
# type: (HttpRequest, UserProfile, str, str, str, str, str) -> HttpResponse
if channels_map_to_topics not in list(VALID_OPTIONS.values()):
return json_error(_('Error: channels_map_to_topics parameter other than 0 or 1'))
if channels_map_to_topics == VALID_OPTIONS['SHOULD_BE_MAPPED']:
subject = "channel: {}".format(channel_name)
else:
stream = channel_name
subject = _("Message from Slack")
content = ZULIP_MESSAGE_TEMPLATE.format(message_sender=user_name, text=text)
check_send_message(user_profile, request.client, "stream", [stream], subject, content)
return json_success()
| apache-2.0 | -369,421,671,068,903,700 | 2,545,614,578,605,985,300 | 44.942857 | 90 | 0.677861 | false |
tbekolay/neurotools | examples/single_neuron/CRF_neuron_vs_signal.py | 3 | 3070 | #!/usr/bin/env python
# -*- coding: utf8 -*-
"""
CRF_neuron_vs_signal.py
Testing the mean firing rate of a fiber for different signal strengths.
Prints to a figure the mean firing rate for the output (ON and OFF) as a function
of the different parameter values. It's similar to a CRF function.
Results illustrate that
- the higher the value the more the neuron spikes (wouah!),
- that this follows a ramp-type of function
- and that noise "smoothes" the transition in the input/output function.
TODO: do a better plot as in benchmark_neuron_vs_noise.py
$Id: CRF_neuron_vs_signal.py 362 2008-12-08 17:35:59Z LaurentPerrinet $
"""
import os, sys, numpy, pylab, shelve
from NeuroTools.parameters import *
# this is not mandatory but just a "easy_install progressbar" away
# else remove all corresponding 3 lines in this code...
import progressbar # see http://projects.scipy.org/pipermail/scipy-dev/2008-January/008200.html
N_exp_snr = 20
N_exp_noise = 9
ps = ParameterSpace({
'snr' : ParameterRange(list(numpy.linspace(-1.,4.,N_exp_snr))),
'noise_std' : ParameterRange(list(10.**(numpy.linspace(-.50,1.,N_exp_noise))))})
name = sys.argv[0].split('.')[0] # name of the current script without the '.py' part
results = shelve.open('results/mat-' + name)
try:
CRF = results['CRF']
except:
# calculates the dimension of the parameter space
results_dim, results_label = ps.parameter_space_dimension_labels()
# creates results array with size of parameter space dimension
import simple_single_neuron as model
myFibers = model.FiberChannel()
CRF = numpy.empty(results_dim)
pbar=progressbar.ProgressBar(widgets=[name, " ", progressbar.Percentage(), ' ',
progressbar.Bar(), ' ', progressbar.ETA()], maxval=numpy.prod(results_dim))
for i_exp,experiment in enumerate(ps.iter_inner()):
params = myFibers.params
params.update(experiment) # updates what changed in the dictionary
# simulate the experiment and get its data
data = myFibers.run(params,verbose=False)
# calculating the index in the parameter space
index = ps.parameter_space_index(experiment)
# put the data at the right position in the results array
CRF[index] = data.mean_rate()#
pbar.update(i_exp)
results['CRF'] = CRF
pbar.finish()
results.close()
#numpy.array(p.noise_std._values),numpy.array(p.snr._values),
#pylab.plot(ps.snr._values,CRF.transpose()) #color = (sin(2*pi*noise_list)**2,cos(2*pi*noise_list)**2,1))
for i_noise, noise in enumerate(ps.noise_std._values):
pylab.plot(ps.snr._values,CRF[i_noise,:], label='noise = %5.3f' % noise)
#pylab.yticks(p.noise_std._values[:2:])
pylab.ylabel('Firing Rate (Hz/neuron)')
#pylab.xticks(p.snr._values[:2:])
pylab.xlabel('Signal')
pylab.legend(loc = 'lower right')
pylab.axis([numpy.min(ps.snr._values), numpy.max(ps.snr._values), 0.0, numpy.max(CRF[:])])
if 0:
pylab.show()
else:
pylab.savefig('results/fig-' + name + '.pdf')
pylab.savefig('results/fig-' + name + '.png')
| gpl-2.0 | 5,842,625,130,732,938,000 | 6,471,986,515,985,873,000 | 35.987952 | 105 | 0.685668 | false |
chrisjaquet/FreeCAD | src/Mod/Path/PathScripts/nc/dynapath.py | 30 | 1067 | import nc
import iso
import math
import datetime
import time
from format import Format
now = datetime.datetime.now()
class Creator(iso.Creator):
def __init__(self):
iso.Creator.__init__(self)
self.output_tool_definitions = False
self.m_codes_on_their_own_line = True
self.output_g98_and_g99 = False
#self.fmt = Format(dp_wanted = False, add_trailing_zeros = True, add_plus = True)
#def SPACE_STR(self): return ' '
def PROGRAM(self): return None
def RETRACT(self, height): return('R' + (self.fmt.string(height)))
def PECK_DEPTH(self, depth): return('O' + (self.fmt.string(depth)))
def program_begin(self, id, name=''):
self.write('(' + name + ')\n')
def imperial(self):
#self.g_list.append(self.IMPERIAL())
self.fmt.number_of_decimal_places = 4
def metric(self):
#self.g_list.append(self.METRIC())
self.fmt.number_of_decimal_places = 3
def comment(self, text):
pass
nc.creator = Creator()
| lgpl-2.1 | 8,049,201,079,331,550,000 | -817,915,462,663,742,200 | 26.078947 | 89 | 0.597001 | false |
MattFaus/CrowdTube-Connector | youtube.py | 1 | 6824 | import os
import urlparse
from lib import gdata
import lib.gdata.youtube.client
import secrets
GDATA_API_CLIENT_ID = 'CrowdTube-Connector'
class YouTubeCaptionEditor(object):
def __init__(self, google_email, google_password, youtube_username):
self.youtube_username = youtube_username
self.youtube_client = lib.gdata.youtube.client.YouTubeClient()
# We shouldn't need this auth_token, but we'll keep it around
self.auth_token = self.youtube_client.client_login(
google_email, google_password, GDATA_API_CLIENT_ID)
# A dictionary of youtube_id and YouTubeVideo objects
self.videos = {}
def get_videos(self):
# Format copied from lib.gdata.youtube.client.py
feed_uri = '%s%s/%s' % (lib.gdata.youtube.client.YOUTUBE_USER_FEED_URI,
self.youtube_username, 'uploads')
all_videos = self.youtube_client.get_videos(uri=feed_uri)
for video in all_videos.entry:
new_video = YouTubeVideo(video, self.youtube_client)
self.videos[new_video.video_id] = new_video
def get_video(self, video_id):
video_entry = self.youtube_client.get_video_entry(video_id=video_id)
return YouTubeVideo(video_entry, self.youtube_client)
def delete_track(self, video_id, track_id):
"""Deletes an existing track."""
# TODO(mattfaus): Take google_developer_key as a constructor arg?
response = self.youtube_client.delete_track(video_id, track_id,
client_id=GDATA_API_CLIENT_ID,
developer_key=secrets.google_developer_key)
# http://docs.python.org/release/2.2.3/lib/httpresponse-objects.html
if response.status != 200:
print response.status, response.msg
return False
return True
def add_track(self, video_id, title, language, track_content):
"""Adds a caption track.
If a track with the same title already exists, this will silently fail.
"""
# TODO(mattfaus): Take google_developer_key as a constructor arg?
track_content = track_content.encode('utf-8')
response = self.youtube_client.create_track(video_id, title, language,
track_content, client_id=GDATA_API_CLIENT_ID,
developer_key=secrets.google_developer_key, fmt='sub')
# Returns a TrackEntry object
return response
def update_track(self, video_id, track_id, track_content):
"""Adds a caption track."""
# TODO(mattfaus): Take google_developer_key as a constructor arg?
track_content = track_content.encode('utf-8')
response = self.youtube_client.update_track(video_id, track_id,
track_content, client_id=GDATA_API_CLIENT_ID,
developer_key=secrets.google_developer_key, fmt='sub')
# Returns a TrackEntry object
return response
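# Illustrative usage sketch (hypothetical credentials and video id, not part
# of the original file):
#
#   editor = YouTubeCaptionEditor('[email protected]', 'app-password', 'someuser')
#   editor.get_videos()                       # fills editor.videos keyed by video id
#   video = editor.get_video('SNrEiiJwD4Y')   # or fetch a single video entry
#   editor.add_track(video.video_id, 'English', 'en', caption_text)
#   # caption_text is assumed to be SubViewer ('sub') formatted caption data.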
# TODO(mattfaus): Suck these two classes into the YouTubeCaptionEditor, above
# make the YouTubeCaptionEditor behave more like a full-fledged youtube client
# Shouldn't have to pass the youtube_client object around to the sub-classes
# No need to have dictionaries where an array would do just fine (YouTubeVideo.caption_tracks)
class YouTubeVideo(object):
def __init__(self, video_entry, youtube_client=None):
self.youtube_client = youtube_client
# tag:youtube.com,2008:video:SNrEiiJwD4Y
id_parts = video_entry.GetId().split(':')
self.video_id = id_parts[id_parts.index('video') + 1]
self.title = video_entry.title.text
caption_link = video_entry.get_link(
'http://gdata.youtube.com/schemas/2007#video.captionTracks')
self.caption_feed = caption_link.href
# TODO(mattfaus): Make this less ugly
has_entries = [
a.value for a in caption_link.GetAttributes()
if '{http://gdata.youtube.com/schemas/2007}hasEntries' == a._qname]
has_entries = has_entries[0] == 'true'
self.has_entries = has_entries
self.caption_tracks = {}
def get_caption_tracks(self, download=False):
# Don't check self.has_entries. It may be False when only a
# machine-generated caption track exists.
if not self.youtube_client:
raise ValueError('No youtube client available!')
# STOPSHIP(mattfaus): get_caption_feed() only returns the first 24 caption tracks
# so we must iterate to read more
# TODO(mattfaus): Filter this by language with the 'lr' attribute
all_captions = self.youtube_client.get_caption_feed(self.caption_feed)
for caption_entry in all_captions.entry:
new_track = YouTubeCaptionTrack(caption_entry, self.youtube_client)
self.caption_tracks[new_track.track_source] = new_track
if download:
new_track.download_track()
def get_machine_generated_track(self):
self.get_caption_tracks()
for src, caption_track in self.caption_tracks.iteritems():
print src, caption_track
if caption_track.machine_generated:
caption_track.download_track()
return caption_track
class YouTubeCaptionTrack(object):
def __init__(self, caption_entry, youtube_client):
self.youtube_client = youtube_client
self.language = caption_entry.content.lang
self.track_source = caption_entry.content.src
self.machine_generated = YouTubeCaptionTrack._is_machine_generated(
caption_entry)
# Parse the video_id and caption_id out of a url like this:
# https://gdata.youtube.com/feeds/api/videos/Jom6EtXzRMg/captiondata/Ch4LEO3ZhwUaFQjIic2vrcLuxCYSAmVuGgAiA2Fzcgw
o = urlparse.urlparse(self.track_source)
path_parts = o.path.split('/')
self.video_id = path_parts[path_parts.index('videos') + 1]
self.track_id = path_parts[path_parts.index('captiondata') + 1]
self.track_content = None
@staticmethod
def _is_machine_generated(caption_entry):
"""Looks for the derived element, and returns True if it is equal to
speechRecognition.
"""
# TODO(mattfaus): Move this to TrackEntry within youtube/data.py?
derived = caption_entry.GetElements(
tag='derived', namespace='http://gdata.youtube.com/schemas/2007')
if not derived:
return False
else:
derived = derived[0]
return derived.text == 'speechRecognition'
def download_track(self):
response = self.youtube_client.get_caption_track(
track_url=self.track_source, client_id=GDATA_API_CLIENT_ID,
developer_key=secrets.google_developer_key)
self.track_content = response.read(2 ** 31)
return self.track_content
| mit | -5,741,052,065,317,162,000 | 1,175,314,819,881,902,800 | 38.445087 | 120 | 0.651231 | false |
flamholz/thrift | lib/py/src/protocol/TProtocol.py | 75 | 10848 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from thrift.Thrift import *
class TProtocolException(TException):
"""Custom Protocol Exception class"""
UNKNOWN = 0
INVALID_DATA = 1
NEGATIVE_SIZE = 2
SIZE_LIMIT = 3
BAD_VERSION = 4
def __init__(self, type=UNKNOWN, message=None):
TException.__init__(self, message)
self.type = type
class TProtocolBase:
"""Base class for Thrift protocol driver."""
def __init__(self, trans):
self.trans = trans
def writeMessageBegin(self, name, ttype, seqid):
pass
def writeMessageEnd(self):
pass
def writeStructBegin(self, name):
pass
def writeStructEnd(self):
pass
def writeFieldBegin(self, name, ttype, fid):
pass
def writeFieldEnd(self):
pass
def writeFieldStop(self):
pass
def writeMapBegin(self, ktype, vtype, size):
pass
def writeMapEnd(self):
pass
def writeListBegin(self, etype, size):
pass
def writeListEnd(self):
pass
def writeSetBegin(self, etype, size):
pass
def writeSetEnd(self):
pass
def writeBool(self, bool_val):
pass
def writeByte(self, byte):
pass
def writeI16(self, i16):
pass
def writeI32(self, i32):
pass
def writeI64(self, i64):
pass
def writeDouble(self, dub):
pass
def writeString(self, str_val):
pass
def readMessageBegin(self):
pass
def readMessageEnd(self):
pass
def readStructBegin(self):
pass
def readStructEnd(self):
pass
def readFieldBegin(self):
pass
def readFieldEnd(self):
pass
def readMapBegin(self):
pass
def readMapEnd(self):
pass
def readListBegin(self):
pass
def readListEnd(self):
pass
def readSetBegin(self):
pass
def readSetEnd(self):
pass
def readBool(self):
pass
def readByte(self):
pass
def readI16(self):
pass
def readI32(self):
pass
def readI64(self):
pass
def readDouble(self):
pass
def readString(self):
pass
def skip(self, ttype):
if ttype == TType.STOP:
return
elif ttype == TType.BOOL:
self.readBool()
elif ttype == TType.BYTE:
self.readByte()
elif ttype == TType.I16:
self.readI16()
elif ttype == TType.I32:
self.readI32()
elif ttype == TType.I64:
self.readI64()
elif ttype == TType.DOUBLE:
self.readDouble()
elif ttype == TType.STRING:
self.readString()
elif ttype == TType.STRUCT:
name = self.readStructBegin()
while True:
(name, ttype, id) = self.readFieldBegin()
if ttype == TType.STOP:
break
self.skip(ttype)
self.readFieldEnd()
self.readStructEnd()
elif ttype == TType.MAP:
(ktype, vtype, size) = self.readMapBegin()
for i in xrange(size):
self.skip(ktype)
self.skip(vtype)
self.readMapEnd()
elif ttype == TType.SET:
(etype, size) = self.readSetBegin()
for i in xrange(size):
self.skip(etype)
self.readSetEnd()
elif ttype == TType.LIST:
(etype, size) = self.readListBegin()
for i in xrange(size):
self.skip(etype)
self.readListEnd()
# tuple of: ( 'reader method' name, is_container bool, 'writer_method' name )
_TTYPE_HANDLERS = (
(None, None, False), # 0 TType.STOP
(None, None, False), # 1 TType.VOID # TODO: handle void?
('readBool', 'writeBool', False), # 2 TType.BOOL
('readByte', 'writeByte', False), # 3 TType.BYTE and I08
('readDouble', 'writeDouble', False), # 4 TType.DOUBLE
(None, None, False), # 5 undefined
('readI16', 'writeI16', False), # 6 TType.I16
(None, None, False), # 7 undefined
('readI32', 'writeI32', False), # 8 TType.I32
(None, None, False), # 9 undefined
('readI64', 'writeI64', False), # 10 TType.I64
('readString', 'writeString', False), # 11 TType.STRING and UTF7
('readContainerStruct', 'writeContainerStruct', True), # 12 *.STRUCT
('readContainerMap', 'writeContainerMap', True), # 13 TType.MAP
('readContainerSet', 'writeContainerSet', True), # 14 TType.SET
('readContainerList', 'writeContainerList', True), # 15 TType.LIST
(None, None, False), # 16 TType.UTF8 # TODO: handle utf8 types?
(None, None, False) # 17 TType.UTF16 # TODO: handle utf16 types?
)
def readFieldByTType(self, ttype, spec):
try:
(r_handler, w_handler, is_container) = self._TTYPE_HANDLERS[ttype]
except IndexError:
raise TProtocolException(type=TProtocolException.INVALID_DATA,
message='Invalid field type %d' % (ttype))
if r_handler is None:
raise TProtocolException(type=TProtocolException.INVALID_DATA,
message='Invalid field type %d' % (ttype))
reader = getattr(self, r_handler)
if not is_container:
return reader()
return reader(spec)
def readContainerList(self, spec):
results = []
ttype, tspec = spec[0], spec[1]
r_handler = self._TTYPE_HANDLERS[ttype][0]
reader = getattr(self, r_handler)
(list_type, list_len) = self.readListBegin()
if tspec is None:
# list values are simple types
for idx in xrange(list_len):
results.append(reader())
else:
# this is like an inlined readFieldByTType
container_reader = self._TTYPE_HANDLERS[list_type][0]
val_reader = getattr(self, container_reader)
for idx in xrange(list_len):
val = val_reader(tspec)
results.append(val)
self.readListEnd()
return results
def readContainerSet(self, spec):
results = set()
ttype, tspec = spec[0], spec[1]
r_handler = self._TTYPE_HANDLERS[ttype][0]
reader = getattr(self, r_handler)
(set_type, set_len) = self.readSetBegin()
if tspec is None:
# set members are simple types
for idx in xrange(set_len):
results.add(reader())
else:
container_reader = self._TTYPE_HANDLERS[set_type][0]
val_reader = getattr(self, container_reader)
for idx in xrange(set_len):
results.add(val_reader(tspec))
self.readSetEnd()
return results
def readContainerStruct(self, spec):
(obj_class, obj_spec) = spec
obj = obj_class()
obj.read(self)
return obj
def readContainerMap(self, spec):
results = dict()
key_ttype, key_spec = spec[0], spec[1]
val_ttype, val_spec = spec[2], spec[3]
(map_ktype, map_vtype, map_len) = self.readMapBegin()
# TODO: compare types we just decoded with thrift_spec and
# abort/skip if types disagree
key_reader = getattr(self, self._TTYPE_HANDLERS[key_ttype][0])
val_reader = getattr(self, self._TTYPE_HANDLERS[val_ttype][0])
# list values are simple types
for idx in xrange(map_len):
if key_spec is None:
k_val = key_reader()
else:
k_val = self.readFieldByTType(key_ttype, key_spec)
if val_spec is None:
v_val = val_reader()
else:
v_val = self.readFieldByTType(val_ttype, val_spec)
# this raises a TypeError with unhashable keys types
# i.e. this fails: d=dict(); d[[0,1]] = 2
results[k_val] = v_val
self.readMapEnd()
return results
def readStruct(self, obj, thrift_spec):
self.readStructBegin()
while True:
(fname, ftype, fid) = self.readFieldBegin()
if ftype == TType.STOP:
break
try:
field = thrift_spec[fid]
except IndexError:
self.skip(ftype)
else:
if field is not None and ftype == field[1]:
fname = field[2]
fspec = field[3]
val = self.readFieldByTType(ftype, fspec)
setattr(obj, fname, val)
else:
self.skip(ftype)
self.readFieldEnd()
self.readStructEnd()
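  # Sketch of the thrift_spec layout readStruct()/writeStruct() expect, as
  # produced by the thrift code generator (field ids index the tuple; each
  # entry is roughly (fid, ftype, 'field_name', spec_args, default)):
  #
  #   thrift_spec = (
  #     None,                                     # field id 0 is unused
  #     (1, TType.STRING, 'message', None, None,),
  #     (2, TType.I32, 'code', None, None,),
  #   )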
def writeContainerStruct(self, val, spec):
val.write(self)
def writeContainerList(self, val, spec):
self.writeListBegin(spec[0], len(val))
r_handler, w_handler, is_container = self._TTYPE_HANDLERS[spec[0]]
e_writer = getattr(self, w_handler)
if not is_container:
for elem in val:
e_writer(elem)
else:
for elem in val:
e_writer(elem, spec[1])
self.writeListEnd()
def writeContainerSet(self, val, spec):
self.writeSetBegin(spec[0], len(val))
r_handler, w_handler, is_container = self._TTYPE_HANDLERS[spec[0]]
e_writer = getattr(self, w_handler)
if not is_container:
for elem in val:
e_writer(elem)
else:
for elem in val:
e_writer(elem, spec[1])
self.writeSetEnd()
def writeContainerMap(self, val, spec):
k_type = spec[0]
v_type = spec[2]
ignore, ktype_name, k_is_container = self._TTYPE_HANDLERS[k_type]
ignore, vtype_name, v_is_container = self._TTYPE_HANDLERS[v_type]
k_writer = getattr(self, ktype_name)
v_writer = getattr(self, vtype_name)
self.writeMapBegin(k_type, v_type, len(val))
for m_key, m_val in val.iteritems():
if not k_is_container:
k_writer(m_key)
else:
k_writer(m_key, spec[1])
if not v_is_container:
v_writer(m_val)
else:
v_writer(m_val, spec[3])
self.writeMapEnd()
def writeStruct(self, obj, thrift_spec):
self.writeStructBegin(obj.__class__.__name__)
for field in thrift_spec:
if field is None:
continue
fname = field[2]
val = getattr(obj, fname)
if val is None:
# skip writing out unset fields
continue
fid = field[0]
ftype = field[1]
fspec = field[3]
# get the writer method for this value
self.writeFieldBegin(fname, ftype, fid)
self.writeFieldByTType(ftype, val, fspec)
self.writeFieldEnd()
self.writeFieldStop()
self.writeStructEnd()
def writeFieldByTType(self, ttype, val, spec):
r_handler, w_handler, is_container = self._TTYPE_HANDLERS[ttype]
writer = getattr(self, w_handler)
if is_container:
writer(val, spec)
else:
writer(val)
class TProtocolFactory:
def getProtocol(self, trans):
pass
| apache-2.0 | -3,926,184,830,612,731,400 | 7,245,104,078,363,806,000 | 25.719212 | 79 | 0.625737 | false |
qtumproject/qtum | test/functional/feature_filelock.py | 8 | 1833 | #!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Check that it's not possible to start a second bitcoind instance using the same datadir or wallet."""
import os
from test_framework.test_framework import BitcoinTestFramework
from test_framework.test_node import ErrorMatch
class FilelockTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def setup_network(self):
self.add_nodes(self.num_nodes, extra_args=None)
self.nodes[0].start([])
self.nodes[0].wait_for_rpc_connection()
def run_test(self):
datadir = os.path.join(self.nodes[0].datadir, self.chain)
self.log.info("Using datadir {}".format(datadir))
self.log.info("Check that we can't start a second bitcoind instance using the same datadir")
expected_msg = "Error: Cannot obtain a lock on data directory {0}. {1} is probably already running.".format(datadir, self.config['environment']['PACKAGE_NAME'])
self.nodes[1].assert_start_raises_init_error(extra_args=['-datadir={}'.format(self.nodes[0].datadir), '-noserver'], expected_msg=expected_msg)
if self.is_wallet_compiled():
wallet_dir = os.path.join(datadir, 'wallets')
self.log.info("Check that we can't start a second bitcoind instance using the same wallet")
expected_msg = "Error: Error initializing wallet database environment"
self.nodes[1].assert_start_raises_init_error(extra_args=['-walletdir={}'.format(wallet_dir), '-noserver'], expected_msg=expected_msg, match=ErrorMatch.PARTIAL_REGEX)
if __name__ == '__main__':
FilelockTest().main()
| mit | 8,550,566,340,267,185,000 | -5,000,746,364,210,744,000 | 49.916667 | 177 | 0.69449 | false |