code (stringlengths 2-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 2-1.05M)
---|---|---|---|---|---|
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: os_port
short_description: Add/Update/Delete ports from an OpenStack cloud.
extends_documentation_fragment: openstack
author: "Davide Agnello (@dagnello)"
version_added: "2.0"
description:
- Add, Update or Remove ports from an OpenStack cloud. A I(state) of
'present' will ensure the port is created or updated if required.
options:
network:
description:
- Network ID or name this port belongs to.
required: true
name:
description:
- Name that has to be given to the port.
required: false
default: None
fixed_ips:
description:
- Desired IP and/or subnet for this port. Subnet is referenced by
subnet_id and IP is referenced by ip_address.
required: false
default: None
admin_state_up:
description:
- Sets admin state.
required: false
default: None
mac_address:
description:
- MAC address of this port.
required: false
default: None
security_groups:
description:
- Security group(s) ID(s) or name(s) associated with the port (comma
separated string or YAML list)
required: false
default: None
no_security_groups:
description:
- Do not associate a security group with this port.
required: false
default: False
allowed_address_pairs:
description:
- "Allowed address pairs list. Allowed address pairs are supported with
dictionary structure.
e.g. allowed_address_pairs:
- ip_address: 10.1.0.12
mac_address: ab:cd:ef:12:34:56
- ip_address: ..."
required: false
default: None
extra_dhcp_opts:
description:
- "Extra dhcp options to be assigned to this port. Extra options are
supported with dictionary structure.
e.g. extra_dhcp_opts:
- opt_name: opt name1
opt_value: value1
- opt_name: ..."
required: false
default: None
device_owner:
description:
- The ID of the entity that uses this port.
required: false
default: None
device_id:
description:
- Device ID of device using this port.
required: false
default: None
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
'''
EXAMPLES = '''
# Create a port
- os_port:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: port1
network: foo
# Create a port with a static IP
- os_port:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: port1
network: foo
fixed_ips:
- ip_address: 10.1.0.21
# Create a port with no security groups
- os_port:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: port1
network: foo
no_security_groups: True
# Update the existing 'port1' port with multiple security groups (version 1)
- os_port:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/d
username: admin
password: admin
project_name: admin
name: port1
security_groups: 1496e8c7-4918-482a-9172-f4f00fc4a3a5,057d4bdf-6d4d-472...
# Update the existing 'port1' port with multiple security groups (version 2)
- os_port:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/d
username: admin
password: admin
project_name: admin
name: port1
security_groups:
- 1496e8c7-4918-482a-9172-f4f00fc4a3a5
- 057d4bdf-6d4d-472...
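# Create a port with allowed address pairs and extra DHCP options
# (illustrative sketch only; the values are taken from the option
# documentation above, and the auth block mirrors the earlier examples)
- os_port:
    state: present
    auth:
      auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
      username: admin
      password: admin
      project_name: admin
    name: port1
    network: foo
    allowed_address_pairs:
      - ip_address: 10.1.0.12
        mac_address: ab:cd:ef:12:34:56
    extra_dhcp_opts:
      - opt_name: opt name1
        opt_value: value1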
'''
RETURN = '''
id:
description: Unique UUID.
returned: success
type: string
name:
description: Name given to the port.
returned: success
type: string
network_id:
description: Network ID this port belongs in.
returned: success
type: string
security_groups:
description: Security group(s) associated with this port.
returned: success
type: list of strings
status:
description: Port's status.
returned: success
type: string
fixed_ips:
description: Fixed ip(s) associated with this port.
returned: success
type: list of dicts
tenant_id:
description: Tenant id associated with this port.
returned: success
type: string
allowed_address_pairs:
description: Allowed address pairs with this port.
returned: success
type: list of dicts
admin_state_up:
description: Admin state up flag for this port.
returned: success
type: bool
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
def _needs_update(module, port, cloud):
"""Check for differences in the updatable values.
NOTE: We don't currently allow name updates.
"""
compare_simple = ['admin_state_up',
'mac_address',
'device_owner',
'device_id']
compare_dict = ['allowed_address_pairs',
'extra_dhcp_opts']
compare_list = ['security_groups']
for key in compare_simple:
if module.params[key] is not None and module.params[key] != port[key]:
return True
for key in compare_dict:
if module.params[key] is not None and cmp(module.params[key],
port[key]) != 0:
return True
for key in compare_list:
if module.params[key] is not None and (set(module.params[key]) !=
set(port[key])):
return True
# NOTE: if the port was created or updated with 'no_security_groups=True',
# a subsequent update that omits 'no_security_groups' (or passes
# 'no_security_groups=False') and does not specify 'security_groups' will
# not update the port, i.e. the default security group is not re-applied.
if module.params['no_security_groups'] and port['security_groups'] != []:
return True
if module.params['fixed_ips'] is not None:
for item in module.params['fixed_ips']:
if 'ip_address' in item:
# if ip_address in request does not match any in existing port,
# update is required.
if not any(match['ip_address'] == item['ip_address']
for match in port['fixed_ips']):
return True
if 'subnet_id' in item:
return True
for item in port['fixed_ips']:
# if ip_address in existing port does not match any in request,
# update is required.
if not any(match.get('ip_address') == item['ip_address']
for match in module.params['fixed_ips']):
return True
return False
def _system_state_change(module, port, cloud):
state = module.params['state']
if state == 'present':
if not port:
return True
return _needs_update(module, port, cloud)
if state == 'absent' and port:
return True
return False
def _compose_port_args(module, cloud):
port_kwargs = {}
optional_parameters = ['name',
'fixed_ips',
'admin_state_up',
'mac_address',
'security_groups',
'allowed_address_pairs',
'extra_dhcp_opts',
'device_owner',
'device_id']
for optional_param in optional_parameters:
if module.params[optional_param] is not None:
port_kwargs[optional_param] = module.params[optional_param]
if module.params['no_security_groups']:
port_kwargs['security_groups'] = []
return port_kwargs
def get_security_group_id(module, cloud, security_group_name_or_id):
security_group = cloud.get_security_group(security_group_name_or_id)
if not security_group:
module.fail_json(msg="Security group: %s, was not found"
% security_group_name_or_id)
return security_group['id']
def main():
argument_spec = openstack_full_argument_spec(
network=dict(required=False),
name=dict(required=False),
fixed_ips=dict(type='list', default=None),
admin_state_up=dict(type='bool', default=None),
mac_address=dict(default=None),
security_groups=dict(default=None, type='list'),
no_security_groups=dict(default=False, type='bool'),
allowed_address_pairs=dict(type='list', default=None),
extra_dhcp_opts=dict(type='list', default=None),
device_owner=dict(default=None),
device_id=dict(default=None),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs(
mutually_exclusive=[
['no_security_groups', 'security_groups'],
]
)
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
name = module.params['name']
state = module.params['state']
try:
cloud = shade.openstack_cloud(**module.params)
if module.params['security_groups']:
# translate security_groups to UUIDs if names were provided
module.params['security_groups'] = [
get_security_group_id(module, cloud, v)
for v in module.params['security_groups']
]
port = None
network_id = None
if name:
port = cloud.get_port(name)
if module.check_mode:
module.exit_json(changed=_system_state_change(module, port, cloud))
changed = False
if state == 'present':
if not port:
network = module.params['network']
if not network:
module.fail_json(
msg="Parameter 'network' is required in Port Create"
)
port_kwargs = _compose_port_args(module, cloud)
network_object = cloud.get_network(network)
if network_object:
network_id = network_object['id']
else:
module.fail_json(
msg="Specified network was not found."
)
port = cloud.create_port(network_id, **port_kwargs)
changed = True
else:
if _needs_update(module, port, cloud):
port_kwargs = _compose_port_args(module, cloud)
port = cloud.update_port(port['id'], **port_kwargs)
changed = True
module.exit_json(changed=changed, id=port['id'], port=port)
if state == 'absent':
if port:
cloud.delete_port(port['id'])
changed = True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| emersonsoftware/ansiblefork | lib/ansible/modules/cloud/openstack/os_port.py | Python | gpl-3.0 | 12,587 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class hr_employee(osv.osv):
_name = "hr.employee"
_description = "Employee"
_inherit = "hr.employee"
def _get_latest_contract(self, cr, uid, ids, field_name, args, context=None):
res = {}
obj_contract = self.pool.get('hr.contract')
for emp in self.browse(cr, uid, ids, context=context):
contract_ids = obj_contract.search(cr, uid, [('employee_id','=',emp.id),], order='date_start', context=context)
if contract_ids:
res[emp.id] = contract_ids[-1:][0]
else:
res[emp.id] = False
return res
_columns = {
'manager': fields.boolean('Is a Manager'),
'medic_exam': fields.date('Medical Examination Date'),
'place_of_birth': fields.char('Place of Birth', size=30),
'children': fields.integer('Number of Children'),
'vehicle': fields.char('Company Vehicle', size=64),
'vehicle_distance': fields.integer('Home-Work Dist.', help="In kilometers"),
'contract_ids': fields.one2many('hr.contract', 'employee_id', 'Contracts'),
'contract_id':fields.function(_get_latest_contract, string='Contract', type='many2one', relation="hr.contract", help='Latest contract of the employee'),
}
class hr_contract_type(osv.osv):
_name = 'hr.contract.type'
_description = 'Contract Type'
_columns = {
'name': fields.char('Contract Type', size=32, required=True),
}
class hr_contract(osv.osv):
_name = 'hr.contract'
_description = 'Contract'
_columns = {
'name': fields.char('Contract Reference', size=64, required=True),
'employee_id': fields.many2one('hr.employee', "Employee", required=True),
'department_id': fields.related('employee_id','department_id', type='many2one', relation='hr.department', string="Department", readonly=True),
'type_id': fields.many2one('hr.contract.type', "Contract Type", required=True),
'job_id': fields.many2one('hr.job', 'Job Title'),
'date_start': fields.date('Start Date', required=True),
'date_end': fields.date('End Date'),
'trial_date_start': fields.date('Trial Start Date'),
'trial_date_end': fields.date('Trial End Date'),
'working_hours': fields.many2one('resource.calendar','Working Schedule'),
'wage': fields.float('Wage', digits=(16,2), required=True, help="Basic Salary of the employee"),
'advantages': fields.text('Advantages'),
'notes': fields.text('Notes'),
'permit_no': fields.char('Work Permit No', size=256, required=False, readonly=False),
'visa_no': fields.char('Visa No', size=64, required=False, readonly=False),
'visa_expire': fields.date('Visa Expire Date'),
}
def _get_type(self, cr, uid, context=None):
type_ids = self.pool.get('hr.contract.type').search(cr, uid, [('name', '=', 'Employee')])
return type_ids and type_ids[0] or False
_defaults = {
'date_start': lambda *a: time.strftime("%Y-%m-%d"),
'type_id': _get_type
}
def onchange_employee_id(self, cr, uid, ids, employee_id, context=None):
if not employee_id:
return {'value': {'job_id': False}}
emp_obj = self.pool.get('hr.employee').browse(cr, uid, employee_id, context=context)
job_id = False
if emp_obj.job_id:
job_id = emp_obj.job_id.id
return {'value': {'job_id': job_id}}
def _check_dates(self, cr, uid, ids, context=None):
for contract in self.read(cr, uid, ids, ['date_start', 'date_end'], context=context):
if contract['date_start'] and contract['date_end'] and contract['date_start'] > contract['date_end']:
return False
return True
_constraints = [
(_check_dates, 'Error! Contract start-date must be less than contract end-date.', ['date_start', 'date_end'])
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| jmesteve/saas3 | openerp/addons/hr_contract/hr_contract.py | Python | agpl-3.0 | 4,997 |
"""
Stub implementation of LTI Provider.
What is supported:
------------------
1.) This LTI Provider can service only one Tool Consumer at a time. It is
not possible to use this LTI tool multiple times on a single page in the LMS.
"""
from uuid import uuid4
import textwrap
import urllib
import re
from oauthlib.oauth1.rfc5849 import signature, parameters
import oauthlib.oauth1
import hashlib
import base64
import mock
import requests
from http import StubHttpRequestHandler, StubHttpService
class StubLtiHandler(StubHttpRequestHandler):
"""
A handler for LTI POST and GET requests.
"""
DEFAULT_CLIENT_KEY = 'test_client_key'
DEFAULT_CLIENT_SECRET = 'test_client_secret'
DEFAULT_LTI_ENDPOINT = 'correct_lti_endpoint'
DEFAULT_LTI_ADDRESS = 'http://127.0.0.1:{port}/'
def do_GET(self):
"""
Handle a GET request from the client and sends response back.
Used for checking LTI Provider started correctly.
"""
self.send_response(200, 'This is LTI Provider.', {'Content-type': 'text/plain'})
def do_POST(self):
"""
Handle a POST request from the client and sends response back.
"""
if 'grade' in self.path and self._send_graded_result().status_code == 200:
status_message = 'LTI consumer (edX) responded with XML content:<br>' + self.server.grade_data['TC answer']
content = self._create_content(status_message)
self.send_response(200, content)
elif 'lti2_outcome' in self.path and self._send_lti2_outcome().status_code == 200:
status_message = 'LTI consumer (edX) responded with HTTP {}<br>'.format(
self.server.grade_data['status_code'])
content = self._create_content(status_message)
self.send_response(200, content)
elif 'lti2_delete' in self.path and self._send_lti2_delete().status_code == 200:
status_message = 'LTI consumer (edX) responded with HTTP {}<br>'.format(
self.server.grade_data['status_code'])
content = self._create_content(status_message)
self.send_response(200, content)
# Respond to request with correct lti endpoint
elif self._is_correct_lti_request():
params = {k: v for k, v in self.post_dict.items() if k != 'oauth_signature'}
if self._check_oauth_signature(params, self.post_dict.get('oauth_signature', "")):
status_message = "This is LTI tool. Success."
# Set data for grades that need to be stored as server data
if 'lis_outcome_service_url' in self.post_dict:
self.server.grade_data = {
'callback_url': self.post_dict.get('lis_outcome_service_url').replace('https', 'http'),
'sourcedId': self.post_dict.get('lis_result_sourcedid')
}
submit_url = '//{}:{}'.format(*self.server.server_address)
content = self._create_content(status_message, submit_url)
self.send_response(200, content)
else:
content = self._create_content("Wrong LTI signature")
self.send_response(200, content)
else:
content = self._create_content("Invalid request URL")
self.send_response(500, content)
def _send_graded_result(self):
"""
Send grade request.
"""
values = {
'textString': 0.5,
'sourcedId': self.server.grade_data['sourcedId'],
'imsx_messageIdentifier': uuid4().hex,
}
payload = textwrap.dedent("""
<?xml version = "1.0" encoding = "UTF-8"?>
<imsx_POXEnvelopeRequest xmlns="http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0">
<imsx_POXHeader>
<imsx_POXRequestHeaderInfo>
<imsx_version>V1.0</imsx_version>
<imsx_messageIdentifier>{imsx_messageIdentifier}</imsx_messageIdentifier> /
</imsx_POXRequestHeaderInfo>
</imsx_POXHeader>
<imsx_POXBody>
<replaceResultRequest>
<resultRecord>
<sourcedGUID>
<sourcedId>{sourcedId}</sourcedId>
</sourcedGUID>
<result>
<resultScore>
<language>en-us</language>
<textString>{textString}</textString>
</resultScore>
</result>
</resultRecord>
</replaceResultRequest>
</imsx_POXBody>
</imsx_POXEnvelopeRequest>
""")
data = payload.format(**values)
url = self.server.grade_data['callback_url']
headers = {
'Content-Type': 'application/xml',
'X-Requested-With': 'XMLHttpRequest',
'Authorization': self._oauth_sign(url, data)
}
# Send request ignoring verification of the SSL certificate
response = requests.post(url, data=data, headers=headers, verify=False)
self.server.grade_data['TC answer'] = response.content
return response
def _send_lti2_outcome(self):
"""
Send a grade back to consumer
"""
payload = textwrap.dedent("""
{{
"@context" : "http://purl.imsglobal.org/ctx/lis/v2/Result",
"@type" : "Result",
"resultScore" : {score},
"comment" : "This is awesome."
}}
""")
data = payload.format(score=0.8)
return self._send_lti2(data)
def _send_lti2_delete(self):
"""
Send a delete back to consumer
"""
payload = textwrap.dedent("""
{
"@context" : "http://purl.imsglobal.org/ctx/lis/v2/Result",
"@type" : "Result"
}
""")
return self._send_lti2(payload)
def _send_lti2(self, payload):
"""
Send lti2 json result service request.
"""
### We compute the LTI V2.0 service endpoint from the callback_url (which is set by the launch call)
url = self.server.grade_data['callback_url']
url_parts = url.split('/')
url_parts[-1] = "lti_2_0_result_rest_handler"
anon_id = self.server.grade_data['sourcedId'].split(":")[-1]
url_parts.extend(["user", anon_id])
new_url = '/'.join(url_parts)
content_type = 'application/vnd.ims.lis.v2.result+json'
headers = {
'Content-Type': content_type,
'Authorization': self._oauth_sign(new_url, payload,
method='PUT',
content_type=content_type)
}
# Send request ignoring verification of the SSL certificate
response = requests.put(new_url, data=payload, headers=headers, verify=False)
self.server.grade_data['status_code'] = response.status_code
self.server.grade_data['TC answer'] = response.content
return response
def _create_content(self, response_text, submit_url=None):
"""
Return content (str) either for launch, send grade or get result from TC.
"""
if submit_url:
submit_form = textwrap.dedent("""
<form action="{submit_url}/grade" method="post">
<input type="submit" name="submit-button" value="Submit">
</form>
<form action="{submit_url}/lti2_outcome" method="post">
<input type="submit" name="submit-lti2-button" value="Submit">
</form>
<form action="{submit_url}/lti2_delete" method="post">
<input type="submit" name="submit-lti2-delete-button" value="Submit">
</form>
""").format(submit_url=submit_url)
else:
submit_form = ''
# Show roles only for LTI launch.
if self.post_dict.get('roles'):
role = '<h5>Role: {}</h5>'.format(self.post_dict['roles'])
else:
role = ''
response_str = textwrap.dedent("""
<html>
<head>
<title>TEST TITLE</title>
</head>
<body>
<div>
<h2>IFrame loaded</h2>
<h3>Server response is:</h3>
<h3 class="result">{response}</h3>
{role}
</div>
{submit_form}
</body>
</html>
""").format(response=response_text, role=role, submit_form=submit_form)
# Currently the LTI module URL-quotes the lis_result_sourcedid parameter twice,
# so unquote the response two times.
return urllib.unquote(urllib.unquote(response_str))
def _is_correct_lti_request(self):
"""
Return a boolean indicating whether the URL path is a valid LTI end-point.
"""
lti_endpoint = self.server.config.get('lti_endpoint', self.DEFAULT_LTI_ENDPOINT)
return lti_endpoint in self.path
def _oauth_sign(self, url, body, content_type=u'application/x-www-form-urlencoded', method=u'POST'):
"""
Signs request and returns signed Authorization header.
"""
client_key = self.server.config.get('client_key', self.DEFAULT_CLIENT_KEY)
client_secret = self.server.config.get('client_secret', self.DEFAULT_CLIENT_SECRET)
client = oauthlib.oauth1.Client(
client_key=unicode(client_key),
client_secret=unicode(client_secret)
)
headers = {
# This is needed for body encoding:
'Content-Type': content_type,
}
# Calculate and encode body hash. See http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html
sha1 = hashlib.sha1()
sha1.update(body)
oauth_body_hash = unicode(base64.b64encode(sha1.digest())) # pylint: disable=too-many-function-args
params = client.get_oauth_params(None)
params.append((u'oauth_body_hash', oauth_body_hash))
mock_request = mock.Mock(
uri=unicode(urllib.unquote(url)),
headers=headers,
body=u"",
decoded_body=u"",
oauth_params=params,
http_method=unicode(method),
)
sig = client.get_oauth_signature(mock_request)
mock_request.oauth_params.append((u'oauth_signature', sig))
new_headers = parameters.prepare_headers(mock_request.oauth_params, headers, realm=None)
return new_headers['Authorization']
def _check_oauth_signature(self, params, client_signature):
"""
Checks oauth signature from client.
`params` are params from post request except signature,
`client_signature` is signature from request.
Builds mocked request and verifies hmac-sha1 signing::
1. builds string to sign from `params`, `url` and `http_method`.
2. signs it with `client_secret` which comes from server settings.
3. obtains the signature after signing and compares it with request.signature
(the request signature comes from the client in the request)
Returns `True` if signatures are correct, otherwise `False`.
"""
client_secret = unicode(self.server.config.get('client_secret', self.DEFAULT_CLIENT_SECRET))
port = self.server.server_address[1]
lti_base = self.DEFAULT_LTI_ADDRESS.format(port=port)
lti_endpoint = self.server.config.get('lti_endpoint', self.DEFAULT_LTI_ENDPOINT)
url = lti_base + lti_endpoint
request = mock.Mock()
request.params = [(unicode(k), unicode(v)) for k, v in params.items()]
request.uri = unicode(url)
request.http_method = u'POST'
request.signature = unicode(client_signature)
return signature.verify_hmac_sha1(request, client_secret)
class StubLtiService(StubHttpService):
"""
A stub LTI provider server that responds
to POST and GET requests to localhost.
"""
HANDLER_CLASS = StubLtiHandler
| wwj718/ANALYSE | common/djangoapps/terrain/stubs/lti.py | Python | agpl-3.0 | 12,436 |
# -*- coding: utf-8 -*-
import json
import re
import unicodedata
import string
from urllib import urlencode
from requests import get
languages = {'de', 'en', 'es', 'fr', 'hu', 'it', 'nl', 'jp'}
url_template = 'https://www.wikidata.org/w/api.php?action=wbgetentities&format=json&{query}&props=labels%7Cdatatype%7Cclaims%7Caliases&languages=' + '|'.join(languages)
url_wmflabs_template = 'http://wdq.wmflabs.org/api?q='
url_wikidata_search_template='http://www.wikidata.org/w/api.php?action=query&list=search&format=json&srnamespace=0&srprop=sectiontitle&{query}'
wmflabs_queries = [
'CLAIM[31:8142]', # all devise
]
db = {
'iso4217' : {
},
'names' : {
}
}
def remove_accents(data):
return unicodedata.normalize('NFKD', data).lower()
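# Build a canonical lookup key for a currency name: lower-case it, strip
# accents, and replace hyphens and repeated spaces with single spaces.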
def normalize_name(name):
return re.sub(' +',' ', remove_accents(name.lower()).replace('-', ' '))
def add_currency_name(name, iso4217):
global db
db_names = db['names']
if not isinstance(iso4217, basestring):
print "problem", name, iso4217
return
name = normalize_name(name)
if name == '':
print "name empty", iso4217
return
iso4217_set = db_names.get(name, None)
if iso4217_set is None:
db_names[name] = [ iso4217 ]
elif iso4217 not in iso4217_set:
iso4217_set.append(iso4217)
def add_currency_label(label, iso4217, language):
global db
db['iso4217'][iso4217] = db['iso4217'].get(iso4217, {})
db['iso4217'][iso4217][language] = label
def get_property_value(data, name):
prop = data.get('claims', {}).get(name, {})
if len(prop) == 0:
return None
value = prop[0].get('mainsnak', {}).get('datavalue', {}).get('value', '')
if value == '':
return None
return value
def parse_currency(data):
iso4217 = get_property_value(data, 'P498')
if iso4217 is not None:
unit = get_property_value(data, 'P558')
if unit is not None:
add_currency_name(unit, iso4217)
labels = data.get('labels', {})
for language in languages:
name = labels.get(language, {}).get('value', None)
if name != None:
add_currency_name(name, iso4217)
add_currency_label(name, iso4217, language)
aliases = data.get('aliases', {})
for language in aliases:
for i in range(0, len(aliases[language])):
alias = aliases[language][i].get('value', None)
add_currency_name(alias, iso4217)
def fetch_data(wikidata_ids):
url = url_template.format(query=urlencode({'ids' : '|'.join(wikidata_ids)}))
htmlresponse = get(url)
jsonresponse = json.loads(htmlresponse.content)
entities = jsonresponse.get('entities', {})
for pname in entities:
pvalue = entities.get(pname)
parse_currency(pvalue)
def add_q(i):
return "Q" + str(i)
def fetch_data_batch(wikidata_ids):
while len(wikidata_ids) > 0:
if len(wikidata_ids) > 50:
fetch_data(wikidata_ids[0:50])
wikidata_ids = wikidata_ids[50:]
else:
fetch_data(wikidata_ids)
wikidata_ids = []
def wdq_query(query):
url = url_wmflabs_template + query
htmlresponse = get(url)
jsonresponse = json.loads(htmlresponse.content)
qlist = map(add_q, jsonresponse.get('items', {}))
error = jsonresponse.get('status', {}).get('error', None)
if error != None and error != 'OK':
print "error for query '" + query + "' :" + error
fetch_data_batch(qlist)
def wd_query(query, offset=0):
qlist = []
url = url_wikidata_search_template.format(query=urlencode({'srsearch': query, 'srlimit': 50, 'sroffset': offset}))
htmlresponse = get(url)
jsonresponse = json.loads(htmlresponse.content)
for r in jsonresponse.get('query', {}).get('search', {}):
qlist.append(r.get('title', ''))
fetch_data_batch(qlist)
## fetch ##
for q in wmflabs_queries:
wdq_query(q)
# static
add_currency_name(u"euro", 'EUR')
add_currency_name(u"euros", 'EUR')
add_currency_name(u"dollar", 'USD')
add_currency_name(u"dollars", 'USD')
add_currency_name(u"peso", 'MXN')
add_currency_name(u"pesos", 'MXN')
# write
f = open("currencies.json", "wb")
json.dump(db, f, indent=4, encoding="utf-8")
f.close()
| kdani3/searx | utils/fetch_currencies.py | Python | agpl-3.0 | 4,394 |
# Copyright (C) 2015 The Debsources developers <[email protected]>.
# See the AUTHORS file at the top-level directory of this distribution and at
# https://anonscm.debian.org/gitweb/?p=qa/debsources.git;a=blob;f=AUTHORS;hb=HEAD
#
# This file is part of Debsources. Debsources is free software: you can
# redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation, either version 3
# of the License, or (at your option) any later version. For more information
# see the COPYING file at the top-level directory of this distribution and at
# https://anonscm.debian.org/gitweb/?p=qa/debsources.git;a=blob;f=COPYING;hb=HEAD
from __future__ import absolute_import
from flask import Blueprint
# naming rule: bp_{dirname}
bp_sources = Blueprint('sources',
__name__,
template_folder='templates',
static_url_path='/static/sources',
static_folder='static')
from . import routes # NOQA
| vivekanand1101/debsources | debsources/app/sources/__init__.py | Python | agpl-3.0 | 1,057 |
from PySide.QtCore import *
from PySide.QtGui import *
import unittest
class MyModel (QAbstractListModel):
stupidLine = QLine(0, 0, 10, 10)
def rowCount(self, parent):
return 1
def data(self, index, role):
return self.stupidLine
class TestBug693(unittest.TestCase):
def testIt(self):
app = QApplication([])
model = MyModel()
view = QListView()
view.setModel(model)
view.show()
# This must NOT throw the exception:
# RuntimeError: Internal C++ object (PySide.QtCore.QLine) already deleted.
MyModel.stupidLine.isNull()
if __name__ == "__main__":
unittest.main()
| enthought/pyside | tests/QtGui/bug_693.py | Python | lgpl-2.1 | 670 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ram_file_system.h."""
import numpy as np
from tensorflow.python.eager import def_function
from tensorflow.python.estimator.estimator import Estimator
from tensorflow.python.estimator.model_fn import EstimatorSpec
from tensorflow.python.estimator.run_config import RunConfig
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core as core_layers
from tensorflow.python.lib.io import file_io
from tensorflow.python.module import module
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import saved_model
from tensorflow.python.training import adam
from tensorflow.python.training import training_util
class RamFilesystemTest(test_util.TensorFlowTestCase):
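# These tests exercise the in-memory 'ram://' filesystem scheme through the
# regular file_io/gfile APIs, so nothing is written to the local disk.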
def test_create_and_delete_directory(self):
file_io.create_dir_v2('ram://testdirectory')
file_io.delete_recursively_v2('ram://testdirectory')
def test_create_and_delete_directory_tree_recursive(self):
file_io.create_dir_v2('ram://testdirectory')
file_io.create_dir_v2('ram://testdirectory/subdir1')
file_io.create_dir_v2('ram://testdirectory/subdir2')
file_io.create_dir_v2('ram://testdirectory/subdir1/subdir3')
with gfile.GFile('ram://testdirectory/subdir1/subdir3/a.txt', 'w') as f:
f.write('Hello, world.')
file_io.delete_recursively_v2('ram://testdirectory')
self.assertEqual(gfile.Glob('ram://testdirectory/*'), [])
def test_write_file(self):
with gfile.GFile('ram://a.txt', 'w') as f:
f.write('Hello, world.')
f.write('Hello, world.')
with gfile.GFile('ram://a.txt', 'r') as f:
self.assertEqual(f.read(), 'Hello, world.' * 2)
def test_append_file_with_seek(self):
with gfile.GFile('ram://c.txt', 'w') as f:
f.write('Hello, world.')
with gfile.GFile('ram://c.txt', 'w+') as f:
f.seek(offset=0, whence=2)
f.write('Hello, world.')
with gfile.GFile('ram://c.txt', 'r') as f:
self.assertEqual(f.read(), 'Hello, world.' * 2)
def test_list_dir(self):
for i in range(10):
with gfile.GFile('ram://a/b/%d.txt' % i, 'w') as f:
f.write('')
with gfile.GFile('ram://c/b/%d.txt' % i, 'w') as f:
f.write('')
matches = ['%d.txt' % i for i in range(10)]
self.assertEqual(gfile.ListDirectory('ram://a/b/'), matches)
def test_glob(self):
for i in range(10):
with gfile.GFile('ram://a/b/%d.txt' % i, 'w') as f:
f.write('')
with gfile.GFile('ram://c/b/%d.txt' % i, 'w') as f:
f.write('')
matches = ['ram://a/b/%d.txt' % i for i in range(10)]
self.assertEqual(gfile.Glob('ram://a/b/*'), matches)
matches = []
self.assertEqual(gfile.Glob('ram://b/b/*'), matches)
matches = ['ram://c/b/%d.txt' % i for i in range(10)]
self.assertEqual(gfile.Glob('ram://c/b/*'), matches)
def test_file_exists(self):
with gfile.GFile('ram://exists/a/b/c.txt', 'w') as f:
f.write('')
self.assertTrue(gfile.Exists('ram://exists/a'))
self.assertTrue(gfile.Exists('ram://exists/a/b'))
self.assertTrue(gfile.Exists('ram://exists/a/b/c.txt'))
self.assertFalse(gfile.Exists('ram://exists/b'))
self.assertFalse(gfile.Exists('ram://exists/a/c'))
self.assertFalse(gfile.Exists('ram://exists/a/b/k'))
def test_estimator(self):
def model_fn(features, labels, mode, params):
del params
x = core_layers.dense(features, 100)
x = core_layers.dense(x, 100)
x = core_layers.dense(x, 100)
x = core_layers.dense(x, 100)
y = core_layers.dense(x, 1)
loss = losses.mean_squared_error(labels, y)
opt = adam.AdamOptimizer(learning_rate=0.1)
train_op = opt.minimize(
loss, global_step=training_util.get_or_create_global_step())
return EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
def input_fn():
batch_size = 128
return (constant_op.constant(np.random.randn(batch_size, 100),
dtype=dtypes.float32),
constant_op.constant(np.random.randn(batch_size, 1),
dtype=dtypes.float32))
config = RunConfig(
model_dir='ram://estimator-0/', save_checkpoints_steps=1)
estimator = Estimator(config=config, model_fn=model_fn)
estimator.train(input_fn=input_fn, steps=10)
estimator.train(input_fn=input_fn, steps=10)
estimator.train(input_fn=input_fn, steps=10)
estimator.train(input_fn=input_fn, steps=10)
def test_savedmodel(self):
class MyModule(module.Module):
@def_function.function(input_signature=[])
def foo(self):
return constant_op.constant([1])
saved_model.save(MyModule(), 'ram://my_module')
loaded = saved_model.load('ram://my_module')
self.assertAllEqual(loaded.foo(), [1])
if __name__ == '__main__':
test.main()
| tensorflow/tensorflow | tensorflow/core/platform/ram_file_system_test.py | Python | apache-2.0 | 5,699 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to run the tests."""
from __future__ import print_function
import sys
import unittest
# Change PYTHONPATH to include dependencies.
sys.path.insert(0, '.')
import utils.dependencies # pylint: disable=wrong-import-position
if __name__ == '__main__':
print('Using Python version {0!s}'.format(sys.version))
fail_unless_has_test_file = '--fail-unless-has-test-file' in sys.argv
setattr(unittest, 'fail_unless_has_test_file', fail_unless_has_test_file)
if fail_unless_has_test_file:
# Remove --fail-unless-has-test-file otherwise it will conflict with
# the argparse tests.
sys.argv.remove('--fail-unless-has-test-file')
dependency_helper = utils.dependencies.DependencyHelper()
if not dependency_helper.CheckTestDependencies():
sys.exit(1)
test_suite = unittest.TestLoader().discover('tests', pattern='*.py')
test_results = unittest.TextTestRunner(verbosity=2).run(test_suite)
if not test_results.wasSuccessful():
sys.exit(1)
| log2timeline/dfwinreg | run_tests.py | Python | apache-2.0 | 1,027 |
#!/usr/bin/env python
import time
from concurrent.futures import ThreadPoolExecutor
from eucaops import Eucaops
from eucaops import S3ops
from eutester.eutestcase import EutesterTestCase
class WalrusConcurrent(EutesterTestCase):
def __init__(self):
self.setuptestcase()
self.setup_parser()
self.parser.add_argument("-n", "--number", type=int, default=100)
self.parser.add_argument("-c", "--concurrent", type=int, default=10)
self.parser.add_argument("-s", "--size", type=int, default=1024)
self.get_args()
# Setup basic eutester object
if self.args.region:
self.tester = S3ops( credpath=self.args.credpath, region=self.args.region)
else:
self.tester = Eucaops( credpath=self.args.credpath, config_file=self.args.config,password=self.args.password)
self.start = time.time()
self.bucket_name = "concurrency-" + str(int(self.start))
self.tester.create_bucket(self.bucket_name)
def clean_method(self):
self.tester.clear_bucket(self.bucket_name)
def Concurrent(self):
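# Upload the requested number of randomly generated objects in parallel,
# report the elapsed time for the upload phase, then submit deletes for the
# uploaded objects.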
key_payload = self.tester.id_generator(self.args.size)
thread_count = self.args.number
thread_pool = []
with ThreadPoolExecutor(max_workers=thread_count) as executor:
for i in xrange(thread_count):
thread_pool.append(executor.submit(self.tester.upload_object, bucket_name=self.bucket_name, key_name="test" + str(i), contents=key_payload))
end = time.time()
total = end - self.start
self.tester.debug("\nExecution time: {0}\n# of Objects: {1}\nObject Size: {2}B\nConcurrency Level of {3}".format(
total, self.args.number, self.args.size, self.args.concurrent))
# Submit deletes into a separate list so thread_pool is not mutated while
# it is being iterated.
delete_pool = []
with ThreadPoolExecutor(max_workers=thread_count) as executor:
for uploaded in thread_pool:
delete_pool.append(executor.submit(self.tester.delete_object, uploaded))
if __name__ == "__main__":
testcase = WalrusConcurrent()
### Use the list of tests passed from config/command line to determine what subset of tests to run
### or use a predefined list
list = testcase.args.tests or ["Concurrent"]
### Convert test suite methods to EutesterUnitTest objects
unit_list = [ ]
for test in list:
unit_list.append( testcase.create_testunit_by_name(test) )
### Run the EutesterUnitTest objects
result = testcase.run_test_case_list(unit_list)
exit(result) | nagyistoce/eutester | testcases/cloud_user/s3/walrus_concurrency.py | Python | bsd-2-clause | 2,530 |
#
# Copyright (c) 2005
# The President and Fellows of Harvard College.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the University nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# Author: Geoffrey Mainland <[email protected]>
#
__all__ = ["message", "packet", "utils", "tossim"]
| jf87/smap | python/tinyos/__init__.py | Python | bsd-2-clause | 1,659 |
#!/usr/bin/env vpython
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A script to recover devices in a known bad state."""
import argparse
import glob
import logging
import os
import signal
import sys
import psutil
if __name__ == '__main__':
sys.path.append(
os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..')))
from devil.android import device_denylist
from devil.android import device_errors
from devil.android import device_utils
from devil.android.sdk import adb_wrapper
from devil.android.tools import device_status
from devil.android.tools import script_common
from devil.utils import logging_common
from devil.utils import lsusb
# TODO(jbudorick): Resolve this after experimenting w/ disabling the USB reset.
from devil.utils import reset_usb # pylint: disable=unused-import
logger = logging.getLogger(__name__)
from py_utils import modules_util
# Script depends on features from psutil version 2.0 or higher.
modules_util.RequireVersion(psutil, '2.0')
def KillAllAdb():
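# Kill every adb process on the host, escalating from SIGTERM through
# SIGQUIT to SIGKILL, so that a fresh adb server can be started afterwards.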
def get_all_adb():
for p in psutil.process_iter():
try:
# Retrieve all required process infos at once.
pinfo = p.as_dict(attrs=['pid', 'name', 'cmdline'])
if pinfo['name'] == 'adb':
pinfo['cmdline'] = ' '.join(pinfo['cmdline'])
yield p, pinfo
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
for sig in [signal.SIGTERM, signal.SIGQUIT, signal.SIGKILL]:
for p, pinfo in get_all_adb():
try:
pinfo['signal'] = sig
logger.info('kill %(signal)s %(pid)s (%(name)s [%(cmdline)s])', pinfo)
p.send_signal(sig)
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
for _, pinfo in get_all_adb():
try:
logger.error('Unable to kill %(pid)s (%(name)s [%(cmdline)s])', pinfo)
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
def TryAuth(device):
"""Uses anything in ~/.android/ that looks like a key to auth with the device.
Args:
device: The DeviceUtils device to attempt to auth.
Returns:
True if device successfully authed.
"""
possible_keys = glob.glob(os.path.join(adb_wrapper.ADB_HOST_KEYS_DIR, '*key'))
if len(possible_keys) <= 1:
logger.warning('Only %d ADB keys available. Not forcing auth.',
len(possible_keys))
return False
KillAllAdb()
adb_wrapper.AdbWrapper.StartServer(keys=possible_keys)
new_state = device.adb.GetState()
if new_state != 'device':
logger.error('Auth failed. Device %s still stuck in %s.', str(device),
new_state)
return False
# It worked! Now register the host's default ADB key on the device so we don't
# have to do all that again.
pub_key = os.path.join(adb_wrapper.ADB_HOST_KEYS_DIR, 'adbkey.pub')
if not os.path.exists(pub_key): # This really shouldn't happen.
logger.error('Default ADB key not available at %s.', pub_key)
return False
with open(pub_key) as f:
pub_key_contents = f.read()
try:
device.WriteFile(adb_wrapper.ADB_KEYS_FILE, pub_key_contents, as_root=True)
except (device_errors.CommandTimeoutError, device_errors.CommandFailedError,
device_errors.DeviceUnreachableError):
logger.exception('Unable to write default ADB key to %s.', str(device))
return False
return True
def RecoverDevice(device, denylist, should_reboot=lambda device: True):
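# Recover a single device: skip it if denylisted, retry adb auth if it is
# unauthorized, and otherwise reboot it when should_reboot allows (restoring
# root afterwards if the device previously had root).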
if device_status.IsDenylisted(device.adb.GetDeviceSerial(), denylist):
logger.debug('%s is denylisted, skipping recovery.', str(device))
return
if device.adb.GetState() == 'unauthorized' and TryAuth(device):
logger.info('Successfully authed device %s!', str(device))
return
if should_reboot(device):
should_restore_root = device.HasRoot()
try:
device.WaitUntilFullyBooted(retries=0)
except (device_errors.CommandTimeoutError, device_errors.CommandFailedError,
device_errors.DeviceUnreachableError):
logger.exception(
'Failure while waiting for %s. '
'Attempting to recover.', str(device))
try:
try:
device.Reboot(block=False, timeout=5, retries=0)
except device_errors.CommandTimeoutError:
logger.warning(
'Timed out while attempting to reboot %s normally. '
'Attempting alternative reboot.', str(device))
# The device drops offline before we can grab the exit code, so
# we don't check for status.
try:
device.adb.Root()
finally:
# We are already in a failure mode, attempt to reboot regardless of
# what device.adb.Root() returns. If the sysrq reboot fails an
# exception will be thrown at that level.
device.adb.Shell(
'echo b > /proc/sysrq-trigger',
expect_status=None,
timeout=5,
retries=0)
except (device_errors.CommandFailedError,
device_errors.DeviceUnreachableError):
logger.exception('Failed to reboot %s.', str(device))
if denylist:
denylist.Extend([device.adb.GetDeviceSerial()], reason='reboot_failure')
except device_errors.CommandTimeoutError:
logger.exception('Timed out while rebooting %s.', str(device))
if denylist:
denylist.Extend([device.adb.GetDeviceSerial()], reason='reboot_timeout')
try:
device.WaitUntilFullyBooted(
retries=0, timeout=device.REBOOT_DEFAULT_TIMEOUT)
if should_restore_root:
device.EnableRoot()
except (device_errors.CommandFailedError,
device_errors.DeviceUnreachableError):
logger.exception('Failure while waiting for %s.', str(device))
if denylist:
denylist.Extend([device.adb.GetDeviceSerial()], reason='reboot_failure')
except device_errors.CommandTimeoutError:
logger.exception('Timed out while waiting for %s.', str(device))
if denylist:
denylist.Extend([device.adb.GetDeviceSerial()], reason='reboot_timeout')
def RecoverDevices(devices, denylist, enable_usb_reset=False):
"""Attempts to recover any inoperable devices in the provided list.
Args:
devices: The list of devices to attempt to recover.
denylist: The current device denylist, which will be used then
reset.
"""
statuses = device_status.DeviceStatus(devices, denylist)
should_restart_usb = set(
status['serial'] for status in statuses
if (not status['usb_status'] or status['adb_status'] in ('offline',
'missing')))
should_restart_adb = should_restart_usb.union(
set(status['serial'] for status in statuses
if status['adb_status'] == 'unauthorized'))
should_reboot_device = should_restart_usb.union(
set(status['serial'] for status in statuses if status['denylisted']))
logger.debug('Should restart USB for:')
for d in should_restart_usb:
logger.debug(' %s', d)
logger.debug('Should restart ADB for:')
for d in should_restart_adb:
logger.debug(' %s', d)
logger.debug('Should reboot:')
for d in should_reboot_device:
logger.debug(' %s', d)
if denylist:
denylist.Reset()
if should_restart_adb:
KillAllAdb()
adb_wrapper.AdbWrapper.StartServer()
for serial in should_restart_usb:
try:
# TODO(crbug.com/642194): Resetting may be causing more harm
# (specifically, kernel panics) than it does good.
if enable_usb_reset:
reset_usb.reset_android_usb(serial)
else:
logger.warning('USB reset disabled for %s (crbug.com/642914)', serial)
except IOError:
logger.exception('Unable to reset USB for %s.', serial)
if denylist:
denylist.Extend([serial], reason='USB failure')
except device_errors.DeviceUnreachableError:
logger.exception('Unable to reset USB for %s.', serial)
if denylist:
denylist.Extend([serial], reason='offline')
device_utils.DeviceUtils.parallel(devices).pMap(
RecoverDevice,
denylist,
should_reboot=lambda device: device.serial in should_reboot_device)
def main():
parser = argparse.ArgumentParser()
logging_common.AddLoggingArguments(parser)
script_common.AddEnvironmentArguments(parser)
parser.add_argument('--denylist-file', help='Device denylist JSON file.')
parser.add_argument(
'--known-devices-file',
action='append',
default=[],
dest='known_devices_files',
help='Path to known device lists.')
parser.add_argument(
'--enable-usb-reset', action='store_true', help='Reset USB if necessary.')
args = parser.parse_args()
logging_common.InitializeLogging(args)
script_common.InitializeEnvironment(args)
denylist = (device_denylist.Denylist(args.denylist_file)
if args.denylist_file else None)
expected_devices = device_status.GetExpectedDevices(args.known_devices_files)
usb_devices = set(lsusb.get_android_devices())
devices = [
device_utils.DeviceUtils(s) for s in expected_devices.union(usb_devices)
]
RecoverDevices(devices, denylist, enable_usb_reset=args.enable_usb_reset)
if __name__ == '__main__':
sys.exit(main())
| catapult-project/catapult | devil/devil/android/tools/device_recovery.py | Python | bsd-3-clause | 9,284 |
from hamcrest.core.base_matcher import BaseMatcher
from hamcrest.core.helpers.hasmethod import hasmethod
from hamcrest.core.helpers.wrap_matcher import wrap_matcher
__author__ = "Jon Reid"
__copyright__ = "Copyright 2011 hamcrest.org"
__license__ = "BSD, see License.txt"
class IsDictContainingEntries(BaseMatcher):
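# Matcher backing has_entries(): verifies that the examined mapping contains
# every expected key and that each corresponding value satisfies its matcher.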
def __init__(self, value_matchers):
self.value_matchers = sorted(value_matchers.items())
def _not_a_dictionary(self, dictionary, mismatch_description):
if mismatch_description:
mismatch_description.append_description_of(dictionary) \
.append_text(' is not a mapping object')
return False
def matches(self, dictionary, mismatch_description=None):
for key, value_matcher in self.value_matchers:
try:
if not key in dictionary:
if mismatch_description:
mismatch_description.append_text('no ') \
.append_description_of(key) \
.append_text(' key in ') \
.append_description_of(dictionary)
return False
except TypeError:
return self._not_a_dictionary(dictionary, mismatch_description)
try:
actual_value = dictionary[key]
except TypeError:
return self._not_a_dictionary(dictionary, mismatch_description)
if not value_matcher.matches(actual_value):
if mismatch_description:
mismatch_description.append_text('value for ') \
.append_description_of(key) \
.append_text(' ')
value_matcher.describe_mismatch(actual_value, mismatch_description)
return False
return True
def describe_mismatch(self, item, mismatch_description):
self.matches(item, mismatch_description)
def describe_keyvalue(self, index, value, description):
"""Describes key-value pair at given index."""
description.append_description_of(index) \
.append_text(': ') \
.append_description_of(value)
def describe_to(self, description):
description.append_text('a dictionary containing {')
first = True
for key, value in self.value_matchers:
if not first:
description.append_text(', ')
self.describe_keyvalue(key, value, description)
first = False
description.append_text('}')
def has_entries(*keys_valuematchers, **kv_args):
"""Matches if dictionary contains entries satisfying a dictionary of keys
and corresponding value matchers.
:param matcher_dict: A dictionary mapping keys to associated value matchers,
or to expected values for
:py:func:`~hamcrest.core.core.isequal.equal_to` matching.
Note that the keys must be actual keys, not matchers. Any value argument
that is not a matcher is implicitly wrapped in an
:py:func:`~hamcrest.core.core.isequal.equal_to` matcher to check for
equality.
Examples::
has_entries({'foo':equal_to(1), 'bar':equal_to(2)})
has_entries({'foo':1, 'bar':2})
``has_entries`` also accepts a list of keyword arguments:
.. function:: has_entries(keyword1=value_matcher1[, keyword2=value_matcher2[, ...]])
:param keyword1: A keyword to look up.
:param valueMatcher1: The matcher to satisfy for the value, or an expected
value for :py:func:`~hamcrest.core.core.isequal.equal_to` matching.
Examples::
has_entries(foo=equal_to(1), bar=equal_to(2))
has_entries(foo=1, bar=2)
Finally, ``has_entries`` also accepts a list of alternating keys and their
value matchers:
.. function:: has_entries(key1, value_matcher1[, ...])
:param key1: A key (not a matcher) to look up.
:param valueMatcher1: The matcher to satisfy for the value, or an expected
value for :py:func:`~hamcrest.core.core.isequal.equal_to` matching.
Examples::
has_entries('foo', equal_to(1), 'bar', equal_to(2))
has_entries('foo', 1, 'bar', 2)
"""
if len(keys_valuematchers) == 1:
try:
base_dict = keys_valuematchers[0].copy()
for key in base_dict:
base_dict[key] = wrap_matcher(base_dict[key])
except AttributeError:
raise ValueError('single-argument calls to has_entries must pass a dict as the argument')
else:
if len(keys_valuematchers) % 2:
raise ValueError('has_entries requires key-value pairs')
base_dict = {}
for index in range(int(len(keys_valuematchers) / 2)):
base_dict[keys_valuematchers[2 * index]] = wrap_matcher(keys_valuematchers[2 * index + 1])
for key, value in kv_args.items():
base_dict[key] = wrap_matcher(value)
return IsDictContainingEntries(base_dict)
| msabramo/PyHamcrest | src/hamcrest/library/collection/isdict_containingentries.py | Python | bsd-3-clause | 5,168 |
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""GYP backend that generates Eclipse CDT settings files.
This backend DOES NOT generate Eclipse CDT projects. Instead, it generates XML
files that can be imported into an Eclipse CDT project. The XML file contains a
list of include paths and symbols (i.e. defines).
Because a full .cproject definition is not created by this generator, it's not
possible to properly define the include dirs and symbols for each file
individually. Instead, one set of includes/symbols is generated for the entire
project. This works fairly well (and is a vast improvement in general), but may
still result in a few indexer issues here and there.
This generator has no automated tests, so expect it to be broken.
"""
from xml.sax.saxutils import escape
import os.path
import subprocess
import gyp
import gyp.common
import gyp.msvs_emulation
import shlex
import xml.etree.cElementTree as ET
generator_wants_static_library_dependencies_adjusted = False
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'PRODUCT_DIR', 'LIB_DIR', 'SHARED_LIB_DIR']:
# Some gyp steps fail if these are empty(!), so we convert them to variables
generator_default_variables[dirname] = '$' + dirname
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
# Include dirs will occasionally use the SHARED_INTERMEDIATE_DIR variable as
# part of the path when dealing with generated headers. This value will be
# replaced dynamically for each configuration.
generator_default_variables['SHARED_INTERMEDIATE_DIR'] = \
'$SHARED_INTERMEDIATE_DIR'
def CalculateVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
for key, val in generator_flags.items():
default_variables.setdefault(key, val)
flavor = gyp.common.GetFlavor(params)
default_variables.setdefault('OS', flavor)
if flavor == 'win':
# Copy additional generator configuration data from VS, which is shared
# by the Eclipse generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
if generator_flags.get('adjust_static_libraries', False):
global generator_wants_static_library_dependencies_adjusted
generator_wants_static_library_dependencies_adjusted = True
def GetAllIncludeDirectories(target_list, target_dicts,
shared_intermediate_dirs, config_name, params,
compiler_path):
"""Calculate the set of include directories to be used.
Returns:
A list including all the include_dirs specified for every target, followed
by any include directories that were added as cflag compiler options.
"""
gyp_includes_set = set()
compiler_includes_list = []
# Find compiler's default include dirs.
if compiler_path:
command = shlex.split(compiler_path)
command.extend(['-E', '-xc++', '-v', '-'])
proc = subprocess.Popen(args=command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = proc.communicate()[1]
# Extract the list of include dirs from the output, which has this format:
# ...
# #include "..." search starts here:
# #include <...> search starts here:
# /usr/include/c++/4.6
# /usr/local/include
# End of search list.
# ...
in_include_list = False
for line in output.splitlines():
if line.startswith('#include'):
in_include_list = True
continue
if line.startswith('End of search list.'):
break
if in_include_list:
include_dir = line.strip()
if include_dir not in compiler_includes_list:
compiler_includes_list.append(include_dir)
flavor = gyp.common.GetFlavor(params)
if flavor == 'win':
generator_flags = params.get('generator_flags', {})
for target_name in target_list:
target = target_dicts[target_name]
if config_name in target['configurations']:
config = target['configurations'][config_name]
# Look for any include dirs that were explicitly added via cflags. This
# may be done in gyp files to force certain includes to come at the end.
# TODO(jgreenwald): Change the gyp files to not abuse cflags for this, and
# remove this.
if flavor == 'win':
msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
cflags = msvs_settings.GetCflags(config_name)
else:
cflags = config['cflags']
for cflag in cflags:
if cflag.startswith('-I'):
include_dir = cflag[2:]
if include_dir not in compiler_includes_list:
compiler_includes_list.append(include_dir)
# Find standard gyp include dirs.
      if 'include_dirs' in config:
include_dirs = config['include_dirs']
for shared_intermediate_dir in shared_intermediate_dirs:
for include_dir in include_dirs:
include_dir = include_dir.replace('$SHARED_INTERMEDIATE_DIR',
shared_intermediate_dir)
if not os.path.isabs(include_dir):
base_dir = os.path.dirname(target_name)
include_dir = base_dir + '/' + include_dir
include_dir = os.path.abspath(include_dir)
gyp_includes_set.add(include_dir)
# Generate a list that has all the include dirs.
all_includes_list = list(gyp_includes_set)
all_includes_list.sort()
for compiler_include in compiler_includes_list:
    if compiler_include not in gyp_includes_set:
all_includes_list.append(compiler_include)
# All done.
return all_includes_list
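# --- Illustrative sketch (not part of the original generator) ---
# The stderr parsing done in GetAllIncludeDirectories() above, distilled into
# a standalone helper for clarity. It assumes the same "-E -xc++ -v -" output
# format documented in that function; the generator itself does not use it.
def _example_parse_compiler_search_dirs(verbose_stderr):
  dirs = []
  in_include_list = False
  for line in verbose_stderr.splitlines():
    if line.startswith('#include'):
      # Seen "#include <...> search starts here:"; directories follow.
      in_include_list = True
      continue
    if line.startswith('End of search list.'):
      break
    if in_include_list:
      dirs.append(line.strip())
  return dirs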
def GetCompilerPath(target_list, data, options):
"""Determine a command that can be used to invoke the compiler.
Returns:
    The compiler configured in the gyp project's make settings, if any;
    otherwise the compiler named by the CC_target, CC or CXX environment
    variables; otherwise 'gcc'.
"""
# First, see if the compiler is configured in make's settings.
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings_dict = data[build_file].get('make_global_settings', {})
for key, value in make_global_settings_dict:
if key in ['CC', 'CXX']:
return os.path.join(options.toplevel_dir, value)
# Check to see if the compiler was specified as an environment variable.
for key in ['CC_target', 'CC', 'CXX']:
compiler = os.environ.get(key)
if compiler:
return compiler
return 'gcc'
def GetAllDefines(target_list, target_dicts, data, config_name, params,
compiler_path):
"""Calculate the defines for a project.
Returns:
    A dict that includes explicit defines declared in gyp files along with all of
the default defines that the compiler uses.
"""
# Get defines declared in the gyp files.
all_defines = {}
flavor = gyp.common.GetFlavor(params)
if flavor == 'win':
generator_flags = params.get('generator_flags', {})
for target_name in target_list:
target = target_dicts[target_name]
if flavor == 'win':
msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
extra_defines = msvs_settings.GetComputedDefines(config_name)
else:
extra_defines = []
if config_name in target['configurations']:
config = target['configurations'][config_name]
target_defines = config['defines']
else:
target_defines = []
for define in target_defines + extra_defines:
split_define = define.split('=', 1)
if len(split_define) == 1:
split_define.append('1')
if split_define[0].strip() in all_defines:
# Already defined
continue
all_defines[split_define[0].strip()] = split_define[1].strip()
# Get default compiler defines (if possible).
if flavor == 'win':
return all_defines # Default defines already processed in the loop above.
if compiler_path:
command = shlex.split(compiler_path)
command.extend(['-E', '-dM', '-'])
cpp_proc = subprocess.Popen(args=command, cwd='.',
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
cpp_output = cpp_proc.communicate()[0]
cpp_lines = cpp_output.split('\n')
for cpp_line in cpp_lines:
if not cpp_line.strip():
continue
cpp_line_parts = cpp_line.split(' ', 2)
key = cpp_line_parts[1]
if len(cpp_line_parts) >= 3:
val = cpp_line_parts[2]
else:
val = '1'
all_defines[key] = val
return all_defines
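# --- Illustrative sketch (not part of the original generator) ---
# How a single non-empty line of "cc -E -dM" output is split by the loop in
# GetAllDefines() above; the macros named in the comment are just examples.
def _example_parse_dM_line(cpp_line):
  # '#define __STDC__ 1' -> ('__STDC__', '1'); '#define FOO' -> ('FOO', '1')
  cpp_line_parts = cpp_line.split(' ', 2)
  key = cpp_line_parts[1]
  val = cpp_line_parts[2] if len(cpp_line_parts) >= 3 else '1'
  return key, val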
def WriteIncludePaths(out, eclipse_langs, include_dirs):
"""Write the includes section of a CDT settings export file."""
out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \
'settingswizards.IncludePaths">\n')
out.write(' <language name="holder for library settings"></language>\n')
for lang in eclipse_langs:
out.write(' <language name="%s">\n' % lang)
for include_dir in include_dirs:
out.write(' <includepath workspace_path="false">%s</includepath>\n' %
include_dir)
out.write(' </language>\n')
out.write(' </section>\n')
def WriteMacros(out, eclipse_langs, defines):
"""Write the macros section of a CDT settings export file."""
out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \
'settingswizards.Macros">\n')
out.write(' <language name="holder for library settings"></language>\n')
for lang in eclipse_langs:
out.write(' <language name="%s">\n' % lang)
    for key in sorted(defines):
out.write(' <macro><name>%s</name><value>%s</value></macro>\n' %
(escape(key), escape(defines[key])))
out.write(' </language>\n')
out.write(' </section>\n')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name):
options = params['options']
generator_flags = params.get('generator_flags', {})
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.join(generator_flags.get('output_dir', 'out'),
config_name)
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
# Ninja uses out/Debug/gen while make uses out/Debug/obj/gen as the
# SHARED_INTERMEDIATE_DIR. Include both possible locations.
shared_intermediate_dirs = [os.path.join(toplevel_build, 'obj', 'gen'),
os.path.join(toplevel_build, 'gen')]
GenerateCdtSettingsFile(target_list,
target_dicts,
data,
params,
config_name,
os.path.join(toplevel_build,
'eclipse-cdt-settings.xml'),
options,
shared_intermediate_dirs)
GenerateClasspathFile(target_list,
target_dicts,
options.toplevel_dir,
toplevel_build,
os.path.join(toplevel_build,
'eclipse-classpath.xml'))
def GenerateCdtSettingsFile(target_list, target_dicts, data, params,
config_name, out_name, options,
shared_intermediate_dirs):
gyp.common.EnsureDirExists(out_name)
with open(out_name, 'w') as out:
out.write('<?xml version="1.0" encoding="UTF-8"?>\n')
out.write('<cdtprojectproperties>\n')
eclipse_langs = ['C++ Source File', 'C Source File', 'Assembly Source File',
'GNU C++', 'GNU C', 'Assembly']
compiler_path = GetCompilerPath(target_list, data, options)
include_dirs = GetAllIncludeDirectories(target_list, target_dicts,
shared_intermediate_dirs,
config_name, params, compiler_path)
WriteIncludePaths(out, eclipse_langs, include_dirs)
defines = GetAllDefines(target_list, target_dicts, data, config_name,
params, compiler_path)
WriteMacros(out, eclipse_langs, defines)
out.write('</cdtprojectproperties>\n')
def GenerateClasspathFile(target_list, target_dicts, toplevel_dir,
toplevel_build, out_name):
'''Generates a classpath file suitable for symbol navigation and code
completion of Java code (such as in Android projects) by finding all
.java and .jar files used as action inputs.'''
gyp.common.EnsureDirExists(out_name)
result = ET.Element('classpath')
def AddElements(kind, paths):
# First, we need to normalize the paths so they are all relative to the
# toplevel dir.
rel_paths = set()
for path in paths:
if os.path.isabs(path):
rel_paths.add(os.path.relpath(path, toplevel_dir))
else:
rel_paths.add(path)
for path in sorted(rel_paths):
entry_element = ET.SubElement(result, 'classpathentry')
entry_element.set('kind', kind)
entry_element.set('path', path)
AddElements('lib', GetJavaJars(target_list, target_dicts, toplevel_dir))
AddElements('src', GetJavaSourceDirs(target_list, target_dicts, toplevel_dir))
# Include the standard JRE container and a dummy out folder
AddElements('con', ['org.eclipse.jdt.launching.JRE_CONTAINER'])
# Include a dummy out folder so that Eclipse doesn't use the default /bin
# folder in the root of the project.
AddElements('output', [os.path.join(toplevel_build, '.eclipse-java-build')])
ET.ElementTree(result).write(out_name)
def GetJavaJars(target_list, target_dicts, toplevel_dir):
'''Generates a sequence of all .jars used as inputs.'''
for target_name in target_list:
target = target_dicts[target_name]
for action in target.get('actions', []):
for input_ in action['inputs']:
if os.path.splitext(input_)[1] == '.jar' and not input_.startswith('$'):
if os.path.isabs(input_):
yield input_
else:
yield os.path.join(os.path.dirname(target_name), input_)
def GetJavaSourceDirs(target_list, target_dicts, toplevel_dir):
'''Generates a sequence of all likely java package root directories.'''
for target_name in target_list:
target = target_dicts[target_name]
for action in target.get('actions', []):
for input_ in action['inputs']:
if (os.path.splitext(input_)[1] == '.java' and
not input_.startswith('$')):
dir_ = os.path.dirname(os.path.join(os.path.dirname(target_name),
input_))
# If there is a parent 'src' or 'java' folder, navigate up to it -
# these are canonical package root names in Chromium. This will
# break if 'src' or 'java' exists in the package structure. This
# could be further improved by inspecting the java file for the
# package name if this proves to be too fragile in practice.
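          # For example, an input of 'java/src/org/chromium/Foo.java' under a
          # target in 'base/android' starts the walk at
          # 'base/android/java/src/org/chromium' and yields
          # 'base/android/java/src'.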
parent_search = dir_
while os.path.basename(parent_search) not in ['src', 'java']:
parent_search, _ = os.path.split(parent_search)
if not parent_search or parent_search == toplevel_dir:
# Didn't find a known root, just return the original path
yield dir_
break
else:
yield parent_search
def GenerateOutput(target_list, target_dicts, data, params):
"""Generate an XML settings file that can be imported into a CDT project."""
if params['options'].generator_output:
raise NotImplementedError("--generator_output not implemented for eclipse")
user_config = params.get('generator_flags', {}).get('config', None)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data, params,
user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name)
| jamfang/Agora-WebRTC-Live-Broadcasting-Demo | node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/eclipse.py | Python | mit | 17,013 |
"""
Downloads bootloader content for all arches, for use when the user doesn't want to supply their own.
Copyright 2009, Red Hat, Inc
Michael DeHaan <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import os
import urlgrabber
import clogger
class ContentDownloader:
def __init__(self,config,logger=None):
"""
Constructor
"""
self.config = config
self.settings = config.settings()
if logger is None:
logger = clogger.Logger()
self.logger = logger
def run(self,force=False):
"""
        Download bootloader content for all of the latest bootloaders, since the user
        has chosen not to supply their own. You may ask "why not get this from yum?":
        Fedora has no IA64 repo, for instance, we also want this to work on Debian, and
        we do not want folks to have to install a cross compiler. Those who dislike
        this approach can still supply their cross-arch bootloader content manually.
"""
content_server = "http://mdehaan.fedorapeople.org/loaders"
dest = "/var/lib/cobbler/loaders"
files = (
( "%s/README" % content_server, "%s/README" % dest ),
( "%s/COPYING.elilo" % content_server, "%s/COPYING.elilo" % dest ),
( "%s/COPYING.yaboot" % content_server, "%s/COPYING.yaboot" % dest),
( "%s/COPYING.syslinux" % content_server, "%s/COPYING.syslinux" % dest),
( "%s/elilo-3.8-ia64.efi" % content_server, "%s/elilo-ia64.efi" % dest ),
( "%s/yaboot-1.3.14-12" % content_server, "%s/yaboot" % dest),
( "%s/pxelinux.0-3.61" % content_server, "%s/pxelinux.0" % dest),
( "%s/menu.c32-3.61" % content_server, "%s/menu.c32" % dest),
)
self.logger.info("downloading content required to netboot all arches")
for f in files:
src = f[0]
dst = f[1]
if os.path.exists(dst) and not force:
self.logger.info("path %s already exists, not overwriting existing content, use --force if you wish to update" % dst)
continue
self.logger.info("downloading %s to %s" % (src,dst))
urlgrabber.urlgrab(src,dst)
return True
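# A minimal usage sketch (not part of the original module); "config" here is
# assumed to be the cobbler config object that callers normally pass in:
#
#   downloader = ContentDownloader(config)
#   downloader.run(force=False)   # use force=True to refresh existing files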
| elsonrodriguez/madhatter | cobbler/action_dlcontent.py | Python | gpl-2.0 | 2,907 |
#
# The patch applied in http://bugs.python.org/issue1207589
# changes the structure of rmenu_specs in EditorWindow.py. This breaks a lot of extensions.
# This file is a re-factoring of rmenu code for other extensions to use.
#
| technologiescollege/Blockly-rduino-communication | scripts_XP/Lib/site-packages/idlexlib/extensions/_rmenu.py | Python | gpl-3.0 | 231 |
"""
Default settings for the ``mezzanine.generic`` app. Each of these can be
overridden in your project's settings module, just like regular
Django settings. The ``editable`` argument for each controls whether
the setting is editable via Django's admin.
Thought should be given to how a setting is actually used before
making it editable, as it may be inappropriate: for example, settings
that are only read during startup shouldn't be editable, since changing
them would require an application reload.
"""
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import register_setting
generic_comments = getattr(settings, "COMMENTS_APP", "") == "mezzanine.generic"
if generic_comments:
register_setting(
name="COMMENTS_ACCOUNT_REQUIRED",
label=_("Accounts required for commenting"),
description=_("If ``True``, users must log in to comment."),
editable=True,
default=False,
)
register_setting(
name="COMMENTS_DISQUS_SHORTNAME",
label=_("Disqus shortname"),
description=_("Shortname for the http://disqus.com comments "
"service."),
editable=True,
default="",
)
register_setting(
name="COMMENTS_DISQUS_API_PUBLIC_KEY",
label=_("Disqus public key"),
description=_("Public key for http://disqus.com developer API"),
editable=True,
default="",
)
register_setting(
name="COMMENTS_DISQUS_API_SECRET_KEY",
label=_("Disqus secret key"),
description=_("Secret key for http://disqus.com developer API"),
editable=True,
default="",
)
register_setting(
name="COMMENTS_DEFAULT_APPROVED",
label=_("Auto-approve comments"),
description=_("If ``True``, built-in comments are approved by "
"default."),
editable=True,
default=True,
)
register_setting(
name="COMMENT_FILTER",
description=_("Dotted path to the function to call on a comment's "
"value before it is rendered to the template."),
editable=False,
default=None,
)
register_setting(
name="COMMENTS_NOTIFICATION_EMAILS",
label=_("Comment notification email addresses"),
description=_("A comma separated list of email addresses that "
"will receive an email notification each time a "
"new comment is posted on the site."),
editable=True,
default="",
)
register_setting(
name="COMMENTS_NUM_LATEST",
label=_("Admin comments"),
description=_("Number of latest comments shown in the admin "
"dashboard."),
editable=True,
default=5,
)
register_setting(
name="COMMENTS_UNAPPROVED_VISIBLE",
label=_("Show unapproved comments"),
description=_("If ``True``, comments that have ``is_public`` "
"unchecked will still be displayed, but replaced with a "
"``waiting to be approved`` message."),
editable=True,
default=True,
)
register_setting(
name="COMMENTS_REMOVED_VISIBLE",
label=_("Show removed comments"),
description=_("If ``True``, comments that have ``removed`` "
"checked will still be displayed, but replaced "
"with a ``removed`` message."),
editable=True,
default=True,
)
register_setting(
name="COMMENTS_USE_RATINGS",
description=_("If ``True``, comments can be rated."),
editable=False,
default=True,
)
register_setting(
name="RATINGS_ACCOUNT_REQUIRED",
label=_("Accounts required for rating"),
description=_("If ``True``, users must log in to rate content "
"such as blog posts and comments."),
editable=True,
default=False,
)
register_setting(
name="RATINGS_RANGE",
description=_("A sequence of integers that are valid ratings."),
editable=False,
default=range(getattr(settings, "RATINGS_MIN", 1),
getattr(settings, "RATINGS_MAX", 5) + 1),
)
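# A minimal usage sketch (not part of the original module), assuming
# ``COMMENTS_APP`` is set to ``mezzanine.generic`` so the settings above are
# registered. Registered settings are read back through
# ``mezzanine.conf.settings``, with the defaults applying until they are
# overridden in the project settings module or edited in the admin.
def _example_read_comment_settings():
    from mezzanine.conf import settings as conf_settings
    return (conf_settings.COMMENTS_NUM_LATEST,
            conf_settings.COMMENTS_ACCOUNT_REQUIRED)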
| orlenko/bccf | src/mezzanine/generic/defaults.py | Python | unlicense | 4,223 |
import glob
import logging
import os
from typing import Any, Dict, List, Optional
from django.conf import settings
from zerver.lib.storage import static_path
# See https://jackstromberg.com/2013/01/useraccountcontrol-attributeflag-values/
# for docs on what these values mean.
LDAP_USER_ACCOUNT_CONTROL_NORMAL = "512"
LDAP_USER_ACCOUNT_CONTROL_DISABLED = "514"
def generate_dev_ldap_dir(mode: str, num_users: int = 8) -> Dict[str, Dict[str, Any]]:
mode = mode.lower()
ldap_data = []
for i in range(1, num_users + 1):
name = f"LDAP User {i}"
email = f"ldapuser{i}@zulip.com"
phone_number = f"999999999{i}"
birthdate = f"19{i:02}-{i:02}-{i:02}"
ldap_data.append((name, email, phone_number, birthdate))
profile_images = []
for path in glob.glob(os.path.join(static_path("images/team"), "*")):
with open(path, "rb") as f:
profile_images.append(f.read())
ldap_dir = {}
for i, user_data in enumerate(ldap_data):
email = user_data[1].lower()
email_username = email.split("@")[0]
common_data = {
"cn": [user_data[0]],
"userPassword": [email_username],
"phoneNumber": [user_data[2]],
"birthDate": [user_data[3]],
}
if mode == "a":
ldap_dir["uid=" + email + ",ou=users,dc=zulip,dc=com"] = dict(
uid=[email],
thumbnailPhoto=[profile_images[i % len(profile_images)]],
userAccountControl=[LDAP_USER_ACCOUNT_CONTROL_NORMAL],
**common_data,
)
elif mode == "b":
ldap_dir["uid=" + email_username + ",ou=users,dc=zulip,dc=com"] = dict(
uid=[email_username],
jpegPhoto=[profile_images[i % len(profile_images)]],
**common_data,
)
elif mode == "c":
ldap_dir["uid=" + email_username + ",ou=users,dc=zulip,dc=com"] = dict(
uid=[email_username], email=[email], **common_data
)
return ldap_dir
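def _example_mode_a_entry() -> Dict[str, Dict[str, Any]]:
    # Illustrative only (not part of the original module): the approximate
    # shape of the single entry generate_dev_ldap_dir("a", 1) produces, with
    # the thumbnailPhoto bytes left out for brevity.
    return {
        "uid=ldapuser1@zulip.com,ou=users,dc=zulip,dc=com": {
            "uid": ["ldapuser1@zulip.com"],
            "cn": ["LDAP User 1"],
            "userPassword": ["ldapuser1"],
            "phoneNumber": ["9999999991"],
            "birthDate": ["1901-01-01"],
            "userAccountControl": [LDAP_USER_ACCOUNT_CONTROL_NORMAL],
        }
    }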
def init_fakeldap(
directory: Optional[Dict[str, Dict[str, List[str]]]] = None
) -> None: # nocoverage
    # We only use this in development. Importing mock inside
    # this function is an import-time optimization, which
    # avoids the expensive import of the mock module (slow
    # because its dependency pbr uses pkg_resources, which is
    # really slow to import).
from unittest import mock
from fakeldap import MockLDAP
# Silent `django_auth_ldap` logger in dev mode to avoid
# spammy user not found log messages.
ldap_auth_logger = logging.getLogger("django_auth_ldap")
ldap_auth_logger.setLevel(logging.CRITICAL)
fakeldap_logger = logging.getLogger("fakeldap")
fakeldap_logger.setLevel(logging.CRITICAL)
ldap_patcher = mock.patch("django_auth_ldap.config.ldap.initialize")
mock_initialize = ldap_patcher.start()
mock_ldap = MockLDAP()
mock_initialize.return_value = mock_ldap
mock_ldap.directory = directory or generate_dev_ldap_dir(
settings.FAKE_LDAP_MODE, settings.FAKE_LDAP_NUM_USERS
)
| rht/zulip | zerver/lib/dev_ldap_directory.py | Python | apache-2.0 | 3,149 |
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Add CycleTaskGroupObject.object
Revision ID: 26d9c9c91542
Revises: 19a67dc67c3
Create Date: 2014-07-15 21:49:34.073412
"""
# revision identifiers, used by Alembic.
revision = '26d9c9c91542'
down_revision = '19a67dc67c3'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('cycle_task_group_objects', sa.Column('object_id', sa.Integer(), nullable=False))
op.add_column('cycle_task_group_objects', sa.Column('object_type', sa.String(length=250), nullable=False))
op.execute('''
UPDATE cycle_task_group_objects
JOIN task_group_objects
ON cycle_task_group_objects.task_group_object_id = task_group_objects.id
SET
cycle_task_group_objects.object_id = task_group_objects.object_id,
cycle_task_group_objects.object_type = task_group_objects.object_type;
''')
def downgrade():
op.drop_column('cycle_task_group_objects', 'object_type')
op.drop_column('cycle_task_group_objects', 'object_id')
| VinnieJohns/ggrc-core | src/ggrc_workflows/migrations/versions/20140715214934_26d9c9c91542_add_cycletaskgroupobject_object.py | Python | apache-2.0 | 1,141 |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for attention functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
import numpy as np
from seq2seq.decoders.attention import AttentionLayerDot
from seq2seq.decoders.attention import AttentionLayerBahdanau
class AttentionLayerTest(tf.test.TestCase):
"""
Tests the AttentionLayer module.
"""
def setUp(self):
super(AttentionLayerTest, self).setUp()
tf.logging.set_verbosity(tf.logging.INFO)
self.batch_size = 8
self.attention_dim = 128
self.input_dim = 16
self.seq_len = 10
self.state_dim = 32
def _create_layer(self):
"""Creates the attention layer. Should be implemented by child classes"""
raise NotImplementedError
def _test_layer(self):
"""Tests Attention layer with a given score type"""
inputs_pl = tf.placeholder(tf.float32, (None, None, self.input_dim))
inputs_length_pl = tf.placeholder(tf.int32, [None])
state_pl = tf.placeholder(tf.float32, (None, self.state_dim))
attention_fn = self._create_layer()
scores, context = attention_fn(
query=state_pl,
keys=inputs_pl,
values=inputs_pl,
values_length=inputs_length_pl)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
feed_dict = {}
feed_dict[inputs_pl] = np.random.randn(self.batch_size, self.seq_len,
self.input_dim)
feed_dict[state_pl] = np.random.randn(self.batch_size, self.state_dim)
feed_dict[inputs_length_pl] = np.arange(self.batch_size) + 1
scores_, context_ = sess.run([scores, context], feed_dict)
np.testing.assert_array_equal(scores_.shape,
[self.batch_size, self.seq_len])
np.testing.assert_array_equal(context_.shape,
[self.batch_size, self.input_dim])
for idx, batch in enumerate(scores_, 1):
# All scores that are padded should be zero
np.testing.assert_array_equal(batch[idx:], np.zeros_like(batch[idx:]))
# Scores should sum to 1
scores_sum = np.sum(scores_, axis=1)
np.testing.assert_array_almost_equal(scores_sum, np.ones([self.batch_size]))
class AttentionLayerDotTest(AttentionLayerTest):
"""Tests the AttentionLayerDot class"""
def _create_layer(self):
return AttentionLayerDot(
params={"num_units": self.attention_dim},
mode=tf.contrib.learn.ModeKeys.TRAIN)
def test_layer(self):
self._test_layer()
class AttentionLayerBahdanauTest(AttentionLayerTest):
"""Tests the AttentionLayerBahdanau class"""
def _create_layer(self):
return AttentionLayerBahdanau(
params={"num_units": self.attention_dim},
mode=tf.contrib.learn.ModeKeys.TRAIN)
def test_layer(self):
self._test_layer()
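def _masked_softmax_reference(scores, lengths):
  """Illustrative only (not part of the original tests): a NumPy sketch of the
  masking behaviour the assertions above check: positions at or beyond each
  sequence length get zero weight, and the remaining weights sum to 1."""
  seq_len = scores.shape[1]
  mask = np.arange(seq_len)[None, :] < np.asarray(lengths)[:, None]
  shifted = np.exp(scores - np.max(scores, axis=1, keepdims=True)) * mask
  return shifted / np.sum(shifted, axis=1, keepdims=True)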
if __name__ == "__main__":
tf.test.main()
| shashankrajput/seq2seq | seq2seq/test/attention_test.py | Python | apache-2.0 | 3,532 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Grappler LayoutOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import device_properties_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.compat import compat
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.grappler import cluster as gcluster
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.layers import convolutional as conv_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import saver as saver_lib
def _weight(shape):
"""Generates a weight of a given shape."""
return random_ops.truncated_normal(shape, seed=0, stddev=0.1)
def _bias(shape):
"""Generates a bias of a given shape."""
return constant_op.constant(0.1, shape=shape)
def _conv2d(x, w):
"""Returns a 2d convolution layer with full stride."""
return nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')
def _max_pool_2x2(x):
"""Downsamples a feature map by 2X."""
return nn.max_pool(
x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Taken from tensorflow/examples/tutorials/mnist/mnist_deep.py
def _two_layer_model(x):
x_image = array_ops.reshape(x, [-1, 28, 28, 1])
w_conv1 = _weight([5, 5, 1, 32])
b_conv1 = _bias([32])
h_conv1 = nn.relu(_conv2d(x_image, w_conv1) + b_conv1)
h_pool1 = _max_pool_2x2(h_conv1)
w_conv2 = _weight([5, 5, 32, 64])
b_conv2 = _bias([64])
h_conv2 = nn.relu(_conv2d(h_pool1, w_conv2) + b_conv2)
h_pool2 = _max_pool_2x2(h_conv2)
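  # For the [1, 784] inputs used throughout these tests, h_pool2 has shape
  # [1, 7, 7, 64]: two SAME 5x5 convolutions, each followed by 2x2 max
  # pooling, take the 28x28x1 image to 14x14x32 and then to 7x7x64.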
return h_pool2
def _model_with_second_port():
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([2, 5, 5, 4], seed=0)
scale = constant_op.constant(0.1, shape=[4])
offset = constant_op.constant(0.3, shape=[4])
y, mean, _ = nn.fused_batch_norm(x, scale, offset)
mul = math_ops.add(y, mean)
output = array_ops.identity(mul)
return output
def _model_with_branch(x):
x_image = array_ops.reshape(x, [-1, 28, 28, 1])
w_conv1 = _weight([5, 5, 1, 32])
w_conv2 = _weight([5, 5, 1, 32])
c_conv1 = _conv2d(x_image, w_conv1)
c_conv2 = _conv2d(x_image, w_conv2)
add = math_ops.add(c_conv1, c_conv2)
return add
def _model_with_vec_and_4d(x):
x_image = array_ops.reshape(x, [-1, 28, 28, 1])
w_conv1 = _weight([5, 5, 1, 32])
c_conv1 = _conv2d(x_image, w_conv1)
vector = constant_op.constant(6.4, shape=[32])
add = math_ops.add(c_conv1, vector)
return add
def _loop():
random_seed.set_random_seed(0)
x1 = random_ops.truncated_normal([1, 784], seed=0)
x2 = random_ops.truncated_normal([1, 784], seed=0)
x3 = random_ops.truncated_normal([1, 784], seed=0)
x4 = random_ops.truncated_normal([1, 784], seed=0)
elems = (x1, x2, x3, x4)
outputs = map_fn.map_fn(_two_layer_model, elems, dtype=dtypes.float32)
return outputs
def _loop_with_branch():
random_seed.set_random_seed(0)
x1 = random_ops.truncated_normal([1, 784], seed=0)
x2 = random_ops.truncated_normal([1, 784], seed=0)
x3 = random_ops.truncated_normal([1, 784], seed=0)
x4 = random_ops.truncated_normal([1, 784], seed=0)
elems = (x1, x2, x3, x4)
outputs = map_fn.map_fn(_model_with_branch, elems, dtype=dtypes.float32)
return outputs
def _loop_with_vec_and_4d():
random_seed.set_random_seed(0)
x1 = random_ops.truncated_normal([1, 784], seed=0)
x2 = random_ops.truncated_normal([1, 784], seed=0)
x3 = random_ops.truncated_normal([1, 784], seed=0)
x4 = random_ops.truncated_normal([1, 784], seed=0)
elems = (x1, x2, x3, x4)
outputs = map_fn.map_fn(_model_with_vec_and_4d, elems, dtype=dtypes.float32)
return outputs
def _get_config(layout_optimizer=True):
if layout_optimizer:
rewrite_options = rewriter_config_pb2.RewriterConfig(
layout_optimizer=rewriter_config_pb2.RewriterConfig.ON,
# do not remove duplicated nodes
arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF)
else:
rewrite_options = rewriter_config_pb2.RewriterConfig(
layout_optimizer=rewriter_config_pb2.RewriterConfig.OFF,
# do not remove duplicated nodes
arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF)
rewrite_options.min_graph_nodes = -1
graph_options = config_pb2.GraphOptions(
rewrite_options=rewrite_options, build_cost_model=1)
config = config_pb2.ConfigProto(graph_options=graph_options)
config.graph_options.optimizer_options.opt_level = -1
return config
def _simple_metagraph(depthwise=False):
random_seed.set_random_seed(0)
x = variables.Variable(random_ops.truncated_normal([1, 200, 200, 3], seed=0))
conv = conv_layers.separable_conv2d if depthwise else conv_layers.conv2d
y = conv(x, 32, [3, 3])
z = conv(y, 32, [3, 3])
optimizer = gradient_descent.GradientDescentOptimizer(1e-4)
loss = math_ops.reduce_mean(z)
train_op = optimizer.minimize(loss)
graph = ops.get_default_graph()
graph.add_to_collection('train_op', train_op)
meta_graph = saver_lib.export_meta_graph(graph_def=graph.as_graph_def())
return meta_graph
def _get_cluster():
named_device = device_properties_pb2.NamedDevice()
named_device.name = '/GPU:0'
named_device.properties.type = 'GPU'
named_device.properties.num_cores = 24
named_device.properties.frequency = 1000
named_device.properties.environment['architecture'] = '4'
cluster = gcluster.Cluster(devices=[named_device])
return cluster
def _is_transpose(node):
return node.endswith('TransposeNHWCToNCHW-LayoutOptimizer') or node.endswith(
'TransposeNCHWToNHWC-LayoutOptimizer')
def _is_permute(node):
return node.endswith('VecPermuteNHWCToNCHW-LayoutOptimizer') or node.endswith(
'VecPermuteNCHWToNHWC-LayoutOptimizer')
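def _count_transposes(metadata):
  # Illustrative helper (not part of the original file): the bookkeeping
  # pattern repeated in the tests below, which walks a RunMetadata cost graph
  # and collects the transpose nodes inserted by the layout optimizer.
  nodes = [node.name for node in metadata.cost_graph.node]
  return sum(1 for name in nodes if _is_transpose(name)), nodes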
@test_util.for_all_test_methods(test_util.no_xla_auto_jit,
'Test does not apply in XLA setting')
class LayoutOptimizerTest(test.TestCase):
"""Tests the Grappler layout optimizer."""
def _assert_trans_nchw_to_nhwc(self, name, nodes):
self.assertIn(name + '-TransposeNCHWToNHWC-LayoutOptimizer', nodes)
def _assert_trans_nhwc_to_nchw(self, name, nodes):
self.assertIn(name + '-TransposeNHWCToNCHW-LayoutOptimizer', nodes)
def _assert_map_nhwc_to_nchw(self, name, nodes):
self.assertIn(name + '-DimMapNHWCToNCHW-LayoutOptimizer', nodes)
def _assert_vec_nchw_to_nhwc(self, name, nodes):
self.assertIn(name + '-VecPermuteNCHWToNHWC-LayoutOptimizer', nodes)
def _assert_vec_nhwc_to_nchw(self, name, nodes):
self.assertIn(name + '-VecPermuteNHWCToNCHW-LayoutOptimizer', nodes)
def _train(self, checkpoint_path, layout_optimizer=False, restore=False):
ops.reset_default_graph()
graph = ops.get_default_graph()
with session.Session(
config=_get_config(layout_optimizer), graph=graph) as sess:
batch = 2
height = 6
width = 7
input_channels = 3
shape = [batch, height, width, input_channels]
image = array_ops.placeholder(dtype='float32', shape=shape)
conv1 = conv_layers.conv2d(image, 32, [3, 3])
conv2 = conv_layers.conv2d(conv1, 32, [3, 3])
optimizer = gradient_descent.GradientDescentOptimizer(0.01)
loss = math_ops.reduce_mean(conv2)
train_op = optimizer.minimize(loss)
saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)
if restore:
saver.restore(sess, checkpoint_path)
else:
self.evaluate(variables.global_variables_initializer())
np.random.seed(0)
for _ in range(2):
image_val = np.random.rand(*shape).astype(np.float32)
sess.run([loss, train_op], feed_dict={image: image_val})
if restore:
all_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
all_vars_values = [var.eval(session=sess) for var in all_vars]
return all_vars_values
else:
saver.save(sess, checkpoint_path)
@test_util.deprecated_graph_mode_only
def testTwoConvLayers(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
output = _two_layer_model(x)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Relu_1-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testSplitWithNonConstAxis(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
dim = array_ops.placeholder(dtype='int32')
split = array_ops.split(conv, 2, axis=dim)
scale = constant_op.constant(0.1, shape=[32])
offset = constant_op.constant(0.3, shape=[32])
bn0 = nn.fused_batch_norm(split[0], scale, offset)
bn1 = nn.fused_batch_norm(split[1], scale, offset)
add = bn0[0] + bn1[0]
output = array_ops.identity(add)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={dim: 3})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata, feed_dict={dim: 3})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('add_2-0-0', nodes)
self._assert_map_nhwc_to_nchw('split-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testSplitVWithNonConstAxis(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
dim = array_ops.placeholder(dtype='int32')
sizes = constant_op.constant([50, 10, 4], shape=[3])
split = gen_array_ops.split_v(
value=conv, size_splits=sizes, axis=dim, num_split=3)
output = math_ops.reduce_sum(split[0])
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={dim: 3})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata, feed_dict={dim: 3})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('SplitV-0-0', nodes)
self._assert_map_nhwc_to_nchw('SplitV-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testPadWithConstPaddings(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
paddings_val = [[1, 2], [3, 4], [5, 6], [7, 8]]
paddings = constant_op.constant(
paddings_val, dtype='int32', name='PaddingsConst')
pad = array_ops.pad(conv, paddings)
output = array_ops.identity(pad)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Pad-0-0', nodes)
self.assertIn('Pad-1-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testReduceSum(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv)
output = array_ops.identity(reduce_sum)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testCast(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
cast = math_ops.cast(conv, dtype='bool')
output = array_ops.identity(cast)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Cast-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testSqueeze(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[1, 2])
squeeze = array_ops.squeeze(reduce_sum)
output = array_ops.identity(squeeze)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testSqueezeAlongHW(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[1, 2], keepdims=True)
squeeze = array_ops.squeeze(reduce_sum, axis=[1, 2])
output = array_ops.identity(squeeze)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testSqueezeAlongNHW(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[0, 1, 2], keepdims=True)
squeeze = array_ops.squeeze(reduce_sum, axis=[0, 1, 2])
output = array_ops.identity(squeeze)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testReduceSumAlongHWC(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[1, 2, 3])
output = array_ops.identity(reduce_sum)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testReduceSumAlongNHW(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[0, 1, 2])
output = array_ops.identity(reduce_sum)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testReduceSumAlongC(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[3])
output = array_ops.identity(reduce_sum)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testReduceSumAlongCKeepDims(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[3], keepdims=True)
output = array_ops.identity(reduce_sum)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Sum-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testReduceSumAlongHKeepDims(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[2], keepdims=True)
output = array_ops.identity(reduce_sum)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testReduceSumAlongWCKeepDims(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[2, 3], keepdims=True)
output = array_ops.identity(reduce_sum)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testConcatWithControlDependency(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
axis = constant_op.constant(3)
var = variables.Variable(3)
assign = state_ops.assign(var, 6)
with ops.control_dependencies([assign]):
concat = array_ops.concat([conv, conv], axis)
output = array_ops.identity(concat)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('concat-0-0', nodes)
self.assertIn('concat-2-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testFill(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = array_ops.placeholder(dtype='float32')
conv = _two_layer_model(x)
shape = array_ops.shape(conv)
scalar = array_ops.constant(5.7)
fill = array_ops.fill(shape, scalar)
output = array_ops.identity(fill)
x_val = [3.4] * 784
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={x: x_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
x: x_val
})
nodes = []
num_transposes = 0
num_vec_permute = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
if _is_permute(node.name):
num_vec_permute += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
# Two vector permute nodes were initially added in the Expand phase of
      # LayoutOptimizer; they cancel each other out in the Collapse phase.
expected_vec_permute = 0
self.assertEqual(expected_vec_permute, num_vec_permute)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Fill-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testTile(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
multiple = array_ops.placeholder(dtype='int32')
tile = array_ops.tile(conv, multiple)
output = array_ops.identity(tile)
multiple_val = [2, 3, 4, 1]
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={multiple: multiple_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
multiple: multiple_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Tile-0-0', nodes)
self._assert_vec_nhwc_to_nchw('Tile-1', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testReverseWithConstDims(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
dims = constant_op.constant([3, 1], name='DimsConst')
reverse = array_ops.reverse(conv, dims)
output = array_ops.identity(reverse)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('ReverseV2-0-0', nodes)
self.assertIn('ReverseV2-1-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testReverseWithNonConstDims(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
dims = array_ops.placeholder(dtype='int32')
reverse = array_ops.reverse(conv, dims)
output = array_ops.identity(reverse)
dims_val = [2, 3]
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={dims: dims_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
dims: dims_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('ReverseV2-0-0', nodes)
self._assert_map_nhwc_to_nchw('ReverseV2-1', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testSelectOp(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
add = math_ops.add(conv, conv)
mean = math_ops.reduce_mean(conv)
condition = math_ops.less(conv, mean)
select = gen_math_ops.select(condition, conv, add)
output = array_ops.identity(select)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Select-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testSelectOpConditionUnknownShape(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
add = math_ops.add(conv, conv)
condition = array_ops.placeholder(dtype='bool')
select = gen_math_ops.select(condition, conv, add)
output = array_ops.identity(select)
condition_val = np.zeros((1, 7, 7, 64))
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={condition: condition_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={condition: condition_val})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 3
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testSelectOpScalarCondition(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
add = math_ops.add(conv, conv)
condition = constant_op.constant(True)
select = gen_math_ops.select(condition, conv, add)
output = array_ops.identity(select)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Select-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testPadWithNonConstPaddings(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
paddings = array_ops.placeholder(dtype='int32')
pad = array_ops.pad(conv, paddings)
output = array_ops.identity(pad)
paddings_val = [[1, 2], [3, 4], [5, 6], [7, 8]]
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={paddings: paddings_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
paddings: paddings_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Pad-0-0', nodes)
self._assert_vec_nhwc_to_nchw('Pad-1', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testMaxPoolV2(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
ksize = constant_op.constant([1, 2, 3, 1], shape=[4])
strides = array_ops.placeholder(dtype='int32', shape=[4])
max_pool = gen_nn_ops.max_pool_v2(conv, ksize, strides, 'VALID')
output = array_ops.identity(max_pool)
strides_val = [1, 3, 2, 1]
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={strides: strides_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
strides: strides_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('MaxPoolV2-0-0', nodes)
self._assert_vec_nhwc_to_nchw('MaxPoolV2-2', nodes)
self.assertIn('MaxPoolV2-1-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testMaxPoolGradV2(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
ksize = constant_op.constant([1, 2, 3, 1], shape=[4])
strides = array_ops.placeholder(dtype='int32', shape=[4])
max_pool_grad = gen_nn_ops.max_pool_grad_v2(conv, conv, conv, ksize,
strides, 'VALID')
output = array_ops.identity(max_pool_grad)
strides_val = [1, 3, 2, 1]
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={strides: strides_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
strides: strides_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('MaxPoolGradV2-0-0', nodes)
self._assert_vec_nhwc_to_nchw('MaxPoolGradV2-4', nodes)
self.assertIn('MaxPoolGradV2-3-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testSliceWithNonConstAxis(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
size = array_ops.placeholder(dtype='int32')
s = array_ops.slice(conv, [0, 0, 0, 0], size)
output = array_ops.identity(s)
size_val = [1, 2, 3, 4]
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={size: size_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
size: size_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Slice-0-0', nodes)
self._assert_vec_nhwc_to_nchw('Slice-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testStridedSliceWithNonConstAxis(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
end = array_ops.placeholder(dtype='int32')
s = array_ops.strided_slice(conv, [0, 0, 0, 0], end, strides=[1, 2, 3, 1])
output = array_ops.identity(s)
end_val = [1, 2, 3, 4]
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={end: end_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
end: end_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('StridedSlice-0-0', nodes)
self._assert_vec_nhwc_to_nchw('StridedSlice-2', nodes)
self.assertIn('StridedSlice-1-LayoutOptimizer', nodes)
self.assertIn('StridedSlice-3-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testStridedSliceWithMask1011(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
# This will generate a StridedSlice op with begin mask and
# end mask 11 (binary 1011): dimensions 0, 1 and 3 are taken in full, so
# their mask bits are set, while dimension 2 is sliced explicitly.
s = conv[:, :, 1:-1, :]
output = array_ops.identity(s)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('strided_slice-0-0', nodes)
self.assertIn('strided_slice-1-LayoutOptimizer', nodes)
self.assertIn('strided_slice-2-LayoutOptimizer', nodes)
self.assertIn('strided_slice-3-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testStridedSliceWithMask0111(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
# This will generate a StridedSlice op with begin mask and
# end mask 7 (binary 0111): dimensions 0, 1 and 2 are taken in full, so
# their mask bits are set, while dimension 3 is sliced explicitly.
s = conv[:, :, :, 1:-1]
output = array_ops.identity(s)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('strided_slice-0-0', nodes)
self.assertIn('strided_slice-1-LayoutOptimizer', nodes)
self.assertIn('strided_slice-2-LayoutOptimizer', nodes)
self.assertIn('strided_slice-3-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testStridedSliceGradWithNonConstAxis(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
end = array_ops.placeholder(dtype='int32')
shape = array_ops.shape(conv)
end_val = [1, 2, 3, 4]
s = array_ops.strided_slice(
conv, [0, 0, 0, 0], end_val, strides=[1, 2, 3, 1])
s_grad = array_ops.strided_slice_grad(shape, [0, 0, 0, 0], end,
[1, 2, 3, 1], s)
output = array_ops.identity(s_grad)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={end: end_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
end: end_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('StridedSliceGrad-0-0', nodes)
self._assert_vec_nhwc_to_nchw('StridedSliceGrad-2', nodes)
self.assertIn('StridedSlice-1-LayoutOptimizer', nodes)
self.assertIn('StridedSlice-2-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testShapeN(self):
if test.is_gpu_available(cuda_only=True):
x = array_ops.placeholder(dtype='float32')
conv = _two_layer_model(x)
shapen = array_ops.shape_n([conv, conv])
output = math_ops.add(shapen[0], shapen[1])
x_val = [1.7] * 784
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={x: x_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
x: x_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_vec_nchw_to_nhwc('ShapeN-0-0', nodes)
self.assertAllEqual(output_val_ref, output_val)
@test_util.deprecated_graph_mode_only
def testShapeNFollowedByNotConvertibleNodeReshape(self):
if test.is_gpu_available(cuda_only=True):
x = array_ops.placeholder(dtype='float32')
conv = _two_layer_model(x)
conv_reshape = array_ops.reshape(conv, [1, 1, 1, -1])
shapen = array_ops.shape_n([conv, conv_reshape])
shape = array_ops.identity(shapen[1])
ones = array_ops.ones(shape)
output = math_ops.add_n([conv_reshape, ones])
x_val = [1.7] * 784
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={x: x_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={x: x_val})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testLoop(self):
if test.is_gpu_available(cuda_only=True):
output = _loop()
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('map/while/Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('map/while/MaxPool_1-0-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testLoopWithBranch(self):
if test.is_gpu_available(cuda_only=True):
output = _loop_with_branch()
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 3
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('map/while/Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('map/while/Add_1-0-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testLoopWithVecAnd4D(self):
if test.is_gpu_available(cuda_only=True):
output = _loop_with_vec_and_4d()
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('map/while/Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('map/while/Add_1-0-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testBinaryOpSecondPort(self):
with compat.forward_compatibility_horizon(2019, 6, 7):
if test.is_gpu_available(cuda_only=True):
output = _model_with_second_port()
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('FusedBatchNormV3-0', nodes)
self._assert_trans_nchw_to_nhwc('Add-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testGradient(self):
meta_graph = _simple_metagraph()
config = config_pb2.ConfigProto()
config.graph_options.rewrite_options.CopyFrom(
rewriter_config_pb2.RewriterConfig(
layout_optimizer=rewriter_config_pb2.RewriterConfig.ON,
min_graph_nodes=-1))
optimized_graph = tf_optimizer.OptimizeGraph(
config, meta_graph, cluster=_get_cluster())
found = 0
for node in optimized_graph.node:
if node.op in ['Conv2D', 'Conv2DBackpropFilter', 'Conv2DBackpropInput']:
found += 1
self.assertEqual(node.attr['data_format'].s, b'NCHW')
self.assertEqual(found, 5)
@test_util.deprecated_graph_mode_only
def testDepthwise(self):
meta_graph = _simple_metagraph(depthwise=True)
config = config_pb2.ConfigProto()
config.graph_options.rewrite_options.CopyFrom(
rewriter_config_pb2.RewriterConfig(
layout_optimizer=rewriter_config_pb2.RewriterConfig.ON,
min_graph_nodes=-1))
optimized_graph = tf_optimizer.OptimizeGraph(
config, meta_graph, cluster=_get_cluster())
found = 0
for node in optimized_graph.node:
if node.op in [
'DepthwiseConv2dNative', 'DepthwiseConv2dNativeBackpropFilter',
'DepthwiseConv2dNativeBackpropInput'
]:
found += 1
self.assertEqual(node.attr['data_format'].s, b'NCHW')
self.assertEqual(found, 6)
def testCheckpointCompatibility(self):
if not test.is_gpu_available(cuda_only=True):
self.skipTest('GPU required')
checkpoint_path = self.get_temp_dir()
self._train(checkpoint_path)
vars_expected = self._train(checkpoint_path, restore=True)
vars_layout_optimized = self._train(
checkpoint_path, restore=True, layout_optimizer=True)
for var_expected, var_layout_optimized in zip(vars_expected,
vars_layout_optimized):
self.assertAllClose(var_expected, var_layout_optimized, atol=1e-6)
if __name__ == '__main__':
test.main()
| ghchinoy/tensorflow | tensorflow/python/grappler/layout_optimizer_test.py | Python | apache-2.0 | 60,128 |
"""Support for switches through the SmartThings cloud API."""
from __future__ import annotations
from collections.abc import Sequence
from pysmartthings import Capability
from homeassistant.components.switch import SwitchEntity
from . import SmartThingsEntity
from .const import DATA_BROKERS, DOMAIN
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Add switches for a config entry."""
broker = hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id]
async_add_entities(
[
SmartThingsSwitch(device)
for device in broker.devices.values()
if broker.any_assigned(device.device_id, "switch")
]
)
def get_capabilities(capabilities: Sequence[str]) -> Sequence[str] | None:
"""Return all capabilities supported if minimum required are present."""
# Must be able to be turned on/off.
if Capability.switch in capabilities:
return [Capability.switch, Capability.energy_meter, Capability.power_meter]
return None
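# A minimal sketch (hypothetical helper, not part of the original integration):
# only the presence of Capability.switch decides whether this platform claims a
# device; the meter capabilities above are optional extras returned alongside it.
def _example_is_claimed(capabilities: Sequence[str]) -> bool:
    """Return True if the switch platform would claim these capabilities."""
    return get_capabilities(capabilities) is not None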
class SmartThingsSwitch(SmartThingsEntity, SwitchEntity):
"""Define a SmartThings switch."""
async def async_turn_off(self, **kwargs) -> None:
"""Turn the switch off."""
await self._device.switch_off(set_status=True)
# State is set optimistically in the command above, therefore update
# the entity state ahead of receiving the confirming push updates
self.async_write_ha_state()
async def async_turn_on(self, **kwargs) -> None:
"""Turn the switch on."""
await self._device.switch_on(set_status=True)
# State is set optimistically in the command above, therefore update
# the entity state ahead of receiving the confirming push updates
self.async_write_ha_state()
@property
def is_on(self) -> bool:
"""Return true if light is on."""
return self._device.status.switch
| jawilson/home-assistant | homeassistant/components/smartthings/switch.py | Python | apache-2.0 | 1,911 |
#!/usr/bin/env python
"""
split_file.py [-o <dir>] <path>
Take the file at <path> and write it to multiple files, switching to a new file
every time an annotation of the form "// BEGIN file1.swift" is encountered. If
<dir> is specified, place the files in <dir>; otherwise, put them in the
current directory.
"""
import getopt
import os
import re
import sys
def usage():
sys.stderr.write(__doc__.strip() + "\n")
sys.exit(1)
fp_out = None
dest_dir = '.'
try:
opts, args = getopt.getopt(sys.argv[1:], 'o:h')
for (opt, arg) in opts:
if opt == '-o':
dest_dir = arg
elif opt == '-h':
usage()
except getopt.GetoptError:
usage()
if len(args) != 1:
usage()
fp_in = open(args[0], 'r')
for line in fp_in:
m = re.match(r'^//\s*BEGIN\s+([^\s]+)\s*$', line)
if m:
if fp_out:
fp_out.close()
fp_out = open(os.path.join(dest_dir, m.group(1)), 'w')
elif fp_out:
fp_out.write(line)
fp_in.close()
if fp_out:
fp_out.close()
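# A small sketch of the marker matching used above, with made-up input lines;
# it is defined for illustration only and never called by the script.
def _example_begin_markers():
    sample = [
        '// BEGIN main.swift\n',
        'print("hello")\n',
        '// BEGIN other.swift\n',
    ]
    pattern = re.compile(r'^//\s*BEGIN\s+([^\s]+)\s*$')
    # Yields the destination file names, here ['main.swift', 'other.swift'].
    return [m.group(1) for m in (pattern.match(line) for line in sample) if m]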
| khizkhiz/swift | utils/split_file.py | Python | apache-2.0 | 1,030 |
#!/usr/bin/env python
##
# OOIPLACEHOLDER
#
# Copyright 2014 Raytheon Co.
##
from mi.core.log import get_logger
from mi.core.versioning import version
from mi.dataset.dataset_driver import DataSetDriver
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.parser.mmp_cds_base import MmpCdsParser
log = get_logger()
__author__ = 'Joe Padula'
@version("0.0.3")
def parse(unused, source_file_path, particle_data_handler):
parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.dosta_abcdjm_mmp_cds',
DataSetDriverConfigKeys.PARTICLE_CLASS: 'DostaAbcdjmMmpCdsParserDataParticle'
}
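# These two keys point the parser at the module and particle class it should
# instantiate for each record extracted from the file (assumed behaviour of
# DataSetDriverConfigKeys; see mi.dataset.parser for details).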
def exception_callback(exception):
log.debug("ERROR: %r", exception)
particle_data_handler.setParticleDataCaptureFailure()
with open(source_file_path, 'rb') as stream_handle:
parser = MmpCdsParser(parser_config, stream_handle, exception_callback)
driver = DataSetDriver(parser, particle_data_handler)
driver.processFileStream()
return particle_data_handler
| janeen666/mi-instrument | mi/dataset/driver/dosta_abcdjm/mmp_cds/dosta_abcdjm_mmp_cds_recovered_driver.py | Python | bsd-2-clause | 1,067 |
# $Id$
from module_base import ModuleBase
from module_mixins import FilenameViewModuleMixin
import module_utils
import vtk
class stlWRT(FilenameViewModuleMixin, ModuleBase):
def __init__(self, module_manager):
# call parent constructor
ModuleBase.__init__(self, module_manager)
# need to make sure that we're all happy triangles and stuff
self._cleaner = vtk.vtkCleanPolyData()
self._tf = vtk.vtkTriangleFilter()
self._tf.SetInput(self._cleaner.GetOutput())
self._writer = vtk.vtkSTLWriter()
self._writer.SetInput(self._tf.GetOutput())
# sorry about this, but the files get REALLY big if we write them
# in ASCII - I'll make this a gui option later.
#self._writer.SetFileTypeToBinary()
# following is the standard way of connecting up the devide progress
# callback to a VTK object; you should do this for all objects in your module.
mm = self._module_manager
for textobj in (('Cleaning data', self._cleaner),
('Converting to triangles', self._tf),
('Writing STL data', self._writer)):
module_utils.setup_vtk_object_progress(self, textobj[1],
textobj[0])
# ctor for this specific mixin
FilenameViewModuleMixin.__init__(
self,
'Select a filename',
'STL data (*.stl)|*.stl|All files (*)|*',
{'vtkSTLWriter': self._writer},
fileOpen=False)
# set up some defaults
self._config.filename = ''
self.sync_module_logic_with_config()
def close(self):
# we should disconnect all inputs
self.set_input(0, None)
del self._writer
FilenameViewModuleMixin.close(self)
def get_input_descriptions(self):
return ('vtkPolyData',)
def set_input(self, idx, input_stream):
self._cleaner.SetInput(input_stream)
def get_output_descriptions(self):
return ()
def get_output(self, idx):
raise Exception
def logic_to_config(self):
filename = self._writer.GetFileName()
if filename is None:
filename = ''
self._config.filename = filename
def config_to_logic(self):
self._writer.SetFileName(self._config.filename)
def view_to_config(self):
self._config.filename = self._getViewFrameFilename()
def config_to_view(self):
self._setViewFrameFilename(self._config.filename)
def execute_module(self):
if len(self._writer.GetFileName()):
self._writer.Write()
| nagyistoce/devide | modules/writers/stlWRT.py | Python | bsd-3-clause | 2,674 |
__version__=''' $Id'''
__doc__='''basic tests.'''
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, printLocation
setOutDir(__name__)
import unittest
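# getrc evaluates each whitespace-separated expression in the caller's frame
# (walking outward through enclosing local scopes that share the same globals)
# and returns the reference counts as a single space-joined string.
# checkrc re-evaluates the same expressions and reports any counts that changed.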
def getrc(defns,depth=1):
from sys import getrefcount, _getframe
f = _getframe(depth)
G0 = f.f_globals
L = f.f_locals
if L is not G0:
LL = [L]
while 1:
f = f.f_back
G = f.f_globals
L = f.f_locals
if G is not G0 or G is L: break
LL.append(L)
L = {}
LL.reverse()
for l in LL:
L.update(l)
else:
L = L.copy()
G0 = G0.copy()
return ' '.join([str(getrefcount(eval(x,L,G0))-1) for x in defns.split()])
def checkrc(defns,rcv0):
rcv1 = getrc(defns,2)
return ' '.join(["%s %s-->%s" % (x,v,w) for x,v,w in zip(defns.split(),rcv0.split(),rcv1.split()) if v!=w])
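# Typical pattern in the tests below: rcv = getrc(defns); ...exercise the code
# under test...; assert not checkrc(defns, rcv) - an empty report means no
# reference counts drifted.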
class RlAccelTestCase(unittest.TestCase):
def testFpStr(self):
# should give six decimal places if less than 1;
# if more, give up to seven significant figures
from _rl_accel import fp_str
assert fp_str(1,2,3)=='1 2 3'
assert fp_str(1) == '1'
assert fp_str(595.275574) == '595.2756'
assert fp_str(59.5275574) == '59.52756'
assert fp_str(5.95275574) == '5.952756'
def test_AsciiBase85Encode(self):
from _rl_accel import _AsciiBase85Encode
assert _AsciiBase85Encode('Dragan Andric')=='6ul^K@;[2RDIdd%@f~>'
def test_AsciiBase85Decode(self):
from _rl_accel import _AsciiBase85Decode
assert _AsciiBase85Decode('6ul^K@;[2RDIdd%@f~>')=='Dragan Andric'
def testEscapePDF(self):
from _rl_accel import escapePDF
assert escapePDF('(test)')=='\\(test\\)'
def test_instanceEscapePDF(self):
from _rl_accel import _instanceEscapePDF
assert _instanceEscapePDF('', '(test)')=='\\(test\\)'
def testCalcChecksum(self):
from _rl_accel import calcChecksum
assert calcChecksum('test')==1952805748
def test_instanceStringWidth(self):
from reportlab.pdfbase.pdfmetrics import registerFont, getFont, _fonts, unicode2T1
from reportlab.pdfbase.ttfonts import TTFont
ttfn = 'Vera'
t1fn = 'Times-Roman'
registerFont(TTFont(ttfn, "Vera.ttf"))
ttf = getFont(ttfn)
t1f = getFont(t1fn)
testCp1252 = 'copyright %s trademark %s registered %s ReportLab! Ol%s!' % (chr(169), chr(153),chr(174), chr(0xe9))
enc='cp1252'
senc = 'utf8'
ts = 'ABCDEF\xce\x91\xce\xb2G'
utext = 'ABCDEF\xce\x91\xce\xb2G'.decode(senc)
fontSize = 12
defns="ttfn t1fn ttf t1f testCp1252 enc senc ts utext fontSize ttf.face ttf.face.charWidths ttf.face.defaultWidth t1f.widths t1f.encName t1f.substitutionFonts _fonts"
rcv = getrc(defns)
def tfunc(f,ts,fontSize,enc):
w1 = f.stringWidth(ts,fontSize,enc)
w2 = f._py_stringWidth(ts,fontSize,enc)
assert abs(w1-w2)<1e-10,"f(%r).stringWidthU(%r,%s,%r)-->%r != f._py_stringWidth(...)-->%r" % (f,ts,fontSize,enc,w1,w2)
tfunc(t1f,testCp1252,fontSize,enc)
tfunc(t1f,ts,fontSize,senc)
tfunc(t1f,utext,fontSize,senc)
tfunc(ttf,ts,fontSize,senc)
tfunc(ttf,testCp1252,fontSize,enc)
tfunc(ttf,utext,fontSize,senc)
rcc = checkrc(defns,rcv)
assert not rcc, "rc diffs (%s)" % rcc
def test_unicode2T1(self):
from reportlab.pdfbase.pdfmetrics import _py_unicode2T1, getFont, _fonts
from _rl_accel import unicode2T1
t1fn = 'Times-Roman'
t1f = getFont(t1fn)
enc = 'cp1252'
senc = 'utf8'
testCp1252 = ('copyright %s trademark %s registered %s ReportLab! Ol%s!' % (chr(169), chr(153),chr(174), chr(0xe9))).decode(enc)
utext = 'This is the end of the \xce\x91\xce\xb2 world. This is the end of the \xce\x91\xce\xb2 world jap=\xe3\x83\x9b\xe3\x83\x86. This is the end of the \xce\x91\xce\xb2 world. This is the end of the \xce\x91\xce\xb2 world jap=\xe3\x83\x9b\xe3\x83\x86'.decode('utf8')
def tfunc(f,ts):
w1 = unicode2T1(ts,[f]+f.substitutionFonts)
w2 = _py_unicode2T1(ts,[f]+f.substitutionFonts)
assert w1==w2,"%r != %r" % (w1,w2)
defns="t1fn t1f testCp1252 enc senc utext t1f.widths t1f.encName t1f.substitutionFonts _fonts"
rcv = getrc(defns)
tfunc(t1f,testCp1252)
tfunc(t1f,utext)
rcc = checkrc(defns,rcv)
assert not rcc, "rc diffs (%s)" % rcc
def test_sameFrag(self):
from _rl_accel import _sameFrag
class ABag:
def __init__(self,**kwd):
self.__dict__.update(kwd)
def __str__(self):
V=['%s=%r' % v for v in self.__dict__.items()]
V.sort()
return 'ABag(%s)' % ','.join(V)
a=ABag(fontName='Helvetica',fontSize=12, textColor="red", rise=0, underline=0, strike=0, link="aaaa")
b=ABag(fontName='Helvetica',fontSize=12, textColor="red", rise=0, underline=0, strike=0, link="aaaa")
for name in ("fontName", "fontSize", "textColor", "rise", "underline", "strike", "link"):
old = getattr(a,name)
assert _sameFrag(a,b)==1, "_sameFrag(%s,%s)!=1" % (a,b)
assert _sameFrag(b,a)==1, "_sameFrag(%s,%s)!=1" % (b,a)
setattr(a,name,None)
assert _sameFrag(a,b)==0, "_sameFrag(%s,%s)!=0" % (a,b)
assert _sameFrag(b,a)==0, "_sameFrag(%s,%s)!=0" % (b,a)
delattr(a,name)
assert _sameFrag(a,b)==0, "_sameFrag(%s,%s)!=0" % (a,b)
assert _sameFrag(b,a)==0, "_sameFrag(%s,%s)!=0" % (b,a)
delattr(b,name)
assert _sameFrag(a,b)==1, "_sameFrag(%s,%s)!=1" % (a,b)
assert _sameFrag(b,a)==1, "_sameFrag(%s,%s)!=1" % (b,a)
setattr(a,name,old)
setattr(b,name,old)
def makeSuite():
# only run the tests if _rl_accel is present
try:
import _rl_accel
Klass = RlAccelTestCase
except:
class Klass(unittest.TestCase):
pass
return makeSuiteForClasses(Klass)
#noruntests
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
printLocation()
| mattjmorrison/ReportLab | tests/test_rl_accel.py | Python | bsd-3-clause | 6,284 |
"""
Create SQL statements for QuerySets.
The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
import copy
import warnings
from collections import Iterator, Mapping, OrderedDict
from itertools import chain, count, product
from string import ascii_uppercase
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.models.aggregates import Count
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import Col, Ref
from django.db.models.fields.related_lookups import MultiColSource
from django.db.models.query_utils import Q, PathInfo, refs_expression
from django.db.models.sql.constants import (
INNER, LOUTER, ORDER_DIR, ORDER_PATTERN, QUERY_TERMS, SINGLE,
)
from django.db.models.sql.datastructures import (
BaseTable, Empty, EmptyResultSet, Join, MultiJoin,
)
from django.db.models.sql.where import (
AND, OR, ExtraWhere, NothingNode, WhereNode,
)
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.tree import Node
__all__ = ['Query', 'RawQuery']
def get_field_names_from_opts(opts):
return set(chain.from_iterable(
(f.name, f.attname) if f.concrete else (f.name,)
for f in opts.get_fields()
))
class RawQuery(object):
"""
A single raw SQL query
"""
def __init__(self, sql, using, params=None, context=None):
self.params = params or ()
self.sql = sql
self.using = using
self.cursor = None
# Mirror some properties of a normal query so that
# the compiler can be used to process results.
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.extra_select = {}
self.annotation_select = {}
self.context = context or {}
def clone(self, using):
return RawQuery(self.sql, using, params=self.params, context=self.context.copy())
def get_columns(self):
if self.cursor is None:
self._execute_query()
converter = connections[self.using].introspection.column_name_converter
return [converter(column_meta[0])
for column_meta in self.cursor.description]
def __iter__(self):
# Always execute a new query for a new iterator.
# This could be optimized with a cache at the expense of RAM.
self._execute_query()
if not connections[self.using].features.can_use_chunked_reads:
# If the database can't use chunked reads we need to make sure we
# evaluate the entire query up front.
result = list(self.cursor)
else:
result = self.cursor
return iter(result)
def __repr__(self):
return "<RawQuery: %s>" % self
@property
def params_type(self):
return dict if isinstance(self.params, Mapping) else tuple
def __str__(self):
return self.sql % self.params_type(self.params)
def _execute_query(self):
connection = connections[self.using]
# Adapt parameters to the database, as much as possible considering
# that the target type isn't known. See #17755.
params_type = self.params_type
adapter = connection.ops.adapt_unknown_value
if params_type is tuple:
params = tuple(adapter(val) for val in self.params)
elif params_type is dict:
params = dict((key, adapter(val)) for key, val in six.iteritems(self.params))
else:
raise RuntimeError("Unexpected params type: %s" % params_type)
self.cursor = connection.cursor()
self.cursor.execute(self.sql, params)
class Query(object):
"""
A single SQL query.
"""
alias_prefix = 'T'
subq_aliases = frozenset([alias_prefix])
query_terms = QUERY_TERMS
compiler = 'SQLCompiler'
def __init__(self, model, where=WhereNode):
self.model = model
self.alias_refcount = {}
# alias_map is the most important data structure regarding joins.
# It's used for recording which joins exist in the query and what
# types they are. The key is the alias of the joined table (possibly
# the table name) and the value is a Join-like object (see
# sql.datastructures.Join for more information).
self.alias_map = {}
# Sometimes the query contains references to aliases in outer queries (as
# a result of split_exclude). Correct alias quoting needs to know these
# aliases too.
self.external_aliases = set()
self.table_map = {} # Maps table names to list of aliases.
self.default_cols = True
self.default_ordering = True
self.standard_ordering = True
self.used_aliases = set()
self.filter_is_sticky = False
# SQL-related attributes
# Select and related select clauses are expressions to use in the
# SELECT clause of the query.
# The select is used for cases where we want to set up the select
# clause to contain other than default fields (values(), subqueries...)
# Note that annotations go to annotations dictionary.
self.select = []
self.tables = [] # Aliases in the order they are created.
self.where = where()
self.where_class = where
# The group_by attribute can have one of the following forms:
# - None: no group by at all in the query
# - A list of expressions: group by (at least) those expressions.
# String refs are also allowed for now.
# - True: group by all select fields of the model
# See compiler.get_group_by() for details.
self.group_by = None
self.order_by = []
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.distinct = False
self.distinct_fields = []
self.select_for_update = False
self.select_for_update_nowait = False
self.select_related = False
# Arbitrary limit for select_related to prevent infinite recursion.
self.max_depth = 5
# Holds the selects defined by a call to values() or values_list()
# excluding annotation_select and extra_select.
self.values_select = []
# SQL annotation-related attributes
# The _annotations will be an OrderedDict when used. Due to the cost
# of creating OrderedDict this attribute is created lazily (in
# self.annotations property).
self._annotations = None # Maps alias -> Annotation Expression
self.annotation_select_mask = None
self._annotation_select_cache = None
# These are for extensions. The contents are more or less appended
# verbatim to the appropriate clause.
# The _extra attribute is an OrderedDict, lazily created similarly to
# .annotations
self._extra = None # Maps col_alias -> (col_sql, params).
self.extra_select_mask = None
self._extra_select_cache = None
self.extra_tables = ()
self.extra_order_by = ()
# A tuple that is a set of model field names and either True, if these
# are the fields to defer, or False if these are the only fields to
# load.
self.deferred_loading = (set(), True)
self.context = {}
@property
def extra(self):
if self._extra is None:
self._extra = OrderedDict()
return self._extra
@property
def annotations(self):
if self._annotations is None:
self._annotations = OrderedDict()
return self._annotations
@property
def aggregates(self):
warnings.warn(
"The aggregates property is deprecated. Use annotations instead.",
RemovedInDjango20Warning, stacklevel=2)
return self.annotations
def __str__(self):
"""
Returns the query as a string of SQL with the parameter values
substituted in (use sql_with_params() to see the unsubstituted string).
Parameter values won't necessarily be quoted correctly, since that is
done by the database interface at execution time.
"""
sql, params = self.sql_with_params()
return sql % params
def sql_with_params(self):
"""
Returns the query as an SQL string and the parameters that will be
substituted into the query.
"""
return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
def __deepcopy__(self, memo):
result = self.clone(memo=memo)
memo[id(self)] = result
return result
def _prepare(self):
return self
def get_compiler(self, using=None, connection=None):
if using is None and connection is None:
raise ValueError("Need either using or connection")
if using:
connection = connections[using]
return connection.ops.compiler(self.compiler)(self, connection, using)
def get_meta(self):
"""
Returns the Options instance (the model._meta) from which to start
processing. Normally, this is self.model._meta, but it can be changed
by subclasses.
"""
return self.model._meta
def clone(self, klass=None, memo=None, **kwargs):
"""
Creates a copy of the current instance. The 'kwargs' parameter can be
used by clients to update attributes after copying has taken place.
"""
obj = Empty()
obj.__class__ = klass or self.__class__
obj.model = self.model
obj.alias_refcount = self.alias_refcount.copy()
obj.alias_map = self.alias_map.copy()
obj.external_aliases = self.external_aliases.copy()
obj.table_map = self.table_map.copy()
obj.default_cols = self.default_cols
obj.default_ordering = self.default_ordering
obj.standard_ordering = self.standard_ordering
obj.select = self.select[:]
obj.tables = self.tables[:]
obj.where = self.where.clone()
obj.where_class = self.where_class
if self.group_by is None:
obj.group_by = None
elif self.group_by is True:
obj.group_by = True
else:
obj.group_by = self.group_by[:]
obj.order_by = self.order_by[:]
obj.low_mark, obj.high_mark = self.low_mark, self.high_mark
obj.distinct = self.distinct
obj.distinct_fields = self.distinct_fields[:]
obj.select_for_update = self.select_for_update
obj.select_for_update_nowait = self.select_for_update_nowait
obj.select_related = self.select_related
obj.values_select = self.values_select[:]
obj._annotations = self._annotations.copy() if self._annotations is not None else None
if self.annotation_select_mask is None:
obj.annotation_select_mask = None
else:
obj.annotation_select_mask = self.annotation_select_mask.copy()
# _annotation_select_cache cannot be copied, as doing so breaks the
# (necessary) state in which both annotations and
# _annotation_select_cache point to the same underlying objects.
# It will get re-populated in the cloned queryset the next time it's
# used.
obj._annotation_select_cache = None
obj.max_depth = self.max_depth
obj._extra = self._extra.copy() if self._extra is not None else None
if self.extra_select_mask is None:
obj.extra_select_mask = None
else:
obj.extra_select_mask = self.extra_select_mask.copy()
if self._extra_select_cache is None:
obj._extra_select_cache = None
else:
obj._extra_select_cache = self._extra_select_cache.copy()
obj.extra_tables = self.extra_tables
obj.extra_order_by = self.extra_order_by
obj.deferred_loading = copy.copy(self.deferred_loading[0]), self.deferred_loading[1]
if self.filter_is_sticky and self.used_aliases:
obj.used_aliases = self.used_aliases.copy()
else:
obj.used_aliases = set()
obj.filter_is_sticky = False
if 'alias_prefix' in self.__dict__:
obj.alias_prefix = self.alias_prefix
if 'subq_aliases' in self.__dict__:
obj.subq_aliases = self.subq_aliases.copy()
obj.__dict__.update(kwargs)
if hasattr(obj, '_setup_query'):
obj._setup_query()
obj.context = self.context.copy()
return obj
def add_context(self, key, value):
self.context[key] = value
def get_context(self, key, default=None):
return self.context.get(key, default)
def relabeled_clone(self, change_map):
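# Returns a copy of this query with its table aliases renamed per change_map.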
clone = self.clone()
clone.change_aliases(change_map)
return clone
def rewrite_cols(self, annotation, col_cnt):
# We must make sure the inner query has the referred columns in it.
# If we are aggregating over an annotation, then Django uses Ref()
# instances to note this. However, if we are annotating over a column
# of a related model, then it might be that column isn't part of the
# SELECT clause of the inner query, and we must manually make sure
# the column is selected. An example case is:
# .aggregate(Sum('author__awards'))
# Resolving this expression results in a join to author, but there
# is no guarantee the awards column of author is in the select clause
# of the query. Thus we must manually add the column to the inner
# query.
orig_exprs = annotation.get_source_expressions()
new_exprs = []
for expr in orig_exprs:
if isinstance(expr, Ref):
# It's already a Ref to a subquery (see resolve_ref() for
# details)
new_exprs.append(expr)
elif isinstance(expr, Col):
# Reference to column. Make sure the referenced column
# is selected.
col_cnt += 1
col_alias = '__col%d' % col_cnt
self.annotations[col_alias] = expr
self.append_annotation_mask([col_alias])
new_exprs.append(Ref(col_alias, expr))
else:
# Some other expression not referencing database values
# directly. Its subexpression might contain Cols.
new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
new_exprs.append(new_expr)
annotation.set_source_expressions(new_exprs)
return annotation, col_cnt
def get_aggregation(self, using, added_aggregate_names):
"""
Returns the dictionary with the values of the existing aggregations.
"""
if not self.annotation_select:
return {}
has_limit = self.low_mark != 0 or self.high_mark is not None
has_existing_annotations = any(
annotation for alias, annotation
in self.annotations.items()
if alias not in added_aggregate_names
)
# Decide if we need to use a subquery.
#
# Existing annotations would cause incorrect results as get_aggregation()
# must produce just one result and thus must not use GROUP BY. But we
# aren't smart enough to remove the existing annotations from the
# query, so those would force us to use GROUP BY.
#
# If the query has limit or distinct, then those operations must be
# done in a subquery so that we are aggregating on the limit and/or
# distinct results instead of applying the distinct and limit after the
# aggregation.
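# For example, Book.objects.annotate(n=Count('chapters')).aggregate(Avg('n'))
# or Book.objects.all()[:5].count() both take the subquery path below
# (illustrative models only).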
if (isinstance(self.group_by, list) or has_limit or has_existing_annotations or
self.distinct):
from django.db.models.sql.subqueries import AggregateQuery
outer_query = AggregateQuery(self.model)
inner_query = self.clone()
inner_query.select_for_update = False
inner_query.select_related = False
if not has_limit and not self.distinct_fields:
# Queries with distinct_fields need ordering and when a limit
# is applied we must take the slice from the ordered query.
# Otherwise no need for ordering.
inner_query.clear_ordering(True)
if not inner_query.distinct:
# If the inner query uses default select and it has some
# aggregate annotations, then we must make sure the inner
# query is grouped by the main model's primary key. However,
# clearing the select clause can alter results if distinct is
# used.
if inner_query.default_cols and has_existing_annotations:
inner_query.group_by = [self.model._meta.pk.get_col(inner_query.get_initial_alias())]
inner_query.default_cols = False
relabels = {t: 'subquery' for t in inner_query.tables}
relabels[None] = 'subquery'
# Remove any aggregates marked for reduction from the subquery
# and move them to the outer AggregateQuery.
col_cnt = 0
for alias, expression in list(inner_query.annotation_select.items()):
if expression.is_summary:
expression, col_cnt = inner_query.rewrite_cols(expression, col_cnt)
outer_query.annotations[alias] = expression.relabeled_clone(relabels)
del inner_query.annotations[alias]
# Make sure the annotation_select won't use cached results.
inner_query.set_annotation_mask(inner_query.annotation_select_mask)
if inner_query.select == [] and not inner_query.default_cols and not inner_query.annotation_select_mask:
# In case of Model.objects[0:3].count(), there would be no
# field selected in the inner query, yet we must use a subquery.
# So, make sure at least one field is selected.
inner_query.select = [self.model._meta.pk.get_col(inner_query.get_initial_alias())]
try:
outer_query.add_subquery(inner_query, using)
except EmptyResultSet:
return {
alias: None
for alias in outer_query.annotation_select
}
else:
outer_query = self
self.select = []
self.default_cols = False
self._extra = {}
outer_query.clear_ordering(True)
outer_query.clear_limits()
outer_query.select_for_update = False
outer_query.select_related = False
compiler = outer_query.get_compiler(using)
result = compiler.execute_sql(SINGLE)
if result is None:
result = [None for q in outer_query.annotation_select.items()]
converters = compiler.get_converters(outer_query.annotation_select.values())
result = compiler.apply_converters(result, converters)
return {
alias: val
for (alias, annotation), val
in zip(outer_query.annotation_select.items(), result)
}
def get_count(self, using):
"""
Performs a COUNT() query using the current filter constraints.
"""
obj = self.clone()
obj.add_annotation(Count('*'), alias='__count', is_summary=True)
number = obj.get_aggregation(using, ['__count'])['__count']
if number is None:
number = 0
return number
def has_filters(self):
return self.where
def has_results(self, using):
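# Cheap existence check: clear the select and ordering, apply LIMIT 1 and ask
# the compiler whether any row comes back.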
q = self.clone()
if not q.distinct:
if q.group_by is True:
q.add_fields((f.attname for f in self.model._meta.concrete_fields), False)
q.set_group_by()
q.clear_select_clause()
q.clear_ordering(True)
q.set_limits(high=1)
compiler = q.get_compiler(using=using)
return compiler.has_results()
def combine(self, rhs, connector):
"""
Merge the 'rhs' query into the current one (with any 'rhs' effects
being applied *after* (that is, "to the right of") anything in the
current query). 'rhs' is not modified during a call to this function.
The 'connector' parameter describes how to connect filters from the
'rhs' query.
"""
assert self.model == rhs.model, \
"Cannot combine queries on two different base models."
assert self.can_filter(), \
"Cannot combine queries once a slice has been taken."
assert self.distinct == rhs.distinct, \
"Cannot combine a unique query with a non-unique query."
assert self.distinct_fields == rhs.distinct_fields, \
"Cannot combine queries with different distinct fields."
# Work out how to relabel the rhs aliases, if necessary.
change_map = {}
conjunction = (connector == AND)
# Determine which existing joins can be reused. When combining the
# query with AND we must recreate all joins for m2m filters. When
# combining with OR we can reuse joins. The reason is that in AND
# case a single row can't fulfill a condition like:
# revrel__col=1 & revrel__col=2
# But, there might be two different related rows matching this
# condition. In OR case a single True is enough, so single row is
# enough, too.
#
# Note that we will be creating duplicate joins for non-m2m joins in
# the AND case. The results will be correct but this creates too many
# joins. This is something that could be fixed later on.
reuse = set() if conjunction else set(self.tables)
# Base table must be present in the query - this is the same
# table on both sides.
self.get_initial_alias()
joinpromoter = JoinPromoter(connector, 2, False)
joinpromoter.add_votes(
j for j in self.alias_map if self.alias_map[j].join_type == INNER)
rhs_votes = set()
# Now, add the joins from rhs query into the new query (skipping base
# table).
for alias in rhs.tables[1:]:
join = rhs.alias_map[alias]
# If the left side of the join was already relabeled, use the
# updated alias.
join = join.relabeled_clone(change_map)
new_alias = self.join(join, reuse=reuse)
if join.join_type == INNER:
rhs_votes.add(new_alias)
# We can't reuse the same join again in the query. If we have two
# distinct joins for the same connection in rhs query, then the
# combined query must have two joins, too.
reuse.discard(new_alias)
change_map[alias] = new_alias
if not rhs.alias_refcount[alias]:
# The alias was unused in the rhs query. Unref it so that it
# will be unused in the new query, too. We have to add and
# unref the alias so that join promotion has information of
# the join type for the unused alias.
self.unref_alias(new_alias)
joinpromoter.add_votes(rhs_votes)
joinpromoter.update_join_types(self)
# Now relabel a copy of the rhs where-clause and add it to the current
# one.
w = rhs.where.clone()
w.relabel_aliases(change_map)
self.where.add(w, connector)
# Selection columns and extra extensions are those provided by 'rhs'.
self.select = []
for col in rhs.select:
self.add_select(col.relabeled_clone(change_map))
if connector == OR:
# It would be nice to be able to handle this, but the queries don't
# really make sense (or return consistent value sets). Not worth
# the extra complexity when you can write a real query instead.
if self._extra and rhs._extra:
raise ValueError("When merging querysets using 'or', you "
"cannot have extra(select=...) on both sides.")
self.extra.update(rhs.extra)
extra_select_mask = set()
if self.extra_select_mask is not None:
extra_select_mask.update(self.extra_select_mask)
if rhs.extra_select_mask is not None:
extra_select_mask.update(rhs.extra_select_mask)
if extra_select_mask:
self.set_extra_mask(extra_select_mask)
self.extra_tables += rhs.extra_tables
# Ordering uses the 'rhs' ordering, unless it has none, in which case
# the current ordering is used.
self.order_by = rhs.order_by[:] if rhs.order_by else self.order_by
self.extra_order_by = rhs.extra_order_by or self.extra_order_by
def deferred_to_data(self, target, callback):
"""
Converts the self.deferred_loading data structure to an alternate data
structure, describing the field that *will* be loaded. This is used to
compute the columns to select from the database and also by the
QuerySet class to work out which fields are being initialized on each
model. Models that have all their fields included aren't mentioned in
the result, only those that have field restrictions in place.
The "target" parameter is the instance that is populated (in place).
The "callback" is a function that is called whenever a (model, field)
pair need to be added to "target". It accepts three parameters:
"target", and the model and list of fields being added for that model.
"""
field_names, defer = self.deferred_loading
if not field_names:
return
orig_opts = self.get_meta()
seen = {}
must_include = {orig_opts.concrete_model: {orig_opts.pk}}
for field_name in field_names:
parts = field_name.split(LOOKUP_SEP)
cur_model = self.model._meta.concrete_model
opts = orig_opts
for name in parts[:-1]:
old_model = cur_model
source = opts.get_field(name)
if is_reverse_o2o(source):
cur_model = source.related_model
else:
cur_model = source.remote_field.model
opts = cur_model._meta
# Even if we're "just passing through" this model, we must add
# both the current model's pk and the related reference field
# (if it's not a reverse relation) to the things we select.
if not is_reverse_o2o(source):
must_include[old_model].add(source)
add_to_dict(must_include, cur_model, opts.pk)
field = opts.get_field(parts[-1])
is_reverse_object = field.auto_created and not field.concrete
model = field.related_model if is_reverse_object else field.model
model = model._meta.concrete_model
if model == opts.model:
model = cur_model
if not is_reverse_o2o(field):
add_to_dict(seen, model, field)
if defer:
# We need to load all fields for each model, except those that
# appear in "seen" (for all models that appear in "seen"). The only
# slight complexity here is handling fields that exist on parent
# models.
workset = {}
for model, values in six.iteritems(seen):
for field in model._meta.fields:
if field in values:
continue
m = field.model._meta.concrete_model
add_to_dict(workset, m, field)
for model, values in six.iteritems(must_include):
# If we haven't included a model in workset, we don't add the
# corresponding must_include fields for that model, since an
# empty set means "include all fields". That's why there's no
# "else" branch here.
if model in workset:
workset[model].update(values)
for model, values in six.iteritems(workset):
callback(target, model, values)
else:
for model, values in six.iteritems(must_include):
if model in seen:
seen[model].update(values)
else:
# As we've passed through this model, but not explicitly
# included any fields, we have to make sure it's mentioned
# so that only the "must include" fields are pulled in.
seen[model] = values
# Now ensure that every model in the inheritance chain is mentioned
# in the parent list. Again, it must be mentioned to ensure that
# only "must include" fields are pulled in.
for model in orig_opts.get_parent_list():
if model not in seen:
seen[model] = set()
for model, values in six.iteritems(seen):
callback(target, model, values)
def table_alias(self, table_name, create=False):
"""
Returns a table alias for the given table_name and whether this is a
new alias or not.
If 'create' is true, a new alias is always created. Otherwise, the
most recently created alias for the table (if one exists) is reused.
"""
alias_list = self.table_map.get(table_name)
if not create and alias_list:
alias = alias_list[0]
self.alias_refcount[alias] += 1
return alias, False
# Create a new alias for this table.
if alias_list:
alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
alias_list.append(alias)
else:
# The first occurrence of a table uses the table name directly.
alias = table_name
self.table_map[alias] = [alias]
self.alias_refcount[alias] = 1
self.tables.append(alias)
return alias, True
def ref_alias(self, alias):
""" Increases the reference count for this alias. """
self.alias_refcount[alias] += 1
def unref_alias(self, alias, amount=1):
""" Decreases the reference count for this alias. """
self.alias_refcount[alias] -= amount
def promote_joins(self, aliases):
"""
Recursively promotes the join type of the given aliases and their children
to an outer join. A join is only promoted if it is nullable or its parent
join is an outer join.
The children promotion is done to avoid join chains that contain a LOUTER
b INNER c. So, if we have currently a INNER b INNER c and a->b is promoted,
then we must also promote b->c automatically, or otherwise the promotion
of a->b doesn't actually change anything in the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type is None:
# This is the base table (first FROM entry) - this table
# isn't really joined at all in the query, so we should not
# alter its join type.
continue
# Only the first alias (skipped above) should have None join_type
assert self.alias_map[alias].join_type is not None
parent_alias = self.alias_map[alias].parent_alias
parent_louter = (
parent_alias
and self.alias_map[parent_alias].join_type == LOUTER)
already_louter = self.alias_map[alias].join_type == LOUTER
if ((self.alias_map[alias].nullable or parent_louter) and
not already_louter):
self.alias_map[alias] = self.alias_map[alias].promote()
# Join type of 'alias' changed, so re-examine all aliases that
# refer to this one.
aliases.extend(
join for join in self.alias_map.keys()
if (self.alias_map[join].parent_alias == alias
and join not in aliases))
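# Worked example: given the chain a INNER b INNER c where both joins are
# nullable, promoting the alias of the a->b join first turns it into
# LOUTER; because its join type changed, every alias whose parent is that
# join is re-queued, so b->c is examined next and, being nullable with a
# LOUTER parent, is promoted as well. The result is a LOUTER b LOUTER c,
# never the inconsistent a LOUTER b INNER c.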
def demote_joins(self, aliases):
"""
Change join type from LOUTER to INNER for all joins in aliases.
Similarly to promote_joins(), this method must ensure no join chains
containing first an outer, then an inner join are generated. If we
are demoting the b->c join in the chain a LOUTER b LOUTER c, then we must
demote a->b automatically; otherwise the demotion of b->c doesn't
actually change anything in the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type == LOUTER:
self.alias_map[alias] = self.alias_map[alias].demote()
parent_alias = self.alias_map[alias].parent_alias
if self.alias_map[parent_alias].join_type == INNER:
aliases.append(parent_alias)
def reset_refcounts(self, to_counts):
"""
This method will reset reference counts for aliases so that they match
the value passed in :param to_counts:.
"""
for alias, cur_refcount in self.alias_refcount.copy().items():
unref_amount = cur_refcount - to_counts.get(alias, 0)
self.unref_alias(alias, unref_amount)
def change_aliases(self, change_map):
"""
Changes the aliases in change_map (which maps old-alias -> new-alias),
relabelling any references to them in select columns and the where
clause.
"""
assert set(change_map.keys()).intersection(set(change_map.values())) == set()
def relabel_column(col):
if isinstance(col, (list, tuple)):
old_alias = col[0]
return (change_map.get(old_alias, old_alias), col[1])
else:
return col.relabeled_clone(change_map)
# 1. Update references in "select" (normal columns plus aliases),
# "group by" and "where".
self.where.relabel_aliases(change_map)
if isinstance(self.group_by, list):
self.group_by = [relabel_column(col) for col in self.group_by]
self.select = [col.relabeled_clone(change_map) for col in self.select]
if self._annotations:
self._annotations = OrderedDict(
(key, relabel_column(col)) for key, col in self._annotations.items())
# 2. Rename the alias in the internal table/alias datastructures.
for old_alias, new_alias in six.iteritems(change_map):
if old_alias not in self.alias_map:
continue
alias_data = self.alias_map[old_alias].relabeled_clone(change_map)
self.alias_map[new_alias] = alias_data
self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
del self.alias_refcount[old_alias]
del self.alias_map[old_alias]
table_aliases = self.table_map[alias_data.table_name]
for pos, alias in enumerate(table_aliases):
if alias == old_alias:
table_aliases[pos] = new_alias
break
for pos, alias in enumerate(self.tables):
if alias == old_alias:
self.tables[pos] = new_alias
break
self.external_aliases = {change_map.get(alias, alias)
for alias in self.external_aliases}
def bump_prefix(self, outer_query):
"""
Changes the alias prefix to the next letter in the alphabet in a way
that the outer query's aliases and this query's aliases will not
conflict. Even tables that previously had no alias will get an alias
after this call.
"""
def prefix_gen():
"""
Generates a sequence of characters in alphabetical order:
-> 'A', 'B', 'C', ...
When the alphabet is finished, the sequence will continue with the
Cartesian product:
-> 'AA', 'AB', 'AC', ...
"""
alphabet = ascii_uppercase
prefix = chr(ord(self.alias_prefix) + 1)
yield prefix
for n in count(1):
seq = alphabet[alphabet.index(prefix):] if prefix else alphabet
for s in product(seq, repeat=n):
yield ''.join(s)
prefix = None
if self.alias_prefix != outer_query.alias_prefix:
# No clashes between self and outer query should be possible.
return
local_recursion_limit = 127 # explicitly avoid infinite loop
for pos, prefix in enumerate(prefix_gen()):
if prefix not in self.subq_aliases:
self.alias_prefix = prefix
break
if pos > local_recursion_limit:
raise RuntimeError(
'Maximum recursion depth exceeded: too many subqueries.'
)
self.subq_aliases = self.subq_aliases.union([self.alias_prefix])
outer_query.subq_aliases = outer_query.subq_aliases.union(self.subq_aliases)
change_map = OrderedDict()
for pos, alias in enumerate(self.tables):
new_alias = '%s%d' % (self.alias_prefix, pos)
change_map[alias] = new_alias
self.tables[pos] = new_alias
self.change_aliases(change_map)
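# Rough sketch of the effect (assuming both queries use the default 'T'
# prefix): the generator above typically settles on 'U' and, once single
# letters run out, falls back to two-letter products ('AA', 'AB', ...).
# Every table in this query is then renamed positionally, so aliases such
# as ['myapp_book', 'T2'] might become ['U0', 'U1'], keeping the inner and
# outer alias namespaces disjoint.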
def get_initial_alias(self):
"""
Returns the first alias for this query, after increasing its reference
count.
"""
if self.tables:
alias = self.tables[0]
self.ref_alias(alias)
else:
alias = self.join(BaseTable(self.get_meta().db_table, None))
return alias
def count_active_tables(self):
"""
Returns the number of tables in this query with a non-zero reference
count. Note that after execution, the reference counts are zeroed, so
tables added in compiler will not be seen by this method.
"""
return len([1 for count in self.alias_refcount.values() if count])
def join(self, join, reuse=None):
"""
Returns an alias for the given 'join', either reusing an existing alias
for an equivalent join or creating a new one. 'join' is either a base
table entry (the first FROM entry) or a join-like object carrying the
joined table's name, the parent alias, the field being joined along and
whether the join is nullable. The join corresponds to the SQL
equivalent of::
lhs.l_id1 = table.r_id1 AND lhs.l_id2 = table.r_id2
The 'reuse' parameter can be either None, which means all equivalent
joins are reusable, or a set containing the aliases that can be reused.
A join is always created as LOUTER if the lhs alias is LOUTER to make
sure we do not generate chains like t1 LOUTER t2 INNER t3. All new
joins are created as LOUTER if the join is nullable.
"""
reuse = [a for a, j in self.alias_map.items()
if (reuse is None or a in reuse) and j == join]
if reuse:
self.ref_alias(reuse[0])
return reuse[0]
# No reuse is possible, so we need a new alias.
alias, _ = self.table_alias(join.table_name, create=True)
if join.join_type:
if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable:
join_type = LOUTER
else:
join_type = INNER
join.join_type = join_type
join.table_alias = alias
self.alias_map[alias] = join
return alias
def join_parent_model(self, opts, model, alias, seen):
"""
Makes sure the given 'model' is joined in the query. If 'model' isn't
a parent of 'opts' or if it is None this method is a no-op.
The 'alias' is the root alias for starting the join, 'seen' is a dict
of model -> alias of existing joins. It must also contain a mapping
of None -> some alias. This will be returned in the no-op case.
"""
if model in seen:
return seen[model]
chain = opts.get_base_chain(model)
if chain is None:
return alias
curr_opts = opts
for int_model in chain:
if int_model in seen:
curr_opts = int_model._meta
alias = seen[int_model]
continue
# Proxy models have elements in their base chain with no
# parents; assign the new options object and skip to the
# next base in that case.
if not curr_opts.parents[int_model]:
curr_opts = int_model._meta
continue
link_field = curr_opts.get_ancestor_link(int_model)
_, _, _, joins, _ = self.setup_joins(
[link_field.name], curr_opts, alias)
curr_opts = int_model._meta
alias = seen[int_model] = joins[-1]
return alias or seen[None]
def add_aggregate(self, aggregate, model, alias, is_summary):
warnings.warn(
"add_aggregate() is deprecated. Use add_annotation() instead.",
RemovedInDjango20Warning, stacklevel=2)
self.add_annotation(aggregate, alias, is_summary)
def add_annotation(self, annotation, alias, is_summary=False):
"""
Adds a single annotation expression to the Query
"""
annotation = annotation.resolve_expression(self, allow_joins=True, reuse=None,
summarize=is_summary)
self.append_annotation_mask([alias])
self.annotations[alias] = annotation
def prepare_lookup_value(self, value, lookups, can_reuse, allow_joins=True):
# Default lookup if none given is exact.
used_joins = []
if len(lookups) == 0:
lookups = ['exact']
# Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
# uses of None as a query value.
if value is None:
if lookups[-1] not in ('exact', 'iexact'):
raise ValueError("Cannot use None as a query value")
lookups[-1] = 'isnull'
value = True
elif hasattr(value, 'resolve_expression'):
pre_joins = self.alias_refcount.copy()
value = value.resolve_expression(self, reuse=can_reuse, allow_joins=allow_joins)
used_joins = [k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0)]
# Subqueries need to use a different set of aliases than the
# outer query. Call bump_prefix to change aliases of the inner
# query (the value).
if hasattr(value, 'query') and hasattr(value.query, 'bump_prefix'):
value = value._clone()
value.query.bump_prefix(self)
if hasattr(value, 'bump_prefix'):
value = value.clone()
value.bump_prefix(self)
# For Oracle '' is equivalent to null. The check needs to be done
# at this stage because join promotion can't be done at compiler
# stage. Using DEFAULT_DB_ALIAS isn't nice, but it is the best we
# can do here. Similar thing is done in is_nullable(), too.
if (connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and
lookups[-1] == 'exact' and value == ''):
value = True
lookups[-1] = 'isnull'
return value, lookups, used_joins
def solve_lookup_type(self, lookup):
"""
Solve the lookup type from the lookup (e.g. 'foobar__id__icontains').
"""
lookup_splitted = lookup.split(LOOKUP_SEP)
if self._annotations:
aggregate, aggregate_lookups = refs_expression(lookup_splitted, self.annotations)
if aggregate:
return aggregate_lookups, (), aggregate
_, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())
field_parts = lookup_splitted[0:len(lookup_splitted) - len(lookup_parts)]
if len(lookup_parts) == 0:
lookup_parts = ['exact']
elif len(lookup_parts) > 1:
if not field_parts:
raise FieldError(
'Invalid lookup "%s" for model "%s".' %
(lookup, self.get_meta().model.__name__))
return lookup_parts, field_parts, False
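# Illustrative example (assuming a model where 'foobar' is a relation
# exposing an 'id' field and no annotation is named 'foobar'):
# solve_lookup_type('foobar__id__icontains') would return lookup parts
# ['icontains'], field parts ['foobar', 'id'] and False (no annotation
# referenced), while 'foobar__id' would yield (['exact'], ['foobar', 'id'],
# False) because the default lookup is exact.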
def check_query_object_type(self, value, opts):
"""
Checks whether the object passed while querying is of the correct type.
If not, it raises a ValueError specifying the wrong object.
"""
if hasattr(value, '_meta'):
if not (value._meta.concrete_model == opts.concrete_model
or opts.concrete_model in value._meta.get_parent_list()
or value._meta.concrete_model in opts.get_parent_list()):
raise ValueError(
'Cannot query "%s": Must be "%s" instance.' %
(value, opts.object_name))
def check_related_objects(self, field, value, opts):
"""
Checks the type of object passed to query relations.
"""
if field.is_relation:
# QuerySets implement is_compatible_query_object_type() to
# determine compatibility with the given field.
if hasattr(value, 'is_compatible_query_object_type'):
if not value.is_compatible_query_object_type(opts):
raise ValueError(
'Cannot use QuerySet for "%s": Use a QuerySet for "%s".' %
(value.model._meta.model_name, opts.object_name)
)
elif hasattr(value, '_meta'):
self.check_query_object_type(value, opts)
elif hasattr(value, '__iter__'):
for v in value:
self.check_query_object_type(v, opts)
def build_lookup(self, lookups, lhs, rhs):
"""
Tries to extract transforms and lookup from given lhs.
The lhs value is something that works like SQLExpression.
The rhs value is what the lookup is going to compare against.
The lookups is a list of names to extract using get_lookup()
and get_transform().
"""
lookups = lookups[:]
while lookups:
name = lookups[0]
# If there is just one part left, try first get_lookup() so
# that if the lhs supports both transform and lookup for the
# name, then lookup will be picked.
if len(lookups) == 1:
final_lookup = lhs.get_lookup(name)
if not final_lookup:
# We didn't find a lookup. We are going to interpret
# the name as a transform, and do an Exact lookup against
# it.
lhs = self.try_transform(lhs, name, lookups)
final_lookup = lhs.get_lookup('exact')
return final_lookup(lhs, rhs)
lhs = self.try_transform(lhs, name, lookups)
lookups = lookups[1:]
def try_transform(self, lhs, name, rest_of_lookups):
"""
Helper method for build_lookup. Tries to fetch and initialize
a transform for name parameter from lhs.
"""
next = lhs.get_transform(name)
if next:
return next(lhs, rest_of_lookups)
else:
raise FieldError(
"Unsupported lookup '%s' for %s or join on the field not "
"permitted." %
(name, lhs.output_field.__class__.__name__))
def build_filter(self, filter_expr, branch_negated=False, current_negated=False,
can_reuse=None, connector=AND, allow_joins=True, split_subq=True):
"""
Builds a WhereNode for a single filter clause, but doesn't add it
to this Query. Query.add_q() will then add this filter to the where
Node.
The 'branch_negated' tells us if the current branch contains any
negations. This will be used to determine if subqueries are needed.
The 'current_negated' is used to determine if the current filter is
negated or not and this will be used to determine if IS NULL filtering
is needed.
The difference between current_negated and branch_negated is that
branch_negated is set on first negation, but current_negated is
flipped for each negation.
Note that add_filter will not do any negating itself; that is done
higher up in the code by add_q().
The 'can_reuse' is a set of reusable joins for multijoins.
The method will create a filter clause that can be added to the current
query. However, if the filter isn't added to the query then the caller
is responsible for unreffing the joins used.
"""
if isinstance(filter_expr, dict):
raise FieldError("Cannot parse keyword query as dict")
arg, value = filter_expr
if not arg:
raise FieldError("Cannot parse keyword query %r" % arg)
lookups, parts, reffed_aggregate = self.solve_lookup_type(arg)
if not allow_joins and len(parts) > 1:
raise FieldError("Joined field references are not permitted in this query")
# Work out the lookup type and remove it from the end of 'parts',
# if necessary.
value, lookups, used_joins = self.prepare_lookup_value(value, lookups, can_reuse, allow_joins)
clause = self.where_class()
if reffed_aggregate:
condition = self.build_lookup(lookups, reffed_aggregate, value)
clause.add(condition, AND)
return clause, []
opts = self.get_meta()
alias = self.get_initial_alias()
allow_many = not branch_negated or not split_subq
try:
field, sources, opts, join_list, path = self.setup_joins(
parts, opts, alias, can_reuse=can_reuse, allow_many=allow_many)
# Prevent iterator from being consumed by check_related_objects()
if isinstance(value, Iterator):
value = list(value)
self.check_related_objects(field, value, opts)
# split_exclude() needs to know which joins were generated for the
# lookup parts
self._lookup_joins = join_list
except MultiJoin as e:
return self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]),
can_reuse, e.names_with_path)
if can_reuse is not None:
can_reuse.update(join_list)
used_joins = set(used_joins).union(set(join_list))
targets, alias, join_list = self.trim_joins(sources, join_list, path)
if field.is_relation:
# No support for transforms for relational fields
assert len(lookups) == 1
lookup_class = field.get_lookup(lookups[0])
if len(targets) == 1:
lhs = targets[0].get_col(alias, field)
else:
lhs = MultiColSource(alias, targets, sources, field)
condition = lookup_class(lhs, value)
lookup_type = lookup_class.lookup_name
else:
col = targets[0].get_col(alias, field)
condition = self.build_lookup(lookups, col, value)
lookup_type = condition.lookup_name
clause.add(condition, AND)
require_outer = lookup_type == 'isnull' and value is True and not current_negated
if current_negated and (lookup_type != 'isnull' or value is False):
require_outer = True
if (lookup_type != 'isnull' and (
self.is_nullable(targets[0]) or
self.alias_map[join_list[-1]].join_type == LOUTER)):
# The condition added here will be SQL like this:
# NOT (col IS NOT NULL), where the first NOT is added in
# upper layers of code. The reason for addition is that if col
# is null, then col != someval will result in SQL "unknown"
# which isn't the same as in Python. The Python None handling
# is wanted, and it can be gotten by
# (col IS NULL OR col != someval)
# <=>
# NOT (col IS NOT NULL AND col = someval).
lookup_class = targets[0].get_lookup('isnull')
clause.add(lookup_class(targets[0].get_col(alias, sources[0]), False), AND)
return clause, used_joins if not require_outer else ()
def add_filter(self, filter_clause):
self.add_q(Q(**{filter_clause[0]: filter_clause[1]}))
def add_q(self, q_object):
"""
A preprocessor for the internal _add_q(). Responsible for doing final
join promotion.
"""
# For join promotion this case is doing an AND for the added q_object
# and existing conditions. So, any existing inner join forces the join
# type to remain inner. Existing outer joins can however be demoted.
# (Consider case where rel_a is LOUTER and rel_a__col=1 is added - if
# rel_a doesn't produce any rows, then the whole condition must fail.
# So, demotion is OK.)
existing_inner = set(
(a for a in self.alias_map if self.alias_map[a].join_type == INNER))
clause, _ = self._add_q(q_object, self.used_aliases)
if clause:
self.where.add(clause, AND)
self.demote_joins(existing_inner)
def _add_q(self, q_object, used_aliases, branch_negated=False,
current_negated=False, allow_joins=True, split_subq=True):
"""
Adds a Q-object to the current filter.
"""
connector = q_object.connector
current_negated = current_negated ^ q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = self.where_class(connector=connector,
negated=q_object.negated)
joinpromoter = JoinPromoter(q_object.connector, len(q_object.children), current_negated)
for child in q_object.children:
if isinstance(child, Node):
child_clause, needed_inner = self._add_q(
child, used_aliases, branch_negated,
current_negated, allow_joins, split_subq)
joinpromoter.add_votes(needed_inner)
else:
child_clause, needed_inner = self.build_filter(
child, can_reuse=used_aliases, branch_negated=branch_negated,
current_negated=current_negated, connector=connector,
allow_joins=allow_joins, split_subq=split_subq,
)
joinpromoter.add_votes(needed_inner)
if child_clause:
target_clause.add(child_clause, connector)
needed_inner = joinpromoter.update_join_types(self)
return target_clause, needed_inner
def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False):
"""
Walks the list of names and turns them into PathInfo tuples. Note that
a single name in 'names' can generate multiple PathInfos (m2m for
example).
'names' is the path of names to travel, 'opts' is the model Options we
start the name resolving from, 'allow_many' is as for setup_joins().
If fail_on_missing is set to True, then a name that can't be resolved
will generate a FieldError.
Returns a list of PathInfo tuples. In addition returns the final field
(the last used join field), and target (which is a field guaranteed to
contain the same value as the final field). Finally, the method returns
those names that weren't found (which are likely transforms and the
final lookup).
"""
path, names_with_path = [], []
for pos, name in enumerate(names):
cur_names_with_path = (name, [])
if name == 'pk':
name = opts.pk.name
try:
field = opts.get_field(name)
# Fields that contain one-to-many relations with a generic
# model (like a GenericForeignKey) cannot generate reverse
# relations and therefore cannot be used for reverse querying.
if field.is_relation and not field.related_model:
raise FieldError(
"Field %r does not generate an automatic reverse "
"relation and therefore cannot be used for reverse "
"querying. If it is a GenericForeignKey, consider "
"adding a GenericRelation." % name
)
model = field.model._meta.concrete_model
except FieldDoesNotExist:
# We didn't find the current field, so move position back
# one step.
pos -= 1
if pos == -1 or fail_on_missing:
field_names = list(get_field_names_from_opts(opts))
available = sorted(field_names + list(self.annotation_select))
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(available)))
break
# Check if we need any joins for concrete inheritance cases (the
# field lives in parent, but we are currently in one of its
# children)
if model is not opts.model:
# The field lives on a base class of the current model.
# Skip the chain of proxy models to the concrete proxied model.
proxied_model = opts.concrete_model
for int_model in opts.get_base_chain(model):
if int_model is proxied_model:
opts = int_model._meta
else:
final_field = opts.parents[int_model]
targets = (final_field.remote_field.get_related_field(),)
opts = int_model._meta
path.append(PathInfo(final_field.model._meta, opts, targets, final_field, False, True))
cur_names_with_path[1].append(
PathInfo(final_field.model._meta, opts, targets, final_field, False, True)
)
if hasattr(field, 'get_path_info'):
pathinfos = field.get_path_info()
if not allow_many:
for inner_pos, p in enumerate(pathinfos):
if p.m2m:
cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1])
names_with_path.append(cur_names_with_path)
raise MultiJoin(pos + 1, names_with_path)
last = pathinfos[-1]
path.extend(pathinfos)
final_field = last.join_field
opts = last.to_opts
targets = last.target_fields
cur_names_with_path[1].extend(pathinfos)
names_with_path.append(cur_names_with_path)
else:
# Local non-relational field.
final_field = field
targets = (field,)
if fail_on_missing and pos + 1 != len(names):
raise FieldError(
"Cannot resolve keyword %r into field. Join on '%s'"
" not permitted." % (names[pos + 1], name))
break
return path, final_field, targets, names[pos + 1:]
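# Example walk-through with hypothetical models: resolving
# ['child', 'name'] on a Parent options object appends the PathInfos for
# the parent->child relation, stops at 'name' as the final local,
# non-relational field and returns an empty tail; resolving
# ['child', 'name', 'icontains'] produces the same path but returns
# ['icontains'] as the unresolved remainder, which the callers treat as
# the lookup/transform part.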
def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True):
"""
Compute the necessary table joins for the passage through the fields
given in 'names'. 'opts' is the Options class for the current model
(which gives the table we are starting from), 'alias' is the alias for
the table to start the joining from.
The 'can_reuse' defines the reverse foreign key joins we can reuse. It
can be None in which case all joins are reusable or a set of aliases
that can be reused. Note that non-reverse foreign keys are always
reusable when using setup_joins().
If 'allow_many' is False, then any reverse foreign key seen will
generate a MultiJoin exception.
Returns the final field involved in the joins, the target field (used
for any 'where' constraint), the final 'opts' value, the joins and the
field path travelled to generate the joins.
The target field is the field containing the concrete value. The final
field can be something different, for example a foreign key pointing to
that value. The final field is needed, for example, in some value
conversions (converting 'obj' in fk__id=obj to a pk value using the
foreign key field).
"""
joins = [alias]
# First, generate the path for the names
path, final_field, targets, rest = self.names_to_path(
names, opts, allow_many, fail_on_missing=True)
# Then, add the path to the query's joins. Note that we can't trim
# joins at this stage - we will need the information about join type
# of the trimmed joins.
for join in path:
opts = join.to_opts
if join.direct:
nullable = self.is_nullable(join.join_field)
else:
nullable = True
connection = Join(opts.db_table, alias, None, INNER, join.join_field, nullable)
reuse = can_reuse if join.m2m else None
alias = self.join(connection, reuse=reuse)
joins.append(alias)
return final_field, targets, opts, joins, path
def trim_joins(self, targets, joins, path):
"""
The 'targets' parameter holds the final fields being joined to, 'joins'
is the full list of join aliases. The 'path' contains the PathInfos
used to create the joins.
Returns the final target field and table alias and the new active
joins.
We will always trim any direct join if we have the target column
available already in the previous table. Reverse joins can't be
trimmed as we don't know if there is anything on the other side of
the join.
"""
joins = joins[:]
for pos, info in enumerate(reversed(path)):
if len(joins) == 1 or not info.direct:
break
join_targets = set(t.column for t in info.join_field.foreign_related_fields)
cur_targets = set(t.column for t in targets)
if not cur_targets.issubset(join_targets):
break
targets = tuple(r[0] for r in info.join_field.related_fields if r[1].column in cur_targets)
self.unref_alias(joins.pop())
return targets, joins[-1], joins
def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False):
if not allow_joins and LOOKUP_SEP in name:
raise FieldError("Joined field references are not permitted in this query")
if name in self.annotations:
if summarize:
# Summarize currently means we are doing an aggregate() query
# which is executed as a wrapped subquery if any of the
# aggregate() elements reference an existing annotation. In
# that case we need to return a Ref to the subquery's annotation.
return Ref(name, self.annotation_select[name])
else:
return self.annotation_select[name]
else:
field_list = name.split(LOOKUP_SEP)
field, sources, opts, join_list, path = self.setup_joins(
field_list, self.get_meta(),
self.get_initial_alias(), reuse)
targets, _, join_list = self.trim_joins(sources, join_list, path)
if len(targets) > 1:
raise FieldError("Referencing multicolumn fields with F() objects "
"isn't supported")
if reuse is not None:
reuse.update(join_list)
col = targets[0].get_col(join_list[-1], sources[0])
return col
def split_exclude(self, filter_expr, prefix, can_reuse, names_with_path):
"""
When doing an exclude against any kind of N-to-many relation, we need
to use a subquery. This method constructs the nested query, given the
original exclude filter (filter_expr) and the portion up to the first
N-to-many relation field.
As an example we could have original filter ~Q(child__name='foo').
We would get here with filter_expr = child__name, prefix = child and
can_reuse is a set of joins usable for filters in the original query.
We will turn this into equivalent of:
WHERE NOT (pk IN (SELECT parent_id FROM thetable
WHERE name = 'foo' AND parent_id IS NOT NULL))
It might be worth it to consider using WHERE NOT EXISTS as that has
saner null handling, and is easier for the backend's optimizer to
handle.
"""
# Generate the inner query.
query = Query(self.model)
query.add_filter(filter_expr)
query.clear_ordering(True)
# Try to keep the subquery as simple as possible by trimming leading
# joins from it.
trimmed_prefix, contains_louter = query.trim_start(names_with_path)
# Add extra check to make sure the selected field will not be null
# since we are adding an IN <subquery> clause. This prevents the
# database from tripping over IN (...,NULL,...) selects and returning
# nothing
col = query.select[0]
select_field = col.target
alias = col.alias
if self.is_nullable(select_field):
lookup_class = select_field.get_lookup('isnull')
lookup = lookup_class(select_field.get_col(alias), False)
query.where.add(lookup, AND)
if alias in can_reuse:
pk = select_field.model._meta.pk
# Need to add a restriction so that outer query's filters are in effect for
# the subquery, too.
query.bump_prefix(self)
lookup_class = select_field.get_lookup('exact')
# Note that the query.select[0].alias is different from alias
# due to bump_prefix above.
lookup = lookup_class(pk.get_col(query.select[0].alias),
pk.get_col(alias))
query.where.add(lookup, AND)
query.external_aliases.add(alias)
condition, needed_inner = self.build_filter(
('%s__in' % trimmed_prefix, query),
current_negated=True, branch_negated=True, can_reuse=can_reuse)
if contains_louter:
or_null_condition, _ = self.build_filter(
('%s__isnull' % trimmed_prefix, True),
current_negated=True, branch_negated=True, can_reuse=can_reuse)
condition.add(or_null_condition, OR)
# Note that the end result will be:
# (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL.
# This might look crazy but due to how IN works, this seems to be
# correct. If the IS NOT NULL check is removed then outercol NOT
# IN will return UNKNOWN. If the IS NULL check is removed, then if
# outercol IS NULL we will not match the row.
return condition, needed_inner
def set_empty(self):
self.where.add(NothingNode(), AND)
def is_empty(self):
return any(isinstance(c, NothingNode) for c in self.where.children)
def set_limits(self, low=None, high=None):
"""
Adjusts the limits on the rows retrieved. We use low/high to set these,
as it makes it more Pythonic to read and write. When the SQL query is
created, they are converted to the appropriate offset and limit values.
Any limits passed in here are applied relative to the existing
constraints. So low is added to the current low value and both will be
clamped to any existing high value.
"""
if high is not None:
if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
if low is not None:
if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low
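# Worked example: slicing a queryset as qs[5:10] ends up calling
# set_limits(5, 10) on a fresh query, giving low_mark=5, high_mark=10
# (OFFSET 5 LIMIT 5). Slicing that result again as [1:3] calls
# set_limits(1, 3) relative to the existing window: high_mark becomes
# min(10, 5 + 3) = 8 and low_mark then becomes min(8, 5 + 1) = 6, i.e.
# rows 6-7 of the original ordering (OFFSET 6 LIMIT 2).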
def clear_limits(self):
"""
Clears any existing limits.
"""
self.low_mark, self.high_mark = 0, None
def can_filter(self):
"""
Returns True if adding filters to this instance is still possible.
Typically, this means no limits or offsets have been put on the results.
"""
return not self.low_mark and self.high_mark is None
def clear_select_clause(self):
"""
Removes all fields from SELECT clause.
"""
self.select = []
self.default_cols = False
self.select_related = False
self.set_extra_mask(())
self.set_annotation_mask(())
def clear_select_fields(self):
"""
Clears the list of fields to select (but not extra_select columns).
Some queryset types completely replace any existing list of select
columns.
"""
self.select = []
self.values_select = []
def add_select(self, col):
self.default_cols = False
self.select.append(col)
def set_select(self, cols):
self.default_cols = False
self.select = cols
def add_distinct_fields(self, *field_names):
"""
Adds and resolves the given fields to the query's "distinct on" clause.
"""
self.distinct_fields = field_names
self.distinct = True
def add_fields(self, field_names, allow_m2m=True):
"""
Adds the given (model) fields to the select set. The field names are
added in the order specified.
"""
alias = self.get_initial_alias()
opts = self.get_meta()
try:
for name in field_names:
# Join promotion note - we must not remove any rows here, so
# if there are no existing joins, use an outer join.
_, targets, _, joins, path = self.setup_joins(
name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m)
targets, final_alias, joins = self.trim_joins(targets, joins, path)
for target in targets:
self.add_select(target.get_col(final_alias))
except MultiJoin:
raise FieldError("Invalid field name: '%s'" % name)
except FieldError:
if LOOKUP_SEP in name:
# For lookups spanning over relationships, show the error
# from the model on which the lookup failed.
raise
else:
names = sorted(list(get_field_names_from_opts(opts)) + list(self.extra)
+ list(self.annotation_select))
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names)))
def add_ordering(self, *ordering):
"""
Adds items from the 'ordering' sequence to the query's "order by"
clause. These items are either field names (not column names) --
possibly with a direction prefix ('-' or '?') -- or OrderBy
expressions.
If 'ordering' is empty, all ordering is cleared from the query.
"""
errors = []
for item in ordering:
if not hasattr(item, 'resolve_expression') and not ORDER_PATTERN.match(item):
errors.append(item)
if errors:
raise FieldError('Invalid order_by arguments: %s' % errors)
if ordering:
self.order_by.extend(ordering)
else:
self.default_ordering = False
def clear_ordering(self, force_empty):
"""
Removes any ordering settings. If 'force_empty' is True, there will be
no ordering in the resulting query (not even the model's default).
"""
self.order_by = []
self.extra_order_by = ()
if force_empty:
self.default_ordering = False
def set_group_by(self):
"""
Expands the GROUP BY clause required by the query.
This will usually be the set of all non-aggregate fields in the
return data. If the database backend supports grouping by the
primary key, and the query would be equivalent, the optimization
will be made automatically.
"""
self.group_by = []
for col in self.select:
self.group_by.append(col)
if self._annotations:
for alias, annotation in six.iteritems(self.annotations):
for col in annotation.get_group_by_cols():
self.group_by.append(col)
def add_select_related(self, fields):
"""
Sets up the select_related data structure so that we only select
certain related models (as opposed to all models, when
self.select_related=True).
"""
if isinstance(self.select_related, bool):
field_dict = {}
else:
field_dict = self.select_related
for field in fields:
d = field_dict
for part in field.split(LOOKUP_SEP):
d = d.setdefault(part, {})
self.select_related = field_dict
def add_extra(self, select, select_params, where, params, tables, order_by):
"""
Adds data to the various extra_* attributes for user-created additions
to the query.
"""
if select:
# We need to pair any placeholder markers in the 'select'
# dictionary with their parameters in 'select_params' so that
# subsequent updates to the select dictionary also adjust the
# parameters appropriately.
select_pairs = OrderedDict()
if select_params:
param_iter = iter(select_params)
else:
param_iter = iter([])
for name, entry in select.items():
entry = force_text(entry)
entry_params = []
pos = entry.find("%s")
while pos != -1:
if pos == 0 or entry[pos - 1] != '%':
entry_params.append(next(param_iter))
pos = entry.find("%s", pos + 2)
select_pairs[name] = (entry, entry_params)
# This is order preserving, since self.extra_select is an OrderedDict.
self.extra.update(select_pairs)
if where or params:
self.where.add(ExtraWhere(where, params), AND)
if tables:
self.extra_tables += tuple(tables)
if order_by:
self.extra_order_by = order_by
def clear_deferred_loading(self):
"""
Remove any fields from the deferred loading set.
"""
self.deferred_loading = (set(), True)
def add_deferred_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
exclude from loading from the database when automatic column selection
is done. The new field names are added to any existing field names that
are deferred (or removed from any existing field names that are marked
as the only ones for immediate loading).
"""
# Fields on related models are stored in the literal double-underscore
# format, so that we can use a set datastructure. We do the foo__bar
# splitting and handling when computing the SQL column names (as part of
# get_columns()).
existing, defer = self.deferred_loading
if defer:
# Add to existing deferred names.
self.deferred_loading = existing.union(field_names), True
else:
# Remove names from the set of any existing "immediate load" names.
self.deferred_loading = existing.difference(field_names), False
def add_immediate_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
retrieve when the SQL is executed ("immediate loading" fields). The
field names replace any existing immediate loading field names. If
there are field names already specified for deferred loading, those
names are removed from the new field_names before storing the new names
for immediate loading. (That is, immediate loading overrides any
existing immediate values, but respects existing deferrals.)
"""
existing, defer = self.deferred_loading
field_names = set(field_names)
if 'pk' in field_names:
field_names.remove('pk')
field_names.add(self.get_meta().pk.name)
if defer:
# Remove any existing deferred names from the current set before
# setting the new names.
self.deferred_loading = field_names.difference(existing), False
else:
# Replace any existing "immediate load" field names.
self.deferred_loading = field_names, False
def get_loaded_field_names(self):
"""
If any fields are marked to be deferred, returns a dictionary mapping
models to the set of field names that will be loaded for them. If a
model is not in the returned dictionary, none of its fields are
deferred.
If no fields are marked for deferral, returns an empty dictionary.
"""
# We cache this because we call this function multiple times
# (compiler.fill_related_selections, query.iterator)
try:
return self._loaded_field_names_cache
except AttributeError:
collection = {}
self.deferred_to_data(collection, self.get_loaded_field_names_cb)
self._loaded_field_names_cache = collection
return collection
def get_loaded_field_names_cb(self, target, model, fields):
"""
Callback used by get_loaded_field_names().
"""
target[model] = {f.attname for f in fields}
def set_aggregate_mask(self, names):
warnings.warn(
"set_aggregate_mask() is deprecated. Use set_annotation_mask() instead.",
RemovedInDjango20Warning, stacklevel=2)
self.set_annotation_mask(names)
def set_annotation_mask(self, names):
"Set the mask of annotations that will actually be returned by the SELECT"
if names is None:
self.annotation_select_mask = None
else:
self.annotation_select_mask = set(names)
self._annotation_select_cache = None
def append_aggregate_mask(self, names):
warnings.warn(
"append_aggregate_mask() is deprecated. Use append_annotation_mask() instead.",
RemovedInDjango20Warning, stacklevel=2)
self.append_annotation_mask(names)
def append_annotation_mask(self, names):
if self.annotation_select_mask is not None:
self.set_annotation_mask(set(names).union(self.annotation_select_mask))
def set_extra_mask(self, names):
"""
Set the mask of extra select items that will be returned by SELECT.
We don't actually remove them from the Query since they might be used
later.
"""
if names is None:
self.extra_select_mask = None
else:
self.extra_select_mask = set(names)
self._extra_select_cache = None
@property
def annotation_select(self):
"""The OrderedDict of aggregate columns that are not masked, and should
be used in the SELECT clause.
This result is cached for optimization purposes.
"""
if self._annotation_select_cache is not None:
return self._annotation_select_cache
elif not self._annotations:
return {}
elif self.annotation_select_mask is not None:
self._annotation_select_cache = OrderedDict(
(k, v) for k, v in self.annotations.items()
if k in self.annotation_select_mask
)
return self._annotation_select_cache
else:
return self.annotations
@property
def aggregate_select(self):
warnings.warn(
"aggregate_select() is deprecated. Use annotation_select() instead.",
RemovedInDjango20Warning, stacklevel=2)
return self.annotation_select
@property
def extra_select(self):
if self._extra_select_cache is not None:
return self._extra_select_cache
if not self._extra:
return {}
elif self.extra_select_mask is not None:
self._extra_select_cache = OrderedDict(
(k, v) for k, v in self.extra.items()
if k in self.extra_select_mask
)
return self._extra_select_cache
else:
return self.extra
def trim_start(self, names_with_path):
"""
Trims joins from the start of the join path. The candidates for trim
are the PathInfos in names_with_path structure that are m2m joins.
Also sets the select column so the start matches the join.
This method is meant to be used for generating the subquery joins &
cols in split_exclude().
Returns a lookup usable for doing outerq.filter(lookup=self). It also
returns whether the joins in the prefix contain a LEFT OUTER join.
"""
all_paths = []
for _, paths in names_with_path:
all_paths.extend(paths)
contains_louter = False
# Trim and operate only on tables that were generated for
# the lookup part of the query. That is, avoid trimming
# joins generated for F() expressions.
lookup_tables = [t for t in self.tables if t in self._lookup_joins or t == self.tables[0]]
for trimmed_paths, path in enumerate(all_paths):
if path.m2m:
break
if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER:
contains_louter = True
alias = lookup_tables[trimmed_paths]
self.unref_alias(alias)
# The path.join_field is a Rel; let's get the other side's field.
join_field = path.join_field.field
# Build the filter prefix.
paths_in_prefix = trimmed_paths
trimmed_prefix = []
for name, path in names_with_path:
if paths_in_prefix - len(path) < 0:
break
trimmed_prefix.append(name)
paths_in_prefix -= len(path)
trimmed_prefix.append(
join_field.foreign_related_fields[0].name)
trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
# Let's still see if we can trim the first join from the inner query
# (that is, self). We can't do this for LEFT JOINs because we would
# miss those rows that have nothing on the outer side.
if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type != LOUTER:
select_fields = [r[0] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths + 1]
self.unref_alias(lookup_tables[trimmed_paths])
extra_restriction = join_field.get_extra_restriction(
self.where_class, None, lookup_tables[trimmed_paths + 1])
if extra_restriction:
self.where.add(extra_restriction, AND)
else:
# TODO: It might be possible to trim more joins from the start of the
# inner query if it happens to have a longer join chain containing the
# values in select_fields. Let's punt on this for now.
select_fields = [r[1] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths]
# The found starting point is likely a Join instead of a BaseTable reference.
# But the first entry in the query's FROM clause must not be a JOIN.
for table in self.tables:
if self.alias_refcount[table] > 0:
self.alias_map[table] = BaseTable(self.alias_map[table].table_name, table)
break
self.set_select([f.get_col(select_alias) for f in select_fields])
return trimmed_prefix, contains_louter
def is_nullable(self, field):
"""
A helper to check if the given field should be treated as nullable.
Some backends treat '' as null and Django treats such fields as
nullable for those backends. In such situations field.null can be
False even if we should treat the field as nullable.
"""
# We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
# (nor should it have) knowledge of which connection is going to be
# used. The proper fix would be to defer all decisions where
# is_nullable() is needed to the compiler stage, but that is not easy
# to do currently.
if ((connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls)
and field.empty_strings_allowed):
return True
else:
return field.null
def get_order_dir(field, default='ASC'):
"""
Returns the field name and direction for an order specification. For
example, '-foo' is returned as ('foo', 'DESC').
The 'default' param is used to indicate which way no prefix (or a '+'
prefix) should sort. The '-' prefix always sorts the opposite way.
"""
dirn = ORDER_DIR[default]
if field[0] == '-':
return field[1:], dirn[1]
return field, dirn[0]
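# Doctest-style illustration (assuming the usual ORDER_DIR mapping
# {'ASC': ('ASC', 'DESC'), 'DESC': ('DESC', 'ASC')}):
#
#   >>> get_order_dir('foo')
#   ('foo', 'ASC')
#   >>> get_order_dir('-foo')
#   ('foo', 'DESC')
#   >>> get_order_dir('-foo', default='DESC')
#   ('foo', 'ASC')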
def add_to_dict(data, key, value):
"""
A helper function to add "value" to the set of values for "key", whether or
not "key" already exists.
"""
if key in data:
data[key].add(value)
else:
data[key] = {value}
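# Quick illustration:
#
#   >>> data = {}
#   >>> add_to_dict(data, 'author', 'name')
#   >>> add_to_dict(data, 'author', 'email')
#   >>> sorted(data['author'])
#   ['email', 'name']
#
# i.e. values accumulate into a per-key set, created on first use.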
def is_reverse_o2o(field):
"""
A little helper to check if the given field is reverse-o2o. The field is
expected to be some sort of relation field or related object.
"""
return field.is_relation and field.one_to_one and not field.concrete
class JoinPromoter(object):
"""
A class to abstract away join promotion problems for complex filter
conditions.
"""
def __init__(self, connector, num_children, negated):
self.connector = connector
self.negated = negated
if self.negated:
if connector == AND:
self.effective_connector = OR
else:
self.effective_connector = AND
else:
self.effective_connector = self.connector
self.num_children = num_children
# Maps of table alias to how many times it is seen as required for
# inner and/or outer joins.
self.outer_votes = {}
self.inner_votes = {}
def add_votes(self, inner_votes):
"""
Add single vote per item to self.inner_votes. Parameter can be any
iterable.
"""
for voted in inner_votes:
self.inner_votes[voted] = self.inner_votes.get(voted, 0) + 1
def update_join_types(self, query):
"""
Change join types so that the generated query is as efficient as
possible, but still correct. So, change as many joins as possible
to INNER, but don't make OUTER joins INNER if that could remove
results from the query.
"""
to_promote = set()
to_demote = set()
# The effective_connector is used so that NOT (a AND b) is treated
# similarly to (a OR b) for join promotion.
for table, votes in self.inner_votes.items():
# We must use outer joins in an OR case when the join isn't contained
# in all of the joins. Otherwise the INNER JOIN itself could remove
# valid results. Consider the case where a model with rel_a and
# rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now,
# if the rel_a join doesn't produce any results (for example a
# reverse foreign key with no rows, or a null value in a direct
# foreign key), and there is a matching row in rel_b with col=2,
# then an INNER join to rel_a would remove a valid match from the
# query. So, we need to promote any existing INNER to LOUTER (it is
# possible this promotion in turn will be demoted later on).
if self.effective_connector == 'OR' and votes < self.num_children:
to_promote.add(table)
# If connector is AND and there is a filter that can match only
# when there is a joinable row, then use INNER. For example, in
# rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL
# as join output, then the col=1 or col=2 can't match (as
# NULL=anything is always false).
# For the OR case, if all children voted for a join to be inner,
# then we can use INNER for the join. For example:
# (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell)
# then if rel_a doesn't produce any rows, the whole condition
# can't match. Hence we can safely use INNER join.
if self.effective_connector == 'AND' or (
self.effective_connector == 'OR' and votes == self.num_children):
to_demote.add(table)
# Finally, what happens in cases where we have:
# (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0
# Now, we first generate the OR clause, and promote joins for it
# in the first if branch above. Both rel_a and rel_b are promoted
# to LOUTER joins. After that we do the AND case. The OR case
# voted no inner joins but the rel_a__col__gte=0 votes inner join
# for rel_a. We demote it back to INNER join (in AND case a single
# vote is enough). The demotion is OK, if rel_a doesn't produce
# rows, then the rel_a__col__gte=0 clause can't be true, and thus
# the whole clause must be false. So, it is safe to use INNER
# join.
# Note that in this example we could just as well have the __gte
# clause and the OR clause swapped. Or we could replace the __gte
# clause with an OR clause containing rel_a__col=1|rel_a__col=2,
# and again we could safely demote to INNER.
query.promote_joins(to_promote)
query.demote_joins(to_demote)
return to_demote
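# Illustrative voting example: for rel_a__col=1 | rel_b__col=2 the promoter
# is built with connector OR and num_children=2, and each child votes for
# the joins it needs inner, so rel_a and rel_b each get one vote. Under an
# effective OR, 1 < num_children, so both joins are promoted to LEFT OUTER.
# Had both children referenced rel_a (rel_a__col=1 | rel_a__col=2), rel_a
# would collect two votes and be demoted back to INNER instead.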
| gannetson/django | django/db/models/sql/query.py | Python | bsd-3-clause | 92,461 |
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
def register_types(module):
root_module = module.get_root()
## ipv4-nix-vector-routing.h: ns3::Ipv4NixVectorRouting [class]
module.add_class('Ipv4NixVectorRouting', parent=root_module['ns3::Ipv4RoutingProtocol'])
typehandlers.add_type_alias('std::map< ns3::Ipv4Address, ns3::Ptr< ns3::NixVector >, std::less< ns3::Ipv4Address >, std::allocator< std::pair< ns3::Ipv4Address const, ns3::Ptr< ns3::NixVector > > > >', 'ns3::NixMap_t')
typehandlers.add_type_alias('std::map< ns3::Ipv4Address, ns3::Ptr< ns3::NixVector >, std::less< ns3::Ipv4Address >, std::allocator< std::pair< ns3::Ipv4Address const, ns3::Ptr< ns3::NixVector > > > >*', 'ns3::NixMap_t*')
typehandlers.add_type_alias('std::map< ns3::Ipv4Address, ns3::Ptr< ns3::NixVector >, std::less< ns3::Ipv4Address >, std::allocator< std::pair< ns3::Ipv4Address const, ns3::Ptr< ns3::NixVector > > > >&', 'ns3::NixMap_t&')
typehandlers.add_type_alias('std::map< ns3::Ipv4Address, ns3::Ptr< ns3::Ipv4Route >, std::less< ns3::Ipv4Address >, std::allocator< std::pair< ns3::Ipv4Address const, ns3::Ptr< ns3::Ipv4Route > > > >', 'ns3::Ipv4RouteMap_t')
typehandlers.add_type_alias('std::map< ns3::Ipv4Address, ns3::Ptr< ns3::Ipv4Route >, std::less< ns3::Ipv4Address >, std::allocator< std::pair< ns3::Ipv4Address const, ns3::Ptr< ns3::Ipv4Route > > > >*', 'ns3::Ipv4RouteMap_t*')
typehandlers.add_type_alias('std::map< ns3::Ipv4Address, ns3::Ptr< ns3::Ipv4Route >, std::less< ns3::Ipv4Address >, std::allocator< std::pair< ns3::Ipv4Address const, ns3::Ptr< ns3::Ipv4Route > > > >&', 'ns3::Ipv4RouteMap_t&')
## Register a nested module for the namespace Config
nested_module = module.add_cpp_namespace('Config')
register_types_ns3_Config(nested_module)
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace addressUtils
nested_module = module.add_cpp_namespace('addressUtils')
register_types_ns3_addressUtils(nested_module)
## Register a nested module for the namespace aodv
nested_module = module.add_cpp_namespace('aodv')
register_types_ns3_aodv(nested_module)
## Register a nested module for the namespace dot11s
nested_module = module.add_cpp_namespace('dot11s')
register_types_ns3_dot11s(nested_module)
## Register a nested module for the namespace dsdv
nested_module = module.add_cpp_namespace('dsdv')
register_types_ns3_dsdv(nested_module)
## Register a nested module for the namespace flame
nested_module = module.add_cpp_namespace('flame')
register_types_ns3_flame(nested_module)
## Register a nested module for the namespace internal
nested_module = module.add_cpp_namespace('internal')
register_types_ns3_internal(nested_module)
## Register a nested module for the namespace olsr
nested_module = module.add_cpp_namespace('olsr')
register_types_ns3_olsr(nested_module)
def register_types_ns3_Config(module):
root_module = module.get_root()
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_addressUtils(module):
root_module = module.get_root()
def register_types_ns3_aodv(module):
root_module = module.get_root()
def register_types_ns3_dot11s(module):
root_module = module.get_root()
def register_types_ns3_dsdv(module):
root_module = module.get_root()
def register_types_ns3_flame(module):
root_module = module.get_root()
def register_types_ns3_internal(module):
root_module = module.get_root()
def register_types_ns3_olsr(module):
root_module = module.get_root()
def register_methods(root_module):
register_Ns3Ipv4NixVectorRouting_methods(root_module, root_module['ns3::Ipv4NixVectorRouting'])
return
def register_Ns3Ipv4NixVectorRouting_methods(root_module, cls):
## ipv4-nix-vector-routing.h: ns3::Ipv4NixVectorRouting::Ipv4NixVectorRouting(ns3::Ipv4NixVectorRouting const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4NixVectorRouting const &', 'arg0')])
## ipv4-nix-vector-routing.h: ns3::Ipv4NixVectorRouting::Ipv4NixVectorRouting() [constructor]
cls.add_constructor([])
## ipv4-nix-vector-routing.h: void ns3::Ipv4NixVectorRouting::FlushGlobalNixRoutingCache() [member function]
cls.add_method('FlushGlobalNixRoutingCache',
'void',
[])
## ipv4-nix-vector-routing.h: static ns3::TypeId ns3::Ipv4NixVectorRouting::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## ipv4-nix-vector-routing.h: void ns3::Ipv4NixVectorRouting::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')])
## ipv4-nix-vector-routing.h: void ns3::Ipv4NixVectorRouting::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='private', is_virtual=True)
## ipv4-nix-vector-routing.h: void ns3::Ipv4NixVectorRouting::NotifyAddAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
cls.add_method('NotifyAddAddress',
'void',
[param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')],
visibility='private', is_virtual=True)
## ipv4-nix-vector-routing.h: void ns3::Ipv4NixVectorRouting::NotifyInterfaceDown(uint32_t interface) [member function]
cls.add_method('NotifyInterfaceDown',
'void',
[param('uint32_t', 'interface')],
visibility='private', is_virtual=True)
## ipv4-nix-vector-routing.h: void ns3::Ipv4NixVectorRouting::NotifyInterfaceUp(uint32_t interface) [member function]
cls.add_method('NotifyInterfaceUp',
'void',
[param('uint32_t', 'interface')],
visibility='private', is_virtual=True)
## ipv4-nix-vector-routing.h: void ns3::Ipv4NixVectorRouting::NotifyRemoveAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
cls.add_method('NotifyRemoveAddress',
'void',
[param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')],
visibility='private', is_virtual=True)
## ipv4-nix-vector-routing.h: void ns3::Ipv4NixVectorRouting::PrintRoutingTable(ns3::Ptr<ns3::OutputStreamWrapper> stream) const [member function]
cls.add_method('PrintRoutingTable',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')],
is_const=True, visibility='private', is_virtual=True)
## ipv4-nix-vector-routing.h: bool ns3::Ipv4NixVectorRouting::RouteInput(ns3::Ptr<ns3::Packet const> p, ns3::Ipv4Header const & header, ns3::Ptr<const ns3::NetDevice> idev, ns3::Callback<void, ns3::Ptr<ns3::Ipv4Route>, ns3::Ptr<ns3::Packet const>, ns3::Ipv4Header const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ucb, ns3::Callback<void,ns3::Ptr<ns3::Ipv4MulticastRoute>,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> mcb, ns3::Callback<void,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,unsigned int,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> lcb, ns3::Callback<void, ns3::Ptr<ns3::Packet const>, ns3::Ipv4Header const&, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ecb) [member function]
cls.add_method('RouteInput',
'bool',
[param('ns3::Ptr< ns3::Packet const >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice const >', 'idev'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ucb'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4MulticastRoute >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'mcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'lcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb')],
visibility='private', is_virtual=True)
## ipv4-nix-vector-routing.h: ns3::Ptr<ns3::Ipv4Route> ns3::Ipv4NixVectorRouting::RouteOutput(ns3::Ptr<ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<ns3::NetDevice> oif, ns3::Socket::SocketErrno & sockerr) [member function]
cls.add_method('RouteOutput',
'ns3::Ptr< ns3::Ipv4Route >',
[param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice >', 'oif'), param('ns3::Socket::SocketErrno &', 'sockerr')],
visibility='private', is_virtual=True)
## ipv4-nix-vector-routing.h: void ns3::Ipv4NixVectorRouting::SetIpv4(ns3::Ptr<ns3::Ipv4> ipv4) [member function]
cls.add_method('SetIpv4',
'void',
[param('ns3::Ptr< ns3::Ipv4 >', 'ipv4')],
visibility='private', is_virtual=True)
return
def register_functions(root_module):
module = root_module
register_functions_ns3_Config(module.get_submodule('Config'), root_module)
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
register_functions_ns3_addressUtils(module.get_submodule('addressUtils'), root_module)
register_functions_ns3_aodv(module.get_submodule('aodv'), root_module)
register_functions_ns3_dot11s(module.get_submodule('dot11s'), root_module)
register_functions_ns3_dsdv(module.get_submodule('dsdv'), root_module)
register_functions_ns3_flame(module.get_submodule('flame'), root_module)
register_functions_ns3_internal(module.get_submodule('internal'), root_module)
register_functions_ns3_olsr(module.get_submodule('olsr'), root_module)
return
def register_functions_ns3_Config(module, root_module):
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def register_functions_ns3_addressUtils(module, root_module):
return
def register_functions_ns3_aodv(module, root_module):
return
def register_functions_ns3_dot11s(module, root_module):
return
def register_functions_ns3_dsdv(module, root_module):
return
def register_functions_ns3_flame(module, root_module):
return
def register_functions_ns3_internal(module, root_module):
return
def register_functions_ns3_olsr(module, root_module):
return
| annegabrielle/secure_adhoc_network_ns-3 | ns3_source_code/ns-3.10/bindings/python/apidefs/gcc-LP64/ns3_module_nix_vector_routing.py | Python | gpl-2.0 | 11,439 |
# -*- coding: utf-8 -*-
from ..internal.XFSAccount import XFSAccount
class ExashareCom(XFSAccount):
__name__ = "ExashareCom"
__type__ = "account"
__version__ = "0.06"
__status__ = "testing"
__description__ = """Exashare.com account plugin"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "[email protected]")]
PLUGIN_DOMAIN = "exashare.com"
| Arno-Nymous/pyload | module/plugins/accounts/ExashareCom.py | Python | gpl-3.0 | 388 |
# This file is part of OpenHatch.
# Copyright (C) 2010 Parker Phinney
# Copyright (C) 2010 Jack Grigg
# Copyright (C) 2011 Krzysztof Tarnowski ([email protected])
# Copyright (C) 2009, 2010, 2011 OpenHatch, Inc.
# Copyright (C) 2011 Jairo E. Lopez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import django.test
from django.core.urlresolvers import reverse
import twill
from twill import commands as tc
from django.core.handlers.wsgi import WSGIHandler
from django.contrib.staticfiles.handlers import StaticFilesHandler
from StringIO import StringIO
from django.test.client import Client
import os
import os.path
import subprocess
from django.core.cache import cache
from django.conf import settings
import mock
import datetime
import logging
from django.utils import unittest
from django.utils.unittest import expectedFailure
import mysite.base.view_helpers
import mysite.base.decorators
import mysite.search.models
import mysite.base.templatetags.base_extras
import mysite.base.unicode_sanity
import mysite.profile.views
import mysite.base.views
import mysite.project.views
import mysite.settings
import mysite.base.management.commands.nagios
import mysite.profile.management.commands.send_emails
logger = logging.getLogger(__name__)
def make_twill_url(url):
    # Rewrite production URLs so they point at the local twill intercept.
return url.replace("http://openhatch.org/",
"http://127.0.0.1:8080/")
def better_make_twill_url(url):
return make_twill_url(url.replace('+', '%2B'))
def twill_goto_view(view_name, kwargs):
url = "http://openhatch.org" + reverse(view_name, kwargs=kwargs)
tc.go(better_make_twill_url(url))
mock_get = mock.Mock()
mock_get.return_value = None
class TwillTests(django.test.TestCase):
    '''Some basic methods needed by other testing classes.'''
@staticmethod
def _twill_setup():
app = StaticFilesHandler(WSGIHandler())
twill.add_wsgi_intercept("127.0.0.1", 8080, lambda: app)
@staticmethod
def _twill_quiet():
# suppress normal output of twill.. You don't want to
# call this if you want an interactive session
twill.set_output(StringIO())
def setUp(self):
self.real_get = django.core.cache.cache.get
django.core.cache.cache.get = mock_get
self.old_dbe = settings.DEBUG_PROPAGATE_EXCEPTIONS
settings.DEBUG_PROPAGATE_EXCEPTIONS = True
TwillTests._twill_setup()
TwillTests._twill_quiet()
def tearDown(self):
# If you get an error on one of these lines,
# maybe you didn't run base.TwillTests.setUp?
settings.DEBUG_PROPAGATE_EXCEPTIONS = self.old_dbe
twill.remove_wsgi_intercept('127.0.0.1', 8080)
tc.reset_browser()
django.core.cache.cache.get = self.real_get
def login_with_twill(self):
# Visit login page
login_url = 'http://openhatch.org/account/login/old'
tc.go(make_twill_url(login_url))
# Log in
username = "paulproteus"
password = "paulproteus's unbreakable password"
tc.fv('login', 'username', username)
tc.fv('login', 'password', password)
tc.submit()
def login_with_client(self, username='paulproteus',
password="paulproteus's unbreakable password"):
client = Client()
success = client.login(username=username,
password=password)
self.assertTrue(success)
return client
def login_with_client_as_barry(self):
return self.login_with_client(username='barry', password='parallelism')
class MySQLRegex(TwillTests):
def test_escape(self):
before2after = {
'n': '[n]',
']': ']',
'[n': '[[][n]'
}
for before, after in before2after.items():
self.assertEqual(
mysite.base.view_helpers.mysql_regex_escape(before),
after)
class TestUriDataHelper(TwillTests):
def test(self):
request = mysite.base.view_helpers.ObjectFromDict({
'is_secure': lambda: True,
'META': {'SERVER_PORT': '443',
'SERVER_NAME': 'name'}})
data = ((mysite.base.view_helpers.
get_uri_metadata_for_generating_absolute_links(request)))
self.assertEqual(data, {'uri_scheme': 'https',
'url_prefix': 'name'})
class GeocoderCanGeocode(TwillTests):
def get_geocoding_in_json_for_unicode_string(self):
unicode_str = u'Bark\xe5ker, T\xf8nsberg, Vestfold, Norway'
# Just exercise the geocoder and ensure it doesn't blow up.
return mysite.base.view_helpers.cached_geocoding_in_json(unicode_str)
def test_unicode_string(self):
self.get_geocoding_in_json_for_unicode_string()
class RemoveByteOrderMarker(unittest.TestCase):
def test(self):
sample_bytes = '\xef\xbb\xbf' + 'hi'
as_fd = StringIO(sample_bytes)
self.assertNotEqual('hi', as_fd.read())
as_fd = StringIO(sample_bytes)
cleaned_up_fd = (
mysite.base.unicode_sanity.wrap_file_object_in_utf8_check(as_fd))
result = cleaned_up_fd.read()
self.assertEqual(type(result), str) # not unicode
self.assertEqual(result, 'hi')
class GeocoderCanCache(django.test.TestCase):
unicode_address = u'Bark\xe5ker, T\xf8nsberg, Vestfold, Norway'
def get_geocoding_in_json_for_unicode_string(self):
# Just exercise the geocoder and ensure it doesn't blow up.
return mysite.base.view_helpers.cached_geocoding_in_json(
self.unicode_address)
mock_geocoder = mock.Mock()
@mock.patch("mysite.base.view_helpers._geocode", mock_geocoder)
def test_unicode_strings_get_cached(self):
# Let's make sure that the first time, this runs with original_json,
# that the cache is empty, and we populate it with original_json.
cache.delete(
mysite.base.view_helpers.address2cache_key_name(
self.unicode_address))
        # NOTE This test uses django.test.TestCase to skip our
        # monkey-patching of the cache framework
# When the geocoder's results are being cached properly,
# the base controller named '_geocode' will not run more than once.
original_json = "{'key': 'original value'}"
different_json = (
"{'key': 'if caching works we should never get this value'}")
self.mock_geocoder.return_value = eval(original_json)
self.assertTrue(
'original value' in
self.get_geocoding_in_json_for_unicode_string())
self.mock_geocoder.return_value = eval(different_json)
try:
json = self.get_geocoding_in_json_for_unicode_string()
self.assertTrue('original value' in json)
except AssertionError:
raise AssertionError(
"Geocoded location in json was not cached; it now equals "
+ json)
class TestUnicodifyDecorator(TwillTests):
def test(self):
        utf8_data = u'\xc3\xa9'.encode('utf-8') # é
@mysite.base.decorators.unicodify_strings_when_inputted
def sample_thing(arg):
self.assertEqual(type(arg), unicode)
sample_thing(utf8_data)
class Feed(TwillTests):
fixtures = ['user-paulproteus', 'person-paulproteus']
def test_feed_shows_answers(self):
# Visit the homepage, notice that there are no answers in the context.
def get_answers_from_homepage():
homepage_response = self.client.get('/')
return homepage_response.context[0]['recent_feed_items']
self.assertFalse(get_answers_from_homepage())
# Create a few answers on the project discussion page.
for x in range(4):
mysite.search.models.Answer.create_dummy()
recent_feed_items = (
mysite.search.models.Answer.objects.all().order_by(
'-modified_date'))
# Visit the homepage, assert that the feed item data is on the page,
# ordered by date descending.
actual_answer_pks = [
answer.pk for answer in get_answers_from_homepage()]
expected_answer_pks = [answer.pk for answer in recent_feed_items]
self.assertEqual(actual_answer_pks, expected_answer_pks)
def test_feed_shows_wanna_help(self):
# set things up so there was a wanna help button click
person = mysite.profile.models.Person.objects.get(
user__username='paulproteus')
p_before = mysite.search.models.Project.create_dummy()
client = self.login_with_client()
post_to = reverse(mysite.project.views.wanna_help_do)
response = client.post(post_to, {u'project': unicode(p_before.pk)})
# Now when we GET the home page, we see a Note
# to that effect in the feed
response = client.get('/')
items = response.context[0]['recent_feed_items']
note_we_want_to_see = (
mysite.search.models.WannaHelperNote.objects.get(
person=person, project=p_before))
self.assertTrue(note_we_want_to_see in items)
class CacheMethod(TwillTests):
@mock.patch('django.core.cache.cache')
def test(self, mock_cache):
# Step 0: mock_cache.get() needs to return None
mock_cache.get.return_value = None
# Step 1: Create a method where we can test if it was cached (+ cache
# it)
class SomeClass:
def __init__(self):
self.call_counter = 0
def cache_key_getter_name(self):
return 'doodles'
@mysite.base.decorators.cache_method('cache_key_getter_name')
def some_method(self):
self.call_counter += 1
return str(self.call_counter)
# Step 2: Call it once to fill the cache
sc = SomeClass()
self.assertEqual(sc.some_method(), '1')
# Step 3: See if the cache has it now
mock_cache.set.assert_called_with(
'doodles', '{"value": "1"}', 86400 * 10)
class EnhanceNextWithNewUserMetadata(TwillTests):
def test_easy(self):
sample_input = '/'
wanted = '/?newuser=true'
got = (
mysite.base.templatetags.base_extras.
enhance_next_to_annotate_it_with_newuser_is_true(sample_input))
self.assertEqual(wanted, got)
def test_with_existing_query_string(self):
sample_input = '/?a=b'
wanted = '/?a=b&newuser=true'
got = (
mysite.base.templatetags.base_extras.
enhance_next_to_annotate_it_with_newuser_is_true(sample_input))
self.assertEqual(wanted, got)
def test_with_existing_newuser_equals_true(self):
sample_input = '/?a=b&newuser=true'
wanted = sample_input
got = (mysite.base.templatetags.base_extras.
enhance_next_to_annotate_it_with_newuser_is_true(sample_input))
self.assertEqual(wanted, got)
class Unsubscribe(TwillTests):
fixtures = ['user-paulproteus', 'person-paulproteus']
def test_verify_unsubscribe_token(self):
"""Generate a valid unsubscribe token. Use it. See that it works. Use
an invalid one. See that it doesn't work."""
dude = mysite.profile.models.Person.objects.get(user__username='paulproteus')
# Generate an invalid token (easiest to do this first)
plausible_but_invalid_token_string = dude.generate_new_unsubscribe_token().string
# Make that token invalid by nuking the UnsubscribeToken table
mysite.profile.models.UnsubscribeToken.objects.all().delete()
# Generate a once-valid but now-expired token
expired_token = dude.generate_new_unsubscribe_token()
just_over_three_months_ago = datetime.datetime.utcnow() - datetime.timedelta(days=91)
expired_token.created_date = just_over_three_months_ago
expired_token.save()
# Generate a valid token
valid_token_string = dude.generate_new_unsubscribe_token().string
owner = mysite.profile.models.UnsubscribeToken.whose_token_string_is_this(valid_token_string)
self.assertEqual(owner, dude)
# This should definitely be false
self.assertNotEqual(valid_token_string, plausible_but_invalid_token_string)
# The invalid token should fail
self.assertFalse(mysite.profile.models.UnsubscribeToken.whose_token_string_is_this(plausible_but_invalid_token_string))
self.assertFalse(mysite.profile.models.UnsubscribeToken.whose_token_string_is_this(expired_token.string))
def test_unsubscribe_view(self):
dude = mysite.profile.models.Person.objects.get(user__username='paulproteus')
# Generate a valid token
valid_token_string = dude.generate_new_unsubscribe_token().string
# Test that the unsubscribe view's context contains the owner
url = reverse(mysite.profile.views.unsubscribe, kwargs={'token_string': valid_token_string})
logger.debug("url %s", url)
response = self.client.get(url)
logger.debug("response %s", response)
self.assertEqual(
mysite.profile.models.Person.objects.get(),
response.context['unsubscribe_this_user'])
def test_unsubscribe_post_handler(self):
def get_dude():
return mysite.profile.models.Person.objects.get(user__username='paulproteus')
dude = get_dude()
self.assertTrue(get_dude().email_me_re_projects)
# Generate a valid token
valid_token_string = dude.generate_new_unsubscribe_token().string
self.client.post(reverse(mysite.profile.views.unsubscribe_do), {'token_string': valid_token_string})
self.assertFalse(get_dude().email_me_re_projects)
@expectedFailure
def test_submit_form(self):
def get_dude():
return mysite.profile.models.Person.objects.get(user__username='paulproteus')
dude = get_dude()
self.assertTrue(get_dude().email_me_re_projects)
# Generate a valid token
valid_token_string = dude.generate_new_unsubscribe_token().string
self.assertIsNone(twill_goto_view(mysite.profile.views.unsubscribe, kwargs={'token_string': valid_token_string}))
#TODO Figure out why tc.submit() returns a NoneType and fails
#A couple of ideas:
# South migration on MySQL
# submit is broken
# twill should leave the code base for WebTest
self.assertIsNone(tc.submit())
self.assertIsNotNone(get_dude().email_me_re_projects)
class TimestampTests(django.test.TestCase):
def test_bugzilla_urls_get_and_update_timestamp_without_errors(self):
        # List of URLs to test (from Bugzilla trackers)
urls = {
'Miro bitesized':
'http://bugzilla.pculture.org/buglist.cgi?bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field-1-0-0=bug_status&field-1-1-0=product&field-1-2-0=keywords&keywords=bitesized&product=Miro&query_format=advanced&remaction=&type-1-0-0=anyexact&type-1-1-0=anyexact&type-1-2-0=anywords&value-1-0-0=NEW%2CASSIGNED%2CREOPENED&value-1-1-0=Miro&value-1-2-0=bitesized',
'KDE bitesized':
'https://bugs.kde.org/buglist.cgi?query_format=advanced&keywords=junior-jobs&resolution=---',
'KDE documentation':
'https://bugs.kde.org/buglist.cgi?query_format=advanced&product=docs&resolution=---',
'MediaWiki bitesized':
'https://bugzilla.wikimedia.org/buglist.cgi?keywords=easy&query_format=advanced&resolution=LATER&resolution=---',
'MediaWiki documentation':
'https://bugzilla.wikimedia.org/buglist.cgi?query_format=advanced&component=Documentation&resolution=---',
'Gnome bitesized':
'https://bugzilla.gnome.org/buglist.cgi?columnlist=id&keywords=gnome-love&query_format=advanced&resolution=---',
'Mozilla bitesized':
'https://bugzilla.mozilla.org/buglist.cgi?resolution=---;status_whiteboard_type=substring;query_format=advanced;status_whiteboard=[good%20first%20bug]',
'Songbird helpwanted':
'http://bugzilla.songbirdnest.com/buglist.cgi?query_format=advanced&resolution=---&keywords=helpwanted',
'Songbird documentation':
'http://bugzilla.songbirdnest.com/buglist.cgi?query_format=advanced&component=Documentation&resolution=---',
'Apertium':
'http://bugs.apertium.org/cgi-bin/bugzilla/buglist.cgi?query_format=advanced&resolution=---',
'RTEMS':
'https://www.rtems.org/bugzilla/buglist.cgi?query_format=advanced&resolution=---',
'XOrg bitesized':
'https://bugs.freedesktop.org/buglist.cgi?query_format=advanced&keywords=janitor&resolution=---&product=xorg',
'XOrg documentation':
'https://bugs.freedesktop.org/buglist.cgi?query_format=advanced&component=Docs%2Fother&component=Documentation&component=Fonts%2Fdoc&resolution=---&product=xorg',
'Locamotion':
'http://bugs.locamotion.org/buglist.cgi?query_format=advanced&resolution=---',
'Hypertriton':
'https://hypertriton.com/bugzilla/buglist.cgi?query_format=advanced&resolution=---&product=Agar&product=EDAcious&product=FabBSD&product=FreeSG',
'pygame':
'http://pygame.motherhamster.org/bugzilla/buglist.cgi?query_format=advanced&resolution=---'
}
for url_name in urls:
logger.info('Testing %s bugs URL.' % url_name)
url = urls[url_name]
# Check there is no timestamp i.e. get zero o'clock
first_timestamp = (
mysite.base.models.Timestamp.get_timestamp_for_string(url))
self.assertEqual(first_timestamp,
mysite.base.models.Timestamp.ZERO_O_CLOCK)
# Check the timestamp of the URL can be updated
mysite.base.models.Timestamp.update_timestamp_for_string(url)
# Check the new timestamp is after zero o'clock
new_timestamp = (
mysite.base.models.Timestamp.get_timestamp_for_string(url))
self.assertTrue(new_timestamp >
mysite.base.models.Timestamp.ZERO_O_CLOCK)
# Test cases for Nagios integration
class NagiosTests(django.test.TestCase):
# Test for OK Nagios meta data return (0)
def test_nagios_meta_return_ok(self):
data = {}
data['bug_diagnostics'] = {}
my = data['bug_diagnostics']
my['Bugs last polled more than than two days + one hour ago'] = 0
my['Bugs last polled more than three days ago'] = 0
my['Bugs last polled more than three days ago (in percent)'] = 0.0
self.assertEqual(0, mysite.base.views.meta_exit_code(data))
# Test for WARNING Nagios meta data return (1)
def test_nagios_meta_return_warning(self):
data = {}
data['bug_diagnostics'] = {}
my = data['bug_diagnostics']
my['Bugs last polled more than than two days + one hour ago'] = 1
my['Bugs last polled more than three days ago'] = 0
my['Bugs last polled more than three days ago (in percent)'] = 0.0
self.assertEqual(1, mysite.base.views.meta_exit_code(data))
# Test for CRITICAL Nagios meta data return (2)
def test_nagios_meta_return_critical(self):
data = {}
data['bug_diagnostics'] = {}
my = data['bug_diagnostics']
my['Bugs last polled more than than two days + one hour ago'] = 0
my['Bugs last polled more than three days ago'] = 1
my['Bugs last polled more than three days ago (in percent)'] = 0.0
self.assertEqual(2, mysite.base.views.meta_exit_code(data))
# Test for OK Nagios weekly mail return (0)
def test_nagios_weeklymail_return_ok(self):
newtime = datetime.datetime.utcnow() - datetime.timedelta(days=4)
self.assertEqual(0, mysite.base.management.commands.nagios.Command.
send_weekly_exit_code(newtime))
# Test for OK Nagios weekly mail return (0) after send_emails is
# run as a management command
def test_nagios_weeklymail_return_ok_after_send(self):
# Run the send_mail
command = mysite.profile.management.commands.send_emails.Command()
command.handle()
# Now run to see if the function sees things are ok in the
# database
self.assertEqual(0, mysite.base.management.commands.nagios.Command.
send_weekly_exit_code())
# Test for CRITICAL Nagios weekly mail return (2)
def test_nagios_weeklymail_return_critical(self):
newtime = datetime.datetime.utcnow() - datetime.timedelta(days=8)
self.assertEqual(2, mysite.base.management.commands.nagios.Command.
send_weekly_exit_code(newtime))
# Test for CRITICAL Nagios weekly mail return (2) on new database
def test_nagios_weeklymail_return_critical_newdb(self):
self.assertEqual(2, mysite.base.management.commands.nagios.Command.
send_weekly_exit_code())
# Test cases for meta data generation
class MetaDataTests(django.test.TestCase):
def test_meta_data_zero_div(self):
mysite.base.views.meta_data()
def find_git_path():
maybe_git_dir = os.path.abspath(os.getcwd())
while not os.path.exists(os.path.join(maybe_git_dir, '.git')):
maybe_git_dir = os.path.abspath(os.path.join(maybe_git_dir, '..'))
if os.path.exists(os.path.join(maybe_git_dir, '.git')):
return maybe_git_dir
raise ValueError("Could not find git directory path.")
# Test that the git repository has no files that conflict with Windows
class WindowsFilesystemCompatibilityTests(unittest.TestCase):
def test(self):
# Find the base directory
dir_with_git = find_git_path()
# Get a list of files from git
files = subprocess.Popen(
['git', 'ls-files'],
shell=False,
stdout=subprocess.PIPE,
cwd=dir_with_git)
stdout, stderr = files.communicate()
file_set = set(stdout.rstrip().split('\n'))
        # Filter that file set down by constraints that would
        # apply on Windows. To that end:
        # Check that no two tracked files differ only by case, since they
        # would collide on Windows' case-insensitive filesystem.
        lowercased = set(x.lower() for x in file_set)
        self.assertEqual(len(file_set), len(lowercased))
        # Filter out any files with '?' in their names, because that is an
        # invalid character for filenames on Windows.
        files_filtered = set(
            x for x in file_set
            if '?' not in x)
        self.assertEqual(file_set, files_filtered)
class GoogleApiTests(unittest.TestCase):
def test_google_api(self):
""" Test to see if the google api is returning what we expect """
response_file_path = os.path.join(settings.MEDIA_ROOT, 'sample-data',
'google_api', 'sample_response')
with open(response_file_path, 'r') as f:
response = f.read()
# Check that latitude and longitude are returned and status is 'OK'
geocode = mysite.base.view_helpers._geocode(response_data=response)
self.assertNotEqual(geocode, None)
# Test cases for robots generation
class RenderLiveRobotsTest(django.test.TestCase):
def test_robots_with_debug_false(self):
        '''Verify that robots.txt serves the contents of
        robots_for_live_site.txt when DEBUG is False.
        '''
response = self.client.get('/robots.txt')
robots_text = ""
with open('mysite/base/templates/robots_for_live_site.txt', 'rU') as f:
robots_text += f.read()
self.assertEqual(response.content, robots_text)
class RenderDevRobotsTest(django.test.TestCase):
def setUp(self):
self.original_value = settings.DEBUG
settings.DEBUG = True
def test_robots_with_debug_true(self):
        '''Verify that robots.txt contains text identical to
        robots_for_dev_env.txt when DEBUG is True.
        '''
response = self.client.get('/robots.txt')
robots_text = ""
with open('mysite/base/templates/robots_for_dev_env.txt', 'rU') as f:
robots_text += f.read()
settings.DEBUG = False
self.assertEqual(response.content, robots_text)
def tearDown(self):
settings.DEBUG = self.original_value
| nirmeshk/oh-mainline | mysite/base/tests.py | Python | agpl-3.0 | 25,144 |
"""
* Copyright 2007 Google Inc.
# Copyright (C) 2009 Luke Kenneth Casson Leighton <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http:#www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
"""
"""*
* Font size enumeration. Represents the seven basic HTML font sizes, as
* defined in CSS.
"""
XX_SMALL = 1
X_SMALL = 2
SMALL = 3
MEDIUM = 4
LARGE = 5
X_LARGE = 6
XX_LARGE = 7
"""*
* Justification enumeration. The three values are <code>left</code>,
* <code>right</code>, <code>center</code>.
"""
CENTER = "Center"
LEFT = "Left"
RIGHT = "Right"
| spaceone/pyjs | pyjswidgets/pyjamas/ui/RichTextAreaConsts.py | Python | apache-2.0 | 998 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
class EventLoaderTest(googletest.TestCase):
def test_log(self):
# Just check that logging works without raising an exception.
logging.error("test log message")
if __name__ == "__main__":
googletest.main()
| peterbraden/tensorflow | tensorflow/python/platform/logging_test.py | Python | apache-2.0 | 1,121 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test runner objects that's only for end-to-end tests.
This package defines runners, which are used to execute test pipeline and
verify results.
"""
# Protect against environments where dataflow runner is not available.
# pylint: disable=wrong-import-order, wrong-import-position
from __future__ import absolute_import
try:
from apache_beam.runners.dataflow.test_dataflow_runner import TestDataflowRunner
from apache_beam.runners.direct.test_direct_runner import TestDirectRunner
except ImportError:
pass
# pylint: enable=wrong-import-order, wrong-import-position
| mxm/incubator-beam | sdks/python/apache_beam/runners/test/__init__.py | Python | apache-2.0 | 1,360 |
"""
Preview Browser Widget.
"""
from xml.sax.saxutils import escape
from PyQt4.QtGui import (
QWidget, QLabel, QListView, QAction, QVBoxLayout, QHBoxLayout, QSizePolicy,
QStyleOption, QStylePainter
)
from PyQt4.QtSvg import QSvgWidget
from PyQt4.QtCore import (
Qt, QSize, QByteArray, QModelIndex, QEvent
)
from PyQt4.QtCore import pyqtSignal as Signal
from ..utils import check_type
from ..gui.dropshadow import DropShadowFrame
from . import previewmodel
NO_PREVIEW_SVG = """
"""
# Default description template
DESCRIPTION_TEMPLATE = """
<h3 class=item-heading>{name}</h3>
<p class=item-description>
{description}
</p>
"""
PREVIEW_SIZE = (440, 295)
class LinearIconView(QListView):
def __init__(self, *args, **kwargs):
QListView.__init__(self, *args, **kwargs)
self.setViewMode(QListView.IconMode)
self.setWrapping(False)
self.setWordWrap(True)
self.setSelectionMode(QListView.SingleSelection)
self.setEditTriggers(QListView.NoEditTriggers)
self.setMovement(QListView.Static)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setSizePolicy(QSizePolicy.Expanding,
QSizePolicy.Fixed)
self.setIconSize(QSize(120, 80))
def sizeHint(self):
if not self.model().rowCount():
return QSize(200, 140)
else:
scrollHint = self.horizontalScrollBar().sizeHint()
height = self.sizeHintForRow(0) + scrollHint.height()
_, top, _, bottom = self.getContentsMargins()
return QSize(200, height + top + bottom + self.verticalOffset())
class TextLabel(QWidget):
"""A plain text label widget with support for elided text.
"""
def __init__(self, *args, **kwargs):
QWidget.__init__(self, *args, **kwargs)
self.setSizePolicy(QSizePolicy.Expanding,
QSizePolicy.Preferred)
self.__text = ""
self.__textElideMode = Qt.ElideMiddle
self.__sizeHint = None
self.__alignment = Qt.AlignLeft | Qt.AlignVCenter
def setText(self, text):
"""Set the `text` string to display.
"""
check_type(text, str)
if self.__text != text:
self.__text = str(text)
self.__update()
def text(self):
"""Return the text
"""
return self.__text
def setTextElideMode(self, mode):
"""Set elide mode (`Qt.TextElideMode`)
"""
if self.__textElideMode != mode:
self.__textElideMode = mode
self.__update()
    def elideMode(self):
        return self.__textElideMode
def setAlignment(self, align):
"""Set text alignment (`Qt.Alignment`).
"""
if self.__alignment != align:
self.__alignment = align
self.__update()
def sizeHint(self):
if self.__sizeHint is None:
option = QStyleOption()
option.initFrom(self)
metrics = option.fontMetrics
self.__sizeHint = QSize(200, metrics.height())
return self.__sizeHint
def paintEvent(self, event):
painter = QStylePainter(self)
option = QStyleOption()
option.initFrom(self)
rect = option.rect
metrics = option.fontMetrics
text = metrics.elidedText(self.__text, self.__textElideMode,
rect.width())
painter.drawItemText(rect, self.__alignment,
option.palette, self.isEnabled(), text,
self.foregroundRole())
painter.end()
def changeEvent(self, event):
if event.type() == QEvent.FontChange:
self.__update()
return QWidget.changeEvent(self, event)
def __update(self):
self.__sizeHint = None
self.updateGeometry()
self.update()
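# Illustrative usage sketch (added; not part of the original module): how a
# TextLabel is typically driven by client code. Any PyQt4 widget needs a
# running QApplication and a display, so this is only meant to be run
# interactively; the sample path below is made up.
def _demo_text_label():
    from PyQt4.QtGui import QApplication
    app = QApplication([])
    label = TextLabel()
    label.setText("/a/rather/long/path/that/gets/elided/in/the/middle.ows")
    label.setTextElideMode(Qt.ElideMiddle)
    label.resize(160, label.sizeHint().height())
    label.show()
    return app.exec_()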
class PreviewBrowser(QWidget):
"""A Preview Browser for recent/premade scheme selection.
"""
# Emitted when the current previewed item changes
currentIndexChanged = Signal(int)
# Emitted when an item is double clicked in the preview list.
activated = Signal(int)
def __init__(self, *args):
QWidget.__init__(self, *args)
self.__model = None
self.__currentIndex = -1
self.__template = DESCRIPTION_TEMPLATE
self.__setupUi()
def __setupUi(self):
vlayout = QVBoxLayout()
vlayout.setContentsMargins(0, 0, 0, 0)
top_layout = QHBoxLayout()
top_layout.setContentsMargins(12, 12, 12, 12)
# Top row with full text description and a large preview
# image.
self.__label = QLabel(self, objectName="description-label",
wordWrap=True,
alignment=Qt.AlignTop | Qt.AlignLeft)
self.__label.setWordWrap(True)
self.__label.setFixedSize(220, PREVIEW_SIZE[1])
self.__image = QSvgWidget(self, objectName="preview-image")
self.__image.setFixedSize(*PREVIEW_SIZE)
self.__imageFrame = DropShadowFrame(self)
self.__imageFrame.setWidget(self.__image)
# Path text below the description and image
path_layout = QHBoxLayout()
path_layout.setContentsMargins(12, 0, 12, 0)
path_label = QLabel("<b>{0!s}</b>".format(self.tr("Path:")), self,
objectName="path-label")
self.__path = TextLabel(self, objectName="path-text")
path_layout.addWidget(path_label)
path_layout.addWidget(self.__path)
self.__selectAction = \
QAction(self.tr("Select"), self,
objectName="select-action",
)
top_layout.addWidget(self.__label, 1,
alignment=Qt.AlignTop | Qt.AlignLeft)
top_layout.addWidget(self.__image, 1,
alignment=Qt.AlignTop | Qt.AlignRight)
vlayout.addLayout(top_layout)
vlayout.addLayout(path_layout)
# An list view with small preview icons.
self.__previewList = LinearIconView(objectName="preview-list-view")
self.__previewList.doubleClicked.connect(self.__onDoubleClicked)
vlayout.addWidget(self.__previewList)
self.setLayout(vlayout)
def setModel(self, model):
"""Set the item model for preview.
"""
if self.__model != model:
if self.__model:
s_model = self.__previewList.selectionModel()
s_model.selectionChanged.disconnect(self.__onSelectionChanged)
self.__model.dataChanged.disconnect(self.__onDataChanged)
self.__model = model
self.__previewList.setModel(model)
if model:
s_model = self.__previewList.selectionModel()
s_model.selectionChanged.connect(self.__onSelectionChanged)
self.__model.dataChanged.connect(self.__onDataChanged)
if model and model.rowCount():
self.setCurrentIndex(0)
def model(self):
"""Return the item model.
"""
return self.__model
def setPreviewDelegate(self, delegate):
"""Set the delegate to render the preview images.
"""
raise NotImplementedError
def setDescriptionTemplate(self, template):
self.__template = template
self.__update()
def setCurrentIndex(self, index):
"""Set the selected preview item index.
"""
if self.__model is not None and self.__model.rowCount():
index = min(index, self.__model.rowCount() - 1)
index = self.__model.index(index, 0)
sel_model = self.__previewList.selectionModel()
# This emits selectionChanged signal and triggers
# __onSelectionChanged, currentIndex is updated there.
sel_model.select(index, sel_model.ClearAndSelect)
elif self.__currentIndex != -1:
self.__currentIndex = -1
self.__update()
self.currentIndexChanged.emit(-1)
def currentIndex(self):
"""Return the current selected index.
"""
return self.__currentIndex
def __onSelectionChanged(self, *args):
"""Selected item in the preview list has changed.
Set the new description and large preview image.
"""
rows = self.__previewList.selectedIndexes()
if rows:
index = rows[0]
self.__currentIndex = index.row()
else:
index = QModelIndex()
self.__currentIndex = -1
self.__update()
self.currentIndexChanged.emit(self.__currentIndex)
def __onDataChanged(self, topleft, bottomRight):
"""Data changed, update the preview if current index in the changed
range.
"""
if self.__currentIndex <= topleft.row() and \
self.__currentIndex >= bottomRight.row():
self.__update()
def __onDoubleClicked(self, index):
"""Double click on an item in the preview item list.
"""
self.activated.emit(index.row())
def __update(self):
"""Update the current description.
"""
if self.__currentIndex != -1:
index = self.model().index(self.__currentIndex, 0)
else:
index = QModelIndex()
if not index.isValid():
description = ""
name = ""
path = ""
svg = NO_PREVIEW_SVG
else:
description = str(index.data(Qt.WhatsThisRole))
if not description:
description = "No description."
description = escape(description)
description = description.replace("\n", "<br/>")
name = str(index.data(Qt.DisplayRole))
if not name:
name = "Untitled"
name = escape(name)
path = str(index.data(Qt.StatusTipRole))
svg = str(index.data(previewmodel.ThumbnailSVGRole))
desc_text = self.__template.format(description=description, name=name)
self.__label.setText(desc_text)
self.__path.setText(path)
if not svg:
svg = NO_PREVIEW_SVG
if svg:
self.__image.load(QByteArray(svg.encode("utf-8")))
| marinkaz/orange3 | Orange/canvas/preview/previewbrowser.py | Python | bsd-2-clause | 10,352 |
# -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
from functools import partial
import warnings
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False, dtype=None):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats (or dtype if provided). Finally, the function
checks that the size of the second dimension of the two arrays is equal, or
the equivalent check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
dtype : string, type, list of types or None (default=None)
Data type required for X and Y. If None, the dtype will be an
appropriate float type selected by _return_float_dtype.
.. versionadded:: 0.18
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype_float = _return_float_dtype(X, Y)
warn_on_dtype = dtype is not None
estimator = 'check_pairwise_arrays'
if dtype is None:
dtype = dtype_float
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
Y = check_array(Y, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
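# Illustrative sketch (added for exposition; not part of scikit-learn): the
# two main contracts documented above -- passing Y=None aliases Y to X, and a
# mismatch in the number of features is rejected.
def _demo_check_pairwise_arrays():
    X = np.array([[0., 1.], [1., 1.]])
    X_checked, Y_checked = check_pairwise_arrays(X, None)
    assert Y_checked is X_checked  # Y defaults to a pointer to X
    try:
        check_pairwise_arrays(X, np.ones((2, 3)))
    except ValueError:
        pass  # incompatible n_features raises, as documented
    else:
        raise AssertionError("expected ValueError for mismatched n_features")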
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
X_norm_squared : array-like, shape = [n_samples_1], optional
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances betweens pairs of elements of X and Y.
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
XX = check_array(X_norm_squared)
if XX.shape == (1, X.shape[0]):
XX = XX.T
elif XX.shape != (X.shape[0], 1):
raise ValueError(
"Incompatible dimensions for X and X_norm_squared")
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is not None:
YY = np.atleast_2d(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
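# Illustrative sketch (added; not part of scikit-learn): the dot-product
# expansion used above, ||x - y||^2 = ||x||^2 - 2<x, y> + ||y||^2, agrees with
# a direct pairwise computation up to floating point rounding.
def _demo_euclidean_expansion():
    rng = np.random.RandomState(0)
    X = rng.rand(3, 4)
    Y = rng.rand(2, 4)
    direct = np.sqrt(((X[:, np.newaxis, :] - Y[np.newaxis, :, :]) ** 2).sum(-1))
    assert np.allclose(direct, euclidean_distances(X, Y))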
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples1, n_features)
Array containing points.
Y : {array-like, sparse matrix}, shape (n_samples2, n_features)
Arrays containing points.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
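# Illustrative sketch (added; not part of scikit-learn): for small inputs the
# chunked routine above matches a brute-force argmin/min over the full
# distance matrix; the batching only exists to avoid materialising that
# matrix for large inputs.
def _demo_argmin_min_equivalence():
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    Y = rng.rand(7, 3)
    indices, values = pairwise_distances_argmin_min(X, Y)
    full = euclidean_distances(X, Y)
    assert np.array_equal(indices, full.argmin(axis=1))
    assert np.allclose(values, full.min(axis=1))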
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
    X : array-like, shape (n_samples1, n_features)
        Array containing points.
    Y : array-like, shape (n_samples2, n_features)
        Array containing points.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=None):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
if size_threshold is not None:
        warnings.warn('Use of the "size_threshold" is deprecated '
                      'in 0.19 and it will be removed in version '
                      '0.21 of scikit-learn', DeprecationWarning)
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
np.clip(S, 0, 2, out=S)
if X is Y or Y is None:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
S[np.diag_indices_from(S)] = 0.0
return S
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
    -----
    The cosine distance is equivalent to half the squared
    euclidean distance if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
degree : int, default 3
gamma : float, default None
if None, defaults to 1.0 / n_features
coef0 : int, default 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
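# Illustrative sketch (added for exposition): polynomial_kernel reproduces
# (gamma * <x, y> + coef0) ** degree computed entry by entry with plain numpy.
def _demo_polynomial_kernel():
    rng = np.random.RandomState(0)
    X = rng.rand(4, 3)
    Y = rng.rand(2, 3)
    K = polynomial_kernel(X, Y, degree=3, gamma=0.5, coef0=1)
    assert np.allclose(K, (0.5 * np.dot(X, Y.T) + 1) ** 3)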
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features
coef0 : int, default 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
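# Illustrative sketch (added for exposition): rbf_kernel agrees with
# exp(-gamma * ||x - y||^2) evaluated directly for each pair of rows.
def _demo_rbf_kernel():
    rng = np.random.RandomState(0)
    X = rng.rand(4, 3)
    Y = rng.rand(2, 3)
    gamma = 0.5
    sq_dists = ((X[:, np.newaxis, :] - Y[np.newaxis, :, :]) ** 2).sum(-1)
    assert np.allclose(rbf_kernel(X, Y, gamma=gamma), np.exp(-gamma * sq_dists))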
def laplacian_kernel(X, Y=None, gamma=None):
"""Compute the laplacian kernel between X and Y.
The laplacian kernel is defined as::
K(x, y) = exp(-gamma ||x-y||_1)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <laplacian_kernel>`.
.. versionadded:: 0.17
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
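    Examples
    --------
    A minimal sketch: as with the RBF kernel, the diagonal of K(X, X) is 1.
    >>> import numpy as np
    >>> from sklearn.metrics.pairwise import laplacian_kernel
    >>> K = laplacian_kernel([[0., 1.], [1., 2.]])
    >>> np.allclose(np.diag(K), 1.)
    True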
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = -gamma * manhattan_distances(X, Y)
np.exp(K, K) # exponentiate K in-place
return K
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.17
parameter ``dense_output`` for dense output.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
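    Examples
    --------
    A minimal sketch: orthogonal rows have similarity 0 and every nonzero
    row has similarity 1 with itself.
    >>> import numpy as np
    >>> from sklearn.metrics.pairwise import cosine_similarity
    >>> S = cosine_similarity([[1., 0.], [0., 2.]])
    >>> np.allclose(S, np.eye(2))
    True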
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
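    Examples
    --------
    A minimal sketch: each histogram has kernel value 0 with itself, and
    off-diagonal entries are non-positive.
    >>> import numpy as np
    >>> from sklearn.metrics.pairwise import additive_chi2_kernel
    >>> K = additive_chi2_kernel([[1., 2.], [2., 1.]])
    >>> np.allclose(np.diag(K), 0.)
    True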
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
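    Examples
    --------
    A minimal sketch: exponentiating the additive kernel gives 1 on the
    diagonal and values in (0, 1] elsewhere.
    >>> import numpy as np
    >>> from sklearn.metrics.pairwise import chi2_kernel
    >>> K = chi2_kernel([[1., 2.], [2., 1.]])
    >>> np.allclose(np.diag(K), 1.)
    True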
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'precomputed': None, # HACK: precomputed is always allowed, never called
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
    computed. If the input is a distance matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
    scikit-learn, see the __doc__ of the sklearn.metrics.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
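    Examples
    --------
    A minimal sketch: with a single input the result is a square, symmetric
    matrix with zeros on the diagonal.
    >>> from sklearn.metrics.pairwise import pairwise_distances
    >>> D = pairwise_distances([[0., 1.], [1., 1.]], metric='euclidean')
    >>> D.shape
    (2, 2)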
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None
X, Y = check_pairwise_arrays(X, Y, dtype=dtype)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
# These distances require boolean arrays when using scipy.spatial.distance.
PAIRWISE_BOOLEAN_FUNCTIONS = [
'dice',
'jaccard',
'kulsinski',
'matching',
'rogerstanimoto',
'russellrao',
'sokalmichener',
'sokalsneath',
'yule',
]
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'laplacian': laplacian_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
This function simply returns the valid pairwise distance metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'laplacian' sklearn.pairwise.laplacian_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": frozenset(["gamma"]),
"cosine": (),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"laplacian": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
filter_params : boolean
Whether to filter invalid parameters or not.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
**kwds : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
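    Examples
    --------
    A minimal sketch: extra keyword arguments such as ``gamma`` are passed
    through to the underlying kernel function.
    >>> from sklearn.metrics.pairwise import pairwise_kernels
    >>> K = pairwise_kernels([[0., 1.], [1., 1.]], metric='rbf', gamma=0.5)
    >>> K.shape
    (2, 2)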
"""
# import GPKernel locally to prevent circular imports
from ..gaussian_process.kernels import Kernel as GPKernel
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif isinstance(metric, GPKernel):
func = metric.__call__
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
| herilalaina/scikit-learn | sklearn/metrics/pairwise.py | Python | bsd-3-clause | 46,964 |
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi ([email protected]), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
The Manager runs a series of tests (TestType interface) against a set
of test files. If a test file fails a TestType, it returns a list of TestFailure
objects to the Manager. The Manager then aggregates the TestFailures to
create a final report.
"""
import datetime
import json
import logging
import random
import sys
import time
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinder
from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.layout_package import json_results_generator
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.models.test_input import TestInput
from webkitpy.tool import grammar
_log = logging.getLogger(__name__)
# Builder base URL where we have the archived test results.
BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/"
TestExpectations = test_expectations.TestExpectations
class Manager(object):
"""A class for managing running a series of tests on a series of layout
test files."""
def __init__(self, port, options, printer):
"""Initialize test runner data structures.
Args:
            port: an object implementing port-specific functionality.
options: a dictionary of command line options
printer: a Printer object to record updates to.
"""
self._port = port
self._filesystem = port.host.filesystem
self._options = options
self._printer = printer
self._expectations = None
self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
self.INSPECTOR_SUBDIR = 'inspector' + port.TEST_PATH_SEPARATOR
self.PERF_SUBDIR = 'perf'
self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
self.VIRTUAL_HTTP_SUBDIR = port.TEST_PATH_SEPARATOR.join([
'virtual', 'stable', 'http'])
self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
self.ARCHIVED_RESULTS_LIMIT = 25
self._http_server_started = False
self._wptserve_started = False
self._websockets_server_started = False
self._results_directory = self._port.results_directory()
self._finder = LayoutTestFinder(self._port, self._options)
self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)
def _collect_tests(self, args):
return self._finder.find_tests(args, test_list=self._options.test_list,
fastest_percentile=self._options.fastest)
def _is_http_test(self, test):
return (
test.startswith(self.HTTP_SUBDIR) or
self._is_websocket_test(test) or
self.VIRTUAL_HTTP_SUBDIR in test
)
def _is_inspector_test(self, test):
return self.INSPECTOR_SUBDIR in test
def _is_websocket_test(self, test):
if self._port.is_wpt_enabled() and self._port.is_wpt_test(test):
return False
return self.WEBSOCKET_SUBDIR in test
def _http_tests(self, test_names):
return set(test for test in test_names if self._is_http_test(test))
def _is_perf_test(self, test):
return self.PERF_SUBDIR == test or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test
def _prepare_lists(self, paths, test_names):
tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names))
tests_to_run = [test for test in test_names if test not in tests_to_skip]
if not tests_to_run:
return tests_to_run, tests_to_skip
# Create a sorted list of test files so the subset chunk,
# if used, contains alphabetically consecutive tests.
if self._options.order == 'natural':
tests_to_run.sort(key=self._port.test_key)
elif self._options.order == 'random':
random.shuffle(tests_to_run)
elif self._options.order == 'random-seeded':
rnd = random.Random()
rnd.seed(4) # http://xkcd.com/221/
rnd.shuffle(tests_to_run)
tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
self._expectations.add_extra_skipped_tests(tests_in_other_chunks)
tests_to_skip.update(tests_in_other_chunks)
return tests_to_run, tests_to_skip
def _test_input_for_file(self, test_file):
return TestInput(test_file,
self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms,
self._test_requires_lock(test_file),
should_add_missing_baselines=(self._options.new_test_results and not self._test_is_expected_missing(test_file)))
def _test_requires_lock(self, test_file):
"""Return True if the test needs to be locked when
running multiple copies of NRWTs. Perf tests are locked
because heavy load caused by running other tests in parallel
might cause some of them to timeout."""
return self._is_http_test(test_file) or self._is_perf_test(test_file)
def _test_is_expected_missing(self, test_file):
expectations = self._expectations.model().get_expectations(test_file)
return test_expectations.MISSING in expectations or test_expectations.NEEDS_REBASELINE in expectations or test_expectations.NEEDS_MANUAL_REBASELINE in expectations
def _test_is_slow(self, test_file):
return test_expectations.SLOW in self._expectations.model().get_expectations(test_file)
def needs_servers(self, test_names):
return any(self._test_requires_lock(test_name) for test_name in test_names)
def _rename_results_folder(self):
try:
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(self._filesystem.mtime(self._filesystem.join(self._results_directory, "results.html"))))
except (IOError, OSError), e:
# It might be possible that results.html was not generated in previous run, because the test
# run was interrupted even before testing started. In those cases, don't archive the folder.
# Simply override the current folder contents with new results.
import errno
if e.errno == errno.EEXIST or e.errno == errno.ENOENT:
self._printer.write_update("No results.html file found in previous run, skipping it.")
return None
archived_name = ''.join((self._filesystem.basename(self._results_directory), "_", timestamp))
archived_path = self._filesystem.join(self._filesystem.dirname(self._results_directory), archived_name)
self._filesystem.move(self._results_directory, archived_path)
def _delete_dirs(self, dir_list):
for dir in dir_list:
self._filesystem.rmtree(dir)
def _limit_archived_results_count(self):
results_directory_path = self._filesystem.dirname(self._results_directory)
file_list = self._filesystem.listdir(results_directory_path)
results_directories = []
for dir in file_list:
file_path = self._filesystem.join(results_directory_path, dir)
if self._filesystem.isdir(file_path) and self._results_directory in file_path:
results_directories.append(file_path)
results_directories.sort(key=lambda x: self._filesystem.mtime(x))
self._printer.write_update("Clobbering excess archived results in %s" % results_directory_path)
self._delete_dirs(results_directories[:-self.ARCHIVED_RESULTS_LIMIT])
def _set_up_run(self, test_names):
self._printer.write_update("Checking build ...")
if self._options.build:
exit_code = self._port.check_build(self.needs_servers(test_names), self._printer)
if exit_code:
_log.error("Build check failed")
return exit_code
# This must be started before we check the system dependencies,
# since the helper may do things to make the setup correct.
if self._options.pixel_tests:
self._printer.write_update("Starting pixel test helper ...")
self._port.start_helper()
# Check that the system dependencies (themes, fonts, ...) are correct.
if not self._options.nocheck_sys_deps:
self._printer.write_update("Checking system dependencies ...")
exit_code = self._port.check_sys_deps(self.needs_servers(test_names))
if exit_code:
self._port.stop_helper()
return exit_code
if self._options.clobber_old_results:
self._clobber_old_results()
elif self._filesystem.exists(self._results_directory):
self._limit_archived_results_count()
# Rename the existing results folder for archiving.
self._rename_results_folder()
# Create the output directory if it doesn't already exist.
self._port.host.filesystem.maybe_make_directory(self._results_directory)
self._port.setup_test_run()
return test_run_results.OK_EXIT_STATUS
def run(self, args):
"""Run the tests and return a RunDetails object with the results."""
start_time = time.time()
self._printer.write_update("Collecting tests ...")
running_all_tests = False
try:
paths, test_names, running_all_tests = self._collect_tests(args)
except IOError:
# This is raised if --test-list doesn't exist
return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS)
self._printer.write_update("Parsing expectations ...")
self._expectations = test_expectations.TestExpectations(self._port, test_names)
tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)
# Check to make sure we're not skipping every test.
if not tests_to_run:
_log.critical('No tests to run.')
return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS)
exit_code = self._set_up_run(tests_to_run)
if exit_code:
return test_run_results.RunDetails(exit_code=exit_code)
# Don't retry failures if an explicit list of tests was passed in.
if self._options.retry_failures is None:
should_retry_failures = len(paths) < len(test_names)
else:
should_retry_failures = self._options.retry_failures
enabled_pixel_tests_in_retry = False
try:
self._start_servers(tests_to_run)
num_workers = self._port.num_workers(int(self._options.child_processes))
initial_results = self._run_tests(
tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
num_workers)
# Don't retry failures when interrupted by user or failures limit exception.
should_retry_failures = should_retry_failures and not (initial_results.interrupted or initial_results.keyboard_interrupted)
tests_to_retry = self._tests_to_retry(initial_results)
all_retry_results = []
if should_retry_failures and tests_to_retry:
enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()
for retry_attempt in xrange(1, self._options.num_retries + 1):
if not tests_to_retry:
break
_log.info('')
_log.info('Retrying %s, attempt %d of %d...' %
(grammar.pluralize('unexpected failure', len(tests_to_retry)),
retry_attempt, self._options.num_retries))
retry_results = self._run_tests(tests_to_retry,
tests_to_skip=set(),
repeat_each=1,
iterations=1,
num_workers=num_workers,
retry_attempt=retry_attempt)
all_retry_results.append(retry_results)
tests_to_retry = self._tests_to_retry(retry_results)
if enabled_pixel_tests_in_retry:
self._options.pixel_tests = False
finally:
self._stop_servers()
self._clean_up_run()
# Some crash logs can take a long time to be written out so look
# for new logs after the test run finishes.
self._printer.write_update("looking for new crash logs")
self._look_for_new_crash_logs(initial_results, start_time)
for retry_attempt_results in all_retry_results:
self._look_for_new_crash_logs(retry_attempt_results, start_time)
_log.debug("summarizing results")
summarized_full_results = test_run_results.summarize_results(
self._port, self._expectations, initial_results, all_retry_results,
enabled_pixel_tests_in_retry)
summarized_failing_results = test_run_results.summarize_results(
self._port, self._expectations, initial_results, all_retry_results,
enabled_pixel_tests_in_retry, only_include_failing=True)
exit_code = summarized_failing_results['num_regressions']
if exit_code > test_run_results.MAX_FAILURES_EXIT_STATUS:
_log.warning('num regressions (%d) exceeds max exit status (%d)' %
(exit_code, test_run_results.MAX_FAILURES_EXIT_STATUS))
exit_code = test_run_results.MAX_FAILURES_EXIT_STATUS
if not self._options.dry_run:
self._write_json_files(summarized_full_results, summarized_failing_results, initial_results, running_all_tests)
if self._options.write_full_results_to:
self._filesystem.copyfile(self._filesystem.join(self._results_directory, "full_results.json"),
self._options.write_full_results_to)
self._upload_json_files()
results_path = self._filesystem.join(self._results_directory, "results.html")
self._copy_results_html_file(results_path)
if initial_results.keyboard_interrupted:
exit_code = test_run_results.INTERRUPTED_EXIT_STATUS
else:
if initial_results.interrupted:
exit_code = test_run_results.EARLY_EXIT_STATUS
if self._options.show_results and (exit_code or (self._options.full_results_html and initial_results.total_failures)):
self._port.show_results_html_file(results_path)
self._printer.print_results(time.time() - start_time, initial_results, summarized_failing_results)
self._check_for_stale_w3c_dir()
return test_run_results.RunDetails(
exit_code, summarized_full_results, summarized_failing_results,
initial_results, all_retry_results, enabled_pixel_tests_in_retry)
def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations,
num_workers, retry_attempt=0):
test_inputs = []
for _ in xrange(iterations):
for test in tests_to_run:
for _ in xrange(repeat_each):
test_inputs.append(self._test_input_for_file(test))
return self._runner.run_tests(self._expectations, test_inputs,
tests_to_skip, num_workers, retry_attempt)
def _start_servers(self, tests_to_run):
if self._port.is_wpt_enabled() and any(self._port.is_wpt_test(test) for test in tests_to_run):
self._printer.write_update('Starting WPTServe ...')
self._port.start_wptserve()
self._wptserve_started = True
if self._port.requires_http_server() or any((self._is_http_test(test) or self._is_inspector_test(test)) for test in tests_to_run):
self._printer.write_update('Starting HTTP server ...')
self._port.start_http_server(additional_dirs={}, number_of_drivers=self._options.max_locked_shards)
self._http_server_started = True
if any(self._is_websocket_test(test) for test in tests_to_run):
self._printer.write_update('Starting WebSocket server ...')
self._port.start_websocket_server()
self._websockets_server_started = True
def _stop_servers(self):
if self._wptserve_started:
self._printer.write_update('Stopping WPTServe ...')
self._wptserve_started = False
self._port.stop_wptserve()
if self._http_server_started:
self._printer.write_update('Stopping HTTP server ...')
self._http_server_started = False
self._port.stop_http_server()
if self._websockets_server_started:
self._printer.write_update('Stopping WebSocket server ...')
self._websockets_server_started = False
self._port.stop_websocket_server()
def _clean_up_run(self):
_log.debug("Flushing stdout")
sys.stdout.flush()
_log.debug("Flushing stderr")
sys.stderr.flush()
_log.debug("Stopping helper")
self._port.stop_helper()
_log.debug("Cleaning up port")
self._port.clean_up_test_run()
def _check_for_stale_w3c_dir(self):
# TODO(dpranke): Remove this check after 1/1/2015 and let people deal with the warnings.
# Remove the check in port/base.py as well.
fs = self._port.host.filesystem
layout_tests_dir = self._port.layout_tests_dir()
if fs.isdir(fs.join(layout_tests_dir, 'w3c')):
_log.warning('WARNING: You still have the old LayoutTests/w3c directory in your checkout. You should delete it!')
def _force_pixel_tests_if_needed(self):
if self._options.pixel_tests:
return False
_log.debug("Restarting helper")
self._port.stop_helper()
self._options.pixel_tests = True
self._port.start_helper()
return True
def _look_for_new_crash_logs(self, run_results, start_time):
"""Since crash logs can take a long time to be written out if the system is
        under stress, do a second pass at the end of the test run.
run_results: the results of the test run
start_time: time the tests started at. We're looking for crash
logs after that time.
"""
crashed_processes = []
for test, result in run_results.unexpected_results_by_name.iteritems():
if (result.type != test_expectations.CRASH):
continue
for failure in result.failures:
if not isinstance(failure, test_failures.FailureCrash):
continue
if failure.has_log:
continue
crashed_processes.append([test, failure.process_name, failure.pid])
sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
if sample_files:
for test, sample_file in sample_files.iteritems():
writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
writer.copy_sample_file(sample_file)
crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
if crash_logs:
for test, crash_log in crash_logs.iteritems():
writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
writer.write_crash_log(crash_log)
def _clobber_old_results(self):
dir_above_results_path = self._filesystem.dirname(self._results_directory)
self._printer.write_update("Clobbering old results in %s" % dir_above_results_path)
if not self._filesystem.exists(dir_above_results_path):
return
file_list = self._filesystem.listdir(dir_above_results_path)
results_directories = []
for dir in file_list:
file_path = self._filesystem.join(dir_above_results_path, dir)
if self._filesystem.isdir(file_path) and self._results_directory in file_path:
results_directories.append(file_path)
self._delete_dirs(results_directories)
# Port specific clean-up.
self._port.clobber_old_port_specific_results()
def _tests_to_retry(self, run_results):
# TODO(ojan): This should also check that result.type != test_expectations.MISSING since retrying missing expectations is silly.
# But that's a bit tricky since we only consider the last retry attempt for the count of unexpected regressions.
return [result.test_name for result in run_results.unexpected_results_by_name.values() if result.type != test_expectations.PASS]
def _write_json_files(self, summarized_full_results, summarized_failing_results, initial_results, running_all_tests):
_log.debug("Writing JSON files in %s." % self._results_directory)
# FIXME: Upload stats.json to the server and delete times_ms.
times_trie = json_results_generator.test_timings_trie(initial_results.results_by_name.values())
times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
json_results_generator.write_json(self._filesystem, times_trie, times_json_path)
# Save out the times data so we can use it for --fastest in the future.
if running_all_tests:
bot_test_times_path = self._port.bot_test_times_path()
self._filesystem.maybe_make_directory(self._filesystem.dirname(bot_test_times_path))
json_results_generator.write_json(self._filesystem, times_trie, bot_test_times_path)
stats_trie = self._stats_trie(initial_results)
stats_path = self._filesystem.join(self._results_directory, "stats.json")
self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))
full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
json_results_generator.write_json(self._filesystem, summarized_full_results, full_results_path)
full_results_path = self._filesystem.join(self._results_directory, "failing_results.json")
# We write failing_results.json out as jsonp because we need to load it from a file url for results.html and Chromium doesn't allow that.
json_results_generator.write_json(self._filesystem, summarized_failing_results, full_results_path, callback="ADD_RESULTS")
_log.debug("Finished writing JSON files.")
def _upload_json_files(self):
if not self._options.test_results_server:
return
if not self._options.master_name:
_log.error("--test-results-server was set, but --master-name was not. Not uploading JSON files.")
return
_log.debug("Uploading JSON files for builder: %s", self._options.builder_name)
attrs = [("builder", self._options.builder_name),
("testtype", "layout-tests"),
("master", self._options.master_name)]
files = [(file, self._filesystem.join(self._results_directory, file)) for file in ["failing_results.json", "full_results.json", "times_ms.json"]]
url = "http://%s/testfile/upload" % self._options.test_results_server
# Set uploading timeout in case appengine server is having problems.
# 120 seconds are more than enough to upload test results.
uploader = FileUploader(url, 120)
try:
response = uploader.upload_as_multipart_form_data(self._filesystem, files, attrs)
if response:
if response.code == 200:
_log.debug("JSON uploaded.")
else:
_log.debug("JSON upload failed, %d: '%s'" % (response.code, response.read()))
else:
_log.error("JSON upload failed; no response returned")
except Exception, err:
_log.error("Upload failed: %s" % err)
def _copy_results_html_file(self, destination_path):
base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
results_file = self._filesystem.join(base_dir, 'results.html')
# Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests,
# so make sure it exists before we try to copy it.
if self._filesystem.exists(results_file):
self._filesystem.copyfile(results_file, destination_path)
def _stats_trie(self, initial_results):
def _worker_number(worker_name):
return int(worker_name.split('/')[1]) if worker_name else -1
stats = {}
for result in initial_results.results_by_name.values():
if result.type != test_expectations.SKIP:
stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))}
stats_trie = {}
for name, value in stats.iteritems():
json_results_generator.add_path_to_trie(name, value, stats_trie)
return stats_trie
| js0701/chromium-crosswalk | third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py | Python | bsd-3-clause | 27,322 |
#!/usr/bin/env python
"""Renames *.py files to *.py.park."""
import os
import sys
def main():
"""Drives the main script behavior."""
script_dir = os.path.dirname(os.path.realpath(__file__))
for filename in os.listdir(script_dir):
basename, extension = os.path.splitext(filename)
if basename.startswith("Test") and extension == '.py':
source_path = os.path.join(script_dir, filename)
dest_path = source_path + ".park"
sys.stdout.write("renaming {} to {}\n".format(
source_path, dest_path))
os.rename(source_path, dest_path)
if __name__ == "__main__":
main()
| endlessm/chromium-browser | third_party/llvm/lldb/test/API/issue_verification/disable.py | Python | bsd-3-clause | 656 |
MAX_SCORE = 10
class ParsedValue():
"""
Possible run-time value.
The value data might either be definite or guessed.
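    Example (illustrative only; the argument values below are made up):
    >>> value = ParsedValue("0x41414141", "Pointer-sized constant", score=5)
    >>> value.is_guessed()
    True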
"""
def __init__(self, data, description, score=0, raw=None, type_=None):
"""
Ctor
        @param data: The data's human-readable representation.
        @param description: A description string for the value data type
        @param score: A value in the range 0-10 indicating how uncertain the value is.
                      A score of 0 (default) means the value is certain.
"""
self.data = data
self.description = description
self.type = type_
self.raw = raw # TODO: Validate value is a string (representing hex values)
# If score cannot be validated set its value to 10 (Guessed).
if self._validate_score(score):
self.score = score
else:
self.score = MAX_SCORE
def _validate_score(self, score):
"""
Validate that score value is in range 0-10.
@param score: Score value to validate
@return: True if score is valid, otherwise False.
"""
return 0 <= score <= MAX_SCORE
def is_guessed(self):
"""
Check if the value is guessed
@return: True if the value is guessed, otherwise False
"""
return not self.score == 0 | xujun10110/DIE | DIE/Lib/ParsedValue.py | Python | mit | 1,350 |
import pytest
from .utils import *
import psi4
from qcengine.testing import using
@pytest.mark.parametrize('engine', [
pytest.param('optking'),
pytest.param('geometric', marks=using('geometric')),
]) # yapf: disable
@pytest.mark.parametrize('inp', [
pytest.param({'name': 'hf', 'options': {'scf_type': 'df'}, 'ref_ene' : -76.027032783717, 'ref_nuc': 9.300794299874}, id='rhf(df)'),
pytest.param({'name': 'hf', 'options': {'scf_type': 'pk'}, 'ref_ene' : -76.027053512764, 'ref_nuc': 9.300838770294}, id='rhf(pk)'),
pytest.param({'name': 'mp2', 'options': {'mp2_type': 'df'}, 'ref_ene' : -76.230938589591, 'ref_nuc': 9.133271168193}, id='mp2(df)'),
pytest.param({'name': 'mp2', 'options': {'mp2_type': 'conv'}, 'ref_ene' : -76.230989373502, 'ref_nuc': 9.133125471291}, id='mp2(conv)'),
pytest.param({'name': 'b3lyp', 'options': {'scf_type': 'df'}, 'ref_ene' : -76.420645414834, 'ref_nuc': 9.090397129492}, id='b3lyp'),
]) # yapf: disable
def test_h2o(inp, engine):
"""Optimization of the square water molecule"""
h2o = psi4.geometry("""
O
H 1 1.0
H 1 1.0 2 90.0
""")
psi4.set_options({'basis': 'cc-pvdz',
'g_convergence': 'gau_tight'
})
psi4.set_options(inp['options'])
e, wfn = psi4.optimize(inp['name'], return_wfn=True, engine=engine)
assert compare_values(inp['ref_ene'], e, 6)
assert compare_values(inp['ref_nuc'], h2o.nuclear_repulsion_energy(), 3)
@using('geometric')
@pytest.mark.parametrize('inp', [
pytest.param({'name': 'hf', 'options': {'scf_type': 'df'}, 'ref_ene' : -76.02079629252714, 'ref_nuc': 9.265341708725257}, id='rhf(df)'),
pytest.param({'name': 'hf', 'options': {'scf_type': 'pk'}, 'ref_ene' : -76.02082389228, 'ref_nuc': 9.26528625744628}, id='rhf(pk)'),
pytest.param({'name': 'mp2', 'options': {'mp2_type': 'df'}, 'ref_ene' : -76.22711819393223, 'ref_nuc': 9.09137805747361}, id='mp2(df)'),
pytest.param({'name': 'mp2', 'options': {'mp2_type': 'conv'}, 'ref_ene' : -76.2271678506303, 'ref_nuc': 9.091178486990861}, id='mp2(conv)'),
pytest.param({'name': 'b3lyp', 'options': {'scf_type': 'df'}, 'ref_ene' : -76.41632755714534, 'ref_nuc': 9.04535641436914}, id='b3lyp'),
]) # yapf: disable
def test_h2o_constrained(inp):
"""Constrained optimization of the square water molecule"""
h2o = psi4.geometry("""
O
H 1 1.0
H 1 1.0 2 90.0
""")
psi4.set_options({'basis': 'cc-pvdz',
'g_convergence': 'gau_tight'
})
psi4.set_options(inp['options'])
# geometric specific options
geometric_keywords = {
'coordsys' : 'tric',
'enforce' : 0.0,
'constraints' : {
'set' : [{'type' : 'angle',
'indices' : [1, 0, 2],
'value' : 90.0 }]
}
}
e, wfn = psi4.optimize(inp['name'], return_wfn=True, engine='geometric', optimizer_keywords=geometric_keywords)
assert compare_values(inp['ref_ene'], e, 6)
assert compare_values(inp['ref_nuc'], h2o.nuclear_repulsion_energy(), 3)
| ashutoshvt/psi4 | tests/pytests/test_geometric.py | Python | lgpl-3.0 | 3,139 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
from unittest import mock
import pytz
from airflow.providers.google.cloud.operators.workflows import (
WorkflowsCancelExecutionOperator,
WorkflowsCreateExecutionOperator,
WorkflowsCreateWorkflowOperator,
WorkflowsDeleteWorkflowOperator,
WorkflowsGetExecutionOperator,
WorkflowsGetWorkflowOperator,
WorkflowsListExecutionsOperator,
WorkflowsListWorkflowsOperator,
WorkflowsUpdateWorkflowOperator,
)
BASE_PATH = "airflow.providers.google.cloud.operators.workflows.{}"
LOCATION = "europe-west1"
WORKFLOW_ID = "workflow_id"
EXECUTION_ID = "execution_id"
WORKFLOW = {"aa": "bb"}
EXECUTION = {"ccc": "ddd"}
PROJECT_ID = "airflow-testing"
METADATA = None
TIMEOUT = None
RETRY = None
FILTER_ = "aaaa"
ORDER_BY = "bbb"
UPDATE_MASK = "aaa,bbb"
GCP_CONN_ID = "test-conn"
IMPERSONATION_CHAIN = None
class TestWorkflowsCreateWorkflowOperator:
@mock.patch(BASE_PATH.format("Workflow"))
@mock.patch(BASE_PATH.format("WorkflowsHook"))
def test_execute(self, mock_hook, mock_object):
op = WorkflowsCreateWorkflowOperator(
task_id="test_task",
workflow=WORKFLOW,
workflow_id=WORKFLOW_ID,
location=LOCATION,
project_id=PROJECT_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
result = op.execute({})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.create_workflow.assert_called_once_with(
workflow=WORKFLOW,
workflow_id=WORKFLOW_ID,
location=LOCATION,
project_id=PROJECT_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
assert result == mock_object.to_dict.return_value
class TestWorkflowsUpdateWorkflowOperator:
@mock.patch(BASE_PATH.format("Workflow"))
@mock.patch(BASE_PATH.format("WorkflowsHook"))
def test_execute(self, mock_hook, mock_object):
op = WorkflowsUpdateWorkflowOperator(
task_id="test_task",
workflow_id=WORKFLOW_ID,
location=LOCATION,
project_id=PROJECT_ID,
update_mask=UPDATE_MASK,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
result = op.execute({})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.get_workflow.assert_called_once_with(
workflow_id=WORKFLOW_ID,
location=LOCATION,
project_id=PROJECT_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
mock_hook.return_value.update_workflow.assert_called_once_with(
workflow=mock_hook.return_value.get_workflow.return_value,
update_mask=UPDATE_MASK,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
assert result == mock_object.to_dict.return_value
class TestWorkflowsDeleteWorkflowOperator:
@mock.patch(BASE_PATH.format("WorkflowsHook"))
def test_execute(
self,
mock_hook,
):
op = WorkflowsDeleteWorkflowOperator(
task_id="test_task",
workflow_id=WORKFLOW_ID,
location=LOCATION,
project_id=PROJECT_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
op.execute({})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.delete_workflow.assert_called_once_with(
workflow_id=WORKFLOW_ID,
location=LOCATION,
project_id=PROJECT_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
class TestWorkflowsListWorkflowsOperator:
@mock.patch(BASE_PATH.format("Workflow"))
@mock.patch(BASE_PATH.format("WorkflowsHook"))
def test_execute(self, mock_hook, mock_object):
workflow_mock = mock.MagicMock()
workflow_mock.start_time = datetime.datetime.now(tz=pytz.UTC) + datetime.timedelta(minutes=5)
mock_hook.return_value.list_workflows.return_value = [workflow_mock]
op = WorkflowsListWorkflowsOperator(
task_id="test_task",
location=LOCATION,
project_id=PROJECT_ID,
filter_=FILTER_,
order_by=ORDER_BY,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
result = op.execute({})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.list_workflows.assert_called_once_with(
location=LOCATION,
project_id=PROJECT_ID,
filter_=FILTER_,
order_by=ORDER_BY,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
assert result == [mock_object.to_dict.return_value]
class TestWorkflowsGetWorkflowOperator:
@mock.patch(BASE_PATH.format("Workflow"))
@mock.patch(BASE_PATH.format("WorkflowsHook"))
def test_execute(self, mock_hook, mock_object):
op = WorkflowsGetWorkflowOperator(
task_id="test_task",
workflow_id=WORKFLOW_ID,
location=LOCATION,
project_id=PROJECT_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
result = op.execute({})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.get_workflow.assert_called_once_with(
workflow_id=WORKFLOW_ID,
location=LOCATION,
project_id=PROJECT_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
assert result == mock_object.to_dict.return_value
class TestWorkflowExecutionsCreateExecutionOperator:
@mock.patch(BASE_PATH.format("Execution"))
@mock.patch(BASE_PATH.format("WorkflowsHook"))
@mock.patch(BASE_PATH.format("WorkflowsCreateExecutionOperator.xcom_push"))
def test_execute(self, mock_xcom, mock_hook, mock_object):
mock_hook.return_value.create_execution.return_value.name = "name/execution_id"
op = WorkflowsCreateExecutionOperator(
task_id="test_task",
workflow_id=WORKFLOW_ID,
execution=EXECUTION,
location=LOCATION,
project_id=PROJECT_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
result = op.execute({})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.create_execution.assert_called_once_with(
workflow_id=WORKFLOW_ID,
execution=EXECUTION,
location=LOCATION,
project_id=PROJECT_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
mock_xcom.assert_called_once_with({}, key="execution_id", value="execution_id")
assert result == mock_object.to_dict.return_value
class TestWorkflowExecutionsCancelExecutionOperator:
@mock.patch(BASE_PATH.format("Execution"))
@mock.patch(BASE_PATH.format("WorkflowsHook"))
def test_execute(self, mock_hook, mock_object):
op = WorkflowsCancelExecutionOperator(
task_id="test_task",
workflow_id=WORKFLOW_ID,
execution_id=EXECUTION_ID,
location=LOCATION,
project_id=PROJECT_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
result = op.execute({})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.cancel_execution.assert_called_once_with(
workflow_id=WORKFLOW_ID,
execution_id=EXECUTION_ID,
location=LOCATION,
project_id=PROJECT_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
assert result == mock_object.to_dict.return_value
class TestWorkflowExecutionsListExecutionsOperator:
@mock.patch(BASE_PATH.format("Execution"))
@mock.patch(BASE_PATH.format("WorkflowsHook"))
def test_execute(self, mock_hook, mock_object):
execution_mock = mock.MagicMock()
execution_mock.start_time = datetime.datetime.now(tz=pytz.UTC) + datetime.timedelta(minutes=5)
mock_hook.return_value.list_executions.return_value = [execution_mock]
op = WorkflowsListExecutionsOperator(
task_id="test_task",
workflow_id=WORKFLOW_ID,
location=LOCATION,
project_id=PROJECT_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
result = op.execute({})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.list_executions.assert_called_once_with(
workflow_id=WORKFLOW_ID,
location=LOCATION,
project_id=PROJECT_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
assert result == [mock_object.to_dict.return_value]
class TestWorkflowExecutionsGetExecutionOperator:
@mock.patch(BASE_PATH.format("Execution"))
@mock.patch(BASE_PATH.format("WorkflowsHook"))
def test_execute(self, mock_hook, mock_object):
op = WorkflowsGetExecutionOperator(
task_id="test_task",
workflow_id=WORKFLOW_ID,
execution_id=EXECUTION_ID,
location=LOCATION,
project_id=PROJECT_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
result = op.execute({})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.get_execution.assert_called_once_with(
workflow_id=WORKFLOW_ID,
execution_id=EXECUTION_ID,
location=LOCATION,
project_id=PROJECT_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
assert result == mock_object.to_dict.return_value
| apache/incubator-airflow | tests/providers/google/cloud/operators/test_workflows.py | Python | apache-2.0 | 12,510 |
import unittest, time, sys
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
        # assume we're at 0xdata with its hdfs namenode
h2o.init(1, use_hdfs=True, hdfs_version='cdh4', hdfs_name_node='mr-0x6')
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_hdfs_multi_copies(self):
print "\nUse the new regex capabilities for selecting hdfs: try *copies* at /datasets"
# pop open a browser on the cloud
# h2b.browseTheCloud()
# defaults to /datasets
parseResult = h2i.import_parse(path='datasets/manyfiles-nflx-gz/*', schema='hdfs', hex_key='manyfiles.hex',
exclude=None, header=None, timeoutSecs=600)
print "parse result:", parseResult['destination_key']
sys.stdout.flush()
if __name__ == '__main__':
h2o.unit_main()
| rowhit/h2o-2 | py/testdir_0xdata_only/test_hdfs_multi_copies.py | Python | apache-2.0 | 1,029 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental `dataset` API for parsing example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import structure
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.util.tf_export import tf_export
class _ParseExampleDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that parses `example` dataset into a `dict` dataset."""
def __init__(self, input_dataset, features, num_parallel_calls,
deterministic):
self._input_dataset = input_dataset
if not structure.are_compatible(
input_dataset.element_spec,
tensor_spec.TensorSpec([None], dtypes.string)):
raise TypeError("Input dataset should be a dataset of vectors of strings")
self._num_parallel_calls = num_parallel_calls
if deterministic is None:
self._deterministic = "default"
elif deterministic:
self._deterministic = "true"
else:
self._deterministic = "false"
# pylint: disable=protected-access
self._features = parsing_ops._prepend_none_dimension(features)
# TODO(b/112859642): Pass sparse_index and sparse_values for SparseFeature
params = parsing_ops._ParseOpParams.from_features(self._features, [
parsing_ops.VarLenFeature, parsing_ops.SparseFeature,
parsing_ops.FixedLenFeature, parsing_ops.FixedLenSequenceFeature,
parsing_ops.RaggedFeature
])
# pylint: enable=protected-access
self._sparse_keys = params.sparse_keys
self._sparse_types = params.sparse_types
self._ragged_keys = params.ragged_keys
self._ragged_value_types = params.ragged_value_types
self._ragged_split_types = params.ragged_split_types
self._dense_keys = params.dense_keys
self._dense_defaults = params.dense_defaults_vec
self._dense_shapes = params.dense_shapes_as_proto
self._dense_types = params.dense_types
input_dataset_shape = dataset_ops.get_legacy_output_shapes(
self._input_dataset)
self._element_spec = {}
for (key, value_type) in zip(params.sparse_keys, params.sparse_types):
self._element_spec[key] = sparse_tensor.SparseTensorSpec(
input_dataset_shape.concatenate([None]), value_type)
for (key, value_type, dense_shape) in zip(params.dense_keys,
params.dense_types,
params.dense_shapes):
self._element_spec[key] = tensor_spec.TensorSpec(
input_dataset_shape.concatenate(dense_shape), value_type)
for (key, value_type, splits_type) in zip(params.ragged_keys,
params.ragged_value_types,
params.ragged_split_types):
self._element_spec[key] = ragged_tensor.RaggedTensorSpec(
input_dataset_shape.concatenate([None]), value_type, 1, splits_type)
variant_tensor = (
gen_experimental_dataset_ops.parse_example_dataset_v2(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._num_parallel_calls,
self._dense_defaults,
self._sparse_keys,
self._dense_keys,
self._sparse_types,
self._dense_shapes,
deterministic=self._deterministic,
ragged_keys=self._ragged_keys,
ragged_value_types=self._ragged_value_types,
ragged_split_types=self._ragged_split_types,
**self._flat_structure))
super(_ParseExampleDataset, self).__init__(input_dataset, variant_tensor)
@property
def element_spec(self):
return self._element_spec
# TODO(b/111553342): add arguments names and example names as well.
@tf_export("data.experimental.parse_example_dataset")
def parse_example_dataset(features, num_parallel_calls=1, deterministic=None):
"""A transformation that parses `Example` protos into a `dict` of tensors.
Parses a number of serialized `Example` protos given in `serialized`. We refer
to `serialized` as a batch with `batch_size` many entries of individual
`Example` protos.
This op parses serialized examples into a dictionary mapping keys to `Tensor`,
`SparseTensor`, and `RaggedTensor` objects. `features` is a dict from keys to
`VarLenFeature`, `RaggedFeature`, `SparseFeature`, and `FixedLenFeature`
objects. Each `VarLenFeature` and `SparseFeature` is mapped to a
`SparseTensor`; each `RaggedFeature` is mapped to a `RaggedTensor`; and each
`FixedLenFeature` is mapped to a `Tensor`. See `tf.io.parse_example` for more
details about feature dictionaries.
Args:
features: A `dict` mapping feature keys to `FixedLenFeature`,
`VarLenFeature`, `RaggedFeature`, and `SparseFeature` values.
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number of parsing processes to call in parallel.
deterministic: (Optional.) A boolean controlling whether determinism
should be traded for performance by allowing elements to be produced out
of order if some parsing calls complete faster than others. If
`deterministic` is `None`, the
`tf.data.Options.experimental_deterministic` dataset option (`True` by
default) is used to decide whether to produce elements
deterministically.
Returns:
A dataset transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: if features argument is None.
"""
if features is None:
raise ValueError("Missing: features was %s." % features)
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
out_dataset = _ParseExampleDataset(dataset, features, num_parallel_calls,
deterministic)
if any(
isinstance(feature, parsing_ops.SparseFeature) or
(isinstance(feature, parsing_ops.RaggedFeature) and feature.partitions)
for feature in features.values()):
# pylint: disable=protected-access
# pylint: disable=g-long-lambda
out_dataset = out_dataset.map(
lambda x: parsing_ops._construct_tensors_for_composite_features(
features, x),
num_parallel_calls=num_parallel_calls)
return out_dataset
return _apply_fn
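# --- Illustrative usage sketch (editor's addition, not part of the original
# module; kept commented out so importing this file stays side-effect free).
# The feature names and the `serialized_ds` dataset below are hypothetical;
# the only APIs assumed are `tf.io.FixedLenFeature`, `tf.io.VarLenFeature`,
# `tf.data.Dataset.apply` and the `parse_example_dataset` transformation
# defined above.
#
#   import tensorflow as tf
#
#   features = {
#       "label": tf.io.FixedLenFeature([], tf.int64, default_value=0),
#       "tokens": tf.io.VarLenFeature(tf.string),
#   }
#   # `serialized_ds` is assumed to be a dataset whose elements are batched
#   # (vector-shaped) serialized tf.train.Example protos.
#   parsed_ds = serialized_ds.apply(
#       tf.data.experimental.parse_example_dataset(features,
#                                                   num_parallel_calls=4))
#   # Each element of `parsed_ds` is a dict mapping "label" to a dense Tensor
#   # and "tokens" to a SparseTensor.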
| karllessard/tensorflow | tensorflow/python/data/experimental/ops/parsing_ops.py | Python | apache-2.0 | 7,348 |
"""The tests for the google calendar component."""
# pylint: disable=protected-access
import logging
import unittest
from unittest.mock import patch, Mock
import pytest
import homeassistant.components.calendar as calendar_base
from homeassistant.components.google import calendar
import homeassistant.util.dt as dt_util
from homeassistant.const import CONF_PLATFORM, STATE_OFF, STATE_ON
from homeassistant.helpers.template import DATE_STR_FORMAT
from tests.common import get_test_home_assistant, MockDependency
TEST_PLATFORM = {calendar_base.DOMAIN: {CONF_PLATFORM: 'test'}}
_LOGGER = logging.getLogger(__name__)
class TestComponentsGoogleCalendar(unittest.TestCase):
"""Test the Google calendar."""
hass = None # HomeAssistant
# pylint: disable=invalid-name
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.hass.http = Mock()
# Set our timezone to CST/Regina so we can check calculations
# This keeps UTC-6 all year round
dt_util.set_default_time_zone(dt_util.get_time_zone('America/Regina'))
# pylint: disable=invalid-name
def tearDown(self):
"""Stop everything that was started."""
dt_util.set_default_time_zone(dt_util.get_time_zone('UTC'))
self.hass.stop()
@patch('homeassistant.components.google.calendar.GoogleCalendarData')
def test_all_day_event(self, mock_next_event):
"""Test that we can create an event trigger on device."""
week_from_today = dt_util.dt.date.today() \
+ dt_util.dt.timedelta(days=7)
event = {
'summary': 'Test All Day Event',
'start': {
'date': week_from_today.isoformat()
},
'end': {
'date': (week_from_today + dt_util.dt.timedelta(days=1))
.isoformat()
},
'location': 'Test Cases',
'description': 'We\'re just testing that all day events get setup '
'correctly',
'kind': 'calendar#event',
'created': '2016-06-23T16:37:57.000Z',
'transparency': 'transparent',
'updated': '2016-06-24T01:57:21.045Z',
'reminders': {'useDefault': True},
'organizer': {
'email': '[email protected]',
'displayName': 'Organizer Name',
'self': True
},
'sequence': 0,
'creator': {
'email': '[email protected]',
'displayName': 'Organizer Name',
'self': True
},
'id': '_c8rinwq863h45qnucyoi43ny8',
'etag': '"2933466882090000"',
'htmlLink': 'https://www.google.com/calendar/event?eid=*******',
'iCalUID': '[email protected]',
'status': 'confirmed'
}
mock_next_event.return_value.event = event
device_name = 'Test All Day'
cal = calendar.GoogleCalendarEventDevice(self.hass, None,
'', {'name': device_name})
assert cal.name == device_name
assert cal.state == STATE_OFF
assert not cal.offset_reached()
assert cal.device_state_attributes == {
'message': event['summary'],
'all_day': True,
'offset_reached': False,
'start_time': '{} 00:00:00'.format(event['start']['date']),
'end_time': '{} 00:00:00'.format(event['end']['date']),
'location': event['location'],
'description': event['description'],
}
@patch('homeassistant.components.google.calendar.GoogleCalendarData')
def test_future_event(self, mock_next_event):
"""Test that we can create an event trigger on device."""
one_hour_from_now = dt_util.now() \
+ dt_util.dt.timedelta(minutes=30)
event = {
'start': {
'dateTime': one_hour_from_now.isoformat()
},
'end': {
'dateTime': (one_hour_from_now
+ dt_util.dt.timedelta(minutes=60))
.isoformat()
},
'summary': 'Test Event in 30 minutes',
'reminders': {'useDefault': True},
'id': 'aioehgni435lihje',
'status': 'confirmed',
'updated': '2016-11-05T15:52:07.329Z',
'organizer': {
'email': '[email protected]',
'displayName': 'Organizer Name',
'self': True,
},
'created': '2016-11-05T15:52:07.000Z',
'iCalUID': '[email protected]',
'sequence': 0,
'creator': {
'email': '[email protected]',
'displayName': 'Organizer Name',
},
'etag': '"2956722254658000"',
'kind': 'calendar#event',
'htmlLink': 'https://www.google.com/calendar/event?eid=*******',
}
mock_next_event.return_value.event = event
device_name = 'Test Future Event'
device_id = 'test_future_event'
cal = calendar.GoogleCalendarEventDevice(self.hass, None, device_id,
{'name': device_name})
assert cal.name == device_name
assert cal.state == STATE_OFF
assert not cal.offset_reached()
assert cal.device_state_attributes == {
'message': event['summary'],
'all_day': False,
'offset_reached': False,
'start_time': one_hour_from_now.strftime(DATE_STR_FORMAT),
'end_time':
(one_hour_from_now + dt_util.dt.timedelta(minutes=60))
.strftime(DATE_STR_FORMAT),
'location': '',
'description': '',
}
@patch('homeassistant.components.google.calendar.GoogleCalendarData')
def test_in_progress_event(self, mock_next_event):
"""Test that we can create an event trigger on device."""
middle_of_event = dt_util.now() \
- dt_util.dt.timedelta(minutes=30)
event = {
'start': {
'dateTime': middle_of_event.isoformat()
},
'end': {
'dateTime': (middle_of_event + dt_util.dt
.timedelta(minutes=60))
.isoformat()
},
'summary': 'Test Event in Progress',
'reminders': {'useDefault': True},
'id': 'aioehgni435lihje',
'status': 'confirmed',
'updated': '2016-11-05T15:52:07.329Z',
'organizer': {
'email': '[email protected]',
'displayName': 'Organizer Name',
'self': True,
},
'created': '2016-11-05T15:52:07.000Z',
'iCalUID': '[email protected]',
'sequence': 0,
'creator': {
'email': '[email protected]',
'displayName': 'Organizer Name',
},
'etag': '"2956722254658000"',
'kind': 'calendar#event',
'htmlLink': 'https://www.google.com/calendar/event?eid=*******',
}
mock_next_event.return_value.event = event
device_name = 'Test Event in Progress'
device_id = 'test_event_in_progress'
cal = calendar.GoogleCalendarEventDevice(self.hass, None, device_id,
{'name': device_name})
assert cal.name == device_name
assert cal.state == STATE_ON
assert not cal.offset_reached()
assert cal.device_state_attributes == {
'message': event['summary'],
'all_day': False,
'offset_reached': False,
'start_time': middle_of_event.strftime(DATE_STR_FORMAT),
'end_time':
(middle_of_event + dt_util.dt.timedelta(minutes=60))
.strftime(DATE_STR_FORMAT),
'location': '',
'description': '',
}
@patch('homeassistant.components.google.calendar.GoogleCalendarData')
def test_offset_in_progress_event(self, mock_next_event):
"""Test that we can create an event trigger on device."""
middle_of_event = dt_util.now() \
+ dt_util.dt.timedelta(minutes=14)
event_summary = 'Test Event in Progress'
event = {
'start': {
'dateTime': middle_of_event.isoformat()
},
'end': {
'dateTime': (middle_of_event + dt_util.dt
.timedelta(minutes=60))
.isoformat()
},
'summary': '{} !!-15'.format(event_summary),
'reminders': {'useDefault': True},
'id': 'aioehgni435lihje',
'status': 'confirmed',
'updated': '2016-11-05T15:52:07.329Z',
'organizer': {
'email': '[email protected]',
'displayName': 'Organizer Name',
'self': True,
},
'created': '2016-11-05T15:52:07.000Z',
'iCalUID': '[email protected]',
'sequence': 0,
'creator': {
'email': '[email protected]',
'displayName': 'Organizer Name',
},
'etag': '"2956722254658000"',
'kind': 'calendar#event',
'htmlLink': 'https://www.google.com/calendar/event?eid=*******',
}
mock_next_event.return_value.event = event
device_name = 'Test Event in Progress'
device_id = 'test_event_in_progress'
cal = calendar.GoogleCalendarEventDevice(self.hass, None, device_id,
{'name': device_name})
assert cal.name == device_name
assert cal.state == STATE_OFF
assert cal.offset_reached()
assert cal.device_state_attributes == {
'message': event_summary,
'all_day': False,
'offset_reached': True,
'start_time': middle_of_event.strftime(DATE_STR_FORMAT),
'end_time':
(middle_of_event + dt_util.dt.timedelta(minutes=60))
.strftime(DATE_STR_FORMAT),
'location': '',
'description': '',
}
@pytest.mark.skip
@patch('homeassistant.components.google.calendar.GoogleCalendarData')
def test_all_day_offset_in_progress_event(self, mock_next_event):
"""Test that we can create an event trigger on device."""
tomorrow = dt_util.dt.date.today() \
+ dt_util.dt.timedelta(days=1)
event_summary = 'Test All Day Event Offset In Progress'
event = {
'summary': '{} !!-25:0'.format(event_summary),
'start': {
'date': tomorrow.isoformat()
},
'end': {
'date': (tomorrow + dt_util.dt.timedelta(days=1))
.isoformat()
},
'location': 'Test Cases',
'description': 'We\'re just testing that all day events get setup '
'correctly',
'kind': 'calendar#event',
'created': '2016-06-23T16:37:57.000Z',
'transparency': 'transparent',
'updated': '2016-06-24T01:57:21.045Z',
'reminders': {'useDefault': True},
'organizer': {
'email': '[email protected]',
'displayName': 'Organizer Name',
'self': True
},
'sequence': 0,
'creator': {
'email': '[email protected]',
'displayName': 'Organizer Name',
'self': True
},
'id': '_c8rinwq863h45qnucyoi43ny8',
'etag': '"2933466882090000"',
'htmlLink': 'https://www.google.com/calendar/event?eid=*******',
'iCalUID': '[email protected]',
'status': 'confirmed'
}
mock_next_event.return_value.event = event
device_name = 'Test All Day Offset In Progress'
device_id = 'test_all_day_offset_in_progress'
cal = calendar.GoogleCalendarEventDevice(self.hass, None, device_id,
{'name': device_name})
assert cal.name == device_name
assert cal.state == STATE_OFF
assert cal.offset_reached()
assert cal.device_state_attributes == {
'message': event_summary,
'all_day': True,
'offset_reached': True,
'start_time': '{} 06:00:00'.format(event['start']['date']),
'end_time': '{} 06:00:00'.format(event['end']['date']),
'location': event['location'],
'description': event['description'],
}
@patch('homeassistant.components.google.calendar.GoogleCalendarData')
def test_all_day_offset_event(self, mock_next_event):
"""Test that we can create an event trigger on device."""
tomorrow = dt_util.dt.date.today() \
+ dt_util.dt.timedelta(days=2)
offset_hours = (1 + dt_util.now().hour)
event_summary = 'Test All Day Event Offset'
event = {
'summary': '{} !!-{}:0'.format(event_summary, offset_hours),
'start': {
'date': tomorrow.isoformat()
},
'end': {
'date': (tomorrow + dt_util.dt.timedelta(days=1))
.isoformat()
},
'location': 'Test Cases',
'description': 'We\'re just testing that all day events get setup '
'correctly',
'kind': 'calendar#event',
'created': '2016-06-23T16:37:57.000Z',
'transparency': 'transparent',
'updated': '2016-06-24T01:57:21.045Z',
'reminders': {'useDefault': True},
'organizer': {
'email': '[email protected]',
'displayName': 'Organizer Name',
'self': True
},
'sequence': 0,
'creator': {
'email': '[email protected]',
'displayName': 'Organizer Name',
'self': True
},
'id': '_c8rinwq863h45qnucyoi43ny8',
'etag': '"2933466882090000"',
'htmlLink': 'https://www.google.com/calendar/event?eid=*******',
'iCalUID': '[email protected]',
'status': 'confirmed'
}
mock_next_event.return_value.event = event
device_name = 'Test All Day Offset'
device_id = 'test_all_day_offset'
cal = calendar.GoogleCalendarEventDevice(self.hass, None, device_id,
{'name': device_name})
assert cal.name == device_name
assert cal.state == STATE_OFF
assert not cal.offset_reached()
assert cal.device_state_attributes == {
'message': event_summary,
'all_day': True,
'offset_reached': False,
'start_time': '{} 00:00:00'.format(event['start']['date']),
'end_time': '{} 00:00:00'.format(event['end']['date']),
'location': event['location'],
'description': event['description'],
}
@MockDependency("httplib2")
def test_update_false(self, mock_httplib2):
"""Test that the update returns False upon Error."""
mock_service = Mock()
mock_service.get = Mock(
side_effect=mock_httplib2.ServerNotFoundError("unit test"))
cal = calendar.GoogleCalendarEventDevice(self.hass, mock_service, None,
{'name': "test"})
result = cal.data.update()
assert not result
| PetePriority/home-assistant | tests/components/google/test_calendar.py | Python | apache-2.0 | 16,362 |
import unittest, time, sys
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_import as h2i
params = {
'response': 1049,
'family': 'binomial',
'beta_epsilon': 0.0001,
'alpha': 1.0,
'lambda': 1e-05,
'n_folds': 1,
'max_iter': 20,
}
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(1)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_syn_2659x1049(self):
csvFilename = "syn_2659x1049.csv"
csvPathname = 'logreg' + '/' + csvFilename
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=csvFilename + ".hex", schema='put')
kwargs = params
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=120, **kwargs)
h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)
def test_GLM2_syn_2659x1049x2enum(self):
csvFilename = "syn_2659x1049x2enum.csv"
csvPathname = 'logreg' + '/' + csvFilename
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=csvFilename + ".hex", schema='put')
kwargs = params
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=240, **kwargs)
h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)
if __name__ == '__main__':
h2o.unit_main()
| 111t8e/h2o-2 | py/testdir_single_jvm/test_GLM2_syn_2659x1049.py | Python | apache-2.0 | 1,417 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Apache Beam SDK version information and utilities."""
__version__ = '2.1.0.dev'
| wtanaka/beam | sdks/python/apache_beam/version.py | Python | apache-2.0 | 870 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various learning rate decay functions."""
import abc
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.optimizers.schedules.LearningRateSchedule")
class LearningRateSchedule(object):
"""The learning rate schedule base class.
You can use a learning rate schedule to modulate how the learning rate
of your optimizer changes over time.
Several built-in learning rate schedules are available, such as
`tf.keras.optimizers.schedules.ExponentialDecay` or
`tf.keras.optimizers.schedules.PiecewiseConstantDecay`:
```python
lr_schedule = keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=1e-2,
decay_steps=10000,
decay_rate=0.9)
optimizer = keras.optimizers.SGD(learning_rate=lr_schedule)
```
A `LearningRateSchedule` instance can be passed in as the `learning_rate`
argument of any optimizer.
To implement your own schedule object, you should implement the `__call__`
method, which takes a `step` argument (scalar integer tensor, the
current training step count).
Like for any other Keras object, you can also optionally
make your object serializable by implementing the `get_config`
and `from_config` methods.
Example:
```python
class MyLRSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
def __init__(self, initial_learning_rate):
self.initial_learning_rate = initial_learning_rate
def __call__(self, step):
return self.initial_learning_rate / (step + 1)
optimizer = tf.keras.optimizers.SGD(learning_rate=MyLRSchedule(0.1))
```
"""
@abc.abstractmethod
def __call__(self, step):
raise NotImplementedError("Learning rate schedule must override __call__")
@abc.abstractmethod
def get_config(self):
raise NotImplementedError("Learning rate schedule must override get_config")
@classmethod
def from_config(cls, config):
"""Instantiates a `LearningRateSchedule` from its config.
Args:
config: Output of `get_config()`.
Returns:
A `LearningRateSchedule` instance.
"""
return cls(**config)
@keras_export("keras.optimizers.schedules.ExponentialDecay")
class ExponentialDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses an exponential decay schedule.
When training a model, it is often useful to lower the learning rate as
the training progresses. This schedule applies an exponential decay function
to an optimizer step, given a provided initial learning rate.
  The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
return initial_learning_rate * decay_rate ^ (step / decay_steps)
```
If the argument `staircase` is `True`, then `step / decay_steps` is
an integer division and the decayed learning rate follows a
staircase function.
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate.
Example: When fitting a Keras model, decay every 100000 steps with a base
of 0.96:
```python
initial_learning_rate = 0.1
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate,
decay_steps=100000,
decay_rate=0.96,
staircase=True)
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=lr_schedule),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels, epochs=5)
```
The learning rate schedule is also serializable and deserializable using
`tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
decay_rate,
staircase=False,
name=None):
"""Applies exponential decay to the learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Must be positive. See the decay computation above.
decay_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The decay rate.
staircase: Boolean. If `True` decay the learning rate at discrete
        intervals.
name: String. Optional name of the operation. Defaults to
'ExponentialDecay'.
"""
super(ExponentialDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.decay_rate = decay_rate
self.staircase = staircase
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "ExponentialDecay") as name:
initial_learning_rate = ops.convert_to_tensor_v2_with_dispatch(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
decay_rate = math_ops.cast(self.decay_rate, dtype)
global_step_recomp = math_ops.cast(step, dtype)
p = global_step_recomp / decay_steps
if self.staircase:
p = math_ops.floor(p)
return math_ops.multiply(
initial_learning_rate, math_ops.pow(decay_rate, p), name=name)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"decay_rate": self.decay_rate,
"staircase": self.staircase,
"name": self.name
}
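# --- Editor's numeric check of the `ExponentialDecay` formula documented
# above (an illustrative sketch, commented out; the values follow directly
# from initial_learning_rate * decay_rate ** (step / decay_steps)).
#
#   schedule = ExponentialDecay(
#       initial_learning_rate=0.1, decay_steps=100000, decay_rate=0.96)
#   # schedule(0)       -> 0.1
#   # schedule(100000)  -> 0.1 * 0.96      = 0.096
#   # schedule(200000)  -> 0.1 * 0.96 ** 2 = 0.09216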
@keras_export("keras.optimizers.schedules.PiecewiseConstantDecay")
class PiecewiseConstantDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses a piecewise constant decay schedule.
The function returns a 1-arg callable to compute the piecewise constant
when passed the current optimizer step. This can be useful for changing the
learning rate value across different invocations of optimizer functions.
Example: use a learning rate that's 1.0 for the first 100001 steps, 0.5
for the next 10000 steps, and 0.1 for any additional steps.
```python
step = tf.Variable(0, trainable=False)
boundaries = [100000, 110000]
values = [1.0, 0.5, 0.1]
learning_rate_fn = keras.optimizers.schedules.PiecewiseConstantDecay(
boundaries, values)
# Later, whenever we perform an optimization step, we pass in the step.
learning_rate = learning_rate_fn(step)
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate. The learning rate schedule is also serializable and
deserializable using `tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as the boundary tensors.
The output of the 1-arg function that takes the `step`
is `values[0]` when `step <= boundaries[0]`,
`values[1]` when `step > boundaries[0]` and `step <= boundaries[1]`, ...,
  and `values[-1]` when `step > boundaries[-1]`.
"""
def __init__(
self,
boundaries,
values,
name=None):
"""Piecewise constant from boundaries and interval values.
Args:
boundaries: A list of `Tensor`s or `int`s or `float`s with strictly
increasing entries, and with all elements having the same type as the
optimizer step.
values: A list of `Tensor`s or `float`s or `int`s that specifies the
values for the intervals defined by `boundaries`. It should have one
more element than `boundaries`, and all elements should have the same
type.
name: A string. Optional name of the operation. Defaults to
'PiecewiseConstant'.
Raises:
ValueError: if the number of elements in the lists do not match.
"""
super(PiecewiseConstantDecay, self).__init__()
if len(boundaries) != len(values) - 1:
raise ValueError(
"The length of boundaries should be 1 less than the length of values")
self.boundaries = boundaries
self.values = values
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "PiecewiseConstant"):
boundaries = nest.map_structure(ops.convert_to_tensor_v2_with_dispatch,
nest.flatten(self.boundaries))
values = nest.map_structure(ops.convert_to_tensor_v2_with_dispatch,
nest.flatten(self.values))
x_recomp = ops.convert_to_tensor_v2_with_dispatch(step)
for i, b in enumerate(boundaries):
if b.dtype.base_dtype != x_recomp.dtype.base_dtype:
# We cast the boundaries to have the same type as the step
b = math_ops.cast(b, x_recomp.dtype.base_dtype)
boundaries[i] = b
pred_fn_pairs = []
pred_fn_pairs.append((x_recomp <= boundaries[0], lambda: values[0]))
pred_fn_pairs.append((x_recomp > boundaries[-1], lambda: values[-1]))
for low, high, v in zip(boundaries[:-1], boundaries[1:], values[1:-1]):
# Need to bind v here; can do this with lambda v=v: ...
pred = (x_recomp > low) & (x_recomp <= high)
pred_fn_pairs.append((pred, lambda v=v: v))
# The default isn't needed here because our conditions are mutually
# exclusive and exhaustive, but tf.case requires it.
default = lambda: values[0]
return control_flow_ops.case(pred_fn_pairs, default, exclusive=True)
def get_config(self):
return {
"boundaries": self.boundaries,
"values": self.values,
"name": self.name
}
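# --- Editor's worked example of the boundary semantics documented above
# (an illustrative sketch, commented out; it mirrors the docstring example).
#
#   schedule = PiecewiseConstantDecay(boundaries=[100000, 110000],
#                                     values=[1.0, 0.5, 0.1])
#   # schedule(100000) -> 1.0   (step <= boundaries[0])
#   # schedule(100001) -> 0.5   (boundaries[0] < step <= boundaries[1])
#   # schedule(110001) -> 0.1   (step > boundaries[-1])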
@keras_export("keras.optimizers.schedules.PolynomialDecay")
class PolynomialDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses a polynomial decay schedule.
It is commonly observed that a monotonically decreasing learning rate, whose
degree of change is carefully chosen, results in a better performing model.
This schedule applies a polynomial decay function to an optimizer step,
given a provided `initial_learning_rate`, to reach an `end_learning_rate`
in the given `decay_steps`.
It requires a `step` value to compute the decayed learning rate. You
can just pass a TensorFlow variable that you increment at each training
step.
The schedule is a 1-arg callable that produces a decayed learning rate
when passed the current optimizer step. This can be useful for changing the
learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
step = min(step, decay_steps)
return ((initial_learning_rate - end_learning_rate) *
(1 - step / decay_steps) ^ (power)
) + end_learning_rate
```
If `cycle` is True then a multiple of `decay_steps` is used, the first one
that is bigger than `step`.
```python
def decayed_learning_rate(step):
decay_steps = decay_steps * ceil(step / decay_steps)
return ((initial_learning_rate - end_learning_rate) *
(1 - step / decay_steps) ^ (power)
) + end_learning_rate
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate.
Example: Fit a model while decaying from 0.1 to 0.01 in 10000 steps using
sqrt (i.e. power=0.5):
```python
...
starter_learning_rate = 0.1
end_learning_rate = 0.01
decay_steps = 10000
learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
starter_learning_rate,
decay_steps,
end_learning_rate,
power=0.5)
model.compile(optimizer=tf.keras.optimizers.SGD(
learning_rate=learning_rate_fn),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels, epochs=5)
```
The learning rate schedule is also serializable and deserializable using
`tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
end_learning_rate=0.0001,
power=1.0,
cycle=False,
name=None):
"""Applies a polynomial decay to the learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Must be positive. See the decay computation above.
end_learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The minimal end learning rate.
power: A scalar `float32` or `float64` `Tensor` or a
Python number. The power of the polynomial. Defaults to linear, 1.0.
cycle: A boolean, whether or not it should cycle beyond decay_steps.
name: String. Optional name of the operation. Defaults to
'PolynomialDecay'.
"""
super(PolynomialDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.end_learning_rate = end_learning_rate
self.power = power
self.cycle = cycle
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "PolynomialDecay") as name:
initial_learning_rate = ops.convert_to_tensor_v2_with_dispatch(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
end_learning_rate = math_ops.cast(self.end_learning_rate, dtype)
power = math_ops.cast(self.power, dtype)
global_step_recomp = math_ops.cast(step, dtype)
decay_steps_recomp = math_ops.cast(self.decay_steps, dtype)
if self.cycle:
# Find the first multiple of decay_steps that is bigger than
# global_step. If global_step is zero set the multiplier to 1
multiplier = array_ops.where_v2(
math_ops.equal(global_step_recomp, 0), 1.0,
math_ops.ceil(global_step_recomp / self.decay_steps))
decay_steps_recomp = math_ops.multiply(decay_steps_recomp, multiplier)
else:
# Make sure that the global_step used is not bigger than decay_steps.
global_step_recomp = math_ops.minimum(global_step_recomp,
decay_steps_recomp)
p = math_ops.divide(global_step_recomp, decay_steps_recomp)
return math_ops.add(
math_ops.multiply(initial_learning_rate - end_learning_rate,
math_ops.pow(1 - p, power)),
end_learning_rate,
name=name)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"end_learning_rate": self.end_learning_rate,
"power": self.power,
"cycle": self.cycle,
"name": self.name
}
@keras_export("keras.optimizers.schedules.InverseTimeDecay")
class InverseTimeDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses an inverse time decay schedule.
When training a model, it is often useful to lower the learning rate as
the training progresses. This schedule applies the inverse decay function
to an optimizer step, given a provided initial learning rate.
It requires a `step` value to compute the decayed learning rate. You can
just pass a TensorFlow variable that you increment at each training step.
  The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
return initial_learning_rate / (1 + decay_rate * step / decay_step)
```
or, if `staircase` is `True`, as:
```python
def decayed_learning_rate(step):
return initial_learning_rate / (1 + decay_rate * floor(step / decay_step))
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate.
Example: Fit a Keras model when decaying 1/t with a rate of 0.5:
```python
...
initial_learning_rate = 0.1
decay_steps = 1.0
decay_rate = 0.5
learning_rate_fn = keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate, decay_steps, decay_rate)
model.compile(optimizer=tf.keras.optimizers.SGD(
learning_rate=learning_rate_fn),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels, epochs=5)
```
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
decay_rate,
staircase=False,
name=None):
"""Applies inverse time decay to the initial learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
decay_steps: How often to apply decay.
decay_rate: A Python number. The decay rate.
staircase: Whether to apply decay in a discrete staircase, as opposed to
continuous, fashion.
name: String. Optional name of the operation. Defaults to
'InverseTimeDecay'.
"""
super(InverseTimeDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.decay_rate = decay_rate
self.staircase = staircase
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "InverseTimeDecay") as name:
initial_learning_rate = ops.convert_to_tensor_v2_with_dispatch(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
decay_rate = math_ops.cast(self.decay_rate, dtype)
global_step_recomp = math_ops.cast(step, dtype)
p = global_step_recomp / decay_steps
if self.staircase:
p = math_ops.floor(p)
const = math_ops.cast(constant_op.constant(1), dtype)
denom = math_ops.add(const, math_ops.multiply(decay_rate, p))
return math_ops.divide(initial_learning_rate, denom, name=name)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"decay_rate": self.decay_rate,
"staircase": self.staircase,
"name": self.name
}
@keras_export("keras.optimizers.schedules.CosineDecay",
"keras.experimental.CosineDecay")
class CosineDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses a cosine decay schedule.
See [Loshchilov & Hutter, ICLR2016](https://arxiv.org/abs/1608.03983),
SGDR: Stochastic Gradient Descent with Warm Restarts.
When training a model, it is often useful to lower the learning rate as
the training progresses. This schedule applies a cosine decay function
to an optimizer step, given a provided initial learning rate.
It requires a `step` value to compute the decayed learning rate. You can
just pass a TensorFlow variable that you increment at each training step.
  The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
step = min(step, decay_steps)
cosine_decay = 0.5 * (1 + cos(pi * step / decay_steps))
decayed = (1 - alpha) * cosine_decay + alpha
return initial_learning_rate * decayed
```
Example usage:
```python
decay_steps = 1000
lr_decayed_fn = tf.keras.optimizers.schedules.CosineDecay(
initial_learning_rate, decay_steps)
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate. The learning rate schedule is also serializable and
deserializable using `tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
alpha=0.0,
name=None):
"""Applies cosine decay to the learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` Tensor or a
Python number. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Number of steps to decay over.
alpha: A scalar `float32` or `float64` Tensor or a Python number.
Minimum learning rate value as a fraction of initial_learning_rate.
name: String. Optional name of the operation. Defaults to 'CosineDecay'.
"""
super(CosineDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.alpha = alpha
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "CosineDecay"):
initial_learning_rate = ops.convert_to_tensor_v2_with_dispatch(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
global_step_recomp = math_ops.cast(step, dtype)
global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
completed_fraction = global_step_recomp / decay_steps
cosine_decayed = 0.5 * (1.0 + math_ops.cos(
constant_op.constant(math.pi) * completed_fraction))
decayed = (1 - self.alpha) * cosine_decayed + self.alpha
return math_ops.multiply(initial_learning_rate, decayed)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"alpha": self.alpha,
"name": self.name
}
@keras_export("keras.optimizers.schedules.CosineDecayRestarts",
"keras.experimental.CosineDecayRestarts")
class CosineDecayRestarts(LearningRateSchedule):
"""A LearningRateSchedule that uses a cosine decay schedule with restarts.
See [Loshchilov & Hutter, ICLR2016](https://arxiv.org/abs/1608.03983),
SGDR: Stochastic Gradient Descent with Warm Restarts.
When training a model, it is often useful to lower the learning rate as
the training progresses. This schedule applies a cosine decay function with
restarts to an optimizer step, given a provided initial learning rate.
It requires a `step` value to compute the decayed learning rate. You can
just pass a TensorFlow variable that you increment at each training step.
  The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
The learning rate multiplier first decays
from 1 to `alpha` for `first_decay_steps` steps. Then, a warm
restart is performed. Each new warm restart runs for `t_mul` times more
steps and with `m_mul` times smaller initial learning rate.
Example usage:
```python
first_decay_steps = 1000
lr_decayed_fn = (
tf.keras.optimizers.schedules.CosineDecayRestarts(
initial_learning_rate,
first_decay_steps))
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate. The learning rate schedule is also serializable and
deserializable using `tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
first_decay_steps,
t_mul=2.0,
m_mul=1.0,
alpha=0.0,
name=None):
"""Applies cosine decay with restarts to the learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` Tensor or a Python
number. The initial learning rate.
first_decay_steps: A scalar `int32` or `int64` `Tensor` or a Python
number. Number of steps to decay over.
t_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
        Used to derive the number of iterations in the i-th period.
      m_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
        Used to derive the initial learning rate of the i-th period.
alpha: A scalar `float32` or `float64` Tensor or a Python number.
Minimum learning rate value as a fraction of the initial_learning_rate.
name: String. Optional name of the operation. Defaults to 'SGDRDecay'.
"""
super(CosineDecayRestarts, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.first_decay_steps = first_decay_steps
self._t_mul = t_mul
self._m_mul = m_mul
self.alpha = alpha
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "SGDRDecay") as name:
initial_learning_rate = ops.convert_to_tensor_v2_with_dispatch(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
first_decay_steps = math_ops.cast(self.first_decay_steps, dtype)
alpha = math_ops.cast(self.alpha, dtype)
t_mul = math_ops.cast(self._t_mul, dtype)
m_mul = math_ops.cast(self._m_mul, dtype)
global_step_recomp = math_ops.cast(step, dtype)
completed_fraction = global_step_recomp / first_decay_steps
def compute_step(completed_fraction, geometric=False):
"""Helper for `cond` operation."""
if geometric:
i_restart = math_ops.floor(
math_ops.log(1.0 - completed_fraction * (1.0 - t_mul)) /
math_ops.log(t_mul))
sum_r = (1.0 - t_mul**i_restart) / (1.0 - t_mul)
completed_fraction = (completed_fraction - sum_r) / t_mul**i_restart
else:
i_restart = math_ops.floor(completed_fraction)
completed_fraction -= i_restart
return i_restart, completed_fraction
i_restart, completed_fraction = control_flow_ops.cond(
math_ops.equal(t_mul, 1.0),
lambda: compute_step(completed_fraction, geometric=False),
lambda: compute_step(completed_fraction, geometric=True))
m_fac = m_mul**i_restart
cosine_decayed = 0.5 * m_fac * (1.0 + math_ops.cos(
constant_op.constant(math.pi) * completed_fraction))
decayed = (1 - alpha) * cosine_decayed + alpha
return math_ops.multiply(initial_learning_rate, decayed, name=name)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"first_decay_steps": self.first_decay_steps,
"t_mul": self._t_mul,
"m_mul": self._m_mul,
"alpha": self.alpha,
"name": self.name
}
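# Editor's note: unlike the schedules above, the `CosineDecayRestarts`
# docstring does not spell out the decay computation. The helper below is an
# illustrative pure-Python mirror of its `__call__` (an editorial sketch, not
# part of the TensorFlow API); it only relies on the `math` module already
# imported at the top of this file.
def _cosine_decay_restarts_reference(step, initial_learning_rate,
                                     first_decay_steps, t_mul=2.0, m_mul=1.0,
                                     alpha=0.0):
  """Non-graph reference computation of the SGDR schedule defined above."""
  completed_fraction = step / first_decay_steps
  if t_mul == 1.0:
    # All restart periods have the same length.
    i_restart = math.floor(completed_fraction)
    completed_fraction -= i_restart
  else:
    # Period i has length first_decay_steps * t_mul**i, so the restart index
    # is recovered from the geometric series of completed periods.
    i_restart = math.floor(
        math.log(1.0 - completed_fraction * (1.0 - t_mul)) / math.log(t_mul))
    sum_r = (1.0 - t_mul**i_restart) / (1.0 - t_mul)
    completed_fraction = (completed_fraction - sum_r) / t_mul**i_restart
  m_fac = m_mul**i_restart
  cosine_decayed = 0.5 * m_fac * (
      1.0 + math.cos(math.pi * completed_fraction))
  decayed = (1 - alpha) * cosine_decayed + alpha
  return initial_learning_rate * decayed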
# Note: this code is still used by V1 APIs.
class LinearCosineDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses a linear cosine decay schedule.
See [Bello et al., ICML2017] Neural Optimizer Search with RL.
https://arxiv.org/abs/1709.07417
For the idea of warm starts here controlled by `num_periods`,
see [Loshchilov & Hutter, ICLR2016] SGDR: Stochastic Gradient Descent
with Warm Restarts. https://arxiv.org/abs/1608.03983
Note that linear cosine decay is more aggressive than cosine decay and
larger initial learning rates can typically be used.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This schedule applies a linear cosine decay
function to an optimizer step, given a provided initial learning rate.
It requires a `step` value to compute the decayed learning rate. You can
just pass a TensorFlow variable that you increment at each training step.
  The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
step = min(step, decay_steps)
linear_decay = (decay_steps - step) / decay_steps
cosine_decay = 0.5 * (
1 + cos(pi * 2 * num_periods * step / decay_steps))
decayed = (alpha + linear_decay) * cosine_decay + beta
return initial_learning_rate * decayed
```
Example usage:
```python
decay_steps = 1000
lr_decayed_fn = (
tf.keras.experimental.LinearCosineDecay(
initial_learning_rate, decay_steps))
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate. The learning rate schedule is also serializable and
deserializable using `tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
num_periods=0.5,
alpha=0.0,
beta=0.001,
name=None):
"""Applies linear cosine decay to the learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` Tensor or a Python
number. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Number of steps to decay over.
num_periods: Number of periods in the cosine part of the decay.
See computation above.
alpha: See computation above.
beta: See computation above.
name: String. Optional name of the operation. Defaults to
'LinearCosineDecay'.
"""
super(LinearCosineDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.num_periods = num_periods
self.alpha = alpha
self.beta = beta
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "LinearCosineDecay") as name:
initial_learning_rate = ops.convert_to_tensor_v2_with_dispatch(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
num_periods = math_ops.cast(self.num_periods, dtype)
alpha = math_ops.cast(self.alpha, dtype)
beta = math_ops.cast(self.beta, dtype)
global_step_recomp = math_ops.cast(step, dtype)
global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
linear_decayed = (decay_steps - global_step_recomp) / decay_steps
completed_fraction = global_step_recomp / decay_steps
fraction = 2.0 * num_periods * completed_fraction
cosine_decayed = 0.5 * (
1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
linear_cosine_decayed = (alpha + linear_decayed) * cosine_decayed + beta
return math_ops.multiply(initial_learning_rate, linear_cosine_decayed,
name=name)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"num_periods": self.num_periods,
"alpha": self.alpha,
"beta": self.beta,
"name": self.name
}
# Note: this code is still used by V1 APIs.
class NoisyLinearCosineDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses a noisy linear cosine decay schedule.
See [Bello et al., ICML2017] Neural Optimizer Search with RL.
https://arxiv.org/abs/1709.07417
For the idea of warm starts here controlled by `num_periods`,
see [Loshchilov & Hutter, ICLR2016] SGDR: Stochastic Gradient Descent
with Warm Restarts. https://arxiv.org/abs/1608.03983
Note that linear cosine decay is more aggressive than cosine decay and
larger initial learning rates can typically be used.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This schedule applies a noisy linear cosine decay
function to an optimizer step, given a provided initial learning rate.
It requires a `step` value to compute the decayed learning rate. You can
just pass a TensorFlow variable that you increment at each training step.
  The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
step = min(step, decay_steps)
    linear_decay = (decay_steps - step) / decay_steps
cosine_decay = 0.5 * (
1 + cos(pi * 2 * num_periods * step / decay_steps))
decayed = (alpha + linear_decay + eps_t) * cosine_decay + beta
return initial_learning_rate * decayed
```
where eps_t is 0-centered gaussian noise with variance
initial_variance / (1 + global_step) ** variance_decay
Example usage:
```python
decay_steps = 1000
lr_decayed_fn = (
tf.keras.experimental.NoisyLinearCosineDecay(
initial_learning_rate, decay_steps))
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate. The learning rate schedule is also serializable and
deserializable using `tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
initial_variance=1.0,
variance_decay=0.55,
num_periods=0.5,
alpha=0.0,
beta=0.001,
name=None):
"""Applies noisy linear cosine decay to the learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` Tensor or a Python
number. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Number of steps to decay over.
initial_variance: initial variance for the noise. See computation above.
variance_decay: decay for the noise's variance. See computation above.
num_periods: Number of periods in the cosine part of the decay.
See computation above.
alpha: See computation above.
beta: See computation above.
name: String. Optional name of the operation. Defaults to
'NoisyLinearCosineDecay'.
"""
super(NoisyLinearCosineDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.initial_variance = initial_variance
self.variance_decay = variance_decay
self.num_periods = num_periods
self.alpha = alpha
self.beta = beta
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "NoisyLinearCosineDecay") as name:
initial_learning_rate = ops.convert_to_tensor_v2_with_dispatch(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
initial_variance = math_ops.cast(self.initial_variance, dtype)
variance_decay = math_ops.cast(self.variance_decay, dtype)
num_periods = math_ops.cast(self.num_periods, dtype)
alpha = math_ops.cast(self.alpha, dtype)
beta = math_ops.cast(self.beta, dtype)
global_step_recomp = math_ops.cast(step, dtype)
global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
linear_decayed = (decay_steps - global_step_recomp) / decay_steps
variance = initial_variance / (
math_ops.pow(1.0 + global_step_recomp, variance_decay))
std = math_ops.sqrt(variance)
noisy_linear_decayed = (
linear_decayed + random_ops.random_normal(
linear_decayed.shape, stddev=std))
completed_fraction = global_step_recomp / decay_steps
fraction = 2.0 * num_periods * completed_fraction
cosine_decayed = 0.5 * (
1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
noisy_linear_cosine_decayed = (
(alpha + noisy_linear_decayed) * cosine_decayed + beta)
return math_ops.multiply(
initial_learning_rate, noisy_linear_cosine_decayed, name=name)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"initial_variance": self.initial_variance,
"variance_decay": self.variance_decay,
"num_periods": self.num_periods,
"alpha": self.alpha,
"beta": self.beta,
"name": self.name
}
@keras_export("keras.optimizers.schedules.serialize")
def serialize(learning_rate_schedule):
return generic_utils.serialize_keras_object(learning_rate_schedule)
@keras_export("keras.optimizers.schedules.deserialize")
def deserialize(config, custom_objects=None):
return generic_utils.deserialize_keras_object(
config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name="decay")
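# --- Illustrative serialize/deserialize round trip (editor's addition,
# commented out so the module stays side-effect free). Only the `serialize`
# and `deserialize` helpers defined above and `ExponentialDecay` are used.
#
#   schedule = ExponentialDecay(
#       initial_learning_rate=0.1, decay_steps=1000, decay_rate=0.96)
#   config = serialize(schedule)    # dict with 'class_name' and 'config' keys
#   restored = deserialize(config)  # rebuilds an equivalent schedule
#   assert restored.get_config() == schedule.get_config()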
| tensorflow/tensorflow | tensorflow/python/keras/optimizer_v2/learning_rate_schedule.py | Python | apache-2.0 | 39,529 |
# Author: Mathieu Blondel <[email protected]>
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
plt.figure("scikit-learn parallel %s benchmark results" % func.__name__)
plt.plot(sample_sizes, one_core, label="one core")
plt.plot(sample_sizes, multi_core, label="multi core")
plt.xlabel("n_samples")
plt.ylabel("Time (s)")
plt.title("Parallel %s" % func.__name__)
plt.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
plt.show()
| manhhomienbienthuy/scikit-learn | benchmarks/bench_plot_parallel_pairwise.py | Python | bsd-3-clause | 1,272 |
#!/usr/bin/env python
'''relay handling module'''
import time
from pymavlink import mavutil
from MAVProxy.modules.lib import mp_module
class RelayModule(mp_module.MPModule):
def __init__(self, mpstate):
super(RelayModule, self).__init__(mpstate, "relay")
self.add_command('relay', self.cmd_relay, "relay commands")
self.add_command('servo', self.cmd_servo, "servo commands")
self.add_command('motortest', self.cmd_motortest, "motortest commands")
def cmd_relay(self, args):
'''set relays'''
if len(args) == 0 or args[0] not in ['set', 'repeat']:
print("Usage: relay <set|repeat>")
return
if args[0] == "set":
if len(args) < 3:
print("Usage: relay set <RELAY_NUM> <0|1>")
return
self.master.mav.command_long_send(self.target_system,
self.target_component,
mavutil.mavlink.MAV_CMD_DO_SET_RELAY, 0,
int(args[1]), int(args[2]),
0, 0, 0, 0, 0)
if args[0] == "repeat":
if len(args) < 4:
print("Usage: relay repeat <RELAY_NUM> <COUNT> <PERIOD>")
return
self.master.mav.command_long_send(self.target_system,
self.target_component,
mavutil.mavlink.MAV_CMD_DO_REPEAT_RELAY, 0,
int(args[1]), int(args[2]), float(args[3]),
0, 0, 0, 0)
def cmd_servo(self, args):
'''set servos'''
if len(args) == 0 or args[0] not in ['set', 'repeat']:
print("Usage: servo <set|repeat>")
return
if args[0] == "set":
if len(args) < 3:
print("Usage: servo set <SERVO_NUM> <PWM>")
return
self.master.mav.command_long_send(self.target_system,
self.target_component,
mavutil.mavlink.MAV_CMD_DO_SET_SERVO, 0,
int(args[1]), int(args[2]),
0, 0, 0, 0, 0)
if args[0] == "repeat":
if len(args) < 5:
print("Usage: servo repeat <SERVO_NUM> <PWM> <COUNT> <PERIOD>")
return
self.master.mav.command_long_send(self.target_system,
self.target_component,
mavutil.mavlink.MAV_CMD_DO_REPEAT_SERVO, 0,
int(args[1]), int(args[2]), int(args[3]), float(args[4]),
0, 0, 0)
def cmd_motortest(self, args):
'''run motortests on copter'''
if len(args) != 4:
print("Usage: motortest motornum type value timeout")
return
self.master.mav.command_long_send(self.target_system,
0,
mavutil.mavlink.MAV_CMD_DO_MOTOR_TEST, 0,
int(args[0]), int(args[1]), int(args[2]), int(args[3]),
0, 0, 0)
def init(mpstate):
'''initialise module'''
return RelayModule(mpstate)
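# A usage sketch from the MAVProxy prompt once this module is loaded
# (argument order follows the Usage strings above; values are illustrative):
#
#   relay set 0 1        -> MAV_CMD_DO_SET_RELAY, switch relay 0 on
#   servo set 8 1500     -> MAV_CMD_DO_SET_SERVO, drive output 8 to 1500us
#   motortest 1 0 50 5   -> MAV_CMD_DO_MOTOR_TEST, motor 1, test type 0,
#                           value 50, 5 second timeout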
| fqez/JdeRobot | src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_relay.py | Python | gpl-3.0 | 3,625 |
#!/usr/bin/python
# -*- coding: utf8 -*-
from report_aeroo.ctt_objects import ctt_currency
class trl(ctt_currency):
def _init_currency(self):
self.language = u'tr_TR'
self.code = u'TRL'
self.fractions = 100
self.cur_singular = u' Lira'
# default plural form for currency
self.cur_plural = u' Lira'
self.frc_singular = u' kuruş'
# default plural form for fractions
self.frc_plural = u' kuruş'
        # grammatical genders: f - feminine, m - masculine, n - neuter
self.cur_gram_gender = 'm'
self.frc_gram_gender = 'm'
trl()
| dhp-denero/LibrERP | report_aeroo/ctt_languages/tr_TR/currencies/trl.py | Python | agpl-3.0 | 623 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Gandi driver for compute
"""
import sys
from datetime import datetime
from libcloud.common.gandi import BaseGandiDriver, GandiException,\
NetworkInterface, IPAddress, Disk
from libcloud.compute.base import StorageVolume
from libcloud.compute.types import NodeState, Provider
from libcloud.compute.base import Node, NodeDriver
from libcloud.compute.base import NodeSize, NodeImage, NodeLocation
NODE_STATE_MAP = {
'running': NodeState.RUNNING,
'halted': NodeState.TERMINATED,
'paused': NodeState.TERMINATED,
'locked': NodeState.TERMINATED,
'being_created': NodeState.PENDING,
'invalid': NodeState.UNKNOWN,
'legally_locked': NodeState.PENDING,
'deleted': NodeState.TERMINATED
}
NODE_PRICE_HOURLY_USD = 0.02
INSTANCE_TYPES = {
'small': {
'id': 'small',
'name': 'Small instance',
'cpu': 1,
'memory': 256,
'disk': 3,
'bandwidth': 10240,
},
'medium': {
'id': 'medium',
'name': 'Medium instance',
'cpu': 1,
'memory': 1024,
'disk': 20,
'bandwidth': 10240,
},
'large': {
'id': 'large',
'name': 'Large instance',
'cpu': 2,
'memory': 2048,
'disk': 50,
'bandwidth': 10240,
},
'x-large': {
'id': 'x-large',
'name': 'Extra Large instance',
'cpu': 4,
'memory': 4096,
'disk': 100,
'bandwidth': 10240,
},
}
class GandiNodeDriver(BaseGandiDriver, NodeDriver):
"""
Gandi node driver
"""
api_name = 'gandi'
friendly_name = 'Gandi.net'
website = 'http://www.gandi.net/'
country = 'FR'
type = Provider.GANDI
# TODO : which features to enable ?
features = {}
def __init__(self, *args, **kwargs):
"""
@inherits: :class:`NodeDriver.__init__`
"""
super(BaseGandiDriver, self).__init__(*args, **kwargs)
def _resource_info(self, type, id):
try:
obj = self.connection.request('hosting.%s.info' % type, int(id))
return obj.object
except Exception:
e = sys.exc_info()[1]
raise GandiException(1003, e)
return None
def _node_info(self, id):
return self._resource_info('vm', id)
def _volume_info(self, id):
return self._resource_info('disk', id)
# Generic methods for driver
def _to_node(self, vm):
return Node(
id=vm['id'],
name=vm['hostname'],
state=NODE_STATE_MAP.get(
vm['state'],
NodeState.UNKNOWN
),
public_ips=vm.get('ips', []),
private_ips=[],
driver=self,
extra={
'ai_active': vm.get('ai_active'),
'datacenter_id': vm.get('datacenter_id'),
'description': vm.get('description')
}
)
def _to_nodes(self, vms):
return [self._to_node(v) for v in vms]
def _to_volume(self, disk):
extra = {'can_snapshot': disk['can_snapshot']}
return StorageVolume(
id=disk['id'],
name=disk['name'],
size=int(disk['size']),
driver=self,
extra=extra)
def _to_volumes(self, disks):
return [self._to_volume(d) for d in disks]
def list_nodes(self):
vms = self.connection.request('hosting.vm.list').object
ips = self.connection.request('hosting.ip.list').object
for vm in vms:
vm['ips'] = []
for ip in ips:
if vm['ifaces_id'][0] == ip['iface_id']:
ip = ip.get('ip', None)
if ip:
vm['ips'].append(ip)
nodes = self._to_nodes(vms)
return nodes
def reboot_node(self, node):
op = self.connection.request('hosting.vm.reboot', int(node.id))
self._wait_operation(op.object['id'])
vm = self._node_info(int(node.id))
if vm['state'] == 'running':
return True
return False
def destroy_node(self, node):
vm = self._node_info(node.id)
if vm['state'] == 'running':
# Send vm_stop and wait for accomplish
op_stop = self.connection.request('hosting.vm.stop', int(node.id))
if not self._wait_operation(op_stop.object['id']):
raise GandiException(1010, 'vm.stop failed')
# Delete
op = self.connection.request('hosting.vm.delete', int(node.id))
if self._wait_operation(op.object['id']):
return True
return False
def deploy_node(self, **kwargs):
"""
deploy_node is not implemented for gandi driver
:rtype: ``bool``
"""
raise NotImplementedError(
'deploy_node not implemented for gandi driver')
def create_node(self, **kwargs):
"""
Create a new Gandi node
:keyword name: String with a name for this new node (required)
:type name: ``str``
:keyword image: OS Image to boot on node. (required)
:type image: :class:`NodeImage`
        :keyword location: Which data center to create a node in. (required)
:type location: :class:`NodeLocation`
:keyword size: The size of resources allocated to this node.
(required)
:type size: :class:`NodeSize`
:keyword login: user name to create for login on machine (required)
:type login: ``str``
:keyword password: password for user that'll be created (required)
:type password: ``str``
:keyword inet_family: version of ip to use, default 4 (optional)
:type inet_family: ``int``
:rtype: :class:`Node`
"""
if kwargs.get('login') is None or kwargs.get('password') is None:
raise GandiException(
1020, 'login and password must be defined for node creation')
location = kwargs.get('location')
if location and isinstance(location, NodeLocation):
dc_id = int(location.id)
else:
raise GandiException(
1021, 'location must be a subclass of NodeLocation')
size = kwargs.get('size')
        if not size or not isinstance(size, NodeSize):
raise GandiException(
1022, 'size must be a subclass of NodeSize')
# If size name is in INSTANCE_TYPE we use new rating model
instance = INSTANCE_TYPES.get(size.id)
cores = instance['cpu'] if instance else int(size.id)
src_disk_id = int(kwargs['image'].id)
disk_spec = {
'datacenter_id': dc_id,
'name': 'disk_%s' % kwargs['name']
}
vm_spec = {
'datacenter_id': dc_id,
'hostname': kwargs['name'],
'login': kwargs['login'],
'password': kwargs['password'], # TODO : use NodeAuthPassword
'memory': int(size.ram),
'cores': cores,
'bandwidth': int(size.bandwidth),
'ip_version': kwargs.get('inet_family', 4),
}
# Call create_from helper api. Return 3 operations : disk_create,
# iface_create,vm_create
(op_disk, op_iface, op_vm) = self.connection.request(
'hosting.vm.create_from',
vm_spec, disk_spec, src_disk_id
).object
# We wait for vm_create to finish
if self._wait_operation(op_vm['id']):
# after successful operation, get ip information
# thru first interface
node = self._node_info(op_vm['vm_id'])
ifaces = node.get('ifaces')
if len(ifaces) > 0:
ips = ifaces[0].get('ips')
if len(ips) > 0:
node['ip'] = ips[0]['ip']
return self._to_node(node)
return None
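    # A minimal call sketch, assuming a driver built through libcloud's usual
    # get_driver(Provider.GANDI)(api_key) entry point and an image, size and
    # location taken from list_images(), list_sizes() and list_locations();
    # all names and values here are illustrative:
    #
    #   node = driver.create_node(name='web1', image=image, size=size,
    #                             location=location, login='admin',
    #                             password='s3cret')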
def _to_image(self, img):
return NodeImage(
id=img['disk_id'],
name=img['label'],
driver=self.connection.driver
)
def list_images(self, location=None):
try:
if location:
filtering = {'datacenter_id': int(location.id)}
else:
filtering = {}
images = self.connection.request('hosting.image.list', filtering)
return [self._to_image(i) for i in images.object]
except Exception:
e = sys.exc_info()[1]
raise GandiException(1011, e)
def _to_size(self, id, size):
return NodeSize(
id=id,
name='%s cores' % id,
ram=size['memory'],
disk=size['disk'],
bandwidth=size['bandwidth'],
price=(self._get_size_price(size_id='1') * id),
driver=self.connection.driver,
)
def _instance_type_to_size(self, instance):
return NodeSize(
id=instance['id'],
name=instance['name'],
ram=instance['memory'],
disk=instance['disk'],
bandwidth=instance['bandwidth'],
price=self._get_size_price(size_id=instance['id']),
driver=self.connection.driver,
)
def list_instance_type(self, location=None):
return [self._instance_type_to_size(instance)
for name, instance in INSTANCE_TYPES.items()]
def list_sizes(self, location=None):
account = self.connection.request('hosting.account.info').object
if account.get('rating_enabled'):
# This account use new rating model
return self.list_instance_type(location)
# Look for available shares, and return a list of share_definition
available_res = account['resources']['available']
if available_res['shares'] == 0:
return None
else:
share_def = account['share_definition']
available_cores = available_res['cores']
# 0.75 core given when creating a server
max_core = int(available_cores + 0.75)
shares = []
if available_res['servers'] < 1:
# No server quota, no way
return shares
for i in range(1, max_core + 1):
                share = {'id': i}
share_is_available = True
for k in ['memory', 'disk', 'bandwidth']:
if share_def[k] * i > available_res[k]:
# We run out for at least one resource inside
share_is_available = False
else:
share[k] = share_def[k] * i
if share_is_available:
nb_core = i
shares.append(self._to_size(nb_core, share))
return shares
def _to_loc(self, loc):
return NodeLocation(
id=loc['id'],
name=loc['name'],
country=loc['country'],
driver=self
)
def list_locations(self):
res = self.connection.request('hosting.datacenter.list')
return [self._to_loc(l) for l in res.object]
def list_volumes(self):
"""
:rtype: ``list`` of :class:`StorageVolume`
"""
res = self.connection.request('hosting.disk.list', {})
return self._to_volumes(res.object)
def create_volume(self, size, name, location=None, snapshot=None):
disk_param = {
'name': name,
'size': int(size),
'datacenter_id': int(location.id)
}
if snapshot:
op = self.connection.request('hosting.disk.create_from',
disk_param, int(snapshot.id))
else:
op = self.connection.request('hosting.disk.create', disk_param)
if self._wait_operation(op.object['id']):
disk = self._volume_info(op.object['disk_id'])
return self._to_volume(disk)
return None
def attach_volume(self, node, volume, device=None):
op = self.connection.request('hosting.vm.disk_attach',
int(node.id), int(volume.id))
if self._wait_operation(op.object['id']):
return True
return False
def detach_volume(self, node, volume):
"""
Detaches a volume from a node.
:param node: Node which should be used
:type node: :class:`Node`
:param volume: Volume to be detached
:type volume: :class:`StorageVolume`
:rtype: ``bool``
"""
op = self.connection.request('hosting.vm.disk_detach',
int(node.id), int(volume.id))
if self._wait_operation(op.object['id']):
return True
return False
def destroy_volume(self, volume):
op = self.connection.request('hosting.disk.delete', int(volume.id))
if self._wait_operation(op.object['id']):
return True
return False
def _to_iface(self, iface):
ips = []
for ip in iface.get('ips', []):
new_ip = IPAddress(
ip['id'],
NODE_STATE_MAP.get(
ip['state'],
NodeState.UNKNOWN
),
ip['ip'],
self.connection.driver,
version=ip.get('version'),
extra={'reverse': ip['reverse']}
)
ips.append(new_ip)
return NetworkInterface(
iface['id'],
NODE_STATE_MAP.get(
iface['state'],
NodeState.UNKNOWN
),
mac_address=None,
driver=self.connection.driver,
ips=ips,
node_id=iface.get('vm_id'),
extra={'bandwidth': iface['bandwidth']},
)
def _to_ifaces(self, ifaces):
return [self._to_iface(i) for i in ifaces]
def ex_list_interfaces(self):
"""
Specific method to list network interfaces
:rtype: ``list`` of :class:`GandiNetworkInterface`
"""
ifaces = self.connection.request('hosting.iface.list').object
ips = self.connection.request('hosting.ip.list').object
for iface in ifaces:
iface['ips'] = list(
filter(lambda i: i['iface_id'] == iface['id'], ips))
return self._to_ifaces(ifaces)
def _to_disk(self, element):
disk = Disk(
id=element['id'],
state=NODE_STATE_MAP.get(
element['state'],
NodeState.UNKNOWN
),
name=element['name'],
driver=self.connection.driver,
size=element['size'],
extra={'can_snapshot': element['can_snapshot']}
)
return disk
def _to_disks(self, elements):
return [self._to_disk(el) for el in elements]
def ex_list_disks(self):
"""
Specific method to list all disk
:rtype: ``list`` of :class:`GandiDisk`
"""
res = self.connection.request('hosting.disk.list', {})
return self._to_disks(res.object)
def ex_node_attach_disk(self, node, disk):
"""
Specific method to attach a disk to a node
:param node: Node which should be used
:type node: :class:`Node`
:param disk: Disk which should be used
:type disk: :class:`GandiDisk`
:rtype: ``bool``
"""
op = self.connection.request('hosting.vm.disk_attach',
int(node.id), int(disk.id))
if self._wait_operation(op.object['id']):
return True
return False
def ex_node_detach_disk(self, node, disk):
"""
Specific method to detach a disk from a node
:param node: Node which should be used
:type node: :class:`Node`
:param disk: Disk which should be used
:type disk: :class:`GandiDisk`
:rtype: ``bool``
"""
op = self.connection.request('hosting.vm.disk_detach',
int(node.id), int(disk.id))
if self._wait_operation(op.object['id']):
return True
return False
def ex_node_attach_interface(self, node, iface):
"""
Specific method to attach an interface to a node
:param node: Node which should be used
:type node: :class:`Node`
:param iface: Network interface which should be used
:type iface: :class:`GandiNetworkInterface`
:rtype: ``bool``
"""
op = self.connection.request('hosting.vm.iface_attach',
int(node.id), int(iface.id))
if self._wait_operation(op.object['id']):
return True
return False
def ex_node_detach_interface(self, node, iface):
"""
Specific method to detach an interface from a node
:param node: Node which should be used
:type node: :class:`Node`
:param iface: Network interface which should be used
:type iface: :class:`GandiNetworkInterface`
:rtype: ``bool``
"""
op = self.connection.request('hosting.vm.iface_detach',
int(node.id), int(iface.id))
if self._wait_operation(op.object['id']):
return True
return False
def ex_snapshot_disk(self, disk, name=None):
"""
Specific method to make a snapshot of a disk
:param disk: Disk which should be used
:type disk: :class:`GandiDisk`
:param name: Name which should be used
:type name: ``str``
:rtype: ``bool``
"""
if not disk.extra.get('can_snapshot'):
raise GandiException(1021, 'Disk %s can\'t snapshot' % disk.id)
if not name:
suffix = datetime.today().strftime('%Y%m%d')
name = 'snap_%s' % (suffix)
op = self.connection.request(
'hosting.disk.create_from',
{'name': name, 'type': 'snapshot', },
int(disk.id),
)
if self._wait_operation(op.object['id']):
return True
return False
def ex_update_disk(self, disk, new_size=None, new_name=None):
"""Specific method to update size or name of a disk
WARNING: if a server is attached it'll be rebooted
:param disk: Disk which should be used
:type disk: :class:`GandiDisk`
:param new_size: New size
:type new_size: ``int``
:param new_name: New name
:type new_name: ``str``
:rtype: ``bool``
"""
params = {}
if new_size:
params.update({'size': new_size})
if new_name:
params.update({'name': new_name})
op = self.connection.request('hosting.disk.update',
int(disk.id),
params)
if self._wait_operation(op.object['id']):
return True
return False
| sgammon/libcloud | libcloud/compute/drivers/gandi.py | Python | apache-2.0 | 20,171 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013, Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Tatiana Mazur
from django.conf.urls.defaults import patterns # noqa
from django.conf.urls.defaults import url # noqa
from openstack_dashboard.dashboards.project.vpn import views
urlpatterns = patterns('openstack_dashboard.dashboards.project.vpn.views',
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^addikepolicy$',
views.AddIKEPolicyView.as_view(), name='addikepolicy'),
url(r'^addipsecpolicy$',
views.AddIPSecPolicyView.as_view(), name='addipsecpolicy'),
url(r'^addipsecsiteconnection$',
views.AddIPSecSiteConnectionView.as_view(),
name='addipsecsiteconnection'),
url(r'^addvpnservice$',
views.AddVPNServiceView.as_view(), name='addvpnservice'),
url(r'^ikepolicy/(?P<ikepolicy_id>[^/]+)/$',
views.IKEPolicyDetailsView.as_view(), name='ikepolicydetails'),
url(r'^ipsecpolicy/(?P<ipsecpolicy_id>[^/]+)/$',
views.IPSecPolicyDetailsView.as_view(), name='ipsecpolicydetails'),
url(r'^vpnservice/(?P<vpnservice_id>[^/]+)/$',
views.VPNServiceDetailsView.as_view(), name='vpnservicedetails'),
url(r'^ipsecsiteconnection/(?P<ipsecsiteconnection_id>[^/]+)/$',
views.IPSecSiteConnectionDetailsView.as_view(),
name='ipsecsiteconnectiondetails'))
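# Each named route above is resolved elsewhere in the dashboard with Django's
# reverse(), typically through the dashboard namespace, e.g.
# reverse('horizon:project:vpn:ikepolicydetails', args=[ikepolicy_id]).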
| Havate/havate-openstack | proto-build/gui/horizon/Horizon_GUI/openstack_dashboard/dashboards/project/vpn/urls.py | Python | apache-2.0 | 1,933 |
import calendar
import time
from email.utils import formatdate, parsedate, parsedate_tz
from datetime import datetime, timedelta
TIME_FMT = "%a, %d %b %Y %H:%M:%S GMT"
def expire_after(delta, date=None):
date = date or datetime.utcnow()
return date + delta
def datetime_to_header(dt):
return formatdate(calendar.timegm(dt.timetuple()))
class BaseHeuristic(object):
def warning(self, response):
"""
Return a valid 1xx warning header value describing the cache
adjustments.
        The response is provided to allow warnings like 113
http://tools.ietf.org/html/rfc7234#section-5.5.4 where we need
to explicitly say response is over 24 hours old.
"""
return '110 - "Response is Stale"'
def update_headers(self, response):
"""Update the response headers with any new headers.
NOTE: This SHOULD always include some Warning header to
signify that the response was cached by the client, not
by way of the provided headers.
"""
return {}
def apply(self, response):
updated_headers = self.update_headers(response)
if updated_headers:
response.headers.update(updated_headers)
warning_header_value = self.warning(response)
if warning_header_value is not None:
response.headers.update({'Warning': warning_header_value})
return response
class OneDayCache(BaseHeuristic):
"""
Cache the response by providing an expires 1 day in the
future.
"""
def update_headers(self, response):
headers = {}
if 'expires' not in response.headers:
date = parsedate(response.headers['date'])
expires = expire_after(timedelta(days=1),
date=datetime(*date[:6]))
headers['expires'] = datetime_to_header(expires)
headers['cache-control'] = 'public'
return headers
class ExpiresAfter(BaseHeuristic):
"""
Cache **all** requests for a defined time period.
"""
def __init__(self, **kw):
self.delta = timedelta(**kw)
def update_headers(self, response):
expires = expire_after(self.delta)
return {
'expires': datetime_to_header(expires),
'cache-control': 'public',
}
def warning(self, response):
tmpl = '110 - Automatically cached for %s. Response might be stale'
return tmpl % self.delta
class LastModified(BaseHeuristic):
"""
If there is no Expires header already, fall back on Last-Modified
using the heuristic from
http://tools.ietf.org/html/rfc7234#section-4.2.2
to calculate a reasonable value.
Firefox also does something like this per
https://developer.mozilla.org/en-US/docs/Web/HTTP/Caching_FAQ
http://lxr.mozilla.org/mozilla-release/source/netwerk/protocol/http/nsHttpResponseHead.cpp#397
    Unlike Mozilla, we limit this to 24 hours.
"""
cacheable_by_default_statuses = set([
200, 203, 204, 206, 300, 301, 404, 405, 410, 414, 501
])
def update_headers(self, resp):
headers = resp.headers
if 'expires' in headers:
return {}
if 'cache-control' in headers and headers['cache-control'] != 'public':
return {}
if resp.status not in self.cacheable_by_default_statuses:
return {}
if 'date' not in headers or 'last-modified' not in headers:
return {}
date = calendar.timegm(parsedate_tz(headers['date']))
last_modified = parsedate(headers['last-modified'])
if date is None or last_modified is None:
return {}
now = time.time()
current_age = max(0, now - date)
delta = date - calendar.timegm(last_modified)
freshness_lifetime = max(0, min(delta / 10, 24 * 3600))
if freshness_lifetime <= current_age:
return {}
expires = date + freshness_lifetime
return {'expires': time.strftime(TIME_FMT, time.gmtime(expires))}
def warning(self, resp):
return None
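# A minimal usage sketch for the heuristics above: a BaseHeuristic instance is
# handed to CacheControl(), which applies it to responses before they are
# considered for caching. The imports assume the standalone `cachecontrol` and
# `requests` distributions; in pip's vendored copy the package path differs.
def _example_session_with_heuristic():
    """Sketch: wrap a requests session so every response is cached for an hour."""
    import requests
    from cachecontrol import CacheControl

    return CacheControl(requests.Session(), heuristic=ExpiresAfter(hours=1))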
| kushalbhola/MyStuff | venv/Lib/site-packages/pip/_vendor/cachecontrol/heuristics.py | Python | apache-2.0 | 4,144 |
"""Support for MySensors binary sensors."""
from homeassistant.components import mysensors
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES, DOMAIN, BinarySensorDevice)
from homeassistant.const import STATE_ON
SENSORS = {
'S_DOOR': 'door',
'S_MOTION': 'motion',
'S_SMOKE': 'smoke',
'S_SPRINKLER': 'safety',
'S_WATER_LEAK': 'safety',
'S_SOUND': 'sound',
'S_VIBRATION': 'vibration',
'S_MOISTURE': 'moisture',
}
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up the mysensors platform for binary sensors."""
mysensors.setup_mysensors_platform(
hass, DOMAIN, discovery_info, MySensorsBinarySensor,
async_add_entities=async_add_entities)
class MySensorsBinarySensor(
mysensors.device.MySensorsEntity, BinarySensorDevice):
"""Representation of a MySensors Binary Sensor child node."""
@property
def is_on(self):
"""Return True if the binary sensor is on."""
return self._values.get(self.value_type) == STATE_ON
@property
def device_class(self):
"""Return the class of this sensor, from DEVICE_CLASSES."""
pres = self.gateway.const.Presentation
device_class = SENSORS.get(pres(self.child_type).name)
if device_class in DEVICE_CLASSES:
return device_class
return None
| jnewland/home-assistant | homeassistant/components/mysensors/binary_sensor.py | Python | apache-2.0 | 1,398 |
# sql/crud.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Functions used by compiler.py to determine the parameters rendered
within INSERT and UPDATE statements.
"""
from .. import util
from .. import exc
from . import dml
from . import elements
import operator
REQUIRED = util.symbol('REQUIRED', """
Placeholder for the value within a :class:`.BindParameter`
which is required to be present when the statement is passed
to :meth:`.Connection.execute`.
This symbol is typically used when a :func:`.expression.insert`
or :func:`.expression.update` statement is compiled without parameter
values present.
""")
ISINSERT = util.symbol('ISINSERT')
ISUPDATE = util.symbol('ISUPDATE')
ISDELETE = util.symbol('ISDELETE')
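# When an insert() or update() construct is compiled without parameter values
# (compiler.column_keys is None and stmt.parameters is None), _get_crud_params
# binds every column to the REQUIRED placeholder above; for a hypothetical
# table "t" with columns x and y this is what renders str(t.insert()) as
# "INSERT INTO t (x, y) VALUES (:x, :y)", with the actual values supplied
# later to Connection.execute().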
def _setup_crud_params(compiler, stmt, local_stmt_type, **kw):
restore_isinsert = compiler.isinsert
restore_isupdate = compiler.isupdate
restore_isdelete = compiler.isdelete
should_restore = (
restore_isinsert or restore_isupdate or restore_isdelete
) or len(compiler.stack) > 1
if local_stmt_type is ISINSERT:
compiler.isupdate = False
compiler.isinsert = True
elif local_stmt_type is ISUPDATE:
compiler.isupdate = True
compiler.isinsert = False
elif local_stmt_type is ISDELETE:
if not should_restore:
compiler.isdelete = True
else:
assert False, "ISINSERT, ISUPDATE, or ISDELETE expected"
try:
if local_stmt_type in (ISINSERT, ISUPDATE):
return _get_crud_params(compiler, stmt, **kw)
finally:
if should_restore:
compiler.isinsert = restore_isinsert
compiler.isupdate = restore_isupdate
compiler.isdelete = restore_isdelete
def _get_crud_params(compiler, stmt, **kw):
"""create a set of tuples representing column/string pairs for use
in an INSERT or UPDATE statement.
Also generates the Compiled object's postfetch, prefetch, and
returning column collections, used for default handling and ultimately
populating the ResultProxy's prefetch_cols() and postfetch_cols()
collections.
"""
compiler.postfetch = []
compiler.insert_prefetch = []
compiler.update_prefetch = []
compiler.returning = []
# no parameters in the statement, no parameters in the
# compiled params - return binds for all columns
if compiler.column_keys is None and stmt.parameters is None:
return [
(c, _create_bind_param(
compiler, c, None, required=True))
for c in stmt.table.columns
]
if stmt._has_multi_parameters:
stmt_parameters = stmt.parameters[0]
else:
stmt_parameters = stmt.parameters
# getters - these are normally just column.key,
# but in the case of mysql multi-table update, the rules for
# .key must conditionally take tablename into account
_column_as_key, _getattr_col_key, _col_bind_name = \
_key_getters_for_crud_column(compiler, stmt)
# if we have statement parameters - set defaults in the
# compiled params
if compiler.column_keys is None:
parameters = {}
else:
parameters = dict((_column_as_key(key), REQUIRED)
for key in compiler.column_keys
if not stmt_parameters or
key not in stmt_parameters)
# create a list of column assignment clauses as tuples
values = []
if stmt_parameters is not None:
_get_stmt_parameters_params(
compiler,
parameters, stmt_parameters, _column_as_key, values, kw)
check_columns = {}
# special logic that only occurs for multi-table UPDATE
# statements
if compiler.isupdate and stmt._extra_froms and stmt_parameters:
_get_multitable_params(
compiler, stmt, stmt_parameters, check_columns,
_col_bind_name, _getattr_col_key, values, kw)
if compiler.isinsert and stmt.select_names:
_scan_insert_from_select_cols(
compiler, stmt, parameters,
_getattr_col_key, _column_as_key,
_col_bind_name, check_columns, values, kw)
else:
_scan_cols(
compiler, stmt, parameters,
_getattr_col_key, _column_as_key,
_col_bind_name, check_columns, values, kw)
if parameters and stmt_parameters:
check = set(parameters).intersection(
_column_as_key(k) for k in stmt_parameters
).difference(check_columns)
if check:
raise exc.CompileError(
"Unconsumed column names: %s" %
(", ".join("%s" % c for c in check))
)
if stmt._has_multi_parameters:
values = _extend_values_for_multiparams(compiler, stmt, values, kw)
return values
def _create_bind_param(
compiler, col, value, process=True,
required=False, name=None, **kw):
if name is None:
name = col.key
bindparam = elements.BindParameter(
name, value, type_=col.type, required=required)
bindparam._is_crud = True
if process:
bindparam = bindparam._compiler_dispatch(compiler, **kw)
return bindparam
def _key_getters_for_crud_column(compiler, stmt):
if compiler.isupdate and stmt._extra_froms:
# when extra tables are present, refer to the columns
# in those extra tables as table-qualified, including in
# dictionaries and when rendering bind param names.
# the "main" table of the statement remains unqualified,
# allowing the most compatibility with a non-multi-table
# statement.
_et = set(stmt._extra_froms)
def _column_as_key(key):
str_key = elements._column_as_key(key)
if hasattr(key, 'table') and key.table in _et:
return (key.table.name, str_key)
else:
return str_key
def _getattr_col_key(col):
if col.table in _et:
return (col.table.name, col.key)
else:
return col.key
def _col_bind_name(col):
if col.table in _et:
return "%s_%s" % (col.table.name, col.key)
else:
return col.key
else:
_column_as_key = elements._column_as_key
_getattr_col_key = _col_bind_name = operator.attrgetter("key")
return _column_as_key, _getattr_col_key, _col_bind_name
def _scan_insert_from_select_cols(
compiler, stmt, parameters, _getattr_col_key,
_column_as_key, _col_bind_name, check_columns, values, kw):
need_pks, implicit_returning, \
implicit_return_defaults, postfetch_lastrowid = \
_get_returning_modifiers(compiler, stmt)
cols = [stmt.table.c[_column_as_key(name)]
for name in stmt.select_names]
compiler._insert_from_select = stmt.select
add_select_cols = []
if stmt.include_insert_from_select_defaults:
col_set = set(cols)
for col in stmt.table.columns:
if col not in col_set and col.default:
cols.append(col)
for c in cols:
col_key = _getattr_col_key(c)
if col_key in parameters and col_key not in check_columns:
parameters.pop(col_key)
values.append((c, None))
else:
_append_param_insert_select_hasdefault(
compiler, stmt, c, add_select_cols, kw)
if add_select_cols:
values.extend(add_select_cols)
compiler._insert_from_select = compiler._insert_from_select._generate()
compiler._insert_from_select._raw_columns = \
tuple(compiler._insert_from_select._raw_columns) + tuple(
expr for col, expr in add_select_cols)
def _scan_cols(
compiler, stmt, parameters, _getattr_col_key,
_column_as_key, _col_bind_name, check_columns, values, kw):
need_pks, implicit_returning, \
implicit_return_defaults, postfetch_lastrowid = \
_get_returning_modifiers(compiler, stmt)
if stmt._parameter_ordering:
parameter_ordering = [
_column_as_key(key) for key in stmt._parameter_ordering
]
ordered_keys = set(parameter_ordering)
cols = [
stmt.table.c[key] for key in parameter_ordering
] + [
c for c in stmt.table.c if c.key not in ordered_keys
]
else:
cols = stmt.table.columns
for c in cols:
col_key = _getattr_col_key(c)
if col_key in parameters and col_key not in check_columns:
_append_param_parameter(
compiler, stmt, c, col_key, parameters, _col_bind_name,
implicit_returning, implicit_return_defaults, values, kw)
elif compiler.isinsert:
if c.primary_key and \
need_pks and \
(
implicit_returning or
not postfetch_lastrowid or
c is not stmt.table._autoincrement_column
):
if implicit_returning:
_append_param_insert_pk_returning(
compiler, stmt, c, values, kw)
else:
_append_param_insert_pk(compiler, stmt, c, values, kw)
elif c.default is not None:
_append_param_insert_hasdefault(
compiler, stmt, c, implicit_return_defaults,
values, kw)
elif c.server_default is not None:
if implicit_return_defaults and \
c in implicit_return_defaults:
compiler.returning.append(c)
elif not c.primary_key:
compiler.postfetch.append(c)
elif implicit_return_defaults and \
c in implicit_return_defaults:
compiler.returning.append(c)
elif c.primary_key and \
c is not stmt.table._autoincrement_column and \
not c.nullable:
_warn_pk_with_no_anticipated_value(c)
elif compiler.isupdate:
_append_param_update(
compiler, stmt, c, implicit_return_defaults, values, kw)
def _append_param_parameter(
compiler, stmt, c, col_key, parameters, _col_bind_name,
implicit_returning, implicit_return_defaults, values, kw):
value = parameters.pop(col_key)
if elements._is_literal(value):
value = _create_bind_param(
compiler, c, value, required=value is REQUIRED,
name=_col_bind_name(c)
if not stmt._has_multi_parameters
else "%s_m0" % _col_bind_name(c),
**kw
)
else:
if isinstance(value, elements.BindParameter) and \
value.type._isnull:
value = value._clone()
value.type = c.type
if c.primary_key and implicit_returning:
compiler.returning.append(c)
value = compiler.process(value.self_group(), **kw)
elif implicit_return_defaults and \
c in implicit_return_defaults:
compiler.returning.append(c)
value = compiler.process(value.self_group(), **kw)
else:
compiler.postfetch.append(c)
value = compiler.process(value.self_group(), **kw)
values.append((c, value))
def _append_param_insert_pk_returning(compiler, stmt, c, values, kw):
"""Create a primary key expression in the INSERT statement and
possibly a RETURNING clause for it.
If the column has a Python-side default, we will create a bound
parameter for it and "pre-execute" the Python function. If
the column has a SQL expression default, or is a sequence,
we will add it directly into the INSERT statement and add a
RETURNING element to get the new value. If the column has a
server side default or is marked as the "autoincrement" column,
    we will add a RETURNING element to get at the value.
If all the above tests fail, that indicates a primary key column with no
noted default generation capabilities that has no parameter passed;
raise an exception.
"""
if c.default is not None:
if c.default.is_sequence:
if compiler.dialect.supports_sequences and \
(not c.default.optional or
not compiler.dialect.sequences_optional):
proc = compiler.process(c.default, **kw)
values.append((c, proc))
compiler.returning.append(c)
elif c.default.is_clause_element:
values.append(
(c, compiler.process(
c.default.arg.self_group(), **kw))
)
compiler.returning.append(c)
else:
values.append(
(c, _create_insert_prefetch_bind_param(compiler, c))
)
elif c is stmt.table._autoincrement_column or c.server_default is not None:
compiler.returning.append(c)
elif not c.nullable:
# no .default, no .server_default, not autoincrement, we have
# no indication this primary key column will have any value
_warn_pk_with_no_anticipated_value(c)
def _create_insert_prefetch_bind_param(compiler, c, process=True, name=None):
param = _create_bind_param(compiler, c, None, process=process, name=name)
compiler.insert_prefetch.append(c)
return param
def _create_update_prefetch_bind_param(compiler, c, process=True, name=None):
param = _create_bind_param(compiler, c, None, process=process, name=name)
compiler.update_prefetch.append(c)
return param
class _multiparam_column(elements.ColumnElement):
def __init__(self, original, index):
self.key = "%s_m%d" % (original.key, index + 1)
self.original = original
self.default = original.default
self.type = original.type
def __eq__(self, other):
return isinstance(other, _multiparam_column) and \
other.key == self.key and \
other.original == self.original
def _process_multiparam_default_bind(compiler, stmt, c, index, kw):
if not c.default:
raise exc.CompileError(
"INSERT value for column %s is explicitly rendered as a bound"
"parameter in the VALUES clause; "
"a Python-side value or SQL expression is required" % c)
elif c.default.is_clause_element:
return compiler.process(c.default.arg.self_group(), **kw)
else:
col = _multiparam_column(c, index)
if isinstance(stmt, dml.Insert):
return _create_insert_prefetch_bind_param(compiler, col)
else:
return _create_update_prefetch_bind_param(compiler, col)
def _append_param_insert_pk(compiler, stmt, c, values, kw):
"""Create a bound parameter in the INSERT statement to receive a
'prefetched' default value.
The 'prefetched' value indicates that we are to invoke a Python-side
    default function or explicit SQL expression before the INSERT statement
proceeds, so that we have a primary key value available.
    If the column has no noted default generation capabilities, it has
no value passed in either; raise an exception.
"""
if (
(
# column has a Python-side default
c.default is not None and
(
# and it won't be a Sequence
not c.default.is_sequence or
compiler.dialect.supports_sequences
)
)
or
(
# column is the "autoincrement column"
c is stmt.table._autoincrement_column and
(
# and it's either a "sequence" or a
# pre-executable "autoincrement" sequence
compiler.dialect.supports_sequences or
compiler.dialect.preexecute_autoincrement_sequences
)
)
):
values.append(
(c, _create_insert_prefetch_bind_param(compiler, c))
)
elif c.default is None and c.server_default is None and not c.nullable:
# no .default, no .server_default, not autoincrement, we have
# no indication this primary key column will have any value
_warn_pk_with_no_anticipated_value(c)
def _append_param_insert_hasdefault(
compiler, stmt, c, implicit_return_defaults, values, kw):
if c.default.is_sequence:
if compiler.dialect.supports_sequences and \
(not c.default.optional or
not compiler.dialect.sequences_optional):
proc = compiler.process(c.default, **kw)
values.append((c, proc))
if implicit_return_defaults and \
c in implicit_return_defaults:
compiler.returning.append(c)
elif not c.primary_key:
compiler.postfetch.append(c)
elif c.default.is_clause_element:
proc = compiler.process(c.default.arg.self_group(), **kw)
values.append((c, proc))
if implicit_return_defaults and \
c in implicit_return_defaults:
compiler.returning.append(c)
elif not c.primary_key:
# don't add primary key column to postfetch
compiler.postfetch.append(c)
else:
values.append(
(c, _create_insert_prefetch_bind_param(compiler, c))
)
def _append_param_insert_select_hasdefault(
compiler, stmt, c, values, kw):
if c.default.is_sequence:
if compiler.dialect.supports_sequences and \
(not c.default.optional or
not compiler.dialect.sequences_optional):
proc = c.default
values.append((c, proc))
elif c.default.is_clause_element:
proc = c.default.arg.self_group()
values.append((c, proc))
else:
values.append(
(c, _create_insert_prefetch_bind_param(compiler, c, process=False))
)
def _append_param_update(
compiler, stmt, c, implicit_return_defaults, values, kw):
if c.onupdate is not None and not c.onupdate.is_sequence:
if c.onupdate.is_clause_element:
values.append(
(c, compiler.process(
c.onupdate.arg.self_group(), **kw))
)
if implicit_return_defaults and \
c in implicit_return_defaults:
compiler.returning.append(c)
else:
compiler.postfetch.append(c)
else:
values.append(
(c, _create_update_prefetch_bind_param(compiler, c))
)
elif c.server_onupdate is not None:
if implicit_return_defaults and \
c in implicit_return_defaults:
compiler.returning.append(c)
else:
compiler.postfetch.append(c)
elif implicit_return_defaults and \
stmt._return_defaults is not True and \
c in implicit_return_defaults:
compiler.returning.append(c)
def _get_multitable_params(
compiler, stmt, stmt_parameters, check_columns,
_col_bind_name, _getattr_col_key, values, kw):
normalized_params = dict(
(elements._clause_element_as_expr(c), param)
for c, param in stmt_parameters.items()
)
affected_tables = set()
for t in stmt._extra_froms:
for c in t.c:
if c in normalized_params:
affected_tables.add(t)
check_columns[_getattr_col_key(c)] = c
value = normalized_params[c]
if elements._is_literal(value):
value = _create_bind_param(
compiler, c, value, required=value is REQUIRED,
name=_col_bind_name(c))
else:
compiler.postfetch.append(c)
value = compiler.process(value.self_group(), **kw)
values.append((c, value))
# determine tables which are actually to be updated - process onupdate
# and server_onupdate for these
for t in affected_tables:
for c in t.c:
if c in normalized_params:
continue
elif (c.onupdate is not None and not
c.onupdate.is_sequence):
if c.onupdate.is_clause_element:
values.append(
(c, compiler.process(
c.onupdate.arg.self_group(),
**kw)
)
)
compiler.postfetch.append(c)
else:
values.append(
(c, _create_update_prefetch_bind_param(
compiler, c, name=_col_bind_name(c)))
)
elif c.server_onupdate is not None:
compiler.postfetch.append(c)
def _extend_values_for_multiparams(compiler, stmt, values, kw):
values_0 = values
values = [values]
values.extend(
[
(
c,
(_create_bind_param(
compiler, c, row[c.key],
name="%s_m%d" % (c.key, i + 1)
) if elements._is_literal(row[c.key])
else compiler.process(
row[c.key].self_group(), **kw))
if c.key in row else
_process_multiparam_default_bind(compiler, stmt, c, i, kw)
)
for (c, param) in values_0
]
for i, row in enumerate(stmt.parameters[1:])
)
return values
def _get_stmt_parameters_params(
compiler, parameters, stmt_parameters, _column_as_key, values, kw):
for k, v in stmt_parameters.items():
colkey = _column_as_key(k)
if colkey is not None:
parameters.setdefault(colkey, v)
else:
# a non-Column expression on the left side;
# add it to values() in an "as-is" state,
# coercing right side to bound param
if elements._is_literal(v):
v = compiler.process(
elements.BindParameter(None, v, type_=k.type),
**kw)
else:
v = compiler.process(v.self_group(), **kw)
values.append((k, v))
def _get_returning_modifiers(compiler, stmt):
need_pks = compiler.isinsert and \
not compiler.inline and \
not stmt._returning and \
not stmt._has_multi_parameters
implicit_returning = need_pks and \
compiler.dialect.implicit_returning and \
stmt.table.implicit_returning
if compiler.isinsert:
implicit_return_defaults = (implicit_returning and
stmt._return_defaults)
elif compiler.isupdate:
implicit_return_defaults = (compiler.dialect.implicit_returning and
stmt.table.implicit_returning and
stmt._return_defaults)
else:
# this line is unused, currently we are always
# isinsert or isupdate
implicit_return_defaults = False # pragma: no cover
if implicit_return_defaults:
if stmt._return_defaults is True:
implicit_return_defaults = set(stmt.table.c)
else:
implicit_return_defaults = set(stmt._return_defaults)
postfetch_lastrowid = need_pks and compiler.dialect.postfetch_lastrowid
return need_pks, implicit_returning, \
implicit_return_defaults, postfetch_lastrowid
def _warn_pk_with_no_anticipated_value(c):
msg = (
"Column '%s.%s' is marked as a member of the "
"primary key for table '%s', "
"but has no Python-side or server-side default generator indicated, "
"nor does it indicate 'autoincrement=True' or 'nullable=True', "
"and no explicit value is passed. "
"Primary key columns typically may not store NULL."
%
(c.table.fullname, c.name, c.table.fullname))
if len(c.table.primary_key) > 1:
msg += (
" Note that as of SQLAlchemy 1.1, 'autoincrement=True' must be "
"indicated explicitly for composite (e.g. multicolumn) primary "
"keys if AUTO_INCREMENT/SERIAL/IDENTITY "
"behavior is expected for one of the columns in the primary key. "
"CREATE TABLE statements are impacted by this change as well on "
"most backends.")
util.warn(msg)
| pcu4dros/pandora-core | workspace/lib/python3.5/site-packages/sqlalchemy/sql/crud.py | Python | mit | 24,802 |
from gen import *
##########
# shared #
##########
flow_var[0] = """
(declare-fun tau () Real)
(declare-fun x1 () Real)
(declare-fun x2 () Real)
(declare-fun x3 () Real)
"""
flow_dec[0] = """
(define-ode flow_1 ((= d/dt[x1] (/ (- 5 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (^ x1 0.5))) 2))
(= d/dt[x2] (/ (+ 3 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x1 0.5) (^ x2 0.5)))) 4))
(= d/dt[x3] (/ (+ 4 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x2 0.5) (^ x3 0.5)))) 3))
(= d/dt[tau] 1)))
(define-ode flow_2 ((= d/dt[x1] (/ (- 5 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (^ x1 0.5))) 2))
(= d/dt[x2] (/ (+ 3 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x1 0.5) (^ x2 0.5)))) 4))
(= d/dt[x3] (/ (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x2 0.5) (^ x3 0.5))) 3))
(= d/dt[tau] 1)))
(define-ode flow_3 ((= d/dt[x1] (/ (- 5 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (^ x1 0.5))) 2))
(= d/dt[x2] (/ (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x1 0.5) (^ x2 0.5))) 4))
(= d/dt[x3] (/ (+ 4 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x2 0.5) (^ x3 0.5)))) 3))
(= d/dt[tau] 1)))
(define-ode flow_4 ((= d/dt[x1] (/ (- 5 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (^ x1 0.5))) 2))
(= d/dt[x2] (/ (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x1 0.5) (^ x2 0.5))) 4))
(= d/dt[x3] (/ (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x2 0.5) (^ x3 0.5))) 3))
(= d/dt[tau] 1)))
(define-ode flow_5 ((= d/dt[x1] (/ (* (* -0.5 (^ (* 2 9.80665) 0.5)) (^ x1 0.5)) 2))
(= d/dt[x2] (/ (+ 3 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x1 0.5) (^ x2 0.5)))) 4))
(= d/dt[x3] (/ (+ 4 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x2 0.5) (^ x3 0.5)))) 3))
(= d/dt[tau] 1)))
(define-ode flow_6 ((= d/dt[x1] (/ (* (* -0.5 (^ (* 2 9.80665) 0.5)) (^ x1 0.5)) 2))
(= d/dt[x2] (/ (+ 3 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x1 0.5) (^ x2 0.5)))) 4))
(= d/dt[x3] (/ (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x2 0.5) (^ x3 0.5))) 3))
(= d/dt[tau] 1)))
(define-ode flow_7 ((= d/dt[x1] (/ (* (* -0.5 (^ (* 2 9.80665) 0.5)) (^ x1 0.5)) 2))
(= d/dt[x2] (/ (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x1 0.5) (^ x2 0.5))) 4))
(= d/dt[x3] (/ (+ 4 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x2 0.5) (^ x3 0.5)))) 3))
(= d/dt[tau] 1)))
(define-ode flow_8 ((= d/dt[x1] (/ (* (* -0.5 (^ (* 2 9.80665) 0.5)) (^ x1 0.5)) 2))
(= d/dt[x2] (/ (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x1 0.5) (^ x2 0.5))) 4))
(= d/dt[x3] (/ (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x2 0.5) (^ x3 0.5))) 3))
(= d/dt[tau] 1)))
"""
state_dec[0] = """
(declare-fun time_{0} () Real)
(declare-fun tau_{0}_0 () Real)
(declare-fun tau_{0}_t () Real)
(declare-fun mode1_{0} () Bool)
(declare-fun x1_{0}_0 () Real)
(declare-fun x1_{0}_t () Real)
(declare-fun mode2_{0} () Bool)
(declare-fun x2_{0}_0 () Real)
(declare-fun x2_{0}_t () Real)
(declare-fun mode3_{0} () Bool)
(declare-fun x3_{0}_0 () Real)
(declare-fun x3_{0}_t () Real)
"""
state_val[0] = """
(assert (<= 0 time_{0})) (assert (<= time_{0} 1))
(assert (<= 0 tau_{0}_0)) (assert (<= tau_{0}_0 1))
(assert (<= 0 tau_{0}_t)) (assert (<= tau_{0}_t 1))
(assert (<= 0 x1_{0}_0)) (assert (<= x1_{0}_0 10))
(assert (<= 0 x1_{0}_t)) (assert (<= x1_{0}_t 10))
(assert (<= 0 x2_{0}_0)) (assert (<= x2_{0}_0 10))
(assert (<= 0 x2_{0}_t)) (assert (<= x2_{0}_t 10))
(assert (<= 0 x3_{0}_0)) (assert (<= x3_{0}_0 10))
(assert (<= 0 x3_{0}_t)) (assert (<= x3_{0}_t 10))
"""
cont_cond[0] = ["""
(assert (and (>= tau_{0}_0 0) (<= tau_{0}_0 1)
(>= tau_{0}_t 0) (<= tau_{0}_t 1)
(forall_t 1 [0 time_{0}] (>= tau_{0}_t 0))
(forall_t 2 [0 time_{0}] (<= tau_{0}_t 1))))
(assert (or (and (= mode1_{0} true) (= mode2_{0} true) (= mode3_{0} true)
(= [x1_{0}_t x2_{0}_t x3_{0}_t tau_{0}_t]
(integral 0. time_{0} [x1_{0}_0 x2_{0}_0 x3_{0}_0 tau_{0}_0] flow_1)))
(and (= mode1_{0} true) (= mode2_{0} true) (= mode3_{0} false)
(= [x1_{0}_t x2_{0}_t x3_{0}_t tau_{0}_t]
(integral 0. time_{0} [x1_{0}_0 x2_{0}_0 x3_{0}_0 tau_{0}_0] flow_2)))
(and (= mode1_{0} true) (= mode2_{0} false) (= mode3_{0} true)
(= [x1_{0}_t x2_{0}_t x3_{0}_t tau_{0}_t]
(integral 0. time_{0} [x1_{0}_0 x2_{0}_0 x3_{0}_0 tau_{0}_0] flow_3)))
(and (= mode1_{0} true) (= mode2_{0} false) (= mode3_{0} false)
(= [x1_{0}_t x2_{0}_t x3_{0}_t tau_{0}_t]
(integral 0. time_{0} [x1_{0}_0 x2_{0}_0 x3_{0}_0 tau_{0}_0] flow_4)))
(and (= mode1_{0} false) (= mode2_{0} true) (= mode3_{0} true)
(= [x1_{0}_t x2_{0}_t x3_{0}_t tau_{0}_t]
(integral 0. time_{0} [x1_{0}_0 x2_{0}_0 x3_{0}_0 tau_{0}_0] flow_5)))
(and (= mode1_{0} false) (= mode2_{0} true) (= mode3_{0} false)
(= [x1_{0}_t x2_{0}_t x3_{0}_t tau_{0}_t]
(integral 0. time_{0} [x1_{0}_0 x2_{0}_0 x3_{0}_0 tau_{0}_0] flow_6)))
(and (= mode1_{0} false) (= mode2_{0} false) (= mode3_{0} true)
(= [x1_{0}_t x2_{0}_t x3_{0}_t tau_{0}_t]
(integral 0. time_{0} [x1_{0}_0 x2_{0}_0 x3_{0}_0 tau_{0}_0] flow_7)))
(and (= mode1_{0} false) (= mode2_{0} false) (= mode3_{0} false)
(= [x1_{0}_t x2_{0}_t x3_{0}_t tau_{0}_t]
(integral 0. time_{0} [x1_{0}_0 x2_{0}_0 x3_{0}_0 tau_{0}_0] flow_8)))))"""]
jump_cond[0] = ["""
(assert (and (= tau_{0}_t 1) (= tau_{1}_0 0)))
(assert (and (= x1_{1}_0 x1_{0}_t)))
(assert (or (and (< x1_{0}_t 5) (= mode1_{1} true))
(and (>= x1_{0}_t 5) (= mode1_{1} false))))
(assert (and (= x2_{1}_0 x2_{0}_t)))
(assert (or (and (< x2_{0}_t 5) (= mode2_{1} true))
(and (>= x2_{0}_t 5) (= mode2_{1} false))))
(assert (and (= x3_{1}_0 x3_{0}_t)))
(assert (or (and (< x3_{0}_t 5) (= mode3_{1} true))
(and (>= x3_{0}_t 5) (= mode3_{1} false))))"""]
#############
# Init/Goal #
#############
init_cond = """
(assert (< 0.99 tau_{0}_0))
(assert
(and (> x1_{0}_0 (- 5 4)) (< x1_{0}_0 (+ 5 4))
(> x2_{0}_0 (- 5 4)) (< x2_{0}_0 (+ 5 4))
(> x3_{0}_0 (- 5 4)) (< x3_{0}_0 (+ 5 4))))
"""
goal_cond = """
(assert (< 0.99 tau_{0}_t))
(assert (not
(and (> x1_{0}_t (- 5 4)) (< x1_{0}_t (+ 5 4))
(> x2_{0}_t (- 5 4)) (< x2_{0}_t (+ 5 4))
(> x3_{0}_t (- 5 4)) (< x3_{0}_t (+ 5 4)))))
"""
import sys
try:
bound = int(sys.argv[1])
except:
print("Usage:", sys.argv[0], "<Bound>")
else:
generate(bound, 1, [0], 0, init_cond, goal_cond)
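# Usage sketch: "python water-triple-ind.py 5" hands bound 5 to generate()
# from the gen module imported above, which assembles the encoding from the
# shared flow/state/jump fragments and the init/goal conditions defined here.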
| wolvre/dreal | benchmarks/network/water/water-triple-ind.py | Python | gpl-2.0 | 6,830 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Daniel Hokka Zakrisson <[email protected]>
# (c) 2014, Ahti Kitsik <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: lineinfile
author:
- "Daniel Hokka Zakrissoni (@dhozac)"
- "Ahti Kitsik (@ahtik)"
extends_documentation_fragment:
- files
- validate
short_description: Ensure a particular line is in a file, or replace an
existing line using a back-referenced regular expression.
description:
- This module will search a file for a line, and ensure that it is present or absent.
- This is primarily useful when you want to change a single line in
a file only. See the M(replace) module if you want to change
multiple, similar lines or check M(blockinfile) if you want to insert/update/remove a block of lines in a file.
For other cases, see the M(copy) or M(template) modules.
version_added: "0.7"
options:
dest:
required: true
aliases: [ name, destfile ]
description:
- The file to modify.
regexp:
required: false
version_added: 1.7
description:
- The regular expression to look for in every line of the file. For
C(state=present), the pattern to replace if found; only the last line
found will be replaced. For C(state=absent), the pattern of the line
to remove. Uses Python regular expressions; see
U(http://docs.python.org/2/library/re.html).
state:
required: false
choices: [ present, absent ]
default: "present"
aliases: []
description:
- Whether the line should be there or not.
line:
required: false
description:
- Required for C(state=present). The line to insert/replace into the
file. If C(backrefs) is set, may contain backreferences that will get
expanded with the C(regexp) capture groups if the regexp matches.
backrefs:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "1.1"
description:
- Used with C(state=present). If set, line can contain backreferences
(both positional and named) that will get populated if the C(regexp)
matches. This flag changes the operation of the module slightly;
C(insertbefore) and C(insertafter) will be ignored, and if the C(regexp)
doesn't match anywhere in the file, the file will be left unchanged.
If the C(regexp) does match, the last matching line will be replaced by
the expanded line parameter.
insertafter:
required: false
default: EOF
description:
- Used with C(state=present). If specified, the line will be inserted
after the last match of specified regular expression. A special value is
available; C(EOF) for inserting the line at the end of the file.
If specified regular expression has no matches, EOF will be used instead.
May not be used with C(backrefs).
choices: [ 'EOF', '*regex*' ]
insertbefore:
required: false
version_added: "1.1"
description:
- Used with C(state=present). If specified, the line will be inserted
before the last match of specified regular expression. A value is
available; C(BOF) for inserting the line at the beginning of the file.
If specified regular expression has no matches, the line will be
inserted at the end of the file. May not be used with C(backrefs).
choices: [ 'BOF', '*regex*' ]
create:
required: false
choices: [ "yes", "no" ]
default: "no"
description:
- Used with C(state=present). If specified, the file will be created
if it does not already exist. By default it will fail if the file
is missing.
backup:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- Create a backup file including the timestamp information so you can
get the original file back if you somehow clobbered it incorrectly.
others:
description:
- All arguments accepted by the M(file) module also work here.
required: false
"""
EXAMPLES = r"""
- lineinfile:
dest: /etc/selinux/config
regexp: '^SELINUX='
line: 'SELINUX=enforcing'
- lineinfile:
dest: /etc/sudoers
state: absent
regexp: '^%wheel'
- lineinfile:
dest: /etc/hosts
regexp: '^127\.0\.0\.1'
line: '127.0.0.1 localhost'
owner: root
group: root
mode: 0644
- lineinfile:
dest: /etc/httpd/conf/httpd.conf
regexp: '^Listen '
insertafter: '^#Listen '
line: 'Listen 8080'
- lineinfile:
dest: /etc/services
regexp: '^# port for http'
insertbefore: '^www.*80/tcp'
line: '# port for http by default'
# Add a line to a file if it does not exist, without passing regexp
- lineinfile:
dest: /tmp/testfile
line: '192.168.1.99 foo.lab.net foo'
# The line value must be quoted because of the ': ' it contains. See the Gotchas in the YAML docs.
- lineinfile:
    dest: /etc/sudoers
    state: present
    regexp: '^%wheel'
    line: '%wheel ALL=(ALL) NOPASSWD: ALL'
- lineinfile:
dest: /opt/jboss-as/bin/standalone.conf
regexp: '^(.*)Xms(\d+)m(.*)$'
line: '\1Xms${xms}m\3'
backrefs: yes
# Validate the sudoers file before saving
- lineinfile:
dest: /etc/sudoers
state: present
regexp: '^%ADMIN ALL='
line: '%ADMIN ALL=(ALL) NOPASSWD: ALL'
validate: 'visudo -cf %s'
"""
import re
import os
import tempfile
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import b
from ansible.module_utils._text import to_bytes, to_native
def write_changes(module, b_lines, dest):
tmpfd, tmpfile = tempfile.mkstemp()
f = os.fdopen(tmpfd, 'wb')
f.writelines(b_lines)
f.close()
validate = module.params.get('validate', None)
valid = not validate
if validate:
if "%s" not in validate:
module.fail_json(msg="validate must contain %%s: %s" % (validate))
(rc, out, err) = module.run_command(to_bytes(validate % tmpfile, errors='surrogate_or_strict'))
valid = rc == 0
if rc != 0:
module.fail_json(msg='failed to validate: '
'rc:%s error:%s' % (rc, err))
if valid:
module.atomic_move(tmpfile,
to_native(os.path.realpath(to_bytes(dest, errors='surrogate_or_strict')), errors='surrogate_or_strict'),
unsafe_writes=module.params['unsafe_writes'])
def check_file_attrs(module, changed, message, diff):
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False, diff=diff):
if changed:
message += " and "
changed = True
message += "ownership, perms or SE linux context changed"
return message, changed
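# present() makes a single pass over the file: index[0] records the last line
# matching `regexp` and index[1] the insertafter/insertbefore anchor. At most
# one mutation follows: the matched line is replaced (expanding backrefs when
# requested), or the new line is inserted at BOF, appended at EOF, or placed
# at the recorded anchor.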
def present(module, dest, regexp, line, insertafter, insertbefore, create,
backup, backrefs):
diff = {'before': '',
'after': '',
'before_header': '%s (content)' % dest,
'after_header': '%s (content)' % dest}
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if not os.path.exists(b_dest):
if not create:
module.fail_json(rc=257, msg='Destination %s does not exist !' % dest)
b_destpath = os.path.dirname(b_dest)
if not os.path.exists(b_destpath) and not module.check_mode:
os.makedirs(b_destpath)
b_lines = []
else:
f = open(b_dest, 'rb')
b_lines = f.readlines()
f.close()
if module._diff:
diff['before'] = to_native(b('').join(b_lines))
if regexp is not None:
bre_m = re.compile(to_bytes(regexp, errors='surrogate_or_strict'))
if insertafter not in (None, 'BOF', 'EOF'):
bre_ins = re.compile(to_bytes(insertafter, errors='surrogate_or_strict'))
elif insertbefore not in (None, 'BOF'):
bre_ins = re.compile(to_bytes(insertbefore, errors='surrogate_or_strict'))
else:
bre_ins = None
# index[0] is the line num where regexp has been found
    # index[1] is the line num where insertafter/insertbefore has been found
index = [-1, -1]
m = None
b_line = to_bytes(line, errors='surrogate_or_strict')
for lineno, b_cur_line in enumerate(b_lines):
if regexp is not None:
match_found = bre_m.search(b_cur_line)
else:
match_found = b_line == b_cur_line.rstrip(b('\r\n'))
if match_found:
index[0] = lineno
m = match_found
elif bre_ins is not None and bre_ins.search(b_cur_line):
if insertafter:
# + 1 for the next line
index[1] = lineno + 1
if insertbefore:
                # index of the current line, so the new line goes before it
index[1] = lineno
msg = ''
changed = False
# Regexp matched a line in the file
b_linesep = to_bytes(os.linesep, errors='surrogate_or_strict')
if index[0] != -1:
if backrefs:
b_new_line = m.expand(b_line)
else:
# Don't do backref expansion if not asked.
b_new_line = b_line
if not b_new_line.endswith(b_linesep):
b_new_line += b_linesep
if b_lines[index[0]] != b_new_line:
b_lines[index[0]] = b_new_line
msg = 'line replaced'
changed = True
elif backrefs:
# Do absolutely nothing, since it's not safe generating the line
# without the regexp matching to populate the backrefs.
pass
# Add it to the beginning of the file
elif insertbefore == 'BOF' or insertafter == 'BOF':
b_lines.insert(0, b_line + b_linesep)
msg = 'line added'
changed = True
# Add it to the end of the file if requested or
# if insertafter/insertbefore didn't match anything
# (so default behaviour is to add at the end)
elif insertafter == 'EOF' or index[1] == -1:
# If the file is not empty then ensure there's a newline before the added line
if len(b_lines) > 0 and not b_lines[-1][-1:] in (b('\n'), b('\r')):
b_lines.append(b_linesep)
b_lines.append(b_line + b_linesep)
msg = 'line added'
changed = True
# insert* matched, but not the regexp
else:
b_lines.insert(index[1], b_line + b_linesep)
msg = 'line added'
changed = True
if module._diff:
diff['after'] = to_native(b('').join(b_lines))
backupdest = ""
if changed and not module.check_mode:
if backup and os.path.exists(b_dest):
backupdest = module.backup_local(dest)
write_changes(module, b_lines, dest)
if module.check_mode and not os.path.exists(b_dest):
module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=diff)
attr_diff = {}
msg, changed = check_file_attrs(module, changed, msg, attr_diff)
attr_diff['before_header'] = '%s (file attributes)' % dest
attr_diff['after_header'] = '%s (file attributes)' % dest
difflist = [diff, attr_diff]
module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=difflist)
def absent(module, dest, regexp, line, backup):
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if not os.path.exists(b_dest):
module.exit_json(changed=False, msg="file not present")
msg = ''
diff = {'before': '',
'after': '',
'before_header': '%s (content)' % dest,
'after_header': '%s (content)' % dest}
f = open(b_dest, 'rb')
b_lines = f.readlines()
f.close()
if module._diff:
diff['before'] = to_native(b('').join(b_lines))
if regexp is not None:
bre_c = re.compile(to_bytes(regexp, errors='surrogate_or_strict'))
found = []
b_line = to_bytes(line, errors='surrogate_or_strict')
def matcher(b_cur_line):
if regexp is not None:
match_found = bre_c.search(b_cur_line)
else:
match_found = b_line == b_cur_line.rstrip(b('\r\n'))
if match_found:
found.append(b_cur_line)
return not match_found
b_lines = [l for l in b_lines if matcher(l)]
changed = len(found) > 0
if module._diff:
diff['after'] = to_native(b('').join(b_lines))
backupdest = ""
if changed and not module.check_mode:
if backup:
backupdest = module.backup_local(dest)
write_changes(module, b_lines, dest)
if changed:
msg = "%s line(s) removed" % len(found)
attr_diff = {}
msg, changed = check_file_attrs(module, changed, msg, attr_diff)
attr_diff['before_header'] = '%s (file attributes)' % dest
attr_diff['after_header'] = '%s (file attributes)' % dest
difflist = [diff, attr_diff]
module.exit_json(changed=changed, found=len(found), msg=msg, backup=backupdest, diff=difflist)
def main():
module = AnsibleModule(
argument_spec=dict(
dest=dict(required=True, aliases=['name', 'destfile'], type='path'),
state=dict(default='present', choices=['absent', 'present']),
regexp=dict(default=None),
line=dict(aliases=['value']),
insertafter=dict(default=None),
insertbefore=dict(default=None),
backrefs=dict(default=False, type='bool'),
create=dict(default=False, type='bool'),
backup=dict(default=False, type='bool'),
validate=dict(default=None, type='str'),
),
mutually_exclusive=[['insertbefore', 'insertafter']],
add_file_common_args=True,
supports_check_mode=True
)
params = module.params
create = params['create']
backup = params['backup']
backrefs = params['backrefs']
dest = params['dest']
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if os.path.isdir(b_dest):
module.fail_json(rc=256, msg='Destination %s is a directory !' % dest)
if params['state'] == 'present':
if backrefs and params['regexp'] is None:
module.fail_json(msg='regexp= is required with backrefs=true')
if params.get('line', None) is None:
module.fail_json(msg='line= is required with state=present')
# Deal with the insertafter default value manually, to avoid errors
# because of the mutually_exclusive mechanism.
ins_bef, ins_aft = params['insertbefore'], params['insertafter']
if ins_bef is None and ins_aft is None:
ins_aft = 'EOF'
line = params['line']
present(module, dest, params['regexp'], line,
ins_aft, ins_bef, create, backup, backrefs)
else:
if params['regexp'] is None and params.get('line', None) is None:
module.fail_json(msg='one of line= or regexp= is required with state=absent')
absent(module, dest, params['regexp'], params.get('line', None), backup)
if __name__ == '__main__':
main()
| chrismeyersfsu/ansible-modules-core | files/lineinfile.py | Python | gpl-3.0 | 15,603 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Matt Martz <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
author: 'Matt Martz (@sivel)'
short_description: 'Deploys a VMware virtual machine from an OVF or OVA file'
description:
- 'This module can be used to deploy a VMware VM from an OVF or OVA file'
module: vmware_deploy_ovf
notes: []
options:
allow_duplicates:
default: "yes"
description:
- Whether or not to allow duplicate VM names. ESXi allows duplicates, vCenter may not.
type: bool
datacenter:
default: ha-datacenter
description:
- Datacenter to deploy to.
type: str
cluster:
description:
- Cluster to deploy to.
type: str
datastore:
default: datastore1
description:
- Datastore to deploy to.
- "You can also specify datastore storage cluster. version_added: 2.9"
type: str
deployment_option:
description:
- The key of the chosen deployment option.
type: str
disk_provisioning:
choices:
- flat
- eagerZeroedThick
- monolithicSparse
- twoGbMaxExtentSparse
- twoGbMaxExtentFlat
- thin
- sparse
- thick
- seSparse
- monolithicFlat
default: thin
description:
- Disk provisioning type.
type: str
fail_on_spec_warnings:
description:
- Cause the module to treat OVF Import Spec warnings as errors.
default: "no"
type: bool
folder:
description:
- Absolute path of folder to place the virtual machine.
- If not specified, defaults to the value of C(datacenter.vmFolder).
- 'Examples:'
- ' folder: /ha-datacenter/vm'
- ' folder: ha-datacenter/vm'
- ' folder: /datacenter1/vm'
- ' folder: datacenter1/vm'
- ' folder: /datacenter1/vm/folder1'
- ' folder: datacenter1/vm/folder1'
- ' folder: /folder1/datacenter1/vm'
- ' folder: folder1/datacenter1/vm'
- ' folder: /folder1/datacenter1/vm/folder2'
type: str
inject_ovf_env:
description:
- Force the given properties to be inserted into an OVF Environment and injected through VMware Tools.
version_added: "2.8"
type: bool
name:
description:
- Name of the VM to work with.
- Virtual machine names in vCenter are not necessarily unique, which may be problematic.
type: str
networks:
default:
VM Network: VM Network
description:
- 'C(key: value) mapping of OVF network name, to the vCenter network name.'
type: dict
ovf:
description:
- 'Path to OVF or OVA file to deploy.'
aliases:
- ova
power_on:
default: true
description:
- 'Whether or not to power on the virtual machine after creation.'
type: bool
properties:
description:
- The assignment of values to the properties found in the OVF as key value pairs.
type: dict
resource_pool:
default: Resources
description:
- Resource Pool to deploy to.
type: str
wait:
default: true
description:
- 'Wait for the host to power on.'
type: bool
wait_for_ip_address:
default: false
description:
- Wait until vCenter detects an IP address for the VM.
- This requires vmware-tools (vmtoolsd) to properly work after creation.
type: bool
requirements:
- pyvmomi
version_added: "2.7"
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- vmware_deploy_ovf:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
ovf: /path/to/ubuntu-16.04-amd64.ovf
wait_for_ip_address: true
delegate_to: localhost
# Deploys a new VM named 'NewVM' in specific datacenter/cluster, with network mapping taken from variable and using ova template from an absolute path
- vmware_deploy_ovf:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datacenter: Datacenter1
cluster: Cluster1
datastore: vsandatastore
name: NewVM
networks: "{u'VM Network':u'{{ ProvisioningNetworkLabel }}'}"
validate_certs: no
power_on: no
ovf: /absolute/path/to/template/mytemplate.ova
delegate_to: localhost
'''
RETURN = r'''
instance:
description: metadata about the new virtual machine
returned: always
type: dict
sample: None
'''
import io
import os
import sys
import tarfile
import time
import traceback
import xml.etree.ElementTree as ET
from threading import Thread
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import string_types
from ansible.module_utils.urls import generic_urlparse, open_url, urlparse, urlunparse
from ansible.module_utils.vmware import (find_network_by_name, find_vm_by_name, PyVmomi,
gather_vm_facts, vmware_argument_spec, wait_for_task, wait_for_vm_ip)
try:
from ansible.module_utils.vmware import vim
from pyVmomi import vmodl
except ImportError:
pass
def path_exists(value):
if not isinstance(value, string_types):
value = str(value)
value = os.path.expanduser(os.path.expandvars(value))
if not os.path.exists(value):
raise ValueError('%s is not a valid path' % value)
return value
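# Usage sketch (added for illustration; paths are hypothetical): this validator is
# wired in below as the 'type' of the ovf/ova option, so user input is expanded
# and checked before the module body runs, e.g.
#   path_exists('~/images/appliance.ova')  # -> '/home/user/images/appliance.ova'
#   path_exists('/does/not/exist.ova')     # -> raises ValueError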
class ProgressReader(io.FileIO):
def __init__(self, name, mode='r', closefd=True):
self.bytes_read = 0
io.FileIO.__init__(self, name, mode=mode, closefd=closefd)
def read(self, size=10240):
chunk = io.FileIO.read(self, size)
self.bytes_read += len(chunk)
return chunk
class TarFileProgressReader(tarfile.ExFileObject):
def __init__(self, *args):
self.bytes_read = 0
tarfile.ExFileObject.__init__(self, *args)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
try:
self.close()
except Exception:
pass
def read(self, size=10240):
chunk = tarfile.ExFileObject.read(self, size)
self.bytes_read += len(chunk)
return chunk
class VMDKUploader(Thread):
def __init__(self, vmdk, url, validate_certs=True, tarinfo=None, create=False):
Thread.__init__(self)
self.vmdk = vmdk
if tarinfo:
self.size = tarinfo.size
else:
self.size = os.stat(vmdk).st_size
self.url = url
self.validate_certs = validate_certs
self.tarinfo = tarinfo
self.f = None
self.e = None
self._create = create
@property
def bytes_read(self):
try:
return self.f.bytes_read
except AttributeError:
return 0
def _request_opts(self):
'''
Requests for vmdk files differ from other file types. Build the request options here to handle that
'''
headers = {
'Content-Length': self.size,
'Content-Type': 'application/octet-stream',
}
if self._create:
# Non-VMDK
method = 'PUT'
headers['Overwrite'] = 't'
else:
# VMDK
method = 'POST'
headers['Content-Type'] = 'application/x-vnd.vmware-streamVmdk'
return {
'method': method,
'headers': headers,
}
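    # Illustrative shapes of the options built above: with self._create=True it
    # returns {'method': 'PUT', 'headers': {'Content-Length': <size>,
    # 'Content-Type': 'application/octet-stream', 'Overwrite': 't'}}, while a
    # streamed VMDK (create=False) gets method 'POST' and the Content-Type
    # 'application/x-vnd.vmware-streamVmdk'.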
def _open_url(self):
open_url(self.url, data=self.f, validate_certs=self.validate_certs, **self._request_opts())
def run(self):
if self.tarinfo:
try:
with TarFileProgressReader(self.vmdk, self.tarinfo) as self.f:
self._open_url()
except Exception:
self.e = sys.exc_info()
else:
try:
with ProgressReader(self.vmdk, 'rb') as self.f:
self._open_url()
except Exception:
self.e = sys.exc_info()
class VMwareDeployOvf(PyVmomi):
def __init__(self, module):
super(VMwareDeployOvf, self).__init__(module)
self.module = module
self.params = module.params
self.datastore = None
self.datacenter = None
self.resource_pool = None
self.network_mappings = []
self.ovf_descriptor = None
self.tar = None
self.lease = None
self.import_spec = None
self.entity = None
def get_objects(self):
self.datacenter = self.find_datacenter_by_name(self.params['datacenter'])
if not self.datacenter:
self.module.fail_json(msg='%(datacenter)s could not be located' % self.params)
self.datastore = None
datastore_cluster_obj = self.find_datastore_cluster_by_name(self.params['datastore'])
if datastore_cluster_obj:
datastore = None
datastore_freespace = 0
for ds in datastore_cluster_obj.childEntity:
if isinstance(ds, vim.Datastore) and ds.summary.freeSpace > datastore_freespace:
# If datastore field is provided, filter destination datastores
if ds.summary.maintenanceMode != 'normal' or not ds.summary.accessible:
continue
datastore = ds
datastore_freespace = ds.summary.freeSpace
if datastore:
self.datastore = datastore
else:
self.datastore = self.find_datastore_by_name(self.params['datastore'], self.datacenter)
if not self.datastore:
self.module.fail_json(msg='%(datastore)s could not be located' % self.params)
if self.params['cluster']:
resource_pools = []
cluster = self.find_cluster_by_name(self.params['cluster'], datacenter_name=self.datacenter)
if cluster is None:
self.module.fail_json(msg="Unable to find cluster '%(cluster)s'" % self.params)
self.resource_pool = self.find_resource_pool_by_cluster(self.params['resource_pool'], cluster=cluster)
else:
self.resource_pool = self.find_resource_pool_by_name(self.params['resource_pool'])
if not self.resource_pool:
self.module.fail_json(msg='%(resource_pool)s could not be located' % self.params)
for key, value in self.params['networks'].items():
network = find_network_by_name(self.content, value)
if not network:
self.module.fail_json(msg='%(network)s could not be located' % self.params)
network_mapping = vim.OvfManager.NetworkMapping()
network_mapping.name = key
network_mapping.network = network
self.network_mappings.append(network_mapping)
return self.datastore, self.datacenter, self.resource_pool, self.network_mappings
def get_ovf_descriptor(self):
if tarfile.is_tarfile(self.params['ovf']):
self.tar = tarfile.open(self.params['ovf'])
ovf = None
for candidate in self.tar.getmembers():
dummy, ext = os.path.splitext(candidate.name)
if ext.lower() == '.ovf':
ovf = candidate
break
if not ovf:
self.module.fail_json(msg='Could not locate OVF file in %(ovf)s' % self.params)
self.ovf_descriptor = to_native(self.tar.extractfile(ovf).read())
else:
with open(self.params['ovf']) as f:
self.ovf_descriptor = f.read()
return self.ovf_descriptor
def get_lease(self):
datastore, datacenter, resource_pool, network_mappings = self.get_objects()
params = {
'diskProvisioning': self.params['disk_provisioning'],
}
if self.params['name']:
params['entityName'] = self.params['name']
if network_mappings:
params['networkMapping'] = network_mappings
if self.params['deployment_option']:
params['deploymentOption'] = self.params['deployment_option']
if self.params['properties']:
params['propertyMapping'] = []
for key, value in self.params['properties'].items():
property_mapping = vim.KeyValue()
property_mapping.key = key
property_mapping.value = str(value) if isinstance(value, bool) else value
params['propertyMapping'].append(property_mapping)
if self.params['folder']:
folder = self.content.searchIndex.FindByInventoryPath(self.params['folder'])
if not folder:
self.module.fail_json(msg="Unable to find the specified folder %(folder)s" % self.params)
else:
folder = datacenter.vmFolder
spec_params = vim.OvfManager.CreateImportSpecParams(**params)
ovf_descriptor = self.get_ovf_descriptor()
self.import_spec = self.content.ovfManager.CreateImportSpec(
ovf_descriptor,
resource_pool,
datastore,
spec_params
)
errors = [to_native(e.msg) for e in getattr(self.import_spec, 'error', [])]
if self.params['fail_on_spec_warnings']:
errors.extend(
(to_native(w.msg) for w in getattr(self.import_spec, 'warning', []))
)
if errors:
self.module.fail_json(
msg='Failure validating OVF import spec: %s' % '. '.join(errors)
)
for warning in getattr(self.import_spec, 'warning', []):
self.module.warn('Problem validating OVF import spec: %s' % to_native(warning.msg))
if not self.params['allow_duplicates']:
name = self.import_spec.importSpec.configSpec.name
match = find_vm_by_name(self.content, name, folder=folder)
if match:
self.module.exit_json(instance=gather_vm_facts(self.content, match), changed=False)
if self.module.check_mode:
self.module.exit_json(changed=True, instance={'hw_name': name})
try:
self.lease = resource_pool.ImportVApp(
self.import_spec.importSpec,
folder
)
except vmodl.fault.SystemError as e:
self.module.fail_json(
msg='Failed to start import: %s' % to_native(e.msg)
)
while self.lease.state != vim.HttpNfcLease.State.ready:
time.sleep(0.1)
self.entity = self.lease.info.entity
return self.lease, self.import_spec
def _normalize_url(self, url):
'''
        The hostname in URLs from vmware may be ``*``; update it accordingly
'''
url_parts = generic_urlparse(urlparse(url))
if url_parts.hostname == '*':
if url_parts.port:
url_parts.netloc = '%s:%d' % (self.params['hostname'], url_parts.port)
else:
url_parts.netloc = self.params['hostname']
return urlunparse(url_parts.as_list())
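    # Example of the rewrite above (the vCenter hostname is a made-up value):
    #   https://*:443/nfc/disk-0.vmdk -> https://vcenter.example.com:443/nfc/disk-0.vmdk
    # where the replacement hostname comes from self.params['hostname'].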
def upload(self):
if self.params['ovf'] is None:
self.module.fail_json(msg="OVF path is required for upload operation.")
ovf_dir = os.path.dirname(self.params['ovf'])
lease, import_spec = self.get_lease()
uploaders = []
for file_item in import_spec.fileItem:
device_upload_url = None
for device_url in lease.info.deviceUrl:
if file_item.deviceId == device_url.importKey:
device_upload_url = self._normalize_url(device_url.url)
break
if not device_upload_url:
lease.HttpNfcLeaseAbort(
vmodl.fault.SystemError(reason='Failed to find deviceUrl for file %s' % file_item.path)
)
self.module.fail_json(
msg='Failed to find deviceUrl for file %s' % file_item.path
)
vmdk_tarinfo = None
if self.tar:
vmdk = self.tar
try:
vmdk_tarinfo = self.tar.getmember(file_item.path)
except KeyError:
lease.HttpNfcLeaseAbort(
vmodl.fault.SystemError(reason='Failed to find VMDK file %s in OVA' % file_item.path)
)
self.module.fail_json(
msg='Failed to find VMDK file %s in OVA' % file_item.path
)
else:
vmdk = os.path.join(ovf_dir, file_item.path)
try:
path_exists(vmdk)
except ValueError:
lease.HttpNfcLeaseAbort(
vmodl.fault.SystemError(reason='Failed to find VMDK file at %s' % vmdk)
)
self.module.fail_json(
msg='Failed to find VMDK file at %s' % vmdk
)
uploaders.append(
VMDKUploader(
vmdk,
device_upload_url,
self.params['validate_certs'],
tarinfo=vmdk_tarinfo,
create=file_item.create
)
)
total_size = sum(u.size for u in uploaders)
total_bytes_read = [0] * len(uploaders)
for i, uploader in enumerate(uploaders):
uploader.start()
while uploader.is_alive():
time.sleep(0.1)
total_bytes_read[i] = uploader.bytes_read
lease.HttpNfcLeaseProgress(int(100.0 * sum(total_bytes_read) / total_size))
if uploader.e:
lease.HttpNfcLeaseAbort(
vmodl.fault.SystemError(reason='%s' % to_native(uploader.e[1]))
)
self.module.fail_json(
msg='%s' % to_native(uploader.e[1]),
exception=''.join(traceback.format_tb(uploader.e[2]))
)
def complete(self):
self.lease.HttpNfcLeaseComplete()
def inject_ovf_env(self):
attrib = {
'xmlns': 'http://schemas.dmtf.org/ovf/environment/1',
'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
'xmlns:oe': 'http://schemas.dmtf.org/ovf/environment/1',
'xmlns:ve': 'http://www.vmware.com/schema/ovfenv',
'oe:id': '',
've:esxId': self.entity._moId
}
env = ET.Element('Environment', **attrib)
platform = ET.SubElement(env, 'PlatformSection')
ET.SubElement(platform, 'Kind').text = self.content.about.name
ET.SubElement(platform, 'Version').text = self.content.about.version
ET.SubElement(platform, 'Vendor').text = self.content.about.vendor
ET.SubElement(platform, 'Locale').text = 'US'
prop_section = ET.SubElement(env, 'PropertySection')
for key, value in self.params['properties'].items():
params = {
'oe:key': key,
'oe:value': str(value) if isinstance(value, bool) else value
}
ET.SubElement(prop_section, 'Property', **params)
opt = vim.option.OptionValue()
opt.key = 'guestinfo.ovfEnv'
opt.value = '<?xml version="1.0" encoding="UTF-8"?>' + to_native(ET.tostring(env))
config_spec = vim.vm.ConfigSpec()
config_spec.extraConfig = [opt]
task = self.entity.ReconfigVM_Task(config_spec)
wait_for_task(task)
def deploy(self):
facts = {}
if self.params['inject_ovf_env']:
self.inject_ovf_env()
if self.params['power_on']:
task = self.entity.PowerOn()
if self.params['wait']:
wait_for_task(task)
if self.params['wait_for_ip_address']:
_facts = wait_for_vm_ip(self.content, self.entity)
if not _facts:
self.module.fail_json(msg='Waiting for IP address timed out')
facts.update(_facts)
if not facts:
facts.update(gather_vm_facts(self.content, self.entity))
return facts
def main():
argument_spec = vmware_argument_spec()
argument_spec.update({
'name': {},
'datastore': {
'default': 'datastore1',
},
'datacenter': {
'default': 'ha-datacenter',
},
'cluster': {
'default': None,
},
'deployment_option': {
'default': None,
},
'folder': {
'default': None,
},
'inject_ovf_env': {
'default': False,
'type': 'bool',
},
'resource_pool': {
'default': 'Resources',
},
'networks': {
'default': {
'VM Network': 'VM Network',
},
'type': 'dict',
},
'ovf': {
'type': path_exists,
'aliases': ['ova'],
},
'disk_provisioning': {
'choices': [
'flat',
'eagerZeroedThick',
'monolithicSparse',
'twoGbMaxExtentSparse',
'twoGbMaxExtentFlat',
'thin',
'sparse',
'thick',
'seSparse',
'monolithicFlat'
],
'default': 'thin',
},
'power_on': {
'type': 'bool',
'default': True,
},
'properties': {
'type': 'dict',
},
'wait': {
'type': 'bool',
'default': True,
},
'wait_for_ip_address': {
'type': 'bool',
'default': False,
},
'allow_duplicates': {
'type': 'bool',
'default': True,
},
'fail_on_spec_warnings': {
'type': 'bool',
'default': False,
},
})
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
deploy_ovf = VMwareDeployOvf(module)
deploy_ovf.upload()
deploy_ovf.complete()
facts = deploy_ovf.deploy()
module.exit_json(instance=facts, changed=True)
if __name__ == '__main__':
main()
| roadmapper/ansible | lib/ansible/modules/cloud/vmware/vmware_deploy_ovf.py | Python | gpl-3.0 | 22,956 |
# Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.watchers.security_group
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <[email protected]> @monkeysecurity
"""
from security_monkey.watcher import Watcher
from security_monkey.watcher import ChangeItem
from security_monkey.constants import TROUBLE_REGIONS
from security_monkey.exceptions import BotoConnectionIssue
from security_monkey import app
class SecurityGroup(Watcher):
index = 'securitygroup'
i_am_singular = 'Security Group'
i_am_plural = 'Security Groups'
def __init__(self, accounts=None, debug=False):
super(SecurityGroup, self).__init__(accounts=accounts, debug=debug)
# TODO: grab those from DB
self.instance_detail = app.config.get("SECURITYGROUP_INSTANCE_DETAIL", 'FULL')
self.honor_ephemerals = True
self.ephemeral_paths = ["assigned_to"]
def get_detail_level(self):
""" Return details level: 'NONE' / 'SUMMARY' / 'FULL' """
if self.instance_detail:
return self.instance_detail
else:
return 'NONE'
def slurp(self):
"""
:returns: item_list - list of Security Groups.
:returns: exception_map - A dict where the keys are a tuple containing the
location of the exception and the value is the actual exception
"""
self.prep_for_slurp()
item_list = []
exception_map = {}
from security_monkey.common.sts_connect import connect
for account in self.accounts:
try:
ec2 = connect(account, 'ec2')
regions = ec2.get_all_regions()
except Exception as e: # EC2ResponseError
# Some Accounts don't subscribe to EC2 and will throw an exception here.
exc = BotoConnectionIssue(str(e), self.index, account, None)
self.slurp_exception((self.index, account), exc, exception_map)
continue
for region in regions:
app.logger.debug("Checking {}/{}/{}".format(self.index, account, region.name))
try:
rec2 = connect(account, 'ec2', region=region)
# Retrieve security groups here
sgs = self.wrap_aws_rate_limited_call(
rec2.get_all_security_groups
)
if self.get_detail_level() != 'NONE':
# We fetch tags here to later correlate instances
tags = self.wrap_aws_rate_limited_call(
rec2.get_all_tags
)
# Retrieve all instances
instances = self.wrap_aws_rate_limited_call(
rec2.get_only_instances
)
app.logger.info("Number of instances found in region {}: {}".format(region.name, len(instances)))
except Exception as e:
if region.name not in TROUBLE_REGIONS:
exc = BotoConnectionIssue(str(e), self.index, account, region.name)
self.slurp_exception((self.index, account, region.name), exc, exception_map)
continue
app.logger.debug("Found {} {}".format(len(sgs), self.i_am_plural))
if self.get_detail_level() != 'NONE':
app.logger.info("Creating mapping of sg_id's to instances")
# map sgid => instance
sg_instances = {}
for instance in instances:
for group in instance.groups:
if group.id not in sg_instances:
sg_instances[group.id] = [instance]
else:
sg_instances[group.id].append(instance)
app.logger.info("Creating mapping of instance_id's to tags")
# map instanceid => tags
instance_tags = {}
for tag in tags:
if tag.res_id not in instance_tags:
instance_tags[tag.res_id] = [tag]
else:
instance_tags[tag.res_id].append(tag)
app.logger.info("Done creating mappings")
for sg in sgs:
if self.check_ignore_list(sg.name):
continue
item_config = {
"id": sg.id,
"name": sg.name,
"description": sg.description,
"vpc_id": sg.vpc_id,
"owner_id": sg.owner_id,
"region": sg.region.name,
"rules": [],
"assigned_to": None
}
for rule in sg.rules:
for grant in rule.grants:
rule_config = {
"ip_protocol": rule.ip_protocol,
"from_port": rule.from_port,
"to_port": rule.to_port,
"cidr_ip": grant.cidr_ip,
"group_id": grant.group_id,
"name": grant.name,
"owner_id": grant.owner_id
}
item_config['rules'].append(rule_config)
item_config['rules'] = sorted(item_config['rules'])
if self.get_detail_level() == 'SUMMARY':
if sg.id in sg_instances:
item_config["assigned_to"] = "{} instances".format(len(sg_instances[sg.id]))
else:
item_config["assigned_to"] = "0 instances"
elif self.get_detail_level() == 'FULL':
assigned_to = []
if sg.id in sg_instances:
for instance in sg_instances[sg.id]:
if instance.id in instance_tags:
tagdict = {tag.name: tag.value for tag in instance_tags[instance.id]}
tagdict["instance_id"] = instance.id
else:
tagdict = {"instance_id": instance.id}
assigned_to.append(tagdict)
item_config["assigned_to"] = assigned_to
# Issue 40: Security Groups can have a name collision between EC2 and
# VPC or between different VPCs within a given region.
if sg.vpc_id:
sg_name = "{0} ({1} in {2})".format(sg.name, sg.id, sg.vpc_id)
else:
sg_name = "{0} ({1})".format(sg.name, sg.id)
item = SecurityGroupItem(region=region.name, account=account, name=sg_name, config=item_config)
item_list.append(item)
return item_list, exception_map
class SecurityGroupItem(ChangeItem):
def __init__(self, region=None, account=None, name=None, config={}):
super(SecurityGroupItem, self).__init__(
index=SecurityGroup.index,
region=region,
account=account,
name=name,
new_config=config)
| vijaykumar0690/security_monkey | security_monkey/watchers/security_group.py | Python | apache-2.0 | 8,192 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ["preview"],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_ssl_policy_facts
description:
- Gather facts for GCP SslPolicy
short_description: Gather facts for GCP SslPolicy
version_added: 2.7
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
filters:
description:
A list of filter value pairs. Available filters are listed here
U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
      Each additional filter in the list will be added as an AND condition
(filter1 and filter2)
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: a ssl policy facts
gcp_compute_ssl_policy_facts:
filters:
- name = test_object
project: test_project
auth_kind: service_account
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
creation_timestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource.
returned: success
type: str
id:
description:
- The unique identifier for the resource.
returned: success
type: int
name:
description:
- Name of the resource. Provided by the client when the resource is created. The name
must be 1-63 characters long, and comply with RFC1035. Specifically, the name must
be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following characters
must be a dash, lowercase letter, or digit, except the last character, which cannot
be a dash.
returned: success
type: str
profile:
description:
- Profile specifies the set of SSL features that can be used by the load balancer
when negotiating SSL with clients. This can be one of `COMPATIBLE`, `MODERN`, `RESTRICTED`,
or `CUSTOM`. If using `CUSTOM`, the set of SSL features to enable must be specified
in the `customFeatures` field.
returned: success
type: str
min_tls_version:
description:
- The minimum version of SSL protocol that can be used by the clients to establish
a connection with the load balancer. This can be one of `TLS_1_0`, `TLS_1_1`, `TLS_1_2`.
returned: success
type: str
enabled_features:
description:
- The list of features enabled in the SSL policy.
returned: success
type: list
custom_features:
description:
- A list of features enabled when the selected profile is CUSTOM. The method returns
the set of features that can be specified in this list. This field must be empty
if the profile is not CUSTOM.
returned: success
type: list
fingerprint:
description:
- Fingerprint of this resource. A hash of the contents stored in this object. This
field is used in optimistic locking.
returned: success
type: str
warnings:
description:
- If potential misconfigurations are detected for this SSL policy, this field will
be populated with warning messages.
returned: success
type: complex
contains:
code:
description:
- A warning code, if applicable.
returned: success
type: str
message:
description:
- A human-readable description of the warning code.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(
argument_spec=dict(
filters=dict(type='list', elements='str'),
)
)
if 'scopes' not in module.params:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
items = fetch_list(module, collection(module), query_options(module.params['filters']))
if items.get('items'):
items = items.get('items')
else:
items = []
return_value = {
'items': items
}
module.exit_json(**return_value)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/global/sslPolicies".format(**module.params)
def fetch_list(module, link, query):
auth = GcpSession(module, 'compute')
response = auth.get(link, params={'filter': query})
return return_if_object(module, response)
def query_options(filters):
if not filters:
return ''
if len(filters) == 1:
return filters[0]
else:
queries = []
for f in filters:
# For multiple queries, all queries should have ()
if f[0] != '(' and f[-1] != ')':
queries.append("(%s)" % ''.join(f))
else:
queries.append(f)
return ' '.join(queries)
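# Worked example of the helper above (filter strings are made up):
#   query_options(['name = test_object'])            -> 'name = test_object'
#   query_options(['name = a', 'profile = MODERN'])  -> '(name = a) (profile = MODERN)'
# The joined string is passed as the 'filter' query parameter by fetch_list().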
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
| caphrim007/ansible | lib/ansible/modules/cloud/google/gcp_compute_ssl_policy_facts.py | Python | gpl-3.0 | 7,795 |
import utils
import os
import shutil
import sys
def go( boost_root ):
OUTPUT = "src/third_party/boost"
if os.path.exists( OUTPUT ):
shutil.rmtree( OUTPUT )
cmd = [ "bcp" , "--scan" , "--boost=%s" % boost_root ]
src = utils.getAllSourceFiles()
cmd += src
cmd.append( OUTPUT )
if not os.path.exists( OUTPUT ):
os.makedirs( OUTPUT )
res = utils.execsys( cmd )
out = open( OUTPUT + "/bcp-out.txt" , 'w' )
out.write( res[0] )
out.close()
out = open( OUTPUT + "/notes.txt" , 'w' )
out.write( "command: " + " ".join( cmd ) )
out.close()
print( res[1] )
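# Rough shape of the command assembled above (paths are illustrative):
#   bcp --scan --boost=/opt/boost_1_49_0 src/mongo/foo.cpp ... src/third_party/boost
# i.e. bcp scans the project sources and copies only the Boost code they use into
# src/third_party/boost, with stdout captured to bcp-out.txt.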
if __name__ == "__main__":
if len(sys.argv) == 1:
print( "usage: python %s <boost root directory>" % sys.argv[0] )
sys.exit(1)
go( sys.argv[1] )
| robotpilot/robomongo | src/third-party/mongodb/buildscripts/bcp.py | Python | gpl-3.0 | 824 |
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
clean_html,
determine_ext,
js_to_json,
)
class FKTVIE(InfoExtractor):
IE_NAME = 'fernsehkritik.tv'
_VALID_URL = r'http://(?:www\.)?fernsehkritik\.tv/folge-(?P<id>[0-9]+)(?:/.*)?'
_TEST = {
'url': 'http://fernsehkritik.tv/folge-1',
'md5': '21f0b0c99bce7d5b524eb1b17b1c6d79',
'info_dict': {
'id': '1',
'ext': 'mp4',
'title': 'Folge 1 vom 10. April 2007',
'thumbnail': 're:^https?://.*\.jpg$',
},
}
def _real_extract(self, url):
episode = self._match_id(url)
webpage = self._download_webpage(
'http://fernsehkritik.tv/folge-%s/play' % episode, episode)
title = clean_html(self._html_search_regex(
'<h3>([^<]+)</h3>', webpage, 'title'))
thumbnail = self._search_regex(r'POSTER\s*=\s*"([^"]+)', webpage, 'thumbnail', fatal=False)
sources = self._parse_json(self._search_regex(r'(?s)MEDIA\s*=\s*(\[.+?\]);', webpage, 'media'), episode, js_to_json)
formats = []
for source in sources:
furl = source.get('src')
if furl:
formats.append({
'url': furl,
'format_id': determine_ext(furl),
})
self._sort_formats(formats)
return {
'id': episode,
'title': title,
'formats': formats,
'thumbnail': thumbnail,
}
| akirk/youtube-dl | youtube_dl/extractor/fktv.py | Python | unlicense | 1,557 |
from __future__ import unicode_literals
from django.db import models
from imagekit.models.fields import ProcessedImageField
from imagekit.processors import ResizeToFill
from .utils import unique_filename
class DateTimeModel(models.Model):
class Meta:
abstract = True
modified = models.DateTimeField(auto_now=True)
created = models.DateTimeField(auto_now_add=True)
class Ability(DateTimeModel):
def __unicode__(self):
return self.name
name = models.CharField(max_length=50)
description = models.TextField(max_length=200)
class Type(DateTimeModel):
def __unicode__(self):
return self.name
name = models.CharField(max_length=50)
def _build_dict(self, items):
lst = []
for i in items:
lst.append(dict(
name=i.to.name,
resource_uri='/api/v1/type/' + str(i.to.id) + '/'
))
return lst
def weakness_list(self):
items = TypeChart.objects.filter(
frm__name=self.name,
ttype='weak')
if items.exists():
return self._build_dict(items)
return []
weaknesses = property(fget=weakness_list)
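    # Example of the structure produced via _build_dict above (ids illustrative):
    #   [{'name': 'ground', 'resource_uri': '/api/v1/type/5/'}, ...]
    # The resistances/supers/ineffectives/no_effects properties below reuse the
    # same shape.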
def resistances_list(self):
items = TypeChart.objects.filter(
frm__name=self.name,
ttype='resist')
if items.exists():
return self._build_dict(items)
return []
resistances = property(fget=resistances_list)
def super_list(self):
items = TypeChart.objects.filter(
frm__name=self.name,
ttype='super effective')
if items.exists():
return self._build_dict(items)
return []
supers = property(fget=super_list)
def ineffective_list(self):
items = TypeChart.objects.filter(
frm__name=self.name,
ttype='ineffective')
if items.exists():
return self._build_dict(items)
return []
ineffectives = property(fget=ineffective_list)
def no_list(self):
items = TypeChart.objects.filter(
frm__name=self.name,
ttype='noeffect')
if items.exists():
return self._build_dict(items)
return []
no_effects = property(fget=no_list)
class TypeChart(DateTimeModel):
def __unicode__(self):
return ' '.join([self.frm.name, self.ttype, 'against', self.to.name])
frm = models.ForeignKey(
Type, blank=True, null=True, related_name='type_frm')
to = models.ForeignKey(
Type, blank=True, null=True, related_name='type_to')
TYPES = (
('weak', 'weak'),
('super effective', 'super effective'),
('resistant', 'resistant'),
('ineffective', 'ineffective'),
('noeffect', 'noeffect'),
('resist', 'resist'),
)
ttype = models.CharField(
max_length=15, choices=TYPES, blank=True, null=True)
class EggGroup(DateTimeModel):
def __unicode__(self):
return self.name
name = models.CharField(max_length=50)
def get_pokes(self):
pokes = Pokemon.objects.filter(
egg_group=self
)
lst = []
if pokes.exists():
for p in pokes:
lst.append(dict(
name=p.name.capitalize(),
resource_uri='/api/v1/pokemon/' + str(p.pkdx_id) + '/'
))
return lst
pokemon = property(fget=get_pokes)
class Game(DateTimeModel):
def __unicode__(self):
return self.name
name = models.CharField(max_length=50)
generation = models.IntegerField(max_length=4)
release_year = models.IntegerField(max_length=6)
class Description(DateTimeModel):
def __unicode__(self):
return self.name
name = models.CharField(max_length=50)
description = models.TextField(max_length=200)
game = models.ManyToManyField(Game, blank=True, null=True)
def get_game_details(self):
lst = []
for g in self.game.all():
lst.append(dict(
name=g.name,
resource_uri='/api/v1/game/' + str(g.id) + '/')
)
return lst
n_game = property(fget=get_game_details)
def get_pokemon(self):
nm = self.name.split('_')[0]
pokes = Pokemon.objects.filter(
name=nm.lower()
)
if pokes.exists():
return dict(
name=pokes[0].name,
resource_uri='/api/v1/pokemon/' + str(pokes[0].pkdx_id) + '/')
return []
pokemon = property(fget=get_pokemon)
class Move(DateTimeModel):
def __unicode__(self):
return self.name
name = models.CharField(max_length=50)
description = models.TextField(max_length=200)
etype = models.ManyToManyField(Type, null=True)
pp = models.IntegerField(max_length=5)
CATEGORY = (
('physical', 'physical'),
('special', 'special'),
('status', 'status'),
)
category = models.CharField(choices=CATEGORY, max_length=10)
power = models.IntegerField(max_length=6)
accuracy = models.IntegerField(max_length=6)
class Sprite(DateTimeModel):
def __unicode__(self):
return self.name
name = models.CharField(max_length=50)
image = ProcessedImageField(
[ResizeToFill(96, 96)],
upload_to=unique_filename,
format='PNG',
options={'quality': 80})
def get_pokemon(self):
nm = self.name.split('_')[0]
pokes = Pokemon.objects.filter(
name=nm.lower()
)
if pokes.exists():
return dict(
name=pokes[0].name,
resource_uri='/api/v1/pokemon/' + str(pokes[0].pkdx_id) + '/')
return []
pokemon = property(fget=get_pokemon)
class Pokemon(DateTimeModel):
def __unicode__(self):
return ' - '.join([str(self.pkdx_id), self.name])
name = models.CharField(max_length=50)
pkdx_id = models.IntegerField(max_length=4, blank=True)
species = models.CharField(max_length=30)
height = models.CharField(max_length=10)
weight = models.CharField(max_length=10)
ev_yield = models.CharField(max_length=20)
catch_rate = models.IntegerField(max_length=4)
happiness = models.IntegerField(max_length=4)
exp = models.IntegerField(max_length=5)
GROWTHS = (
('slow', 'slow'),
('medium slow', 'medium slow'),
('medium', 'medium'),
('medium fast', 'medium fast'),
('fast', 'fast'),
)
growth_rate = models.CharField(choices=GROWTHS, max_length=15)
male_female_ratio = models.CharField(max_length=10)
hp = models.IntegerField(max_length=4)
attack = models.IntegerField(max_length=4)
defense = models.IntegerField(max_length=4)
sp_atk = models.IntegerField(max_length=4)
sp_def = models.IntegerField(max_length=4)
speed = models.IntegerField(max_length=4)
total = models.IntegerField(max_length=6)
egg_cycles = models.IntegerField(max_length=6)
abilities = models.ManyToManyField(
Ability, blank=True, null=True)
def ability_names(self):
lst = []
for a in self.abilities.all():
lst.append(dict(
resource_uri='/api/v1/ability/' + str(a.id) + '/',
name=a.name.lower())
)
return lst
ability_list = property(fget=ability_names)
def get_evolution_details(self):
evols = Evolution.objects.filter(
frm=self
)
if evols.exists():
lst = []
for e in evols:
d = dict(
to=e.to.name.capitalize(),
resource_uri='/api/v1/pokemon/' + str(e.to.pkdx_id) + '/',
method=e.method,
)
if e.level > 0:
d['level'] = e.level
if e.detail:
d['detail'] = e.detail
lst.append(d)
return lst
return []
evolutions = property(fget=get_evolution_details)
types = models.ManyToManyField(
Type, blank=True, null=True)
def type_list(self):
lst = []
for t in self.types.all():
lst.append(dict(
resource_uri='/api/v1/type/' + str(t.id) + '/',
name=t.name.lower())
)
return lst
type_list = property(fget=type_list)
egg_group = models.ManyToManyField(
EggGroup, blank=True, null=True)
def get_eggs(self):
lst = []
for e in self.egg_group.all():
lst.append(dict(
name=e.name.capitalize(),
resource_uri='/api/v1/egg/' + str(e.id) + '/'
))
return lst
eggs = property(fget=get_eggs)
descriptions = models.ManyToManyField(
Description, blank=True, null=True)
def get_sprites(self):
lst = []
for s in self.sprites.all():
lst.append(dict(
name=self.name,
resource_uri='/api/v1/sprite/' + str(s.id) + '/')
)
return lst
my_sprites = property(fget=get_sprites)
sprites = models.ManyToManyField(
Sprite, blank=True, null=True)
def get_moves(self):
moves = MovePokemon.objects.filter(
pokemon=self
)
lst = []
if moves.exists():
for m in moves:
d = dict(
name=m.move.name.capitalize(),
resource_uri='/api/v1/move/' + str(m.move.id) + '/',
learn_type=m.learn_type
)
if m.level > 0:
d['level'] = m.level
lst.append(d)
return lst
moves = property(fget=get_moves)
class Evolution(DateTimeModel):
def __unicode__(self):
return self.frm.name + ' to ' + self.to.name
frm = models.ForeignKey(
Pokemon, null=True, blank=True,
related_name='frm_evol_pokemon')
to = models.ForeignKey(
Pokemon, null=True, blank=True,
related_name='to_evol_pokemon')
EVOLV_METHODS = (
('level up', 'level_up'),
('stone', 'stone'),
('trade', 'trade'),
('other', 'other'),
)
level = models.IntegerField(max_length=3, default=0)
method = models.CharField(
choices=EVOLV_METHODS, max_length=10, default=0)
detail = models.CharField(max_length=10, null=True, blank=True)
class MovePokemon(DateTimeModel):
def __unicode__(self):
return self.pokemon.name + ' - ' + self.move.name
pokemon = models.ForeignKey(
Pokemon, related_name='move', null=True, blank=True)
move = models.ForeignKey(
Move, related_name='pokemon', null=True, blank=True)
LEARN = (
('level up', 'level up'),
('machine', 'machine'),
('egg move', 'egg move'),
('tutor', 'tutor'),
('other', 'other'),
)
learn_type = models.CharField(
choices=LEARN, max_length=15, default='level up')
level = models.IntegerField(
max_length=6, default=0, null=True, blank=True)
class Pokedex(DateTimeModel):
def __unicode__(self):
return self.name
name = models.CharField(max_length=60)
def _all_pokes(self):
lst = []
for p in Pokemon.objects.all():
lst.append(dict(
name=p.name,
resource_uri='api/v1/pokemon/' + str(p.pkdx_id) + '/'
))
return lst
pokemon = property(fget=_all_pokes)
| beni55/pokeapi | pokemon/models.py | Python | bsd-3-clause | 11,645 |
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from IECore import *
import sys
import unittest
class LensDistortOpTest(unittest.TestCase):
def testDistortOpWithStandardLensModel(self):
# The lens model and parameters to use.
o = CompoundObject()
o["lensModel"] = StringData( "StandardRadialLensModel" )
o["distortion"] = DoubleData( 0.2 )
o["anamorphicSqueeze"] = DoubleData( 1. )
o["curvatureX"] = DoubleData( 0.2 )
o["curvatureY"] = DoubleData( 0.5 )
o["quarticDistortion"] = DoubleData( .1 )
# The input image to read.
r = EXRImageReader("test/IECore/data/exrFiles/uvMapWithDataWindow.100x100.exr")
img = r.read()
# Create the Op and set it's parameters.
op = LensDistortOp()
op["input"] = img
op["mode"] = LensModel.Undistort
op['lensModel'].setValue(o)
# Run the Op.
out = op()
r = EXRImageReader("test/IECore/data/exrFiles/uvMapWithDataWindowDistorted.100x100.exr")
img2 = r.read()
self.assertEqual( img.displayWindow, img2.displayWindow )
| code-google-com/cortex-vfx | test/IECore/LensDistortOpTest.py | Python | bsd-3-clause | 2,750 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
import sqlalchemy
from nova.db.sqlalchemy import api as db_session
from nova import exception
from nova.openstack.common.gettextutils import _
INIT_VERSION = 215
_REPOSITORY = None
get_engine = db_session.get_engine
def db_sync(version=None):
if version is not None:
try:
version = int(version)
except ValueError:
raise exception.NovaException(_("version should be an integer"))
current_version = db_version()
repository = _find_migrate_repo()
if version is None or version > current_version:
return versioning_api.upgrade(get_engine(), repository, version)
else:
return versioning_api.downgrade(get_engine(), repository,
version)
def db_version():
repository = _find_migrate_repo()
try:
return versioning_api.db_version(get_engine(), repository)
except versioning_exceptions.DatabaseNotControlledError:
meta = sqlalchemy.MetaData()
engine = get_engine()
meta.reflect(bind=engine)
tables = meta.tables
if len(tables) == 0:
db_version_control(INIT_VERSION)
return versioning_api.db_version(get_engine(), repository)
else:
# Some pre-Essex DB's may not be version controlled.
# Require them to upgrade using Essex first.
raise exception.NovaException(
_("Upgrade DB using Essex release first."))
def db_initial_version():
return INIT_VERSION
def db_version_control(version=None):
repository = _find_migrate_repo()
versioning_api.version_control(get_engine(), repository, version)
return version
def _find_migrate_repo():
"""Get the path for the migrate repository."""
global _REPOSITORY
path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'migrate_repo')
assert os.path.exists(path)
if _REPOSITORY is None:
_REPOSITORY = Repository(path)
return _REPOSITORY
| eharney/nova | nova/db/sqlalchemy/migration.py | Python | apache-2.0 | 2,918 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.alexnet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.slim.nets import alexnet
slim = tf.contrib.slim
class AlexnetV2Test(tf.test.TestCase):
def testBuild(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs, num_classes)
self.assertEquals(logits.op.name, 'alexnet_v2/fc8/squeezed')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testFullyConvolutional(self):
batch_size = 1
height, width = 300, 400
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs, num_classes, spatial_squeeze=False)
self.assertEquals(logits.op.name, 'alexnet_v2/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 4, 7, num_classes])
def testEndPoints(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = alexnet.alexnet_v2(inputs, num_classes)
expected_names = ['alexnet_v2/conv1',
'alexnet_v2/pool1',
'alexnet_v2/conv2',
'alexnet_v2/pool2',
'alexnet_v2/conv3',
'alexnet_v2/conv4',
'alexnet_v2/conv5',
'alexnet_v2/pool5',
'alexnet_v2/fc6',
'alexnet_v2/fc7',
'alexnet_v2/fc8'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
def testModelVariables(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
alexnet.alexnet_v2(inputs, num_classes)
expected_names = ['alexnet_v2/conv1/weights',
'alexnet_v2/conv1/biases',
'alexnet_v2/conv2/weights',
'alexnet_v2/conv2/biases',
'alexnet_v2/conv3/weights',
'alexnet_v2/conv3/biases',
'alexnet_v2/conv4/weights',
'alexnet_v2/conv4/biases',
'alexnet_v2/conv5/weights',
'alexnet_v2/conv5/biases',
'alexnet_v2/fc6/weights',
'alexnet_v2/fc6/biases',
'alexnet_v2/fc7/weights',
'alexnet_v2/fc7/biases',
'alexnet_v2/fc8/weights',
'alexnet_v2/fc8/biases',
]
model_variables = [v.op.name for v in slim.get_model_variables()]
self.assertSetEqual(set(model_variables), set(expected_names))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
with self.test_session():
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = tf.argmax(logits, 1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 224, 224
eval_height, eval_width = 300, 400
num_classes = 1000
with self.test_session():
train_inputs = tf.random_uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = alexnet.alexnet_v2(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random_uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False,
spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 4, 7, num_classes])
logits = tf.reduce_mean(logits, [1, 2])
predictions = tf.argmax(logits, 1)
self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
def testForward(self):
batch_size = 1
height, width = 224, 224
with self.test_session() as sess:
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs)
sess.run(tf.initialize_all_variables())
output = sess.run(logits)
self.assertTrue(output.any())
if __name__ == '__main__':
tf.test.main()
| ml6973/Course | tf-hands-on/slim/python/slim/nets/alexnet_test.py | Python | apache-2.0 | 5,839 |
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests frame.inspect() """
import unittest
import sys
import os
from sparktkregtests.lib import sparktk_test
class FrameInspectTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""Build test frame"""
super(FrameInspectTest, self).setUp()
dataset = self.get_file("movie_user_5ratings.csv")
schema = [("src", int),
("vertex_type", str),
("dest", int),
("weight", int),
("edge_type", str)]
self.frame = self.context.frame.import_csv(
dataset, schema=schema)
def test_frame_inspect_0_offset(self):
"""Test offset of 0 does nothing"""
inspect = self.frame.inspect(n=5, offset=0)
self.assertEqual(len(inspect.rows), 5)
def test_frame_inspect_offset_large(self):
"""Test offset of a large value"""
inspect = self.frame.inspect(n=5, offset=1000)
self.assertEqual(len(inspect.rows), 5)
def test_frame_inspect_offset_overflow(self):
"""Test inspecting more lines than in frrame from offset truncates"""
inspect = self.frame.inspect(n=10, offset=self.frame.count()-3)
self.assertEqual(len(inspect.rows), 3)
def test_frame_inspect_0_count(self):
"""Test inspecting 0 rows returns nothing"""
inspect = self.frame.inspect(n=0)
self.assertEqual(len(inspect.rows), 0)
def test_frame_inspect_n(self):
"""Test requesting n rows returns n rows"""
inspect = self.frame.inspect(n=1)
self.assertEqual(len(inspect.rows), 1)
def test_frame_inspect_default(self):
"""Test the default number of rows is 10"""
inspect = self.frame.inspect()
self.assertEqual(len(inspect.rows), 10)
def test_frame_inspect_all(self):
"""Test inspecting entire frame returns entire frame"""
inspect = self.frame.inspect(n=self.frame.count())
self.assertEqual(len(inspect.rows), self.frame.count())
def test_frame_inspect_count_overflow(self):
"""Test inspecting more than entire frame returns the entire frame"""
row_count = self.frame.count()
inspect = self.frame.inspect(n=row_count*10)
self.assertEqual(len(inspect.rows), row_count)
        # compare 'inspect' with the RowInspection object for the entire frame
self.assertEqual(str(inspect),
str(self.frame.inspect(n=row_count)))
def test_negative_offset(self):
"""Test a negative offset errors"""
with self.assertRaisesRegexp(ValueError, "Expected non-negative integer"):
self.frame.inspect(n=5, offset=-1)
def test_negative_count(self):
"""Test taking a negative number of rows errors"""
with self.assertRaises(ValueError):
self.frame.inspect(n=-1)
def test_float_count(self):
"""Test float for count errors"""
with self.assertRaisesRegexp(TypeError, "Expected type <type 'int'>"):
self.frame.inspect(n=1.5)
def test_float_offset(self):
"""Test float for offset errors"""
with self.assertRaises(TypeError):
self.frame.inspect(n=1, offset=1.5)
def test_take_no_columns(self):
"""Test taking an empty list of columns gets an empty list"""
self.assertEqual([], self.frame.take(n=10, columns=[]))
def test_take_invalid_column(self):
"""Test taking a column that doesn't exist errors"""
with self.assertRaisesRegexp(
ValueError, "Invalid column name .* provided"):
self.frame.take(n=10, columns=["no_such_col", "weight"])
if __name__ == "__main__":
unittest.main()
| karthikvadla16/spark-tk | regression-tests/sparktkregtests/testcases/frames/frame_inspect_test.py | Python | apache-2.0 | 4,394 |
from __future__ import absolute_import
from celery import current_app
from celery.backends.base import BaseDictBackend
from celery.utils.timeutils import maybe_timedelta
from ..models import TaskMeta, TaskSetMeta
class DatabaseBackend(BaseDictBackend):
"""The database backend.
Using Django models to store task state.
"""
TaskModel = TaskMeta
TaskSetModel = TaskSetMeta
expires = current_app.conf.CELERY_TASK_RESULT_EXPIRES
create_django_tables = True
subpolling_interval = 0.5
def _store_result(self, task_id, result, status, traceback=None):
"""Store return value and status of an executed task."""
self.TaskModel._default_manager.store_result(task_id, result, status,
traceback=traceback)
return result
def _save_taskset(self, taskset_id, result):
"""Store the result of an executed taskset."""
self.TaskSetModel._default_manager.store_result(taskset_id, result)
return result
def _get_task_meta_for(self, task_id):
"""Get task metadata for a task by id."""
return self.TaskModel._default_manager.get_task(task_id).to_dict()
def _restore_taskset(self, taskset_id):
"""Get taskset metadata for a taskset by id."""
meta = self.TaskSetModel._default_manager.restore_taskset(taskset_id)
if meta:
return meta.to_dict()
def _delete_taskset(self, taskset_id):
self.TaskSetModel._default_manager.delete_taskset(taskset_id)
def _forget(self, task_id):
try:
self.TaskModel._default_manager.get(task_id=task_id).delete()
except self.TaskModel.DoesNotExist:
pass
def cleanup(self):
"""Delete expired metadata."""
expires = maybe_timedelta(self.expires)
for model in self.TaskModel, self.TaskSetModel:
model._default_manager.delete_expired(expires)
| ChristineLaMuse/mozillians | vendor-local/lib/python/djcelery/backends/database.py | Python | bsd-3-clause | 1,952 |
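# Illustrative sketch, not part of the file above: wiring this backend up is
# normally done from Django settings. The setting names below follow the
# django-celery documentation of this era and should be read as an assumption.
# (settings.py)
import djcelery
djcelery.setup_loader()
CELERY_RESULT_BACKEND = "database"    # task state goes through TaskMeta / TaskSetMeta
CELERY_TASK_RESULT_EXPIRES = 3600     # seconds kept before cleanup() removes expired rows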
from .base import BasePageTests
from django.contrib.sites.models import Site
from django.contrib.redirects.models import Redirect
class PageViewTests(BasePageTests):
def test_page_view(self):
r = self.client.get('/one/')
self.assertEqual(r.context['page'], self.p1)
# drafts are available only to staff users
self.p1.is_published = False
self.p1.save()
r = self.client.get('/one/')
self.assertEqual(r.status_code, 404)
self.client.login(username='staff_user', password='staff_user')
r = self.client.get('/one/')
self.assertEqual(r.status_code, 200)
def test_with_query_string(self):
r = self.client.get('/one/?foo')
self.assertEqual(r.context['page'], self.p1)
def test_redirect(self):
"""
Check that redirects still have priority over pages.
"""
redirect = Redirect.objects.create(
old_path='/%s/' % self.p1.path,
new_path='http://redirected.example.com',
site=Site.objects.get_current()
)
response = self.client.get(redirect.old_path)
self.assertEqual(response.status_code, 301)
self.assertEqual(response['Location'], redirect.new_path)
redirect.delete()
| lsk112233/Clone-test-repo | pages/tests/test_views.py | Python | apache-2.0 | 1,280 |
from __future__ import unicode_literals
import uuid
from django.forms import UUIDField, ValidationError
from django.test import SimpleTestCase
class UUIDFieldTest(SimpleTestCase):
def test_uuidfield_1(self):
field = UUIDField()
value = field.clean('550e8400e29b41d4a716446655440000')
self.assertEqual(value, uuid.UUID('550e8400e29b41d4a716446655440000'))
def test_uuidfield_2(self):
field = UUIDField(required=False)
value = field.clean('')
self.assertEqual(value, None)
def test_uuidfield_3(self):
field = UUIDField()
with self.assertRaises(ValidationError) as cm:
field.clean('550e8400')
self.assertEqual(cm.exception.messages[0], 'Enter a valid UUID.')
def test_uuidfield_4(self):
field = UUIDField()
value = field.prepare_value(uuid.UUID('550e8400e29b41d4a716446655440000'))
self.assertEqual(value, '550e8400e29b41d4a716446655440000')
| filias/django | tests/forms_tests/field_tests/test_uuidfield.py | Python | bsd-3-clause | 971 |
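# Illustrative sketch, not part of the test file above: the same field used in
# an ordinary form; "DeviceForm" and "token" are made-up names.
from django import forms
class DeviceForm(forms.Form):
    token = forms.UUIDField(required=False)   # cleans to uuid.UUID, or None when left blank
# DeviceForm({'token': '550e8400e29b41d4a716446655440000'}).is_valid()  -> True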
self.description = "Backup file relocation"
lp1 = pmpkg("bash")
lp1.files = ["etc/profile*"]
lp1.backup = ["etc/profile"]
self.addpkg2db("local", lp1)
p1 = pmpkg("bash", "1.0-2")
self.addpkg(p1)
lp2 = pmpkg("filesystem")
self.addpkg2db("local", lp2)
p2 = pmpkg("filesystem", "1.0-2")
p2.files = ["etc/profile**"]
p2.backup = ["etc/profile"]
p2.depends = [ "bash" ]
self.addpkg(p2)
self.args = "-U %s" % " ".join([p.filename() for p in (p1, p2)])
self.filesystem = ["etc/profile"]
self.addrule("PACMAN_RETCODE=0")
self.addrule("PKG_VERSION=bash|1.0-2")
self.addrule("PKG_VERSION=filesystem|1.0-2")
self.addrule("!FILE_PACSAVE=etc/profile")
self.addrule("FILE_PACNEW=etc/profile")
self.addrule("FILE_EXIST=etc/profile")
| kylon/pacman-fakeroot | test/pacman/tests/upgrade042.py | Python | gpl-2.0 | 725 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: orderer/configuration.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='orderer/configuration.proto',
package='orderer',
syntax='proto3',
serialized_pb=_b('\n\x1borderer/configuration.proto\x12\x07orderer\"\x1d\n\rConsensusType\x12\x0c\n\x04type\x18\x01 \x01(\t\"Y\n\tBatchSize\x12\x17\n\x0fmaxMessageCount\x18\x01 \x01(\r\x12\x18\n\x10\x61\x62soluteMaxBytes\x18\x02 \x01(\r\x12\x19\n\x11preferredMaxBytes\x18\x03 \x01(\r\"\x1f\n\x0c\x42\x61tchTimeout\x12\x0f\n\x07timeout\x18\x01 \x01(\t\"\x1f\n\x0cKafkaBrokers\x12\x0f\n\x07\x62rokers\x18\x01 \x03(\t\"(\n\x13\x43hannelRestrictions\x12\x11\n\tmax_count\x18\x01 \x01(\x04\x42U\n%org.hyperledger.fabric.protos.ordererZ,github.com/hyperledger/fabric/protos/ordererb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_CONSENSUSTYPE = _descriptor.Descriptor(
name='ConsensusType',
full_name='orderer.ConsensusType',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='orderer.ConsensusType.type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=40,
serialized_end=69,
)
_BATCHSIZE = _descriptor.Descriptor(
name='BatchSize',
full_name='orderer.BatchSize',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='maxMessageCount', full_name='orderer.BatchSize.maxMessageCount', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='absoluteMaxBytes', full_name='orderer.BatchSize.absoluteMaxBytes', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='preferredMaxBytes', full_name='orderer.BatchSize.preferredMaxBytes', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=71,
serialized_end=160,
)
_BATCHTIMEOUT = _descriptor.Descriptor(
name='BatchTimeout',
full_name='orderer.BatchTimeout',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='timeout', full_name='orderer.BatchTimeout.timeout', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=162,
serialized_end=193,
)
_KAFKABROKERS = _descriptor.Descriptor(
name='KafkaBrokers',
full_name='orderer.KafkaBrokers',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='brokers', full_name='orderer.KafkaBrokers.brokers', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=195,
serialized_end=226,
)
_CHANNELRESTRICTIONS = _descriptor.Descriptor(
name='ChannelRestrictions',
full_name='orderer.ChannelRestrictions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='max_count', full_name='orderer.ChannelRestrictions.max_count', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=228,
serialized_end=268,
)
DESCRIPTOR.message_types_by_name['ConsensusType'] = _CONSENSUSTYPE
DESCRIPTOR.message_types_by_name['BatchSize'] = _BATCHSIZE
DESCRIPTOR.message_types_by_name['BatchTimeout'] = _BATCHTIMEOUT
DESCRIPTOR.message_types_by_name['KafkaBrokers'] = _KAFKABROKERS
DESCRIPTOR.message_types_by_name['ChannelRestrictions'] = _CHANNELRESTRICTIONS
ConsensusType = _reflection.GeneratedProtocolMessageType('ConsensusType', (_message.Message,), dict(
DESCRIPTOR = _CONSENSUSTYPE,
__module__ = 'orderer.configuration_pb2'
# @@protoc_insertion_point(class_scope:orderer.ConsensusType)
))
_sym_db.RegisterMessage(ConsensusType)
BatchSize = _reflection.GeneratedProtocolMessageType('BatchSize', (_message.Message,), dict(
DESCRIPTOR = _BATCHSIZE,
__module__ = 'orderer.configuration_pb2'
# @@protoc_insertion_point(class_scope:orderer.BatchSize)
))
_sym_db.RegisterMessage(BatchSize)
BatchTimeout = _reflection.GeneratedProtocolMessageType('BatchTimeout', (_message.Message,), dict(
DESCRIPTOR = _BATCHTIMEOUT,
__module__ = 'orderer.configuration_pb2'
# @@protoc_insertion_point(class_scope:orderer.BatchTimeout)
))
_sym_db.RegisterMessage(BatchTimeout)
KafkaBrokers = _reflection.GeneratedProtocolMessageType('KafkaBrokers', (_message.Message,), dict(
DESCRIPTOR = _KAFKABROKERS,
__module__ = 'orderer.configuration_pb2'
# @@protoc_insertion_point(class_scope:orderer.KafkaBrokers)
))
_sym_db.RegisterMessage(KafkaBrokers)
ChannelRestrictions = _reflection.GeneratedProtocolMessageType('ChannelRestrictions', (_message.Message,), dict(
DESCRIPTOR = _CHANNELRESTRICTIONS,
__module__ = 'orderer.configuration_pb2'
# @@protoc_insertion_point(class_scope:orderer.ChannelRestrictions)
))
_sym_db.RegisterMessage(ChannelRestrictions)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n%org.hyperledger.fabric.protos.ordererZ,github.com/hyperledger/fabric/protos/orderer'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
| cophey/fabric | bddtests/orderer/configuration_pb2.py | Python | apache-2.0 | 7,996 |
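# Illustrative sketch, not part of the generated module above: the registered
# classes are ordinary protobuf messages, so the standard protobuf API applies.
# Field names come from the descriptor above; the import path and values are
# assumptions.
from orderer import configuration_pb2
batch = configuration_pb2.BatchSize(maxMessageCount=10,
                                    absoluteMaxBytes=10 * 1024 * 1024,
                                    preferredMaxBytes=512 * 1024)
wire = batch.SerializeToString()      # bytes suitable for embedding in a config value
parsed = configuration_pb2.BatchSize()
parsed.ParseFromString(wire)          # parsed.maxMessageCount == 10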
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Database migrations for resource-providers."""
from migrate import UniqueConstraint
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import Float
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import Unicode
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
if migrate_engine.name == 'mysql':
nameargs = {'collation': 'utf8_bin'}
else:
nameargs = {}
resource_providers = Table(
'resource_providers', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(36), nullable=False),
Column('name', Unicode(200, **nameargs), nullable=True),
Column('generation', Integer, default=0),
Column('can_host', Integer, default=0),
UniqueConstraint('uuid', name='uniq_resource_providers0uuid'),
UniqueConstraint('name', name='uniq_resource_providers0name'),
Index('resource_providers_name_idx', 'name'),
Index('resource_providers_uuid_idx', 'uuid'),
mysql_engine='InnoDB',
mysql_charset='latin1'
)
inventories = Table(
'inventories', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('resource_provider_id', Integer, nullable=False),
Column('resource_class_id', Integer, nullable=False),
Column('total', Integer, nullable=False),
Column('reserved', Integer, nullable=False),
Column('min_unit', Integer, nullable=False),
Column('max_unit', Integer, nullable=False),
Column('step_size', Integer, nullable=False),
Column('allocation_ratio', Float, nullable=False),
Index('inventories_resource_provider_id_idx',
'resource_provider_id'),
Index('inventories_resource_provider_resource_class_idx',
'resource_provider_id', 'resource_class_id'),
Index('inventories_resource_class_id_idx',
'resource_class_id'),
UniqueConstraint('resource_provider_id', 'resource_class_id',
name='uniq_inventories0resource_provider_resource_class'),
mysql_engine='InnoDB',
mysql_charset='latin1'
)
allocations = Table(
'allocations', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('resource_provider_id', Integer, nullable=False),
Column('consumer_id', String(36), nullable=False),
Column('resource_class_id', Integer, nullable=False),
Column('used', Integer, nullable=False),
Index('allocations_resource_provider_class_used_idx',
'resource_provider_id', 'resource_class_id',
'used'),
Index('allocations_resource_class_id_idx',
'resource_class_id'),
Index('allocations_consumer_id_idx', 'consumer_id'),
mysql_engine='InnoDB',
mysql_charset='latin1'
)
resource_provider_aggregates = Table(
'resource_provider_aggregates', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('resource_provider_id', Integer, primary_key=True,
nullable=False),
Column('aggregate_id', Integer, primary_key=True, nullable=False),
Index('resource_provider_aggregates_aggregate_id_idx',
'aggregate_id'),
mysql_engine='InnoDB',
mysql_charset='latin1'
)
for table in [resource_providers, inventories, allocations,
resource_provider_aggregates]:
table.create(checkfirst=True)
| rahulunair/nova | nova/db/sqlalchemy/api_migrations/migrate_repo/versions/016_resource_providers.py | Python | apache-2.0 | 4,495 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Law.merged_into'
db.add_column('laws_law', 'merged_into', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='duplicates', null=True, to=orm['laws.Law']), keep_default=False)
def backwards(self, orm):
# Deleting field 'Law.merged_into'
db.delete_column('laws_law', 'merged_into_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'committees.committee': {
'Meta': {'object_name': 'Committee'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'committees'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'committees.committeemeeting': {
'Meta': {'object_name': 'CommitteeMeeting'},
'committee': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['committees.Committee']"}),
'date': ('django.db.models.fields.DateField', [], {}),
'date_string': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mks_attended': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'committee_meetings'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'protocol_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'topics': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'votes_mentioned': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'committee_meetings'", 'blank': 'True', 'to': "orm['laws.Vote']"})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'laws.bill': {
'Meta': {'object_name': 'Bill'},
'approval_vote': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'bill_approved'", 'unique': 'True', 'null': 'True', 'to': "orm['laws.Vote']"}),
'first_committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_first'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'first_vote': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills_first'", 'null': 'True', 'to': "orm['laws.Vote']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'to': "orm['laws.Law']"}),
'pre_votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_pre_votes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.Vote']"}),
'proposers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'second_committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_second'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'stage': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'stage_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
'laws.knessetproposal': {
'Meta': {'object_name': 'KnessetProposal'},
'bill': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'knesset_proposal'", 'unique': 'True', 'null': 'True', 'to': "orm['laws.Bill']"}),
'booklet_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'committee': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'to': "orm['committees.Committee']"}),
'committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_knessetproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'knesset_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'laws_knessetproposal_related'", 'null': 'True', 'to': "orm['laws.Law']"}),
'originals': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'knesset_proposals'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.PrivateProposal']"}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_knessetproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.Vote']"})
},
'laws.law': {
'Meta': {'object_name': 'Law'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'merged_into': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'duplicates'", 'null': 'True', 'to': "orm['laws.Law']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
'laws.membervotingstatistics': {
'Meta': {'object_name': 'MemberVotingStatistics'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'voting_statistics'", 'unique': 'True', 'to': "orm['mks.Member']"})
},
'laws.partyvotingstatistics': {
'Meta': {'object_name': 'PartyVotingStatistics'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'party': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'voting_statistics'", 'unique': 'True', 'to': "orm['mks.Party']"})
},
'laws.privateproposal': {
'Meta': {'object_name': 'PrivateProposal'},
'bill': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'proposals'", 'null': 'True', 'to': "orm['laws.Bill']"}),
'committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_privateproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'joiners': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_joined'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'knesset_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'laws_privateproposal_related'", 'null': 'True', 'to': "orm['laws.Law']"}),
'proposal_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'proposers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_proposed'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_privateproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.Vote']"})
},
'laws.vote': {
'Meta': {'object_name': 'Vote'},
'against_party': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'controversy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'full_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_text_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.FloatField', [], {}),
'meeting_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'src_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'src_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'time_string': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'vote_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'votes'", 'blank': 'True', 'through': "orm['laws.VoteAction']", 'to': "orm['mks.Member']"}),
'votes_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'laws.voteaction': {
'Meta': {'object_name': 'VoteAction'},
'against_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'against_opposition': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'against_party': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'vote': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['laws.Vote']"})
},
'mks.member': {
'Meta': {'object_name': 'Member'},
'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'blog': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['planet.Blog']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'current_party': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'members'", 'null': 'True', 'to': "orm['mks.Party']"}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parties': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'all_members'", 'symmetrical': 'False', 'through': "orm['mks.Membership']", 'to': "orm['mks.Party']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'mks.membership': {
'Meta': {'object_name': 'Membership'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Party']"}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'mks.party': {
'Meta': {'object_name': 'Party'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_members': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'number_of_seats': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'planet.blog': {
'Meta': {'object_name': 'Blog'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'})
},
'tagging.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'tagging.taggeditem': {
'Meta': {'unique_together': "(('tag', 'content_type', 'object_id'),)", 'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['tagging.Tag']"})
}
}
complete_apps = ['laws']
| livni/old-OK | src/knesset/laws/migrations/0013_add_law_merged_into.py | Python | bsd-3-clause | 20,248 |
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import sys
from powerline.lint.selfcheck import havemarks
class WithPath(object):
def __init__(self, import_paths):
self.import_paths = import_paths
def __enter__(self):
self.oldpath = sys.path
sys.path = self.import_paths + sys.path
def __exit__(self, *args):
sys.path = self.oldpath
def import_function(function_type, name, data, context, echoerr, module):
havemarks(name, module)
with WithPath(data['import_paths']):
try:
func = getattr(__import__(str(module), fromlist=[str(name)]), str(name))
except ImportError:
echoerr(context='Error while checking segments (key {key})'.format(key=context.key),
context_mark=name.mark,
problem='failed to import module {0}'.format(module),
problem_mark=module.mark)
return None
except AttributeError:
echoerr(context='Error while loading {0} function (key {key})'.format(function_type, key=context.key),
problem='failed to load function {0} from module {1}'.format(name, module),
problem_mark=name.mark)
return None
if not callable(func):
echoerr(context='Error while checking segments (key {key})'.format(key=context.key),
context_mark=name.mark,
problem='imported “function” {0} from module {1} is not callable'.format(name, module),
problem_mark=module.mark)
return None
return func
def import_segment(*args, **kwargs):
return import_function('segment', *args, **kwargs)
| xfumihiro/powerline | powerline/lint/imp.py | Python | mit | 1,573 |
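# Illustrative sketch, not part of the module above: WithPath temporarily
# prepends extra directories to sys.path while a user-supplied segment module
# is imported. The directory and module names below are made up; in real use
# import_function() receives marked name/module values from the lint parser.
from powerline.lint.imp import WithPath
with WithPath(['/home/user/.config/powerline/segments']):
    mod = __import__('my_segments', fromlist=['external_ip'])
    segment = getattr(mod, 'external_ip')
# sys.path is restored here, whether or not the import succeeded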
from pylab import figure,pcolor,scatter,contour,colorbar,show,subplot,plot,connect
from numpy import array,meshgrid,reshape,linspace,min,max
from numpy import concatenate,transpose,ravel
from modshogun import *
import util
util.set_title('KernelRidgeRegression')
width=20
# positive examples
pos=util.get_realdata(True)
plot(pos[0,:], pos[1,:], "r.")
# negative examples
neg=util.get_realdata(False)
plot(neg[0,:], neg[1,:], "b.")
# train svm
labels = util.get_labels(type='regression')
train = util.get_realfeatures(pos, neg)
gk=GaussianKernel(train, train, width)
krr = KernelRidgeRegression()
krr.set_labels(labels)
krr.set_kernel(gk)
krr.set_tau(1e-3)
krr.train()
# compute output plot iso-lines
x, y, z=util.compute_output_plot_isolines(krr, gk, train, regression=True)
pcolor(x, y, z, shading='interp')
contour(x, y, z, linewidths=1, colors='black', hold=True)
connect('key_press_event', util.quit)
show()
| AzamYahya/shogun | examples/undocumented/python_modular/graphical/kernel_ridge_regression.py | Python | gpl-3.0 | 969 |
# Copyright (C) 2007-2011 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
from tests.support import unittest2, inPy3k
try:
unicode
except NameError:
# Python 3
unicode = str
long = int
import inspect
from mock import Mock, MagicMock, _magics
class TestMockingMagicMethods(unittest2.TestCase):
def testDeletingMagicMethods(self):
mock = Mock()
self.assertFalse(hasattr(mock, '__getitem__'))
mock.__getitem__ = Mock()
self.assertTrue(hasattr(mock, '__getitem__'))
del mock.__getitem__
self.assertFalse(hasattr(mock, '__getitem__'))
def testMagicMethodWrapping(self):
mock = Mock()
def f(self, name):
return self, 'fish'
mock.__getitem__ = f
self.assertFalse(mock.__getitem__ is f)
self.assertEqual(mock['foo'], (mock, 'fish'))
# When you pull the function back of the *instance*
# the first argument (self) is removed
def instance_f(name):
pass
self.assertEqual(inspect.getargspec(mock.__getitem__), inspect.getargspec(instance_f))
mock.__getitem__ = mock
self.assertTrue(mock.__getitem__ is mock)
def testMagicMethodsIsolatedBetweenMocks(self):
mock1 = Mock()
mock2 = Mock()
mock1.__iter__ = Mock(return_value=iter([]))
self.assertEqual(list(mock1), [])
self.assertRaises(TypeError, lambda: list(mock2))
def testRepr(self):
mock = Mock()
self.assertEqual(repr(mock), object.__repr__(mock))
mock.__repr__ = lambda s: 'foo'
self.assertEqual(repr(mock), 'foo')
def testStr(self):
mock = Mock()
self.assertEqual(str(mock), object.__str__(mock))
mock.__str__ = lambda s: 'foo'
self.assertEqual(str(mock), 'foo')
@unittest2.skipIf(inPy3k, "no unicode in Python 3")
def testUnicode(self):
mock = Mock()
self.assertEqual(unicode(mock), unicode(str(mock)))
mock.__unicode__ = lambda s: unicode('foo')
self.assertEqual(unicode(mock), unicode('foo'))
def testDictMethods(self):
mock = Mock()
self.assertRaises(TypeError, lambda: mock['foo'])
def _del():
del mock['foo']
def _set():
mock['foo'] = 3
self.assertRaises(TypeError, _del)
self.assertRaises(TypeError, _set)
_dict = {}
def getitem(s, name):
return _dict[name]
def setitem(s, name, value):
_dict[name] = value
def delitem(s, name):
del _dict[name]
mock.__setitem__ = setitem
mock.__getitem__ = getitem
mock.__delitem__ = delitem
self.assertRaises(KeyError, lambda: mock['foo'])
mock['foo'] = 'bar'
self.assertEqual(_dict, {'foo': 'bar'})
self.assertEqual(mock['foo'], 'bar')
del mock['foo']
self.assertEqual(_dict, {})
def testNumeric(self):
original = mock = Mock()
mock.value = 0
self.assertRaises(TypeError, lambda: mock + 3)
def add(self, other):
mock.value += other
return self
mock.__add__ = add
self.assertEqual(mock + 3, mock)
self.assertEqual(mock.value, 3)
del mock.__add__
def iadd(mock):
mock += 3
self.assertRaises(TypeError, iadd, mock)
mock.__iadd__ = add
mock += 6
self.assertEqual(mock, original)
self.assertEqual(mock.value, 9)
self.assertRaises(TypeError, lambda: 3 + mock)
mock.__radd__ = add
self.assertEqual(7 + mock, mock)
self.assertEqual(mock.value, 16)
def testHash(self):
mock = Mock()
# test delegation
self.assertEqual(hash(mock), Mock.__hash__(mock))
def _hash(s):
return 3
mock.__hash__ = _hash
self.assertEqual(hash(mock), 3)
def testNonZero(self):
m = Mock()
self.assertTrue(bool(m))
nonzero = lambda s: False
if not inPy3k:
m.__nonzero__ = nonzero
else:
m.__bool__ = nonzero
self.assertFalse(bool(m))
def testComparison(self):
if not inPy3k:
# incomparable in Python 3
            self.assertEqual(Mock() < 3, object() < 3)
            self.assertEqual(Mock() > 3, object() > 3)
            self.assertEqual(Mock() <= 3, object() <= 3)
            self.assertEqual(Mock() >= 3, object() >= 3)
mock = Mock()
def comp(s, o):
return True
mock.__lt__ = mock.__gt__ = mock.__le__ = mock.__ge__ = comp
        self.assertTrue(mock < 3)
        self.assertTrue(mock > 3)
        self.assertTrue(mock <= 3)
        self.assertTrue(mock >= 3)
def test_equality(self):
for mock in Mock(), MagicMock():
self.assertEqual(mock == mock, True)
self.assertEqual(mock != mock, False)
self.assertEqual(mock == object(), False)
self.assertEqual(mock != object(), True)
def eq(self, other):
return other == 3
mock.__eq__ = eq
self.assertTrue(mock == 3)
self.assertFalse(mock == 4)
def ne(self, other):
return other == 3
mock.__ne__ = ne
self.assertTrue(mock != 3)
self.assertFalse(mock != 4)
mock = MagicMock()
mock.__eq__.return_value = True
self.assertEqual(mock == 3, True)
mock.__ne__.return_value = False
self.assertEqual(mock != 3, False)
def testLenContainsIter(self):
mock = Mock()
self.assertRaises(TypeError, len, mock)
self.assertRaises(TypeError, iter, mock)
self.assertRaises(TypeError, lambda: 'foo' in mock)
mock.__len__ = lambda s: 6
self.assertEqual(len(mock), 6)
mock.__contains__ = lambda s, o: o == 3
self.assertTrue(3 in mock)
self.assertFalse(6 in mock)
mock.__iter__ = lambda s: iter('foobarbaz')
self.assertEqual(list(mock), list('foobarbaz'))
def testMagicMock(self):
mock = MagicMock()
mock.__iter__.return_value = iter([1, 2, 3])
self.assertEqual(list(mock), [1, 2, 3])
if inPy3k:
mock.__bool__.return_value = False
self.assertFalse(hasattr(mock, '__nonzero__'))
else:
mock.__nonzero__.return_value = False
self.assertFalse(hasattr(mock, '__bool__'))
self.assertFalse(bool(mock))
for entry in _magics:
self.assertTrue(hasattr(mock, entry))
self.assertFalse(hasattr(mock, '__imaginery__'))
def testMagicMockDefaults(self):
mock = MagicMock()
self.assertEqual(int(mock), 1)
self.assertEqual(complex(mock), 1j)
self.assertEqual(float(mock), 1.0)
self.assertEqual(long(mock), long(1))
self.assertNotIn(object(), mock)
self.assertEqual(len(mock), 0)
self.assertEqual(list(mock), [])
self.assertEqual(hash(mock), object.__hash__(mock))
self.assertEqual(str(mock), object.__str__(mock))
self.assertEqual(unicode(mock), object.__str__(mock))
self.assertIsInstance(unicode(mock), unicode)
self.assertTrue(bool(mock))
if not inPy3k:
self.assertEqual(oct(mock), '1')
else:
# in Python 3 oct and hex use __index__
# so these tests are for __index__ in py3k
self.assertEqual(oct(mock), '0o1')
self.assertEqual(hex(mock), '0x1')
# how to test __sizeof__ ?
@unittest2.skipIf(inPy3k, "no __cmp__ in Python 3")
def testNonDefaultMagicMethods(self):
mock = MagicMock()
self.assertRaises(AttributeError, lambda: mock.__cmp__)
mock = Mock()
mock.__cmp__ = lambda s, o: 0
self.assertEqual(mock, object())
def testMagicMethodsAndSpec(self):
class Iterable(object):
def __iter__(self):
pass
mock = Mock(spec=Iterable)
self.assertRaises(AttributeError, lambda: mock.__iter__)
mock.__iter__ = Mock(return_value=iter([]))
self.assertEqual(list(mock), [])
class NonIterable(object):
pass
mock = Mock(spec=NonIterable)
self.assertRaises(AttributeError, lambda: mock.__iter__)
def set_int():
mock.__int__ = Mock(return_value=iter([]))
self.assertRaises(AttributeError, set_int)
mock = MagicMock(spec=Iterable)
self.assertEqual(list(mock), [])
self.assertRaises(AttributeError, set_int)
def testMagicMethodsAndSpecSet(self):
class Iterable(object):
def __iter__(self):
pass
mock = Mock(spec_set=Iterable)
self.assertRaises(AttributeError, lambda: mock.__iter__)
mock.__iter__ = Mock(return_value=iter([]))
self.assertEqual(list(mock), [])
class NonIterable(object):
pass
mock = Mock(spec_set=NonIterable)
self.assertRaises(AttributeError, lambda: mock.__iter__)
def set_int():
mock.__int__ = Mock(return_value=iter([]))
self.assertRaises(AttributeError, set_int)
mock = MagicMock(spec_set=Iterable)
self.assertEqual(list(mock), [])
self.assertRaises(AttributeError, set_int)
def testSettingUnsupportedMagicMethod(self):
mock = MagicMock()
def set_setattr():
mock.__setattr__ = lambda self, name: None
self.assertRaisesRegexp(AttributeError,
"Attempting to set unsupported magic method '__setattr__'.",
set_setattr
)
def testAttributesAndReturnValue(self):
mock = MagicMock()
attr = mock.foo
def _get_type(obj):
# the type of every mock (or magicmock) is a custom subclass
# so the real type is the second in the mro
return type(obj).__mro__[1]
self.assertEqual(_get_type(attr), MagicMock)
returned = mock()
self.assertEqual(_get_type(returned), MagicMock)
if __name__ == '__main__':
unittest2.main()
| mzdaniel/oh-mainline | vendor/packages/mock/tests/testmagicmethods.py | Python | agpl-3.0 | 10,336 |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from helpers import unittest
import luigi
import namespace_test_helper # declares another Foo in namespace mynamespace
class Foo(luigi.Task):
pass
class FooSubclass(Foo):
pass
class TestNamespacing(unittest.TestCase):
def test_vanilla(self):
self.assertEqual(Foo.task_namespace, None)
self.assertEqual(Foo.task_family, "Foo")
self.assertEqual(str(Foo()), "Foo()")
self.assertEqual(FooSubclass.task_namespace, None)
self.assertEqual(FooSubclass.task_family, "FooSubclass")
self.assertEqual(str(FooSubclass()), "FooSubclass()")
def test_namespace(self):
self.assertEqual(namespace_test_helper.Foo.task_namespace, "mynamespace")
self.assertEqual(namespace_test_helper.Foo.task_family, "mynamespace.Foo")
self.assertEqual(str(namespace_test_helper.Foo(1)), "mynamespace.Foo(p=1)")
self.assertEqual(namespace_test_helper.Bar.task_namespace, "othernamespace")
self.assertEqual(namespace_test_helper.Bar.task_family, "othernamespace.Bar")
self.assertEqual(str(namespace_test_helper.Bar(1)), "othernamespace.Bar(p=1)")
| ivannotes/luigi | test/namespace_test.py | Python | apache-2.0 | 1,739 |
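# Illustrative sketch: the test above imports a sibling namespace_test_helper
# module. Judging from the assertions (mynamespace.Foo(p=1), othernamespace.Bar(p=1))
# it is assumed to look roughly like this; the parameter type is a guess.
import luigi
luigi.namespace("mynamespace")
class Foo(luigi.Task):
    p = luigi.IntParameter()
luigi.namespace("othernamespace")
class Bar(luigi.Task):
    p = luigi.IntParameter()
luigi.namespace()  # reset, so modules imported later are not namespaced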
from .provider_base import ExternalPaymentProvider
from tapiriik.database import db
from tapiriik.settings import MOTIVATO_PREMIUM_USERS_LIST_URL
import requests
class MotivatoExternalPaymentProvider(ExternalPaymentProvider):
ID = "motivato"
def RefreshPaymentStateForExternalIDs(self, external_ids):
from tapiriik.services import Service, ServiceRecord
external_ids = [str(x) for x in external_ids]
connections = [ServiceRecord(x) for x in db.connections.find({"Service": "motivato", "ExternalID": {"$in": external_ids}})]
users = db.users.find({"ConnectedServices.ID": {"$in": [x._id for x in connections]}})
for user in users:
my_connection = [x for x in connections if x._id in [y["ID"] for y in user["ConnectedServices"]]][0]
# Defer to the actual service module, where all the session stuff is set up
state = Service.FromID("motivato")._getPaymentState(my_connection)
self.ApplyPaymentState(user, state, my_connection.ExternalID, duration=None)
def RefreshPaymentState(self):
from tapiriik.services import ServiceRecord
from tapiriik.payments import Payments
from tapiriik.auth import User
external_ids = requests.get(MOTIVATO_PREMIUM_USERS_LIST_URL).json()
connections = [ServiceRecord(x) for x in db.connections.find({"Service": "motivato", "ExternalID": {"$in": external_ids}})]
users = list(db.users.find({"ConnectedServices.ID": {"$in": [x._id for x in connections]}}))
payments = []
# Pull relevant payment objects and associate with users
for user in users:
my_connection = [x for x in connections if x._id in [y["ID"] for y in user["ConnectedServices"]]][0]
pmt = Payments.EnsureExternalPayment(self.ID, my_connection.ExternalID, duration=None)
payments.append(pmt)
User.AssociateExternalPayment(user, pmt, skip_deassoc=True)
# Bulk-remove these payments from users who don't own them (more or less - it'll leave anyone who switched remote accounts)
db.users.update({"_id": {"$nin": [x["_id"] for x in users]}}, {"$pull": {"ExternalPayments": {"_id": {"$in": [x["_id"] for x in payments]}}}}, multi=True)
# We don't bother unsetting users who are no longer on the list - they'll be refreshed at their next sync
ExternalPaymentProvider.Register(MotivatoExternalPaymentProvider()) | cmgrote/tapiriik | tapiriik/payments/external/motivato.py | Python | apache-2.0 | 2,262 |
# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
Some utility functions to deal with mapping Amazon DynamoDB types to
Python types and vice-versa.
"""
import base64
from decimal import (Decimal, DecimalException, Context,
Clamped, Overflow, Inexact, Underflow, Rounded)
from exceptions import DynamoDBNumberError
DYNAMODB_CONTEXT = Context(
Emin=-128, Emax=126, rounding=None, prec=38,
traps=[Clamped, Overflow, Inexact, Rounded, Underflow])
# python2.6 cannot convert floats directly to
# Decimals. This is taken from:
# http://docs.python.org/release/2.6.7/library/decimal.html#decimal-faq
def float_to_decimal(f):
n, d = f.as_integer_ratio()
numerator, denominator = Decimal(n), Decimal(d)
ctx = DYNAMODB_CONTEXT
result = ctx.divide(numerator, denominator)
while ctx.flags[Inexact]:
ctx.flags[Inexact] = False
ctx.prec *= 2
result = ctx.divide(numerator, denominator)
return result
def is_num(n):
types = (int, long, float, bool, Decimal)
return isinstance(n, types) or n in types
def is_str(n):
return isinstance(n, basestring) or (isinstance(n, type) and
issubclass(n, basestring))
def is_binary(n):
return isinstance(n, Binary)
def serialize_num(val):
"""Cast a number to a string and perform
validation to ensure no loss of precision.
"""
if isinstance(val, bool):
return str(int(val))
return str(val)
def convert_num(s):
if '.' in s:
n = float(s)
else:
n = int(s)
return n
def convert_binary(n):
return Binary(base64.b64decode(n))
def get_dynamodb_type(val):
"""
Take a scalar Python value and return a string representing
the corresponding Amazon DynamoDB type. If the value passed in is
not a supported type, raise a TypeError.
"""
dynamodb_type = None
if is_num(val):
dynamodb_type = 'N'
elif is_str(val):
dynamodb_type = 'S'
elif isinstance(val, (set, frozenset)):
if False not in map(is_num, val):
dynamodb_type = 'NS'
elif False not in map(is_str, val):
dynamodb_type = 'SS'
elif False not in map(is_binary, val):
dynamodb_type = 'BS'
elif isinstance(val, Binary):
dynamodb_type = 'B'
if dynamodb_type is None:
msg = 'Unsupported type "%s" for value "%s"' % (type(val), val)
raise TypeError(msg)
return dynamodb_type
def dynamize_value(val):
"""
Take a scalar Python value and return a dict consisting
of the Amazon DynamoDB type specification and the value that
needs to be sent to Amazon DynamoDB. If the type of the value
is not supported, raise a TypeError
"""
dynamodb_type = get_dynamodb_type(val)
if dynamodb_type == 'N':
val = {dynamodb_type: serialize_num(val)}
elif dynamodb_type == 'S':
val = {dynamodb_type: val}
elif dynamodb_type == 'NS':
val = {dynamodb_type: map(serialize_num, val)}
elif dynamodb_type == 'SS':
val = {dynamodb_type: [n for n in val]}
elif dynamodb_type == 'B':
val = {dynamodb_type: val.encode()}
elif dynamodb_type == 'BS':
val = {dynamodb_type: [n.encode() for n in val]}
return val
class Binary(object):
def __init__(self, value):
if not isinstance(value, basestring):
raise TypeError('Value must be a string of binary data!')
self.value = value
def encode(self):
return base64.b64encode(self.value)
def __eq__(self, other):
if isinstance(other, Binary):
return self.value == other.value
else:
return self.value == other
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return 'Binary(%s)' % self.value
def __str__(self):
return self.value
def __hash__(self):
return hash(self.value)
def item_object_hook(dct):
"""
    A custom object hook for use when decoding JSON item bodies.
This hook will transform Amazon DynamoDB JSON responses to something
that maps directly to native Python types.
"""
if len(dct.keys()) > 1:
return dct
if 'S' in dct:
return dct['S']
if 'N' in dct:
return convert_num(dct['N'])
if 'SS' in dct:
return set(dct['SS'])
if 'NS' in dct:
return set(map(convert_num, dct['NS']))
if 'B' in dct:
return convert_binary(dct['B'])
if 'BS' in dct:
return set(map(convert_binary, dct['BS']))
return dct
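# Example (illustrative, not part of the original module): a DynamoDB-style
# JSON document can be decoded straight to native Python types with
#     json.loads(body, object_hook=item_object_hook)
# e.g. '{"rating": {"N": "4.5"}, "tags": {"SS": ["a", "b"]}}' becomes
#      {'rating': 4.5, 'tags': set(['a', 'b'])}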
class Dynamizer(object):
"""Control serialization/deserialization of types.
This class controls the encoding of python types to the
format that is expected by the DynamoDB API, as well as
taking DynamoDB types and constructing the appropriate
python types.
If you want to customize this process, you can subclass
this class and override the encoding/decoding of
specific types. For example::
'foo' (Python type)
|
v
encode('foo')
|
v
_encode_s('foo')
|
v
{'S': 'foo'} (Encoding sent to/received from DynamoDB)
|
V
decode({'S': 'foo'})
|
v
_decode_s({'S': 'foo'})
|
v
'foo' (Python type)
"""
def _get_dynamodb_type(self, attr):
return get_dynamodb_type(attr)
def encode(self, attr):
"""
Encodes a python type to the format expected
by DynamoDB.
"""
dynamodb_type = self._get_dynamodb_type(attr)
try:
encoder = getattr(self, '_encode_%s' % dynamodb_type.lower())
except AttributeError:
raise ValueError("Unable to encode dynamodb type: %s" %
dynamodb_type)
return {dynamodb_type: encoder(attr)}
def _encode_n(self, attr):
try:
if isinstance(attr, float) and not hasattr(Decimal, 'from_float'):
# python2.6 does not support creating Decimals directly
                # from floats, so we have to do this ourselves.
n = str(float_to_decimal(attr))
else:
n = str(DYNAMODB_CONTEXT.create_decimal(attr))
if filter(lambda x: x in n, ('Infinity', 'NaN')):
raise TypeError('Infinity and NaN not supported')
return n
except (TypeError, DecimalException), e:
msg = '{0} numeric for `{1}`\n{2}'.format(
e.__class__.__name__, attr, str(e) or '')
raise DynamoDBNumberError(msg)
def _encode_s(self, attr):
if isinstance(attr, unicode):
attr = attr.encode('utf-8')
elif not isinstance(attr, str):
attr = str(attr)
return attr
def _encode_ns(self, attr):
return map(self._encode_n, attr)
def _encode_ss(self, attr):
return [self._encode_s(n) for n in attr]
def _encode_b(self, attr):
return attr.encode()
def _encode_bs(self, attr):
return [self._encode_b(n) for n in attr]
def decode(self, attr):
"""
Takes the format returned by DynamoDB and constructs
the appropriate python type.
"""
if len(attr) > 1 or not attr:
return attr
dynamodb_type = attr.keys()[0]
if dynamodb_type.lower() == dynamodb_type:
# It's not an actual type, just a single character attr that
# overlaps with the DDB types. Return it.
return attr
try:
decoder = getattr(self, '_decode_%s' % dynamodb_type.lower())
except AttributeError:
return attr
return decoder(attr[dynamodb_type])
def _decode_n(self, attr):
return DYNAMODB_CONTEXT.create_decimal(attr)
def _decode_s(self, attr):
return attr
def _decode_ns(self, attr):
return set(map(self._decode_n, attr))
def _decode_ss(self, attr):
return set(map(self._decode_s, attr))
def _decode_b(self, attr):
return convert_binary(attr)
def _decode_bs(self, attr):
return set(map(self._decode_b, attr))
class LossyFloatDynamizer(Dynamizer):
"""Use float/int instead of Decimal for numeric types.
This class is provided for backwards compatibility. Instead of
using Decimals for the 'N', 'NS' types it uses ints/floats.
This class is deprecated and its usage is not encouraged,
as doing so may result in loss of precision. Use the
`Dynamizer` class instead.
"""
def _encode_n(self, attr):
return serialize_num(attr)
def _encode_ns(self, attr):
return [str(i) for i in attr]
def _decode_n(self, attr):
return convert_num(attr)
def _decode_ns(self, attr):
return set(map(self._decode_n, attr))
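# Illustrative sketch (not part of boto itself): how the two dynamizers above
# round-trip plain Python values through the DynamoDB wire format. The values
# and attribute names are made up for the example.
def _example_dynamizer_roundtrip():
    dynamizer = Dynamizer()
    encoded = dynamizer.encode(42)         # -> {'N': '42'}
    decoded = dynamizer.decode(encoded)    # -> Decimal('42'), precision kept
    # LossyFloatDynamizer trades precision for plain int/float results.
    lossy_decoded = LossyFloatDynamizer().decode(encoded)   # -> 42 (an int)
    item = {
        'username': dynamizer.encode('alice'),       # -> {'S': 'alice'}
        'scores': dynamizer.encode(set([1, 2, 3])),  # -> {'NS': [...]}
    }
    return decoded, lossy_decoded, item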
| harshilasu/GraphicMelon | y/google-cloud-sdk/platform/gsutil/third_party/boto/boto/dynamodb/types.py | Python | gpl-3.0 | 10,121 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, [email protected]
# Gerhard Lausser, [email protected]
# Gregory Starck, [email protected]
# Hartmut Goebel, [email protected]
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
class DB(object):
"""DB is a generic class for SQL Database"""
def __init__(self, table_prefix=''):
self.table_prefix = table_prefix
def stringify(self, val):
"""Get a unicode from a value"""
# If raw string, go in unicode
if isinstance(val, str):
val = val.decode('utf8', 'ignore').replace("'", "''")
elif isinstance(val, unicode):
val = val.replace("'", "''")
else: # other type, we can str
val = unicode(str(val))
val = val.replace("'", "''")
return val
def create_insert_query(self, table, data):
"""Create a INSERT query in table with all data of data (a dict)"""
query = u"INSERT INTO %s " % (self.table_prefix + table)
props_str = u' ('
values_str = u' ('
        i = 0  # for the ',' problem... looks like C here...
for prop in data:
i += 1
val = data[prop]
            # Booleans must be caught, because we want 0 or 1, not True or False
if isinstance(val, bool):
if val:
val = 1
else:
val = 0
# Get a string of the value
val = self.stringify(val)
if i == 1:
props_str = props_str + u"%s " % prop
values_str = values_str + u"'%s' " % val
else:
props_str = props_str + u", %s " % prop
values_str = values_str + u", '%s' " % val
# Ok we've got data, let's finish the query
props_str = props_str + u' )'
values_str = values_str + u' )'
query = query + props_str + u' VALUES' + values_str
return query
def create_update_query(self, table, data, where_data):
"""Create a update query of table with data, and use where data for
the WHERE clause
"""
query = u"UPDATE %s set " % (self.table_prefix + table)
# First data manage
query_follow = ''
i = 0 # for the , problem...
for prop in data:
            # No need to update a property that is in the WHERE clause;
            # it is even dangerous and will raise a warning
if prop not in where_data:
i += 1
val = data[prop]
                # Booleans must be caught, because we want 0 or 1, not True or False
if isinstance(val, bool):
if val:
val = 1
else:
val = 0
# Get a string of the value
val = self.stringify(val)
if i == 1:
query_follow += u"%s='%s' " % (prop, val)
else:
query_follow += u", %s='%s' " % (prop, val)
# Ok for data, now WHERE, same things
where_clause = u" WHERE "
i = 0 # For the 'and' problem
for prop in where_data:
i += 1
val = where_data[prop]
            # Booleans must be caught, because we want 0 or 1, not True or False
if isinstance(val, bool):
if val:
val = 1
else:
val = 0
# Get a string of the value
val = self.stringify(val)
if i == 1:
where_clause += u"%s='%s' " % (prop, val)
else:
where_clause += u"and %s='%s' " % (prop, val)
query = query + query_follow + where_clause
return query
def fetchone(self):
"""Just get an entry"""
return self.db_cursor.fetchone()
def fetchall(self):
"""Get all entry"""
return self.db_cursor.fetchall()
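# Illustrative sketch (not part of Shinken itself): what the query builders above
# produce for a small dict. The table and column names are invented, and dict
# iteration order decides the column order, so the output is shown only roughly.
def _example_db_queries():
    db = DB(table_prefix='shinken_')
    insert_q = db.create_insert_query('hosts', {'host_name': 'srv-01', 'active': True})
    # roughly: INSERT INTO shinken_hosts (host_name, active) VALUES ('srv-01', '1')
    update_q = db.create_update_query('hosts',
                                      {'active': False, 'host_name': 'srv-01'},
                                      {'host_name': 'srv-01'})
    # roughly: UPDATE shinken_hosts set active='0' WHERE host_name='srv-01'
    return insert_q, update_q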
| naparuba/shinken | shinken/db.py | Python | agpl-3.0 | 4,671 |
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility classes to write to and read from non-blocking files and sockets.
Contents:
* `BaseIOStream`: Generic interface for reading and writing.
* `IOStream`: Implementation of BaseIOStream using non-blocking sockets.
* `SSLIOStream`: SSL-aware version of IOStream.
* `PipeIOStream`: Pipe-based IOStream implementation.
"""
from __future__ import absolute_import, division, print_function, with_statement
import collections
import errno
import numbers
import os
import socket
import sys
import re
from tornado.concurrent import TracebackFuture
from tornado import ioloop
from tornado.log import gen_log, app_log
from tornado.netutil import ssl_wrap_socket, ssl_match_hostname, SSLCertificateError
from tornado import stack_context
from tornado.util import errno_from_exception
try:
from tornado.platform.posix import _set_nonblocking
except ImportError:
_set_nonblocking = None
try:
import ssl
except ImportError:
# ssl is not available on Google App Engine
ssl = None
# These errnos indicate that a non-blocking operation must be retried
# at a later time. On most platforms they're the same value, but on
# some they differ.
_ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN)
if hasattr(errno, "WSAEWOULDBLOCK"):
_ERRNO_WOULDBLOCK += (errno.WSAEWOULDBLOCK,)
# These errnos indicate that a connection has been abruptly terminated.
# They should be caught and handled less noisily than other errors.
_ERRNO_CONNRESET = (errno.ECONNRESET, errno.ECONNABORTED, errno.EPIPE,
errno.ETIMEDOUT)
if hasattr(errno, "WSAECONNRESET"):
_ERRNO_CONNRESET += (errno.WSAECONNRESET, errno.WSAECONNABORTED, errno.WSAETIMEDOUT)
# More non-portable errnos:
_ERRNO_INPROGRESS = (errno.EINPROGRESS,)
if hasattr(errno, "WSAEINPROGRESS"):
_ERRNO_INPROGRESS += (errno.WSAEINPROGRESS,)
#######################################################
class StreamClosedError(IOError):
"""Exception raised by `IOStream` methods when the stream is closed.
Note that the close callback is scheduled to run *after* other
callbacks on the stream (to allow for buffered data to be processed),
so you may see this error before you see the close callback.
"""
pass
class UnsatisfiableReadError(Exception):
"""Exception raised when a read cannot be satisfied.
Raised by ``read_until`` and ``read_until_regex`` with a ``max_bytes``
argument.
"""
pass
class StreamBufferFullError(Exception):
"""Exception raised by `IOStream` methods when the buffer is full.
"""
class BaseIOStream(object):
"""A utility class to write to and read from a non-blocking file or socket.
We support a non-blocking ``write()`` and a family of ``read_*()`` methods.
All of the methods take an optional ``callback`` argument and return a
`.Future` only if no callback is given. When the operation completes,
the callback will be run or the `.Future` will resolve with the data
read (or ``None`` for ``write()``). All outstanding ``Futures`` will
resolve with a `StreamClosedError` when the stream is closed; users
of the callback interface will be notified via
`.BaseIOStream.set_close_callback` instead.
When a stream is closed due to an error, the IOStream's ``error``
attribute contains the exception object.
Subclasses must implement `fileno`, `close_fd`, `write_to_fd`,
`read_from_fd`, and optionally `get_fd_error`.
"""
def __init__(self, io_loop=None, max_buffer_size=None,
read_chunk_size=None, max_write_buffer_size=None):
"""`BaseIOStream` constructor.
:arg io_loop: The `.IOLoop` to use; defaults to `.IOLoop.current`.
:arg max_buffer_size: Maximum amount of incoming data to buffer;
defaults to 100MB.
:arg read_chunk_size: Amount of data to read at one time from the
underlying transport; defaults to 64KB.
:arg max_write_buffer_size: Amount of outgoing data to buffer;
defaults to unlimited.
.. versionchanged:: 4.0
Add the ``max_write_buffer_size`` parameter. Changed default
``read_chunk_size`` to 64KB.
"""
self.io_loop = io_loop or ioloop.IOLoop.current()
self.max_buffer_size = max_buffer_size or 104857600
# A chunk size that is too close to max_buffer_size can cause
# spurious failures.
self.read_chunk_size = min(read_chunk_size or 65536,
self.max_buffer_size // 2)
self.max_write_buffer_size = max_write_buffer_size
self.error = None
self._read_buffer = collections.deque()
self._write_buffer = collections.deque()
self._read_buffer_size = 0
self._write_buffer_size = 0
self._write_buffer_frozen = False
self._read_delimiter = None
self._read_regex = None
self._read_max_bytes = None
self._read_bytes = None
self._read_partial = False
self._read_until_close = False
self._read_callback = None
self._read_future = None
self._streaming_callback = None
self._write_callback = None
self._write_future = None
self._close_callback = None
self._connect_callback = None
self._connect_future = None
self._connecting = False
self._state = None
self._pending_callbacks = 0
self._closed = False
def fileno(self):
"""Returns the file descriptor for this stream."""
raise NotImplementedError()
def close_fd(self):
"""Closes the file underlying this stream.
``close_fd`` is called by `BaseIOStream` and should not be called
elsewhere; other users should call `close` instead.
"""
raise NotImplementedError()
def write_to_fd(self, data):
"""Attempts to write ``data`` to the underlying file.
Returns the number of bytes written.
"""
raise NotImplementedError()
def read_from_fd(self):
"""Attempts to read from the underlying file.
Returns ``None`` if there was nothing to read (the socket
returned `~errno.EWOULDBLOCK` or equivalent), otherwise
returns the data. When possible, should return no more than
``self.read_chunk_size`` bytes at a time.
"""
raise NotImplementedError()
def get_fd_error(self):
"""Returns information about any error on the underlying file.
This method is called after the `.IOLoop` has signaled an error on the
file descriptor, and should return an Exception (such as `socket.error`
with additional information, or None if no such information is
available.
"""
return None
def read_until_regex(self, regex, callback=None, max_bytes=None):
"""Asynchronously read until we have matched the given regex.
The result includes the data that matches the regex and anything
that came before it. If a callback is given, it will be run
with the data as an argument; if not, this method returns a
`.Future`.
If ``max_bytes`` is not None, the connection will be closed
if more than ``max_bytes`` bytes have been read and the regex is
not satisfied.
.. versionchanged:: 4.0
Added the ``max_bytes`` argument. The ``callback`` argument is
now optional and a `.Future` will be returned if it is omitted.
"""
future = self._set_read_callback(callback)
self._read_regex = re.compile(regex)
self._read_max_bytes = max_bytes
try:
self._try_inline_read()
except UnsatisfiableReadError as e:
# Handle this the same way as in _handle_events.
gen_log.info("Unsatisfiable read, closing connection: %s" % e)
self.close(exc_info=True)
return future
return future
def read_until(self, delimiter, callback=None, max_bytes=None):
"""Asynchronously read until we have found the given delimiter.
The result includes all the data read including the delimiter.
If a callback is given, it will be run with the data as an argument;
if not, this method returns a `.Future`.
If ``max_bytes`` is not None, the connection will be closed
if more than ``max_bytes`` bytes have been read and the delimiter
is not found.
.. versionchanged:: 4.0
Added the ``max_bytes`` argument. The ``callback`` argument is
now optional and a `.Future` will be returned if it is omitted.
"""
future = self._set_read_callback(callback)
self._read_delimiter = delimiter
self._read_max_bytes = max_bytes
try:
self._try_inline_read()
except UnsatisfiableReadError as e:
# Handle this the same way as in _handle_events.
gen_log.info("Unsatisfiable read, closing connection: %s" % e)
self.close(exc_info=True)
return future
return future
def read_bytes(self, num_bytes, callback=None, streaming_callback=None,
partial=False):
"""Asynchronously read a number of bytes.
If a ``streaming_callback`` is given, it will be called with chunks
of data as they become available, and the final result will be empty.
Otherwise, the result is all the data that was read.
If a callback is given, it will be run with the data as an argument;
if not, this method returns a `.Future`.
If ``partial`` is true, the callback is run as soon as we have
any bytes to return (but never more than ``num_bytes``)
.. versionchanged:: 4.0
Added the ``partial`` argument. The callback argument is now
optional and a `.Future` will be returned if it is omitted.
"""
future = self._set_read_callback(callback)
assert isinstance(num_bytes, numbers.Integral)
self._read_bytes = num_bytes
self._read_partial = partial
self._streaming_callback = stack_context.wrap(streaming_callback)
self._try_inline_read()
return future
def read_until_close(self, callback=None, streaming_callback=None):
"""Asynchronously reads all data from the socket until it is closed.
If a ``streaming_callback`` is given, it will be called with chunks
of data as they become available, and the final result will be empty.
Otherwise, the result is all the data that was read.
If a callback is given, it will be run with the data as an argument;
if not, this method returns a `.Future`.
.. versionchanged:: 4.0
The callback argument is now optional and a `.Future` will
be returned if it is omitted.
"""
future = self._set_read_callback(callback)
self._streaming_callback = stack_context.wrap(streaming_callback)
if self.closed():
if self._streaming_callback is not None:
self._run_read_callback(self._read_buffer_size, True)
self._run_read_callback(self._read_buffer_size, False)
return future
self._read_until_close = True
self._try_inline_read()
return future
def write(self, data, callback=None):
"""Asynchronously write the given data to this stream.
If ``callback`` is given, we call it when all of the buffered write
data has been successfully written to the stream. If there was
previously buffered write data and an old write callback, that
callback is simply overwritten with this new callback.
If no ``callback`` is given, this method returns a `.Future` that
resolves (with a result of ``None``) when the write has been
completed. If `write` is called again before that `.Future` has
resolved, the previous future will be orphaned and will never resolve.
.. versionchanged:: 4.0
Now returns a `.Future` if no callback is given.
"""
assert isinstance(data, bytes)
self._check_closed()
# We use bool(_write_buffer) as a proxy for write_buffer_size>0,
# so never put empty strings in the buffer.
if data:
if (self.max_write_buffer_size is not None and
self._write_buffer_size + len(data) > self.max_write_buffer_size):
                raise StreamBufferFullError("Reached maximum write buffer size")
# Break up large contiguous strings before inserting them in the
# write buffer, so we don't have to recopy the entire thing
# as we slice off pieces to send to the socket.
WRITE_BUFFER_CHUNK_SIZE = 128 * 1024
for i in range(0, len(data), WRITE_BUFFER_CHUNK_SIZE):
self._write_buffer.append(data[i:i + WRITE_BUFFER_CHUNK_SIZE])
self._write_buffer_size += len(data)
if callback is not None:
self._write_callback = stack_context.wrap(callback)
future = None
else:
future = self._write_future = TracebackFuture()
if not self._connecting:
self._handle_write()
if self._write_buffer:
self._add_io_state(self.io_loop.WRITE)
self._maybe_add_error_listener()
return future
def set_close_callback(self, callback):
"""Call the given callback when the stream is closed.
This is not necessary for applications that use the `.Future`
interface; all outstanding ``Futures`` will resolve with a
`StreamClosedError` when the stream is closed.
"""
self._close_callback = stack_context.wrap(callback)
self._maybe_add_error_listener()
def close(self, exc_info=False):
"""Close this stream.
If ``exc_info`` is true, set the ``error`` attribute to the current
exception from `sys.exc_info` (or if ``exc_info`` is a tuple,
use that instead of `sys.exc_info`).
"""
if not self.closed():
if exc_info:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
if any(exc_info):
self.error = exc_info[1]
if self._read_until_close:
if (self._streaming_callback is not None and
self._read_buffer_size):
self._run_read_callback(self._read_buffer_size, True)
self._read_until_close = False
self._run_read_callback(self._read_buffer_size, False)
if self._state is not None:
self.io_loop.remove_handler(self.fileno())
self._state = None
self.close_fd()
self._closed = True
self._maybe_run_close_callback()
def _maybe_run_close_callback(self):
# If there are pending callbacks, don't run the close callback
# until they're done (see _maybe_add_error_handler)
if self.closed() and self._pending_callbacks == 0:
futures = []
if self._read_future is not None:
futures.append(self._read_future)
self._read_future = None
if self._write_future is not None:
futures.append(self._write_future)
self._write_future = None
if self._connect_future is not None:
futures.append(self._connect_future)
self._connect_future = None
for future in futures:
if (isinstance(self.error, (socket.error, IOError)) and
errno_from_exception(self.error) in _ERRNO_CONNRESET):
# Treat connection resets as closed connections so
# clients only have to catch one kind of exception
# to avoid logging.
future.set_exception(StreamClosedError())
else:
future.set_exception(self.error or StreamClosedError())
if self._close_callback is not None:
cb = self._close_callback
self._close_callback = None
self._run_callback(cb)
# Delete any unfinished callbacks to break up reference cycles.
self._read_callback = self._write_callback = None
# Clear the buffers so they can be cleared immediately even
# if the IOStream object is kept alive by a reference cycle.
# TODO: Clear the read buffer too; it currently breaks some tests.
self._write_buffer = None
def reading(self):
"""Returns true if we are currently reading from the stream."""
return self._read_callback is not None or self._read_future is not None
def writing(self):
"""Returns true if we are currently writing to the stream."""
return bool(self._write_buffer)
def closed(self):
"""Returns true if the stream has been closed."""
return self._closed
def set_nodelay(self, value):
"""Sets the no-delay flag for this stream.
By default, data written to TCP streams may be held for a time
to make the most efficient use of bandwidth (according to
Nagle's algorithm). The no-delay flag requests that data be
written as soon as possible, even if doing so would consume
additional bandwidth.
This flag is currently defined only for TCP-based ``IOStreams``.
.. versionadded:: 3.1
"""
pass
def _handle_events(self, fd, events):
if self.closed():
gen_log.warning("Got events for closed stream %s", fd)
return
try:
if self._connecting:
# Most IOLoops will report a write failed connect
# with the WRITE event, but SelectIOLoop reports a
# READ as well so we must check for connecting before
# either.
self._handle_connect()
if self.closed():
return
if events & self.io_loop.READ:
self._handle_read()
if self.closed():
return
if events & self.io_loop.WRITE:
self._handle_write()
if self.closed():
return
if events & self.io_loop.ERROR:
self.error = self.get_fd_error()
# We may have queued up a user callback in _handle_read or
# _handle_write, so don't close the IOStream until those
# callbacks have had a chance to run.
self.io_loop.add_callback(self.close)
return
state = self.io_loop.ERROR
if self.reading():
state |= self.io_loop.READ
if self.writing():
state |= self.io_loop.WRITE
if state == self.io_loop.ERROR and self._read_buffer_size == 0:
# If the connection is idle, listen for reads too so
# we can tell if the connection is closed. If there is
# data in the read buffer we won't run the close callback
# yet anyway, so we don't need to listen in this case.
state |= self.io_loop.READ
if state != self._state:
assert self._state is not None, \
"shouldn't happen: _handle_events without self._state"
self._state = state
self.io_loop.update_handler(self.fileno(), self._state)
except UnsatisfiableReadError as e:
gen_log.info("Unsatisfiable read, closing connection: %s" % e)
self.close(exc_info=True)
except Exception:
gen_log.error("Uncaught exception, closing connection.",
exc_info=True)
self.close(exc_info=True)
raise
def _run_callback(self, callback, *args):
def wrapper():
self._pending_callbacks -= 1
try:
return callback(*args)
except Exception:
app_log.error("Uncaught exception, closing connection.",
exc_info=True)
# Close the socket on an uncaught exception from a user callback
# (It would eventually get closed when the socket object is
# gc'd, but we don't want to rely on gc happening before we
# run out of file descriptors)
self.close(exc_info=True)
# Re-raise the exception so that IOLoop.handle_callback_exception
# can see it and log the error
raise
finally:
self._maybe_add_error_listener()
# We schedule callbacks to be run on the next IOLoop iteration
# rather than running them directly for several reasons:
# * Prevents unbounded stack growth when a callback calls an
# IOLoop operation that immediately runs another callback
# * Provides a predictable execution context for e.g.
# non-reentrant mutexes
# * Ensures that the try/except in wrapper() is run outside
# of the application's StackContexts
with stack_context.NullContext():
# stack_context was already captured in callback, we don't need to
# capture it again for IOStream's wrapper. This is especially
# important if the callback was pre-wrapped before entry to
# IOStream (as in HTTPConnection._header_callback), as we could
# capture and leak the wrong context here.
self._pending_callbacks += 1
self.io_loop.add_callback(wrapper)
def _read_to_buffer_loop(self):
# This method is called from _handle_read and _try_inline_read.
try:
if self._read_bytes is not None:
target_bytes = self._read_bytes
elif self._read_max_bytes is not None:
target_bytes = self._read_max_bytes
elif self.reading():
# For read_until without max_bytes, or
# read_until_close, read as much as we can before
# scanning for the delimiter.
target_bytes = None
else:
target_bytes = 0
next_find_pos = 0
# Pretend to have a pending callback so that an EOF in
# _read_to_buffer doesn't trigger an immediate close
# callback. At the end of this method we'll either
# establish a real pending callback via
# _read_from_buffer or run the close callback.
#
# We need two try statements here so that
# pending_callbacks is decremented before the `except`
# clause below (which calls `close` and does need to
# trigger the callback)
self._pending_callbacks += 1
while not self.closed():
# Read from the socket until we get EWOULDBLOCK or equivalent.
# SSL sockets do some internal buffering, and if the data is
# sitting in the SSL object's buffer select() and friends
# can't see it; the only way to find out if it's there is to
# try to read it.
if self._read_to_buffer() == 0:
break
self._run_streaming_callback()
# If we've read all the bytes we can use, break out of
# this loop. We can't just call read_from_buffer here
# because of subtle interactions with the
# pending_callback and error_listener mechanisms.
#
# If we've reached target_bytes, we know we're done.
if (target_bytes is not None and
self._read_buffer_size >= target_bytes):
break
# Otherwise, we need to call the more expensive find_read_pos.
# It's inefficient to do this on every read, so instead
# do it on the first read and whenever the read buffer
# size has doubled.
if self._read_buffer_size >= next_find_pos:
pos = self._find_read_pos()
if pos is not None:
return pos
next_find_pos = self._read_buffer_size * 2
return self._find_read_pos()
finally:
self._pending_callbacks -= 1
def _handle_read(self):
try:
pos = self._read_to_buffer_loop()
except UnsatisfiableReadError:
raise
except Exception:
gen_log.warning("error on read", exc_info=True)
self.close(exc_info=True)
return
if pos is not None:
self._read_from_buffer(pos)
return
else:
self._maybe_run_close_callback()
def _set_read_callback(self, callback):
assert self._read_callback is None, "Already reading"
assert self._read_future is None, "Already reading"
if callback is not None:
self._read_callback = stack_context.wrap(callback)
else:
self._read_future = TracebackFuture()
return self._read_future
def _run_read_callback(self, size, streaming):
if streaming:
callback = self._streaming_callback
else:
callback = self._read_callback
self._read_callback = self._streaming_callback = None
if self._read_future is not None:
assert callback is None
future = self._read_future
self._read_future = None
future.set_result(self._consume(size))
if callback is not None:
assert self._read_future is None
self._run_callback(callback, self._consume(size))
else:
# If we scheduled a callback, we will add the error listener
# afterwards. If we didn't, we have to do it now.
self._maybe_add_error_listener()
def _try_inline_read(self):
"""Attempt to complete the current read operation from buffered data.
If the read can be completed without blocking, schedules the
read callback on the next IOLoop iteration; otherwise starts
listening for reads on the socket.
"""
# See if we've already got the data from a previous read
self._run_streaming_callback()
pos = self._find_read_pos()
if pos is not None:
self._read_from_buffer(pos)
return
self._check_closed()
try:
pos = self._read_to_buffer_loop()
except Exception:
            # If there was an error in _read_to_buffer, we called close() already,
# but couldn't run the close callback because of _pending_callbacks.
# Before we escape from this function, run the close callback if
# applicable.
self._maybe_run_close_callback()
raise
if pos is not None:
self._read_from_buffer(pos)
return
# We couldn't satisfy the read inline, so either close the stream
# or listen for new data.
if self.closed():
self._maybe_run_close_callback()
else:
self._add_io_state(ioloop.IOLoop.READ)
def _read_to_buffer(self):
"""Reads from the socket and appends the result to the read buffer.
Returns the number of bytes read. Returns 0 if there is nothing
to read (i.e. the read returns EWOULDBLOCK or equivalent). On
error closes the socket and raises an exception.
"""
try:
chunk = self.read_from_fd()
except (socket.error, IOError, OSError) as e:
# ssl.SSLError is a subclass of socket.error
if e.args[0] in _ERRNO_CONNRESET:
# Treat ECONNRESET as a connection close rather than
# an error to minimize log spam (the exception will
# be available on self.error for apps that care).
self.close(exc_info=True)
return
self.close(exc_info=True)
raise
if chunk is None:
return 0
self._read_buffer.append(chunk)
self._read_buffer_size += len(chunk)
if self._read_buffer_size > self.max_buffer_size:
gen_log.error("Reached maximum read buffer size")
self.close()
raise StreamBufferFullError("Reached maximum read buffer size")
return len(chunk)
def _run_streaming_callback(self):
if self._streaming_callback is not None and self._read_buffer_size:
bytes_to_consume = self._read_buffer_size
if self._read_bytes is not None:
bytes_to_consume = min(self._read_bytes, bytes_to_consume)
self._read_bytes -= bytes_to_consume
self._run_read_callback(bytes_to_consume, True)
def _read_from_buffer(self, pos):
"""Attempts to complete the currently-pending read from the buffer.
The argument is either a position in the read buffer or None,
as returned by _find_read_pos.
"""
self._read_bytes = self._read_delimiter = self._read_regex = None
self._read_partial = False
self._run_read_callback(pos, False)
def _find_read_pos(self):
"""Attempts to find a position in the read buffer that satisfies
the currently-pending read.
Returns a position in the buffer if the current read can be satisfied,
or None if it cannot.
"""
if (self._read_bytes is not None and
(self._read_buffer_size >= self._read_bytes or
(self._read_partial and self._read_buffer_size > 0))):
num_bytes = min(self._read_bytes, self._read_buffer_size)
return num_bytes
elif self._read_delimiter is not None:
# Multi-byte delimiters (e.g. '\r\n') may straddle two
# chunks in the read buffer, so we can't easily find them
# without collapsing the buffer. However, since protocols
# using delimited reads (as opposed to reads of a known
# length) tend to be "line" oriented, the delimiter is likely
# to be in the first few chunks. Merge the buffer gradually
# since large merges are relatively expensive and get undone in
# _consume().
if self._read_buffer:
while True:
loc = self._read_buffer[0].find(self._read_delimiter)
if loc != -1:
delimiter_len = len(self._read_delimiter)
self._check_max_bytes(self._read_delimiter,
loc + delimiter_len)
return loc + delimiter_len
if len(self._read_buffer) == 1:
break
_double_prefix(self._read_buffer)
self._check_max_bytes(self._read_delimiter,
len(self._read_buffer[0]))
elif self._read_regex is not None:
if self._read_buffer:
while True:
m = self._read_regex.search(self._read_buffer[0])
if m is not None:
self._check_max_bytes(self._read_regex, m.end())
return m.end()
if len(self._read_buffer) == 1:
break
_double_prefix(self._read_buffer)
self._check_max_bytes(self._read_regex,
len(self._read_buffer[0]))
return None
def _check_max_bytes(self, delimiter, size):
if (self._read_max_bytes is not None and
size > self._read_max_bytes):
raise UnsatisfiableReadError(
"delimiter %r not found within %d bytes" % (
delimiter, self._read_max_bytes))
def _handle_write(self):
while self._write_buffer:
try:
if not self._write_buffer_frozen:
# On windows, socket.send blows up if given a
# write buffer that's too large, instead of just
# returning the number of bytes it was able to
# process. Therefore we must not call socket.send
# with more than 128KB at a time.
_merge_prefix(self._write_buffer, 128 * 1024)
num_bytes = self.write_to_fd(self._write_buffer[0])
if num_bytes == 0:
# With OpenSSL, if we couldn't write the entire buffer,
# the very same string object must be used on the
# next call to send. Therefore we suppress
# merging the write buffer after an incomplete send.
# A cleaner solution would be to set
# SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER, but this is
# not yet accessible from python
# (http://bugs.python.org/issue8240)
self._write_buffer_frozen = True
break
self._write_buffer_frozen = False
_merge_prefix(self._write_buffer, num_bytes)
self._write_buffer.popleft()
self._write_buffer_size -= num_bytes
except (socket.error, IOError, OSError) as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
self._write_buffer_frozen = True
break
else:
if e.args[0] not in _ERRNO_CONNRESET:
# Broken pipe errors are usually caused by connection
                        # reset, and it's better not to log EPIPE errors to
# minimize log spam
gen_log.warning("Write error on %s: %s",
self.fileno(), e)
self.close(exc_info=True)
return
if not self._write_buffer:
if self._write_callback:
callback = self._write_callback
self._write_callback = None
self._run_callback(callback)
if self._write_future:
future = self._write_future
self._write_future = None
future.set_result(None)
def _consume(self, loc):
if loc == 0:
return b""
_merge_prefix(self._read_buffer, loc)
self._read_buffer_size -= loc
return self._read_buffer.popleft()
def _check_closed(self):
if self.closed():
raise StreamClosedError("Stream is closed")
def _maybe_add_error_listener(self):
# This method is part of an optimization: to detect a connection that
# is closed when we're not actively reading or writing, we must listen
# for read events. However, it is inefficient to do this when the
# connection is first established because we are going to read or write
# immediately anyway. Instead, we insert checks at various times to
# see if the connection is idle and add the read listener then.
if self._pending_callbacks != 0:
return
if self._state is None or self._state == ioloop.IOLoop.ERROR:
if self.closed():
self._maybe_run_close_callback()
elif (self._read_buffer_size == 0 and
self._close_callback is not None):
self._add_io_state(ioloop.IOLoop.READ)
def _add_io_state(self, state):
"""Adds `state` (IOLoop.{READ,WRITE} flags) to our event handler.
Implementation notes: Reads and writes have a fast path and a
slow path. The fast path reads synchronously from socket
buffers, while the slow path uses `_add_io_state` to schedule
an IOLoop callback. Note that in both cases, the callback is
run asynchronously with `_run_callback`.
To detect closed connections, we must have called
`_add_io_state` at some point, but we want to delay this as
much as possible so we don't have to set an `IOLoop.ERROR`
listener that will be overwritten by the next slow-path
operation. As long as there are callbacks scheduled for
fast-path ops, those callbacks may do more reads.
        If a sequence of fast-path ops does not end in a slow-path op,
(e.g. for an @asynchronous long-poll request), we must add
the error handler. This is done in `_run_callback` and `write`
(since the write callback is optional so we can have a
fast-path write with no `_run_callback`)
"""
if self.closed():
# connection has been closed, so there can be no future events
return
if self._state is None:
self._state = ioloop.IOLoop.ERROR | state
with stack_context.NullContext():
self.io_loop.add_handler(
self.fileno(), self._handle_events, self._state)
elif not self._state & state:
self._state = self._state | state
self.io_loop.update_handler(self.fileno(), self._state)
class IOStream(BaseIOStream):
r"""Socket-based `IOStream` implementation.
This class supports the read and write methods from `BaseIOStream`
plus a `connect` method.
The ``socket`` parameter may either be connected or unconnected.
For server operations the socket is the result of calling
`socket.accept <socket.socket.accept>`. For client operations the
socket is created with `socket.socket`, and may either be
connected before passing it to the `IOStream` or connected with
`IOStream.connect`.
A very simple (and broken) HTTP client using this class::
import tornado.ioloop
import tornado.iostream
import socket
def send_request():
stream.write(b"GET / HTTP/1.0\r\nHost: friendfeed.com\r\n\r\n")
stream.read_until(b"\r\n\r\n", on_headers)
def on_headers(data):
headers = {}
for line in data.split(b"\r\n"):
parts = line.split(b":")
if len(parts) == 2:
headers[parts[0].strip()] = parts[1].strip()
stream.read_bytes(int(headers[b"Content-Length"]), on_body)
def on_body(data):
print data
stream.close()
tornado.ioloop.IOLoop.instance().stop()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
stream = tornado.iostream.IOStream(s)
stream.connect(("friendfeed.com", 80), send_request)
tornado.ioloop.IOLoop.instance().start()
"""
def __init__(self, socket, *args, **kwargs):
self.socket = socket
self.socket.setblocking(False)
super(IOStream, self).__init__(*args, **kwargs)
def fileno(self):
return self.socket
def close_fd(self):
self.socket.close()
self.socket = None
def get_fd_error(self):
errno = self.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_ERROR)
return socket.error(errno, os.strerror(errno))
def read_from_fd(self):
try:
chunk = self.socket.recv(self.read_chunk_size)
except socket.error as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
return None
else:
raise
if not chunk:
self.close()
return None
return chunk
def write_to_fd(self, data):
return self.socket.send(data)
def connect(self, address, callback=None, server_hostname=None):
"""Connects the socket to a remote address without blocking.
May only be called if the socket passed to the constructor was
not previously connected. The address parameter is in the
same format as for `socket.connect <socket.socket.connect>` for
the type of socket passed to the IOStream constructor,
e.g. an ``(ip, port)`` tuple. Hostnames are accepted here,
but will be resolved synchronously and block the IOLoop.
If you have a hostname instead of an IP address, the `.TCPClient`
class is recommended instead of calling this method directly.
`.TCPClient` will do asynchronous DNS resolution and handle
both IPv4 and IPv6.
If ``callback`` is specified, it will be called with no
arguments when the connection is completed; if not this method
returns a `.Future` (whose result after a successful
connection will be the stream itself).
If specified, the ``server_hostname`` parameter will be used
in SSL connections for certificate validation (if requested in
the ``ssl_options``) and SNI (if supported; requires
Python 3.2+).
Note that it is safe to call `IOStream.write
<BaseIOStream.write>` while the connection is pending, in
which case the data will be written as soon as the connection
is ready. Calling `IOStream` read methods before the socket is
connected works on some platforms but is non-portable.
.. versionchanged:: 4.0
If no callback is given, returns a `.Future`.
"""
self._connecting = True
if callback is not None:
self._connect_callback = stack_context.wrap(callback)
future = None
else:
future = self._connect_future = TracebackFuture()
try:
self.socket.connect(address)
except socket.error as e:
# In non-blocking mode we expect connect() to raise an
# exception with EINPROGRESS or EWOULDBLOCK.
#
# On freebsd, other errors such as ECONNREFUSED may be
# returned immediately when attempting to connect to
# localhost, so handle them the same way as an error
# reported later in _handle_connect.
if (errno_from_exception(e) not in _ERRNO_INPROGRESS and
errno_from_exception(e) not in _ERRNO_WOULDBLOCK):
gen_log.warning("Connect error on fd %s: %s",
self.socket.fileno(), e)
self.close(exc_info=True)
return future
self._add_io_state(self.io_loop.WRITE)
return future
def start_tls(self, server_side, ssl_options=None, server_hostname=None):
"""Convert this `IOStream` to an `SSLIOStream`.
This enables protocols that begin in clear-text mode and
switch to SSL after some initial negotiation (such as the
``STARTTLS`` extension to SMTP and IMAP).
This method cannot be used if there are outstanding reads
or writes on the stream, or if there is any data in the
IOStream's buffer (data in the operating system's socket
buffer is allowed). This means it must generally be used
immediately after reading or writing the last clear-text
data. It can also be used immediately after connecting,
before any reads or writes.
The ``ssl_options`` argument may be either a dictionary
of options or an `ssl.SSLContext`. If a ``server_hostname``
is given, it will be used for certificate verification
(as configured in the ``ssl_options``).
This method returns a `.Future` whose result is the new
`SSLIOStream`. After this method has been called,
any other operation on the original stream is undefined.
If a close callback is defined on this stream, it will be
transferred to the new stream.
.. versionadded:: 4.0
"""
if (self._read_callback or self._read_future or
self._write_callback or self._write_future or
self._connect_callback or self._connect_future or
self._pending_callbacks or self._closed or
self._read_buffer or self._write_buffer):
raise ValueError("IOStream is not idle; cannot convert to SSL")
if ssl_options is None:
ssl_options = {}
socket = self.socket
self.io_loop.remove_handler(socket)
self.socket = None
socket = ssl_wrap_socket(socket, ssl_options, server_side=server_side,
do_handshake_on_connect=False)
orig_close_callback = self._close_callback
self._close_callback = None
future = TracebackFuture()
ssl_stream = SSLIOStream(socket, ssl_options=ssl_options,
io_loop=self.io_loop)
# Wrap the original close callback so we can fail our Future as well.
# If we had an "unwrap" counterpart to this method we would need
# to restore the original callback after our Future resolves
# so that repeated wrap/unwrap calls don't build up layers.
def close_callback():
if not future.done():
future.set_exception(ssl_stream.error or StreamClosedError())
if orig_close_callback is not None:
orig_close_callback()
ssl_stream.set_close_callback(close_callback)
ssl_stream._ssl_connect_callback = lambda: future.set_result(ssl_stream)
ssl_stream.max_buffer_size = self.max_buffer_size
ssl_stream.read_chunk_size = self.read_chunk_size
return future
def _handle_connect(self):
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
self.error = socket.error(err, os.strerror(err))
# IOLoop implementations may vary: some of them return
# an error state before the socket becomes writable, so
# in that case a connection failure would be handled by the
# error path in _handle_events instead of here.
if self._connect_future is None:
gen_log.warning("Connect error on fd %s: %s",
self.socket.fileno(), errno.errorcode[err])
self.close()
return
if self._connect_callback is not None:
callback = self._connect_callback
self._connect_callback = None
self._run_callback(callback)
if self._connect_future is not None:
future = self._connect_future
self._connect_future = None
future.set_result(self)
self._connecting = False
def set_nodelay(self, value):
if (self.socket is not None and
self.socket.family in (socket.AF_INET, socket.AF_INET6)):
try:
self.socket.setsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY, 1 if value else 0)
except socket.error as e:
# Sometimes setsockopt will fail if the socket is closed
# at the wrong time. This can happen with HTTPServer
# resetting the value to false between requests.
if e.errno not in (errno.EINVAL, errno.ECONNRESET):
raise
class SSLIOStream(IOStream):
"""A utility class to write to and read from a non-blocking SSL socket.
If the socket passed to the constructor is already connected,
it should be wrapped with::
ssl.wrap_socket(sock, do_handshake_on_connect=False, **kwargs)
before constructing the `SSLIOStream`. Unconnected sockets will be
wrapped when `IOStream.connect` is finished.
"""
def __init__(self, *args, **kwargs):
"""The ``ssl_options`` keyword argument may either be a dictionary
of keywords arguments for `ssl.wrap_socket`, or an `ssl.SSLContext`
object.
"""
self._ssl_options = kwargs.pop('ssl_options', {})
super(SSLIOStream, self).__init__(*args, **kwargs)
self._ssl_accepting = True
self._handshake_reading = False
self._handshake_writing = False
self._ssl_connect_callback = None
self._server_hostname = None
# If the socket is already connected, attempt to start the handshake.
try:
self.socket.getpeername()
except socket.error:
pass
else:
# Indirectly start the handshake, which will run on the next
# IOLoop iteration and then the real IO state will be set in
# _handle_events.
self._add_io_state(self.io_loop.WRITE)
def reading(self):
return self._handshake_reading or super(SSLIOStream, self).reading()
def writing(self):
return self._handshake_writing or super(SSLIOStream, self).writing()
def _do_ssl_handshake(self):
# Based on code from test_ssl.py in the python stdlib
try:
self._handshake_reading = False
self._handshake_writing = False
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] == ssl.SSL_ERROR_WANT_READ:
self._handshake_reading = True
return
elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
self._handshake_writing = True
return
elif err.args[0] in (ssl.SSL_ERROR_EOF,
ssl.SSL_ERROR_ZERO_RETURN):
return self.close(exc_info=True)
elif err.args[0] == ssl.SSL_ERROR_SSL:
try:
peer = self.socket.getpeername()
except Exception:
peer = '(not connected)'
gen_log.warning("SSL Error on %s %s: %s",
self.socket.fileno(), peer, err)
return self.close(exc_info=True)
raise
except socket.error as err:
# Some port scans (e.g. nmap in -sT mode) have been known
# to cause do_handshake to raise EBADF, so make that error
# quiet as well.
# https://groups.google.com/forum/?fromgroups#!topic/python-tornado/ApucKJat1_0
if (err.args[0] in _ERRNO_CONNRESET or
err.args[0] == errno.EBADF):
return self.close(exc_info=True)
raise
except AttributeError:
# On Linux, if the connection was reset before the call to
# wrap_socket, do_handshake will fail with an
# AttributeError.
return self.close(exc_info=True)
else:
self._ssl_accepting = False
if not self._verify_cert(self.socket.getpeercert()):
self.close()
return
if self._ssl_connect_callback is not None:
callback = self._ssl_connect_callback
self._ssl_connect_callback = None
self._run_callback(callback)
def _verify_cert(self, peercert):
"""Returns True if peercert is valid according to the configured
validation mode and hostname.
The ssl handshake already tested the certificate for a valid
CA signature; the only thing that remains is to check
the hostname.
"""
if isinstance(self._ssl_options, dict):
verify_mode = self._ssl_options.get('cert_reqs', ssl.CERT_NONE)
elif isinstance(self._ssl_options, ssl.SSLContext):
verify_mode = self._ssl_options.verify_mode
assert verify_mode in (ssl.CERT_NONE, ssl.CERT_REQUIRED, ssl.CERT_OPTIONAL)
if verify_mode == ssl.CERT_NONE or self._server_hostname is None:
return True
cert = self.socket.getpeercert()
if cert is None and verify_mode == ssl.CERT_REQUIRED:
gen_log.warning("No SSL certificate given")
return False
try:
ssl_match_hostname(peercert, self._server_hostname)
except SSLCertificateError:
gen_log.warning("Invalid SSL certificate", exc_info=True)
return False
else:
return True
def _handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
return
super(SSLIOStream, self)._handle_read()
def _handle_write(self):
if self._ssl_accepting:
self._do_ssl_handshake()
return
super(SSLIOStream, self)._handle_write()
def connect(self, address, callback=None, server_hostname=None):
# Save the user's callback and run it after the ssl handshake
# has completed.
self._ssl_connect_callback = stack_context.wrap(callback)
self._server_hostname = server_hostname
# Note: Since we don't pass our callback argument along to
# super.connect(), this will always return a Future.
# This is harmless, but a bit less efficient than it could be.
return super(SSLIOStream, self).connect(address, callback=None)
def _handle_connect(self):
# Call the superclass method to check for errors.
super(SSLIOStream, self)._handle_connect()
if self.closed():
return
# When the connection is complete, wrap the socket for SSL
# traffic. Note that we do this by overriding _handle_connect
# instead of by passing a callback to super().connect because
# user callbacks are enqueued asynchronously on the IOLoop,
# but since _handle_events calls _handle_connect immediately
# followed by _handle_write we need this to be synchronous.
#
# The IOLoop will get confused if we swap out self.socket while the
# fd is registered, so remove it now and re-register after
# wrap_socket().
self.io_loop.remove_handler(self.socket)
old_state = self._state
self._state = None
self.socket = ssl_wrap_socket(self.socket, self._ssl_options,
server_hostname=self._server_hostname,
do_handshake_on_connect=False)
self._add_io_state(old_state)
def read_from_fd(self):
if self._ssl_accepting:
# If the handshake hasn't finished yet, there can't be anything
# to read (attempting to read may or may not raise an exception
# depending on the SSL version)
return None
try:
# SSLSocket objects have both a read() and recv() method,
# while regular sockets only have recv().
# The recv() method blocks (at least in python 2.6) if it is
# called when there is nothing to read, so we have to use
# read() instead.
chunk = self.socket.read(self.read_chunk_size)
except ssl.SSLError as e:
# SSLError is a subclass of socket.error, so this except
# block must come first.
if e.args[0] == ssl.SSL_ERROR_WANT_READ:
return None
else:
raise
except socket.error as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
return None
else:
raise
if not chunk:
self.close()
return None
return chunk
class PipeIOStream(BaseIOStream):
"""Pipe-based `IOStream` implementation.
The constructor takes an integer file descriptor (such as one returned
by `os.pipe`) rather than an open file object. Pipes are generally
one-way, so a `PipeIOStream` can be used for reading or writing but not
both.
"""
def __init__(self, fd, *args, **kwargs):
self.fd = fd
_set_nonblocking(fd)
super(PipeIOStream, self).__init__(*args, **kwargs)
def fileno(self):
return self.fd
def close_fd(self):
os.close(self.fd)
def write_to_fd(self, data):
return os.write(self.fd, data)
def read_from_fd(self):
try:
chunk = os.read(self.fd, self.read_chunk_size)
except (IOError, OSError) as e:
if errno_from_exception(e) in _ERRNO_WOULDBLOCK:
return None
elif errno_from_exception(e) == errno.EBADF:
# If the writing half of a pipe is closed, select will
# report it as readable but reads will fail with EBADF.
self.close(exc_info=True)
return None
else:
raise
if not chunk:
self.close()
return None
return chunk
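# Illustrative sketch (not part of tornado itself): wiring the two ends of an
# os.pipe() to PipeIOStream objects. The callback name is invented, and the
# IOLoop must be running for on_line to actually fire.
def _example_pipe_streams():
    r, w = os.pipe()
    read_stream = PipeIOStream(r)
    write_stream = PipeIOStream(w)
    def on_line(data):
        gen_log.info("got line: %r", data)
        read_stream.close()
        write_stream.close()
    read_stream.read_until(b"\n", callback=on_line)
    write_stream.write(b"hello\n")
    return read_stream, write_stream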
def _double_prefix(deque):
"""Grow by doubling, but don't split the second chunk just because the
first one is small.
"""
new_len = max(len(deque[0]) * 2,
(len(deque[0]) + len(deque[1])))
_merge_prefix(deque, new_len)
def _merge_prefix(deque, size):
"""Replace the first entries in a deque of strings with a single
string of up to size bytes.
>>> d = collections.deque(['abc', 'de', 'fghi', 'j'])
>>> _merge_prefix(d, 5); print(d)
deque(['abcde', 'fghi', 'j'])
Strings will be split as necessary to reach the desired size.
>>> _merge_prefix(d, 7); print(d)
deque(['abcdefg', 'hi', 'j'])
>>> _merge_prefix(d, 3); print(d)
deque(['abc', 'defg', 'hi', 'j'])
>>> _merge_prefix(d, 100); print(d)
deque(['abcdefghij'])
"""
if len(deque) == 1 and len(deque[0]) <= size:
return
prefix = []
remaining = size
while deque and remaining > 0:
chunk = deque.popleft()
if len(chunk) > remaining:
deque.appendleft(chunk[remaining:])
chunk = chunk[:remaining]
prefix.append(chunk)
remaining -= len(chunk)
# This data structure normally just contains byte strings, but
# the unittest gets messy if it doesn't use the default str() type,
# so do the merge based on the type of data that's actually present.
if prefix:
deque.appendleft(type(prefix[0])().join(prefix))
if not deque:
deque.appendleft(b"")
def doctests():
import doctest
return doctest.DocTestSuite()
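# Illustrative sketch (not part of tornado itself): the same kind of HTTP exchange
# as the IOStream docstring example, but written against the Future interface the
# read/write/connect methods expose when no callback is given. The host name is a
# placeholder.
def _example_future_based_client():
    from tornado import gen
    @gen.coroutine
    def fetch():
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        stream = IOStream(s)
        yield stream.connect(("example.com", 80))
        yield stream.write(b"GET / HTTP/1.0\r\nHost: example.com\r\n\r\n")
        headers = yield stream.read_until(b"\r\n\r\n")
        body = yield stream.read_until_close()
        stream.close()
        raise gen.Return((headers, body))
    return ioloop.IOLoop.instance().run_sync(fetch)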
| 0xkag/tornado | tornado/iostream.py | Python | apache-2.0 | 59,061 |
"""
This config file extends the test environment configuration
so that we can run the lettuce acceptance tests.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
import os
os.environ['EDXAPP_TEST_MONGO_HOST'] = os.environ.get('EDXAPP_TEST_MONGO_HOST', 'edx.devstack.mongo')
# noinspection PyUnresolvedReferences
from .acceptance import *
LETTUCE_HOST = os.environ['BOK_CHOY_HOSTNAME']
SITE_NAME = '{}:{}'.format(LETTUCE_HOST, LETTUCE_SERVER_PORT)
update_module_store_settings(
MODULESTORE,
doc_store_settings={
'db': 'acceptance_xmodule',
'host': MONGO_HOST,
'port': MONGO_PORT_NUM,
'collection': 'acceptance_modulestore_%s' % seed(),
},
module_store_options={
'fs_root': TEST_ROOT / "data",
},
default_store=os.environ.get('DEFAULT_STORE', 'draft'),
)
CONTENTSTORE = {
'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore',
'DOC_STORE_CONFIG': {
'host': MONGO_HOST,
'port': MONGO_PORT_NUM,
'db': 'acceptance_xcontent_%s' % seed(),
}
}
TRACKING_BACKENDS.update({
'mongo': {
'ENGINE': 'track.backends.mongodb.MongoBackend',
'OPTIONS': {
'database': 'test',
'collection': 'events',
'host': [
'edx.devstack.mongo'
],
'port': 27017
}
}
})
EVENT_TRACKING_BACKENDS['tracking_logs']['OPTIONS']['backends'].update({
'mongo': {
'ENGINE': 'eventtracking.backends.mongodb.MongoBackend',
'OPTIONS': {
'database': 'track',
'host': [
'edx.devstack.mongo'
],
'port': 27017
}
}
})
# Where to run: local, saucelabs, or grid
LETTUCE_SELENIUM_CLIENT = os.environ.get('LETTUCE_SELENIUM_CLIENT', 'grid')
SELENIUM_HOST = 'edx.devstack.{}'.format(LETTUCE_BROWSER)
SELENIUM_PORT = os.environ.get('SELENIUM_PORT', '4444')
SELENIUM_GRID = {
'URL': 'http://{}:{}/wd/hub'.format(SELENIUM_HOST, SELENIUM_PORT),
'BROWSER': LETTUCE_BROWSER,
}
# Point the URL used to test YouTube availability to our stub YouTube server
YOUTUBE['API'] = "http://{}:{}/get_youtube_api/".format(LETTUCE_HOST, YOUTUBE_PORT)
YOUTUBE['METADATA_URL'] = "http://{}:{}/test_youtube/".format(LETTUCE_HOST, YOUTUBE_PORT)
YOUTUBE['TEXT_API']['url'] = "{}:{}/test_transcripts_youtube/".format(LETTUCE_HOST, YOUTUBE_PORT)
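# Illustrative note (not part of the real settings): with the overrides above, the
# derived values resolve along these lines, assuming for example that the base
# acceptance settings define LETTUCE_BROWSER = 'chrome' and LETTUCE_SERVER_PORT = 8003:
#   SITE_NAME == '<BOK_CHOY_HOSTNAME>:8003'
#   SELENIUM_HOST == 'edx.devstack.chrome'
#   SELENIUM_GRID['URL'] == 'http://edx.devstack.chrome:4444/wd/hub'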
| angelapper/edx-platform | lms/envs/acceptance_docker.py | Python | agpl-3.0 | 2,522 |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
You can run this example like this:
.. code:: console
$ rm -rf '/tmp/bar'
$ luigi --module examples.foo examples.Foo --workers 2 --local-scheduler
"""
from __future__ import print_function
import time
import luigi
class Foo(luigi.WrapperTask):
task_namespace = 'examples'
def run(self):
print("Running Foo")
def requires(self):
for i in range(10):
yield Bar(i)
class Bar(luigi.Task):
task_namespace = 'examples'
num = luigi.IntParameter()
def run(self):
time.sleep(1)
self.output().open('w').close()
def output(self):
"""
Returns the target output for this task.
:return: the target output for this task.
:rtype: object (:py:class:`~luigi.target.Target`)
"""
time.sleep(1)
return luigi.LocalTarget('/tmp/bar/%d' % self.num)
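# Illustrative note (not part of the example itself): Bar reports completion purely
# through its output target, so Luigi's default complete() check amounts to "does
# /tmp/bar/<num> exist?". A quick interactive check might look like:
#   b = Bar(num=3)
#   b.output().path   # '/tmp/bar/3'
#   b.complete()      # True once run() has touched the file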
| samuell/luigi | examples/foo.py | Python | apache-2.0 | 1,501 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: ios_static_route
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage static IP routes on Cisco IOS network devices
description:
- This module provides declarative management of static
IP routes on Cisco IOS network devices.
notes:
- Tested against IOS 15.6
options:
prefix:
description:
- Network prefix of the static route.
mask:
description:
- Network prefix mask of the static route.
next_hop:
description:
- Next hop IP of the static route.
admin_distance:
description:
- Admin distance of the static route.
default: 1
aggregate:
description: List of static route definitions.
state:
description:
- State of the static route configuration.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure static route
ios_static_route:
prefix: 192.168.2.0
mask: 255.255.255.0
next_hop: 10.0.0.1
- name: remove configuration
ios_static_route:
prefix: 192.168.2.0
mask: 255.255.255.0
next_hop: 10.0.0.1
state: absent
- name: Add static route aggregates
ios_static_route:
aggregate:
- { prefix: 172.16.32.0, mask: 255.255.255.0, next_hop: 10.0.0.8 }
- { prefix: 172.16.33.0, mask: 255.255.255.0, next_hop: 10.0.0.8 }
- name: Add static route aggregates
ios_static_route:
aggregate:
- { prefix: 172.16.32.0, mask: 255.255.255.0, next_hop: 10.0.0.8 }
- { prefix: 172.16.33.0, mask: 255.255.255.0, next_hop: 10.0.0.8 }
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- ip route 192.168.2.0 255.255.255.0 10.0.0.1
"""
from copy import deepcopy
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import exec_command
from ansible.module_utils.network_common import remove_default_spec
from ansible.module_utils.ios import load_config, run_commands
from ansible.module_utils.ios import ios_argument_spec, check_args
from ipaddress import ip_network
import re
def map_obj_to_commands(updates, module):
commands = list()
want, have = updates
for w in want:
prefix = w['prefix']
mask = w['mask']
next_hop = w['next_hop']
admin_distance = w['admin_distance']
state = w['state']
del w['state']
if state == 'absent' and w in have:
commands.append('no ip route %s %s %s' % (prefix, mask, next_hop))
elif state == 'present' and w not in have:
commands.append('ip route %s %s %s %s' % (prefix, mask, next_hop,
admin_distance))
return commands
def map_config_to_obj(module):
obj = []
rc, out, err = exec_command(module, 'show ip static route')
match = re.search(r'.*Static local RIB for default\s*(.*)$', out, re.DOTALL)
if match and match.group(1):
for r in match.group(1).splitlines():
splitted_line = r.split()
code = splitted_line[0]
if code != 'M':
continue
cidr = ip_network(to_text(splitted_line[1]))
prefix = str(cidr.network_address)
mask = str(cidr.netmask)
next_hop = splitted_line[4]
admin_distance = splitted_line[2][1]
obj.append({'prefix': prefix, 'mask': mask,
'next_hop': next_hop,
'admin_distance': admin_distance})
return obj
def map_params_to_obj(module, required_together=None):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
module._check_required_together(required_together, item)
d = item.copy()
d['admin_distance'] = str(module.params['admin_distance'])
obj.append(d)
else:
obj.append({
'prefix': module.params['prefix'].strip(),
'mask': module.params['mask'].strip(),
'next_hop': module.params['next_hop'].strip(),
'admin_distance': str(module.params['admin_distance']),
'state': module.params['state']
})
return obj
def main():
""" main entry point for module execution
"""
element_spec = dict(
prefix=dict(type='str'),
mask=dict(type='str'),
next_hop=dict(type='str'),
admin_distance=dict(default=1, type='int'),
state=dict(default='present', choices=['present', 'absent'])
)
aggregate_spec = deepcopy(element_spec)
aggregate_spec['prefix'] = dict(required=True)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
)
argument_spec.update(element_spec)
argument_spec.update(ios_argument_spec)
required_one_of = [['aggregate', 'prefix']]
required_together = [['prefix', 'mask', 'next_hop']]
mutually_exclusive = [['aggregate', 'prefix']]
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=required_one_of,
required_together=required_together,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module, required_together=required_together)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have), module)
result['commands'] = commands
if commands:
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| e-gob/plataforma-kioscos-autoatencion | scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/network/ios/ios_static_route.py | Python | bsd-3-clause | 7,090 |
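# A standalone sketch of the want/have diff performed by map_obj_to_commands()
# in the module above: desired routes missing from the device become
# `ip route ...` commands, absent routes still configured become `no ip route ...`.
# The dictionaries below are simplified illustrations, not Ansible output.
def diff_routes(want, have):
    commands = []
    for w in want:
        state = w.pop('state', 'present')
        line = '%(prefix)s %(mask)s %(next_hop)s' % w
        if state == 'absent' and w in have:
            commands.append('no ip route ' + line)
        elif state == 'present' and w not in have:
            commands.append('ip route ' + line)
    return commands

print(diff_routes(
    want=[{'prefix': '192.168.2.0', 'mask': '255.255.255.0',
           'next_hop': '10.0.0.1', 'state': 'present'}],
    have=[]))
# -> ['ip route 192.168.2.0 255.255.255.0 10.0.0.1']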
# Copyright (c) 2005 Divmod, Inc.
# See LICENSE for details.
from twisted.trial import unittest
from twisted.python import lockfile
class LockingTestCase(unittest.TestCase):
def testBasics(self):
lockf = self.mktemp()
lock = lockfile.FilesystemLock(lockf)
self.failUnless(lock.lock())
self.failUnless(lock.clean)
lock.unlock()
self.failUnless(lock.lock())
self.failUnless(lock.clean)
lock.unlock()
def testProtection(self):
lockf = self.mktemp()
lock = lockfile.FilesystemLock(lockf)
self.failUnless(lock.lock())
self.failUnless(lock.clean)
self.failIf(lock.lock())
lock.unlock()
def testBigLoop(self):
lockf = self.mktemp()
lock = lockfile.FilesystemLock(lockf)
self.failUnless(lock.lock())
for i in xrange(500):
self.failIf(lock.lock())
lock.unlock()
def testIsLocked(self):
lockf = self.mktemp()
self.failIf(lockfile.isLocked(lockf))
lock = lockfile.FilesystemLock(lockf)
self.failUnless(lock.lock())
self.failUnless(lockfile.isLocked(lockf))
lock.unlock()
self.failIf(lockfile.isLocked(lockf))
# A multiprocess test would be good here, for the sake of
# completeness. However, it is fairly safe to rely on the
# filesystem to provide the semantics we require.
| UstadMobile/eXePUB | twisted/test/test_lockfile.py | Python | gpl-2.0 | 1,423 |
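# A minimal usage sketch of the FilesystemLock API exercised by the tests
# above: lock() returns False while another holder exists, and unlock()
# releases it. The lock path 'myapp.lock' is an arbitrary example.
from twisted.python import lockfile

lock = lockfile.FilesystemLock('myapp.lock')
if lock.lock():
    try:
        pass  # work that must not run concurrently goes here
    finally:
        lock.unlock()
else:
    print('another process holds the lock')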
"""
Meteorology visualisation examples
==================================
"""
| pp-mo/iris | docs/iris/example_code/Meteorology/__init__.py | Python | lgpl-3.0 | 78 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Eric D Helms <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: katello
short_description: Manage Katello Resources
deprecated:
removed_in: "2.12"
why: "Replaced by re-designed individual modules living at https://github.com/theforeman/foreman-ansible-modules"
alternative: https://github.com/theforeman/foreman-ansible-modules
description:
- Allows the management of Katello resources inside your Foreman server.
version_added: "2.3"
author:
- Eric D Helms (@ehelms)
requirements:
- nailgun >= 0.28.0
- python >= 2.6
- datetime
options:
server_url:
description:
- URL of Foreman server.
required: true
username:
description:
- Username on Foreman server.
required: true
password:
description:
- Password for user accessing Foreman server.
required: true
entity:
description:
- The Foreman resource that the action will be performed on (e.g. organization, host).
choices:
- repository
- manifest
- repository_set
- sync_plan
- content_view
- lifecycle_environment
- activation_key
- product
required: true
action:
description:
      - Action associated with the entity resource, to set or edit in dictionary format.
      - Possible actions in relation to entities.
- "sync (available when entity=product or entity=repository)"
- "publish (available when entity=content_view)"
- "promote (available when entity=content_view)"
choices:
- sync
- publish
- promote
required: false
params:
description:
      - Parameters associated with the entity resource and action, to set or edit in dictionary format.
      - Each choice may only be available with specific entities and actions.
      - "Possible choices are in the format of param_name ([entity,action,action,...],[entity,..],...)."
- The action "None" means no action specified.
- Possible Params in relation to entity and action.
- "name ([product,sync,None], [repository,sync], [repository_set,None], [sync_plan,None],"
- "[content_view,promote,publish,None], [lifecycle_environment,None], [activation_key,None])"
- "organization ([product,sync,None] ,[repository,sync,None], [repository_set,None], [sync_plan,None], "
- "[content_view,promote,publish,None], [lifecycle_environment,None], [activation_key,None])"
- "content ([manifest,None])"
- "product ([repository,sync,None], [repository_set,None], [sync_plan,None])"
- "basearch ([repository_set,None])"
- "releaserver ([repository_set,None])"
- "sync_date ([sync_plan,None])"
- "interval ([sync_plan,None])"
- "repositories ([content_view,None])"
- "from_environment ([content_view,promote])"
- "to_environment([content_view,promote])"
- "prior ([lifecycle_environment,None])"
- "content_view ([activation_key,None])"
- "lifecycle_environment ([activation_key,None])"
required: true
task_timeout:
description:
- The timeout in seconds to wait for the started Foreman action to finish.
      - If the timeout is reached and the Foreman action did not complete, the Ansible task fails. However, the Foreman action does not get canceled.
default: 1000
version_added: "2.7"
required: false
verify_ssl:
description:
      - Verify the SSL/HTTPS connection (e.g. for a valid certificate).
default: false
type: bool
required: false
'''
EXAMPLES = '''
---
# Simple Example:
- name: Create Product
katello:
username: admin
password: admin
server_url: https://fakeserver.com
entity: product
params:
name: Centos 7
delegate_to: localhost
# Abstraction Example:
# katello.yml
---
- name: "{{ name }}"
katello:
username: admin
password: admin
server_url: https://fakeserver.com
entity: "{{ entity }}"
params: "{{ params }}"
delegate_to: localhost
# tasks.yml
---
- include: katello.yml
vars:
name: Create Dev Environment
entity: lifecycle_environment
params:
name: Dev
prior: Library
organization: Default Organization
- include: katello.yml
vars:
name: Create Centos Product
entity: product
params:
name: Centos 7
organization: Default Organization
- include: katello.yml
vars:
name: Create 7.2 Repository
entity: repository
params:
name: Centos 7.2
product: Centos 7
organization: Default Organization
content_type: yum
url: http://mirror.centos.org/centos/7/os/x86_64/
- include: katello.yml
vars:
name: Create Centos 7 View
entity: content_view
params:
name: Centos 7 View
organization: Default Organization
repositories:
- name: Centos 7.2
product: Centos 7
- include: katello.yml
vars:
name: Enable RHEL Product
entity: repository_set
params:
name: Red Hat Enterprise Linux 7 Server (RPMs)
product: Red Hat Enterprise Linux Server
organization: Default Organization
basearch: x86_64
releasever: 7
- include: katello.yml
vars:
    name: Promote Contentview Environment with longer timeout
task_timeout: 10800
entity: content_view
action: promote
params:
name: MyContentView
organization: MyOrganisation
from_environment: Testing
to_environment: Production
# Best Practices
# In Foreman, actions can run in parallel.
# When a conflicting action is already running,
# the task will fail instantly instead of waiting for the already running action to complete.
# So you should use an "until ... is success" retry loop to catch this.
- name: Promote Contentview Environment with increased Timeout
katello:
username: ansibleuser
password: supersecret
task_timeout: 10800
entity: content_view
action: promote
params:
name: MyContentView
organization: MyOrganisation
from_environment: Testing
to_environment: Production
register: task_result
until: task_result is success
retries: 9
delay: 120
'''
RETURN = '''# '''
import datetime
import os
import traceback
try:
from nailgun import entities, entity_fields, entity_mixins
from nailgun.config import ServerConfig
HAS_NAILGUN_PACKAGE = True
except Exception:
HAS_NAILGUN_PACKAGE = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
class NailGun(object):
def __init__(self, server, entities, module, task_timeout):
self._server = server
self._entities = entities
self._module = module
entity_mixins.TASK_TIMEOUT = task_timeout
def find_organization(self, name, **params):
org = self._entities.Organization(self._server, name=name, **params)
response = org.search(set(), {'search': 'name={0}'.format(name)})
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No organization found for %s" % name)
def find_lifecycle_environment(self, name, organization):
org = self.find_organization(organization)
lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=name, organization=org)
response = lifecycle_env.search()
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Lifecycle Found found for %s" % name)
def find_product(self, name, organization):
org = self.find_organization(organization)
product = self._entities.Product(self._server, name=name, organization=org)
response = product.search()
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Product found for %s" % name)
def find_repository(self, name, product, organization):
product = self.find_product(product, organization)
repository = self._entities.Repository(self._server, name=name, product=product)
repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
repository.organization = product.organization
response = repository.search()
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Repository found for %s" % name)
def find_content_view(self, name, organization):
org = self.find_organization(organization)
content_view = self._entities.ContentView(self._server, name=name, organization=org)
response = content_view.search()
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Content View found for %s" % name)
def organization(self, params):
name = params['name']
del params['name']
org = self.find_organization(name, **params)
if org:
org = self._entities.Organization(self._server, name=name, id=org.id, **params)
org.update()
else:
org = self._entities.Organization(self._server, name=name, **params)
org.create()
return True
def manifest(self, params):
org = self.find_organization(params['organization'])
params['organization'] = org.id
try:
file = open(os.getcwd() + params['content'], 'r')
content = file.read()
finally:
file.close()
manifest = self._entities.Subscription(self._server)
try:
manifest.upload(
data={'organization_id': org.id},
files={'content': content}
)
return True
except Exception as e:
if "Import is the same as existing data" in e.message:
return False
else:
self._module.fail_json(msg="Manifest import failed with %s" % to_native(e),
exception=traceback.format_exc())
def product(self, params):
org = self.find_organization(params['organization'])
params['organization'] = org.id
product = self._entities.Product(self._server, **params)
response = product.search()
if len(response) == 1:
product.id = response[0].id
product.update()
else:
product.create()
return True
def sync_product(self, params):
org = self.find_organization(params['organization'])
product = self.find_product(params['name'], org.name)
return product.sync()
def repository(self, params):
product = self.find_product(params['product'], params['organization'])
params['product'] = product.id
del params['organization']
repository = self._entities.Repository(self._server, **params)
repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
repository.organization = product.organization
response = repository.search()
if len(response) == 1:
repository.id = response[0].id
repository.update()
else:
repository.create()
return True
def sync_repository(self, params):
org = self.find_organization(params['organization'])
repository = self.find_repository(params['name'], params['product'], org.name)
return repository.sync()
def repository_set(self, params):
product = self.find_product(params['product'], params['organization'])
del params['product']
del params['organization']
if not product:
return False
else:
reposet = self._entities.RepositorySet(self._server, product=product, name=params['name'])
reposet = reposet.search()[0]
formatted_name = [params['name'].replace('(', '').replace(')', '')]
formatted_name.append(params['basearch'])
if 'releasever' in params:
formatted_name.append(params['releasever'])
formatted_name = ' '.join(formatted_name)
repository = self._entities.Repository(self._server, product=product, name=formatted_name)
repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
repository.organization = product.organization
repository = repository.search()
if len(repository) == 0:
if 'releasever' in params:
reposet.enable(data={'basearch': params['basearch'], 'releasever': params['releasever']})
else:
reposet.enable(data={'basearch': params['basearch']})
return True
def sync_plan(self, params):
org = self.find_organization(params['organization'])
params['organization'] = org.id
params['sync_date'] = datetime.datetime.strptime(params['sync_date'], "%H:%M")
products = params['products']
del params['products']
sync_plan = self._entities.SyncPlan(
self._server,
name=params['name'],
organization=org
)
response = sync_plan.search()
sync_plan.sync_date = params['sync_date']
sync_plan.interval = params['interval']
if len(response) == 1:
sync_plan.id = response[0].id
sync_plan.update()
else:
response = sync_plan.create()
sync_plan.id = response[0].id
if products:
ids = []
for name in products:
product = self.find_product(name, org.name)
ids.append(product.id)
sync_plan.add_products(data={'product_ids': ids})
return True
def content_view(self, params):
org = self.find_organization(params['organization'])
content_view = self._entities.ContentView(self._server, name=params['name'], organization=org)
response = content_view.search()
if len(response) == 1:
content_view.id = response[0].id
content_view.update()
else:
content_view = content_view.create()
if params['repositories']:
repos = []
for repository in params['repositories']:
repository = self.find_repository(repository['name'], repository['product'], org.name)
repos.append(repository)
content_view.repository = repos
content_view.update(['repository'])
def find_content_view_version(self, name, organization, environment):
env = self.find_lifecycle_environment(environment, organization)
content_view = self.find_content_view(name, organization)
content_view_version = self._entities.ContentViewVersion(self._server, content_view=content_view)
response = content_view_version.search(['content_view'], {'environment_id': env.id})
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Content View version found for %s" % response)
def publish(self, params):
content_view = self.find_content_view(params['name'], params['organization'])
return content_view.publish()
def promote(self, params):
to_environment = self.find_lifecycle_environment(params['to_environment'], params['organization'])
version = self.find_content_view_version(params['name'], params['organization'], params['from_environment'])
data = {'environment_id': to_environment.id}
return version.promote(data=data)
def lifecycle_environment(self, params):
org = self.find_organization(params['organization'])
prior_env = self.find_lifecycle_environment(params['prior'], params['organization'])
lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=params['name'], organization=org, prior=prior_env)
response = lifecycle_env.search()
if len(response) == 1:
lifecycle_env.id = response[0].id
lifecycle_env.update()
else:
lifecycle_env.create()
return True
def activation_key(self, params):
org = self.find_organization(params['organization'])
activation_key = self._entities.ActivationKey(self._server, name=params['name'], organization=org)
response = activation_key.search()
if len(response) == 1:
activation_key.id = response[0].id
activation_key.update()
else:
activation_key.create()
if params['content_view']:
content_view = self.find_content_view(params['content_view'], params['organization'])
lifecycle_environment = self.find_lifecycle_environment(params['lifecycle_environment'], params['organization'])
activation_key.content_view = content_view
activation_key.environment = lifecycle_environment
activation_key.update()
return True
def main():
module = AnsibleModule(
argument_spec=dict(
server_url=dict(type='str', required=True),
username=dict(type='str', required=True, no_log=True),
password=dict(type='str', required=True, no_log=True),
entity=dict(type='str', required=True,
choices=['repository', 'manifest', 'repository_set', 'sync_plan',
'content_view', 'lifecycle_environment', 'activation_key', 'product']),
action=dict(type='str', choices=['sync', 'publish', 'promote']),
verify_ssl=dict(type='bool', default=False),
task_timeout=dict(type='int', default=1000),
params=dict(type='dict', required=True, no_log=True),
),
supports_check_mode=True,
)
if not HAS_NAILGUN_PACKAGE:
module.fail_json(msg="Missing required nailgun module (check docs or install with: pip install nailgun")
server_url = module.params['server_url']
username = module.params['username']
password = module.params['password']
entity = module.params['entity']
action = module.params['action']
params = module.params['params']
verify_ssl = module.params['verify_ssl']
task_timeout = module.params['task_timeout']
server = ServerConfig(
url=server_url,
auth=(username, password),
verify=verify_ssl
)
ng = NailGun(server, entities, module, task_timeout)
    # Let's make a connection to the server with username and password
try:
org = entities.Organization(server)
org.search()
except Exception as e:
module.fail_json(msg="Failed to connect to Foreman server: %s " % e)
result = False
if entity == 'product':
if action == 'sync':
result = ng.sync_product(params)
else:
result = ng.product(params)
elif entity == 'repository':
if action == 'sync':
result = ng.sync_repository(params)
else:
result = ng.repository(params)
elif entity == 'manifest':
result = ng.manifest(params)
elif entity == 'repository_set':
result = ng.repository_set(params)
elif entity == 'sync_plan':
result = ng.sync_plan(params)
elif entity == 'content_view':
if action == 'publish':
result = ng.publish(params)
elif action == 'promote':
result = ng.promote(params)
else:
result = ng.content_view(params)
elif entity == 'lifecycle_environment':
result = ng.lifecycle_environment(params)
elif entity == 'activation_key':
result = ng.activation_key(params)
else:
module.fail_json(changed=False, result="Unsupported entity supplied")
module.exit_json(changed=result, result="%s updated" % entity)
if __name__ == '__main__':
main()
| tersmitten/ansible | lib/ansible/modules/remote_management/foreman/_katello.py | Python | gpl-3.0 | 20,771 |
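# A hedged sketch (not taken from the module above) of the entity/action
# routing that main() writes as an if/elif chain; the same decision can be
# expressed as a lookup table keyed by (entity, action). `ng` stands for the
# NailGun wrapper instance that main() builds.
def dispatch(ng, entity, action, params):
    table = {
        ('product', 'sync'): ng.sync_product,
        ('product', None): ng.product,
        ('repository', 'sync'): ng.sync_repository,
        ('repository', None): ng.repository,
        ('content_view', 'publish'): ng.publish,
        ('content_view', 'promote'): ng.promote,
        ('content_view', None): ng.content_view,
    }
    try:
        return table[(entity, action)](params)
    except KeyError:
        raise ValueError('Unsupported entity/action: %s/%s' % (entity, action))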
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.monkey}.
"""
from __future__ import division, absolute_import
from twisted.trial import unittest
from twisted.python.monkey import MonkeyPatcher
class TestObj:
def __init__(self):
self.foo = 'foo value'
self.bar = 'bar value'
self.baz = 'baz value'
class MonkeyPatcherTests(unittest.SynchronousTestCase):
"""
Tests for L{MonkeyPatcher} monkey-patching class.
"""
def setUp(self):
self.testObject = TestObj()
self.originalObject = TestObj()
self.monkeyPatcher = MonkeyPatcher()
def test_empty(self):
"""
A monkey patcher without patches shouldn't change a thing.
"""
self.monkeyPatcher.patch()
# We can't assert that all state is unchanged, but at least we can
# check our test object.
self.assertEqual(self.originalObject.foo, self.testObject.foo)
self.assertEqual(self.originalObject.bar, self.testObject.bar)
self.assertEqual(self.originalObject.baz, self.testObject.baz)
def test_constructWithPatches(self):
"""
Constructing a L{MonkeyPatcher} with patches should add all of the
given patches to the patch list.
"""
patcher = MonkeyPatcher((self.testObject, 'foo', 'haha'),
(self.testObject, 'bar', 'hehe'))
patcher.patch()
self.assertEqual('haha', self.testObject.foo)
self.assertEqual('hehe', self.testObject.bar)
self.assertEqual(self.originalObject.baz, self.testObject.baz)
def test_patchExisting(self):
"""
Patching an attribute that exists sets it to the value defined in the
patch.
"""
self.monkeyPatcher.addPatch(self.testObject, 'foo', 'haha')
self.monkeyPatcher.patch()
self.assertEqual(self.testObject.foo, 'haha')
def test_patchNonExisting(self):
"""
Patching a non-existing attribute fails with an C{AttributeError}.
"""
self.monkeyPatcher.addPatch(self.testObject, 'nowhere',
'blow up please')
self.assertRaises(AttributeError, self.monkeyPatcher.patch)
def test_patchAlreadyPatched(self):
"""
Adding a patch for an object and attribute that already have a patch
overrides the existing patch.
"""
self.monkeyPatcher.addPatch(self.testObject, 'foo', 'blah')
self.monkeyPatcher.addPatch(self.testObject, 'foo', 'BLAH')
self.monkeyPatcher.patch()
self.assertEqual(self.testObject.foo, 'BLAH')
self.monkeyPatcher.restore()
self.assertEqual(self.testObject.foo, self.originalObject.foo)
def test_restoreTwiceIsANoOp(self):
"""
Restoring an already-restored monkey patch is a no-op.
"""
self.monkeyPatcher.addPatch(self.testObject, 'foo', 'blah')
self.monkeyPatcher.patch()
self.monkeyPatcher.restore()
self.assertEqual(self.testObject.foo, self.originalObject.foo)
self.monkeyPatcher.restore()
self.assertEqual(self.testObject.foo, self.originalObject.foo)
def test_runWithPatchesDecoration(self):
"""
runWithPatches should run the given callable, passing in all arguments
and keyword arguments, and return the return value of the callable.
"""
log = []
def f(a, b, c=None):
log.append((a, b, c))
return 'foo'
result = self.monkeyPatcher.runWithPatches(f, 1, 2, c=10)
self.assertEqual('foo', result)
self.assertEqual([(1, 2, 10)], log)
def test_repeatedRunWithPatches(self):
"""
We should be able to call the same function with runWithPatches more
than once. All patches should apply for each call.
"""
def f():
return (self.testObject.foo, self.testObject.bar,
self.testObject.baz)
self.monkeyPatcher.addPatch(self.testObject, 'foo', 'haha')
result = self.monkeyPatcher.runWithPatches(f)
self.assertEqual(
('haha', self.originalObject.bar, self.originalObject.baz), result)
result = self.monkeyPatcher.runWithPatches(f)
self.assertEqual(
('haha', self.originalObject.bar, self.originalObject.baz),
result)
def test_runWithPatchesRestores(self):
"""
C{runWithPatches} should restore the original values after the function
has executed.
"""
self.monkeyPatcher.addPatch(self.testObject, 'foo', 'haha')
self.assertEqual(self.originalObject.foo, self.testObject.foo)
self.monkeyPatcher.runWithPatches(lambda: None)
self.assertEqual(self.originalObject.foo, self.testObject.foo)
def test_runWithPatchesRestoresOnException(self):
"""
Test runWithPatches restores the original values even when the function
raises an exception.
"""
def _():
self.assertEqual(self.testObject.foo, 'haha')
self.assertEqual(self.testObject.bar, 'blahblah')
raise RuntimeError("Something went wrong!")
self.monkeyPatcher.addPatch(self.testObject, 'foo', 'haha')
self.monkeyPatcher.addPatch(self.testObject, 'bar', 'blahblah')
self.assertRaises(RuntimeError, self.monkeyPatcher.runWithPatches, _)
self.assertEqual(self.testObject.foo, self.originalObject.foo)
self.assertEqual(self.testObject.bar, self.originalObject.bar)
| Architektor/PySnip | venv/lib/python2.7/site-packages/twisted/test/test_monkey.py | Python | gpl-3.0 | 5,637 |
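# A minimal usage sketch of the MonkeyPatcher API the tests above exercise
# (constructor patches plus runWithPatches). The Settings class is a made-up
# stand-in, not anything from Twisted.
from twisted.python.monkey import MonkeyPatcher

class Settings(object):
    debug = False

settings = Settings()
patcher = MonkeyPatcher((settings, 'debug', True))

def report():
    return settings.debug

print(patcher.runWithPatches(report))  # True while the patch is applied
print(settings.debug)                  # False again: the patch was restored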
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2017, Ilya Etingof <[email protected]>
# License: http://pyasn1.sf.net/license.html
#
from pyasn1.type import univ, char, tag
__all__ = ['ObjectDescriptor', 'GeneralizedTime', 'UTCTime']
NoValue = univ.NoValue
noValue = univ.noValue
class ObjectDescriptor(char.GraphicString):
__doc__ = char.GraphicString.__doc__
#: Default :py:class:`~pyasn1.type.tag.TagSet` object for |ASN.1| objects
tagSet = char.GraphicString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 7)
)
class GeneralizedTime(char.VisibleString):
__doc__ = char.GraphicString.__doc__
#: Default :py:class:`~pyasn1.type.tag.TagSet` object for |ASN.1| objects
tagSet = char.VisibleString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 24)
)
class UTCTime(char.VisibleString):
__doc__ = char.GraphicString.__doc__
#: Default :py:class:`~pyasn1.type.tag.TagSet` object for |ASN.1| objects
tagSet = char.VisibleString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 23)
)
| frerepoulet/ZeroNet | src/lib/pyasn1/type/useful.py | Python | gpl-2.0 | 1,159 |
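# A short usage sketch (assuming pyasn1 is installed) for the types defined
# above: they behave like character strings but carry the implicit
# UNIVERSAL 23/24 tags assigned in their tagSet definitions.
from pyasn1.type import useful

utc = useful.UTCTime('170916054254Z')
gen = useful.GeneralizedTime('20170916054254Z')
print(str(utc))
print(gen.tagSet == useful.GeneralizedTime.tagSet)  # True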
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import cPickle
import os
import mxnet as mx
from ..logger import logger
from ..config import config, default, generate_config
from ..dataset import *
def reeval(args):
# load imdb
imdb = eval(args.dataset)(args.image_set, args.root_path, args.dataset_path)
# load detection results
cache_file = os.path.join(imdb.cache_path, imdb.name, 'detections.pkl')
with open(cache_file) as f:
detections = cPickle.load(f)
# eval
imdb.evaluate_detections(detections)
def parse_args():
parser = argparse.ArgumentParser(description='imdb test')
# general
parser.add_argument('--network', help='network name', default=default.network, type=str)
parser.add_argument('--dataset', help='dataset name', default=default.dataset, type=str)
args, rest = parser.parse_known_args()
generate_config(args.network, args.dataset)
parser.add_argument('--image_set', help='image_set name', default=default.image_set, type=str)
parser.add_argument('--root_path', help='output data folder', default=default.root_path, type=str)
parser.add_argument('--dataset_path', help='dataset path', default=default.dataset_path, type=str)
# other
parser.add_argument('--no_shuffle', help='disable random shuffle', action='store_true')
args = parser.parse_args()
return args
def main():
args = parse_args()
logger.info('Called with argument: %s' % args)
reeval(args)
if __name__ == '__main__':
main()
| Mega-DatA-Lab/mxnet | example/rcnn/rcnn/tools/reeval.py | Python | apache-2.0 | 2,273 |
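# A standalone sketch of the two-phase argument parsing used by parse_args()
# above: parse_known_args() reads --dataset first so later defaults can
# depend on it before the final parse. The default strings are illustrative.
import argparse

parser = argparse.ArgumentParser(description='two-phase parsing sketch')
parser.add_argument('--dataset', default='PascalVOC')
args, rest = parser.parse_known_args(['--dataset', 'coco', '--image_set', 'train'])
# ...a config could be generated from args.dataset at this point...
parser.add_argument('--image_set',
                    default='train2017' if args.dataset == 'coco' else 'trainval')
args = parser.parse_args(['--dataset', 'coco', '--image_set', 'train'])
print(args.dataset, args.image_set)  # coco train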
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""A simple utility for constructing filesystem-like trees from beets
libraries.
"""
from __future__ import division, absolute_import, print_function
from collections import namedtuple
from beets import util
Node = namedtuple('Node', ['files', 'dirs'])
def _insert(node, path, itemid):
"""Insert an item into a virtual filesystem node."""
if len(path) == 1:
# Last component. Insert file.
node.files[path[0]] = itemid
else:
# In a directory.
dirname = path[0]
rest = path[1:]
if dirname not in node.dirs:
node.dirs[dirname] = Node({}, {})
_insert(node.dirs[dirname], rest, itemid)
def libtree(lib):
"""Generates a filesystem-like directory tree for the files
contained in `lib`. Filesystem nodes are (files, dirs) named
tuples in which both components are dictionaries. The first
maps filenames to Item ids. The second maps directory names to
child node tuples.
"""
root = Node({}, {})
for item in lib.items():
dest = item.destination(fragment=True)
parts = util.components(dest)
_insert(root, parts, item.id)
return root
| clinton-hall/nzbToMedia | libs/common/beets/vfs.py | Python | gpl-3.0 | 1,839 |
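# A self-contained sketch of the Node/_insert structure described in the
# docstring above: path components are folded into nested (files, dirs)
# tuples. The paths and item ids below are made up for illustration.
from collections import namedtuple

Node = namedtuple('Node', ['files', 'dirs'])

def insert(node, parts, itemid):
    if len(parts) == 1:
        node.files[parts[0]] = itemid
    else:
        child = node.dirs.setdefault(parts[0], Node({}, {}))
        insert(child, parts[1:], itemid)

root = Node({}, {})
insert(root, ['Artist', 'Album', '01 Track.mp3'], 1)
insert(root, ['Artist', 'Album', '02 Track.mp3'], 2)
print(root.dirs['Artist'].dirs['Album'].files)
# {'01 Track.mp3': 1, '02 Track.mp3': 2}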
from bar import path | siosio/intellij-community | python/testData/quickFixes/PyAddImportQuickFixTest/combinedElementOrdering/first/second/__init__.py | Python | apache-2.0 | 20 |
from __future__ import division, absolute_import, print_function
import re
import os
import sys
import warnings
import platform
import tempfile
from subprocess import Popen, PIPE, STDOUT
from numpy.distutils.fcompiler import FCompiler
from numpy.distutils.exec_command import exec_command
from numpy.distutils.misc_util import msvc_runtime_library
from numpy.distutils.compat import get_exception
compilers = ['GnuFCompiler', 'Gnu95FCompiler']
TARGET_R = re.compile("Target: ([a-zA-Z0-9_\-]*)")
# XXX: handle cross compilation
def is_win64():
return sys.platform == "win32" and platform.architecture()[0] == "64bit"
def is_win32():
return sys.platform == "win32" and platform.architecture()[0] == "32bit"
if is_win64():
#_EXTRAFLAGS = ["-fno-leading-underscore"]
_EXTRAFLAGS = []
else:
_EXTRAFLAGS = []
class GnuFCompiler(FCompiler):
compiler_type = 'gnu'
compiler_aliases = ('g77',)
description = 'GNU Fortran 77 compiler'
def gnu_version_match(self, version_string):
"""Handle the different versions of GNU fortran compilers"""
# Strip warning(s) that may be emitted by gfortran
while version_string.startswith('gfortran: warning'):
version_string = version_string[version_string.find('\n')+1:]
# Gfortran versions from after 2010 will output a simple string
# (usually "x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older
# gfortrans may still return long version strings (``-dumpversion`` was
# an alias for ``--version``)
if len(version_string) <= 20:
# Try to find a valid version string
m = re.search(r'([0-9.]+)', version_string)
if m:
# g77 provides a longer version string that starts with GNU
# Fortran
if version_string.startswith('GNU Fortran'):
return ('g77', m.group(1))
# gfortran only outputs a version string such as #.#.#, so check
# if the match is at the start of the string
elif m.start() == 0:
return ('gfortran', m.group(1))
else:
# Output probably from --version, try harder:
m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string)
if m:
return ('gfortran', m.group(1))
m = re.search(r'GNU Fortran.*?\-?([0-9-.]+)', version_string)
if m:
v = m.group(1)
if v.startswith('0') or v.startswith('2') or v.startswith('3'):
# the '0' is for early g77's
return ('g77', v)
else:
# at some point in the 4.x series, the ' 95' was dropped
# from the version string
return ('gfortran', v)
# If still nothing, raise an error to make the problem easy to find.
err = 'A valid Fortran version was not found in this string:\n'
raise ValueError(err + version_string)
def version_match(self, version_string):
v = self.gnu_version_match(version_string)
if not v or v[0] != 'g77':
return None
return v[1]
possible_executables = ['g77', 'f77']
executables = {
'version_cmd' : [None, "-dumpversion"],
'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"],
'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes
'compiler_fix' : None,
'linker_so' : [None, "-g", "-Wall"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"],
'linker_exe' : [None, "-g", "-Wall"]
}
module_dir_switch = None
module_include_switch = None
# Cygwin: f771: warning: -fPIC ignored for target (all code is
# position independent)
if os.name != 'nt' and sys.platform != 'cygwin':
pic_flags = ['-fPIC']
# use -mno-cygwin for g77 when Python is not Cygwin-Python
if sys.platform == 'win32':
for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']:
executables[key].append('-mno-cygwin')
g2c = 'g2c'
suggested_f90_compiler = 'gnu95'
def get_flags_linker_so(self):
opt = self.linker_so[1:]
if sys.platform == 'darwin':
target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None)
# If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value
# and leave it alone. But, distutils will complain if the
# environment's value is different from the one in the Python
# Makefile used to build Python. We let disutils handle this
# error checking.
if not target:
# If MACOSX_DEPLOYMENT_TARGET is not set in the environment,
# we try to get it first from the Python Makefile and then we
# fall back to setting it to 10.3 to maximize the set of
# versions we can work with. This is a reasonable default
# even when using the official Python dist and those derived
# from it.
import distutils.sysconfig as sc
g = {}
filename = sc.get_makefile_filename()
sc.parse_makefile(filename, g)
target = g.get('MACOSX_DEPLOYMENT_TARGET', '10.3')
os.environ['MACOSX_DEPLOYMENT_TARGET'] = target
if target == '10.3':
s = 'Env. variable MACOSX_DEPLOYMENT_TARGET set to 10.3'
warnings.warn(s)
opt.extend(['-undefined', 'dynamic_lookup', '-bundle'])
else:
opt.append("-shared -Wl,-gc-sections -Wl,-s")
if sys.platform.startswith('sunos'):
# SunOS often has dynamically loaded symbols defined in the
# static library libg2c.a The linker doesn't like this. To
# ignore the problem, use the -mimpure-text flag. It isn't
# the safest thing, but seems to work. 'man gcc' says:
# ".. Instead of using -mimpure-text, you should compile all
# source code with -fpic or -fPIC."
opt.append('-mimpure-text')
return opt
def get_libgcc_dir(self):
status, output = exec_command(self.compiler_f77 +
['-print-libgcc-file-name'],
use_tee=0)
if not status:
return os.path.dirname(output)
return None
def get_library_dirs(self):
opt = []
if sys.platform[:5] != 'linux':
d = self.get_libgcc_dir()
if d:
# if windows and not cygwin, libg2c lies in a different folder
if sys.platform == 'win32' and not d.startswith('/usr/lib'):
d = os.path.normpath(d)
path = os.path.join(d, "lib%s.a" % self.g2c)
if not os.path.exists(path):
root = os.path.join(d, *((os.pardir,)*4))
d2 = os.path.abspath(os.path.join(root, 'lib'))
path = os.path.join(d2, "lib%s.a" % self.g2c)
if os.path.exists(path):
opt.append(d2)
opt.append(d)
return opt
def get_libraries(self):
opt = []
d = self.get_libgcc_dir()
if d is not None:
g2c = self.g2c + '-pic'
f = self.static_lib_format % (g2c, self.static_lib_extension)
if not os.path.isfile(os.path.join(d, f)):
g2c = self.g2c
else:
g2c = self.g2c
if g2c is not None:
opt.append(g2c)
c_compiler = self.c_compiler
if sys.platform == 'win32' and c_compiler and \
c_compiler.compiler_type == 'msvc':
# the following code is not needed (read: breaks) when using MinGW
# in case want to link F77 compiled code with MSVC
opt.append('gcc')
runtime_lib = msvc_runtime_library()
if runtime_lib:
opt.append(runtime_lib)
if sys.platform == 'darwin':
opt.append('cc_dynamic')
return opt
def get_flags_debug(self):
return ['-g']
def get_flags_opt(self):
v = self.get_version()
if v and v <= '3.3.3':
# With this compiler version building Fortran BLAS/LAPACK
# with -O3 caused failures in lib.lapack heevr,syevr tests.
opt = ['-O2']
elif v and v >= '4.6.0':
if is_win32():
# use -mincoming-stack-boundary=2
# due to the change to 16 byte stack alignment since GCC 4.6
# but 32 bit Windows ABI defines 4 bytes stack alignment
                opt = ['-O2 -march=core2 -mtune=generic -mfpmath=sse -msse2'
                       ' -mincoming-stack-boundary=2']
else:
opt = ['-O2 -march=x86-64 -DMS_WIN64 -mtune=generic -msse2']
else:
opt = ['-O2']
return opt
def _c_arch_flags(self):
""" Return detected arch flags from CFLAGS """
from distutils import sysconfig
try:
cflags = sysconfig.get_config_vars()['CFLAGS']
except KeyError:
return []
arch_re = re.compile(r"-arch\s+(\w+)")
arch_flags = []
for arch in arch_re.findall(cflags):
arch_flags += ['-arch', arch]
return arch_flags
def get_flags_arch(self):
return []
def runtime_library_dir_option(self, dir):
return '-Wl,-rpath="%s"' % dir
class Gnu95FCompiler(GnuFCompiler):
compiler_type = 'gnu95'
compiler_aliases = ('gfortran',)
description = 'GNU Fortran 95 compiler'
def version_match(self, version_string):
v = self.gnu_version_match(version_string)
if not v or v[0] != 'gfortran':
return None
v = v[1]
if v >= '4.':
# gcc-4 series releases do not support -mno-cygwin option
pass
else:
# use -mno-cygwin flag for gfortran when Python is not
# Cygwin-Python
if sys.platform == 'win32':
for key in ['version_cmd', 'compiler_f77', 'compiler_f90',
'compiler_fix', 'linker_so', 'linker_exe']:
self.executables[key].append('-mno-cygwin')
return v
possible_executables = ['gfortran', 'f95']
executables = {
'version_cmd' : ["<F90>", "-dumpversion"],
'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form",
"-fno-second-underscore"] + _EXTRAFLAGS,
'compiler_f90' : [None, "-Wall", "-g",
"-fno-second-underscore"] + _EXTRAFLAGS,
'compiler_fix' : [None, "-Wall", "-g","-ffixed-form",
"-fno-second-underscore"] + _EXTRAFLAGS,
'linker_so' : ["<F90>", "-Wall", "-g"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"],
'linker_exe' : [None, "-Wall"]
}
module_dir_switch = '-J'
module_include_switch = '-I'
g2c = 'gfortran'
def _universal_flags(self, cmd):
"""Return a list of -arch flags for every supported architecture."""
if not sys.platform == 'darwin':
return []
arch_flags = []
# get arches the C compiler gets.
c_archs = self._c_arch_flags()
if "i386" in c_archs:
c_archs[c_archs.index("i386")] = "i686"
# check the arches the Fortran compiler supports, and compare with
# arch flags from C compiler
for arch in ["ppc", "i686", "x86_64", "ppc64"]:
if _can_target(cmd, arch) and arch in c_archs:
arch_flags.extend(["-arch", arch])
return arch_flags
def get_flags(self):
flags = GnuFCompiler.get_flags(self)
arch_flags = self._universal_flags(self.compiler_f90)
if arch_flags:
flags[:0] = arch_flags
return flags
def get_flags_linker_so(self):
flags = GnuFCompiler.get_flags_linker_so(self)
arch_flags = self._universal_flags(self.linker_so)
if arch_flags:
flags[:0] = arch_flags
return flags
def get_library_dirs(self):
opt = GnuFCompiler.get_library_dirs(self)
if sys.platform == 'win32':
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
target = self.get_target()
if target:
d = os.path.normpath(self.get_libgcc_dir())
root = os.path.join(d, *((os.pardir,)*4))
path = os.path.join(root, target, "lib")
mingwdir = os.path.normpath(path)
if os.path.exists(os.path.join(mingwdir, "libmingwex.a")):
opt.append(mingwdir)
return opt
def get_libraries(self):
opt = GnuFCompiler.get_libraries(self)
if sys.platform == 'darwin':
opt.remove('cc_dynamic')
if sys.platform == 'win32':
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
if "gcc" in opt:
i = opt.index("gcc")
opt.insert(i+1, "mingwex")
opt.insert(i+1, "mingw32")
# XXX: fix this mess, does not work for mingw
if is_win64():
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
return []
else:
pass
return opt
def get_target(self):
status, output = exec_command(self.compiler_f77 +
['-v'],
use_tee=0)
if not status:
m = TARGET_R.search(output)
if m:
return m.group(1)
return ""
def get_flags_opt(self):
return GnuFCompiler.get_flags_opt(self)
def _can_target(cmd, arch):
"""Return true if the architecture supports the -arch flag"""
newcmd = cmd[:]
fid, filename = tempfile.mkstemp(suffix=".f")
try:
d = os.path.dirname(filename)
output = os.path.splitext(filename)[0] + ".o"
try:
newcmd.extend(["-arch", arch, "-c", filename])
p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d)
p.communicate()
return p.returncode == 0
finally:
if os.path.exists(output):
os.remove(output)
finally:
os.remove(filename)
return False
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
try:
compiler = GnuFCompiler()
compiler.customize()
print(compiler.get_version())
except Exception:
msg = get_exception()
print(msg)
try:
compiler = Gnu95FCompiler()
compiler.customize()
print(compiler.get_version())
except Exception:
msg = get_exception()
print(msg)
| tdsmith/numpy | numpy/distutils/fcompiler/gnu.py | Python | bsd-3-clause | 15,266 |
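# A standalone sketch of the version-string handling in
# GnuFCompiler.gnu_version_match() above: short -dumpversion output is
# matched directly, longer --version banners fall back to regexes. The
# sample strings are illustrative, not captured compiler output.
import re

def guess_compiler(version_string):
    if len(version_string) <= 20:
        m = re.search(r'([0-9.]+)', version_string)
        if m:
            name = 'g77' if version_string.startswith('GNU Fortran') else 'gfortran'
            return (name, m.group(1))
    m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string)
    if m:
        return ('gfortran', m.group(1))
    return None

print(guess_compiler('7.2.0'))                       # ('gfortran', '7.2.0')
print(guess_compiler('GNU Fortran 95 (GCC) 4.8.5'))  # ('gfortran', '4.8.5')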
self.description = "CleanMethod = KeepCurrent"
sp = pmpkg("dummy", "2.0-1")
self.addpkg2db("sync", sp)
sp = pmpkg("bar", "2.0-1")
self.addpkg2db("sync", sp)
sp = pmpkg("baz", "2.0-1")
self.addpkg2db("sync", sp)
lp = pmpkg("dummy", "1.0-1")
self.addpkg2db("local", lp)
lp = pmpkg("bar", "2.0-1")
self.addpkg2db("local", lp)
self.args = "-Sc"
self.option['CleanMethod'] = ['KeepCurrent']
self.createlocalpkgs = True
self.addrule("PACMAN_RETCODE=0")
self.addrule("CACHE_EXISTS=dummy|2.0-1")
self.addrule("!CACHE_EXISTS=dummy|1.0-1")
self.addrule("CACHE_EXISTS=bar|2.0-1")
self.addrule("CACHE_EXISTS=baz|2.0-1")
| vodik/pacman | test/pacman/tests/clean002.py | Python | gpl-2.0 | 615 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Text
BASE_TABLE_NAME = 'instance_extra'
NEW_COLUMN_NAME = 'pci_requests'
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
for prefix in ('', 'shadow_'):
table = Table(prefix + BASE_TABLE_NAME, meta, autoload=True)
new_column = Column(NEW_COLUMN_NAME, Text, nullable=True)
if not hasattr(table.c, NEW_COLUMN_NAME):
table.create_column(new_column)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
for prefix in ('', 'shadow_'):
table = Table(prefix + BASE_TABLE_NAME, meta, autoload=True)
if hasattr(table.c, NEW_COLUMN_NAME):
getattr(table.c, NEW_COLUMN_NAME).drop()
| ChinaMassClouds/copenstack-server | openstack/src/nova-2014.2/nova/db/sqlalchemy/migrate_repo/versions/253_add_pci_requests_to_instance_extra_table.py | Python | gpl-2.0 | 1,402 |
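# A hedged sketch of the same add-column pattern for a hypothetical column;
# only the structure (regular plus shadow_ table, guarded by hasattr) mirrors
# the migration above. Table and column names here are invented.
from sqlalchemy import Column, MetaData, Table, Text

def add_text_column(migrate_engine, base_table, column_name):
    meta = MetaData()
    meta.bind = migrate_engine
    for prefix in ('', 'shadow_'):
        table = Table(prefix + base_table, meta, autoload=True)
        if not hasattr(table.c, column_name):
            # create_column is provided by sqlalchemy-migrate, as used above
            table.create_column(Column(column_name, Text, nullable=True))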
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy as np
import unittest2 as unittest
from nupic.frameworks.opf.metrics import getModule, MetricSpec, MetricMulti
class OPFMetricsTest(unittest.TestCase):
DELTA = 0.01
VERBOSITY = 0
def testRMSE(self):
rmse = getModule(MetricSpec("rmse", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY}))
gt = [9, 4, 5, 6]
p = [0, 13, 8, 3]
for i in xrange(len(gt)):
rmse.addInstance(gt[i], p[i])
target = 6.71
self.assertTrue(abs(rmse.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testNRMSE(self):
nrmse = getModule(MetricSpec("nrmse", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY}))
gt = [9, 4, 5, 6]
p = [0, 13, 8, 3]
for i in xrange(len(gt)):
nrmse.addInstance(gt[i], p[i])
target = 3.5856858280031814
self.assertAlmostEqual(nrmse.getMetric()["value"], target)
def testWindowedRMSE(self):
wrmse = getModule(MetricSpec("rmse", None, None,
{"verbosity": OPFMetricsTest.VERBOSITY, "window":3}))
gt = [9, 4, 4, 100, 44]
p = [0, 13, 4, 6, 7]
for gv, pv in zip(gt, p):
wrmse.addInstance(gv, pv)
target = 58.324
self.assertTrue (abs(wrmse.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testAAE(self):
aae = getModule(MetricSpec("aae", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY}))
gt = [9, 4, 5, 6]
p = [0, 13, 8, 3]
for i in xrange(len(gt)):
aae.addInstance(gt[i], p[i])
target = 6.0
self.assertTrue(abs(aae.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testTrivialAAE(self):
trivialaae = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY,"errorMetric":"aae"}))
gt = [i/4+1 for i in range(100)]
p = [i for i in range(100)]
for i in xrange(len(gt)):
trivialaae.addInstance(gt[i], p[i])
target = .25
self.assertTrue(abs(trivialaae.getMetric()["value"]-target) \
< OPFMetricsTest.DELTA)
def testTrivialAccuracy(self):
trivialaccuracy = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY,"errorMetric":"acc"}))
gt = [str(i/4+1) for i in range(100)]
p = [str(i) for i in range(100)]
for i in xrange(len(gt)):
trivialaccuracy.addInstance(gt[i], p[i])
target = .75
self.assertTrue(abs(trivialaccuracy.getMetric()["value"]-target) \
< OPFMetricsTest.DELTA)
def testWindowedTrivialAAE (self):
"""Trivial Average Error metric test"""
trivialAveErr = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY,"errorMetric":"avg_err"}))
gt = [str(i/4+1) for i in range(100)]
p = [str(i) for i in range(100)]
for i in xrange(len(gt)):
trivialAveErr.addInstance(gt[i], p[i])
target = .25
self.assertTrue(abs(trivialAveErr.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testWindowedTrivialAccuract(self):
"""Trivial AAE metric test"""
trivialaae = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100,"errorMetric":"aae"}))
gt = [i/4+1 for i in range(1000)]
p = [i for i in range(1000)]
for i in xrange(len(gt)):
trivialaae.addInstance(gt[i], p[i])
target = .25
self.assertTrue(abs(trivialaae.getMetric()["value"]-target) \
< OPFMetricsTest.DELTA)
def testWindowedTrivialAccuracy(self):
"""Trivial Accuracy metric test"""
trivialaccuracy = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100,"errorMetric":"acc"}))
gt = [str(i/4+1) for i in range(1000)]
p = [str(i) for i in range(1000)]
for i in xrange(len(gt)):
trivialaccuracy.addInstance(gt[i], p[i])
target = .75
self.assertTrue(abs(trivialaccuracy.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testWindowedTrivialAverageError (self):
"""Trivial Average Error metric test"""
trivialAveErr = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100,"errorMetric":"avg_err"}))
gt = [str(i/4+1) for i in range(500, 1000)]
p = [str(i) for i in range(1000)]
for i in xrange(len(gt)):
trivialAveErr.addInstance(gt[i], p[i])
target = .25
self.assertTrue(abs(trivialAveErr.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testMultistepAAE(self):
"""Multistep AAE metric test"""
msp = getModule(MetricSpec("multiStep", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "errorMetric":"aae",
"steps": 3}))
# Make each ground truth 1 greater than the prediction
gt = [i+1 for i in range(100)]
p = [{3: {i: .7, 5: 0.3}} for i in range(100)]
for i in xrange(len(gt)):
msp.addInstance(gt[i], p[i])
target = 1
self.assertTrue(abs(msp.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testMultistepAAEMultipleSteps(self):
"""Multistep AAE metric test, predicting 2 different step sizes"""
msp = getModule(MetricSpec("multiStep", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "errorMetric":"aae",
"steps": [3,6]}))
# Make each 3 step prediction +1 over ground truth and each 6 step
# prediction +0.5 over ground truth
gt = [i for i in range(100)]
p = [{3: {i+1: .7, 5: 0.3},
6: {i+0.5: .7, 5: 0.3}} for i in range(100)]
for i in xrange(len(gt)):
msp.addInstance(gt[i], p[i])
target = 0.75 # average of +1 error and 0.5 error
self.assertTrue(abs(msp.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testMultistepProbability(self):
"""Multistep with probabilities metric test"""
msp = getModule(MetricSpec("multiStepProbability", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "errorMetric":"aae",
"steps":3}))
gt = [5 for i in range(1000)]
p = [{3: {i: .3, 5: .7}} for i in range(1000)]
for i in xrange(len(gt)):
msp.addInstance(gt[i], p[i])
#((999-5)(1000-5)/2-(899-5)(900-5)/2)*.3/100
target = 283.35
self.assertTrue(abs(msp.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testMultistepProbabilityMultipleSteps(self):
"""Multistep with probabilities metric test, predicting 2 different step
sizes"""
msp = getModule(MetricSpec("multiStepProbability", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100,
"errorMetric":"aae", "steps": [1,3]}))
gt = [5 for i in range(1000)]
p = [{3: {i: .3, 5: .7},
1: {5: 1.0}} for i in range(1000)]
for i in xrange(len(gt)):
msp.addInstance(gt[i], p[i])
#(((999-5)(1000-5)/2-(899-5)(900-5)/2)*.3/100) / 2
# / 2 because the 1-step prediction is 100% accurate
target = 283.35/2
self.assertTrue(abs(msp.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testMovingMeanAbsoluteError(self):
"""Moving mean Average Absolute Error metric test"""
movingMeanAAE = getModule(MetricSpec("moving_mean", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "mean_window":3,
"errorMetric":"aae"}))
gt = [i for i in range(890)]
gt.extend([2*i for i in range(110)])
p = [i for i in range(1000)]
res = []
for i in xrange(len(gt)):
movingMeanAAE.addInstance(gt[i], p[i])
res.append(movingMeanAAE.getMetric()["value"])
self.assertTrue(max(res[1:890]) == 2.0)
self.assertTrue(min(res[891:])>=4.0)
target = 4.0
self.assertTrue(abs(movingMeanAAE.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testMovingMeanRMSE(self):
"""Moving mean RMSE metric test"""
movingMeanRMSE = getModule(MetricSpec("moving_mean", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "mean_window":3,
"errorMetric":"rmse"}))
gt = [i for i in range(890)]
gt.extend([2*i for i in range(110)])
p = [i for i in range(1000)]
res = []
for i in xrange(len(gt)):
movingMeanRMSE.addInstance(gt[i], p[i])
res.append(movingMeanRMSE.getMetric()["value"])
self.assertTrue(max(res[1:890]) == 2.0)
self.assertTrue(min(res[891:])>=4.0)
target = 4.0
self.assertTrue(abs(movingMeanRMSE.getMetric()["value"]-target) \
< OPFMetricsTest.DELTA)
def testMovingModeAverageError(self):
"""Moving mode Average Error metric test"""
movingModeAvgErr = getModule(MetricSpec("moving_mode", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "mode_window":3,
"errorMetric":"avg_err"}))
    #Should initially asymptote to .5
#Then after 900 should go to 1.0 as the predictions will always be offset
gt = [i/4 for i in range(900)]
gt.extend([2*i/4 for i in range(100)])
p = [i for i in range(1000)]
res = []
for i in xrange(len(gt)):
movingModeAvgErr.addInstance(gt[i], p[i])
res.append(movingModeAvgErr.getMetric()["value"])
#Make sure that there is no point where the average error is >.5
self.assertTrue(max(res[1:890]) == .5)
#Make sure that after the statistics switch the error goes to 1.0
self.assertTrue(min(res[891:])>=.5)
#Make sure that the statistics change is still noticeable while it is
#in the window
self.assertTrue(res[998]<1.0)
target = 1.0
self.assertTrue(abs(movingModeAvgErr.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testMovingModeAccuracy(self):
"""Moving mode Accuracy metric test"""
movingModeACC = getModule(MetricSpec("moving_mode", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "mode_window":3,
"errorMetric":"acc"}))
#Should initially asymptote to .5
#Then after 900 should go to 0.0 as the predictions will always be offset
gt = [i/4 for i in range(900)]
gt.extend([2*i/4 for i in range(100)])
p = [i for i in range(1000)]
res = []
for i in xrange(len(gt)):
movingModeACC.addInstance(gt[i], p[i])
res.append(movingModeACC.getMetric()["value"])
#Make sure that there is no point where the average acc is <.5
self.assertTrue(min(res[1:899]) == .5)
#Make sure that after the statistics switch the acc goes to 0.0
self.assertTrue(max(res[900:])<=.5)
#Make sure that the statistics change is still noticeable while it
#is in the window
self.assertTrue(res[998]>0.0)
target = 0.0
self.assertTrue(abs(movingModeACC.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testTwoGramScalars(self):
"""Two gram scalars test"""
oneGram = getModule(MetricSpec("two_gram", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, \
"window":100, "predictionField":"test",
"errorMetric":"acc"}))
# Sequences of 0,1,2,3,4,0,1,2,3,4,...
encodings = [np.zeros(10) for i in range(5)]
for i in range(len(encodings)):
encoding = encodings[i]
encoding[i] = 1
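# (each encodings[i] is a 10-element one-hot vector with bit i set, standing
# in for the encoder output of category i)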
gt = [i%5 for i in range(1000)]
res = []
for i in xrange(len(gt)):
if i == 20:
# Make sure we don't barf with missing values
oneGram.addInstance(np.zeros(10), prediction=None,
record={"test":None})
else:
# Feed in next groundTruth
oneGram.addInstance(encodings[i%5], prediction=None,
record={"test":gt[i]})
res.append(oneGram.getMetric()["value"])
target = 1.0
self.assertTrue(abs(oneGram.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testTwoGramScalarsStepsGreaterOne(self):
"""Two gram scalars test with step size other than 1"""
oneGram = getModule(MetricSpec("two_gram", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY,\
"window":100, "predictionField":"test",
"errorMetric":"acc", "steps": 2}))
# Sequences of 0,1,2,3,4,0,1,2,3,4,...
encodings = [np.zeros(10) for i in range(5)]
for i in range(len(encodings)):
encoding = encodings[i]
encoding[i] = 1
gt = [i%5 for i in range(1000)]
res = []
for i in xrange(len(gt)):
if i == 20:
# Make sure we don't barf with missing values
oneGram.addInstance(np.zeros(10), prediction=None,
record={"test":None})
else:
# Feed in next groundTruth
oneGram.addInstance(encodings[i%5], prediction=None,
record={"test":gt[i]})
res.append(oneGram.getMetric()["value"])
target = 1.0
self.assertTrue(abs(oneGram.getMetric()["value"]-target) \
< OPFMetricsTest.DELTA)
def testTwoGramStrings(self):
"""Two gram string test"""
oneGram = getModule(MetricSpec("two_gram", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "errorMetric":"acc",
"predictionField":"test"}))
# Sequences of "0", "1", "2", "3", "4", "0", "1", ...
gt = [str(i%5) for i in range(1000)]
encodings = [np.zeros(10) for i in range(5)]
for i in range(len(encodings)):
encoding = encodings[i]
encoding[i] = 1
# Make every 5th element random
newElem = 100
for i in range(5, 1000, 5):
gt[i] = str(newElem)
newElem += 20
res = []
for i in xrange(len(gt)):
if i==20:
# Make sure we don't barf with missing values
oneGram.addInstance(np.zeros(10), prediction=None,
record={"test":None})
else:
oneGram.addInstance(encodings[i%5], prediction=None,
record={"test":gt[i]})
res.append(oneGram.getMetric()["value"])
target = .8
self.assertTrue(abs(oneGram.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testWindowedAAE(self):
"""Windowed AAE"""
waae = getModule(MetricSpec("aae", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":1}))
gt = [9, 4, 5, 6]
p = [0, 13, 8, 3]
for i in xrange(len(gt)):
waae.addInstance(gt[i], p[i])
target = 3.0
self.assertTrue( abs(waae.getMetric()["value"]-target) \
< OPFMetricsTest.DELTA, "Got %s" %waae.getMetric())
def testAccuracy(self):
"""Accuracy"""
acc = getModule(MetricSpec("acc", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY}))
gt = [0, 1, 2, 3, 4, 5]
p = [0, 1, 2, 4, 5, 6]
for i in xrange(len(gt)):
acc.addInstance(gt[i], p[i])
target = 0.5
self.assertTrue(abs(acc.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testWindowedAccuracy(self):
"""Windowed accuracy"""
acc = getModule(MetricSpec("acc", None, None, \
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":2}))
gt = [0, 1, 2, 3, 4, 5]
p = [0, 1, 2, 4, 5, 6]
for i in xrange(len(gt)):
acc.addInstance(gt[i], p[i])
target = 0.0
self.assertTrue(abs(acc.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testAverageError(self):
"""Ave Error"""
err = getModule(MetricSpec("avg_err", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY}))
gt = [1, 1, 2, 3, 4, 5]
p = [0, 1, 2, 4, 5, 6]
for i in xrange(len(gt)):
err.addInstance(gt[i], p[i])
target = (2.0/3.0)
self.assertTrue(abs(err.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testWindowedAverageError(self):
"""Windowed Ave Error"""
err = getModule(MetricSpec("avg_err", None, None, \
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":2}))
gt = [0, 1, 2, 3, 4, 5]
p = [0, 1, 2, 4, 5, 6]
for i in xrange(len(gt)):
err.addInstance(gt[i], p[i])
target = 1.0
self.assertTrue(abs(err.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testLongWindowRMSE(self):
"""RMSE"""
rmse = getModule(MetricSpec("rmse", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100}))
gt = [9, 4, 5, 6]
p = [0, 13, 8, 3]
for i in xrange(len(gt)):
rmse.addInstance(gt[i], p[i])
target = 6.71
self.assertTrue(abs(rmse.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testNegativeLogLikelihood(self):
# make sure negativeLogLikelihood returns correct LL numbers
# mock objects for ClassifierInput and ModelResult (see opfutils.py)
class MockClassifierInput(object):
def __init__(self, bucketIdx):
self.bucketIndex = bucketIdx
class MockModelResult(object):
def __init__(self, bucketll, bucketIdx):
self.inferences = {'multiStepBucketLikelihoods': {1: bucketll}}
self.classifierInput = MockClassifierInput(bucketIdx)
bucketLL = {0: 1.0, 1: 0, 2: 0, 3: 0} # model prediction as a dictionary
gt_bucketIdx = 0 # bucket index for ground truth
negLL = getModule(MetricSpec("negativeLogLikelihood", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY}))
negLL.addInstance(0, 0, record = None,
result=MockModelResult(bucketLL, gt_bucketIdx))
target = 0.0 # -log(1.0)
self.assertAlmostEqual(negLL.getMetric()["value"], target)
bucketLL = {0: 0.5, 1: 0.5, 2: 0, 3: 0} # model prediction as a dictionary
gt_bucketIdx = 0 # bucket index for ground truth
negLL = getModule(MetricSpec("negativeLogLikelihood", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY}))
negLL.addInstance(0, 0, record = None,
result=MockModelResult(bucketLL, gt_bucketIdx))
target = 0.6931471 # -log(0.5)
self.assertTrue(abs(negLL.getMetric()["value"]-target)
< OPFMetricsTest.DELTA)
# test accumulated negLL for multiple steps
bucketLL = []
bucketLL.append({0: 1, 1: 0, 2: 0, 3: 0})
bucketLL.append({0: 0, 1: 1, 2: 0, 3: 0})
bucketLL.append({0: 0, 1: 0, 2: 1, 3: 0})
bucketLL.append({0: 0, 1: 0, 2: 0, 3: 1})
gt_bucketIdx = [0, 2, 1, 3]
negLL = getModule(MetricSpec("negativeLogLikelihood", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY}))
for i in xrange(len(bucketLL)):
negLL.addInstance(0, 0, record = None,
result=MockModelResult(bucketLL[i], gt_bucketIdx[i]))
target = 5.756462
self.assertTrue(abs(negLL.getMetric()["value"]-target)
< OPFMetricsTest.DELTA)
def testNegLLMultiplePrediction(self):
# In cases where the ground truth has multiple possible outcomes, make sure
# that the prediction that captures the ground truth distribution has the best
# LL, and that models giving a single prediction (either the most likely
# outcome or the average outcome) have a worse LL
# mock objects for ClassifierInput and ModelResult (see opfutils.py)
class MockClassifierInput(object):
def __init__(self, bucketIdx):
self.bucketIndex = bucketIdx
class MockModelResult(object):
def __init__(self, bucketll, bucketIdx):
self.inferences = {'multiStepBucketLikelihoods': {1: bucketll}}
self.classifierInput = MockClassifierInput(bucketIdx)
# the ground truth lies in bucket 0 with p=0.45, in bucket 1 with p=0.0
# and in bucket 2 with p=0.55
gt_bucketIdx = [0]*45+[2]*55
# compare neg log-likelihood for three type of model predictions
# a model that predicts ground truth distribution
prediction_gt = {0: 0.45, 1: 0, 2: 0.55}
# a model that predicts only the most likely outcome
prediction_ml = {0: 0.0, 1: 0, 2: 1.0}
# a model that only predicts mean (bucket 1)
prediction_mean = {0: 0, 1: 1, 2: 0}
negLL_gt = getModule(MetricSpec("negativeLogLikelihood", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY}))
negLL_ml = getModule(MetricSpec("negativeLogLikelihood", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY}))
negLL_mean = getModule(MetricSpec("negativeLogLikelihood", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY}))
for i in xrange(len(gt_bucketIdx)):
negLL_gt.addInstance(0, 0, record = None,
result=MockModelResult(prediction_gt, gt_bucketIdx[i]))
negLL_ml.addInstance(0, 0, record = None,
result=MockModelResult(prediction_ml, gt_bucketIdx[i]))
negLL_mean.addInstance(0, 0, record = None,
result=MockModelResult(prediction_mean, gt_bucketIdx[i]))
self.assertTrue(negLL_gt.getMetric()["value"] < negLL_ml.getMetric()["value"])
self.assertTrue(negLL_gt.getMetric()["value"] < negLL_mean.getMetric()["value"])
def testCustomErrorMetric(self):
customFunc = """def getError(pred,ground,tools):
return abs(pred-ground)"""
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc, "errorWindow":3}))
gt = [9, 4, 5, 6]
p = [0, 13, 8, 3]
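# absolute errors are 9, 9, 3, 3; with errorWindow=3 only the last three are
# kept, so the expected aggregate is (9 + 3 + 3) / 3 = 5.0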
for i in xrange(len(gt)):
aggErr = customEM.addInstance(gt[i], p[i])
target = 5.0
delta = 0.001
# ensure that addInstance returns the aggregate error - other
# uber metrics depend on this behavior.
self.assertEqual(aggErr, customEM.getMetric()["value"])
self.assertTrue(abs(customEM.getMetric()["value"]-target) < delta)
customFunc = """def getError(pred,ground,tools):
sum = 0
for i in range(min(3,tools.getBufferLen())):
sum+=abs(tools.getPrediction(i)-tools.getGroundTruth(i))
return sum/3"""
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc}))
gt = [9, 4, 5, 6]
p = [0, 13, 8, 3]
for i in xrange(len(gt)):
customEM.addInstance(gt[i], p[i])
target = 5.0
delta = 0.001
self.assertTrue(abs(customEM.getMetric()["value"]-target) < delta)
# Test custom error metric helper functions
# Test getPrediction
# Not-Windowed
storeWindow=4
failed = False
for lookBack in range(3):
customFunc = """def getError(pred,ground,tools):
return tools.getPrediction(%d)""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if i < lookBack:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue( not failed,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue( customEM.getMetric()["value"] == p[i-lookBack])
#Windowed
for lookBack in range(5):
customFunc = """def getError(pred,ground,tools):
return tools.getPrediction(%d)""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc,"storeWindow":storeWindow}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if lookBack>=storeWindow-1:
pass
if i < lookBack or lookBack>=storeWindow:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue (not failed ,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue (customEM.getMetric()["value"] == p[i-lookBack])
#Test getGroundTruth
#Not-Windowed
for lookBack in range(3):
customFunc = """def getError(pred,ground,tools):
return tools.getGroundTruth(%d)""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if i < lookBack:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue( not failed ,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue (customEM.getMetric()["value"] == gt[i-lookBack])
#Windowed
for lookBack in range(5):
customFunc = """def getError(pred,ground,tools):
return tools.getGroundTruth(%d)""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc,"storeWindow":storeWindow}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if i < lookBack or lookBack>=storeWindow:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue( not failed ,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue( customEM.getMetric()["value"] == gt[i-lookBack])
#Test getFieldValue
#Not-Windowed Scalar
for lookBack in range(3):
customFunc = """def getError(pred,ground,tools):
return tools.getFieldValue(%d,"test1")""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if i < lookBack:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue( not failed ,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue (customEM.getMetric()["value"] == t1[i-lookBack])
#Windowed Scalar
for lookBack in range(3):
customFunc = """def getError(pred,ground,tools):
return tools.getFieldValue(%d,"test1")""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc,"storeWindow":storeWindow}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if i < lookBack or lookBack>=storeWindow:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue (not failed ,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue( customEM.getMetric()["value"] == t1[i-lookBack])
#Not-Windowed category
for lookBack in range(3):
customFunc = """def getError(pred,ground,tools):
return tools.getFieldValue(%d,"test1")""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if i < lookBack:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue( not failed ,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue (customEM.getMetric()["value"] == t1[i-lookBack])
#Windowed category
for lookBack in range(3):
customFunc = """def getError(pred,ground,tools):
return tools.getFieldValue(%d,"test1")""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc,"storeWindow":storeWindow}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if i < lookBack or lookBack>=storeWindow:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue (not failed ,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue (customEM.getMetric()["value"] == t1[i-lookBack])
#Test getBufferLen
#Not-Windowed
customFunc = """def getError(pred,ground,tools):
return tools.getBufferLen()"""
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue (customEM.getMetric()["value"] == i+1)
#Windowed
customFunc = """def getError(pred,ground,tools):
return tools.getBufferLen()"""
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc,"storeWindow":storeWindow}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue (customEM.getMetric()["value"] == min(i+1, 4))
#Test initialization edge cases
try:
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc,"errorWindow":0}))
self.assertTrue(False, "errorWindow of 0 should fail")
except:
pass
try:
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc,"storeWindow":0}))
self.assertTrue(False, "storeWindow of 0 should fail")
except:
pass
def testMultiMetric(self):
ms1 = MetricSpec(field='a', metric='trivial', inferenceElement='prediction', params={'errorMetric': 'aae', 'window': 1000, 'steps': 1})
ms2 = MetricSpec(metric='trivial', inferenceElement='prediction', field='a', params={'window': 10, 'steps': 1, 'errorMetric': 'rmse'})
metric1000 = getModule(ms1)
metric10 = getModule(ms2)
# create multi metric
multi = MetricMulti(weights=[0.2, 0.8], metrics=[metric10, metric1000])
multi.verbosity = 1
# create reference metrics (must be diff from metrics above used in MultiMetric, as they keep history)
metric1000ref = getModule(ms1)
metric10ref = getModule(ms2)
gt = range(500, 1000)
p = range(500)
for i in xrange(len(gt)):
v10=metric10ref.addInstance(gt[i], p[i])
v1000=metric1000ref.addInstance(gt[i], p[i])
if v10 is None or v1000 is None:
check=None
else:
check=0.2*float(v10) + 0.8*float(v1000)
metricValue = multi.addInstance(gt[i], p[i])
self.assertEqual(check, metricValue, "iter i= %s gt=%s pred=%s multi=%s sub1=%s sub2=%s" % (i, gt[i], p[i], metricValue, v10, v1000))
if __name__ == "__main__":
unittest.main()
| elkingtonmcb/nupic | tests/unit/nupic/frameworks/opf/opf_metrics_test.py | Python | agpl-3.0 | 33,254 |
#!/usr/bin/env python
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is used by run_tests.py to create cpu load on a machine"""
while True:
pass
| jboeuf/grpc | tools/run_tests/python_utils/antagonist.py | Python | apache-2.0 | 688 |
# mssql/__init__.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from sqlalchemy.dialects.mssql import base, pyodbc, adodbapi, \
pymssql, zxjdbc, mxodbc
base.dialect = pyodbc.dialect
from sqlalchemy.dialects.mssql.base import \
INTEGER, BIGINT, SMALLINT, TINYINT, VARCHAR, NVARCHAR, CHAR, \
NCHAR, TEXT, NTEXT, DECIMAL, NUMERIC, FLOAT, DATETIME,\
DATETIME2, DATETIMEOFFSET, DATE, TIME, SMALLDATETIME, \
BINARY, VARBINARY, BIT, REAL, IMAGE, TIMESTAMP,\
MONEY, SMALLMONEY, UNIQUEIDENTIFIER, SQL_VARIANT, dialect
__all__ = (
'INTEGER', 'BIGINT', 'SMALLINT', 'TINYINT', 'VARCHAR', 'NVARCHAR', 'CHAR',
'NCHAR', 'TEXT', 'NTEXT', 'DECIMAL', 'NUMERIC', 'FLOAT', 'DATETIME',
'DATETIME2', 'DATETIMEOFFSET', 'DATE', 'TIME', 'SMALLDATETIME',
'BINARY', 'VARBINARY', 'BIT', 'REAL', 'IMAGE', 'TIMESTAMP',
'MONEY', 'SMALLMONEY', 'UNIQUEIDENTIFIER', 'SQL_VARIANT', 'dialect'
)
| wildchildyn/autism-website | yanni_env/lib/python3.6/site-packages/sqlalchemy/dialects/mssql/__init__.py | Python | gpl-3.0 | 1,081 |
# -*- coding: utf-8 -*-
from __future__ import with_statement
from time import sleep
from os.path import exists, join
from shutil import copy
from traceback import print_exc
from utils import chmod
# ignore these plugin configs, mainly because plugins were wiped out
IGNORE = (
"FreakshareNet", "SpeedManager", "ArchiveTo", "ShareCx", ('hooks', 'UnRar'),
'EasyShareCom', 'FlyshareCz'
)
CONF_VERSION = 1
class ConfigParser:
"""
holds and manage the configuration
current dict layout:
{
section : {
option : {
value:
type:
desc:
}
desc:
}
}
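e.g. a filled-in instance of this layout (section/option names and values
are illustrative only):
{
download : {
chunks : { value: 3, type: "int", desc: "Max connections for one download" }
desc: "Download"
}
}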
"""
def __init__(self):
"""Constructor"""
self.config = {} # the config values
self.plugin = {} # the config for plugins
self.oldRemoteData = {}
self.pluginCB = None # callback when plugin config value is changed
self.checkVersion()
self.readConfig()
self.deleteOldPlugins()
def checkVersion(self, n=0):
"""determines if the config needs to be copied"""
try:
if not exists("pyload.conf"):
copy(join(pypath, "module", "config", "default.conf"), "pyload.conf")
if not exists("plugin.conf"):
f = open("plugin.conf", "wb")
f.write("version: " + str(CONF_VERSION))
f.close()
f = open("pyload.conf", "rb")
v = f.readline()
f.close()
v = v[v.find(":") + 1:].strip()
if not v or int(v) < CONF_VERSION:
copy(join(pypath, "module", "config", "default.conf"), "pyload.conf")
print "Old version of config was replaced"
f = open("plugin.conf", "rb")
v = f.readline()
f.close()
v = v[v.find(":") + 1:].strip()
if not v or int(v) < CONF_VERSION:
f = open("plugin.conf", "wb")
f.write("version: " + str(CONF_VERSION))
f.close()
print "Old version of plugin-config replaced"
except:
if n < 3:
sleep(0.3)
self.checkVersion(n + 1)
else:
raise
def readConfig(self):
"""reads the config file"""
self.config = self.parseConfig(join(pypath, "module", "config", "default.conf"))
self.plugin = self.parseConfig("plugin.conf")
try:
homeconf = self.parseConfig("pyload.conf")
if "username" in homeconf["remote"]:
if "password" in homeconf["remote"]:
self.oldRemoteData = {"username": homeconf["remote"]["username"]["value"],
"password": homeconf["remote"]["password"]["value"]}
del homeconf["remote"]["password"]
del homeconf["remote"]["username"]
self.updateValues(homeconf, self.config)
except Exception, e:
print "Config Warning"
print_exc()
def parseConfig(self, config):
"""parses a given configfile"""
f = open(config)
config = f.read()
config = config.splitlines()[1:]
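# Each remaining line follows the format written by saveConfig() below, e.g.
# (section/option names here are illustrative only):
# general - "General":
# bool debug_mode : "Debug Mode" = False
# list values are wrapped in "[ ... ]" and may span several lines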
conf = {}
section, option, value, typ, desc = "", "", "", "", ""
listmode = False
for line in config:
comment = line.rfind("#")
if line.find(":", comment) < 0 > line.find("=", comment) and comment > 0 and line[comment - 1].isspace():
line = line.rpartition("#") # removes comments
if line[1]:
line = line[0]
else:
line = line[2]
line = line.strip()
try:
if line == "":
continue
elif line.endswith(":"):
section, none, desc = line[:-1].partition('-')
section = section.strip()
desc = desc.replace('"', "").strip()
conf[section] = {"desc": desc}
else:
if listmode:
if line.endswith("]"):
listmode = False
line = line.replace("]", "")
value += [self.cast(typ, x.strip()) for x in line.split(",") if x]
if not listmode:
conf[section][option] = {"desc": desc,
"type": typ,
"value": value}
else:
content, none, value = line.partition("=")
content, none, desc = content.partition(":")
desc = desc.replace('"', "").strip()
typ, none, option = content.strip().rpartition(" ")
value = value.strip()
if value.startswith("["):
if value.endswith("]"):
listmode = False
value = value[:-1]
else:
listmode = True
value = [self.cast(typ, x.strip()) for x in value[1:].split(",") if x]
else:
value = self.cast(typ, value)
if not listmode:
conf[section][option] = {"desc": desc,
"type": typ,
"value": value}
except Exception, e:
print "Config Warning"
print_exc()
f.close()
return conf
def updateValues(self, config, dest):
"""sets the config values from a parsed config file to values in destination"""
for section in config.iterkeys():
if section in dest:
for option in config[section].iterkeys():
if option in ("desc", "outline"): continue
if option in dest[section]:
dest[section][option]["value"] = config[section][option]["value"]
#else:
# dest[section][option] = config[section][option]
#else:
# dest[section] = config[section]
def saveConfig(self, config, filename):
"""saves config to filename"""
with open(filename, "wb") as f:
chmod(filename, 0600)
f.write("version: %i \n" % CONF_VERSION)
for section in config.iterkeys():
f.write('\n%s - "%s":\n' % (section, config[section]["desc"]))
for option, data in config[section].iteritems():
if option in ("desc", "outline"): continue
if isinstance(data["value"], list):
value = "[ \n"
for x in data["value"]:
value += "\t\t" + str(x) + ",\n"
value += "\t\t]\n"
else:
if type(data["value"]) in (str, unicode):
value = data["value"] + "\n"
else:
value = str(data["value"]) + "\n"
try:
f.write('\t%s %s : "%s" = %s' % (data["type"], option, data["desc"], value))
except UnicodeEncodeError:
f.write('\t%s %s : "%s" = %s' % (data["type"], option, data["desc"], value.encode("utf8")))
def cast(self, typ, value):
"""cast value to given format"""
if type(value) not in (str, unicode):
return value
elif typ == "int":
return int(value)
elif typ == "bool":
return True if value.lower() in ("1", "true", "on", "an", "yes") else False
elif typ == "time":
if not value: value = "0:00"
if not ":" in value: value += ":00"
return value
elif typ in ("str", "file", "folder"):
try:
return value.encode("utf8")
except:
return value
else:
return value
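# Examples of the casts above (illustrative): cast("int", "3") -> 3,
# cast("bool", "on") -> True, cast("time", "5") -> "5:00",
# cast("str", u"abc") -> "abc" (utf8-encoded)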
def save(self):
"""saves the configs to disk"""
self.saveConfig(self.config, "pyload.conf")
self.saveConfig(self.plugin, "plugin.conf")
def __getitem__(self, section):
"""provides dictionary like access: c['section']['option']"""
return Section(self, section)
def get(self, section, option):
"""get value"""
val = self.config[section][option]["value"]
try:
if type(val) in (str, unicode):
return val.decode("utf8")
else:
return val
except:
return val
def set(self, section, option, value):
"""set value"""
value = self.cast(self.config[section][option]["type"], value)
self.config[section][option]["value"] = value
self.save()
def getPlugin(self, plugin, option):
"""gets a value for a plugin"""
val = self.plugin[plugin][option]["value"]
try:
if type(val) in (str, unicode):
return val.decode("utf8")
else:
return val
except:
return val
def setPlugin(self, plugin, option, value):
"""sets a value for a plugin"""
value = self.cast(self.plugin[plugin][option]["type"], value)
if self.pluginCB: self.pluginCB(plugin, option, value)
self.plugin[plugin][option]["value"] = value
self.save()
def getMetaData(self, section, option):
""" get all config data for an option """
return self.config[section][option]
def addPluginConfig(self, name, config, outline=""):
"""adds config options with tuples (name, type, desc, default)"""
if name not in self.plugin:
conf = {"desc": name,
"outline": outline}
self.plugin[name] = conf
else:
conf = self.plugin[name]
conf["outline"] = outline
for item in config:
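# each item is a (name, type, desc, default) tuple, e.g. (illustrative):
# ("interval", "int", "Check interval in minutes", 10)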
if item[0] in conf:
conf[item[0]]["type"] = item[1]
conf[item[0]]["desc"] = item[2]
else:
conf[item[0]] = {
"desc": item[2],
"type": item[1],
"value": self.cast(item[1], item[3])
}
values = [x[0] for x in config] + ["desc", "outline"]
#delete old values
for item in conf.keys():
if item not in values:
del conf[item]
def deleteConfig(self, name):
"""Removes a plugin config"""
if name in self.plugin:
del self.plugin[name]
def deleteOldPlugins(self):
""" remove old plugins from config """
for name in IGNORE:
if name in self.plugin:
del self.plugin[name]
class Section:
"""provides dictionary like access for configparser"""
def __init__(self, parser, section):
"""Constructor"""
self.parser = parser
self.section = section
def __getitem__(self, item):
"""getitem"""
return self.parser.get(self.section, item)
def __setitem__(self, item, value):
"""setitem"""
self.parser.set(self.section, item, value)
if __name__ == "__main__":
pypath = ""
from time import time
a = time()
c = ConfigParser()
b = time()
print "sec", b - a
print c.config
c.saveConfig(c.config, "user.conf")
| manuelm/pyload | module/ConfigParser.py | Python | gpl-3.0 | 11,895 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""End-to-end benchmark for batch normalization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import time
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def batch_norm_op(tensor, mean, variance, beta, gamma, scale):
"""Fused kernel for batch normalization."""
# _batch_norm_with_global_normalization is deprecated in v9
test_util.set_producer_version(ops.get_default_graph(), 8)
# pylint: disable=protected-access
return gen_nn_ops._batch_norm_with_global_normalization(
tensor, mean, variance, beta, gamma, 0.001, scale)
# pylint: enable=protected-access
# Note that the naive implementation is much slower:
# batch_norm = (tensor - mean) * tf.rsqrt(variance + 0.001)
# if scale:
# batch_norm *= gamma
# return batch_norm + beta
def batch_norm_py(tensor, mean, variance, beta, gamma, scale):
"""Python implementation of batch normalization."""
return nn_impl.batch_normalization(tensor, mean, variance, beta, gamma if
scale else None, 0.001)
def batch_norm_slow(tensor, mean, variance, beta, gamma, scale):
batch_norm = (tensor - mean) * math_ops.rsqrt(variance + 0.001)
if scale:
batch_norm *= gamma
return batch_norm + beta
def build_graph(device, input_shape, axes, num_layers, mode, scale, train):
"""Build a graph containing a sequence of batch normalizations.
Args:
device: string, the device to run on.
input_shape: shape of the input tensor.
axes: axes that are to be normalized across.
num_layers: number of batch normalization layers in the graph.
mode: "op", "py" or "slow" depending on the implementation.
scale: scale after normalization.
train: if true, also run backprop.
Returns:
An array of tensors to run()
"""
moment_shape = []
keep_dims = mode == "py" or mode == "slow"
if keep_dims:
for axis in range(len(input_shape)):
if axis in axes:
moment_shape.append(1)
else:
moment_shape.append(input_shape[axis])
else:
for axis in range(len(input_shape)):
if axis not in axes:
moment_shape.append(input_shape[axis])
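# For example, input_shape=[8, 128, 128, 32] with axes=[0, 1, 2] (as used in
# the benchmarks below) gives moment_shape=[1, 1, 1, 32] for the "py"/"slow"
# modes (keep_dims) and [32] for the fused "op" mode.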
with ops.device("/%s:0" % device):
tensor = variables.Variable(random_ops.truncated_normal(input_shape))
for _ in range(num_layers):
if train:
mean, variance = nn_impl.moments(tensor, axes, keep_dims=keep_dims)
else:
mean = array_ops.zeros(moment_shape)
variance = array_ops.ones(moment_shape)
beta = variables.Variable(array_ops.zeros(moment_shape))
gamma = variables.Variable(constant_op.constant(1.0, shape=moment_shape))
if mode == "py":
tensor = batch_norm_py(tensor, mean, variance, beta, gamma, scale)
elif mode == "op":
tensor = batch_norm_op(tensor, mean, variance, beta, gamma, scale)
elif mode == "slow":
tensor = batch_norm_slow(tensor, mean, variance, beta, gamma, scale)
if train:
return gradients_impl.gradients([tensor], variables.trainable_variables())
else:
return [tensor]
def print_difference(mode, t1, t2):
"""Print the difference in timing between two runs."""
difference = (t2 - t1) / t1 * 100.0
print("=== %s: %.1f%% ===" % (mode, difference))
class BatchNormBenchmark(test.Benchmark):
"""Benchmark batch normalization."""
def _run_graph(self, device, input_shape, axes, num_layers, mode, scale,
train, num_iters):
"""Run the graph and print its execution time.
Args:
device: string, the device to run on.
input_shape: shape of the input tensor.
axes: axes that are to be normalized across.
num_layers: number of batch normalization layers in the graph.
mode: "op", "py" or "slow" depending on the implementation.
scale: scale after normalization.
train: if true, also run backprop.
num_iters: number of steps to run.
Returns:
The duration of the run in seconds.
"""
graph = ops.Graph()
with graph.as_default():
outputs = build_graph(device, input_shape, axes, num_layers, mode, scale,
train)
with session_lib.Session(graph=graph) as session:
variables.global_variables_initializer().run()
_ = session.run([out.op for out in outputs]) # warm up.
start_time = time.time()
for _ in range(num_iters):
_ = session.run([out.op for out in outputs])
duration = time.time() - start_time
print("%s shape:%d/%d #layers:%d mode:%s scale:%r train:%r - %f secs" %
(device, len(input_shape), len(axes), num_layers, mode, scale, train,
duration / num_iters))
name_template = (
"batch_norm_{device}_input_shape_{shape}_axes_{axes}_mode_{mode}_"
"layers_{num_layers}_scale_{scale}_"
"train_{train}")
self.report_benchmark(
name=name_template.format(
device=device,
mode=mode,
num_layers=num_layers,
scale=scale,
train=train,
shape=str(input_shape).replace(" ", ""),
axes=str(axes)).replace(" ", ""),
iters=num_iters,
wall_time=duration / num_iters)
return duration
def benchmark_batch_norm(self):
print("Forward convolution (lower layers).")
shape = [8, 128, 128, 32]
axes = [0, 1, 2]
t1 = self._run_graph("cpu", shape, axes, 10, "op", True, False, 5)
t2 = self._run_graph("cpu", shape, axes, 10, "py", True, False, 5)
t3 = self._run_graph("cpu", shape, axes, 10, "slow", True, False, 5)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
if FLAGS.use_gpu:
t1 = self._run_graph("gpu", shape, axes, 10, "op", True, False, 50)
t2 = self._run_graph("gpu", shape, axes, 10, "py", True, False, 50)
t3 = self._run_graph("gpu", shape, axes, 10, "slow", True, False, 50)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
print("Forward/backward convolution (lower layers).")
t1 = self._run_graph("cpu", shape, axes, 10, "op", True, True, 5)
t2 = self._run_graph("cpu", shape, axes, 10, "py", True, True, 5)
t3 = self._run_graph("cpu", shape, axes, 10, "slow", True, True, 5)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
if FLAGS.use_gpu:
t1 = self._run_graph("gpu", shape, axes, 10, "op", True, True, 50)
t2 = self._run_graph("gpu", shape, axes, 10, "py", True, True, 50)
t3 = self._run_graph("gpu", shape, axes, 10, "slow", True, True, 50)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
print("Forward convolution (higher layers).")
shape = [256, 17, 17, 32]
axes = [0, 1, 2]
t1 = self._run_graph("cpu", shape, axes, 10, "op", True, False, 5)
t2 = self._run_graph("cpu", shape, axes, 10, "py", True, False, 5)
t3 = self._run_graph("cpu", shape, axes, 10, "slow", True, False, 5)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
if FLAGS.use_gpu:
t1 = self._run_graph("gpu", shape, axes, 10, "op", True, False, 50)
t2 = self._run_graph("gpu", shape, axes, 10, "py", True, False, 50)
t3 = self._run_graph("gpu", shape, axes, 10, "slow", True, False, 50)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
print("Forward/backward convolution (higher layers).")
t1 = self._run_graph("cpu", shape, axes, 10, "op", True, True, 5)
t2 = self._run_graph("cpu", shape, axes, 10, "py", True, True, 5)
t3 = self._run_graph("cpu", shape, axes, 10, "slow", True, True, 5)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
if FLAGS.use_gpu:
t1 = self._run_graph("gpu", shape, axes, 10, "op", True, True, 50)
t2 = self._run_graph("gpu", shape, axes, 10, "py", True, True, 50)
t3 = self._run_graph("gpu", shape, axes, 10, "slow", True, True, 50)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
print("Forward fully-connected.")
shape = [1024, 32]
axes = [0]
t1 = self._run_graph("cpu", shape, axes, 10, "py", True, False, 5)
t2 = self._run_graph("cpu", shape, axes, 10, "slow", True, False, 5)
print_difference("py vs slow", t1, t2)
if FLAGS.use_gpu:
t1 = self._run_graph("gpu", shape, axes, 10, "py", True, False, 50)
t2 = self._run_graph("gpu", shape, axes, 10, "slow", True, False, 50)
print_difference("py vs slow", t1, t2)
print("Forward/backward fully-connected.")
t1 = self._run_graph("cpu", shape, axes, 10, "py", True, True, 50)
t2 = self._run_graph("cpu", shape, axes, 10, "slow", True, True, 50)
print_difference("py vs slow", t1, t2)
if FLAGS.use_gpu:
t1 = self._run_graph("gpu", shape, axes, 10, "py", True, True, 5)
t2 = self._run_graph("gpu", shape, axes, 10, "slow", True, True, 5)
print_difference("py vs slow", t1, t2)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--use_gpu",
type="bool",
nargs="?",
const=True,
default=True,
help="Run GPU benchmarks."
)
global FLAGS # pylint:disable=global-at-module-level
FLAGS, unparsed = parser.parse_known_args()
test.main(argv=[sys.argv[0]] + unparsed)
| nburn42/tensorflow | tensorflow/python/ops/batch_norm_benchmark.py | Python | apache-2.0 | 10,762 |
#!/usr/bin/env python
# Copyright (c) 2014 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import re
import sys
import shutil
def DoCopy(path, target_path):
if os.path.isfile(path):
package = ''
package_re = re.compile(
'^package (?P<package>([a-zA-Z0-9_]+\.)*[a-zA-Z0-9_]+);$')
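# e.g. a source file declaring "package org.xwalk.core;" (illustrative name)
# ends up copied into the org/xwalk/core/ sub-directory of target_path.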
for line in open(path).readlines():
match = package_re.match(line)
if match:
package = match.group('package')
break
sub_path = os.path.sep.join(package.split('.'))
shutil.copy(path, os.path.join(target_path, sub_path))
return
for dirpath, _, files in os.walk(path):
if not files:
continue
sub_path = os.path.relpath(dirpath, path)
target_dirpath = os.path.join(target_path, sub_path)
if not os.path.isdir(target_dirpath):
os.makedirs(target_dirpath)
for f in files:
fpath = os.path.join(dirpath, f)
# "interface type;" is invalid for normal android project,
# It's only for chromium's build system, ignore these aidl files.
if f.endswith('.aidl'):
invalid_lines = []
for line in open(fpath).readlines():
if re.match('^interface .*;$', line):
invalid_lines.append(line)
if invalid_lines:
continue
elif not f.endswith('.java'):
continue
shutil.copy(fpath, target_dirpath)
def main():
parser = optparse.OptionParser()
info = ('The java source dirs to merge.')
parser.add_option('--dirs', help=info)
info = ('The target to place all the sources.')
parser.add_option('--target-path', help=info)
options, _ = parser.parse_args()
if os.path.isdir(options.target_path):
shutil.rmtree(options.target_path)
os.makedirs(options.target_path)
for path in options.dirs.split(' '):
if path.startswith('"') and path.endswith('"'):
path = eval(path)
DoCopy(path, options.target_path)
if __name__ == '__main__':
sys.exit(main())
| weiyirong/crosswalk-1 | build/android/merge_java_srcs.py | Python | bsd-3-clause | 2,054 |