code | repo_name | path | language | license | size
---|---|---|---|---|---
#
# Copyright 2013 Tim O'Shea
#
# This file is part of PyBOMBS
#
# PyBOMBS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# PyBOMBS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyBOMBS; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from globals import *
import recipe_loader
from recipe import recipe
from pybombs_ops import *
| scalable-networks/ext | pybombs/mod_pybombs/__init__.py | Python | gpl-2.0 | 852 |
# Copyright (c) 2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents a connection to the EC2 service.
"""
from boto.ec2.connection import EC2Connection
from boto.vpc.vpc import VPC
from boto.vpc.customergateway import CustomerGateway
from boto.vpc.vpngateway import VpnGateway, Attachment
from boto.vpc.dhcpoptions import DhcpOptions
from boto.vpc.subnet import Subnet
from boto.vpc.vpnconnection import VpnConnection
class VPCConnection(EC2Connection):
# VPC methods
def get_all_vpcs(self, vpc_ids=None, filters=None):
"""
Retrieve information about your VPCs. You can filter results to
return information only about those VPCs that match your search
parameters. Otherwise, all VPCs associated with your account
are returned.
:type vpc_ids: list
:param vpc_ids: A list of strings with the desired VPC ID's
:type filters: list of tuples
:param filters: A list of tuples containing filters. Each tuple
consists of a filter key and a filter value.
Possible filter keys are:
- *state*, the state of the VPC (pending or available)
- *cidrBlock*, CIDR block of the VPC
- *dhcpOptionsId*, the ID of a set of DHCP options
:rtype: list
:return: A list of :class:`boto.vpc.vpc.VPC`
"""
params = {}
if vpc_ids:
self.build_list_params(params, vpc_ids, 'VpcId')
if filters:
i = 1
for filter in filters:
params[('Filter.%d.Key' % i)] = filter[0]
params[('Filter.%d.Value.1' % i)] = filter[1]
i += 1
return self.get_list('DescribeVpcs', params, [('item', VPC)])
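# A sketch of the query encoding built above (hypothetical values, not part of
# the original module): calling
#   conn.get_all_vpcs(vpc_ids=['vpc-1a2b3c4d'], filters=[('state', 'available')])
# sends the DescribeVpcs action with parameters roughly like
#   VpcId.1=vpc-1a2b3c4d, Filter.1.Key=state, Filter.1.Value.1=available
# which is why each Filter.N.Value.1 key must carry the running index N.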
def create_vpc(self, cidr_block):
"""
Create a new Virtual Private Cloud.
:type cidr_block: str
:param cidr_block: A valid CIDR block
:rtype: The newly created VPC
:return: A :class:`boto.vpc.vpc.VPC` object
"""
params = {'CidrBlock' : cidr_block}
return self.get_object('CreateVpc', params, VPC)
def delete_vpc(self, vpc_id):
"""
Delete a Virtual Private Cloud.
:type vpc_id: str
:param vpc_id: The ID of the vpc to be deleted.
:rtype: bool
:return: True if successful
"""
params = {'VpcId': vpc_id}
return self.get_status('DeleteVpc', params)
# Customer Gateways
def get_all_customer_gateways(self, customer_gateway_ids=None, filters=None):
"""
Retrieve information about your CustomerGateways. You can filter results to
return information only about those CustomerGateways that match your search
parameters. Otherwise, all CustomerGateways associated with your account
are returned.
:type customer_gateway_ids: list
:param customer_gateway_ids: A list of strings with the desired CustomerGateway ID's
:type filters: list of tuples
:param filters: A list of tuples containing filters. Each tuple
consists of a filter key and a filter value.
Possible filter keys are:
- *state*, the state of the CustomerGateway
(pending, available, deleting, deleted)
- *type*, the type of customer gateway (ipsec.1)
- *ipAddress*, the IP address of the customer gateway's
internet-routable external interface
:rtype: list
:return: A list of :class:`boto.vpc.customergateway.CustomerGateway`
"""
params = {}
if customer_gateway_ids:
self.build_list_params(params, customer_gateway_ids, 'CustomerGatewayId')
if filters:
i = 1
for filter in filters:
params[('Filter.%d.Key' % i)] = filter[0]
params[('Filter.%d.Value.1' % i)] = filter[1]
i += 1
return self.get_list('DescribeCustomerGateways', params, [('item', CustomerGateway)])
def create_customer_gateway(self, type, ip_address, bgp_asn):
"""
Create a new Customer Gateway
:type type: str
:param type: Type of VPN Connection. Only valid value currently is 'ipsec.1'
:type ip_address: str
:param ip_address: Internet-routable IP address for customer's gateway.
Must be a static address.
:type bgp_asn: str
:param bgp_asn: Customer gateway's Border Gateway Protocol (BGP)
Autonomous System Number (ASN)
:rtype: The newly created CustomerGateway
:return: A :class:`boto.vpc.customergateway.CustomerGateway` object
"""
params = {'Type' : type,
'IpAddress' : ip_address,
'BgpAsn' : bgp_asn}
return self.get_object('CreateCustomerGateway', params, CustomerGateway)
def delete_customer_gateway(self, customer_gateway_id):
"""
Delete a Customer Gateway.
:type customer_gateway_id: str
:param customer_gateway_id: The ID of the customer_gateway to be deleted.
:rtype: bool
:return: True if successful
"""
params = {'CustomerGatewayId': customer_gateway_id}
return self.get_status('DeleteCustomerGateway', params)
# VPN Gateways
def get_all_vpn_gateways(self, vpn_gateway_ids=None, filters=None):
"""
Retrieve information about your VpnGateways. You can filter results to
return information only about those VpnGateways that match your search
parameters. Otherwise, all VpnGateways associated with your account
are returned.
:type vpn_gateway_ids: list
:param vpn_gateway_ids: A list of strings with the desired VpnGateway ID's
:type filters: list of tuples
:param filters: A list of tuples containing filters. Each tuple
consists of a filter key and a filter value.
Possible filter keys are:
- *state*, the state of the VpnGateway
(pending, available, deleting, deleted)
- *type*, the type of VPN gateway (ipsec.1)
- *availabilityZone*, the Availability zone the
VPN gateway is in.
:rtype: list
:return: A list of :class:`boto.vpc.vpngateway.VpnGateway`
"""
params = {}
if vpn_gateway_ids:
self.build_list_params(params, vpn_gateway_ids, 'VpnGatewayId')
if filters:
i = 1
for filter in filters:
params[('Filter.%d.Key' % i)] = filter[0]
params[('Filter.%d.Value.1' % i)] = filter[1]
i += 1
return self.get_list('DescribeVpnGateways', params, [('item', VpnGateway)])
def create_vpn_gateway(self, type, availability_zone=None):
"""
Create a new Vpn Gateway
:type type: str
:param type: Type of VPN Connection. Only valid value currently is 'ipsec.1'
:type availability_zone: str
:param availability_zone: The Availability Zone where you want the VPN gateway.
:rtype: The newly created VpnGateway
:return: A :class:`boto.vpc.vpngateway.VpnGateway` object
"""
params = {'Type' : type}
if availability_zone:
params['AvailabilityZone'] = availability_zone
return self.get_object('CreateVpnGateway', params, VpnGateway)
def delete_vpn_gateway(self, vpn_gateway_id):
"""
Delete a Vpn Gateway.
:type vpn_gateway_id: str
:param vpn_gateway_id: The ID of the vpn_gateway to be deleted.
:rtype: bool
:return: True if successful
"""
params = {'VpnGatewayId': vpn_gateway_id}
return self.get_status('DeleteVpnGateway', params)
def attach_vpn_gateway(self, vpn_gateway_id, vpc_id):
"""
Attaches a VPN gateway to a VPC.
:type vpn_gateway_id: str
:param vpn_gateway_id: The ID of the vpn_gateway to attach
:type vpc_id: str
:param vpc_id: The ID of the VPC you want to attach the gateway to.
:rtype: An attachment
:return: a :class:`boto.vpc.vpngateway.Attachment`
"""
params = {'VpnGatewayId': vpn_gateway_id,
'VpcId' : vpc_id}
return self.get_object('AttachVpnGateway', params, Attachment)
# Subnets
def get_all_subnets(self, subnet_ids=None, filters=None):
"""
Retrieve information about your Subnets. You can filter results to
return information only about those Subnets that match your search
parameters. Otherwise, all Subnets associated with your account
are returned.
:type subnet_ids: list
:param subnet_ids: A list of strings with the desired Subnet ID's
:type filters: list of tuples
:param filters: A list of tuples containing filters. Each tuple
consists of a filter key and a filter value.
Possible filter keys are:
- *state*, the state of the Subnet
(pending, available)
- *vpcId*, the ID of the VPC the subnet is in.
- *cidrBlock*, CIDR block of the subnet
- *availabilityZone*, the Availability Zone
the subnet is in.
:rtype: list
:return: A list of :class:`boto.vpc.subnet.Subnet`
"""
params = {}
if subnet_ids:
self.build_list_params(params, subnet_ids, 'SubnetId')
if filters:
i = 1
for filter in filters:
params[('Filter.%d.Key' % i)] = filter[0]
params[('Filter.%d.Value.1' % i)] = filter[1]
i += 1
return self.get_list('DescribeSubnets', params, [('item', Subnet)])
def create_subnet(self, vpc_id, cidr_block, availability_zone=None):
"""
Create a new Subnet
:type vpc_id: str
:param vpc_id: The ID of the VPC where you want to create the subnet.
:type cidr_block: str
:param cidr_block: The CIDR block you want the subnet to cover.
:type availability_zone: str
:param availability_zone: The AZ you want the subnet in
:rtype: The newly created Subnet
:return: A :class:`boto.vpc.subnet.Subnet` object
"""
params = {'VpcId' : vpc_id,
'CidrBlock' : cidr_block}
if availability_zone:
params['AvailabilityZone'] = availability_zone
return self.get_object('CreateSubnet', params, Subnet)
def delete_subnet(self, subnet_id):
"""
Delete a subnet.
:type subnet_id: str
:param subnet_id: The ID of the subnet to be deleted.
:rtype: bool
:return: True if successful
"""
params = {'SubnetId': subnet_id}
return self.get_status('DeleteSubnet', params)
# DHCP Options
def get_all_dhcp_options(self, dhcp_options_ids=None):
"""
Retrieve information about your DhcpOptions.
:type dhcp_options_ids: list
:param dhcp_options_ids: A list of strings with the desired DhcpOption ID's
:rtype: list
:return: A list of :class:`boto.vpc.dhcpoptions.DhcpOptions`
"""
params = {}
if dhcp_options_ids:
self.build_list_params(params, dhcp_options_ids, 'DhcpOptionsId')
return self.get_list('DescribeDhcpOptions', params, [('item', DhcpOptions)])
def create_dhcp_options(self, vpc_id, cidr_block, availability_zone=None):
"""
Create a new set of DHCP options.
:type vpc_id: str
:param vpc_id: The ID of the VPC where you want to create the subnet.
:type cidr_block: str
:param cidr_block: The CIDR block you want the subnet to cover.
:type availability_zone: str
:param availability_zone: The AZ you want the subnet in
:rtype: The newly created DhcpOptions
:return: A :class:`boto.vpc.dhcpoptions.DhcpOptions` object
"""
params = {'VpcId' : vpc_id,
'CidrBlock' : cidr_block}
if availability_zone:
params['AvailabilityZone'] = availability_zone
return self.get_object('CreateDhcpOptions', params, DhcpOptions)
def delete_dhcp_options(self, dhcp_options_id):
"""
Delete a DHCP Options
:type dhcp_options_id: str
:param dhcp_options_id: The ID of the DHCP Options to be deleted.
:rtype: bool
:return: True if successful
"""
params = {'DhcpOptionsId': dhcp_options_id}
return self.get_status('DeleteDhcpOptions', params)
def associate_dhcp_options(self, dhcp_options_id, vpc_id):
"""
Associate a set of Dhcp Options with a VPC.
:type dhcp_options_id: str
:param dhcp_options_id: The ID of the Dhcp Options
:type vpc_id: str
:param vpc_id: The ID of the VPC.
:rtype: bool
:return: True if successful
"""
params = {'DhcpOptionsId': dhcp_options_id,
'VpcId' : vpc_id}
return self.get_status('AssociateDhcpOptions', params)
# VPN Connection
def get_all_vpn_connections(self, vpn_connection_ids=None, filters=None):
"""
Retrieve information about your VPN_CONNECTIONs. You can filter results to
return information only about those VPN_CONNECTIONs that match your search
parameters. Otherwise, all VPN_CONNECTIONs associated with your account
are returned.
:type vpn_connection_ids: list
:param vpn_connection_ids: A list of strings with the desired VPN_CONNECTION ID's
:type filters: list of tuples
:param filters: A list of tuples containing filters. Each tuple
consists of a filter key and a filter value.
Possible filter keys are:
- *state*, the state of the VPN_CONNECTION
(pending, available, deleting, deleted)
- *type*, the type of connection, currently 'ipsec.1'
- *customerGatewayId*, the ID of the customer gateway
associated with the VPN
- *vpnGatewayId*, the ID of the VPN gateway associated
with the VPN connection
:rtype: list
:return: A list of :class:`boto.vpc.vpnconnection.VpnConnection`
"""
params = {}
if vpn_connection_ids:
self.build_list_params(params, vpn_connection_ids, 'VpnConnectionId')
if filters:
i = 1
for filter in filters:
params[('Filter.%d.Key' % i)] = filter[0]
params[('Filter.%d.Value.1' % i)] = filter[1]
i += 1
return self.get_list('DescribeVpnConnections', params, [('item', VpnConnection)])
def create_vpn_connection(self, type, customer_gateway_id, vpn_gateway_id):
"""
Create a new VPN Connection.
:type type: str
:param type: The type of VPN Connection. Currently only 'ipsec.1'
is supported
:type customer_gateway_id: str
:param customer_gateway_id: The ID of the customer gateway.
:type vpn_gateway_id: str
:param vpn_gateway_id: The ID of the VPN gateway.
:rtype: The newly created VpnConnection
:return: A :class:`boto.vpc.vpnconnection.VpnConnection` object
"""
params = {'Type' : type,
'CustomerGatewayId' : customer_gateway_id,
'VpnGatewayId' : vpn_gateway_id}
return self.get_object('CreateVpnConnection', params, VpnConnection)
def delete_vpn_connection(self, vpn_connection_id):
"""
Delete a VPN Connection.
:type vpn_connection_id: str
:param vpn_connection_id: The ID of the vpn_connection to be deleted.
:rtype: bool
:return: True if successful
"""
params = {'VpnConnectionId': vpn_connection_id}
return self.get_status('DeleteVpnConnection', params)
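# Usage sketch (not part of the original module; assumes boto can find valid
# AWS credentials and that the account/region accepts the VPC API calls above):
#
#   from boto.vpc import VPCConnection
#
#   conn = VPCConnection()
#   vpc = conn.create_vpc('10.0.0.0/16')                 # boto.vpc.vpc.VPC
#   subnet = conn.create_subnet(vpc.id, '10.0.1.0/24')
#   for v in conn.get_all_vpcs(filters=[('state', 'available')]):
#       print v.id, v.cidr_block
#   conn.delete_subnet(subnet.id)
#   conn.delete_vpc(vpc.id)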
| apavlo/h-store | third_party/python/boto/vpc/__init__.py | Python | gpl-3.0 | 17,897 |
"""
Management command to resend all lti scores for the requested course.
"""
import textwrap
from django.core.management import BaseCommand
from opaque_keys.edx.keys import CourseKey
from lti_provider.models import GradedAssignment
from lti_provider import tasks
class Command(BaseCommand):
"""
Send all lti scores for the requested courses to the registered consumers.
If no arguments are provided, send all scores for all courses.
Examples:
./manage.py lms resend_lti_scores
./manage.py lms resend_lti_scores course-v1:edX+DemoX+Demo_Course course-v1:UBCx+course+2016-01
"""
help = textwrap.dedent(__doc__)
def add_arguments(self, parser):
parser.add_argument(u'course_keys', type=CourseKey.from_string, nargs='*')
def handle(self, *args, **options):
if options[u'course_keys']:
for course_key in options[u'course_keys']:
for assignment in self._iter_course_assignments(course_key):
self._send_score(assignment)
else:
for assignment in self._iter_all_assignments():
self._send_score(assignment)
def _send_score(self, assignment):
"""
Send the score to the LTI consumer for a single assignment.
"""
tasks.send_composite_outcome.delay(
assignment.user_id,
unicode(assignment.course_key),
assignment.id,
assignment.version_number,
)
def _iter_all_assignments(self):
"""
Get all the graded assignments in the system.
"""
return GradedAssignment.objects.all()
def _iter_course_assignments(self, course_key):
"""
Get all the graded assignments for the given course.
"""
return GradedAssignment.objects.filter(course_key=course_key)
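# Usage sketch (not part of the original module): besides the ./manage.py
# invocations shown in the class docstring, the command can be driven
# programmatically, e.g. from a test or a shell:
#
#   from django.core.management import call_command
#   call_command('resend_lti_scores')                               # all courses
#   call_command('resend_lti_scores', 'course-v1:edX+DemoX+Demo_Course')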
| caesar2164/edx-platform | lms/djangoapps/lti_provider/management/commands/resend_lti_scores.py | Python | agpl-3.0 | 1,857 |
#!/usr/bin/env python
#***************************************************************************
#* *
#* Copyright (c) 2009 Yorik van Havre <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
__title__="wiki2qhelp"
__author__ = "Yorik van Havre <[email protected]>"
__url__ = "http://www.freecadweb.org"
"""
This script builds qhelp files from a local copy of the wiki
"""
import sys, os, re, tempfile, getopt, shutil
from urllib2 import urlopen, HTTPError
# CONFIGURATION #################################################
FOLDER = "./localwiki"
INDEX = "Online_Help_Toc" # the start page from where to crawl the wiki
VERBOSE = True # to display what's going on. Otherwise, runs totally silent.
QHELPCOMPILER = 'qhelpgenerator'
QCOLLECTIOMGENERATOR = 'qcollectiongenerator'
RELEASE = '0.16'
# END CONFIGURATION ##############################################
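# Usage sketch (not part of the original script): with a local wiki dump
# already present under FOLDER and Qt's qhelpgenerator/qcollectiongenerator
# available on the PATH, running the script directly (e.g. "python
# buildqhelp.py") writes freecad.qhp/freecad.qhcp into FOLDER and compiles
# them to freecad.qch and freecad.qhc for Qt Assistant.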
fcount = dcount = 0
def crawl():
"downloads an entire wiki site"
# tests ###############################################
if os.system(QHELPCOMPILER +' -v'):
print "Error: QAssistant not fully installed, exiting."
return 1
if os.system(QCOLLECTIOMGENERATOR +' -v'):
print "Error: QAssistant not fully installed, exiting."
return 1
# run ########################################################
qhp = buildtoc()
qhcp = createCollProjectFile()
if generate(qhcp) or compile(qhp):
print "Error at compiling"
return 1
if VERBOSE: print "All done!"
i=raw_input("Copy the files to their correct location in the source tree? y/n (default=no) ")
if i.upper() in ["Y","YES"]:
shutil.copy("localwiki/freecad.qch","../../Doc/freecad.qch")
shutil.copy("localwiki/freecad.qhc","../../Doc/freecad.qhc")
else:
print 'Files are in localwiki. Test with "assistant -collectionFile localwiki/freecad.qhc"'
return 0
def compile(qhpfile):
"compiles the whole html doc with qassistant"
qchfile = FOLDER + os.sep + "freecad.qch"
if not os.system(QHELPCOMPILER + ' '+qhpfile+' -o '+qchfile):
if VERBOSE: print "Successfully created",qchfile
return 0
def generate(qhcpfile):
"generates qassistant-specific settings like icon, title, ..."
txt="""
<center>FreeCAD """+RELEASE+""" help files<br/>
<a href="http://www.freecadweb.org">http://www.freecadweb.org</a></center>
"""
about=open(FOLDER + os.sep + "about.txt","w")
about.write(txt)
about.close()
qhcfile = FOLDER + os.sep + "freecad.qhc"
if not os.system(QCOLLECTIOMGENERATOR+' '+qhcpfile+' -o '+qhcfile):
if VERBOSE: print "Successfully created ",qhcfile
return 0
def createCollProjectFile():
qprojectfile = '''<?xml version="1.0" encoding="UTF-8"?>
<QHelpCollectionProject version="1.0">
<assistant>
<title>FreeCAD User Manual</title>
<applicationIcon>64px-FreeCAD05.svg.png</applicationIcon>
<cacheDirectory>freecad/freecad</cacheDirectory>
<startPage>qthelp://org.freecad.usermanual/doc/Online_Help_Startpage.html</startPage>
<aboutMenuText>
<text>About FreeCAD</text>
</aboutMenuText>
<aboutDialog>
<file>about.txt</file>
<!--
<icon>images/icon.png</icon>
-->
<icon>64px-FreeCAD05.svg.png</icon>
</aboutDialog>
<enableDocumentationManager>true</enableDocumentationManager>
<enableAddressBar>true</enableAddressBar>
<enableFilterFunctionality>true</enableFilterFunctionality>
</assistant>
<docFiles>
<generate>
<file>
<input>freecad.qhp</input>
<output>freecad.qch</output>
</file>
</generate>
<register>
<file>freecad.qch</file>
</register>
</docFiles>
</QHelpCollectionProject>
'''
if VERBOSE: print "Building project file..."
qfilename = FOLDER + os.sep + "freecad.qhcp"
f = open(qfilename,'w')
f.write(qprojectfile)
f.close()
if VERBOSE: print "Done writing qhcp file",qfilename
return qfilename
def buildtoc():
'''
gets the table of contents page and parses its
contents into a clean lists structure
'''
qhelpfile = '''<?xml version="1.0" encoding="UTF-8"?>
<QtHelpProject version="1.0">
<namespace>org.freecad.usermanual</namespace>
<virtualFolder>doc</virtualFolder>
<!--
<customFilter name="FreeCAD '''+RELEASE+'''">
<filterAttribute>FreeCAD</filterAttribute>
<filterAttribute>'''+RELEASE+'''</filterAttribute>
</customFilter>
-->
<filterSection>
<!--
<filterAttribute>FreeCAD</filterAttribute>
<filterAttribute>'''+RELEASE+'''</filterAttribute>
-->
<toc>
<inserttoc>
</toc>
<keywords>
<insertkeywords>
</keywords>
<insertfiles>
</filterSection>
</QtHelpProject>
'''
def getname(line):
line = re.compile('<li>').sub('',line)
line = re.compile('</li>').sub('',line)
title = line.strip()
link = ''
if "<a" in line:
title = re.findall('<a[^>]*>(.*?)</a>',line)[0].strip()
link = re.findall('href="(.*?)"',line)[0].strip()
if not link: link = 'default.html'
return title,link
if VERBOSE: print "Building table of contents..."
f = open(FOLDER+os.sep+INDEX+'.html')
html = ''
for line in f: html += line
f.close()
html = html.replace("\n"," ")
html = html.replace("> <","><")
html = re.findall("<ul.*/ul>",html)[0]
items = re.findall('<li[^>]*>.*?</li>|</ul></li>',html)
inserttoc = '<section title="FreeCAD Documentation" ref="Online_Help_Toc.html">\n'
insertkeywords = ''
for item in items:
if not ("<ul>" in item):
if ("</ul>" in item):
inserttoc += '</section>\n'
else:
link = ''
title,link=getname(item)
if link:
link='" ref="'+link
insertkeywords += ('<keyword name="'+title+link+'"/>\n')
inserttoc += ('<section title="'+title+link+'"></section>\n')
else:
subitems = item.split("<ul>")
for i in range(len(subitems)):
link = ''
title,link=getname(subitems[i])
if link:
link='" ref="'+link
insertkeywords += ('<keyword name="'+title+link+'"/>\n')
trail = ''
if i == len(subitems)-1: trail = '</section>'
inserttoc += ('<section title="'+title+link+'">'+trail+'\n')
inserttoc += '</section>\n'
insertfiles = "<files>\n"
for fil in os.listdir(FOLDER):
insertfiles += ("<file>"+fil+"</file>\n")
insertfiles += "</files>\n"
qhelpfile = re.compile('<insertkeywords>').sub(insertkeywords,qhelpfile)
qhelpfile = re.compile('<inserttoc>').sub(inserttoc,qhelpfile)
qhelpfile = re.compile('<insertfiles>').sub(insertfiles,qhelpfile)
qfilename = FOLDER + os.sep + "freecad.qhp"
f = open(qfilename,'wb')
f.write(qhelpfile)
f.close()
if VERBOSE: print "Done writing qhp file",qfilename
return qfilename
if __name__ == "__main__":
crawl()
| kkoksvik/FreeCAD | src/Tools/offlinedoc/buildqhelp.py | Python | lgpl-2.1 | 8,832 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack Foundation
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import sys
import tempfile
try:
import cStringIO as io
BytesIO = io.StringIO
except ImportError:
import io
BytesIO = io.BytesIO
import fixtures
import testscenarios
from pbr import packaging
from pbr.tests import base
class SkipFileWrites(base.BaseTestCase):
scenarios = [
('changelog_option_true',
dict(option_key='skip_changelog', option_value='True',
env_key='SKIP_WRITE_GIT_CHANGELOG', env_value=None,
pkg_func=packaging.write_git_changelog, filename='ChangeLog')),
('changelog_option_false',
dict(option_key='skip_changelog', option_value='False',
env_key='SKIP_WRITE_GIT_CHANGELOG', env_value=None,
pkg_func=packaging.write_git_changelog, filename='ChangeLog')),
('changelog_env_true',
dict(option_key='skip_changelog', option_value='False',
env_key='SKIP_WRITE_GIT_CHANGELOG', env_value='True',
pkg_func=packaging.write_git_changelog, filename='ChangeLog')),
('changelog_both_true',
dict(option_key='skip_changelog', option_value='True',
env_key='SKIP_WRITE_GIT_CHANGELOG', env_value='True',
pkg_func=packaging.write_git_changelog, filename='ChangeLog')),
('authors_option_true',
dict(option_key='skip_authors', option_value='True',
env_key='SKIP_GENERATE_AUTHORS', env_value=None,
pkg_func=packaging.generate_authors, filename='AUTHORS')),
('authors_option_false',
dict(option_key='skip_authors', option_value='False',
env_key='SKIP_GENERATE_AUTHORS', env_value=None,
pkg_func=packaging.generate_authors, filename='AUTHORS')),
('authors_env_true',
dict(option_key='skip_authors', option_value='False',
env_key='SKIP_GENERATE_AUTHORS', env_value='True',
pkg_func=packaging.generate_authors, filename='AUTHORS')),
('authors_both_true',
dict(option_key='skip_authors', option_value='True',
env_key='SKIP_GENERATE_AUTHORS', env_value='True',
pkg_func=packaging.generate_authors, filename='AUTHORS')),
]
def setUp(self):
super(SkipFileWrites, self).setUp()
self.temp_path = self.useFixture(fixtures.TempDir()).path
self.root_dir = os.path.abspath(os.path.curdir)
self.git_dir = os.path.join(self.root_dir, ".git")
if not os.path.exists(self.git_dir):
self.skipTest("%s is missing; skipping git-related checks"
% self.git_dir)
return
self.filename = os.path.join(self.temp_path, self.filename)
self.option_dict = dict()
if self.option_key is not None:
self.option_dict[self.option_key] = ('setup.cfg',
self.option_value)
self.useFixture(
fixtures.EnvironmentVariable(self.env_key, self.env_value))
def test_skip(self):
self.pkg_func(git_dir=self.git_dir,
dest_dir=self.temp_path,
option_dict=self.option_dict)
self.assertEqual(
not os.path.exists(self.filename),
(self.option_value.lower() in packaging.TRUE_VALUES
or self.env_value is not None))
_changelog_content = """04316fe (review/monty_taylor/27519) Make python
378261a Add an integration test script.
3c373ac (HEAD, tag: 2013.2.rc2, tag: 2013.2, milestone-proposed) Merge "Lib
182feb3 (tag: 0.5.17) Fix pip invocation for old versions of pip.
fa4f46e (tag: 0.5.16) Remove explicit depend on distribute.
d1c53dd Use pip instead of easy_install for installation.
a793ea1 Merge "Skip git-checkout related tests when .git is missing"
6c27ce7 Skip git-checkout related tests when .git is missing
04984a5 Refactor hooks file.
a65e8ee (tag: 0.5.14, tag: 0.5.13) Remove jinja pin.
"""
class GitLogsTest(base.BaseTestCase):
def setUp(self):
super(GitLogsTest, self).setUp()
self.temp_path = self.useFixture(fixtures.TempDir()).path
self.root_dir = os.path.abspath(os.path.curdir)
self.git_dir = os.path.join(self.root_dir, ".git")
self.useFixture(
fixtures.EnvironmentVariable('SKIP_GENERATE_AUTHORS'))
self.useFixture(
fixtures.EnvironmentVariable('SKIP_WRITE_GIT_CHANGELOG'))
def test_write_git_changelog(self):
self.useFixture(fixtures.FakePopen(lambda _: {
"stdout": BytesIO(_changelog_content.encode('utf-8'))
}))
packaging.write_git_changelog(git_dir=self.git_dir,
dest_dir=self.temp_path)
with open(os.path.join(self.temp_path, "ChangeLog"), "r") as ch_fh:
changelog_contents = ch_fh.read()
self.assertIn("2013.2", changelog_contents)
self.assertIn("0.5.17", changelog_contents)
self.assertIn("------", changelog_contents)
self.assertIn("Refactor hooks file", changelog_contents)
self.assertNotIn("Refactor hooks file.", changelog_contents)
self.assertNotIn("182feb3", changelog_contents)
self.assertNotIn("review/monty_taylor/27519", changelog_contents)
self.assertNotIn("0.5.13", changelog_contents)
self.assertNotIn('Merge "', changelog_contents)
def test_generate_authors(self):
author_old = u"Foo Foo <[email protected]>"
author_new = u"Bar Bar <[email protected]>"
co_author = u"Foo Bar <[email protected]>"
co_author_by = u"Co-authored-by: " + co_author
git_log_cmd = (
"git --git-dir=%s log --format=%%aN <%%aE>"
% self.git_dir)
git_co_log_cmd = ("git --git-dir=%s log" % self.git_dir)
git_top_level = "git rev-parse --show-toplevel"
cmd_map = {
git_log_cmd: author_new,
git_co_log_cmd: co_author_by,
git_top_level: self.root_dir,
}
exist_files = [self.git_dir,
os.path.join(self.temp_path, "AUTHORS.in")]
self.useFixture(fixtures.MonkeyPatch(
"os.path.exists",
lambda path: os.path.abspath(path) in exist_files))
def _fake_run_shell_command(cmd, **kwargs):
return cmd_map[" ".join(cmd)]
self.useFixture(fixtures.MonkeyPatch(
"pbr.packaging._run_shell_command",
_fake_run_shell_command))
with open(os.path.join(self.temp_path, "AUTHORS.in"), "w") as auth_fh:
auth_fh.write("%s\n" % author_old)
packaging.generate_authors(git_dir=self.git_dir,
dest_dir=self.temp_path)
with open(os.path.join(self.temp_path, "AUTHORS"), "r") as auth_fh:
authors = auth_fh.read()
self.assertTrue(author_old in authors)
self.assertTrue(author_new in authors)
self.assertTrue(co_author in authors)
class BuildSphinxTest(base.BaseTestCase):
scenarios = [
('true_autodoc_caps',
dict(has_opt=True, autodoc='True', has_autodoc=True)),
('true_autodoc_lower',
dict(has_opt=True, autodoc='true', has_autodoc=True)),
('false_autodoc',
dict(has_opt=True, autodoc='False', has_autodoc=False)),
('no_autodoc',
dict(has_opt=False, autodoc='False', has_autodoc=False)),
]
def setUp(self):
super(BuildSphinxTest, self).setUp()
self.useFixture(fixtures.MonkeyPatch(
"sphinx.setup_command.BuildDoc.run", lambda self: None))
from distutils import dist
self.distr = dist.Distribution()
self.distr.packages = ("fake_package",)
self.distr.command_options["build_sphinx"] = {
"source_dir": ["a", "."]}
pkg_fixture = fixtures.PythonPackage(
"fake_package", [("fake_module.py", b"")])
self.useFixture(pkg_fixture)
self.useFixture(base.DiveDir(pkg_fixture.base))
def test_build_doc(self):
if self.has_opt:
self.distr.command_options["pbr"] = {
"autodoc_index_modules": ('setup.cfg', self.autodoc)}
build_doc = packaging.LocalBuildDoc(self.distr)
build_doc.run()
self.assertTrue(
os.path.exists("api/autoindex.rst") == self.has_autodoc)
self.assertTrue(
os.path.exists(
"api/fake_package.fake_module.rst") == self.has_autodoc)
def test_builders_config(self):
if self.has_opt:
self.distr.command_options["pbr"] = {
"autodoc_index_modules": ('setup.cfg', self.autodoc)}
build_doc = packaging.LocalBuildDoc(self.distr)
build_doc.finalize_options()
self.assertEqual(2, len(build_doc.builders))
self.assertIn('html', build_doc.builders)
self.assertIn('man', build_doc.builders)
build_doc = packaging.LocalBuildDoc(self.distr)
build_doc.builders = ''
build_doc.finalize_options()
self.assertEqual('', build_doc.builders)
build_doc = packaging.LocalBuildDoc(self.distr)
build_doc.builders = 'man'
build_doc.finalize_options()
self.assertEqual(1, len(build_doc.builders))
self.assertIn('man', build_doc.builders)
build_doc = packaging.LocalBuildDoc(self.distr)
build_doc.builders = 'html,man,doctest'
build_doc.finalize_options()
self.assertIn('html', build_doc.builders)
self.assertIn('man', build_doc.builders)
self.assertIn('doctest', build_doc.builders)
class ParseRequirementsTest(base.BaseTestCase):
def setUp(self):
super(ParseRequirementsTest, self).setUp()
(fd, self.tmp_file) = tempfile.mkstemp(prefix='openstack',
suffix='.setup')
def test_parse_requirements_normal(self):
with open(self.tmp_file, 'w') as fh:
fh.write("foo\nbar")
self.assertEqual(['foo', 'bar'],
packaging.parse_requirements([self.tmp_file]))
def test_parse_requirements_with_git_egg_url(self):
with open(self.tmp_file, 'w') as fh:
fh.write("-e git://foo.com/zipball#egg=bar")
self.assertEqual(['bar'],
packaging.parse_requirements([self.tmp_file]))
def test_parse_requirements_with_versioned_git_egg_url(self):
with open(self.tmp_file, 'w') as fh:
fh.write("-e git://foo.com/zipball#egg=bar-1.2.4")
self.assertEqual(['bar>=1.2.4'],
packaging.parse_requirements([self.tmp_file]))
def test_parse_requirements_with_http_egg_url(self):
with open(self.tmp_file, 'w') as fh:
fh.write("https://foo.com/zipball#egg=bar")
self.assertEqual(['bar'],
packaging.parse_requirements([self.tmp_file]))
def test_parse_requirements_with_versioned_http_egg_url(self):
with open(self.tmp_file, 'w') as fh:
fh.write("https://foo.com/zipball#egg=bar-4.2.1")
self.assertEqual(['bar>=4.2.1'],
packaging.parse_requirements([self.tmp_file]))
def test_parse_requirements_removes_index_lines(self):
with open(self.tmp_file, 'w') as fh:
fh.write("-f foobar")
self.assertEqual([], packaging.parse_requirements([self.tmp_file]))
def test_parse_requirements_override_with_env(self):
with open(self.tmp_file, 'w') as fh:
fh.write("foo\nbar")
self.useFixture(
fixtures.EnvironmentVariable('PBR_REQUIREMENTS_FILES',
self.tmp_file))
self.assertEqual(['foo', 'bar'],
packaging.parse_requirements())
def test_parse_requirements_override_with_env_multiple_files(self):
with open(self.tmp_file, 'w') as fh:
fh.write("foo\nbar")
self.useFixture(
fixtures.EnvironmentVariable('PBR_REQUIREMENTS_FILES',
"no-such-file," + self.tmp_file))
self.assertEqual(['foo', 'bar'],
packaging.parse_requirements())
def test_get_requirement_from_file_empty(self):
actual = packaging.get_reqs_from_files([])
self.assertEqual([], actual)
def test_parse_requirements_with_comments(self):
with open(self.tmp_file, 'w') as fh:
fh.write("# this is a comment\nfoobar\n# and another one\nfoobaz")
self.assertEqual(['foobar', 'foobaz'],
packaging.parse_requirements([self.tmp_file]))
def test_parse_requirements_python_version(self):
with open("requirements-py%d.txt" % sys.version_info[0],
"w") as fh:
fh.write("# this is a comment\nfoobar\n# and another one\nfoobaz")
self.assertEqual(['foobar', 'foobaz'],
packaging.parse_requirements())
def test_parse_requirements_right_python_version(self):
with open("requirements-py1.txt", "w") as fh:
fh.write("thisisatrap")
with open("requirements-py%d.txt" % sys.version_info[0],
"w") as fh:
fh.write("# this is a comment\nfoobar\n# and another one\nfoobaz")
self.assertEqual(['foobar', 'foobaz'],
packaging.parse_requirements())
class ParseDependencyLinksTest(base.BaseTestCase):
def setUp(self):
super(ParseDependencyLinksTest, self).setUp()
(fd, self.tmp_file) = tempfile.mkstemp(prefix="openstack",
suffix=".setup")
def test_parse_dependency_normal(self):
with open(self.tmp_file, "w") as fh:
fh.write("http://test.com\n")
self.assertEqual(
["http://test.com"],
packaging.parse_dependency_links([self.tmp_file]))
def test_parse_dependency_with_git_egg_url(self):
with open(self.tmp_file, "w") as fh:
fh.write("-e git://foo.com/zipball#egg=bar")
self.assertEqual(
["git://foo.com/zipball#egg=bar"],
packaging.parse_dependency_links([self.tmp_file]))
def load_tests(loader, in_tests, pattern):
return testscenarios.load_tests_apply_scenarios(loader, in_tests, pattern)
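# How the scenarios above expand (a sketch, not part of the original module):
# testscenarios.load_tests_apply_scenarios clones every TestCase that defines a
# `scenarios` attribute, once per (name, dict) pair, and binds the dict entries
# as instance attributes.  SkipFileWrites therefore runs test_skip eight times,
# each time with its own option_key/option_value/env_key/env_value/pkg_func/
# filename combination, without any per-scenario boilerplate in the test body.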
| muzixing/ryu | pbr-0.10.0-py2.7.egg/pbr/tests/test_setup.py | Python | apache-2.0 | 15,144 |
__version_info__ = (0, 6, 1)
__version__ = '.'.join(map(str, __version_info__))
| NeuPhysics/NumSolTUn | docs/_themes/alabaster/_version.py | Python | gpl-2.0 | 80 |
from lib.mmonit import MmonitBaseAction
class MmonitGetUptimeHost(MmonitBaseAction):
def run(self, host_id, uptime_range=0, datefrom=0, dateto=0):
self.login()
if datefrom != 0 and uptime_range != 12:
raise Exception("If datefrom is set, range should be 12")
data = {"id": host_id, "range": uptime_range, "datefrom": datefrom, "dateto": dateto}
req = self.session.post("{}/reports/uptime/get".format(self.url), data=data)
try:
return req.json()
except Exception:
raise
finally:
self.logout()
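# Usage sketch (hypothetical config values, not part of the original action):
# inside a StackStorm pack this class is normally invoked by the action runner,
# but it can be exercised directly for debugging, assuming MmonitBaseAction
# takes the pack config and provides self.url, self.session, login() and
# logout():
#
#   action = MmonitGetUptimeHost(config={'url': 'https://mmonit.example.com:8080',
#                                        'username': 'admin', 'password': 'secret'})
#   result = action.run(host_id=3, uptime_range=12,
#                       datefrom=1420070400, dateto=1420156800)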
| meirwah/st2contrib | packs/mmonit/actions/get_uptime_host.py | Python | apache-2.0 | 604 |
#!/usr/bin/env python
#
# Copyright (c) 2016 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Yun, Liu<[email protected]>
import os
import sys
import stat
import shutil
import urllib2
import subprocess
import time
import json
SCRIPT_PATH = os.path.realpath(__file__)
ConstPath = os.path.dirname(SCRIPT_PATH)
DEFAULT_CMD_TIMEOUT = 600
def setUp():
global device_x86, device_arm, crosswalkVersion, ARCH_ARM, ARCH_X86, PLATFORMS, HOST_PREFIX, SHELL_FLAG, MODE, ANDROID_MODE, BIT, TARGETS, apptools, apktype
ARCH_ARM = ""
ARCH_X86 = ""
BIT = "32"
device_x86 = ""
device_arm = ""
TARGETS = ""
host = open(ConstPath + "/platforms.txt", 'r')
PLATFORMS = host.read().strip("\n\t")
if PLATFORMS != "windows":
HOST_PREFIX = ""
SHELL_FLAG = "True"
else:
HOST_PREFIX = "node "
SHELL_FLAG = "False"
host.close()
if HOST_PREFIX != "":
apptools = "%crosswalk-pkg%"
else:
apptools = "crosswalk-pkg"
if os.system(HOST_PREFIX + apptools) != 0:
print "crosswalk-pkg is not work, Please set the env"
sys.exit(1)
if PLATFORMS == "android":
apktype = ".apk"
elif PLATFORMS == "ios":
apktype = ".ipa"
elif PLATFORMS == "deb":
apktype = ".deb"
else:
apktype = ".msi"
if PLATFORMS == "android":
fp = open(ConstPath + "/arch.txt", 'r')
fp_arch = fp.read().strip("\n\t")
if "x86" in fp_arch:
ARCH_X86 = "x86"
if "arm" in fp_arch:
ARCH_ARM = "arm"
if "64" in fp_arch:
BIT = "64"
fp.close()
if BIT == "32":
if ARCH_X86 == "x86" and ARCH_ARM == "":
TARGETS = "x86"
elif ARCH_ARM == "arm" and ARCH_X86 == "":
TARGETS = "armeabi-v7a"
elif ARCH_ARM == "arm" and ARCH_X86 == "x86":
TARGETS = "armeabi-v7a x86"
else:
if ARCH_X86 == "x86" and ARCH_ARM == "":
TARGETS = "x86_64"
elif ARCH_ARM == "arm" and ARCH_X86 == "":
TARGETS = "arm64-v8a"
elif ARCH_ARM == "arm" and ARCH_X86 == "x86":
TARGETS = "arm64-v8a x86_64"
mode = open(ConstPath + "/mode.txt", 'r')
mode_type = mode.read().strip("\n\t")
if mode_type == "embedded":
MODE = ""
ANDROID_MODE = "embedded"
elif mode_type == "shared":
MODE = " --android-shared"
ANDROID_MODE = "shared"
else:
MODE = " --android-lite"
ANDROID_MODE = "lite"
mode.close()
device = ""
if PLATFORMS == "android":
#device = "Medfield61809467,066e11baf0ecb889"
device = os.environ.get('DEVICE_ID')
if not device:
print ("Get DEVICE_ID env error\n")
sys.exit(1)
if device:
if ARCH_ARM != "" and ARCH_X86 != "":
if "," in device:
if getDeviceCpuAbi(device.split(',')[0]) == "x86":
device_x86 = device.split(',')[0]
else:
device_arm = device.split(',')[0]
if getDeviceCpuAbi(device.split(',')[1]) == "x86":
device_x86 = device.split(',')[1]
else:
device_arm = device.split(',')[1]
if not device_x86 or not device_arm:
print ("Need x86 and arm architecture devices id\n")
sys.exit(1)
else:
print ("Need x86 and arm architecture devices id\n")
sys.exit(1)
elif ARCH_ARM != "" and ARCH_X86 == "":
if getDeviceCpuAbi(device) == "arm":
device_arm = device
if not device_arm:
print ("Need arm architecture devices id\n")
sys.exit(1)
elif ARCH_ARM == "" and ARCH_X86 != "":
if getDeviceCpuAbi(device) == "x86":
device_x86 = device
if not device_x86:
print ("Need x86 architecture devices id\n")
sys.exit(1)
if PLATFORMS == "android" or PLATFORMS == "windows":
if not os.path.exists(ConstPath + "/VERSION"):
version_path = ConstPath + "/../../VERSION"
else:
version_path = ConstPath + "/VERSION"
with open(version_path) as json_file:
data = json.load(json_file)
crosswalkVersion = data['main-version'].strip(os.linesep)
def getstatusoutput(cmd, time_out=DEFAULT_CMD_TIMEOUT):
pre_time = time.time()
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=SHELL_FLAG)
while True:
output_line = cmd_proc.stdout.read()
cmd_return_code = cmd_proc.poll()
elapsed_time = time.time() - pre_time
if cmd_return_code is None:
if elapsed_time >= time_out:
killProcesses(ppid=cmd_proc.pid)
return False
elif output_line == '' and cmd_return_code is not None:
break
sys.stdout.write(output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def getDeviceCpuAbi(device):
cmd = 'adb -s ' + device + ' shell getprop'
(return_code, output) = getstatusoutput(cmd)
for line in output[0].split('/n'):
if "[ro.product.cpu.abi]" in line and "x86" in line:
return "x86"
else:
return "arm"
def overwriteCopy(src, dest, symlinks=False, ignore=None):
if not os.path.exists(dest):
os.makedirs(dest)
shutil.copystat(src, dest)
sub_list = os.listdir(src)
if ignore:
excl = ignore(src, sub_list)
sub_list = [x for x in sub_list if x not in excl]
for i_sub in sub_list:
s_path = os.path.join(src, i_sub)
d_path = os.path.join(dest, i_sub)
if symlinks and os.path.islink(s_path):
if os.path.lexists(d_path):
os.remove(d_path)
os.symlink(os.readlink(s_path), d_path)
try:
s_path_s = os.lstat(s_path)
s_path_mode = stat.S_IMODE(s_path_s.st_mode)
os.lchmod(d_path, s_path_mode)
except Exception:
pass
elif os.path.isdir(s_path):
overwriteCopy(s_path, d_path, symlinks, ignore)
else:
shutil.copy2(s_path, d_path)
def doCopy(src_item=None, dest_item=None):
try:
if os.path.isdir(src_item):
overwriteCopy(src_item, dest_item, symlinks=True)
else:
if not os.path.exists(os.path.dirname(dest_item)):
os.makedirs(os.path.dirname(dest_item))
shutil.copy2(src_item, dest_item)
except Exception as e:
return False
return True
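# Usage sketch (not part of the original module): a test script in this suite
# would typically import comm and call setUp() once before using the helpers:
#
#   import comm
#   comm.setUp()
#   code, output = comm.getstatusoutput("adb devices")
#   comm.doCopy(comm.ConstPath + "/testapp", "/tmp/testapp")   # hypothetical paths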
| ibelem/crosswalk-test-suite | apptools/apptools-manifest-tests/comm.py | Python | bsd-3-clause | 8,359 |
##########################################################################
#
# Copyright (c) 2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
class DataConvertOpTest( unittest.TestCase ) :
def testScaling( self ) :
o = IECore.DataConvertOp()(
data = IECore.FloatVectorData( [ 0, 0.5, 1 ] ),
targetType = IECore.UCharVectorData.staticTypeId()
)
self.assertEqual(
o,
IECore.UCharVectorData( [ 0, 128, 255 ] )
)
def testDimensionUpConversion( self ) :
o = IECore.DataConvertOp()(
data = IECore.FloatVectorData( [ 0, 0.5, 1, 0.1, 2, 10 ] ),
targetType = IECore.V3fVectorData.staticTypeId()
)
self.assertEqual(
o,
IECore.V3fVectorData( [ IECore.V3f( 0, 0.5, 1 ), IECore.V3f( 0.1, 2, 10 ) ] )
)
def testDimensionDownConversion( self ) :
o = IECore.DataConvertOp()(
data = IECore.V3iVectorData( [ IECore.V3i( 1, 2, 3 ), IECore.V3i( 4, 5, 6 ) ] ),
targetType = IECore.IntVectorData.staticTypeId()
)
self.assertEqual(
o,
IECore.IntVectorData( [ 1, 2, 3, 4, 5, 6 ] )
)
def testWrongSizeForDimensions( self ) :
self.assertRaises(
RuntimeError,
IECore.DataConvertOp(),
data = IECore.FloatVectorData( [ 1, 2 ] ),
targetType = IECore.V3fVectorData.staticTypeId()
)
if __name__ == "__main__":
unittest.main()
| lento/cortex | test/IECore/DataConvertOpTest.py | Python | bsd-3-clause | 3,033 |
#!/usr/bin/env python
"""Execute the tests for the razers3 program.
The golden test outputs are generated by the script generate_outputs.sh.
You have to give the root paths to the source and the binaries as arguments to
the program. These are the paths to the directory that contains the 'projects'
directory.
Usage: run_tests.py SOURCE_ROOT_PATH BINARY_ROOT_PATH
"""
import logging
import os.path
import sys
# Automagically add util/py_lib to PYTHONPATH environment variable.
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
'..', '..', 'util', 'py_lib'))
sys.path.insert(0, path)
import seqan.app_tests as app_tests
class RemovePairIdColumn(object):
"""Transformation to remove pair id column."""
def __init__(self, col_no=8, min_cols=8):
# The index of the column to remove.
self.col_no = col_no
# If there are less than min_col columns then we don't remove.
self.min_cols = min_cols
def apply(self, text, is_left):
lines = text.splitlines(True)
lines2 = []
for line in lines:
cols = line.split('\t')
if len(cols) > self.min_cols:
cols = cols[0:self.col_no] + cols[self.col_no + 1:]
lines2.append('\t'.join(cols))
return ''.join(lines2)
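# Example of the transform above (hypothetical fields, not part of the original
# script): with the default col_no=8 and min_cols=8, a line such as
#   "a\tb\tc\td\te\tf\tg\th\tPAIRID\tj\n"
# has its ninth field (index 8, the pair id) removed and becomes
#   "a\tb\tc\td\te\tf\tg\th\tj\n",
# while lines with eight or fewer tab-separated fields pass through unchanged.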
def main(source_base, binary_base, num_threads=1):
"""Main entry point of the script."""
print 'Executing test for razers3'
print '==========================='
print
ph = app_tests.TestPathHelper(
source_base, binary_base,
'apps/razers3/tests') # tests dir
# ============================================================
# Auto-detect the binary path.
# ============================================================
path_to_program = app_tests.autolocateBinary(
binary_base, 'bin', 'razers3')
# ============================================================
# Built TestConf list.
# ============================================================
# Build list with TestConf objects, analoguely to how the output
# was generated in generate_outputs.sh.
conf_list = []
# We prepare a list of transforms to apply to the output files. This is
# used to strip the input/output paths from the programs' output to
# make it more canonical and host independent.
ph.outFile('-') # To ensure that the out path is set.
transforms = [
app_tests.ReplaceTransform(os.path.join(ph.source_base_path, 'apps/razers3/tests') + os.sep, '', right=True),
app_tests.ReplaceTransform(ph.temp_dir + os.sep, '', right=True),
]
# Transforms for SAM output format only. Make VN field of @PG header canonical.
sam_transforms = [app_tests.RegexpReplaceTransform(r'\tVN:[^\t]*', r'\tVN:VERSION', right=True, left=True)]
# Transforms for RazerS output format only. Remove pair id column.
razers_transforms = [RemovePairIdColumn()]
# ============================================================
# Run Adeno Single-End Tests
# ============================================================
# We run the following for all read lengths we have reads for.
for rl in [36, 100]:
# Run with default options.
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('se-adeno-reads%d_1-tc%d.stdout' % (rl, num_threads)),
args=['-tc', str(num_threads),
ph.inFile('adeno-genome.fa'),
ph.inFile('adeno-reads%d_1.fa' % rl),
'-o', ph.outFile('se-adeno-reads%d_1-tc%d.razers' % (rl, num_threads))],
to_diff=[(ph.inFile('se-adeno-reads%d_1-tc%d.razers' % (rl, num_threads)),
ph.outFile('se-adeno-reads%d_1-tc%d.razers' % (rl, num_threads))),
(ph.inFile('se-adeno-reads%d_1-tc%d.stdout' % (rl, num_threads)),
ph.outFile('se-adeno-reads%d_1-tc%d.stdout' % (rl, num_threads)))])
conf_list.append(conf)
# Allow indels.
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('se-adeno-reads%d_1-ng-tc%d.stdout' % (rl, num_threads)),
args=['-tc', str(num_threads),
'-ng',
ph.inFile('adeno-genome.fa'),
ph.inFile('adeno-reads%d_1.fa' % rl),
'-o', ph.outFile('se-adeno-reads%d_1-ng-tc%d.razers' % (rl, num_threads))],
to_diff=[(ph.inFile('se-adeno-reads%d_1-ng-tc%d.razers' % (rl, num_threads)),
ph.outFile('se-adeno-reads%d_1-ng-tc%d.razers' % (rl, num_threads))),
(ph.inFile('se-adeno-reads%d_1-ng-tc%d.stdout' % (rl, num_threads)),
ph.outFile('se-adeno-reads%d_1-ng-tc%d.stdout' % (rl, num_threads)))])
conf_list.append(conf)
# Compute forward/reverse matches only.
for o in ['-r', '-f']:
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('se-adeno-reads%d_1%s-tc%d.stdout' % (rl, o, num_threads)),
args=['-tc', str(num_threads),
o,
ph.inFile('adeno-genome.fa'),
ph.inFile('adeno-reads%d_1.fa' % rl),
'-o', ph.outFile('se-adeno-reads%d_1%s-tc%d.razers' % (rl, o, num_threads))],
to_diff=[(ph.inFile('se-adeno-reads%d_1%s-tc%d.razers' % (rl, o, num_threads)),
ph.outFile('se-adeno-reads%d_1%s-tc%d.razers' % (rl, o, num_threads))),
(ph.inFile('se-adeno-reads%d_1%s-tc%d.stdout' % (rl, o, num_threads)),
ph.outFile('se-adeno-reads%d_1%s-tc%d.stdout' % (rl, o, num_threads)))])
conf_list.append(conf)
# Compute with different identity rates.
for i in range(90, 101):
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('se-adeno-reads%d_1-i%d-tc%d.stdout' % (rl, i, num_threads)),
args=['-tc', str(num_threads),
'-i', str(i),
ph.inFile('adeno-genome.fa'),
ph.inFile('adeno-reads%d_1.fa' % rl),
'-o', ph.outFile('se-adeno-reads%d_1-i%d-tc%d.razers' % (rl, i, num_threads))],
to_diff=[(ph.inFile('se-adeno-reads%d_1-i%d-tc%d.razers' % (rl, i, num_threads)),
ph.outFile('se-adeno-reads%d_1-i%d-tc%d.razers' % (rl, i, num_threads))),
(ph.inFile('se-adeno-reads%d_1-i%d-tc%d.stdout' % (rl, i, num_threads)),
ph.outFile('se-adeno-reads%d_1-i%d-tc%d.stdout' % (rl, i, num_threads)))])
conf_list.append(conf)
# Compute with different output formats.
for of, suffix in enumerate(['razers', 'fa', 'eland', 'gff', 'sam', 'afg']):
this_transforms = list(transforms)
if suffix == 'razers':
this_transforms += razers_transforms
elif suffix == 'sam':
this_transforms += sam_transforms
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('se-adeno-reads%d_1-of%d-tc%d.stdout' % (rl, of, num_threads)),
args=['-tc', str(num_threads),
ph.inFile('adeno-genome.fa'),
ph.inFile('adeno-reads%d_1.fa' % rl),
'-o', ph.outFile('se-adeno-reads%d_1-of%d-tc%d.%s' % (rl, of, num_threads, suffix))],
to_diff=[(ph.inFile('se-adeno-reads%d_1-of%d-tc%d.%s' % (rl, of, num_threads, suffix)),
ph.outFile('se-adeno-reads%d_1-of%d-tc%d.%s' % (rl, of, num_threads, suffix)),
this_transforms),
(ph.inFile('se-adeno-reads%d_1-of%d-tc%d.stdout' % (rl, of, num_threads)),
ph.outFile('se-adeno-reads%d_1-of%d-tc%d.stdout' % (rl, of, num_threads)),
transforms)])
conf_list.append(conf)
# Compute with different sort orders.
for so in [0, 1]:
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('se-adeno-reads%d_1-so%d-tc%d.stdout' % (rl, so, num_threads)),
args=['-tc', str(num_threads),
'-so', str(so),
ph.inFile('adeno-genome.fa'),
ph.inFile('adeno-reads%d_1.fa' % rl),
'-o', ph.outFile('se-adeno-reads%d_1-so%d-tc%d.razers' % (rl, so, num_threads))],
to_diff=[(ph.inFile('se-adeno-reads%d_1-so%d-tc%d.razers' % (rl, so, num_threads)),
ph.outFile('se-adeno-reads%d_1-so%d-tc%d.razers' % (rl, so, num_threads))),
(ph.inFile('se-adeno-reads%d_1-so%d-tc%d.stdout' % (rl, so, num_threads)),
ph.outFile('se-adeno-reads%d_1-so%d-tc%d.stdout' % (rl, so, num_threads)))])
conf_list.append(conf)
# ============================================================
# Run Adeno Paired-End Tests
# ============================================================
# We run the following for all read lengths we have reads for.
for rl in [36, 100]:
# Run with default options.
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('pe-adeno-reads%d_2-tc%d.stdout' % (rl, num_threads)),
args=['-tc', str(num_threads),
ph.inFile('adeno-genome.fa'),
ph.inFile('adeno-reads%d_1.fa' % rl),
ph.inFile('adeno-reads%d_2.fa' % rl),
'-o', ph.outFile('pe-adeno-reads%d_2-tc%d.razers' % (rl, num_threads))],
to_diff=[(ph.inFile('pe-adeno-reads%d_2-tc%d.razers' % (rl, num_threads)),
ph.outFile('pe-adeno-reads%d_2-tc%d.razers' % (rl, num_threads)),
razers_transforms),
(ph.inFile('pe-adeno-reads%d_2-tc%d.stdout' % (rl, num_threads)),
ph.outFile('pe-adeno-reads%d_2-tc%d.stdout' % (rl, num_threads)))])
conf_list.append(conf)
# Allow indels.
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('pe-adeno-reads%d_2-tc%d.stdout' % (rl, num_threads)),
args=['-tc', str(num_threads),
ph.inFile('adeno-genome.fa'),
ph.inFile('adeno-reads%d_1.fa' % rl),
ph.inFile('adeno-reads%d_2.fa' % rl),
'-o', ph.outFile('pe-adeno-reads%d_2-tc%d.razers' % (rl, num_threads))],
to_diff=[(ph.inFile('pe-adeno-reads%d_2-tc%d.razers' % (rl, num_threads)),
ph.outFile('pe-adeno-reads%d_2-tc%d.razers' % (rl, num_threads)),
razers_transforms),
(ph.inFile('pe-adeno-reads%d_2-tc%d.stdout' % (rl, num_threads)),
ph.outFile('pe-adeno-reads%d_2-tc%d.stdout' % (rl, num_threads)))])
conf_list.append(conf)
# Compute forward/reverse matches only.
for o in ['-r', '-f']:
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('pe-adeno-reads%d_2%s-tc%d.stdout' % (rl, o, num_threads)),
args=['-tc', str(num_threads),
o,
ph.inFile('adeno-genome.fa'),
ph.inFile('adeno-reads%d_1.fa' % rl),
ph.inFile('adeno-reads%d_2.fa' % rl),
'-o', ph.outFile('pe-adeno-reads%d_2%s-tc%d.razers' % (rl, o, num_threads))],
to_diff=[(ph.inFile('pe-adeno-reads%d_2%s-tc%d.razers' % (rl, o, num_threads)),
ph.outFile('pe-adeno-reads%d_2%s-tc%d.razers' % (rl, o, num_threads)),
razers_transforms),
(ph.inFile('pe-adeno-reads%d_2%s-tc%d.stdout' % (rl, o, num_threads)),
ph.outFile('pe-adeno-reads%d_2%s-tc%d.stdout' % (rl, o, num_threads)))])
conf_list.append(conf)
# Compute with different identity rates.
for i in range(90, 101):
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('pe-adeno-reads%d_2-i%d-tc%d.stdout' % (rl, i, num_threads)),
args=['-tc', str(num_threads),
'-i', str(i),
ph.inFile('adeno-genome.fa'),
ph.inFile('adeno-reads%d_1.fa' % rl),
ph.inFile('adeno-reads%d_2.fa' % rl),
'-o', ph.outFile('pe-adeno-reads%d_2-i%d-tc%d.razers' % (rl, i, num_threads))],
to_diff=[(ph.inFile('pe-adeno-reads%d_2-i%d-tc%d.razers' % (rl, i, num_threads)),
ph.outFile('pe-adeno-reads%d_2-i%d-tc%d.razers' % (rl, i, num_threads)),
razers_transforms),
(ph.inFile('pe-adeno-reads%d_2-i%d-tc%d.stdout' % (rl, i, num_threads)),
ph.outFile('pe-adeno-reads%d_2-i%d-tc%d.stdout' % (rl, i, num_threads)))])
conf_list.append(conf)
# Compute with different output formats.
for of, suffix in enumerate(['razers', 'fa', 'eland', 'gff', 'sam', 'afg']):
this_transforms = list(transforms)
if suffix == 'razers':
this_transforms += razers_transforms
elif suffix == 'sam':
this_transforms += sam_transforms
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('pe-adeno-reads%d_2-of%d-tc%d.stdout' % (rl, of, num_threads)),
args=['-tc', str(num_threads),
ph.inFile('adeno-genome.fa'),
ph.inFile('adeno-reads%d_1.fa' % rl),
ph.inFile('adeno-reads%d_2.fa' % rl),
'-o', ph.outFile('pe-adeno-reads%d_2-of%d-tc%d.%s' % (rl, of, num_threads, suffix))],
to_diff=[(ph.inFile('pe-adeno-reads%d_2-of%d-tc%d.%s' % (rl, of, num_threads, suffix)),
ph.outFile('pe-adeno-reads%d_2-of%d-tc%d.%s' % (rl, of, num_threads, suffix)),
this_transforms),
(ph.inFile('pe-adeno-reads%d_2-of%d-tc%d.stdout' % (rl, of, num_threads)),
ph.outFile('pe-adeno-reads%d_2-of%d-tc%d.stdout' % (rl, of, num_threads)),
this_transforms)])
conf_list.append(conf)
# Compute with different sort orders.
for so in [0, 1]:
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('pe-adeno-reads%d_2-so%d-tc%d.stdout' % (rl, so, num_threads)),
args=['-tc', str(num_threads),
'-so', str(so),
ph.inFile('adeno-genome.fa'),
ph.inFile('adeno-reads%d_1.fa' % rl),
ph.inFile('adeno-reads%d_2.fa' % rl),
'-o', ph.outFile('pe-adeno-reads%d_2-so%d-tc%d.razers' % (rl, so, num_threads))],
to_diff=[(ph.inFile('pe-adeno-reads%d_2-so%d-tc%d.razers' % (rl, so, num_threads)),
ph.outFile('pe-adeno-reads%d_2-so%d-tc%d.razers' % (rl, so, num_threads)),
razers_transforms),
(ph.inFile('pe-adeno-reads%d_2-so%d-tc%d.stdout' % (rl, so, num_threads)),
ph.outFile('pe-adeno-reads%d_2-so%d-tc%d.stdout' % (rl, so, num_threads)))])
conf_list.append(conf)
# Execute the tests.
failures = 0
for conf in conf_list:
res = app_tests.runTest(conf)
# Output to the user.
print ' '.join(['razers3'] + conf.args),
if res:
print 'OK'
else:
failures += 1
print 'FAILED'
# Cleanup.
ph.deleteTempDir()
print '=============================='
print ' total tests: %d' % len(conf_list)
print ' failed tests: %d' % failures
print 'successful tests: %d' % (len(conf_list) - failures)
print '=============================='
    # Compute and return the return code.
return failures != 0
if __name__ == '__main__':
sys.exit(app_tests.main(main))
| rrahn/jst_bench | include/seqan/apps/razers3/tests/run_tests.py | Python | gpl-3.0 | 16,681 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'OrderAndItemCharges.code'
db.alter_column(u'shipping_orderanditemcharges', 'code', self.gf('oscar.models.fields.autoslugfield.AutoSlugField')(allow_duplicates=False, max_length=128, separator=u'-', unique=True, populate_from='name', overwrite=False))
# Changing field 'WeightBased.code'
db.alter_column(u'shipping_weightbased', 'code', self.gf('oscar.models.fields.autoslugfield.AutoSlugField')(allow_duplicates=False, max_length=128, separator=u'-', unique=True, populate_from='name', overwrite=False))
def backwards(self, orm):
# Changing field 'OrderAndItemCharges.code'
db.alter_column(u'shipping_orderanditemcharges', 'code', self.gf('django.db.models.fields.SlugField')(max_length=128, unique=True))
# Changing field 'WeightBased.code'
db.alter_column(u'shipping_weightbased', 'code', self.gf('django.db.models.fields.SlugField')(max_length=128, unique=True))
models = {
u'address.country': {
'Meta': {'ordering': "('-display_order', 'name')", 'object_name': 'Country'},
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'is_shipping_country': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'iso_3166_1_a2': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
'iso_3166_1_a3': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'db_index': 'True'}),
'iso_3166_1_numeric': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'printable_name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'shipping.orderanditemcharges': {
'Meta': {'object_name': 'OrderAndItemCharges'},
'code': ('oscar.models.fields.autoslugfield.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '128', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
'countries': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['address.Country']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'free_shipping_threshold': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'price_per_item': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'}),
'price_per_order': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'})
},
u'shipping.weightband': {
'Meta': {'ordering': "['upper_limit']", 'object_name': 'WeightBand'},
'charge': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'method': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bands'", 'to': u"orm['shipping.WeightBased']"}),
'upper_limit': ('django.db.models.fields.FloatField', [], {})
},
u'shipping.weightbased': {
'Meta': {'object_name': 'WeightBased'},
'code': ('oscar.models.fields.autoslugfield.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '128', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
'countries': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['address.Country']", 'null': 'True', 'blank': 'True'}),
'default_weight': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'upper_charge': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2'})
}
}
complete_apps = ['shipping'] | jinnykoo/christmas | src/oscar/apps/shipping/south_migrations/0006_auto__chg_field_orderanditemcharges_code__chg_field_weightbased_code.py | Python | bsd-3-clause | 5,032 |
import imghdr
import json
import logging
from django.conf import settings
from django.db.models import Q
from django.http import (HttpResponse, HttpResponseRedirect,
HttpResponseBadRequest, Http404)
from django.shortcuts import get_object_or_404, render
from django.views.decorators.clickjacking import xframe_options_sameorigin
from django.views.decorators.http import require_POST
from tower import ugettext as _
from kitsune.access.decorators import login_required
from kitsune.gallery import ITEMS_PER_PAGE
from kitsune.gallery.forms import ImageForm
from kitsune.gallery.models import Image, Video
from kitsune.gallery.utils import upload_image, check_media_permissions
from kitsune.sumo.urlresolvers import reverse
from kitsune.sumo.utils import paginate
from kitsune.upload.tasks import compress_image, generate_thumbnail
from kitsune.upload.utils import FileTooLargeError
from kitsune.wiki.tasks import schedule_rebuild_kb
log = logging.getLogger('k.gallery')
def gallery(request, media_type='image'):
"""The media gallery.
Filter can be set to 'images' or 'videos'.
"""
if media_type == 'image':
media_qs = Image.objects.filter(locale=request.LANGUAGE_CODE)
elif media_type == 'video':
media_qs = Video.objects.filter(locale=request.LANGUAGE_CODE)
else:
raise Http404
media = paginate(request, media_qs, per_page=ITEMS_PER_PAGE)
drafts = _get_drafts(request.user)
image = drafts['image'][0] if drafts['image'] else None
image_form = _init_media_form(ImageForm, request, image)
if request.method == 'POST':
image_form.is_valid()
return render(request, 'gallery/gallery.html', {
'media': media,
'media_type': media_type,
'image_form': image_form,
'submitted': request.method == 'POST'})
@login_required
@require_POST
def upload(request, media_type='image'):
"""Finalizes an uploaded draft."""
drafts = _get_drafts(request.user)
if media_type == 'image' and drafts['image']:
# We're publishing an image draft!
image_form = _init_media_form(ImageForm, request, drafts['image'][0])
if image_form.is_valid():
img = image_form.save(is_draft=None)
generate_thumbnail.delay(img, 'file', 'thumbnail')
compress_image.delay(img, 'file')
# Rebuild KB
schedule_rebuild_kb()
return HttpResponseRedirect(img.get_absolute_url())
else:
return gallery(request, media_type='image')
return HttpResponseBadRequest(u'Unrecognized POST request.')
@login_required
@require_POST
def cancel_draft(request, media_type='image'):
"""Delete an existing draft for the user."""
drafts = _get_drafts(request.user)
if media_type == 'image' and drafts['image']:
drafts['image'].delete()
drafts['image'] = None
else:
msg = _(u'Unrecognized request or nothing to cancel.')
content_type = None
if request.is_ajax():
msg = json.dumps({'status': 'error', 'message': msg})
content_type = 'application/json'
return HttpResponseBadRequest(msg, content_type=content_type)
if request.is_ajax():
return HttpResponse(json.dumps({'status': 'success'}),
content_type='application/json')
return HttpResponseRedirect(reverse('gallery.gallery', args=[media_type]))
def gallery_async(request):
"""AJAX endpoint to media gallery.
Returns an HTML list representation of the media.
"""
# Maybe refactor this into existing views and check request.is_ajax?
media_type = request.GET.get('type', 'image')
term = request.GET.get('q')
media_locale = request.GET.get('locale', settings.WIKI_DEFAULT_LANGUAGE)
if media_type == 'image':
media_qs = Image.objects
elif media_type == 'video':
media_qs = Video.objects
else:
raise Http404
media_qs = media_qs.filter(locale=media_locale)
if term:
media_qs = media_qs.filter(Q(title__icontains=term) |
Q(description__icontains=term))
media = paginate(request, media_qs, per_page=ITEMS_PER_PAGE)
return render(request, 'gallery/includes/media_list.html', {
'media_list': media})
def search(request, media_type):
"""Search the media gallery."""
term = request.GET.get('q')
if not term:
url = reverse('gallery.gallery', args=[media_type])
return HttpResponseRedirect(url)
filter = Q(title__icontains=term) | Q(description__icontains=term)
if media_type == 'image':
media_qs = Image.objects.filter(filter, locale=request.LANGUAGE_CODE)
elif media_type == 'video':
media_qs = Video.objects.filter(filter, locale=request.LANGUAGE_CODE)
else:
raise Http404
media = paginate(request, media_qs, per_page=ITEMS_PER_PAGE)
return render(request, 'gallery/search.html', {
'media': media,
'media_type': media_type,
'q': term})
@login_required
def delete_media(request, media_id, media_type='image'):
"""Delete media and redirect to gallery view."""
media, media_format = _get_media_info(media_id, media_type)
check_media_permissions(media, request.user, 'delete')
if request.method == 'GET':
# Render the confirmation page
return render(request, 'gallery/confirm_media_delete.html', {
'media': media,
'media_type': media_type,
'media_format': media_format})
# Handle confirm delete form POST
log.warning('User %s is deleting %s with id=%s' %
(request.user, media_type, media.id))
media.delete()
# Rebuild KB
schedule_rebuild_kb()
return HttpResponseRedirect(reverse('gallery.gallery', args=[media_type]))
@login_required
def edit_media(request, media_id, media_type='image'):
"""Edit media means only changing the description, for now."""
media, media_format = _get_media_info(media_id, media_type)
check_media_permissions(media, request.user, 'change')
if media_type == 'image':
media_form = _init_media_form(ImageForm, request, media,
('locale', 'title'))
else:
raise Http404
if request.method == 'POST' and media_form.is_valid():
media = media_form.save(update_user=request.user, is_draft=False)
return HttpResponseRedirect(
reverse('gallery.media', args=[media_type, media_id]))
return render(request, 'gallery/edit_media.html', {
'media': media,
'media_format': media_format,
'form': media_form,
'media_type': media_type})
def media(request, media_id, media_type='image'):
"""The media page."""
media, media_format = _get_media_info(media_id, media_type)
return render(request, 'gallery/media.html', {
'media': media,
'media_format': media_format,
'media_type': media_type})
@login_required
@require_POST
@xframe_options_sameorigin
def upload_async(request, media_type='image'):
"""Upload images or videos from request.FILES."""
# TODO(paul): validate the Submit File on upload modal async
# even better, use JS validation for title length.
try:
if media_type == 'image':
file_info = upload_image(request)
else:
msg = _(u'Unrecognized media type.')
return HttpResponseBadRequest(
json.dumps({'status': 'error', 'message': msg}))
except FileTooLargeError as e:
return HttpResponseBadRequest(
json.dumps({'status': 'error', 'message': e.args[0]}))
if isinstance(file_info, dict) and 'thumbnail_url' in file_info:
schedule_rebuild_kb()
return HttpResponse(
json.dumps({'status': 'success', 'file': file_info}))
message = _(u'Could not upload your image.')
return HttpResponseBadRequest(
json.dumps({'status': 'error',
'message': unicode(message),
'errors': file_info}))
def _get_media_info(media_id, media_type):
"""Returns an image or video along with media format for the image."""
media_format = None
if media_type == 'image':
media = get_object_or_404(Image, pk=media_id)
try:
media_format = imghdr.what(media.file.path)
except UnicodeEncodeError:
pass
elif media_type == 'video':
media = get_object_or_404(Video, pk=media_id)
else:
raise Http404
return (media, media_format)
def _get_drafts(user):
"""Get video and image drafts for a given user."""
drafts = {'image': None, 'video': None}
if user.is_authenticated():
drafts['image'] = Image.objects.filter(creator=user, is_draft=True)
drafts['video'] = Video.objects.filter(creator=user, is_draft=True)
return drafts
def _init_media_form(form_cls, request=None, obj=None,
ignore_fields=()):
"""Initializes the media form with an Image/Video instance and POSTed data.
    form_cls is a Django ModelForm.
    Request method must be POST for POST data to be bound.
    ignore_fields contains the list of fields to default to their current
    value from the Image/Video object.
"""
post_data = None
initial = None
if request:
initial = {'locale': request.LANGUAGE_CODE}
file_data = None
if request.method == 'POST':
file_data = request.FILES
post_data = request.POST.copy()
if obj and ignore_fields:
for f in ignore_fields:
post_data[f] = getattr(obj, f)
return form_cls(post_data, file_data, instance=obj, initial=initial,
is_ajax=False)
| orvi2014/kitsune | kitsune/gallery/views.py | Python | bsd-3-clause | 9,805 |
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for gtest_xml_output"""
__author__ = '[email protected] (Sean Mcafee)'
import re
from xml.dom import minidom, Node
import gtest_test_utils
GTEST_OUTPUT_FLAG = '--gtest_output'
GTEST_DEFAULT_OUTPUT_FILE = 'test_detail.xml'
class GTestXMLTestCase(gtest_test_utils.TestCase):
"""
Base class for tests of Google Test's XML output functionality.
"""
def AssertEquivalentNodes(self, expected_node, actual_node):
"""
Asserts that actual_node (a DOM node object) is equivalent to
expected_node (another DOM node object), in that either both of
them are CDATA nodes and have the same value, or both are DOM
elements and actual_node meets all of the following conditions:
* It has the same tag name as expected_node.
* It has the same set of attributes as expected_node, each with
the same value as the corresponding attribute of expected_node.
      Exceptions are any attribute named "time", which need only be
      convertible to a floating-point number, and any attribute named
      "type_param", which only has to be non-empty.
* It has an equivalent set of child nodes (including elements and
CDATA sections) as expected_node. Note that we ignore the
order of the children as they are not guaranteed to be in any
particular order.
"""
if expected_node.nodeType == Node.CDATA_SECTION_NODE:
self.assertEquals(Node.CDATA_SECTION_NODE, actual_node.nodeType)
self.assertEquals(expected_node.nodeValue, actual_node.nodeValue)
return
self.assertEquals(Node.ELEMENT_NODE, actual_node.nodeType)
self.assertEquals(Node.ELEMENT_NODE, expected_node.nodeType)
self.assertEquals(expected_node.tagName, actual_node.tagName)
expected_attributes = expected_node.attributes
actual_attributes = actual_node .attributes
self.assertEquals(
expected_attributes.length, actual_attributes.length,
'attribute numbers differ in element %s:\nExpected: %r\nActual: %r' % (
actual_node.tagName, expected_attributes.keys(),
actual_attributes.keys()))
for i in range(expected_attributes.length):
expected_attr = expected_attributes.item(i)
actual_attr = actual_attributes.get(expected_attr.name)
self.assert_(
actual_attr is not None,
'expected attribute %s not found in element %s' %
(expected_attr.name, actual_node.tagName))
self.assertEquals(
expected_attr.value, actual_attr.value,
' values of attribute %s in element %s differ: %s vs %s' %
(expected_attr.name, actual_node.tagName,
expected_attr.value, actual_attr.value))
expected_children = self._GetChildren(expected_node)
actual_children = self._GetChildren(actual_node)
self.assertEquals(
len(expected_children), len(actual_children),
'number of child elements differ in element ' + actual_node.tagName)
for child_id, child in expected_children.iteritems():
self.assert_(child_id in actual_children,
'<%s> is not in <%s> (in element %s)' %
(child_id, actual_children, actual_node.tagName))
self.AssertEquivalentNodes(child, actual_children[child_id])
identifying_attribute = {
'testsuites': 'name',
'testsuite': 'name',
'testcase': 'name',
'failure': 'message',
}
def _GetChildren(self, element):
"""
Fetches all of the child nodes of element, a DOM Element object.
Returns them as the values of a dictionary keyed by the IDs of the
children. For <testsuites>, <testsuite> and <testcase> elements, the ID
is the value of their "name" attribute; for <failure> elements, it is
the value of the "message" attribute; CDATA sections and non-whitespace
text nodes are concatenated into a single CDATA section with ID
"detail". An exception is raised if any element other than the above
four is encountered, if two child elements with the same identifying
attributes are encountered, or if any other type of node is encountered.
"""
children = {}
for child in element.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
self.assert_(child.tagName in self.identifying_attribute,
'Encountered unknown element <%s>' % child.tagName)
childID = child.getAttribute(self.identifying_attribute[child.tagName])
self.assert_(childID not in children)
children[childID] = child
elif child.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]:
if 'detail' not in children:
if (child.nodeType == Node.CDATA_SECTION_NODE or
not child.nodeValue.isspace()):
children['detail'] = child.ownerDocument.createCDATASection(
child.nodeValue)
else:
children['detail'].nodeValue += child.nodeValue
else:
self.fail('Encountered unexpected node type %d' % child.nodeType)
return children
def NormalizeXml(self, element):
"""
Normalizes Google Test's XML output to eliminate references to transient
information that may change from run to run.
* The "time" attribute of <testsuites>, <testsuite> and <testcase>
elements is replaced with a single asterisk, if it contains
only digit characters.
* The "timestamp" attribute of <testsuites> elements is replaced with a
single asterisk, if it contains a valid ISO8601 datetime value.
* The "type_param" attribute of <testcase> elements is replaced with a
      single asterisk (if it is non-empty) as it is the type name returned
by the compiler and is platform dependent.
* The line info reported in the first line of the "message"
attribute and CDATA section of <failure> elements is replaced with the
file's basename and a single asterisk for the line number.
* The directory names in file paths are removed.
* The stack traces are removed.
"""
if element.tagName == 'testsuites':
timestamp = element.getAttributeNode('timestamp')
timestamp.value = re.sub(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d$',
'*', timestamp.value)
if element.tagName in ('testsuites', 'testsuite', 'testcase'):
time = element.getAttributeNode('time')
time.value = re.sub(r'^\d+(\.\d+)?$', '*', time.value)
type_param = element.getAttributeNode('type_param')
if type_param and type_param.value:
type_param.value = '*'
elif element.tagName == 'failure':
source_line_pat = r'^.*[/\\](.*:)\d+\n'
# Replaces the source line information with a normalized form.
message = element.getAttributeNode('message')
message.value = re.sub(source_line_pat, '\\1*\n', message.value)
for child in element.childNodes:
if child.nodeType == Node.CDATA_SECTION_NODE:
# Replaces the source line information with a normalized form.
cdata = re.sub(source_line_pat, '\\1*\n', child.nodeValue)
# Removes the actual stack trace.
child.nodeValue = re.sub(r'\nStack trace:\n(.|\n)*',
'', cdata)
for child in element.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
self.NormalizeXml(child)
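# Illustrative sketch (editorial addition, not part of the original test
# utility): how NormalizeXml is expected to rewrite transient attributes.
# The XML literal below is hypothetical; only stdlib minidom and the class
# above are assumed.
#
#   doc = minidom.parseString(
#       '<testsuites name="All" tests="1" time="0.035" '
#       'timestamp="2011-10-31T18:52:42">'
#       '<testsuite name="S" time="0.01">'
#       '<testcase name="T" time="0.004"/></testsuite></testsuites>')
#   # After calling NormalizeXml(doc.documentElement) on a GTestXMLTestCase
#   # instance, every "time" value and the "timestamp" value read "*", so
#   # expected and actual XML can be diffed without run-to-run jitter.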
| paoloach/zdomus | zigbee_lib/googletest/googletest/test/gtest_xml_test_utils.py | Python | gpl-2.0 | 8,876 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ExportStrategy class that provides strategies to export model so later it
can be used for TensorFlow serving."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
__all__ = ['ExportStrategy']
class ExportStrategy(collections.namedtuple('ExportStrategy',
['name', 'export_fn'])):
def export(self, estimator, export_path):
return self.export_fn(estimator, export_path)
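# Illustrative usage sketch (editorial addition, not part of the TensorFlow
# module): the strategy is just a namedtuple of a name and a callable, so the
# caller supplies its own export_fn. The names below are hypothetical.
#
#   def my_export_fn(estimator, export_path):
#     # ... write the exported model for `estimator` under `export_path` ...
#     return export_path
#
#   strategy = ExportStrategy(name='simple_export', export_fn=my_export_fn)
#   final_path = strategy.export(estimator=None, export_path='/tmp/my_model')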
| jjas0nn/solvem | tensorflow/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/export_strategy.py | Python | mit | 1,195 |
#!/usr/bin/env python
import sys
import re
from helpers import *
PROGRAM_USAGE = """
SeqAn script to replace invalid identifiers (previously collected) in the SeqAn
codebase.
USAGE: replace_identifiers.py BASE_PATH [REPLACEMENTS]
BASE_PATH is the root path of all the folders to be searched.
REPLACEMENTS is a file of ``"key:value"`` pairs which contain the invalid
identifier and the replacement string.
If this file is not given, it is attemped to read the replacements from the
standard input stream.
""".strip()
def replace_all(text, subst):
"""
Perform the substitutions given by the dictionary ``subst`` on ``text``.
"""
for old in subst.keys():
text = old.sub(subst[old], text)
return text
def validate_file(file, subst):
"""
Perform the substitutions given by the dictionary ``subst`` on ``file``.
"""
#print file
    # Read the file contents; the with-statement also closes the handle.
    with open(file, 'r') as f:
        code = f.read()
old_len = len(code)
replaced = replace_all(code, subst)
#assert old_len == len(replaced)
open(file, 'w').write(replaced)
def build_subst_table(file):
"""
Read the substitutions defined in ``file`` and build a substitution table.
"""
table = {}
for line in file:
old, new = line.rstrip('\r\n').split(':')
table[re.compile(r'\b%s\b' % old.strip())] = new.strip()
return table
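# Illustrative sketch (editorial addition): given a hypothetical REPLACEMENTS
# file containing lines such as
#
#     _myOldName : myNewName
#     TValue2 : TSecondValue
#
# build_subst_table() returns a dict mapping the compiled pattern
# re.compile(r'\b_myOldName\b') to 'myNewName' (and likewise for the second
# line), and replace_all() then substitutes whole-word occurrences only.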
def main():
# Either read from stdin or expect a file path in the second argument.
# Since there is no reliable way of checking for an attached stdin on
# Windows, just assume good faith if the file name isn't given.
use_stdin = len(sys.argv) == 2
if not (len(sys.argv) == 3 or use_stdin):
print >>sys.stderr, 'ERROR: Invalid number of arguments.'
print >>sys.stderr, PROGRAM_USAGE
return 1
if use_stdin:
print >>sys.stderr, "Attempting to read from stdin ..."
project_path = sys.argv[1]
replacements_file = sys.stdin if use_stdin else open(sys.argv[2], 'r')
substitutions = build_subst_table(replacements_file)
for file in all_files(project_path):
validate_file(file, substitutions)
return 0
if __name__ == '__main__':
sys.exit(main())
| bkahlert/seqan-research | raw/workshop13/workshop2013-data-20130926/trunk/misc/renaming/replace_identifiers.py | Python | mit | 2,256 |
from PyQt5.uic import properties
| drnextgis/QGIS | python/PyQt/PyQt5/uic/properties.py | Python | gpl-2.0 | 33 |
'''This module implements specialized container datatypes providing
alternatives to Python's general purpose built-in containers, dict,
list, set, and tuple.
* namedtuple factory function for creating tuple subclasses with named fields
* deque list-like container with fast appends and pops on either end
* ChainMap dict-like class for creating a single view of multiple mappings
* Counter dict subclass for counting hashable objects
* OrderedDict dict subclass that remembers the order entries were added
* defaultdict dict subclass that calls a factory function to supply missing values
* UserDict wrapper around dictionary objects for easier dict subclassing
* UserList wrapper around list objects for easier list subclassing
* UserString wrapper around string objects for easier string subclassing
'''
__all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList',
'UserString', 'Counter', 'OrderedDict', 'ChainMap']
import _collections_abc
from operator import itemgetter as _itemgetter, eq as _eq
from keyword import iskeyword as _iskeyword
import sys as _sys
import heapq as _heapq
from _weakref import proxy as _proxy
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
from reprlib import recursive_repr as _recursive_repr
try:
from _collections import deque
except ImportError:
pass
else:
_collections_abc.MutableSequence.register(deque)
try:
from _collections import defaultdict
except ImportError:
pass
def __getattr__(name):
# For backwards compatibility, continue to make the collections ABCs
# through Python 3.6 available through the collections module.
# Note, no new collections ABCs were added in Python 3.7
if name in _collections_abc.__all__:
obj = getattr(_collections_abc, name)
import warnings
warnings.warn("Using or importing the ABCs from 'collections' instead "
"of from 'collections.abc' is deprecated, "
"and in 3.8 it will stop working",
DeprecationWarning, stacklevel=2)
globals()[name] = obj
return obj
raise AttributeError(f'module {__name__!r} has no attribute {name!r}')
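# Illustrative note (editorial addition, not part of the stdlib source): with
# the module-level __getattr__ above (PEP 562), an old-style attribute lookup
# such as ``collections.Mapping`` still resolves to the ABC from
# ``_collections_abc`` but emits a DeprecationWarning, e.g.:
#
#     import collections, warnings
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter('always')
#         Mapping = collections.Mapping        # old spelling, still works
#     # caught[0].category is DeprecationWarning and
#     # Mapping is collections.abc.Mapping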
################################################################################
### OrderedDict
################################################################################
class _OrderedDictKeysView(_collections_abc.KeysView):
def __reversed__(self):
yield from reversed(self._mapping)
class _OrderedDictItemsView(_collections_abc.ItemsView):
def __reversed__(self):
for key in reversed(self._mapping):
yield (key, self._mapping[key])
class _OrderedDictValuesView(_collections_abc.ValuesView):
def __reversed__(self):
for key in reversed(self._mapping):
yield self._mapping[key]
class _Link(object):
__slots__ = 'prev', 'next', 'key', '__weakref__'
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as regular dictionaries.
# The internal self.__map dict maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# The sentinel is in self.__hardroot with a weakref proxy in self.__root.
# The prev links are weakref proxies (to prevent circular references).
# Individual links are kept alive by the hard reference in self.__map.
# Those hard references disappear when a key is deleted from an OrderedDict.
def __init__(*args, **kwds):
'''Initialize an ordered dictionary. The signature is the same as
regular dictionaries. Keyword argument order is preserved.
'''
if not args:
raise TypeError("descriptor '__init__' of 'OrderedDict' object "
"needs an argument")
self, *args = args
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__hardroot = _Link()
self.__root = root = _proxy(self.__hardroot)
root.prev = root.next = root
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value,
dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link at the end of the linked list,
# and the inherited dictionary is updated with the new key/value pair.
if key not in self:
self.__map[key] = link = Link()
root = self.__root
last = root.prev
link.prev, link.next, link.key = last, root, key
last.next = link
root.prev = proxy(link)
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which gets
# removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link = self.__map.pop(key)
link_prev = link.prev
link_next = link.next
link_prev.next = link_next
link_next.prev = link_prev
link.prev = None
link.next = None
def __iter__(self):
'od.__iter__() <==> iter(od)'
# Traverse the linked list in order.
root = self.__root
curr = root.next
while curr is not root:
yield curr.key
curr = curr.next
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
# Traverse the linked list in reverse order.
root = self.__root
curr = root.prev
while curr is not root:
yield curr.key
curr = curr.prev
def clear(self):
'od.clear() -> None. Remove all items from od.'
root = self.__root
root.prev = root.next = root
self.__map.clear()
dict.clear(self)
def popitem(self, last=True):
'''Remove and return a (key, value) pair from the dictionary.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root.prev
link_prev = link.prev
link_prev.next = root
root.prev = link_prev
else:
link = root.next
link_next = link.next
root.next = link_next
link_next.prev = root
key = link.key
del self.__map[key]
value = dict.pop(self, key)
return key, value
def move_to_end(self, key, last=True):
'''Move an existing element to the end (or beginning if last is false).
Raise KeyError if the element does not exist.
'''
link = self.__map[key]
link_prev = link.prev
link_next = link.next
soft_link = link_next.prev
link_prev.next = link_next
link_next.prev = link_prev
root = self.__root
if last:
last = root.prev
link.prev = last
link.next = root
root.prev = soft_link
last.next = link
else:
first = root.next
link.prev = root
link.next = first
first.prev = soft_link
root.next = link
def __sizeof__(self):
sizeof = _sys.getsizeof
n = len(self) + 1 # number of links including root
size = sizeof(self.__dict__) # instance dictionary
size += sizeof(self.__map) * 2 # internal dict and inherited dict
size += sizeof(self.__hardroot) * n # link objects
size += sizeof(self.__root) * n # proxy objects
return size
update = __update = _collections_abc.MutableMapping.update
def keys(self):
"D.keys() -> a set-like object providing a view on D's keys"
return _OrderedDictKeysView(self)
def items(self):
"D.items() -> a set-like object providing a view on D's items"
return _OrderedDictItemsView(self)
def values(self):
"D.values() -> an object providing a view on D's values"
return _OrderedDictValuesView(self)
__ne__ = _collections_abc.MutableMapping.__ne__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding
value. If key is not found, d is returned if given, otherwise KeyError
is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'''Insert key with a value of default if key is not in the dictionary.
Return the value for key if key is in the dictionary, else default.
'''
if key in self:
return self[key]
self[key] = default
return default
@_recursive_repr()
def __repr__(self):
'od.__repr__() <==> repr(od)'
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self.items()))
def __reduce__(self):
'Return state information for pickling'
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
return self.__class__, (), inst_dict or None, None, iter(self.items())
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''Create a new ordered dictionary with keys from iterable and values set to value.
'''
self = cls()
for key in iterable:
self[key] = value
return self
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return dict.__eq__(self, other) and all(map(_eq, self, other))
return dict.__eq__(self, other)
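# Illustrative sketch (editorial addition): behaviour implemented by the
# linked-list bookkeeping above, shown with the public API only.
#
#     >>> od = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
#     >>> list(od)                  # insertion order is preserved
#     ['a', 'b', 'c']
#     >>> od.move_to_end('a')       # relink 'a' to the end of the list
#     >>> list(od)
#     ['b', 'c', 'a']
#     >>> od.popitem(last=False)    # FIFO pop removes from the front
#     ('b', 2)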
try:
from _collections import OrderedDict
except ImportError:
# Leave the pure Python version in place.
pass
################################################################################
### namedtuple
################################################################################
_nt_itemgetters = {}
def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Validate the field names. At the user's option, either generate an error
# message or automatically replace the field name with a valid name.
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names))
typename = _sys.intern(str(typename))
if rename:
seen = set()
for index, name in enumerate(field_names):
if (not name.isidentifier()
or _iskeyword(name)
or name.startswith('_')
or name in seen):
field_names[index] = f'_{index}'
seen.add(name)
for name in [typename] + field_names:
if type(name) is not str:
raise TypeError('Type names and field names must be strings')
if not name.isidentifier():
raise ValueError('Type names and field names must be valid '
f'identifiers: {name!r}')
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a '
f'keyword: {name!r}')
seen = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: '
f'{name!r}')
if name in seen:
raise ValueError(f'Encountered duplicate field name: {name!r}')
seen.add(name)
field_defaults = {}
if defaults is not None:
defaults = tuple(defaults)
if len(defaults) > len(field_names):
raise TypeError('Got more default values than field names')
field_defaults = dict(reversed(list(zip(reversed(field_names),
reversed(defaults)))))
# Variables used in the methods and docstrings
field_names = tuple(map(_sys.intern, field_names))
num_fields = len(field_names)
arg_list = repr(field_names).replace("'", "")[1:-1]
repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')'
tuple_new = tuple.__new__
_len = len
# Create all the named tuple methods to be added to the class namespace
s = f'def __new__(_cls, {arg_list}): return _tuple_new(_cls, ({arg_list}))'
namespace = {'_tuple_new': tuple_new, '__name__': f'namedtuple_{typename}'}
# Note: exec() has the side-effect of interning the field names
exec(s, namespace)
__new__ = namespace['__new__']
__new__.__doc__ = f'Create new instance of {typename}({arg_list})'
if defaults is not None:
__new__.__defaults__ = defaults
@classmethod
def _make(cls, iterable):
result = tuple_new(cls, iterable)
if _len(result) != num_fields:
raise TypeError(f'Expected {num_fields} arguments, got {len(result)}')
return result
_make.__func__.__doc__ = (f'Make a new {typename} object from a sequence '
'or iterable')
def _replace(_self, **kwds):
result = _self._make(map(kwds.pop, field_names, _self))
if kwds:
raise ValueError(f'Got unexpected field names: {list(kwds)!r}')
return result
_replace.__doc__ = (f'Return a new {typename} object replacing specified '
'fields with new values')
def __repr__(self):
'Return a nicely formatted representation string'
return self.__class__.__name__ + repr_fmt % self
def _asdict(self):
'Return a new OrderedDict which maps field names to their values.'
return OrderedDict(zip(self._fields, self))
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return tuple(self)
# Modify function metadata to help with introspection and debugging
for method in (__new__, _make.__func__, _replace,
__repr__, _asdict, __getnewargs__):
method.__qualname__ = f'{typename}.{method.__name__}'
# Build-up the class namespace dictionary
# and use type() to build the result class
class_namespace = {
'__doc__': f'{typename}({arg_list})',
'__slots__': (),
'_fields': field_names,
'_fields_defaults': field_defaults,
'__new__': __new__,
'_make': _make,
'_replace': _replace,
'__repr__': __repr__,
'_asdict': _asdict,
'__getnewargs__': __getnewargs__,
}
cache = _nt_itemgetters
for index, name in enumerate(field_names):
try:
itemgetter_object, doc = cache[index]
except KeyError:
itemgetter_object = _itemgetter(index)
doc = f'Alias for field number {index}'
cache[index] = itemgetter_object, doc
class_namespace[name] = property(itemgetter_object, doc=doc)
result = type(typename, (tuple,), class_namespace)
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython), or where the user has
# specified a particular module.
if module is None:
try:
module = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
if module is not None:
result.__module__ = module
return result
########################################################################
### Counter
########################################################################
def _count_elements(mapping, iterable):
'Tally elements from the iterable.'
mapping_get = mapping.get
for elem in iterable:
mapping[elem] = mapping_get(elem, 0) + 1
try: # Load C helper function if available
from _collections import _count_elements
except ImportError:
pass
class Counter(dict):
'''Dict subclass for counting hashable items. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> c = Counter('abcdeabcdabcaba') # count elements from a string
>>> c.most_common(3) # three most common elements
[('a', 5), ('b', 4), ('c', 3)]
>>> sorted(c) # list all unique elements
['a', 'b', 'c', 'd', 'e']
>>> ''.join(sorted(c.elements())) # list elements with repetitions
'aaaaabbbbcccdde'
>>> sum(c.values()) # total of all counts
15
>>> c['a'] # count of letter 'a'
5
>>> for elem in 'shazam': # update counts from an iterable
... c[elem] += 1 # by adding 1 to each element's count
>>> c['a'] # now there are seven 'a'
7
>>> del c['b'] # remove all 'b'
>>> c['b'] # now there are zero 'b'
0
>>> d = Counter('simsalabim') # make another counter
>>> c.update(d) # add in the second counter
>>> c['a'] # now there are nine 'a'
9
>>> c.clear() # empty the counter
>>> c
Counter()
Note: If a count is set to zero or reduced to zero, it will remain
in the counter until the entry is deleted or the counter is cleared:
>>> c = Counter('aaabbc')
>>> c['b'] -= 2 # reduce the count of 'b' by two
>>> c.most_common() # 'b' is still in, but its count is zero
[('a', 3), ('c', 1), ('b', 0)]
'''
# References:
# http://en.wikipedia.org/wiki/Multiset
# http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
# http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
# http://code.activestate.com/recipes/259174/
# Knuth, TAOCP Vol. II section 4.6.3
def __init__(*args, **kwds):
'''Create a new, empty Counter object. And if given, count elements
from an input iterable. Or, initialize the count from another mapping
of elements to their counts.
>>> c = Counter() # a new, empty counter
>>> c = Counter('gallahad') # a new counter from an iterable
>>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
>>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
if not args:
raise TypeError("descriptor '__init__' of 'Counter' object "
"needs an argument")
self, *args = args
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
super(Counter, self).__init__()
self.update(*args, **kwds)
def __missing__(self, key):
'The count of elements not in the Counter is zero.'
# Needed so that self[missing_item] does not raise KeyError
return 0
def most_common(self, n=None):
'''List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abcdeabcdabcaba').most_common(3)
[('a', 5), ('b', 4), ('c', 3)]
'''
# Emulate Bag.sortedByCount from Smalltalk
if n is None:
return sorted(self.items(), key=_itemgetter(1), reverse=True)
return _heapq.nlargest(n, self.items(), key=_itemgetter(1))
def elements(self):
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
# Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1
>>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
>>> product = 1
>>> for factor in prime_factors.elements(): # loop over factors
... product *= factor # and multiply them
>>> product
1836
Note, if an element's count has been set to zero or is a negative
number, elements() will ignore it.
'''
# Emulate Bag.do from Smalltalk and Multiset.begin from C++.
return _chain.from_iterable(_starmap(_repeat, self.items()))
# Override dict methods where necessary
@classmethod
def fromkeys(cls, iterable, v=None):
# There is no equivalent method for counters because setting v=1
# means that no element can have a count greater than one.
raise NotImplementedError(
'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
def update(*args, **kwds):
'''Like dict.update() but add counts instead of replacing them.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.update('witch') # add elements from another iterable
>>> d = Counter('watch')
>>> c.update(d) # add elements from another counter
>>> c['h'] # four 'h' in which, witch, and watch
4
'''
# The regular dict.update() operation makes no sense here because the
        # replace behavior results in some of the original untouched counts
        # being mixed in with all of the other counts for a mishmash that
        # doesn't have a straightforward interpretation in most counting
# contexts. Instead, we implement straight-addition. Both the inputs
# and outputs are allowed to contain zero and negative counts.
if not args:
raise TypeError("descriptor 'update' of 'Counter' object "
"needs an argument")
self, *args = args
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
iterable = args[0] if args else None
if iterable is not None:
if isinstance(iterable, _collections_abc.Mapping):
if self:
self_get = self.get
for elem, count in iterable.items():
self[elem] = count + self_get(elem, 0)
else:
super(Counter, self).update(iterable) # fast path when counter is empty
else:
_count_elements(self, iterable)
if kwds:
self.update(kwds)
def subtract(*args, **kwds):
'''Like dict.update() but subtracts counts instead of replacing them.
Counts can be reduced below zero. Both the inputs and outputs are
allowed to contain zero and negative counts.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.subtract('witch') # subtract elements from another iterable
>>> c.subtract(Counter('watch')) # subtract elements from another counter
>>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch
0
>>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch
-1
'''
if not args:
raise TypeError("descriptor 'subtract' of 'Counter' object "
"needs an argument")
self, *args = args
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
iterable = args[0] if args else None
if iterable is not None:
self_get = self.get
if isinstance(iterable, _collections_abc.Mapping):
for elem, count in iterable.items():
self[elem] = self_get(elem, 0) - count
else:
for elem in iterable:
self[elem] = self_get(elem, 0) - 1
if kwds:
self.subtract(kwds)
def copy(self):
'Return a shallow copy.'
return self.__class__(self)
def __reduce__(self):
return self.__class__, (dict(self),)
def __delitem__(self, elem):
'Like dict.__delitem__() but does not raise KeyError for missing values.'
if elem in self:
super().__delitem__(elem)
def __repr__(self):
if not self:
return '%s()' % self.__class__.__name__
try:
items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
return '%s({%s})' % (self.__class__.__name__, items)
except TypeError:
# handle case where values are not orderable
return '{0}({1!r})'.format(self.__class__.__name__, dict(self))
# Multiset-style mathematical operations discussed in:
# Knuth TAOCP Volume II section 4.6.3 exercise 19
# and at http://en.wikipedia.org/wiki/Multiset
#
# Outputs guaranteed to only include positive counts.
#
# To strip negative and zero counts, add-in an empty counter:
# c += Counter()
def __add__(self, other):
'''Add counts from two counters.
>>> Counter('abbb') + Counter('bcc')
Counter({'b': 4, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count + other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __sub__(self, other):
''' Subtract count, but keep only results with positive counts.
>>> Counter('abbbc') - Counter('bccd')
Counter({'b': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count - other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count < 0:
result[elem] = 0 - count
return result
def __or__(self, other):
'''Union is the maximum of value in either of the input counters.
>>> Counter('abbb') | Counter('bcc')
Counter({'b': 3, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = other_count if count < other_count else count
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __and__(self, other):
''' Intersection is the minimum of corresponding counts.
>>> Counter('abbb') & Counter('bcc')
Counter({'b': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = count if count < other_count else other_count
if newcount > 0:
result[elem] = newcount
return result
def __pos__(self):
'Adds an empty counter, effectively stripping negative and zero counts'
result = Counter()
for elem, count in self.items():
if count > 0:
result[elem] = count
return result
def __neg__(self):
'''Subtracts from an empty counter. Strips positive and zero counts,
and flips the sign on negative counts.
'''
result = Counter()
for elem, count in self.items():
if count < 0:
result[elem] = 0 - count
return result
def _keep_positive(self):
'''Internal method to strip elements with a negative or zero count'''
nonpositive = [elem for elem, count in self.items() if not count > 0]
for elem in nonpositive:
del self[elem]
return self
def __iadd__(self, other):
'''Inplace add from another counter, keeping only positive counts.
>>> c = Counter('abbb')
>>> c += Counter('bcc')
>>> c
Counter({'b': 4, 'c': 2, 'a': 1})
'''
for elem, count in other.items():
self[elem] += count
return self._keep_positive()
def __isub__(self, other):
'''Inplace subtract counter, but keep only results with positive counts.
>>> c = Counter('abbbc')
>>> c -= Counter('bccd')
>>> c
Counter({'b': 2, 'a': 1})
'''
for elem, count in other.items():
self[elem] -= count
return self._keep_positive()
def __ior__(self, other):
'''Inplace union is the maximum of value from either counter.
>>> c = Counter('abbb')
>>> c |= Counter('bcc')
>>> c
Counter({'b': 3, 'c': 2, 'a': 1})
'''
for elem, other_count in other.items():
count = self[elem]
if other_count > count:
self[elem] = other_count
return self._keep_positive()
def __iand__(self, other):
'''Inplace intersection is the minimum of corresponding counts.
>>> c = Counter('abbb')
>>> c &= Counter('bcc')
>>> c
Counter({'b': 1})
'''
for elem, count in self.items():
other_count = other[elem]
if other_count < count:
self[elem] = other_count
return self._keep_positive()
########################################################################
### ChainMap
########################################################################
class ChainMap(_collections_abc.MutableMapping):
''' A ChainMap groups multiple dicts (or other mappings) together
to create a single, updateable view.
The underlying mappings are stored in a list. That list is public and can
be accessed or updated using the *maps* attribute. There is no other
state.
Lookups search the underlying mappings successively until a key is found.
In contrast, writes, updates, and deletions only operate on the first
mapping.
'''
def __init__(self, *maps):
'''Initialize a ChainMap by setting *maps* to the given mappings.
If no mappings are provided, a single empty dictionary is used.
'''
self.maps = list(maps) or [{}] # always at least one map
def __missing__(self, key):
raise KeyError(key)
def __getitem__(self, key):
for mapping in self.maps:
try:
return mapping[key] # can't use 'key in mapping' with defaultdict
except KeyError:
pass
return self.__missing__(key) # support subclasses that define __missing__
def get(self, key, default=None):
return self[key] if key in self else default
def __len__(self):
return len(set().union(*self.maps)) # reuses stored hash values if possible
def __iter__(self):
d = {}
for mapping in reversed(self.maps):
d.update(mapping) # reuses stored hash values if possible
return iter(d)
def __contains__(self, key):
return any(key in m for m in self.maps)
def __bool__(self):
return any(self.maps)
@_recursive_repr()
def __repr__(self):
return '{0.__class__.__name__}({1})'.format(
self, ', '.join(map(repr, self.maps)))
@classmethod
def fromkeys(cls, iterable, *args):
'Create a ChainMap with a single dict created from the iterable.'
return cls(dict.fromkeys(iterable, *args))
def copy(self):
'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
return self.__class__(self.maps[0].copy(), *self.maps[1:])
__copy__ = copy
def new_child(self, m=None): # like Django's Context.push()
'''New ChainMap with a new map followed by all previous maps.
If no map is provided, an empty dict is used.
'''
if m is None:
m = {}
return self.__class__(m, *self.maps)
@property
def parents(self): # like Django's Context.pop()
'New ChainMap from maps[1:].'
return self.__class__(*self.maps[1:])
def __setitem__(self, key, value):
self.maps[0][key] = value
def __delitem__(self, key):
try:
del self.maps[0][key]
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def popitem(self):
        'Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty.'
try:
return self.maps[0].popitem()
except KeyError:
raise KeyError('No keys found in the first mapping.')
def pop(self, key, *args):
'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
try:
return self.maps[0].pop(key, *args)
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def clear(self):
'Clear maps[0], leaving maps[1:] intact.'
self.maps[0].clear()
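# Illustrative sketch (editorial addition): lookups search the chained maps in
# order, while writes only touch maps[0], as described in the class docstring.
#
#     >>> defaults = {'color': 'red', 'user': 'guest'}
#     >>> overrides = {'user': 'admin'}
#     >>> cm = ChainMap(overrides, defaults)
#     >>> cm['user']                # found in the first mapping
#     'admin'
#     >>> cm['color']               # falls through to the second mapping
#     'red'
#     >>> cm['color'] = 'blue'      # writes go to maps[0] only
#     >>> overrides['color'], defaults['color']
#     ('blue', 'red')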
################################################################################
### UserDict
################################################################################
class UserDict(_collections_abc.MutableMapping):
# Start by filling-out the abstract methods
def __init__(*args, **kwargs):
if not args:
raise TypeError("descriptor '__init__' of 'UserDict' object "
"needs an argument")
self, *args = args
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
if args:
dict = args[0]
elif 'dict' in kwargs:
dict = kwargs.pop('dict')
import warnings
warnings.warn("Passing 'dict' as keyword argument is deprecated",
DeprecationWarning, stacklevel=2)
else:
dict = None
self.data = {}
if dict is not None:
self.update(dict)
if len(kwargs):
self.update(kwargs)
def __len__(self): return len(self.data)
def __getitem__(self, key):
if key in self.data:
return self.data[key]
if hasattr(self.__class__, "__missing__"):
return self.__class__.__missing__(self, key)
raise KeyError(key)
def __setitem__(self, key, item): self.data[key] = item
def __delitem__(self, key): del self.data[key]
def __iter__(self):
return iter(self.data)
# Modify __contains__ to work correctly when __missing__ is present
def __contains__(self, key):
return key in self.data
# Now, add the methods in dicts but not in MutableMapping
def __repr__(self): return repr(self.data)
def copy(self):
if self.__class__ is UserDict:
return UserDict(self.data.copy())
import copy
data = self.data
try:
self.data = {}
c = copy.copy(self)
finally:
self.data = data
c.update(self)
return c
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
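# --- Illustrative usage (added sketch, not part of the original module) -------
# A minimal example of why UserDict is useful: every write, including those
# made by update(), is routed through the subclass's __setitem__, which plain
# dict subclasses do not guarantee. The class and function names are
# hypothetical.
def _userdict_example():
    class LowerKeyDict(UserDict):
        def __setitem__(self, key, item):
            self.data[str(key).lower()] = item
    d = LowerKeyDict()
    d.update({'NaMe': 'ada'})    # update() calls __setitem__ for each pair
    assert d['name'] == 'ada'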
################################################################################
### UserList
################################################################################
class UserList(_collections_abc.MutableSequence):
"""A more or less complete user-defined wrapper around list objects."""
def __init__(self, initlist=None):
self.data = []
if initlist is not None:
# XXX should this accept an arbitrary sequence?
if type(initlist) == type(self.data):
self.data[:] = initlist
elif isinstance(initlist, UserList):
self.data[:] = initlist.data[:]
else:
self.data = list(initlist)
def __repr__(self): return repr(self.data)
def __lt__(self, other): return self.data < self.__cast(other)
def __le__(self, other): return self.data <= self.__cast(other)
def __eq__(self, other): return self.data == self.__cast(other)
def __gt__(self, other): return self.data > self.__cast(other)
def __ge__(self, other): return self.data >= self.__cast(other)
def __cast(self, other):
return other.data if isinstance(other, UserList) else other
def __contains__(self, item): return item in self.data
def __len__(self): return len(self.data)
def __getitem__(self, i): return self.data[i]
def __setitem__(self, i, item): self.data[i] = item
def __delitem__(self, i): del self.data[i]
def __add__(self, other):
if isinstance(other, UserList):
return self.__class__(self.data + other.data)
elif isinstance(other, type(self.data)):
return self.__class__(self.data + other)
return self.__class__(self.data + list(other))
def __radd__(self, other):
if isinstance(other, UserList):
return self.__class__(other.data + self.data)
elif isinstance(other, type(self.data)):
return self.__class__(other + self.data)
return self.__class__(list(other) + self.data)
def __iadd__(self, other):
if isinstance(other, UserList):
self.data += other.data
elif isinstance(other, type(self.data)):
self.data += other
else:
self.data += list(other)
return self
def __mul__(self, n):
return self.__class__(self.data*n)
__rmul__ = __mul__
def __imul__(self, n):
self.data *= n
return self
def append(self, item): self.data.append(item)
def insert(self, i, item): self.data.insert(i, item)
def pop(self, i=-1): return self.data.pop(i)
def remove(self, item): self.data.remove(item)
def clear(self): self.data.clear()
def copy(self): return self.__class__(self)
def count(self, item): return self.data.count(item)
def index(self, item, *args): return self.data.index(item, *args)
def reverse(self): self.data.reverse()
def sort(self, *args, **kwds): self.data.sort(*args, **kwds)
def extend(self, other):
if isinstance(other, UserList):
self.data.extend(other.data)
else:
self.data.extend(other)
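# --- Illustrative usage (added sketch, not part of the original module) -------
# A minimal example of subclassing UserList: operations such as __add__, __mul__
# and copy() rebuild their results through self.__class__, so they keep
# returning the subclass. The names below are hypothetical.
def _userlist_example():
    class Stack(UserList):
        def push(self, item):
            self.append(item)
    s = Stack([1, 2])
    s.push(3)
    assert s.data == [1, 2, 3]
    assert isinstance(s + [4], Stack)    # __add__ returns the subclass
    assert isinstance(s.copy(), Stack)   # copy() does as well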
################################################################################
### UserString
################################################################################
class UserString(_collections_abc.Sequence):
def __init__(self, seq):
if isinstance(seq, str):
self.data = seq
elif isinstance(seq, UserString):
self.data = seq.data[:]
else:
self.data = str(seq)
def __str__(self): return str(self.data)
def __repr__(self): return repr(self.data)
def __int__(self): return int(self.data)
def __float__(self): return float(self.data)
def __complex__(self): return complex(self.data)
def __hash__(self): return hash(self.data)
def __getnewargs__(self):
return (self.data[:],)
def __eq__(self, string):
if isinstance(string, UserString):
return self.data == string.data
return self.data == string
def __lt__(self, string):
if isinstance(string, UserString):
return self.data < string.data
return self.data < string
def __le__(self, string):
if isinstance(string, UserString):
return self.data <= string.data
return self.data <= string
def __gt__(self, string):
if isinstance(string, UserString):
return self.data > string.data
return self.data > string
def __ge__(self, string):
if isinstance(string, UserString):
return self.data >= string.data
return self.data >= string
def __contains__(self, char):
if isinstance(char, UserString):
char = char.data
return char in self.data
def __len__(self): return len(self.data)
def __getitem__(self, index): return self.__class__(self.data[index])
def __add__(self, other):
if isinstance(other, UserString):
return self.__class__(self.data + other.data)
elif isinstance(other, str):
return self.__class__(self.data + other)
return self.__class__(self.data + str(other))
def __radd__(self, other):
if isinstance(other, str):
return self.__class__(other + self.data)
return self.__class__(str(other) + self.data)
def __mul__(self, n):
return self.__class__(self.data*n)
__rmul__ = __mul__
def __mod__(self, args):
return self.__class__(self.data % args)
def __rmod__(self, format):
return self.__class__(format % self.data)
# the following methods are defined in alphabetical order:
def capitalize(self): return self.__class__(self.data.capitalize())
def casefold(self):
return self.__class__(self.data.casefold())
def center(self, width, *args):
return self.__class__(self.data.center(width, *args))
def count(self, sub, start=0, end=_sys.maxsize):
if isinstance(sub, UserString):
sub = sub.data
return self.data.count(sub, start, end)
def encode(self, encoding=None, errors=None): # XXX improve this?
if encoding:
if errors:
return self.__class__(self.data.encode(encoding, errors))
return self.__class__(self.data.encode(encoding))
return self.__class__(self.data.encode())
def endswith(self, suffix, start=0, end=_sys.maxsize):
return self.data.endswith(suffix, start, end)
def expandtabs(self, tabsize=8):
return self.__class__(self.data.expandtabs(tabsize))
def find(self, sub, start=0, end=_sys.maxsize):
if isinstance(sub, UserString):
sub = sub.data
return self.data.find(sub, start, end)
def format(self, *args, **kwds):
return self.data.format(*args, **kwds)
def format_map(self, mapping):
return self.data.format_map(mapping)
def index(self, sub, start=0, end=_sys.maxsize):
return self.data.index(sub, start, end)
def isalpha(self): return self.data.isalpha()
def isalnum(self): return self.data.isalnum()
def isascii(self): return self.data.isascii()
def isdecimal(self): return self.data.isdecimal()
def isdigit(self): return self.data.isdigit()
def isidentifier(self): return self.data.isidentifier()
def islower(self): return self.data.islower()
def isnumeric(self): return self.data.isnumeric()
def isprintable(self): return self.data.isprintable()
def isspace(self): return self.data.isspace()
def istitle(self): return self.data.istitle()
def isupper(self): return self.data.isupper()
def join(self, seq): return self.data.join(seq)
def ljust(self, width, *args):
return self.__class__(self.data.ljust(width, *args))
def lower(self): return self.__class__(self.data.lower())
def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
maketrans = str.maketrans
def partition(self, sep):
return self.data.partition(sep)
def replace(self, old, new, maxsplit=-1):
if isinstance(old, UserString):
old = old.data
if isinstance(new, UserString):
new = new.data
return self.__class__(self.data.replace(old, new, maxsplit))
def rfind(self, sub, start=0, end=_sys.maxsize):
if isinstance(sub, UserString):
sub = sub.data
return self.data.rfind(sub, start, end)
def rindex(self, sub, start=0, end=_sys.maxsize):
return self.data.rindex(sub, start, end)
def rjust(self, width, *args):
return self.__class__(self.data.rjust(width, *args))
def rpartition(self, sep):
return self.data.rpartition(sep)
def rstrip(self, chars=None):
return self.__class__(self.data.rstrip(chars))
def split(self, sep=None, maxsplit=-1):
return self.data.split(sep, maxsplit)
def rsplit(self, sep=None, maxsplit=-1):
return self.data.rsplit(sep, maxsplit)
def splitlines(self, keepends=False): return self.data.splitlines(keepends)
def startswith(self, prefix, start=0, end=_sys.maxsize):
return self.data.startswith(prefix, start, end)
def strip(self, chars=None): return self.__class__(self.data.strip(chars))
def swapcase(self): return self.__class__(self.data.swapcase())
def title(self): return self.__class__(self.data.title())
def translate(self, *args):
return self.__class__(self.data.translate(*args))
def upper(self): return self.__class__(self.data.upper())
def zfill(self, width): return self.__class__(self.data.zfill(width))
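# --- Illustrative usage (added sketch, not part of the original module) -------
# A minimal example of subclassing UserString: string methods re-wrap their
# results in self.__class__, so transformations keep returning the subclass.
# The names below are hypothetical.
def _userstring_example():
    class Tag(UserString):
        def bracketed(self):
            return '[' + self.data + ']'
    t = Tag('hello')
    assert isinstance(t.upper(), Tag)            # upper() preserves the subclass
    assert (t + ' world').data == 'hello world'  # __add__ concatenates onto .data
    assert t.bracketed() == '[hello]'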
| mdanielwork/intellij-community | python/testData/MockSdk3.7/Lib/collections/__init__.py | Python | apache-2.0 | 47,640 |
"""
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
from contextlib import contextmanager
import warnings
try:
import cPickle as pickle
except ImportError:
import pickle
from ._multiprocessing_helpers import mp
from .format_stack import format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_bytes
from ._parallel_backends import (FallbackToBackend, MultiprocessingBackend,
ThreadingBackend, SequentialBackend)
from ._compat import _basestring
# Make sure that those two classes are part of the public joblib.parallel API
# so that 3rd party backend implementers can import them from here.
from ._parallel_backends import AutoBatchingMixin # noqa
from ._parallel_backends import ParallelBackendBase # noqa
BACKENDS = {
'multiprocessing': MultiprocessingBackend,
'threading': ThreadingBackend,
'sequential': SequentialBackend,
}
# name of the backend used by default by Parallel outside of any context
# managed by ``parallel_backend``.
DEFAULT_BACKEND = 'multiprocessing'
DEFAULT_N_JOBS = 1
# Thread local value that can be overridden by the ``parallel_backend`` context
# manager
_backend = threading.local()
def get_active_backend():
"""Return the active default backend"""
active_backend_and_jobs = getattr(_backend, 'backend_and_jobs', None)
if active_backend_and_jobs is not None:
return active_backend_and_jobs
# We are outside of the scope of any parallel_backend context manager,
# create the default backend instance now
active_backend = BACKENDS[DEFAULT_BACKEND]()
return active_backend, DEFAULT_N_JOBS
@contextmanager
def parallel_backend(backend, n_jobs=-1, **backend_params):
"""Change the default backend used by Parallel inside a with block.
If ``backend`` is a string it must match a previously registered
implementation using the ``register_parallel_backend`` function.
Alternatively backend can be passed directly as an instance.
By default all available workers will be used (``n_jobs=-1``) unless the
caller passes an explicit value for the ``n_jobs`` parameter.
This is an alternative to passing a ``backend='backend_name'`` argument to
the ``Parallel`` class constructor. It is particularly useful when calling
into library code that uses joblib internally but does not expose the
backend argument in its own API.
>>> from operator import neg
>>> with parallel_backend('threading'):
... print(Parallel()(delayed(neg)(i + 1) for i in range(5)))
...
[-1, -2, -3, -4, -5]
Warning: this function is experimental and subject to change in a future
version of joblib.
.. versionadded:: 0.10
"""
if isinstance(backend, _basestring):
backend = BACKENDS[backend](**backend_params)
old_backend_and_jobs = getattr(_backend, 'backend_and_jobs', None)
try:
_backend.backend_and_jobs = (backend, n_jobs)
# return the backend instance to make it easier to write tests
yield backend, n_jobs
finally:
if old_backend_and_jobs is None:
if getattr(_backend, 'backend_and_jobs', None) is not None:
del _backend.backend_and_jobs
else:
_backend.backend_and_jobs = old_backend_and_jobs
# Under Linux or OS X the default start method of multiprocessing
# can cause third party libraries to crash. Under Python 3.4+ it is possible
# to set an environment variable to switch the default start method from
# 'fork' to 'forkserver' or 'spawn' to avoid this issue albeit at the cost
# of causing semantic changes and some additional pool instantiation overhead.
if hasattr(mp, 'get_context'):
method = os.environ.get('JOBLIB_START_METHOD', '').strip() or None
DEFAULT_MP_CONTEXT = mp.get_context(method=method)
else:
DEFAULT_MP_CONTEXT = None
class BatchedCalls(object):
"""Wrap a sequence of (func, args, kwargs) tuples as a single callable"""
def __init__(self, iterator_slice):
self.items = list(iterator_slice)
self._size = len(self.items)
def __call__(self):
return [func(*args, **kwargs) for func, args, kwargs in self.items]
def __len__(self):
return self._size
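# A minimal sketch (illustration only, not part of joblib): BatchedCalls turns
# several (func, args, kwargs) tuples into a single callable, so a whole batch
# can be shipped to a worker as one task. The function name is hypothetical.
def _batched_calls_example():
    batch = BatchedCalls([(abs, (-1,), {}), (pow, (2, 3), {})])
    assert len(batch) == 2
    assert batch() == [1, 8]     # each stored call is executed in order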
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
"""Return the number of CPUs."""
if mp is None:
return 1
return mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
###############################################################################
def delayed(function, check_pickle=True):
"""Decorator used to capture the arguments of a function.
Pass `check_pickle=False` when:
- performing a possibly repeated check is too costly and has been done
already once outside of the call to delayed.
- when used in conjunction with `Parallel(backend='threading')`.
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
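# A minimal sketch (illustration only, not part of joblib): delayed() never
# runs the function, it only captures it together with its arguments so that
# Parallel can dispatch the call later. The function name is hypothetical.
def _delayed_example():
    func, args, kwargs = delayed(sqrt)(9)
    assert func is sqrt and args == (9,) and kwargs == {}
    # Typical use: Parallel(n_jobs=2)(delayed(sqrt)(i) for i in range(10))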
###############################################################################
class BatchCompletionCallBack(object):
"""Callback used by joblib.Parallel's multiprocessing backend.
This callable is executed by the parent process whenever a worker process
has returned the results of a batch of tasks.
It is used for progress reporting, to update the estimate of the batch
processing duration and to schedule the next batch of tasks to be
processed.
"""
def __init__(self, dispatch_timestamp, batch_size, parallel):
self.dispatch_timestamp = dispatch_timestamp
self.batch_size = batch_size
self.parallel = parallel
def __call__(self, out):
self.parallel.n_completed_tasks += self.batch_size
this_batch_duration = time.time() - self.dispatch_timestamp
self.parallel._backend.batch_completed(self.batch_size,
this_batch_duration)
self.parallel.print_progress()
if self.parallel._original_iterator is not None:
self.parallel.dispatch_next()
###############################################################################
def register_parallel_backend(name, factory, make_default=False):
"""Register a new Parallel backend factory.
The new backend can then be selected by passing its name as the backend
argument to the Parallel class. Moreover, the default backend can be
overwritten globally by setting make_default=True.
The factory can be any callable that takes no argument and returns an
instance of ``ParallelBackendBase``.
Warning: this function is experimental and subject to change in a future
version of joblib.
.. versionadded:: 0.10
"""
BACKENDS[name] = factory
if make_default:
global DEFAULT_BACKEND
DEFAULT_BACKEND = name
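# A minimal sketch (illustration only, not part of joblib): registering a
# custom backend name that simply reuses the built-in ThreadingBackend, then
# selecting it through the parallel_backend context manager. The backend name
# and function name are hypothetical.
def _register_backend_example():
    register_parallel_backend('my_threads', ThreadingBackend)
    with parallel_backend('my_threads', n_jobs=2):
        return Parallel()(delayed(sqrt)(i) for i in range(3))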
def effective_n_jobs(n_jobs=-1):
"""Determine the number of jobs that can actually run in parallel
n_jobs is the number of workers requested by the callers.
Passing n_jobs=-1 means requesting all available workers, for instance
matching the number of CPU cores on the worker host(s).
This method should return a guesstimate of the number of workers that can
actually perform work concurrently with the currently enabled default
backend. The primary use case is to make it possible for the caller to know
in how many chunks to slice the work.
In general working on larger data chunks is more efficient (less
scheduling overhead and better use of CPU cache prefetching heuristics)
as long as all the workers have enough work to do.
Warning: this function is experimental and subject to change in a future
version of joblib.
.. versionadded:: 0.10
"""
backend, _ = get_active_backend()
return backend.effective_n_jobs(n_jobs=n_jobs)
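# A minimal sketch (illustration only, not part of joblib): using
# effective_n_jobs() to decide how many chunks to split a workload into before
# handing the chunks to Parallel. The function name is hypothetical.
def _chunking_example(items):
    n_chunks = effective_n_jobs()                # workers the default backend offers
    chunk_size = max(1, len(items) // n_chunks)
    return [items[i:i + chunk_size] for i in range(0, len(items), chunk_size)]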
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int, default: 1
The maximum number of concurrently running jobs, such as the number
of Python worker processes when backend="multiprocessing"
or the size of the thread-pool when backend="threading".
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend: str, ParallelBackendBase instance or None, \
default: 'multiprocessing'
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
communication and memory overhead when exchanging input and
output data with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
- finally, you can register backends by calling
register_parallel_backend. This will allow you to implement
a backend of your liking.
verbose: int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
If it is more than 10, all iterations are reported.
timeout: float, optional
Timeout limit for each task to complete. If any task takes longer,
a TimeOutError will be raised. Only applied when n_jobs != 1
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The number of batches (of tasks) to be pre-dispatched.
Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
default and the multiprocessing workers should never starve.
batch_size: int or 'auto', default: 'auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, multiprocessing
can be slower than sequential computation because of the overhead.
Batching fast computations together can mitigate this.
The ``'auto'`` strategy keeps track of the time it takes for a batch
to complete, and dynamically adjusts the batch size to keep the time
on the order of half a second, using a heuristic. The initial batch
size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
very little overhead and using larger batch size has not proved to
bring any gain in that case.
temp_folder: str, optional
Folder to be used by the pool for memmapping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment
variable,
- /dev/shm if the folder exists and is writable: this is a
RAMdisk filesystem available by default on modern Linux
distributions,
- the default system temporary folder that can be
overridden with TMP, TMPDIR or TEMP environment
variables, typically /tmp under Unix operating systems.
Only active when backend="multiprocessing".
max_nbytes: int, str, or None, optional, 1M by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmapping of large arrays.
Only active when backend="multiprocessing".
mmap_mode: {None, 'r+', 'r', 'w+', 'c'}
Memmapping mode for numpy arrays passed to workers.
See 'max_nbytes' parameter documentation for more details.
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
arguments. The main features it brings in addition to
using the raw multiprocessing API are (see examples for details):
* More readable code, in particular since it avoids
constructing lists of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
- using 'n_jobs=1' enables turning off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
* Interruption of multiprocess jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages:
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process:
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
called 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, backend=None, verbose=0, timeout=None,
pre_dispatch='2 * n_jobs', batch_size='auto',
temp_folder=None, max_nbytes='1M', mmap_mode='r'):
active_backend, default_n_jobs = get_active_backend()
if backend is None and n_jobs == 1:
# If we are under a parallel_backend context manager, look up
# the default number of jobs and use that instead:
n_jobs = default_n_jobs
self.n_jobs = n_jobs
self.verbose = verbose
self.timeout = timeout
self.pre_dispatch = pre_dispatch
if isinstance(max_nbytes, _basestring):
max_nbytes = memstr_to_bytes(max_nbytes)
self._backend_args = dict(
max_nbytes=max_nbytes,
mmap_mode=mmap_mode,
temp_folder=temp_folder,
verbose=max(0, self.verbose - 50),
)
if DEFAULT_MP_CONTEXT is not None:
self._backend_args['context'] = DEFAULT_MP_CONTEXT
if backend is None:
backend = active_backend
elif isinstance(backend, ParallelBackendBase):
# Use provided backend as is
pass
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._backend_args['context'] = backend
backend = MultiprocessingBackend()
else:
try:
backend_factory = BACKENDS[backend]
except KeyError:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, sorted(BACKENDS.keys())))
backend = backend_factory()
if (batch_size == 'auto' or isinstance(batch_size, Integral) and
batch_size > 0):
self.batch_size = batch_size
else:
raise ValueError(
"batch_size must be 'auto' or a positive integer, got: %r"
% batch_size)
self._backend = backend
self._output = None
self._jobs = list()
self._managed_backend = False
# This lock is used to coordinate the main thread of this process with
# the async callback thread of our pool.
self._lock = threading.Lock()
def __enter__(self):
self._managed_backend = True
self._initialize_backend()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._terminate_backend()
self._managed_backend = False
def _initialize_backend(self):
"""Build a process or thread pool and return the number of workers"""
try:
n_jobs = self._backend.configure(n_jobs=self.n_jobs, parallel=self,
**self._backend_args)
if self.timeout is not None and not self._backend.supports_timeout:
warnings.warn(
'The backend class {!r} does not support timeout. '
"You have set 'timeout={}' in Parallel but "
"the 'timeout' parameter will not be used.".format(
self._backend.__class__.__name__,
self.timeout))
except FallbackToBackend as e:
# Recursively initialize the backend in case of requested fallback.
self._backend = e.backend
n_jobs = self._initialize_backend()
return n_jobs
def _effective_n_jobs(self):
if self._backend:
return self._backend.effective_n_jobs(self.n_jobs)
return 1
def _terminate_backend(self):
if self._backend is not None:
self._backend.terminate()
def _dispatch(self, batch):
"""Queue the batch for computing, with or without multiprocessing
WARNING: this method is not thread-safe: it should be only called
indirectly via dispatch_one_batch.
"""
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
self.n_dispatched_tasks += len(batch)
self.n_dispatched_batches += 1
dispatch_timestamp = time.time()
cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
job = self._backend.apply_async(batch, callback=cb)
self._jobs.append(job)
def dispatch_next(self):
"""Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.
"""
if not self.dispatch_one_batch(self._original_iterator):
self._iterating = False
self._original_iterator = None
def dispatch_one_batch(self, iterator):
"""Prefetch the tasks for the next batch and dispatch them.
The effective size of the batch is computed here.
If there are no more jobs to dispatch, return False, else return True.
The iterator consumption and dispatching are protected by the same
lock, so calling this function should be thread-safe.
"""
if self.batch_size == 'auto':
batch_size = self._backend.compute_batch_size()
else:
# Fixed batch size strategy
batch_size = self.batch_size
with self._lock:
tasks = BatchedCalls(itertools.islice(iterator, batch_size))
if len(tasks) == 0:
# No more tasks available in the iterator: tell caller to stop.
return False
else:
self._dispatch(tasks)
return True
def _print(self, msg, msg_args):
"""Display the message on stout or stderr depending on verbosity"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
# Original job iterator becomes None once it has been fully
# consumed : at this point we know the total number of jobs and we are
# able to display an estimation of the remaining time based on already
# completed jobs. Otherwise, we simply display the number of completed
# tasks.
if self._original_iterator is not None:
if _verbosity_filter(self.n_dispatched_batches, self.verbose):
return
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(elapsed_time), ))
else:
index = self.n_completed_tasks
# We are finished dispatching
total_tasks = self.n_dispatched_tasks
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (total_tasks - index + 1 -
self._pre_dispatch_amount)
frequency = (total_tasks // self.verbose) + 1
is_last_item = (index + 1 == total_tasks)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / index) * \
(self.n_dispatched_tasks - index * 1.0)
# only display status if remaining time is greater or equal to 0
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index,
total_tasks,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._iterating or len(self._jobs) > 0:
if len(self._jobs) == 0:
# Wait for an async callback to dispatch new jobs
time.sleep(0.01)
continue
# We need to be careful: the job list can be filling up as
# we empty it and Python lists are not thread-safe by default, hence
# the use of the lock
with self._lock:
job = self._jobs.pop(0)
try:
if getattr(self._backend, 'supports_timeout', False):
self._output.extend(job.get(timeout=self.timeout))
else:
self._output.extend(job.get())
except BaseException as exception:
# Note: we catch any BaseException instead of just Exception
# instances to also include KeyboardInterrupt.
# Stop dispatching any new job in the async callback thread
self._aborting = True
# If the backend allows it, cancel or kill remaining running
# tasks without waiting for the results as we will raise
# the exception we got back to the caller instead of returning
# any result.
backend = self._backend
if (backend is not None and
hasattr(backend, 'abort_everything')):
# If the backend is managed externally we need to make sure
# to leave it in a working state to allow for future jobs
# scheduling.
ensure_ready = self._managed_backend
backend.abort_everything(ensure_ready=ensure_ready)
if not isinstance(exception, TransportableException):
raise
else:
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (this_report, exception.message)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
exception = exception_type(report)
raise exception
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
if not self._managed_backend:
n_jobs = self._initialize_backend()
else:
n_jobs = self._effective_n_jobs()
iterator = iter(iterable)
pre_dispatch = self.pre_dispatch
if pre_dispatch == 'all' or n_jobs == 1:
# prevent further dispatch via multiprocessing callback thread
self._original_iterator = None
self._pre_dispatch_amount = 0
else:
self._original_iterator = iterator
if hasattr(pre_dispatch, 'endswith'):
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions.
iterator = itertools.islice(iterator, pre_dispatch)
self._start_time = time.time()
self.n_dispatched_batches = 0
self.n_dispatched_tasks = 0
self.n_completed_tasks = 0
try:
# Only set self._iterating to True if at least a batch
# was dispatched. In particular this covers the edge
# case of Parallel used with an exhausted iterator.
while self.dispatch_one_batch(iterator):
self._iterating = True
else:
self._iterating = False
if pre_dispatch == "all" or n_jobs == 1:
# The iterable was consumed all at once by the above while loop.
# No need to wait for async callbacks to trigger
# consumption.
self._iterating = False
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output), len(self._output),
short_format_time(elapsed_time)))
finally:
if not self._managed_backend:
self._terminate_backend()
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
| herilalaina/scikit-learn | sklearn/externals/joblib/parallel.py | Python | bsd-3-clause | 33,164 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_job
short_description: Module to manage jobs in oVirt/RHV
version_added: "2.9"
author: "Martin Necas (@mnecas)"
description:
- "This module manage jobs in oVirt/RHV. It can also manage steps of the job."
options:
description:
description:
- "Description of the job."
required: true
state:
description:
- "Should the job be C(present)/C(absent)/C(failed)."
- "C(started) is alias for C(present). C(finished) is alias for C(absent). Same in the steps."
- "Note when C(finished)/C(failed) it will finish/fail all steps."
choices: ['present', 'absent', 'started', 'finished', 'failed']
default: present
steps:
description:
- "The steps of the job."
suboptions:
description:
description:
- "Description of the step."
required: true
state:
description:
- "Should the step be present/absent/failed."
- "Note when one step fail whole job will fail"
- "Note when all steps are finished it will finish job."
choices: ['present', 'absent', 'started', 'finished', 'failed']
default: present
type: list
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
- name: Create job with two steps
ovirt_job:
description: job_name
steps:
- description: step_name_A
- description: step_name_B
- name: Finish one step
ovirt_job:
description: job_name
steps:
- description: step_name_A
state: finished
- name: When you fail one step whole job will stop
ovirt_job:
description: job_name
steps:
- description: step_name_B
state: failed
- name: Finish all steps
ovirt_job:
description: job_name
state: finished
'''
RETURN = '''
id:
description: ID of the job which is managed
returned: On success if job is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
job:
description: "Dictionary of all the job attributes. Job attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/job."
returned: On success if job is found.
type: dict
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
equal,
get_id_by_name,
ovirt_full_argument_spec,
get_dict_of_struct,
)
def build_job(description):
return otypes.Job(
description=description,
status=otypes.JobStatus.STARTED,
external=True,
auto_cleared=True
)
def build_step(description, job_id):
return otypes.Step(
description=description,
type=otypes.StepEnum.UNKNOWN,
job=otypes.Job(
id=job_id
),
status=otypes.StepStatus.STARTED,
external=True,
)
def attach_steps(module, job_id, jobs_service):
changed = False
steps_service = jobs_service.job_service(job_id).steps_service()
if module.params.get('steps'):
for step in module.params.get('steps'):
step_entity = get_entity(steps_service, step.get('description'))
step_state = step.get('state', 'present')
if step_state in ['present', 'started']:
if step_entity is None:
steps_service.add(build_step(step.get('description'), job_id))
changed = True
if step_entity is not None and step_entity.status not in [otypes.StepStatus.FINISHED, otypes.StepStatus.FAILED]:
if step_state in ['absent', 'finished']:
steps_service.step_service(step_entity.id).end(succeeded=True)
changed = True
elif step_state == 'failed':
steps_service.step_service(step_entity.id).end(succeeded=False)
changed = True
return changed
def get_entity(service, description):
all_entities = service.list()
for entity in all_entities:
if entity.description == description and entity.status not in [otypes.StepStatus.FINISHED, otypes.JobStatus.FINISHED]:
return entity
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent', 'started', 'finished', 'failed'],
default='present',
),
description=dict(default=None),
steps=dict(default=None, type='list'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=False,
)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
jobs_service = connection.system_service().jobs_service()
state = module.params['state']
job = get_entity(jobs_service, module.params['description'])
changed = False
if state in ['present', 'started']:
if job is None:
job = jobs_service.add(build_job(module.params['description']))
changed = True
changed = attach_steps(module, job.id, jobs_service) or changed
if job is not None and job.status not in [otypes.JobStatus.FINISHED, otypes.JobStatus.FAILED]:
if state in ['absent', 'finished']:
jobs_service.job_service(job.id).end(succeeded=True)
changed = True
elif state == 'failed':
jobs_service.job_service(job.id).end(succeeded=False)
changed = True
ret = {
'changed': changed,
'id': getattr(job, 'id', None),
'job': get_dict_of_struct(
struct=job,
connection=connection,
fetch_nested=True,
attributes=module.params.get('nested_attributes'),
),
}
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
| thaim/ansible | lib/ansible/modules/cloud/ovirt/ovirt_job.py | Python | mit | 7,379 |
# flake8: NOQA
# "flake8: NOQA" to suppress warning "H104 File contains nothing but comments"
# TODO(okuta): Implement packbits
# TODO(okuta): Implement unpackbits
| tigerneil/chainer | cupy/binary/packing.py | Python | mit | 168 |
# -*- coding: utf-8 -*-
"""
Tests of extended hints
"""
import unittest
from ddt import ddt, data, unpack
# With the use of ddt, some of the data expected_string cases below are naturally long stretches
# of text without whitespace. I think it's best to leave such lines intact
# in the test code. Therefore:
# pylint: disable=line-too-long
# For our many ddt data cases, prefer a compact form of { .. }
from capa.tests.helpers import new_loncapa_problem, load_fixture
class HintTest(unittest.TestCase):
"""Base class for tests of extended hinting functionality."""
def correctness(self, problem_id, choice):
"""Grades the problem and returns the 'correctness' string from cmap."""
student_answers = {problem_id: choice}
cmap = self.problem.grade_answers(answers=student_answers) # pylint: disable=no-member
return cmap[problem_id]['correctness']
def get_hint(self, problem_id, choice):
"""Grades the problem and returns its hint from cmap or the empty string."""
student_answers = {problem_id: choice}
cmap = self.problem.grade_answers(answers=student_answers) # pylint: disable=no-member
adict = cmap.cmap.get(problem_id)
if adict:
return adict['msg']
else:
return ''
# It is a little surprising how much more complicated TextInput is than all the other cases.
@ddt
class TextInputHintsTest(HintTest):
"""
Test text input hints.
"""
xml = load_fixture('extended_hints_text_input.xml')
problem = new_loncapa_problem(xml)
def test_tracking_log(self):
"""Test that the tracking log comes out right."""
self.problem.capa_module.reset_mock()
self.get_hint(u'1_3_1', u'Blue')
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'module_id': 'i4x://Foo/bar/mock/abc',
'problem_part_id': '1_2',
'trigger_type': 'single',
'hint_label': u'Correct:',
'correctness': True,
'student_answer': [u'Blue'],
'question_type': 'stringresponse',
'hints': [{'text': 'The red light is scattered by water molecules leaving only blue light.'}]}
)
@data(
{'problem_id': u'1_2_1', u'choice': u'GermanyΩ',
'expected_string': u'<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">Incorrect: </span><div class="hint-text">I do not think so.Ω</div></div>'},
{'problem_id': u'1_2_1', u'choice': u'franceΩ',
'expected_string': u'<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">Correct: </span><div class="hint-text">Viva la France!Ω</div></div>'},
{'problem_id': u'1_2_1', u'choice': u'FranceΩ',
'expected_string': u'<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">Correct: </span><div class="hint-text">Viva la France!Ω</div></div>'},
{'problem_id': u'1_2_1', u'choice': u'Mexico',
'expected_string': ''},
{'problem_id': u'1_2_1', u'choice': u'USAΩ',
'expected_string': u'<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">Correct: </span><div class="hint-text">Less well known, but yes, there is a Paris, Texas.Ω</div></div>'},
{'problem_id': u'1_2_1', u'choice': u'usaΩ',
'expected_string': u'<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">Correct: </span><div class="hint-text">Less well known, but yes, there is a Paris, Texas.Ω</div></div>'},
{'problem_id': u'1_2_1', u'choice': u'uSAxΩ',
'expected_string': u''},
{'problem_id': u'1_2_1', u'choice': u'NICKLANDΩ',
'expected_string': u'<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">Incorrect: </span><div class="hint-text">The country name does not end in LANDΩ</div></div>'},
{'problem_id': u'1_3_1', u'choice': u'Blue',
'expected_string': u'<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">Correct: </span><div class="hint-text">The red light is scattered by water molecules leaving only blue light.</div></div>'},
{'problem_id': u'1_3_1', u'choice': u'blue',
'expected_string': u''},
{'problem_id': u'1_3_1', u'choice': u'b',
'expected_string': u''},
)
@unpack
def test_text_input_hints(self, problem_id, choice, expected_string):
hint = self.get_hint(problem_id, choice)
self.assertEqual(hint, expected_string)
@ddt
class TextInputExtendedHintsCaseInsensitive(HintTest):
"""Test Text Input Extended hints Case Insensitive"""
xml = load_fixture('extended_hints_text_input.xml')
problem = new_loncapa_problem(xml)
@data(
{'problem_id': u'1_5_1', 'choice': 'abc', 'expected_string': ''}, # wrong answer yielding no hint
{'problem_id': u'1_5_1', 'choice': 'A', 'expected_string':
u'<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">Woo Hoo </span><div class="hint-text">hint1</div></div>'},
{'problem_id': u'1_5_1', 'choice': 'a', 'expected_string':
u'<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">Woo Hoo </span><div class="hint-text">hint1</div></div>'},
{'problem_id': u'1_5_1', 'choice': 'B', 'expected_string':
u'<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><div class="hint-text">hint2</div></div>'},
{'problem_id': u'1_5_1', 'choice': 'b', 'expected_string':
u'<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><div class="hint-text">hint2</div></div>'},
{'problem_id': u'1_5_1', 'choice': 'C', 'expected_string':
u'<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><div class="hint-text">hint4</div></div>'},
{'problem_id': u'1_5_1', 'choice': 'c', 'expected_string':
u'<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><div class="hint-text">hint4</div></div>'},
# regexp cases
{'problem_id': u'1_5_1', 'choice': 'FGGG', 'expected_string':
u'<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><div class="hint-text">hint6</div></div>'},
{'problem_id': u'1_5_1', 'choice': 'fgG', 'expected_string':
u'<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><div class="hint-text">hint6</div></div>'},
)
@unpack
def test_text_input_hints(self, problem_id, choice, expected_string):
hint = self.get_hint(problem_id, choice)
self.assertEqual(hint, expected_string)
@ddt
class TextInputExtendedHintsCaseSensitive(HintTest):
"""Sometimes the semantics can be encoded in the class name."""
xml = load_fixture('extended_hints_text_input.xml')
problem = new_loncapa_problem(xml)
@data(
{'problem_id': u'1_6_1', 'choice': 'abc', 'expected_string': ''},
{'problem_id': u'1_6_1', 'choice': 'A', 'expected_string':
u'<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">Correct: </span><div class="hint-text">hint1</div></div>'},
{'problem_id': u'1_6_1', 'choice': 'a', 'expected_string': u''},
{'problem_id': u'1_6_1', 'choice': 'B', 'expected_string':
u'<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">Correct: </span><div class="hint-text">hint2</div></div>'},
{'problem_id': u'1_6_1', 'choice': 'b', 'expected_string': u''},
{'problem_id': u'1_6_1', 'choice': 'C', 'expected_string':
u'<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">Incorrect: </span><div class="hint-text">hint4</div></div>'},
{'problem_id': u'1_6_1', 'choice': 'c', 'expected_string': u''},
# regexp cases
{'problem_id': u'1_6_1', 'choice': 'FGG', 'expected_string':
u'<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">Incorrect: </span><div class="hint-text">hint6</div></div>'},
{'problem_id': u'1_6_1', 'choice': 'fgG', 'expected_string': u''},
)
@unpack
def test_text_input_hints(self, problem_id, choice, expected_string):
message_text = self.get_hint(problem_id, choice)
self.assertEqual(message_text, expected_string)
@ddt
class TextInputExtendedHintsCompatible(HintTest):
"""
Compatibility test with mixed old and new style additional_answer tags.
"""
xml = load_fixture('extended_hints_text_input.xml')
problem = new_loncapa_problem(xml)
@data(
{'problem_id': u'1_7_1', 'choice': 'A', 'correct': 'correct',
'expected_string': '<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">Correct: </span><div class="hint-text">hint1</div></div>'},
{'problem_id': u'1_7_1', 'choice': 'B', 'correct': 'correct', 'expected_string': ''},
{'problem_id': u'1_7_1', 'choice': 'C', 'correct': 'correct',
'expected_string': '<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">Correct: </span><div class="hint-text">hint2</div></div>'},
{'problem_id': u'1_7_1', 'choice': 'D', 'correct': 'incorrect', 'expected_string': ''},
# check going through conversion with difficult chars
{'problem_id': u'1_7_1', 'choice': """<&"'>""", 'correct': 'correct', 'expected_string': ''},
)
@unpack
def test_text_input_hints(self, problem_id, choice, correct, expected_string):
message_text = self.get_hint(problem_id, choice)
self.assertEqual(message_text, expected_string)
self.assertEqual(self.correctness(problem_id, choice), correct)
@ddt
class TextInputExtendedHintsRegex(HintTest):
"""
Extended hints where the answer is regex mode.
"""
xml = load_fixture('extended_hints_text_input.xml')
problem = new_loncapa_problem(xml)
@data(
{'problem_id': u'1_8_1', 'choice': 'ABwrong', 'correct': 'incorrect', 'expected_string': ''},
{'problem_id': u'1_8_1', 'choice': 'ABC', 'correct': 'correct',
'expected_string': '<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">Correct: </span><div class="hint-text">hint1</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'ABBBBC', 'correct': 'correct',
'expected_string': '<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">Correct: </span><div class="hint-text">hint1</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'aBc', 'correct': 'correct',
'expected_string': '<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">Correct: </span><div class="hint-text">hint1</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'BBBB', 'correct': 'correct',
'expected_string': '<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">Correct: </span><div class="hint-text">hint2</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'bbb', 'correct': 'correct',
'expected_string': '<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">Correct: </span><div class="hint-text">hint2</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'C', 'correct': 'incorrect',
'expected_string': u'<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">Incorrect: </span><div class="hint-text">hint4</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'c', 'correct': 'incorrect',
'expected_string': u'<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">Incorrect: </span><div class="hint-text">hint4</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'D', 'correct': 'incorrect',
'expected_string': u'<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">Incorrect: </span><div class="hint-text">hint6</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'd', 'correct': 'incorrect',
'expected_string': u'<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">Incorrect: </span><div class="hint-text">hint6</div></div>'},
)
@unpack
def test_text_input_hints(self, problem_id, choice, correct, expected_string):
message_text = self.get_hint(problem_id, choice)
self.assertEqual(message_text, expected_string)
self.assertEqual(self.correctness(problem_id, choice), correct)
@ddt
class NumericInputHintsTest(HintTest):
"""
This class consists of a suite of test cases to be run on the numeric input problem represented by the XML below.
"""
xml = load_fixture('extended_hints_numeric_input.xml')
problem = new_loncapa_problem(xml) # this problem is properly constructed
def test_tracking_log(self):
self.get_hint(u'1_2_1', u'1.141')
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'module_id': 'i4x://Foo/bar/mock/abc', 'problem_part_id': '1_1', 'trigger_type': 'single',
'hint_label': u'Nice',
'correctness': True,
'student_answer': [u'1.141'],
'question_type': 'numericalresponse',
'hints': [{'text': 'The square root of two turns up in the strangest places.'}]}
)
@data(
{'problem_id': u'1_2_1', 'choice': '1.141',
'expected_string': u'<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">Nice </span><div class="hint-text">The square root of two turns up in the strangest places.</div></div>'},
# additional answer
{'problem_id': u'1_2_1', 'choice': '10',
'expected_string': u'<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">Correct: </span><div class="hint-text">This is an additional hint.</div></div>'},
{'problem_id': u'1_3_1', 'choice': '4',
'expected_string': u'<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">Correct: </span><div class="hint-text">Pretty easy, uh?.</div></div>'},
# should get hint, when correct via numeric-tolerance
{'problem_id': u'1_2_1', 'choice': '1.15',
'expected_string': u'<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">Nice </span><div class="hint-text">The square root of two turns up in the strangest places.</div></div>'},
# when they answer wrong, nothing
{'problem_id': u'1_2_1', 'choice': '2', 'expected_string': ''},
)
@unpack
def test_numeric_input_hints(self, problem_id, choice, expected_string):
hint = self.get_hint(problem_id, choice)
self.assertEqual(hint, expected_string)
@ddt
class CheckboxHintsTest(HintTest):
"""
This class consists of a suite of test cases to be run on the checkbox problem represented by the XML below.
"""
xml = load_fixture('extended_hints_checkbox.xml')
problem = new_loncapa_problem(xml) # this problem is properly constructed
@data(
{'problem_id': u'1_2_1', 'choice': [u'choice_0'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">Incorrect: </span><div class="feedback-hint-multi"><div class="hint-text">You are right that apple is a fruit.</div><div class="hint-text">You are right that mushrooms are not fruit</div><div class="hint-text">Remember that grape is also a fruit.</div><div class="hint-text">What is a camero anyway?</div></div></div>'},
{'problem_id': u'1_2_1', 'choice': [u'choice_1'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">Incorrect: </span><div class="feedback-hint-multi"><div class="hint-text">Remember that apple is also a fruit.</div><div class="hint-text">Mushroom is a fungus, not a fruit.</div><div class="hint-text">Remember that grape is also a fruit.</div><div class="hint-text">What is a camero anyway?</div></div></div>'},
{'problem_id': u'1_2_1', 'choice': [u'choice_2'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">Incorrect: </span><div class="feedback-hint-multi"><div class="hint-text">Remember that apple is also a fruit.</div><div class="hint-text">You are right that mushrooms are not fruit</div><div class="hint-text">You are right that grape is a fruit</div><div class="hint-text">What is a camero anyway?</div></div></div>'},
{'problem_id': u'1_2_1', 'choice': [u'choice_3'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">Incorrect: </span><div class="feedback-hint-multi"><div class="hint-text">Remember that apple is also a fruit.</div><div class="hint-text">You are right that mushrooms are not fruit</div><div class="hint-text">Remember that grape is also a fruit.</div><div class="hint-text">What is a camero anyway?</div></div></div>'},
{'problem_id': u'1_2_1', 'choice': [u'choice_4'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">Incorrect: </span><div class="feedback-hint-multi"><div class="hint-text">Remember that apple is also a fruit.</div><div class="hint-text">You are right that mushrooms are not fruit</div><div class="hint-text">Remember that grape is also a fruit.</div><div class="hint-text">I do not know what a Camero is but it is not a fruit.</div></div></div>'},
{'problem_id': u'1_2_1', 'choice': [u'choice_0', u'choice_1'], # compound
'expected_string': u'<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">Almost right </span><div class="hint-text">You are right that apple is a fruit, but there is one you are missing. Also, mushroom is not a fruit.</div></div>'},
{'problem_id': u'1_2_1', 'choice': [u'choice_1', u'choice_2'], # compound
'expected_string': u'<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">Incorrect: </span><div class="hint-text">You are right that grape is a fruit, but there is one you are missing. Also, mushroom is not a fruit.</div></div>'},
{'problem_id': u'1_2_1', 'choice': [u'choice_0', u'choice_2'],
'expected_string': u'<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">Correct: </span><div class="feedback-hint-multi"><div class="hint-text">You are right that apple is a fruit.</div><div class="hint-text">You are right that mushrooms are not fruit</div><div class="hint-text">You are right that grape is a fruit</div><div class="hint-text">What is a camero anyway?</div></div></div>'},
{'problem_id': u'1_3_1', 'choice': [u'choice_0'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">Incorrect: </span><div class="feedback-hint-multi"><div class="hint-text">No, sorry, a banana is a fruit.</div><div class="hint-text">You are right that mushrooms are not vegatbles</div><div class="hint-text">Brussel sprout is the only vegetable in this list.</div></div></div>'},
{'problem_id': u'1_3_1', 'choice': [u'choice_1'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">Incorrect: </span><div class="feedback-hint-multi"><div class="hint-text">poor banana.</div><div class="hint-text">You are right that mushrooms are not vegatbles</div><div class="hint-text">Brussel sprout is the only vegetable in this list.</div></div></div>'},
{'problem_id': u'1_3_1', 'choice': [u'choice_2'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">Incorrect: </span><div class="feedback-hint-multi"><div class="hint-text">poor banana.</div><div class="hint-text">Mushroom is a fungus, not a vegetable.</div><div class="hint-text">Brussel sprout is the only vegetable in this list.</div></div></div>'},
{'problem_id': u'1_3_1', 'choice': [u'choice_3'],
'expected_string': u'<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">Correct: </span><div class="feedback-hint-multi"><div class="hint-text">poor banana.</div><div class="hint-text">You are right that mushrooms are not vegatbles</div><div class="hint-text">Brussel sprouts are vegetables.</div></div></div>'},
{'problem_id': u'1_3_1', 'choice': [u'choice_0', u'choice_1'], # compound
'expected_string': u'<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">Very funny </span><div class="hint-text">Making a banana split?</div></div>'},
{'problem_id': u'1_3_1', 'choice': [u'choice_1', u'choice_2'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">Incorrect: </span><div class="feedback-hint-multi"><div class="hint-text">poor banana.</div><div class="hint-text">Mushroom is a fungus, not a vegetable.</div><div class="hint-text">Brussel sprout is the only vegetable in this list.</div></div></div>'},
{'problem_id': u'1_3_1', 'choice': [u'choice_0', u'choice_2'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">Incorrect: </span><div class="feedback-hint-multi"><div class="hint-text">No, sorry, a banana is a fruit.</div><div class="hint-text">Mushroom is a fungus, not a vegetable.</div><div class="hint-text">Brussel sprout is the only vegetable in this list.</div></div></div>'},
# check for interaction between compoundhint and correct/incorrect
{'problem_id': u'1_4_1', 'choice': [u'choice_0', u'choice_1'], # compound
'expected_string': u'<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">Incorrect: </span><div class="hint-text">AB</div></div>'},
{'problem_id': u'1_4_1', 'choice': [u'choice_0', u'choice_2'], # compound
'expected_string': u'<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">Correct: </span><div class="hint-text">AC</div></div>'},
# check for labeling where multiple child hints have labels
# These are some tricky cases
{'problem_id': '1_5_1', 'choice': ['choice_0', 'choice_1'],
'expected_string': '<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">AA </span><div class="feedback-hint-multi"><div class="hint-text">aa</div></div></div>'},
{'problem_id': '1_5_1', 'choice': ['choice_0'],
'expected_string': '<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">Incorrect: </span><div class="feedback-hint-multi"><div class="hint-text">aa</div><div class="hint-text">bb</div></div></div>'},
{'problem_id': '1_5_1', 'choice': ['choice_1'],
'expected_string': ''},
{'problem_id': '1_5_1', 'choice': [],
'expected_string': '<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">BB </span><div class="feedback-hint-multi"><div class="hint-text">bb</div></div></div>'},
{'problem_id': '1_6_1', 'choice': ['choice_0'],
'expected_string': '<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><div class="feedback-hint-multi"><div class="hint-text">aa</div></div></div>'},
{'problem_id': '1_6_1', 'choice': ['choice_0', 'choice_1'],
'expected_string': '<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><div class="hint-text">compoundo</div></div>'},
# The user selects *nothing*, but can still get "unselected" feedback
{'problem_id': '1_7_1', 'choice': [],
'expected_string': '<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">Incorrect: </span><div class="feedback-hint-multi"><div class="hint-text">bb</div></div></div>'},
# 100% not match of sel/unsel feedback
{'problem_id': '1_7_1', 'choice': ['choice_1'],
'expected_string': ''},
# Here we have the correct combination, and that makes feedback too
{'problem_id': '1_7_1', 'choice': ['choice_0'],
'expected_string': '<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">Correct: </span><div class="feedback-hint-multi"><div class="hint-text">aa</div><div class="hint-text">bb</div></div></div>'},
)
@unpack
def test_checkbox_hints(self, problem_id, choice, expected_string):
self.maxDiff = None # pylint: disable=invalid-name
hint = self.get_hint(problem_id, choice)
self.assertEqual(hint, expected_string)
class CheckboxHintsTestTracking(HintTest):
"""
Test the rather complicated tracking log output for checkbox cases.
"""
xml = """
<problem>
<p>question</p>
<choiceresponse>
<checkboxgroup>
<choice correct="true">Apple
<choicehint selected="true">A true</choicehint>
<choicehint selected="false">A false</choicehint>
</choice>
<choice correct="false">Banana
</choice>
<choice correct="true">Cronut
<choicehint selected="true">C true</choicehint>
</choice>
<compoundhint value="A C">A C Compound</compoundhint>
</checkboxgroup>
</choiceresponse>
</problem>
"""
problem = new_loncapa_problem(xml)
def test_tracking_log(self):
"""Test checkbox tracking log - by far the most complicated case"""
# A -> 1 hint
self.get_hint(u'1_2_1', [u'choice_0'])
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'hint_label': u'Incorrect:',
'module_id': 'i4x://Foo/bar/mock/abc',
'problem_part_id': '1_1',
'choice_all': ['choice_0', 'choice_1', 'choice_2'],
'correctness': False,
'trigger_type': 'single',
'student_answer': [u'choice_0'],
'hints': [{'text': 'A true', 'trigger': [{'choice': 'choice_0', 'selected': True}]}],
'question_type': 'choiceresponse'}
)
# B C -> 2 hints
self.problem.capa_module.runtime.track_function.reset_mock()
self.get_hint(u'1_2_1', [u'choice_1', u'choice_2'])
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'hint_label': u'Incorrect:',
'module_id': 'i4x://Foo/bar/mock/abc',
'problem_part_id': '1_1',
'choice_all': ['choice_0', 'choice_1', 'choice_2'],
'correctness': False,
'trigger_type': 'single',
'student_answer': [u'choice_1', u'choice_2'],
'hints': [
{'text': 'A false', 'trigger': [{'choice': 'choice_0', 'selected': False}]},
{'text': 'C true', 'trigger': [{'choice': 'choice_2', 'selected': True}]}
],
'question_type': 'choiceresponse'}
)
# A C -> 1 Compound hint
self.problem.capa_module.runtime.track_function.reset_mock()
self.get_hint(u'1_2_1', [u'choice_0', u'choice_2'])
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'hint_label': u'Correct:',
'module_id': 'i4x://Foo/bar/mock/abc',
'problem_part_id': '1_1',
'choice_all': ['choice_0', 'choice_1', 'choice_2'],
'correctness': True,
'trigger_type': 'compound',
'student_answer': [u'choice_0', u'choice_2'],
'hints': [
{'text': 'A C Compound',
'trigger': [{'choice': 'choice_0', 'selected': True}, {'choice': 'choice_2', 'selected': True}]}
],
'question_type': 'choiceresponse'}
)
@ddt
class MultipleChoiceHintsTest(HintTest):
"""
This class consists of a suite of test cases to be run on the multiple choice problem represented by the XML below.
"""
xml = load_fixture('extended_hints_multiple_choice.xml')
problem = new_loncapa_problem(xml)
def test_tracking_log(self):
"""Test that the tracking log comes out right."""
self.problem.capa_module.reset_mock()
self.get_hint(u'1_3_1', u'choice_2')
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'module_id': 'i4x://Foo/bar/mock/abc', 'problem_part_id': '1_2', 'trigger_type': 'single',
'student_answer': [u'choice_2'], 'correctness': False, 'question_type': 'multiplechoiceresponse',
'hint_label': 'OOPS', 'hints': [{'text': 'Apple is a fruit.'}]}
)
@data(
{'problem_id': u'1_2_1', 'choice': u'choice_0',
'expected_string': '<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><div class="hint-text">Mushroom is a fungus, not a fruit.</div></div>'},
{'problem_id': u'1_2_1', 'choice': u'choice_1',
'expected_string': ''},
{'problem_id': u'1_3_1', 'choice': u'choice_1',
'expected_string': '<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">Correct: </span><div class="hint-text">Potato is a root vegetable.</div></div>'},
{'problem_id': u'1_2_1', 'choice': u'choice_2',
'expected_string': '<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">OUTSTANDING </span><div class="hint-text">Apple is indeed a fruit.</div></div>'},
{'problem_id': u'1_3_1', 'choice': u'choice_2',
'expected_string': '<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">OOPS </span><div class="hint-text">Apple is a fruit.</div></div>'},
{'problem_id': u'1_3_1', 'choice': u'choice_9',
'expected_string': ''},
)
@unpack
def test_multiplechoice_hints(self, problem_id, choice, expected_string):
hint = self.get_hint(problem_id, choice)
self.assertEqual(hint, expected_string)
@ddt
class MultipleChoiceHintsWithHtmlTest(HintTest):
"""
This class consists of a suite of test cases to be run on the multiple choice problem represented by the XML below.
"""
xml = load_fixture('extended_hints_multiple_choice_with_html.xml')
problem = new_loncapa_problem(xml)
def test_tracking_log(self):
"""Test that the tracking log comes out right."""
self.problem.capa_module.reset_mock()
self.get_hint(u'1_2_1', u'choice_0')
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'module_id': 'i4x://Foo/bar/mock/abc', 'problem_part_id': '1_1', 'trigger_type': 'single',
'student_answer': [u'choice_0'], 'correctness': False, 'question_type': 'multiplechoiceresponse',
'hint_label': 'Incorrect:', 'hints': [{'text': 'Mushroom <img src="#" ale="#"/>is a fungus, not a fruit.'}]}
)
@data(
{'problem_id': u'1_2_1', 'choice': u'choice_0',
'expected_string': '<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">Incorrect: </span><div class="hint-text">Mushroom <img src="#" ale="#"/>is a fungus, not a fruit.</div></div>'},
{'problem_id': u'1_2_1', 'choice': u'choice_1',
'expected_string': '<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">Incorrect: </span><div class="hint-text">Potato is <img src="#" ale="#"/> not a fruit.</div></div>'},
{'problem_id': u'1_2_1', 'choice': u'choice_2',
'expected_string': '<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">Correct: </span><div class="hint-text"><a href="#">Apple</a> is a fruit.</div></div>'}
)
@unpack
def test_multiplechoice_hints(self, problem_id, choice, expected_string):
hint = self.get_hint(problem_id, choice)
self.assertEqual(hint, expected_string)
@ddt
class DropdownHintsTest(HintTest):
"""
This class consists of a suite of test cases to be run on the drop down problem represented by the XML below.
"""
xml = load_fixture('extended_hints_dropdown.xml')
problem = new_loncapa_problem(xml)
def test_tracking_log(self):
"""Test that the tracking log comes out right."""
self.problem.capa_module.reset_mock()
self.get_hint(u'1_3_1', u'FACES')
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'module_id': 'i4x://Foo/bar/mock/abc', 'problem_part_id': '1_2', 'trigger_type': 'single',
'student_answer': [u'FACES'], 'correctness': True, 'question_type': 'optionresponse',
'hint_label': 'Correct:', 'hints': [{'text': 'With lots of makeup, doncha know?'}]}
)
@data(
{'problem_id': u'1_2_1', 'choice': 'Multiple Choice',
'expected_string': '<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">Good Job </span><div class="hint-text">Yes, multiple choice is the right answer.</div></div>'},
{'problem_id': u'1_2_1', 'choice': 'Text Input',
'expected_string': '<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">Incorrect: </span><div class="hint-text">No, text input problems do not present options.</div></div>'},
{'problem_id': u'1_2_1', 'choice': 'Numerical Input',
'expected_string': '<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">Incorrect: </span><div class="hint-text">No, numerical input problems do not present options.</div></div>'},
{'problem_id': u'1_3_1', 'choice': 'FACES',
'expected_string': '<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">Correct: </span><div class="hint-text">With lots of makeup, doncha know?</div></div>'},
{'problem_id': u'1_3_1', 'choice': 'dogs',
'expected_string': '<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">NOPE </span><div class="hint-text">Not dogs, not cats, not toads</div></div>'},
{'problem_id': u'1_3_1', 'choice': 'wrongo',
'expected_string': ''},
# Regression case where feedback includes answer substring
{'problem_id': u'1_4_1', 'choice': 'AAA',
'expected_string': '<div class="feedback-hint-incorrect"><div class="explanation-title">Answer</div><span class="hint-label">Incorrect: </span><div class="hint-text">AAABBB1</div></div>'},
{'problem_id': u'1_4_1', 'choice': 'BBB',
'expected_string': '<div class="feedback-hint-correct"><div class="explanation-title">Answer</div><span class="hint-label">Correct: </span><div class="hint-text">AAABBB2</div></div>'},
{'problem_id': u'1_4_1', 'choice': 'not going to match',
'expected_string': ''},
)
@unpack
def test_dropdown_hints(self, problem_id, choice, expected_string):
hint = self.get_hint(problem_id, choice)
self.assertEqual(hint, expected_string)
class ErrorConditionsTest(HintTest):
"""
Erroneous xml should raise exception.
"""
def test_error_conditions_illegal_element(self):
xml_with_errors = load_fixture('extended_hints_with_errors.xml')
with self.assertRaises(Exception):
new_loncapa_problem(xml_with_errors) # this problem is improperly constructed
| synergeticsedx/deployment-wipro | common/lib/capa/capa/tests/test_hint_functionality.py | Python | agpl-3.0 | 37,584 |
# -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal, or the equivalent
check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
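# Editor's note: the short sketch below is an illustrative addition, not part
# of the original module. It shows the shape validation described in the
# docstring above; the helper name _demo_check_pairwise_arrays is hypothetical
# and the function is never called at import time.
def _demo_check_pairwise_arrays():
    X, Y = check_pairwise_arrays(np.ones((3, 2)), np.ones((4, 2)))
    assert X.shape == (3, 2) and Y.shape == (4, 2)  # same n_features: accepted
    try:
        check_pairwise_arrays(np.ones((3, 2)), np.ones((4, 5)))
    except ValueError:
        pass  # mismatched second dimensions raise ValueError, as documented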
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
    vectors x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
X_norm_squared : array-like, shape = [n_samples_1], optional
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
    paired_distances : distances between pairs of elements of X and Y.
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
XX = check_array(X_norm_squared)
if XX.shape == (1, X.shape[0]):
XX = XX.T
elif XX.shape != (X.shape[0], 1):
raise ValueError(
"Incompatible dimensions for X and X_norm_squared")
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is not None:
YY = check_array(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
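# Editor's note: editor-added sketch (not in the original source) checking the
# dot-product expansion used by euclidean_distances against a direct
# computation; _demo_euclidean_expansion is a hypothetical, uncalled helper.
def _demo_euclidean_expansion():
    rng = np.random.RandomState(0)
    X, Y = rng.rand(5, 3), rng.rand(4, 3)
    direct = np.sqrt(((X[:, np.newaxis, :] - Y[np.newaxis, :, :]) ** 2).sum(-1))
    assert np.allclose(euclidean_distances(X, Y), direct)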
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
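# Editor's note: editor-added usage sketch showing that the batched
# argmin/min computation above agrees with the naive full-matrix version;
# _demo_argmin_min is a hypothetical helper, never called here.
def _demo_argmin_min():
    rng = np.random.RandomState(0)
    X, Y = rng.rand(6, 2), rng.rand(9, 2)
    idx, dist = pairwise_distances_argmin_min(X, Y)
    full = euclidean_distances(X, Y)
    assert np.array_equal(idx, full.argmin(axis=1))
    assert np.allclose(dist, full.min(axis=1))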
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
    X : array-like
        Array containing points, shape (n_samples1, n_features).
    Y : array-like
        Array containing points, shape (n_samples2, n_features).
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
        componentwise L1 pairwise-distances (i.e. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
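# Editor's note: editor-added sketch of the two output shapes documented for
# manhattan_distances; not part of the original file.
def _demo_manhattan():
    X = np.array([[1., 2.], [3., 4.]])
    Y = np.array([[1., 2.], [0., 3.]])
    assert np.allclose(manhattan_distances(X, Y), [[0., 2.], [4., 4.]])
    componentwise = manhattan_distances(X, Y, sum_over_features=False)
    assert componentwise.shape == (4, 2)  # (n_samples_X * n_samples_Y, n_features)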
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
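# Editor's note: editor-added sketch relating cosine_distances to
# cosine_similarity (defined later in this module); purely illustrative.
def _demo_cosine_distances():
    rng = np.random.RandomState(0)
    X = rng.rand(4, 3)
    assert np.allclose(cosine_distances(X), 1. - cosine_similarity(X))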
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
    -----
    The cosine distance is equivalent to half the squared
    euclidean distance if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
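# Editor's note: editor-added sketch showing that the string and callable
# forms of `metric` give the same paired result; the lambda is an assumption
# used only for illustration.
def _demo_paired_distances():
    X = np.array([[0., 1.], [1., 1.]])
    Y = np.array([[0., 1.], [2., 1.]])
    by_name = paired_distances(X, Y, metric="euclidean")
    by_callable = paired_distances(
        X, Y, metric=lambda a, b: np.sqrt(((a - b) ** 2).sum()))
    assert np.allclose(by_name, [0., 1.])
    assert np.allclose(by_name, by_callable)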
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
degree : int, default 3
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
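# Editor's note: editor-added sketch verifying the closed form
# K(X, Y) = (gamma * <X, Y> + coef0) ** degree documented above.
def _demo_polynomial_kernel():
    rng = np.random.RandomState(0)
    X, Y = rng.rand(3, 4), rng.rand(2, 4)
    expected = (0.5 * X.dot(Y.T) + 1) ** 3
    assert np.allclose(polynomial_kernel(X, Y, degree=3, gamma=0.5, coef0=1),
                       expected)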
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
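# Editor's note: editor-added sketch checking rbf_kernel against a direct
# evaluation of exp(-gamma * ||x - y||^2); illustrative only.
def _demo_rbf_kernel():
    rng = np.random.RandomState(0)
    X, Y = rng.rand(3, 2), rng.rand(4, 2)
    gamma = 0.7
    sq = ((X[:, np.newaxis, :] - Y[np.newaxis, :, :]) ** 2).sum(-1)
    assert np.allclose(rbf_kernel(X, Y, gamma=gamma), np.exp(-gamma * sq))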
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
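# Editor's note: editor-added sketch of the equivalence noted in the docstring
# above: on L2-normalized rows, cosine_similarity equals linear_kernel.
def _demo_cosine_vs_linear():
    rng = np.random.RandomState(0)
    X = normalize(rng.rand(5, 3))
    assert np.allclose(cosine_similarity(X), linear_kernel(X, X))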
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
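# Editor's note: editor-added sketch relating chi2_kernel to
# additive_chi2_kernel per the formulas in the two docstrings above.
def _demo_chi2_kernel():
    rng = np.random.RandomState(0)
    X = rng.rand(4, 3)  # non-negative input, as the kernel requires
    gamma = 0.5
    assert np.allclose(chi2_kernel(X, gamma=gamma),
                       np.exp(gamma * additive_chi2_kernel(X)))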
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'precomputed': None, # HACK: precomputed is always allowed, never called
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
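# Editor's note: editor-added sketch of the three ways `metric` can be passed
# to pairwise_distances (scikit-learn name, scipy name, callable); the lambda
# is an illustrative assumption, not part of the API.
def _demo_pairwise_distances_metrics():
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    D_sklearn = pairwise_distances(X, metric="euclidean")
    D_scipy = pairwise_distances(X, metric="sqeuclidean")
    D_callable = pairwise_distances(X, metric=lambda a, b: np.abs(a - b).sum())
    assert np.allclose(D_sklearn ** 2, D_scipy)
    assert np.allclose(D_callable, pairwise_distances(X, metric="manhattan"))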
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
    The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
    filter_params : boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
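# Editor's note: editor-added sketch of pairwise_kernels with filter_params,
# which drops keyword arguments not listed in KERNEL_PARAMS for the chosen
# metric; _demo_pairwise_kernels_filter_params is a hypothetical helper.
def _demo_pairwise_kernels_filter_params():
    rng = np.random.RandomState(0)
    X = rng.rand(4, 3)
    K = pairwise_kernels(X, metric="rbf", filter_params=True,
                         gamma=0.1, degree=2)  # 'degree' is filtered out for rbf
    assert np.allclose(K, rbf_kernel(X, gamma=0.1))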
| mfjb/scikit-learn | sklearn/metrics/pairwise.py | Python | bsd-3-clause | 44,015 |
# Ansible module to manage CheckPoint Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from units.modules.utils import set_module_args, exit_json, fail_json, AnsibleExitJson
from ansible.module_utils import basic
from ansible.modules.network.check_point import cp_mgmt_time_facts
OBJECT = {
"from": 1,
"to": 1,
"total": 6,
"objects": [
"53de74b7-8f19-4cbe-99fc-a81ef0759bad"
]
}
SHOW_PLURAL_PAYLOAD = {
'limit': 1,
'details_level': 'uid'
}
SHOW_SINGLE_PAYLOAD = {
'name': 'object_which_is_not_exist'
}
api_call_object = 'time'
api_call_object_plural_version = 'times'
failure_msg = '''{u'message': u'Requested object [object_which_is_not_exist] not found', u'code': u'generic_err_object_not_found'}'''
class TestCheckpointTimeFacts(object):
module = cp_mgmt_time_facts
@pytest.fixture(autouse=True)
def module_mock(self, mocker):
return mocker.patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
@pytest.fixture
def connection_mock(self, mocker):
connection_class_mock = mocker.patch('ansible.module_utils.network.checkpoint.checkpoint.Connection')
return connection_class_mock.return_value
def test_show_single_object_which_is_not_exist(self, mocker, connection_mock):
connection_mock.send_request.return_value = (404, failure_msg)
try:
result = self._run_module(SHOW_SINGLE_PAYLOAD)
except Exception as e:
result = e.args[0]
assert result['failed']
assert 'Checkpoint device returned error 404 with message ' + failure_msg == result['msg']
def test_show_few_objects(self, mocker, connection_mock):
connection_mock.send_request.return_value = (200, OBJECT)
result = self._run_module(SHOW_PLURAL_PAYLOAD)
assert not result['changed']
assert OBJECT == result['ansible_facts'][api_call_object_plural_version]
def _run_module(self, module_args):
set_module_args(module_args)
with pytest.raises(AnsibleExitJson) as ex:
self.module.main()
return ex.value.args[0]
| thaim/ansible | test/units/modules/network/check_point/test_cp_mgmt_time_facts.py | Python | mit | 2,820 |
"""SCons.Scanner.IDL
This module implements the dependency scanner for IDL (Interface
Definition Language) files.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/IDL.py 4043 2009/02/23 09:06:45 scons"
import SCons.Node.FS
import SCons.Scanner
def IDLScan():
"""Return a prototype Scanner instance for scanning IDL source files"""
cs = SCons.Scanner.ClassicCPP("IDLScan",
"$IDLSUFFIXES",
"CPPPATH",
'^[ \t]*(?:#[ \t]*include|[ \t]*import)[ \t]+(<|")([^>"]+)(>|")')
return cs
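# Illustrative usage sketch (assumes a SCons construction environment; the
# variable values below are examples, not part of this module):
#   env = Environment(IDLSUFFIXES=['.idl'], CPPPATH=['include'])
#   env.Append(SCANNERS=IDLScan())
# The ClassicCPP pattern above matches dependency lines such as
#   #include "foo.idl"
# or
#   import <bar.idl>
# capturing the bracket/quote style and the referenced file name.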
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mastbaum/rat-pac | python/SCons/Scanner/IDL.py | Python | bsd-3-clause | 1,852 |
r"""
==============================================================
Compressed Sparse Graph Routines (:mod:`scipy.sparse.csgraph`)
==============================================================
.. currentmodule:: scipy.sparse.csgraph
Fast graph algorithms based on sparse matrix representations.
Contents
========
.. autosummary::
:toctree: generated/
connected_components -- determine connected components of a graph
laplacian -- compute the laplacian of a graph
shortest_path -- compute the shortest path between points on a positive graph
dijkstra -- use Dijkstra's algorithm for shortest path
floyd_warshall -- use the Floyd-Warshall algorithm for shortest path
bellman_ford -- use the Bellman-Ford algorithm for shortest path
johnson -- use Johnson's algorithm for shortest path
breadth_first_order -- compute a breadth-first order of nodes
depth_first_order -- compute a depth-first order of nodes
breadth_first_tree -- construct the breadth-first tree from a given node
depth_first_tree -- construct a depth-first tree from a given node
minimum_spanning_tree -- construct the minimum spanning tree of a graph
reverse_cuthill_mckee -- compute permutation for reverse Cuthill-McKee ordering
maximum_bipartite_matching -- compute permutation to make diagonal zero free
Graph Representations
=====================
This module uses graphs which are stored in a matrix format. A
graph with N nodes can be represented by an (N x N) adjacency matrix G.
If there is a connection from node i to node j, then G[i, j] = w, where
w is the weight of the connection. For nodes i and j which are
not connected, the value depends on the representation:
- for dense array representations, non-edges are represented by
G[i, j] = 0, infinity, or NaN.
- for dense masked representations (of type np.ma.MaskedArray), non-edges
are represented by masked values. This can be useful when graphs with
zero-weight edges are desired.
- for sparse array representations, non-edges are represented by
non-entries in the matrix. This sort of sparse representation also
allows for edges with zero weights.
As a concrete example, imagine that you would like to represent the following
undirected graph::
G
(0)
/ \
1 2
/ \
(2) (1)
This graph has three nodes, where node 0 and 1 are connected by an edge of
weight 2, and nodes 0 and 2 are connected by an edge of weight 1.
We can construct the dense, masked, and sparse representations as follows,
keeping in mind that an undirected graph is represented by a symmetric matrix::
>>> G_dense = np.array([[0, 2, 1],
... [2, 0, 0],
... [1, 0, 0]])
>>> G_masked = np.ma.masked_values(G_dense, 0)
>>> from scipy.sparse import csr_matrix
>>> G_sparse = csr_matrix(G_dense)
This becomes more difficult when zero edges are significant. For example,
consider the situation when we slightly modify the above graph::
G2
(0)
/ \
0 2
/ \
(2) (1)
This is identical to the previous graph, except nodes 0 and 2 are connected
by an edge of zero weight. In this case, the dense representation above
leads to ambiguities: how can non-edges be represented if zero is a meaningful
value? In this case, either a masked or sparse representation must be used
to eliminate the ambiguity::
>>> G2_data = np.array([[np.inf, 2, 0 ],
... [2, np.inf, np.inf],
... [0, np.inf, np.inf]])
>>> G2_masked = np.ma.masked_invalid(G2_data)
>>> from scipy.sparse.csgraph import csgraph_from_dense
>>> # G2_sparse = csr_matrix(G2_data) would give the wrong result
>>> G2_sparse = csgraph_from_dense(G2_data, null_value=np.inf)
>>> G2_sparse.data
array([ 2., 0., 2., 0.])
Here we have used a utility routine from the csgraph submodule in order to
convert the dense representation to a sparse representation which can be
understood by the algorithms in this submodule. By viewing the data array, we
can see that the zero values are explicitly encoded in the graph.
Directed vs. Undirected
-----------------------
Matrices may represent either directed or undirected graphs. This is
specified throughout the csgraph module by a boolean keyword. Graphs are
assumed to be directed by default. In a directed graph, traversal from node
i to node j can be accomplished over the edge G[i, j], but not the edge
G[j, i]. In a non-directed graph, traversal from node i to node j can be
accomplished over either G[i, j] or G[j, i]. If both edges are not null,
and the two have unequal weights, then the smaller of the two is used.
Note that a symmetric matrix will represent an undirected graph, regardless
of whether the 'directed' keyword is set to True or False. In this case,
using ``directed=True`` generally leads to more efficient computation.
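For example, with a small directed chain graph, the ``directed`` keyword
controls which traversals are allowed (a short illustrative sketch)::
>>> from scipy.sparse.csgraph import shortest_path
>>> G = np.array([[0, 1, 0],
...               [0, 0, 1],
...               [0, 0, 0]])
>>> shortest_path(G, directed=True)[0, 2]
2.0
>>> shortest_path(G, directed=False)[2, 0]
2.0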
The routines in this module accept as input either scipy.sparse representations
(csr, csc, or lil format), masked representations, or dense representations
with non-edges indicated by zeros, infinities, and NaN entries.
"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['cs_graph_components',
'connected_components',
'laplacian',
'shortest_path',
'floyd_warshall',
'dijkstra',
'bellman_ford',
'johnson',
'breadth_first_order',
'depth_first_order',
'breadth_first_tree',
'depth_first_tree',
'minimum_spanning_tree',
'reverse_cuthill_mckee',
'maximum_bipartite_matching',
'construct_dist_matrix',
'reconstruct_path',
'csgraph_from_dense',
'csgraph_masked_from_dense',
'csgraph_to_dense',
'csgraph_to_masked',
'NegativeCycleError']
from ._components import cs_graph_components
from ._laplacian import laplacian
from ._shortest_path import shortest_path, floyd_warshall, dijkstra,\
bellman_ford, johnson, NegativeCycleError
from ._traversal import breadth_first_order, depth_first_order, \
breadth_first_tree, depth_first_tree, connected_components
from ._min_spanning_tree import minimum_spanning_tree
from ._reordering import reverse_cuthill_mckee, maximum_bipartite_matching
from ._tools import construct_dist_matrix, reconstruct_path,\
csgraph_from_dense, csgraph_to_dense, csgraph_masked_from_dense,\
csgraph_from_masked
from numpy import deprecate as _deprecate
cs_graph_components = _deprecate(cs_graph_components,
message=("In the future, use "
"csgraph.connected_components. Note "
"that this new function has a "
"slightly different interface: see "
"the docstring for more "
"information."))
from numpy.testing import Tester
test = Tester().test
| valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/scipy/sparse/csgraph/__init__.py | Python | gpl-2.0 | 7,254 |
from sympy import *
from sympy.printing import print_ccode
from sympy.physics.vector import ReferenceFrame, gradient, divergence
from sympy.vector import CoordSysCartesian
R = ReferenceFrame('R');
x = R[0]; y = R[1];
a=-0.5; b=1.5;
visc=1e-1;
lambda_=(1/(2*visc)-sqrt(1/(4*visc**2)+4*pi**2));
print(" visc=%f" % visc)
u=[0,0]
u[0]=1-exp(lambda_*x)*cos(2*pi*y);
u[1]=lambda_/(2*pi)*exp(lambda_*x)*sin(2*pi*y);
p=(exp(3*lambda_)-exp(-lambda_))/(8*lambda_)-exp(2*lambda_*x)/2;
p=p - integrate(p, (x,a,b));
grad_p = gradient(p, R).to_matrix(R)
f0 = -divergence(visc*gradient(u[0], R), R) + grad_p[0];
f1 = -divergence(visc*gradient(u[1], R), R) + grad_p[1];
f2 = divergence(u[0]*R.x + u[1]*R.y, R);
print("\n * RHS:")
print(ccode(f0, assign_to = "values[0]"));
print(ccode(f1, assign_to = "values[1]"));
print(ccode(f2, assign_to = "values[2]"));
print("\n * ExactSolution:")
print(ccode(u[0], assign_to = "values[0]"));
print(ccode(u[1], assign_to = "values[1]"));
print(ccode(p, assign_to = "values[2]"));
print("")
print("pressure mean:", N(integrate(p,(x,a,b))))
| pesser/dealii | examples/step-55/reference.py | Python | lgpl-2.1 | 1,071 |
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
from powerline.bindings.vim import buffer_name
def commandt(matcher_info):
name = buffer_name(matcher_info)
return name and os.path.basename(name) == b'GoToFile'
| gorczynski/dotfiles | vim/bundle/powerline/powerline/matchers/vim/plugin/commandt.py | Python | gpl-3.0 | 293 |
import mechanize._clientcookie
import mechanize._testcase
def cookie_args(
version=1, name="spam", value="eggs",
port="80", port_specified=True,
domain="example.com", domain_specified=False, domain_initial_dot=False,
path="/", path_specified=False,
secure=False,
expires=0,
discard=True,
comment=None,
comment_url=None,
rest={},
rfc2109=False,
):
return locals()
def make_cookie(*args, **kwds):
return mechanize._clientcookie.Cookie(**cookie_args(*args, **kwds))
class Test(mechanize._testcase.TestCase):
def test_equality(self):
# not using assertNotEqual here since operator used varies across
# Python versions
self.assertEqual(make_cookie(), make_cookie())
self.assertFalse(make_cookie(name="ham") == make_cookie())
def test_inequality(self):
# not using assertNotEqual here since operator used varies across
# Python versions
self.assertTrue(make_cookie(name="ham") != make_cookie())
self.assertFalse(make_cookie() != make_cookie())
def test_all_state_included(self):
def non_equal_value(value):
if value is None:
new_value = "80"
elif isinstance(value, basestring):
new_value = value + "1"
elif isinstance(value, bool):
new_value = not value
elif isinstance(value, dict):
new_value = dict(value)
new_value["spam"] = "eggs"
elif isinstance(value, int):
new_value = value + 1
else:
assert False, value
assert new_value != value, value
return new_value
cookie = make_cookie()
for arg, default_value in cookie_args().iteritems():
new_value = non_equal_value(default_value)
self.assertNotEqual(make_cookie(**{arg: new_value}), cookie)
| mzdaniel/oh-mainline | vendor/packages/mechanize/test/test_cookie.py | Python | agpl-3.0 | 1,934 |
# -*- coding: utf-8 -*-
#
# Clang documentation build configuration file, created by
# sphinx-quickstart on Sun Dec 9 20:01:55 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
from datetime import date
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.mathjax']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Clang'
copyright = u'2007-%d, The Clang Team' % date.today().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.9'
# The full version, including alpha/beta/rc tags.
release = '3.9'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'analyzer']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Clangdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Clang.tex', u'Clang Documentation',
u'The Clang Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = []
# Automatically derive the list of man pages from the contents of the command
# guide subdirectory. This was copied from llvm/docs/conf.py.
basedir = os.path.dirname(__file__)
man_page_authors = u'Maintained by the Clang / LLVM Team (<http://clang.llvm.org>)'
command_guide_subpath = 'CommandGuide'
command_guide_path = os.path.join(basedir, command_guide_subpath)
for name in os.listdir(command_guide_path):
# Ignore non-ReST files and the index page.
if not name.endswith('.rst') or name in ('index.rst',):
continue
# Otherwise, automatically extract the description.
file_subpath = os.path.join(command_guide_subpath, name)
with open(os.path.join(command_guide_path, name)) as f:
title = f.readline().rstrip('\n')
header = f.readline().rstrip('\n')
if len(header) != len(title):
print >>sys.stderr, (
"error: invalid header in %r (does not match title)" % (
file_subpath,))
if ' - ' not in title:
print >>sys.stderr, (
("error: invalid title in %r "
"(expected '<name> - <description>')") % (
file_subpath,))
# Split the name out of the title.
name,description = title.split(' - ', 1)
man_pages.append((file_subpath.replace('.rst',''), name,
description, man_page_authors, 1))
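# For illustration (hypothetical file contents): a CommandGuide page whose
# first line is the title
#   clang - the Clang C, C++, and Objective-C compiler
# followed by an underline of the same length would be split into
# name='clang' and description='the Clang C, C++, and Objective-C compiler'.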
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Clang', u'Clang Documentation',
u'The Clang Team', 'Clang', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| cd80/UtilizedLLVM | tools/clang/docs/conf.py | Python | unlicense | 9,132 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.compute.base import NodeAuthPassword
ECSDriver = get_driver(Provider.ALIYUN_ECS)
region = 'cn-hangzhou'
your_access_key_id = ''
your_access_key_secret = ''
ecs = ECSDriver(your_access_key_id, your_access_key_secret, region=region)
sizes = ecs.list_sizes()
small = sizes[1]
locations = ecs.list_locations()
location = None
for each in locations:
if each.id == region:
location = each
break
if location is None:
    print('could not find %s location' % region)
sys.exit(-1)
print(location.name)
images = ecs.list_images()
print('Found %d images' % len(images))
for each in images:
if 'ubuntu' in each.id.lower():
image = each
break
else:
image = images[0]
print('Use image %s' % image)
sgs = ecs.ex_list_security_groups()
print('Found %d security groups' % len(sgs))
if len(sgs) == 0:
sg = ecs.ex_create_security_group(description='test')
print('Create security group %s' % sg)
else:
sg = sgs[0].id
print('Use security group %s' % sg)
nodes = ecs.list_nodes()
print('Found %d nodes' % len(nodes))
if len(nodes) == 0:
print('Starting create a new node')
data_disk = {
'size': 5,
'category': ecs.disk_categories.CLOUD,
'disk_name': 'data_disk1',
'delete_with_instance': True}
auth = NodeAuthPassword('P@$$w0rd')
ex_internet_charge_type = ecs.internet_charge_types.BY_TRAFFIC
node = ecs.create_node(image=image, size=small, name='test',
ex_security_group_id=sg,
ex_internet_charge_type=ex_internet_charge_type,
ex_internet_max_bandwidth_out=1,
ex_data_disk=data_disk,
auth=auth)
print('Created node %s' % node)
nodes = ecs.list_nodes()
for each in nodes:
print('Found node %s' % each)
| StackPointCloud/libcloud | demos/example_aliyun_ecs.py | Python | apache-2.0 | 2,755 |
# merge_frontend.py
import sys
import io
import os
import pygit2
import collections
import typing
ENCODING = 'utf-8'
class MergeReturn(typing.NamedTuple):
success: bool
merge_result: typing.Optional[object]
class MergeDriver:
driver_id: typing.Optional[str] = None
def pre_announce(self, path: str):
"""
Called before merge() is called, with a human-friendly path for output.
"""
print(f"Merging {self.driver_id}: {path}")
def merge(self, base: typing.BinaryIO, left: typing.BinaryIO, right: typing.BinaryIO) -> MergeReturn:
"""
Read from three BinaryIOs: base (common ancestor), left (ours), and
right (theirs). Perform the actual three-way merge operation. Leave
conflict markers if necessary.
Return (False, None) to indicate the merge driver totally failed.
Return (False, merge_result) if the result contains conflict markers.
Return (True, merge_result) if everything went smoothly.
"""
raise NotImplementedError
def to_file(self, output: typing.BinaryIO, merge_result: object):
"""
Save the merge() result to the given output stream.
Override this if the merge() result is not bytes or str.
"""
if isinstance(merge_result, bytes):
output.write(merge_result)
elif isinstance(merge_result, str):
with io.TextIOWrapper(output, ENCODING) as f:
f.write(merge_result)
else:
raise NotImplementedError
def post_announce(self, success: bool, merge_result: object):
"""
Called after merge() is called, to warn the user if action is needed.
"""
if not success:
print("!!! Manual merge required")
if merge_result:
print(" A best-effort merge was performed. You must finish the job yourself.")
else:
print(" No merge was possible. You must resolve the conflict yourself.")
def main(self, args: typing.List[str] = None):
return _main(self, args or sys.argv[1:])
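# A hedged sketch of a concrete driver (the class name and merge strategy
# below are illustrative, not part of this module):
#
#     class ConcatMergeDriver(MergeDriver):
#         driver_id = 'concat'
#         def merge(self, base, left, right):
#             # Naive resolution: keep both sides verbatim; a real driver
#             # would diff each side against the common ancestor `base`.
#             return MergeReturn(True, left.read() + right.read())
#
#     if __name__ == '__main__':
#         sys.exit(ConcatMergeDriver().main() or 0)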
def _main(driver: MergeDriver, args: typing.List[str]):
if len(args) > 0 and args[0] == '--posthoc':
return _posthoc_main(driver, args[1:])
else:
return _driver_main(driver, args)
def _driver_main(driver: MergeDriver, args: typing.List[str]):
"""
Act like a normal Git merge driver, called by Git during a merge.
"""
if len(args) != 5:
print("merge driver called with wrong number of arguments")
print(" usage: %P %O %A %B %L")
return 1
path, path_base, path_left, path_right, _ = args
driver.pre_announce(path)
with open(path_base, 'rb') as io_base:
with open(path_left, 'rb') as io_left:
with open(path_right, 'rb') as io_right:
success, merge_result = driver.merge(io_base, io_left, io_right)
if merge_result:
# If we got anything, write it to the working directory.
with open(path_left, 'wb') as io_output:
driver.to_file(io_output, merge_result)
driver.post_announce(success, merge_result)
if not success:
# If we were not successful, do not mark the conflict as resolved.
return 1
def _posthoc_main(driver: MergeDriver, args: typing.List[str]):
"""
Apply merge driver logic to a repository which is already in a conflicted
state, running the driver on any conflicted files.
"""
repo_dir = pygit2.discover_repository(os.getcwd())
repo = pygit2.Repository(repo_dir)
conflicts = repo.index.conflicts
if not conflicts:
print("There are no unresolved conflicts.")
return 0
all_success = True
index_changed = False
any_attempted = False
for base, left, right in list(conflicts):
if not base or not left or not right:
# (not left) or (not right): deleted in one branch, modified in the other.
# (not base): added differently in both branches.
# In either case, there's nothing we can do for now.
continue
path = left.path
if not _applies_to(repo, driver, path):
# Skip the file if it's not the right extension.
continue
any_attempted = True
driver.pre_announce(path)
io_base = io.BytesIO(repo[base.id].data)
io_left = io.BytesIO(repo[left.id].data)
io_right = io.BytesIO(repo[right.id].data)
success, merge_result = driver.merge(io_base, io_left, io_right)
if merge_result:
# If we got anything, write it to the working directory.
with open(os.path.join(repo.workdir, path), 'wb') as io_output:
driver.to_file(io_output, merge_result)
if success:
# If we were successful, mark the conflict as resolved.
with open(os.path.join(repo.workdir, path), 'rb') as io_readback:
contents = io_readback.read()
merged_id = repo.create_blob(contents)
repo.index.add(pygit2.IndexEntry(path, merged_id, left.mode))
del conflicts[path]
index_changed = True
if not success:
all_success = False
driver.post_announce(success, merge_result)
if index_changed:
repo.index.write()
if not any_attempted:
print("There are no unresolved", driver.driver_id, "conflicts.")
if not all_success:
# Not usually observed, but indicate the failure just in case.
return 1
def _applies_to(repo: pygit2.Repository, driver: MergeDriver, path: str):
"""
Check if the current merge driver is a candidate to handle a given path.
"""
if not driver.driver_id:
raise ValueError('Driver must have ID to perform post-hoc merge')
return repo.get_attr(path, 'merge') == driver.driver_id
| erwgd/-tg-station | tools/hooks/merge_frontend.py | Python | agpl-3.0 | 5,937 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.training.bucket."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.contrib.training.python.training import bucket_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
def _which_bucket(bucket_edges, v):
"""Identify which bucket v falls into.
Args:
bucket_edges: int array, bucket edges
v: int scalar, index
Returns:
int scalar, the bucket.
If v < bucket_edges[0], return 0.
If bucket_edges[0] <= v < bucket_edges[1], return 1.
...
  If bucket_edges[-2] <= v < bucket_edges[-1], return len(bucket_edges) - 1.
  If v >= bucket_edges[-1], return len(bucket_edges) + 1.
"""
v = np.asarray(v)
full = [0] + bucket_edges
found = np.where(np.logical_and(v >= full[:-1], v < full[1:]))[0]
if not found.size:
return len(full)
return found[0]
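# Worked example (values chosen for illustration): with bucket_edges=[3, 5],
#   _which_bucket([3, 5], 2) == 0   (2 falls below the first edge)
#   _which_bucket([3, 5], 4) == 1   (3 <= 4 < 5)
#   _which_bucket([3, 5], 7) == 3   (beyond the last edge: len(full) == 3)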
class BucketTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
self.scalar_int_feed = array_ops.placeholder(dtypes_lib.int32, ())
self.unk_int64_feed = array_ops.placeholder(dtypes_lib.int64, (None,))
self.vec3_str_feed = array_ops.placeholder(dtypes_lib.string, (3,))
self.sparse_c = sparse_tensor.SparseTensor(
indices=[[0]],
values=[1.0],
dense_shape=[1])
self._coord = coordinator.Coordinator()
# Make capacity very large so we can feed all the inputs in the
# main thread without blocking
input_queue = data_flow_ops.PaddingFIFOQueue(
5000,
dtypes=[dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.string],
shapes=[(), (None,), (3,)])
self._input_enqueue_op = input_queue.enqueue(
(self.scalar_int_feed, self.unk_int64_feed, self.vec3_str_feed))
self.scalar_int, self.unk_int64, self.vec3_str = input_queue.dequeue()
self._threads = None
self._close_op = input_queue.close()
self._sess = None
def enqueue_inputs(self, sess, feed_dict):
sess.run(self._input_enqueue_op, feed_dict=feed_dict)
def start_queue_runners(self, sess):
# Store session to be able to close inputs later
if self._sess is None:
self._sess = sess
self._threads = queue_runner_impl.start_queue_runners(coord=self._coord)
def tearDown(self):
if self._sess is not None:
self._sess.run(self._close_op)
self._coord.request_stop()
self._coord.join(self._threads)
def testSingleBucket(self):
bucketed_dynamic = bucket_ops.bucket(
tensors=[self.scalar_int, self.unk_int64, self.vec3_str, self.sparse_c],
which_bucket=constant_op.constant(0),
num_buckets=2,
batch_size=32,
num_threads=10,
dynamic_pad=True)
# Check shape inference on bucketing outputs
self.assertAllEqual(
[[32], [32, None], [32, 3], [None, None]],
[out.get_shape().as_list() for out in bucketed_dynamic[1]])
with self.cached_session() as sess:
for v in range(32):
self.enqueue_inputs(sess, {
self.scalar_int_feed: v,
self.unk_int64_feed: v * [v],
self.vec3_str_feed: 3 * [str(v)]
})
self.start_queue_runners(sess)
# Get a single minibatch
bucketed_values = sess.run(bucketed_dynamic)
# (which_bucket, bucket_tensors).
self.assertEqual(2, len(bucketed_values))
# Count number of bucket_tensors.
self.assertEqual(4, len(bucketed_values[1]))
# Ensure bucket 0 was used for all minibatch entries.
self.assertAllEqual(0, bucketed_values[0])
expected_scalar_int = np.arange(32)
expected_unk_int64 = np.zeros((32, 31)).astype(np.int64)
for i in range(32):
expected_unk_int64[i, :i] = i
expected_vec3_str = np.vstack(3 * [np.arange(32).astype(bytes)]).T
# Must resort the output because num_threads > 1 leads to
# sometimes-inconsistent insertion order.
resort = np.argsort(bucketed_values[1][0])
self.assertAllEqual(expected_scalar_int, bucketed_values[1][0][resort])
self.assertAllEqual(expected_unk_int64, bucketed_values[1][1][resort])
self.assertAllEqual(expected_vec3_str, bucketed_values[1][2][resort])
def testBatchSizePerBucket(self):
which_bucket = control_flow_ops.cond(self.scalar_int < 5,
lambda: constant_op.constant(0),
lambda: constant_op.constant(1))
batch_sizes = [5, 10]
bucketed_dynamic = bucket_ops.bucket(
tensors=[self.scalar_int, self.unk_int64, self.vec3_str, self.sparse_c],
which_bucket=which_bucket,
num_buckets=2,
batch_size=batch_sizes,
num_threads=1,
dynamic_pad=True)
# Check shape inference on bucketing outputs
self.assertAllEqual(
[[None], [None, None], [None, 3], [None, None]],
[out.get_shape().as_list() for out in bucketed_dynamic[1]])
with self.cached_session() as sess:
for v in range(15):
self.enqueue_inputs(sess, {
self.scalar_int_feed: v,
self.unk_int64_feed: v * [v],
self.vec3_str_feed: 3 * [str(v)]
})
self.start_queue_runners(sess)
# Get two minibatches (one with small values, one with large).
bucketed_values_0 = sess.run(bucketed_dynamic)
bucketed_values_1 = sess.run(bucketed_dynamic)
# Figure out which output has the small values
if bucketed_values_0[0] < 5:
bucketed_values_large, bucketed_values_small = (bucketed_values_1,
bucketed_values_0)
else:
bucketed_values_small, bucketed_values_large = (bucketed_values_0,
bucketed_values_1)
# Ensure bucket 0 was used for all minibatch entries.
self.assertAllEqual(0, bucketed_values_small[0])
self.assertAllEqual(1, bucketed_values_large[0])
# Check that the batch sizes differ per bucket
self.assertEqual(5, len(bucketed_values_small[1][0]))
self.assertEqual(10, len(bucketed_values_large[1][0]))
def testEvenOddBuckets(self):
which_bucket = (self.scalar_int % 2)
bucketed_dynamic = bucket_ops.bucket(
tensors=[self.scalar_int, self.unk_int64, self.vec3_str, self.sparse_c],
which_bucket=which_bucket,
num_buckets=2,
batch_size=32,
num_threads=10,
dynamic_pad=True)
# Check shape inference on bucketing outputs
self.assertAllEqual(
[[32], [32, None], [32, 3], [None, None]],
[out.get_shape().as_list() for out in bucketed_dynamic[1]])
with self.cached_session() as sess:
for v in range(64):
self.enqueue_inputs(sess, {
self.scalar_int_feed: v,
self.unk_int64_feed: v * [v],
self.vec3_str_feed: 3 * [str(v)]
})
self.start_queue_runners(sess)
# Get two minibatches (one containing even values, one containing odds)
bucketed_values_0 = sess.run(bucketed_dynamic)
bucketed_values_1 = sess.run(bucketed_dynamic)
# (which_bucket, bucket_tensors).
self.assertEqual(2, len(bucketed_values_0))
self.assertEqual(2, len(bucketed_values_1))
# Count number of bucket_tensors.
self.assertEqual(4, len(bucketed_values_0[1]))
self.assertEqual(4, len(bucketed_values_1[1]))
# Figure out which output has the even values (there's
# randomness due to the multithreaded nature of bucketing)
if bucketed_values_0[0] % 2 == 1:
bucketed_values_even, bucketed_values_odd = (bucketed_values_1,
bucketed_values_0)
else:
bucketed_values_even, bucketed_values_odd = (bucketed_values_0,
bucketed_values_1)
# Ensure bucket 0 was used for all minibatch entries.
self.assertAllEqual(0, bucketed_values_even[0])
self.assertAllEqual(1, bucketed_values_odd[0])
# Test the first bucket outputted, the events starting at 0
expected_scalar_int = np.arange(0, 32 * 2, 2)
expected_unk_int64 = np.zeros((32, 31 * 2)).astype(np.int64)
for i in range(0, 32):
expected_unk_int64[i, :2 * i] = 2 * i
expected_vec3_str = np.vstack(3 *
[np.arange(0, 32 * 2, 2).astype(bytes)]).T
# Must resort the output because num_threads > 1 leads to
# sometimes-inconsistent insertion order.
resort = np.argsort(bucketed_values_even[1][0])
self.assertAllEqual(expected_scalar_int,
bucketed_values_even[1][0][resort])
self.assertAllEqual(expected_unk_int64,
bucketed_values_even[1][1][resort])
self.assertAllEqual(expected_vec3_str, bucketed_values_even[1][2][resort])
# Test the second bucket outputted, the odds starting at 1
expected_scalar_int = np.arange(1, 32 * 2 + 1, 2)
expected_unk_int64 = np.zeros((32, 31 * 2 + 1)).astype(np.int64)
for i in range(0, 32):
expected_unk_int64[i, :2 * i + 1] = 2 * i + 1
expected_vec3_str = np.vstack(
3 * [np.arange(1, 32 * 2 + 1, 2).astype(bytes)]).T
# Must resort the output because num_threads > 1 leads to
# sometimes-inconsistent insertion order.
resort = np.argsort(bucketed_values_odd[1][0])
self.assertAllEqual(expected_scalar_int,
bucketed_values_odd[1][0][resort])
self.assertAllEqual(expected_unk_int64, bucketed_values_odd[1][1][resort])
self.assertAllEqual(expected_vec3_str, bucketed_values_odd[1][2][resort])
def testEvenOddBucketsFilterOutAllOdd(self):
which_bucket = (self.scalar_int % 2)
keep_input = math_ops.equal(which_bucket, 0)
bucketed_dynamic = bucket_ops.bucket(
tensors=[self.scalar_int, self.unk_int64, self.vec3_str],
which_bucket=which_bucket,
num_buckets=2,
batch_size=32,
num_threads=10,
keep_input=keep_input,
dynamic_pad=True)
# Check shape inference on bucketing outputs
self.assertAllEqual(
[[32], [32, None], [32, 3]],
[out.get_shape().as_list() for out in bucketed_dynamic[1]])
with self.cached_session() as sess:
for v in range(128):
self.enqueue_inputs(sess, {
self.scalar_int_feed: v,
self.unk_int64_feed: v * [v],
self.vec3_str_feed: 3 * [str(v)]
})
self.start_queue_runners(sess)
# Get two minibatches ([0, 2, ...] and [64, 66, ...])
bucketed_values_even0 = sess.run(bucketed_dynamic)
bucketed_values_even1 = sess.run(bucketed_dynamic)
# Ensure that bucket 1 was completely filtered out
self.assertAllEqual(0, bucketed_values_even0[0])
self.assertAllEqual(0, bucketed_values_even1[0])
# Merge their output for sorting and comparison
bucketed_values_all_elem0 = np.concatenate((bucketed_values_even0[1][0],
bucketed_values_even1[1][0]))
self.assertAllEqual(
np.arange(0, 128, 2), sorted(bucketed_values_all_elem0))
def testFailOnWrongBucketCapacities(self):
with self.assertRaisesRegexp(ValueError, r"must have exactly num_buckets"):
bucket_ops.bucket( # 2 buckets and 3 capacities raises ValueError.
tensors=[self.scalar_int, self.unk_int64, self.vec3_str],
which_bucket=constant_op.constant(0), num_buckets=2,
batch_size=32, bucket_capacities=[3, 4, 5])
class BucketBySequenceLengthTest(test.TestCase):
def _testBucketBySequenceLength(self,
allow_small_batch,
bucket_capacities=None,
drain_entire_queue=True):
ops.reset_default_graph()
# All inputs must be identical lengths across tuple index.
# The input reader will get input_length from the first tuple
# entry.
data_len = 4
labels_len = 3
input_pairs = [(length, ([np.int64(length)] * data_len,
[str(length).encode("ascii")] * labels_len))
for length in (1, 3, 4, 5, 6, 10)]
lengths = array_ops.placeholder(dtypes_lib.int32, ())
data = array_ops.placeholder(dtypes_lib.int64, (data_len,))
labels = array_ops.placeholder(dtypes_lib.string, (labels_len,))
batch_size = 8
bucket_boundaries = [3, 4, 5, 10]
num_pairs_to_enqueue = 50 * batch_size + 100
# Make capacity very large so we can feed all the inputs in the
# main thread without blocking
input_queue = data_flow_ops.FIFOQueue(
5000, (dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.string), (
(), (data_len,), (labels_len,)))
input_enqueue_op = input_queue.enqueue((lengths, data, labels))
lengths_t, data_t, labels_t = input_queue.dequeue()
close_input_op = input_queue.close()
(out_lengths_t, data_and_labels_t) = (bucket_ops.bucket_by_sequence_length(
input_length=lengths_t,
tensors=[data_t, labels_t],
batch_size=batch_size,
bucket_boundaries=bucket_boundaries,
bucket_capacities=bucket_capacities,
allow_smaller_final_batch=allow_small_batch,
num_threads=10))
expected_batch_size = None if allow_small_batch else batch_size
self.assertEqual(out_lengths_t.get_shape().as_list(), [expected_batch_size])
self.assertEqual(data_and_labels_t[0].get_shape().as_list(),
[expected_batch_size, data_len])
self.assertEqual(data_and_labels_t[1].get_shape().as_list(),
[expected_batch_size, labels_len])
def _read_test(sess):
num_pairs_dequeued = 0
try:
while drain_entire_queue or num_pairs_dequeued < 40 * batch_size:
(out_lengths, (data, labels)) = sess.run(
(out_lengths_t, data_and_labels_t))
num_pairs_dequeued += out_lengths.shape[0]
if allow_small_batch:
self.assertEqual(data_len, data.shape[1])
self.assertEqual(labels_len, labels.shape[1])
self.assertGreaterEqual(batch_size, out_lengths.shape[0])
self.assertGreaterEqual(batch_size, data.shape[0])
self.assertGreaterEqual(batch_size, labels.shape[0])
else:
self.assertEqual((batch_size, data_len), data.shape)
self.assertEqual((batch_size, labels_len), labels.shape)
self.assertEqual((batch_size,), out_lengths.shape)
for (lr, dr, tr) in zip(out_lengths, data, labels):
# Make sure length matches data (here it's the same value).
self.assertEqual(dr[0], lr)
# Make sure data & labels match.
self.assertEqual(dr[0], int(tr[0].decode("ascii")))
# Make sure for each row, data came from the same bucket.
self.assertEqual(
_which_bucket(bucket_boundaries, dr[0]),
_which_bucket(bucket_boundaries, dr[1]))
except errors.OutOfRangeError:
if allow_small_batch:
self.assertEqual(num_pairs_to_enqueue, num_pairs_dequeued)
else:
# Maximum left over in the queues should be at most one less than the
# batch_size, for every bucket.
num_buckets = len(bucket_boundaries) + 2
self.assertLessEqual(
num_pairs_to_enqueue - (batch_size - 1) * num_buckets,
num_pairs_dequeued)
with self.cached_session() as sess:
coord = coordinator.Coordinator()
# Feed the inputs, then close the input thread.
for _ in range(num_pairs_to_enqueue):
which = random.randint(0, len(input_pairs) - 1)
length, pair = input_pairs[which]
sess.run(input_enqueue_op,
feed_dict={lengths: length,
data: pair[0],
labels: pair[1]})
sess.run(close_input_op)
# Start the queue runners
threads = queue_runner_impl.start_queue_runners(coord=coord)
# Read off the top of the bucket and ensure correctness of output
_read_test(sess)
coord.request_stop()
coord.join(threads)
def testBucketBySequenceLength(self):
self._testBucketBySequenceLength(allow_small_batch=False)
def testBucketBySequenceLengthAllow(self):
self._testBucketBySequenceLength(allow_small_batch=True)
def testBucketBySequenceLengthBucketCapacities(self):
# Above bucket_boundaries = [3, 4, 5, 10] so we need 5 capacities.
with self.assertRaisesRegexp(ValueError, r"must have exactly num_buckets"):
self._testBucketBySequenceLength(allow_small_batch=False,
bucket_capacities=[32, 32, 32, 32])
# Test with different capacities.
capacities = [48, 40, 32, 24, 16]
self._testBucketBySequenceLength(allow_small_batch=True,
bucket_capacities=capacities)
def testBucketBySequenceLengthShutdown(self):
self._testBucketBySequenceLength(allow_small_batch=True,
drain_entire_queue=False)
if __name__ == "__main__":
test.main()
| kevin-coder/tensorflow-fork | tensorflow/contrib/training/python/training/bucket_ops_test.py | Python | apache-2.0 | 18,461 |
"""Tests for the NumpyVersion class.
"""
from __future__ import division, absolute_import, print_function
from numpy.testing import assert_, assert_raises
from numpy.lib import NumpyVersion
def test_main_versions():
assert_(NumpyVersion('1.8.0') == '1.8.0')
for ver in ['1.9.0', '2.0.0', '1.8.1']:
assert_(NumpyVersion('1.8.0') < ver)
for ver in ['1.7.0', '1.7.1', '0.9.9']:
assert_(NumpyVersion('1.8.0') > ver)
def test_version_1_point_10():
# regression test for gh-2998.
assert_(NumpyVersion('1.9.0') < '1.10.0')
assert_(NumpyVersion('1.11.0') < '1.11.1')
assert_(NumpyVersion('1.11.0') == '1.11.0')
assert_(NumpyVersion('1.99.11') < '1.99.12')
def test_alpha_beta_rc():
assert_(NumpyVersion('1.8.0rc1') == '1.8.0rc1')
for ver in ['1.8.0', '1.8.0rc2']:
assert_(NumpyVersion('1.8.0rc1') < ver)
for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']:
assert_(NumpyVersion('1.8.0rc1') > ver)
assert_(NumpyVersion('1.8.0b1') > '1.8.0a2')
def test_dev_version():
assert_(NumpyVersion('1.9.0.dev-Unknown') < '1.9.0')
for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev-ffffffff']:
assert_(NumpyVersion('1.9.0.dev-f16acvda') < ver)
assert_(NumpyVersion('1.9.0.dev-f16acvda') == '1.9.0.dev-11111111')
def test_dev_a_b_rc_mixed():
assert_(NumpyVersion('1.9.0a2.dev-f16acvda') == '1.9.0a2.dev-11111111')
assert_(NumpyVersion('1.9.0a2.dev-6acvda54') < '1.9.0a2')
def test_dev0_version():
assert_(NumpyVersion('1.9.0.dev0+Unknown') < '1.9.0')
for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']:
assert_(NumpyVersion('1.9.0.dev0+f16acvda') < ver)
assert_(NumpyVersion('1.9.0.dev0+f16acvda') == '1.9.0.dev0+11111111')
def test_dev0_a_b_rc_mixed():
assert_(NumpyVersion('1.9.0a2.dev0+f16acvda') == '1.9.0a2.dev0+11111111')
assert_(NumpyVersion('1.9.0a2.dev0+6acvda54') < '1.9.0a2')
def test_raises():
for ver in ['1.9', '1,9.0', '1.7.x']:
assert_raises(ValueError, NumpyVersion, ver)
| kubaszostak/gdal-dragndrop | osgeo/apps/Python27/Lib/site-packages/numpy/lib/tests/test__version.py | Python | mit | 2,055 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class TraceResult(object):
def __init__(self, impl):
self._impl = impl
def Serialize(self, f):
"""Serializes the trace result to a file-like object"""
return self._impl.Serialize(f)
def AsTimelineModel(self):
"""Parses the trace result into a timeline model for in-memory
manipulation."""
return self._impl.AsTimelineModel()
| DirtyUnicorns/android_external_chromium-org | tools/telemetry/telemetry/core/trace_result.py | Python | bsd-3-clause | 520 |
__author__ = 'bromix'
import unittest
"""
class TestCipher(unittest.TestCase):
def setUp(self):
pass
def test_load_javascript(self):
cipher = Cipher()
java_script = ''
with open ("html5player.js", "r") as java_script_file:
java_script = java_script_file.read()
pass
json_script = cipher._load_java_script(java_script)
jse = JsonScriptEngine(json_script)
signature = jse.execute('299D15DC85986F6D8B7BC0E5655F758E6F14B1E33.50BCBEAE15DA02F131DAA96B640C57AAABAB20E20E2')
pass
pass
""" | azumimuo/family-xbmc-addon | zips/plugin.video.youtube/resources/lib/test_youtube/test_cipher.py | Python | gpl-2.0 | 585 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Dates on Sales Order',
'version': '1.1',
'category': 'Sales Management',
'description': """
Add additional date information to the sales order.
===================================================
You can add the following additional dates to a sales order:
------------------------------------------------------------
* Requested Date (will be used as the expected date on pickings)
* Commitment Date
* Effective Date
""",
'website': 'https://www.odoo.com/page/crm',
'depends': ['sale_stock'],
'data': ['sale_order_dates_view.xml'],
'demo': [],
'test': ['test/requested_date.yml'],
'installable': True,
'auto_install': False,
}
| minhphung171093/GreenERP_V9 | openerp/addons/sale_order_dates/__openerp__.py | Python | gpl-3.0 | 797 |
# -*- coding: utf-8 -*-
import os
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
# Register database schemes in URLs.
urlparse.uses_netloc.append('postgres')
urlparse.uses_netloc.append('postgresql')
urlparse.uses_netloc.append('pgsql')
urlparse.uses_netloc.append('postgis')
urlparse.uses_netloc.append('mysql')
urlparse.uses_netloc.append('mysql2')
urlparse.uses_netloc.append('mysqlgis')
urlparse.uses_netloc.append('spatialite')
urlparse.uses_netloc.append('sqlite')
DEFAULT_ENV = 'DATABASE_URL'
SCHEMES = {
'postgres': 'django.db.backends.postgresql_psycopg2',
'postgresql': 'django.db.backends.postgresql_psycopg2',
'pgsql': 'django.db.backends.postgresql_psycopg2',
'postgis': 'django.contrib.gis.db.backends.postgis',
'mysql': 'django.db.backends.mysql',
'mysql2': 'django.db.backends.mysql',
'mysqlgis': 'django.contrib.gis.db.backends.mysql',
'spatialite': 'django.contrib.gis.db.backends.spatialite',
'sqlite': 'django.db.backends.sqlite3',
}
def config(env=DEFAULT_ENV, default=None, engine=None):
"""Returns configured DATABASE dictionary from DATABASE_URL."""
config = {}
s = os.environ.get(env, default)
if s:
config = parse(s, engine)
return config
def parse(url, engine=None):
"""Parses a database URL."""
if url == 'sqlite://:memory:':
# this is a special case, because if we pass this URL into
# urlparse, urlparse will choke trying to interpret "memory"
# as a port number
return {
'ENGINE': SCHEMES['sqlite'],
'NAME': ':memory:'
}
# note: no other settings are required for sqlite
# otherwise parse the url as normal
config = {}
url = urlparse.urlparse(url)
# Remove query strings.
path = url.path[1:]
path = path.split('?', 2)[0]
# if we are using sqlite and we have no path, then assume we
# want an in-memory database (this is the behaviour of sqlalchemy)
if url.scheme == 'sqlite' and path == '':
path = ':memory:'
# Update with environment configuration.
config.update({
'NAME': path or '',
'USER': url.username or '',
'PASSWORD': url.password or '',
'HOST': url.hostname or '',
'PORT': url.port or '',
})
if engine:
config['ENGINE'] = engine
elif url.scheme in SCHEMES:
config['ENGINE'] = SCHEMES[url.scheme]
return config
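# Illustrative example (the URL below is made up):
#   parse('postgres://user:secret@localhost:5432/mydb')
# returns
#   {'NAME': 'mydb', 'USER': 'user', 'PASSWORD': 'secret',
#    'HOST': 'localhost', 'PORT': 5432,
#    'ENGINE': 'django.db.backends.postgresql_psycopg2'}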
| ramcn/demo3 | venv/lib/python3.4/site-packages/dj_database_url.py | Python | mit | 2,475 |
import os
import sys
import shutil
import tempfile
import contextlib
from ._compat import iteritems, PY2
# If someone wants to vendor click, we want to ensure the
# correct package is discovered. Ideally we could use a
# relative import here but unfortunately Python does not
# support that.
clickpkg = sys.modules[__name__.rsplit('.', 1)[0]]
if PY2:
from cStringIO import StringIO
else:
import io
from ._compat import _find_binary_reader
class EchoingStdin(object):
def __init__(self, input, output):
self._input = input
self._output = output
def __getattr__(self, x):
return getattr(self._input, x)
def _echo(self, rv):
self._output.write(rv)
return rv
def read(self, n=-1):
return self._echo(self._input.read(n))
def readline(self, n=-1):
return self._echo(self._input.readline(n))
def readlines(self):
return [self._echo(x) for x in self._input.readlines()]
def __iter__(self):
return iter(self._echo(x) for x in self._input)
def __repr__(self):
return repr(self._input)
def make_input_stream(input, charset):
# Is already an input stream.
if hasattr(input, 'read'):
if PY2:
return input
rv = _find_binary_reader(input)
if rv is not None:
return rv
raise TypeError('Could not find binary reader for input stream.')
if input is None:
input = b''
elif not isinstance(input, bytes):
input = input.encode(charset)
if PY2:
return StringIO(input)
return io.BytesIO(input)
class Result(object):
"""Holds the captured result of an invoked CLI script."""
def __init__(self, runner, output_bytes, exit_code, exception,
exc_info=None):
#: The runner that created the result
self.runner = runner
#: The output as bytes.
self.output_bytes = output_bytes
#: The exit code as integer.
self.exit_code = exit_code
        #: The exception that happened if one did.
self.exception = exception
#: The traceback
self.exc_info = exc_info
@property
def output(self):
"""The output as unicode string."""
return self.output_bytes.decode(self.runner.charset, 'replace') \
.replace('\r\n', '\n')
def __repr__(self):
return '<Result %s>' % (
self.exception and repr(self.exception) or 'okay',
)
class CliRunner(object):
"""The CLI runner provides functionality to invoke a Click command line
    script for unit testing purposes in an isolated environment. This only
works in single-threaded systems without any concurrency as it changes the
global interpreter state.
:param charset: the character set for the input and output data. This is
UTF-8 by default and should not be changed currently as
the reporting to Click only works in Python 2 properly.
:param env: a dictionary with environment variables for overriding.
:param echo_stdin: if this is set to `True`, then reading from stdin writes
to stdout. This is useful for showing examples in
some circumstances. Note that regular prompts
will automatically echo the input.
"""
def __init__(self, charset=None, env=None, echo_stdin=False):
if charset is None:
charset = 'utf-8'
self.charset = charset
self.env = env or {}
self.echo_stdin = echo_stdin
def get_default_prog_name(self, cli):
"""Given a command object it will return the default program name
for it. The default is the `name` attribute or ``"root"`` if not
set.
"""
return cli.name or 'root'
def make_env(self, overrides=None):
"""Returns the environment overrides for invoking a script."""
rv = dict(self.env)
if overrides:
rv.update(overrides)
return rv
@contextlib.contextmanager
def isolation(self, input=None, env=None, color=False):
"""A context manager that sets up the isolation for invoking of a
command line tool. This sets up stdin with the given input data
and `os.environ` with the overrides from the given dictionary.
This also rebinds some internals in Click to be mocked (like the
prompt functionality).
This is automatically done in the :meth:`invoke` method.
.. versionadded:: 4.0
The ``color`` parameter was added.
:param input: the input stream to put into sys.stdin.
:param env: the environment overrides as dictionary.
:param color: whether the output should contain color codes. The
application can still override this explicitly.
"""
input = make_input_stream(input, self.charset)
old_stdin = sys.stdin
old_stdout = sys.stdout
old_stderr = sys.stderr
env = self.make_env(env)
if PY2:
sys.stdout = sys.stderr = bytes_output = StringIO()
if self.echo_stdin:
input = EchoingStdin(input, bytes_output)
else:
bytes_output = io.BytesIO()
if self.echo_stdin:
input = EchoingStdin(input, bytes_output)
input = io.TextIOWrapper(input, encoding=self.charset)
sys.stdout = sys.stderr = io.TextIOWrapper(
bytes_output, encoding=self.charset)
sys.stdin = input
def visible_input(prompt=None):
sys.stdout.write(prompt or '')
val = input.readline().rstrip('\r\n')
sys.stdout.write(val + '\n')
sys.stdout.flush()
return val
def hidden_input(prompt=None):
sys.stdout.write((prompt or '') + '\n')
sys.stdout.flush()
return input.readline().rstrip('\r\n')
def _getchar(echo):
char = sys.stdin.read(1)
if echo:
sys.stdout.write(char)
sys.stdout.flush()
return char
default_color = color
def should_strip_ansi(stream=None, color=None):
if color is None:
return not default_color
return not color
old_visible_prompt_func = clickpkg.termui.visible_prompt_func
old_hidden_prompt_func = clickpkg.termui.hidden_prompt_func
old__getchar_func = clickpkg.termui._getchar
old_should_strip_ansi = clickpkg.utils.should_strip_ansi
clickpkg.termui.visible_prompt_func = visible_input
clickpkg.termui.hidden_prompt_func = hidden_input
clickpkg.termui._getchar = _getchar
clickpkg.utils.should_strip_ansi = should_strip_ansi
old_env = {}
try:
for key, value in iteritems(env):
                old_env[key] = os.environ.get(key)
if value is None:
try:
del os.environ[key]
except Exception:
pass
else:
os.environ[key] = value
yield bytes_output
finally:
for key, value in iteritems(old_env):
if value is None:
try:
del os.environ[key]
except Exception:
pass
else:
os.environ[key] = value
sys.stdout = old_stdout
sys.stderr = old_stderr
sys.stdin = old_stdin
clickpkg.termui.visible_prompt_func = old_visible_prompt_func
clickpkg.termui.hidden_prompt_func = old_hidden_prompt_func
clickpkg.termui._getchar = old__getchar_func
clickpkg.utils.should_strip_ansi = old_should_strip_ansi
def invoke(self, cli, args=None, input=None, env=None,
catch_exceptions=True, color=False, **extra):
"""Invokes a command in an isolated environment. The arguments are
        forwarded directly to the command line script, and the `extra` keyword
arguments are passed to the :meth:`~clickpkg.Command.main` function of
the command.
This returns a :class:`Result` object.
.. versionadded:: 3.0
The ``catch_exceptions`` parameter was added.
.. versionchanged:: 3.0
The result object now has an `exc_info` attribute with the
traceback if available.
.. versionadded:: 4.0
The ``color`` parameter was added.
:param cli: the command to invoke
:param args: the arguments to invoke
:param input: the input data for `sys.stdin`.
:param env: the environment overrides.
        :param catch_exceptions: Whether to catch any exceptions other than
``SystemExit``.
:param extra: the keyword arguments to pass to :meth:`main`.
:param color: whether the output should contain color codes. The
application can still override this explicitly.
"""
exc_info = None
with self.isolation(input=input, env=env, color=color) as out:
exception = None
exit_code = 0
try:
cli.main(args=args or (),
prog_name=self.get_default_prog_name(cli), **extra)
except SystemExit as e:
if e.code != 0:
exception = e
exc_info = sys.exc_info()
exit_code = e.code
if not isinstance(exit_code, int):
sys.stdout.write(str(exit_code))
sys.stdout.write('\n')
exit_code = 1
except Exception as e:
if not catch_exceptions:
raise
exception = e
exit_code = -1
exc_info = sys.exc_info()
finally:
sys.stdout.flush()
output = out.getvalue()
return Result(runner=self,
output_bytes=output,
exit_code=exit_code,
exception=exception,
exc_info=exc_info)
@contextlib.contextmanager
def isolated_filesystem(self):
"""A context manager that creates a temporary folder and changes
the current working directory to it for isolated filesystem tests.
"""
cwd = os.getcwd()
t = tempfile.mkdtemp()
os.chdir(t)
try:
yield t
finally:
os.chdir(cwd)
try:
shutil.rmtree(t)
except (OSError, IOError):
pass
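# Illustrative sketch: how CliRunner.invoke() and Result are typically used in
# a test (this mirrors the canonical pattern from Click's documentation; the
# `hello` command is an example only). Runs only when executed directly and
# assumes the `click` package is importable.
if __name__ == '__main__':
    import click
    from click.testing import CliRunner
    @click.command()
    @click.argument('name')
    def hello(name):
        """Greet NAME on stdout."""
        click.echo('Hello %s!' % name)
    runner = CliRunner()
    result = runner.invoke(hello, ['world'])
    # Result exposes exit_code, output (decoded) and exception for assertions.
    assert result.exit_code == 0
    assert result.output == 'Hello world!\n'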
| gameduell/duell | pylib/click/testing.py | Python | bsd-2-clause | 10,834 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SavedModel Reader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.contrib.saved_model.python.saved_model import reader
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
def tearDownModule():
file_io.delete_recursively(test.get_temp_dir())
class ReaderTest(test.TestCase):
def _init_and_validate_variable(self, sess, variable_name, variable_value):
v = variables.Variable(variable_value, name=variable_name)
sess.run(variables.global_variables_initializer())
self.assertEqual(variable_value, v.eval())
def testReadSavedModelValid(self):
saved_model_dir = os.path.join(test.get_temp_dir(), "valid_saved_model")
builder = saved_model_builder.SavedModelBuilder(saved_model_dir)
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
builder.add_meta_graph_and_variables(sess, [tag_constants.TRAINING])
builder.save()
actual_saved_model_pb = reader.read_saved_model(saved_model_dir)
self.assertEqual(len(actual_saved_model_pb.meta_graphs), 1)
self.assertEqual(
len(actual_saved_model_pb.meta_graphs[0].meta_info_def.tags), 1)
self.assertEqual(actual_saved_model_pb.meta_graphs[0].meta_info_def.tags[0],
tag_constants.TRAINING)
def testReadSavedModelInvalid(self):
saved_model_dir = os.path.join(test.get_temp_dir(), "invalid_saved_model")
with self.assertRaisesRegexp(
IOError, "SavedModel file does not exist at: %s" % saved_model_dir):
reader.read_saved_model(saved_model_dir)
def testGetSavedModelTagSets(self):
saved_model_dir = os.path.join(test.get_temp_dir(), "test_tags")
builder = saved_model_builder.SavedModelBuilder(saved_model_dir)
# Graph with a single variable. SavedModel invoked to:
    # - add the graph with weights.
    # - use a single tag (from predefined constants).
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
builder.add_meta_graph_and_variables(sess, [tag_constants.TRAINING])
# Graph that updates the single variable. SavedModel invoked to:
# - simply add the model (weights are not updated).
# - a single tag (from predefined constants).
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 43)
builder.add_meta_graph([tag_constants.SERVING])
# Graph that updates the single variable. SavedModel is invoked:
# - to add the model (weights are not updated).
# - multiple predefined tags.
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 44)
builder.add_meta_graph([tag_constants.SERVING, tag_constants.GPU])
# Graph that updates the single variable. SavedModel is invoked:
# - to add the model (weights are not updated).
# - multiple predefined tags for serving on TPU.
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 44)
builder.add_meta_graph([tag_constants.SERVING, tag_constants.TPU])
# Graph that updates the single variable. SavedModel is invoked:
# - to add the model (weights are not updated).
# - multiple custom tags.
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 45)
builder.add_meta_graph(["foo", "bar"])
# Save the SavedModel to disk.
builder.save()
actual_tags = reader.get_saved_model_tag_sets(saved_model_dir)
expected_tags = [["train"], ["serve"], ["serve", "gpu"], ["serve", "tpu"],
["foo", "bar"]]
self.assertEqual(expected_tags, actual_tags)
if __name__ == "__main__":
test.main()
| jalexvig/tensorflow | tensorflow/contrib/saved_model/python/saved_model/reader_test.py | Python | apache-2.0 | 4,761 |
# (c) 2020, Felix Fontein <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
def add_internal_fqcns(names):
'''
    Given a sequence of action/module names, returns a list containing these
    names plus, for every name that is not already an FQCN, the same name
    with the `ansible.builtin.` and `ansible.legacy.` prefixes added.
'''
result = []
for name in names:
result.append(name)
if '.' not in name:
result.append('ansible.builtin.%s' % name)
result.append('ansible.legacy.%s' % name)
return result
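# Illustrative sketch: a quick demonstration of the expansion performed above,
# using made-up module names. Runs only when this file is executed directly.
if __name__ == '__main__':
    expanded = add_internal_fqcns(['copy', 'community.general.ufw'])
    # Short names gain the builtin/legacy prefixes; FQCNs pass through as-is.
    assert expanded == ['copy', 'ansible.builtin.copy', 'ansible.legacy.copy',
                        'community.general.ufw']
    print(expanded)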
| dmsimard/ansible | lib/ansible/utils/fqcn.py | Python | gpl-3.0 | 1,268 |
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
# Import extensions to pull in osapi_compute_extension CONF option used below.
from nova.tests.functional import integrated_helpers
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class ExtensionsTest(integrated_helpers._IntegratedTestBase):
_api_version = 'v2'
def _get_flags(self):
f = super(ExtensionsTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.tests.unit.api.openstack.compute.extensions.'
'foxinsocks.Foxinsocks')
return f
def test_get_foxnsocks(self):
# Simple check that fox-n-socks works.
response = self.api.api_request('/foxnsocks')
foxnsocks = response.content
LOG.debug("foxnsocks: %s" % foxnsocks)
self.assertEqual('Try to say this Mr. Knox, sir...', foxnsocks)
| jeffrey4l/nova | nova/tests/functional/test_extensions.py | Python | apache-2.0 | 1,578 |
#!/usr/bin/env python
"""Show python and pip versions."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
import warnings
warnings.simplefilter('ignore') # avoid python version deprecation warnings when using newer pip dependencies
try:
import pip
except ImportError:
pip = None
print(sys.version)
if pip:
print('pip %s from %s' % (pip.__version__, os.path.dirname(pip.__file__)))
| thnee/ansible | test/lib/ansible_test/_util/controller/tools/versions.py | Python | gpl-3.0 | 460 |
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
from telemetry.core import web_contents
def UrlToExtensionId(url):
return re.match(r"(chrome-extension://)([^/]+)", url).group(2)
class ExtensionPage(web_contents.WebContents):
"""Represents an extension page in the browser"""
def __init__(self, inspector_backend, backend_list):
super(ExtensionPage, self).__init__(inspector_backend, backend_list)
self.url = inspector_backend.url
self.extension_id = UrlToExtensionId(self.url)
def Reload(self):
"""Reloading an extension page is used as a workaround for an extension
binding bug for old versions of Chrome (crbug.com/263162). After Navigate
returns, we are guaranteed that the inspected page is in the correct state.
"""
self._inspector_backend.Navigate(self.url, None, 10)
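# Illustrative sketch: UrlToExtensionId() extracts the host component of a
# chrome-extension:// URL. The id below is a shortened, made-up example, and
# running this block assumes the telemetry package is importable.
if __name__ == '__main__':
    example_url = 'chrome-extension://abcdefghijklmnop/background.html'
    assert UrlToExtensionId(example_url) == 'abcdefghijklmnop'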
| 7kbird/chrome | tools/telemetry/telemetry/core/extension_page.py | Python | bsd-3-clause | 946 |
# Copyright 2011 Red Hat, Inc.
# Copyright 2003 Brent Fox <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# Authors:
# Miroslav Suchy <[email protected]>
import sys
sys.path.append("/usr/share/rhn")
from up2date_client import rhnreg, rhnregGui, rhnserver
import gtk
import gettext
_ = lambda x: gettext.ldgettext("rhn-client-tools", x)
gtk.glade.bindtextdomain("rhn-client-tools")
from firstboot.module import Module
from firstboot.constants import RESULT_SUCCESS, RESULT_FAILURE, RESULT_JUMP
class moduleClass(Module):
def __init__(self):
Module.__init__(self)
self.priority = 108.6
self.sidebarTitle = _("Select operating system release")
self.title = _("Select operating system release")
self.chooseChannel = FirstbootChooseChannelPage()
def needsNetwork(self):
return True
def apply(self, interface, testing=False):
if testing:
return RESULT_SUCCESS
self.chooseChannel.chooseChannelPageApply()
return RESULT_SUCCESS
def createScreen(self):
self.vbox = gtk.VBox(spacing=5)
self.vbox.pack_start(self.chooseChannel.chooseChannelPageVbox(), True, True)
def initializeUI(self):
        # populate capabilities - needed for EUSsupported
s = rhnserver.RhnServer()
s.capabilities.validate()
        # this populates zstream channels as a side effect
self.chooseChannel.chooseChannelShouldBeShown()
self.chooseChannel.chooseChannelPagePrepare()
def shouldAppear(self):
return not rhnreg.registered()
class FirstbootChooseChannelPage(rhnregGui.ChooseChannelPage):
pass
| lhellebr/spacewalk | client/rhel/rhn-client-tools/src/firstboot-legacy-rhel5/rhn_choose_channel.py | Python | gpl-2.0 | 2,255 |
"""
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 - from their raw images. In contrast to the
covertype dataset, the feature space is homogeneous.
Example of output:
[..]
Classification performance:
===========================
    Classifier               train-time   test-time   error-rate
    ------------------------------------------------------------
    MLP_adam                     53.46s       0.11s       0.0224
    Nystroem-SVM                112.97s       0.92s       0.0228
    MultilayerPerceptron         24.33s       0.14s       0.0287
    ExtraTrees                   42.99s       0.57s       0.0294
    RandomForest                 42.70s       0.49s       0.0318
    SampledRBF-SVM              135.81s       0.56s       0.0486
    LogisticRegression-SAG       16.67s       0.06s       0.0824
    CART                         20.69s       0.02s       0.1219
    dummy                         0.00s       0.01s       0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
# Load dataset
print("Loading dataset...")
data = fetch_mldata('MNIST original')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
# Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
'RandomForest': RandomForestClassifier(n_estimators=100),
'Nystroem-SVM': make_pipeline(
Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM': make_pipeline(
RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'LogisticRegression-SAG': LogisticRegression(solver='sag', tol=1e-1,
C=1e4),
'LogisticRegression-SAGA': LogisticRegression(solver='saga', tol=1e-1,
C=1e4),
'MultilayerPerceptron': MLPClassifier(
hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
solver='sgd', learning_rate_init=0.2, momentum=0.9, verbose=1,
tol=1e-4, random_state=1),
'MLP-adam': MLPClassifier(
hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
solver='adam', learning_rate_init=0.001, verbose=1,
tol=1e-4, random_state=1)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
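# Example invocation (illustrative; the timings shown in the module docstring
# will vary from machine to machine):
#   python bench_mnist.py --classifiers ExtraTrees Nystroem-SVM --n-jobs 4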
| Titan-C/scikit-learn | benchmarks/bench_mnist.py | Python | bsd-3-clause | 6,977 |
# Based on the buildah connection plugin
# Copyright (c) 2017 Ansible Project
# 2018 Kushal Das
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
#
# Written by: Kushal Das (https://github.com/kushaldas)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
connection: qubes
short_description: Interact with an existing QubesOS AppVM
description:
- Run commands or put/fetch files to an existing Qubes AppVM using qubes tools.
author: Kushal Das (@kushaldas)
version_added: "2.8"
options:
remote_addr:
description:
- vm name
default: inventory_hostname
vars:
- name: ansible_host
remote_user:
description:
- The user to execute as inside the vm.
default: The *user* account as default in Qubes OS.
vars:
- name: ansible_user
# keyword:
# - name: hosts
"""
import shlex
import shutil
import os
import base64
import subprocess
import ansible.constants as C
from ansible.module_utils._text import to_bytes, to_native
from ansible.plugins.connection import ConnectionBase, ensure_connect
from ansible.errors import AnsibleConnectionFailure
from ansible.utils.display import Display
display = Display()
# this _has to be_ named Connection
class Connection(ConnectionBase):
"""This is a connection plugin for qubes: it uses qubes-run-vm binary to interact with the containers."""
# String used to identify this Connection class from other classes
transport = 'qubes'
has_pipelining = True
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
self._remote_vmname = self._play_context.remote_addr
self._connected = False
# Default username in Qubes
self.user = "user"
if self._play_context.remote_user:
self.user = self._play_context.remote_user
def _qubes(self, cmd=None, in_data=None, shell="qubes.VMShell"):
"""run qvm-run executable
:param cmd: cmd string for remote system
        :param in_data: data passed to qvm-run's stdin
:return: return code, stdout, stderr
"""
display.vvvv("CMD: ", cmd)
if not cmd.endswith("\n"):
cmd = cmd + "\n"
local_cmd = []
# For dom0
local_cmd.extend(["qvm-run", "--pass-io", "--service"])
if self.user != "user":
# Means we have a remote_user value
local_cmd.extend(["-u", self.user])
local_cmd.append(self._remote_vmname)
local_cmd.append(shell)
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
display.vvvv("Local cmd: ", local_cmd)
display.vvv("RUN %s" % (local_cmd,), host=self._remote_vmname)
p = subprocess.Popen(local_cmd, shell=False, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Here we are writing the actual command to the remote bash
p.stdin.write(to_bytes(cmd, errors='surrogate_or_strict'))
stdout, stderr = p.communicate(input=in_data)
return p.returncode, stdout, stderr
def _connect(self):
"""No persistent connection is being maintained."""
super(Connection, self)._connect()
self._connected = True
@ensure_connect
def exec_command(self, cmd, in_data=None, sudoable=False):
"""Run specified command in a running QubesVM """
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
display.vvvv("CMD IS: %s" % cmd)
rc, stdout, stderr = self._qubes(cmd)
display.vvvvv("STDOUT %r STDERR %r" % (stderr, stderr))
return rc, stdout, stderr
def put_file(self, in_path, out_path):
""" Place a local file located in 'in_path' inside VM at 'out_path' """
super(Connection, self).put_file(in_path, out_path)
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._remote_vmname)
with open(in_path, "rb") as fobj:
source_data = fobj.read()
retcode, dummy, dummy = self._qubes('cat > "{0}"\n'.format(out_path), source_data, "qubes.VMRootShell")
# if qubes.VMRootShell service not supported, fallback to qubes.VMShell and
# hope it will have appropriate permissions
if retcode == 127:
retcode, dummy, dummy = self._qubes('cat > "{0}"\n'.format(out_path), source_data)
if retcode != 0:
raise AnsibleConnectionFailure('Failed to put_file to {0}'.format(out_path))
def fetch_file(self, in_path, out_path):
"""Obtain file specified via 'in_path' from the container and place it at 'out_path' """
super(Connection, self).fetch_file(in_path, out_path)
display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._remote_vmname)
# We are running in dom0
cmd_args_list = ["qvm-run", "--pass-io", self._remote_vmname, "cat {0}".format(in_path)]
with open(out_path, "wb") as fobj:
p = subprocess.Popen(cmd_args_list, shell=False, stdout=fobj)
p.communicate()
if p.returncode != 0:
raise AnsibleConnectionFailure('Failed to fetch file to {0}'.format(out_path))
def close(self):
""" Closing the connection """
super(Connection, self).close()
self._connected = False
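# Illustrative usage sketch: an inventory entry that routes a host through
# this connection plugin. The VM name "work" is a made-up example.
#   [appvms]
#   work ansible_connection=qubes ansible_user=user
# Tasks for "work" are then executed inside that AppVM via qvm-run from dom0.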
| alxgu/ansible | lib/ansible/plugins/connection/qubes.py | Python | gpl-3.0 | 5,603 |
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_static_route
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosStaticRouteModule(TestNxosModule):
module = nxos_static_route
def setUp(self):
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_static_route.load_config')
self.load_config = self.mock_load_config.start()
self.mock_get_config = patch('ansible.modules.network.nxos.nxos_static_route.get_config')
self.get_config = self.mock_get_config.start()
def tearDown(self):
self.mock_load_config.stop()
self.mock_get_config.stop()
def load_fixtures(self, commands=None, device=''):
self.get_config.return_value = load_fixture('', 'nxos_static_route.cfg')
self.load_config.return_value = None
def test_nxos_static_route_present(self):
set_module_args(dict(prefix='192.168.20.64/24', next_hop='3.3.3.3'))
self.execute_module(changed=True, commands=['ip route 192.168.20.0/24 3.3.3.3'])
def test_nxos_static_route_present_no_defaults(self):
set_module_args(dict(prefix='192.168.20.64/24', next_hop='3.3.3.3',
route_name='testing', pref=100))
self.execute_module(changed=True, commands=['ip route 192.168.20.0/24 3.3.3.3 name testing 100'])
def test_nxos_static_route_present_vrf(self):
set_module_args(dict(prefix='192.168.20.64/24', next_hop='3.3.3.3', vrf='test'))
self.execute_module(changed=True, sort=False, commands=['vrf context test', 'ip route 192.168.20.0/24 3.3.3.3'])
def test_nxos_static_route_no_change(self):
set_module_args(dict(prefix='10.10.30.64/24', next_hop='1.2.4.8'))
self.execute_module(changed=False, commands=[])
def test_nxos_static_route_absent(self):
set_module_args(dict(prefix='10.10.30.12/24', next_hop='1.2.4.8', state='absent'))
self.execute_module(changed=True, commands=['no ip route 10.10.30.0/24 1.2.4.8'])
def test_nxos_static_route_absent_no_change(self):
set_module_args(dict(prefix='192.168.20.6/24', next_hop='3.3.3.3', state='absent'))
self.execute_module(changed=False, commands=[])
def test_nxos_static_route_absent_vrf(self):
set_module_args(dict(prefix='10.11.12.13/14', next_hop='15.16.17.18', vrf='test', state='absent'))
self.execute_module(
changed=True, sort=False,
commands=['vrf context test', 'no ip route 10.8.0.0/14 15.16.17.18']
)
| ppanczyk/ansible | test/units/modules/network/nxos/test_nxos_static_route.py | Python | gpl-3.0 | 3,385 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import project
import company
import report
import res_partner
import res_config
import web_planner
| vileopratama/vitech | src/addons/project/__init__.py | Python | mit | 200 |
#!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script lays out the PNaCl translator files for a
normal Chrome installer, for one platform. After it has been run once per
architecture, the result can then be packed into a multi-CRX zip file.
This script depends on and pulls in the translator nexes and libraries
from the toolchain directory (so that must be downloaded first) and
it depends on the pnacl_irt_shim.
"""
import json
import logging
import optparse
import os
import platform
import re
import shutil
import sys
J = os.path.join
######################################################################
# Target arch and build arch junk to convert between all the
# silly conventions between SCons, Chrome and PNaCl.
# The version of the arch used by NaCl manifest files.
# This is based on the machine "building" this extension.
# We also use this to identify the different arch-specific versions of
# this extension.
def CanonicalArch(arch):
if arch in ('x86_64', 'x86-64', 'x64', 'amd64'):
return 'x86-64'
# TODO(jvoung): be more specific about the arm architecture version?
if arch in ('arm', 'armv7'):
return 'arm'
if re.match('^i.86$', arch) or arch in ('x86_32', 'x86-32', 'ia32', 'x86'):
return 'x86-32'
return None
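# Illustrative examples: CanonicalArch() normalizes the various arch spellings
# used by SCons, Chrome and PNaCl, e.g.
#   CanonicalArch('amd64') -> 'x86-64'
#   CanonicalArch('i686')  -> 'x86-32'   (matches the r'^i.86$' pattern)
#   CanonicalArch('armv7') -> 'arm'
# Anything unrecognized yields None.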
def GetBuildArch():
arch = platform.machine()
return CanonicalArch(arch)
BUILD_ARCH = GetBuildArch()
ARCHES = ['x86-32', 'x86-64', 'arm']
def IsValidArch(arch):
return arch in ARCHES
# The version of the arch used by configure and pnacl's build.sh.
def StandardArch(arch):
return {'x86-32': 'i686',
'x86-64': 'x86_64',
'arm' : 'armv7'}[arch]
######################################################################
def GetNaClRoot():
""" Find the native_client path, relative to this script.
This script is in ppapi/... and native_client is a sibling of ppapi.
"""
script_file = os.path.abspath(__file__)
def SearchForNaCl(cur_dir):
if cur_dir.endswith('ppapi'):
parent = os.path.dirname(cur_dir)
sibling = os.path.join(parent, 'native_client')
if not os.path.isdir(sibling):
raise Exception('Could not find native_client relative to %s' %
script_file)
return sibling
    # Detect when we've reached the root (linux is /, but windows is not...)
next_dir = os.path.dirname(cur_dir)
if cur_dir == next_dir:
raise Exception('Could not find native_client relative to %s' %
script_file)
return SearchForNaCl(next_dir)
return SearchForNaCl(script_file)
NACL_ROOT = GetNaClRoot()
######################################################################
# Normalize the platform name to be the way SCons finds chrome binaries.
# This is based on the platform "building" the extension.
def GetBuildPlatform():
if sys.platform == 'darwin':
platform = 'mac'
elif sys.platform.startswith('linux'):
platform = 'linux'
elif sys.platform in ('cygwin', 'win32'):
platform = 'windows'
else:
raise Exception('Unknown platform: %s' % sys.platform)
return platform
BUILD_PLATFORM = GetBuildPlatform()
def DetermineInstallerArches(target_arch):
arch = CanonicalArch(target_arch)
if not IsValidArch(arch):
raise Exception('Unknown target_arch %s' % target_arch)
# On windows, we need x86-32 and x86-64 (assuming non-windows RT).
if BUILD_PLATFORM == 'windows':
if arch.startswith('x86'):
return ['x86-32', 'x86-64']
else:
raise Exception('Unknown target_arch on windows w/ target_arch == %s' %
target_arch)
else:
return [arch]
######################################################################
class PnaclPackaging(object):
package_base = os.path.dirname(__file__)
# File paths that are set from the command line.
pnacl_template = None
tool_revisions = None
# Agreed-upon name for pnacl-specific info.
pnacl_json = 'pnacl.json'
@staticmethod
def SetPnaclInfoTemplatePath(path):
PnaclPackaging.pnacl_template = path
@staticmethod
def SetToolsRevisionPath(path):
PnaclPackaging.tool_revisions = path
@staticmethod
def PnaclToolsRevision():
with open(PnaclPackaging.tool_revisions, 'r') as f:
for line in f.read().splitlines():
if line.startswith('PNACL_VERSION'):
_, version = line.split('=')
# CWS happens to use version quads, so make it a quad too.
# However, each component of the quad is limited to 64K max.
# Try to handle a bit more.
max_version = 2 ** 16
version = int(version)
version_more = version / max_version
version = version % max_version
return '0.1.%d.%d' % (version_more, version)
raise Exception('Cannot find PNACL_VERSION in TOOL_REVISIONS file: %s' %
PnaclPackaging.tool_revisions)
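  # Worked example: with PNACL_VERSION = 70000 the code above computes
  # version_more = 70000 / 65536 = 1 (integer division) and
  # version = 70000 % 65536 = 4464, giving the version string '0.1.1.4464'.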
@staticmethod
def GeneratePnaclInfo(target_dir, abi_version, arch):
    # A note on versions: pnacl_version is the version of the translator built
# by the NaCl repo, while abi_version is bumped when the NaCl sandbox
# actually changes.
pnacl_version = PnaclPackaging.PnaclToolsRevision()
with open(PnaclPackaging.pnacl_template, 'r') as pnacl_template_fd:
pnacl_template = json.load(pnacl_template_fd)
out_name = J(target_dir, UseWhitelistedChars(PnaclPackaging.pnacl_json,
None))
with open(out_name, 'w') as output_fd:
pnacl_template['pnacl-arch'] = arch
pnacl_template['pnacl-version'] = pnacl_version
json.dump(pnacl_template, output_fd, sort_keys=True, indent=4)
######################################################################
class PnaclDirs(object):
toolchain_dir = J(NACL_ROOT, 'toolchain')
output_dir = J(toolchain_dir, 'pnacl-package')
@staticmethod
def TranslatorRoot():
return J(PnaclDirs.toolchain_dir, 'pnacl_translator')
@staticmethod
def LibDir(target_arch):
return J(PnaclDirs.TranslatorRoot(), 'lib-%s' % target_arch)
@staticmethod
def SandboxedCompilerDir(target_arch):
return J(PnaclDirs.toolchain_dir,
'pnacl_translator', StandardArch(target_arch), 'bin')
@staticmethod
def SetOutputDir(d):
PnaclDirs.output_dir = d
@staticmethod
def OutputDir():
return PnaclDirs.output_dir
@staticmethod
def OutputAllDir(version_quad):
return J(PnaclDirs.OutputDir(), version_quad)
@staticmethod
def OutputArchBase(arch):
return '%s' % arch
@staticmethod
def OutputArchDir(arch):
# Nest this in another directory so that the layout will be the same
# as the "all"/universal version.
parent_dir = J(PnaclDirs.OutputDir(), PnaclDirs.OutputArchBase(arch))
return (parent_dir, J(parent_dir, PnaclDirs.OutputArchBase(arch)))
######################################################################
def StepBanner(short_desc, long_desc):
logging.info("**** %s\t%s", short_desc, long_desc)
def Clean():
out_dir = PnaclDirs.OutputDir()
StepBanner('CLEAN', 'Cleaning out old packaging: %s' % out_dir)
if os.path.isdir(out_dir):
shutil.rmtree(out_dir)
else:
logging.info('Clean skipped -- no previous output directory!')
######################################################################
def UseWhitelistedChars(orig_basename, arch):
""" Make the filename match the pattern expected by nacl_file_host.
  Currently, this assumes there is a prefix "pnacl_public_" and
that the allowed chars are in the set [a-zA-Z0-9_].
"""
if arch:
target_basename = 'pnacl_public_%s_%s' % (arch, orig_basename)
else:
target_basename = 'pnacl_public_%s' % orig_basename
result = re.sub(r'[^a-zA-Z0-9_]', '_', target_basename)
logging.info('UseWhitelistedChars using: %s' % result)
return result
def CopyFlattenDirsAndPrefix(src_dir, arch, dest_dir):
""" Copy files from src_dir to dest_dir.
When copying, also rename the files such that they match the white-listing
pattern in chrome/browser/nacl_host/nacl_file_host.cc.
"""
for (root, dirs, files) in os.walk(src_dir, followlinks=True):
for f in files:
# Assume a flat directory.
assert (f == os.path.basename(f))
full_name = J(root, f)
target_name = UseWhitelistedChars(f, arch)
shutil.copy(full_name, J(dest_dir, target_name))
def BuildArchForInstaller(version_quad, arch, lib_overrides):
""" Build an architecture specific version for the chrome installer.
"""
target_dir = PnaclDirs.OutputDir()
StepBanner('BUILD INSTALLER',
'Packaging for arch %s in %s' % (arch, target_dir))
# Copy llc.nexe and ld.nexe, but with some renaming and directory flattening.
CopyFlattenDirsAndPrefix(PnaclDirs.SandboxedCompilerDir(arch),
arch,
target_dir)
# Copy native libraries, also with renaming and directory flattening.
CopyFlattenDirsAndPrefix(PnaclDirs.LibDir(arch), arch, target_dir)
# Also copy files from the list of overrides.
# This needs the arch tagged onto the name too, like the other files.
if arch in lib_overrides:
for override in lib_overrides[arch]:
override_base = os.path.basename(override)
target_name = UseWhitelistedChars(override_base, arch)
shutil.copy(override, J(target_dir, target_name))
def BuildInstallerStyle(version_quad, lib_overrides, arches):
""" Package the pnacl component for use within the chrome installer
infrastructure. These files need to be named in a special way
so that white-listing of files is easy.
"""
StepBanner("BUILD_ALL", "Packaging installer for version: %s" % version_quad)
for arch in arches:
BuildArchForInstaller(version_quad, arch, lib_overrides)
# Generate pnacl info manifest.
# Hack around the fact that there may be more than one arch, on Windows.
if len(arches) == 1:
arches = arches[0]
PnaclPackaging.GeneratePnaclInfo(PnaclDirs.OutputDir(), version_quad, arches)
######################################################################
def Main():
usage = 'usage: %prog [options] version_arg'
parser = optparse.OptionParser(usage)
# We may want to accept a target directory to dump it in the usual
# output directory (e.g., scons-out).
parser.add_option('-c', '--clean', dest='clean',
action='store_true', default=False,
help='Clean out destination directory first.')
parser.add_option('-d', '--dest', dest='dest',
help='The destination root for laying out the extension')
parser.add_option('-L', '--lib_override',
dest='lib_overrides', action='append', default=[],
help='Specify path to a fresher native library ' +
'that overrides the tarball library with ' +
'(arch:libfile) tuple.')
parser.add_option('-t', '--target_arch',
dest='target_arch', default=None,
help='Only generate the chrome installer version for arch')
parser.add_option('--info_template_path',
dest='info_template_path', default=None,
help='Path of the info template file')
parser.add_option('--tool_revisions_path', dest='tool_revisions_path',
default=None, help='Location of NaCl TOOL_REVISIONS file.')
parser.add_option('-v', '--verbose', dest='verbose', default=False,
action='store_true',
help='Print verbose debug messages.')
(options, args) = parser.parse_args()
if options.verbose:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.ERROR)
logging.info('pnacl_component_crx_gen w/ options %s and args %s\n'
% (options, args))
# Set destination directory before doing any cleaning, etc.
if options.dest:
PnaclDirs.SetOutputDir(options.dest)
if options.clean:
Clean()
if options.info_template_path:
PnaclPackaging.SetPnaclInfoTemplatePath(options.info_template_path)
if options.tool_revisions_path:
PnaclPackaging.SetToolsRevisionPath(options.tool_revisions_path)
lib_overrides = {}
for o in options.lib_overrides:
arch, override_lib = o.split(',')
arch = CanonicalArch(arch)
if not IsValidArch(arch):
raise Exception('Unknown arch for -L: %s (from %s)' % (arch, o))
if not os.path.isfile(override_lib):
raise Exception('Override native lib not a file for -L: %s (from %s)' %
(override_lib, o))
override_list = lib_overrides.get(arch, [])
override_list.append(override_lib)
lib_overrides[arch] = override_list
if len(args) != 1:
parser.print_help()
parser.error('Incorrect number of arguments')
abi_version = int(args[0])
arches = DetermineInstallerArches(options.target_arch)
BuildInstallerStyle(abi_version, lib_overrides, arches)
return 0
if __name__ == '__main__':
sys.exit(Main())
| GeyerA/android_external_chromium_org | ppapi/native_client/src/untrusted/pnacl_support_extension/pnacl_component_crx_gen.py | Python | bsd-3-clause | 13,105 |
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for twisted.enterprise.adbapi.
"""
from twisted.trial import unittest
import os, stat, new
from twisted.enterprise.adbapi import ConnectionPool, ConnectionLost, safe
from twisted.enterprise.adbapi import Connection, Transaction
from twisted.enterprise.adbapi import _unreleasedVersion
from twisted.internet import reactor, defer, interfaces
from twisted.python.failure import Failure
simple_table_schema = """
CREATE TABLE simple (
x integer
)
"""
class ADBAPITestBase:
"""Test the asynchronous DB-API code."""
openfun_called = {}
if interfaces.IReactorThreads(reactor, None) is None:
skip = "ADB-API requires threads, no way to test without them"
def extraSetUp(self):
"""
Set up the database and create a connection pool pointing at it.
"""
self.startDB()
self.dbpool = self.makePool(cp_openfun=self.openfun)
self.dbpool.start()
def tearDown(self):
d = self.dbpool.runOperation('DROP TABLE simple')
d.addCallback(lambda res: self.dbpool.close())
d.addCallback(lambda res: self.stopDB())
return d
def openfun(self, conn):
self.openfun_called[conn] = True
def checkOpenfunCalled(self, conn=None):
if not conn:
self.failUnless(self.openfun_called)
else:
self.failUnless(self.openfun_called.has_key(conn))
def testPool(self):
d = self.dbpool.runOperation(simple_table_schema)
if self.test_failures:
d.addCallback(self._testPool_1_1)
d.addCallback(self._testPool_1_2)
d.addCallback(self._testPool_1_3)
d.addCallback(self._testPool_1_4)
d.addCallback(lambda res: self.flushLoggedErrors())
d.addCallback(self._testPool_2)
d.addCallback(self._testPool_3)
d.addCallback(self._testPool_4)
d.addCallback(self._testPool_5)
d.addCallback(self._testPool_6)
d.addCallback(self._testPool_7)
d.addCallback(self._testPool_8)
d.addCallback(self._testPool_9)
return d
def _testPool_1_1(self, res):
d = defer.maybeDeferred(self.dbpool.runQuery, "select * from NOTABLE")
d.addCallbacks(lambda res: self.fail('no exception'),
lambda f: None)
return d
def _testPool_1_2(self, res):
d = defer.maybeDeferred(self.dbpool.runOperation,
"deletexxx from NOTABLE")
d.addCallbacks(lambda res: self.fail('no exception'),
lambda f: None)
return d
def _testPool_1_3(self, res):
d = defer.maybeDeferred(self.dbpool.runInteraction,
self.bad_interaction)
d.addCallbacks(lambda res: self.fail('no exception'),
lambda f: None)
return d
def _testPool_1_4(self, res):
d = defer.maybeDeferred(self.dbpool.runWithConnection,
self.bad_withConnection)
d.addCallbacks(lambda res: self.fail('no exception'),
lambda f: None)
return d
def _testPool_2(self, res):
# verify simple table is empty
sql = "select count(1) from simple"
d = self.dbpool.runQuery(sql)
def _check(row):
self.failUnless(int(row[0][0]) == 0, "Interaction not rolled back")
self.checkOpenfunCalled()
d.addCallback(_check)
return d
def _testPool_3(self, res):
sql = "select count(1) from simple"
inserts = []
# add some rows to simple table (runOperation)
for i in range(self.num_iterations):
sql = "insert into simple(x) values(%d)" % i
inserts.append(self.dbpool.runOperation(sql))
d = defer.gatherResults(inserts)
def _select(res):
# make sure they were added (runQuery)
sql = "select x from simple order by x";
d = self.dbpool.runQuery(sql)
return d
d.addCallback(_select)
def _check(rows):
self.failUnless(len(rows) == self.num_iterations,
"Wrong number of rows")
for i in range(self.num_iterations):
self.failUnless(len(rows[i]) == 1, "Wrong size row")
self.failUnless(rows[i][0] == i, "Values not returned.")
d.addCallback(_check)
return d
def _testPool_4(self, res):
# runInteraction
d = self.dbpool.runInteraction(self.interaction)
d.addCallback(lambda res: self.assertEquals(res, "done"))
return d
def _testPool_5(self, res):
# withConnection
d = self.dbpool.runWithConnection(self.withConnection)
d.addCallback(lambda res: self.assertEquals(res, "done"))
return d
def _testPool_6(self, res):
# Test a withConnection cannot be closed
d = self.dbpool.runWithConnection(self.close_withConnection)
return d
def _testPool_7(self, res):
# give the pool a workout
ds = []
for i in range(self.num_iterations):
sql = "select x from simple where x = %d" % i
ds.append(self.dbpool.runQuery(sql))
dlist = defer.DeferredList(ds, fireOnOneErrback=True)
def _check(result):
for i in range(self.num_iterations):
self.failUnless(result[i][1][0][0] == i, "Value not returned")
dlist.addCallback(_check)
return dlist
def _testPool_8(self, res):
# now delete everything
ds = []
for i in range(self.num_iterations):
sql = "delete from simple where x = %d" % i
ds.append(self.dbpool.runOperation(sql))
dlist = defer.DeferredList(ds, fireOnOneErrback=True)
return dlist
def _testPool_9(self, res):
# verify simple table is empty
sql = "select count(1) from simple"
d = self.dbpool.runQuery(sql)
def _check(row):
self.failUnless(int(row[0][0]) == 0,
"Didn't successfully delete table contents")
self.checkConnect()
d.addCallback(_check)
return d
def checkConnect(self):
"""Check the connect/disconnect synchronous calls."""
conn = self.dbpool.connect()
self.checkOpenfunCalled(conn)
curs = conn.cursor()
curs.execute("insert into simple(x) values(1)")
curs.execute("select x from simple")
res = curs.fetchall()
self.failUnlessEqual(len(res), 1)
self.failUnlessEqual(len(res[0]), 1)
self.failUnlessEqual(res[0][0], 1)
curs.execute("delete from simple")
curs.execute("select x from simple")
self.failUnlessEqual(len(curs.fetchall()), 0)
curs.close()
self.dbpool.disconnect(conn)
def interaction(self, transaction):
transaction.execute("select x from simple order by x")
for i in range(self.num_iterations):
row = transaction.fetchone()
self.failUnless(len(row) == 1, "Wrong size row")
self.failUnless(row[0] == i, "Value not returned.")
# should test this, but gadfly throws an exception instead
#self.failUnless(transaction.fetchone() is None, "Too many rows")
return "done"
def bad_interaction(self, transaction):
if self.can_rollback:
transaction.execute("insert into simple(x) values(0)")
transaction.execute("select * from NOTABLE")
def withConnection(self, conn):
curs = conn.cursor()
try:
curs.execute("select x from simple order by x")
for i in range(self.num_iterations):
row = curs.fetchone()
self.failUnless(len(row) == 1, "Wrong size row")
self.failUnless(row[0] == i, "Value not returned.")
# should test this, but gadfly throws an exception instead
#self.failUnless(transaction.fetchone() is None, "Too many rows")
finally:
curs.close()
return "done"
def close_withConnection(self, conn):
conn.close()
def bad_withConnection(self, conn):
curs = conn.cursor()
try:
curs.execute("select * from NOTABLE")
finally:
curs.close()
class ReconnectTestBase:
"""Test the asynchronous DB-API code with reconnect."""
if interfaces.IReactorThreads(reactor, None) is None:
skip = "ADB-API requires threads, no way to test without them"
def extraSetUp(self):
"""
Skip the test if C{good_sql} is unavailable. Otherwise, set up the
database, create a connection pool pointed at it, and set up a simple
schema in it.
"""
if self.good_sql is None:
raise unittest.SkipTest('no good sql for reconnect test')
self.startDB()
self.dbpool = self.makePool(cp_max=1, cp_reconnect=True,
cp_good_sql=self.good_sql)
self.dbpool.start()
return self.dbpool.runOperation(simple_table_schema)
def tearDown(self):
d = self.dbpool.runOperation('DROP TABLE simple')
d.addCallback(lambda res: self.dbpool.close())
d.addCallback(lambda res: self.stopDB())
return d
def testPool(self):
d = defer.succeed(None)
d.addCallback(self._testPool_1)
d.addCallback(self._testPool_2)
if not self.early_reconnect:
d.addCallback(self._testPool_3)
d.addCallback(self._testPool_4)
d.addCallback(self._testPool_5)
return d
def _testPool_1(self, res):
sql = "select count(1) from simple"
d = self.dbpool.runQuery(sql)
def _check(row):
self.failUnless(int(row[0][0]) == 0, "Table not empty")
d.addCallback(_check)
return d
def _testPool_2(self, res):
# reach in and close the connection manually
self.dbpool.connections.values()[0].close()
def _testPool_3(self, res):
sql = "select count(1) from simple"
d = defer.maybeDeferred(self.dbpool.runQuery, sql)
d.addCallbacks(lambda res: self.fail('no exception'),
lambda f: None)
return d
def _testPool_4(self, res):
sql = "select count(1) from simple"
d = self.dbpool.runQuery(sql)
def _check(row):
self.failUnless(int(row[0][0]) == 0, "Table not empty")
d.addCallback(_check)
return d
def _testPool_5(self, res):
self.flushLoggedErrors()
sql = "select * from NOTABLE" # bad sql
d = defer.maybeDeferred(self.dbpool.runQuery, sql)
d.addCallbacks(lambda res: self.fail('no exception'),
lambda f: self.failIf(f.check(ConnectionLost)))
return d
class DBTestConnector:
"""A class which knows how to test for the presence of
and establish a connection to a relational database.
To enable test cases which use a central, system database,
you must create a database named DB_NAME with a user DB_USER
and password DB_PASS with full access rights to database DB_NAME.
"""
TEST_PREFIX = None # used for creating new test cases
DB_NAME = "twisted_test"
DB_USER = 'twisted_test'
DB_PASS = 'twisted_test'
DB_DIR = None # directory for database storage
nulls_ok = True # nulls supported
trailing_spaces_ok = True # trailing spaces in strings preserved
can_rollback = True # rollback supported
test_failures = True # test bad sql?
escape_slashes = True # escape \ in sql?
good_sql = ConnectionPool.good_sql
early_reconnect = True # cursor() will fail on closed connection
can_clear = True # can try to clear out tables when starting
num_iterations = 50 # number of iterations for test loops
# (lower this for slow db's)
def setUp(self):
self.DB_DIR = self.mktemp()
os.mkdir(self.DB_DIR)
if not self.can_connect():
raise unittest.SkipTest('%s: Cannot access db' % self.TEST_PREFIX)
return self.extraSetUp()
def can_connect(self):
"""Return true if this database is present on the system
and can be used in a test."""
raise NotImplementedError()
def startDB(self):
"""Take any steps needed to bring database up."""
pass
def stopDB(self):
"""Bring database down, if needed."""
pass
def makePool(self, **newkw):
"""Create a connection pool with additional keyword arguments."""
args, kw = self.getPoolArgs()
kw = kw.copy()
kw.update(newkw)
return ConnectionPool(*args, **kw)
def getPoolArgs(self):
"""Return a tuple (args, kw) of list and keyword arguments
that need to be passed to ConnectionPool to create a connection
to this database."""
raise NotImplementedError()
class GadflyConnector(DBTestConnector):
TEST_PREFIX = 'Gadfly'
nulls_ok = False
can_rollback = False
escape_slashes = False
good_sql = 'select * from simple where 1=0'
num_iterations = 1 # slow
def can_connect(self):
try: import gadfly
except: return False
if not getattr(gadfly, 'connect', None):
gadfly.connect = gadfly.gadfly
return True
def startDB(self):
import gadfly
conn = gadfly.gadfly()
conn.startup(self.DB_NAME, self.DB_DIR)
# gadfly seems to want us to create something to get the db going
cursor = conn.cursor()
cursor.execute("create table x (x integer)")
conn.commit()
conn.close()
def getPoolArgs(self):
args = ('gadfly', self.DB_NAME, self.DB_DIR)
kw = {'cp_max': 1}
return args, kw
class SQLiteConnector(DBTestConnector):
TEST_PREFIX = 'SQLite'
escape_slashes = False
num_iterations = 1 # slow
def can_connect(self):
try: import sqlite
except: return False
return True
def startDB(self):
self.database = os.path.join(self.DB_DIR, self.DB_NAME)
if os.path.exists(self.database):
os.unlink(self.database)
def getPoolArgs(self):
args = ('sqlite',)
kw = {'database': self.database, 'cp_max': 1}
return args, kw
class PyPgSQLConnector(DBTestConnector):
TEST_PREFIX = "PyPgSQL"
def can_connect(self):
try: from pyPgSQL import PgSQL
except: return False
try:
conn = PgSQL.connect(database=self.DB_NAME, user=self.DB_USER,
password=self.DB_PASS)
conn.close()
return True
except:
return False
def getPoolArgs(self):
args = ('pyPgSQL.PgSQL',)
kw = {'database': self.DB_NAME, 'user': self.DB_USER,
'password': self.DB_PASS, 'cp_min': 0}
return args, kw
class PsycopgConnector(DBTestConnector):
TEST_PREFIX = 'Psycopg'
def can_connect(self):
try: import psycopg
except: return False
try:
conn = psycopg.connect(database=self.DB_NAME, user=self.DB_USER,
password=self.DB_PASS)
conn.close()
return True
except:
return False
def getPoolArgs(self):
args = ('psycopg',)
kw = {'database': self.DB_NAME, 'user': self.DB_USER,
'password': self.DB_PASS, 'cp_min': 0}
return args, kw
class MySQLConnector(DBTestConnector):
TEST_PREFIX = 'MySQL'
trailing_spaces_ok = False
can_rollback = False
early_reconnect = False
def can_connect(self):
try: import MySQLdb
except: return False
try:
conn = MySQLdb.connect(db=self.DB_NAME, user=self.DB_USER,
passwd=self.DB_PASS)
conn.close()
return True
except:
return False
def getPoolArgs(self):
args = ('MySQLdb',)
kw = {'db': self.DB_NAME, 'user': self.DB_USER, 'passwd': self.DB_PASS}
return args, kw
class FirebirdConnector(DBTestConnector):
TEST_PREFIX = 'Firebird'
test_failures = False # failure testing causes problems
escape_slashes = False
good_sql = None # firebird doesn't handle failed sql well
can_clear = False # firebird is not so good
num_iterations = 5 # slow
def can_connect(self):
try: import kinterbasdb
except: return False
try:
self.startDB()
self.stopDB()
return True
except:
return False
def startDB(self):
import kinterbasdb
self.DB_NAME = os.path.join(self.DB_DIR, DBTestConnector.DB_NAME)
os.chmod(self.DB_DIR, stat.S_IRWXU + stat.S_IRWXG + stat.S_IRWXO)
sql = 'create database "%s" user "%s" password "%s"'
sql %= (self.DB_NAME, self.DB_USER, self.DB_PASS);
conn = kinterbasdb.create_database(sql)
conn.close()
def getPoolArgs(self):
args = ('kinterbasdb',)
kw = {'database': self.DB_NAME, 'host': '127.0.0.1',
'user': self.DB_USER, 'password': self.DB_PASS}
return args, kw
def stopDB(self):
import kinterbasdb
conn = kinterbasdb.connect(database=self.DB_NAME,
host='127.0.0.1', user=self.DB_USER,
password=self.DB_PASS)
conn.drop_database()
def makeSQLTests(base, suffix, globals):
"""
Make a test case for every db connector which can connect.
@param base: Base class for test case. Additional base classes
will be a DBConnector subclass and unittest.TestCase
@param suffix: A suffix used to create test case names. Prefixes
are defined in the DBConnector subclasses.
"""
connectors = [GadflyConnector, SQLiteConnector, PyPgSQLConnector,
PsycopgConnector, MySQLConnector, FirebirdConnector]
for connclass in connectors:
name = connclass.TEST_PREFIX + suffix
klass = new.classobj(name, (connclass, base, unittest.TestCase), base.__dict__)
globals[name] = klass
# GadflyADBAPITestCase SQLiteADBAPITestCase PyPgSQLADBAPITestCase
# PsycopgADBAPITestCase MySQLADBAPITestCase FirebirdADBAPITestCase
makeSQLTests(ADBAPITestBase, 'ADBAPITestCase', globals())
# GadflyReconnectTestCase SQLiteReconnectTestCase PyPgSQLReconnectTestCase
# PsycopgReconnectTestCase MySQLReconnectTestCase FirebirdReconnectTestCase
makeSQLTests(ReconnectTestBase, 'ReconnectTestCase', globals())
class DeprecationTestCase(unittest.TestCase):
"""
Test deprecations in twisted.enterprise.adbapi
"""
def test_safe(self):
"""
Test deprecation of twisted.enterprise.adbapi.safe()
"""
result = self.callDeprecated(_unreleasedVersion,
safe, "test'")
# make sure safe still behaves like the original
self.assertEqual(result, "test''")
class FakePool(object):
"""
A fake L{ConnectionPool} for tests.
@ivar connectionFactory: factory for making connections returned by the
C{connect} method.
@type connectionFactory: any callable
"""
reconnect = True
noisy = True
def __init__(self, connectionFactory):
self.connectionFactory = connectionFactory
def connect(self):
"""
Return an instance of C{self.connectionFactory}.
"""
return self.connectionFactory()
def disconnect(self, connection):
"""
Do nothing.
"""
class ConnectionTestCase(unittest.TestCase):
"""
Tests for the L{Connection} class.
"""
def test_rollbackErrorLogged(self):
"""
If an error happens during rollback, L{ConnectionLost} is raised but
the original error is logged.
"""
class ConnectionRollbackRaise(object):
def rollback(self):
raise RuntimeError("problem!")
pool = FakePool(ConnectionRollbackRaise)
connection = Connection(pool)
self.assertRaises(ConnectionLost, connection.rollback)
errors = self.flushLoggedErrors(RuntimeError)
self.assertEquals(len(errors), 1)
self.assertEquals(errors[0].value.args[0], "problem!")
class TransactionTestCase(unittest.TestCase):
"""
Tests for the L{Transaction} class.
"""
def test_reopenLogErrorIfReconnect(self):
"""
If the cursor creation raises an error in L{Transaction.reopen}, it
        reconnects but logs the error that occurred.
"""
class ConnectionCursorRaise(object):
count = 0
def reconnect(self):
pass
def cursor(self):
if self.count == 0:
self.count += 1
raise RuntimeError("problem!")
pool = FakePool(None)
transaction = Transaction(pool, ConnectionCursorRaise())
transaction.reopen()
errors = self.flushLoggedErrors(RuntimeError)
self.assertEquals(len(errors), 1)
self.assertEquals(errors[0].value.args[0], "problem!")
class NonThreadPool(object):
def callInThreadWithCallback(self, onResult, f, *a, **kw):
success = True
try:
result = f(*a, **kw)
except Exception, e:
success = False
result = Failure()
onResult(success, result)
class DummyConnectionPool(ConnectionPool):
"""
    A testable L{ConnectionPool}.
"""
threadpool = NonThreadPool()
def __init__(self):
"""
Don't forward init call.
"""
self.reactor = reactor
class EventReactor(object):
"""
Partial L{IReactorCore} implementation with simple event-related
methods.
@ivar _running: A C{bool} indicating whether the reactor is pretending
to have been started already or not.
@ivar triggers: A C{list} of pending system event triggers.
"""
def __init__(self, running):
self._running = running
self.triggers = []
def callWhenRunning(self, function):
if self._running:
function()
else:
return self.addSystemEventTrigger('after', 'startup', function)
def addSystemEventTrigger(self, phase, event, trigger):
handle = (phase, event, trigger)
self.triggers.append(handle)
return handle
def removeSystemEventTrigger(self, handle):
self.triggers.remove(handle)
class ConnectionPoolTestCase(unittest.TestCase):
"""
Unit tests for L{ConnectionPool}.
"""
def test_runWithConnectionRaiseOriginalError(self):
"""
If rollback fails, L{ConnectionPool.runWithConnection} raises the
        original exception and logs the error from the rollback.
"""
class ConnectionRollbackRaise(object):
def __init__(self, pool):
pass
def rollback(self):
raise RuntimeError("problem!")
def raisingFunction(connection):
raise ValueError("foo")
pool = DummyConnectionPool()
pool.connectionFactory = ConnectionRollbackRaise
d = pool.runWithConnection(raisingFunction)
d = self.assertFailure(d, ValueError)
def cbFailed(ignored):
errors = self.flushLoggedErrors(RuntimeError)
self.assertEquals(len(errors), 1)
self.assertEquals(errors[0].value.args[0], "problem!")
d.addCallback(cbFailed)
return d
def test_closeLogError(self):
"""
L{ConnectionPool._close} logs exceptions.
"""
class ConnectionCloseRaise(object):
def close(self):
raise RuntimeError("problem!")
pool = DummyConnectionPool()
pool._close(ConnectionCloseRaise())
errors = self.flushLoggedErrors(RuntimeError)
self.assertEquals(len(errors), 1)
self.assertEquals(errors[0].value.args[0], "problem!")
def test_runWithInteractionRaiseOriginalError(self):
"""
If rollback fails, L{ConnectionPool.runInteraction} raises the
        original exception and logs the error from the rollback.
"""
class ConnectionRollbackRaise(object):
def __init__(self, pool):
pass
def rollback(self):
raise RuntimeError("problem!")
class DummyTransaction(object):
def __init__(self, pool, connection):
pass
def raisingFunction(transaction):
raise ValueError("foo")
pool = DummyConnectionPool()
pool.connectionFactory = ConnectionRollbackRaise
pool.transactionFactory = DummyTransaction
d = pool.runInteraction(raisingFunction)
d = self.assertFailure(d, ValueError)
def cbFailed(ignored):
errors = self.flushLoggedErrors(RuntimeError)
self.assertEquals(len(errors), 1)
self.assertEquals(errors[0].value.args[0], "problem!")
d.addCallback(cbFailed)
return d
def test_unstartedClose(self):
"""
If L{ConnectionPool.close} is called without L{ConnectionPool.start}
having been called, the pool's startup event is cancelled.
"""
reactor = EventReactor(False)
pool = ConnectionPool('twisted.test.test_adbapi', cp_reactor=reactor)
# There should be a startup trigger waiting.
self.assertEquals(reactor.triggers, [('after', 'startup', pool._start)])
pool.close()
# But not anymore.
self.assertFalse(reactor.triggers)
def test_startedClose(self):
"""
If L{ConnectionPool.close} is called after it has been started, but
not by its shutdown trigger, the shutdown trigger is cancelled.
"""
reactor = EventReactor(True)
pool = ConnectionPool('twisted.test.test_adbapi', cp_reactor=reactor)
# There should be a shutdown trigger waiting.
self.assertEquals(reactor.triggers, [('during', 'shutdown', pool.finalClose)])
pool.close()
# But not anymore.
self.assertFalse(reactor.triggers)
| eunchong/build | third_party/twisted_10_2/twisted/test/test_adbapi.py | Python | bsd-3-clause | 26,501 |
print(xrange(<warning descr="Parameter 'start' unfilled">)</warning>)
print(xrange(1))
print(xrange(1, 2))
print(xrange(1, 2, 3))
print(xrange(1, 2, 3, <warning descr="Unexpected argument">4</warning>))
| akosyakov/intellij-community | python/testData/inspections/PyArgumentListInspection/xRange.py | Python | apache-2.0 | 203 |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_snmp_location
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages SNMP location information.
description:
- Manages SNMP location configuration.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
options:
location:
description:
- Location information.
required: true
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# ensure snmp location is configured
- nxos_snmp_location:
location: Test
state: present
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# ensure snmp location is not configured
- nxos_snmp_location:
location: Test
state: absent
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"location": "New_Test"}
existing:
description: k/v pairs of existing snmp location
returned: always
type: dict
sample: {"location": "Test"}
end_state:
description: k/v pairs of location info after module execution
returned: always
type: dict
sample: {"location": "New_Test"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["snmp-server location New_Test"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
import re
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
if 'show run' not in command:
command += ' | json'
cmds = [command]
body = run_commands(module, cmds)
elif module.params['transport'] == 'nxapi':
cmds = [command]
body = run_commands(module, cmds)
return body
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = str(value)
else:
new_dict[new_key] = value
return new_dict
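# Illustrative use of apply_key_map (the key map and table values below are
# hypothetical):
#   apply_key_map({'sys_location': 'location'}, {'sys_location': 'Lab', 'other': 'x'})
#   returns {'location': 'Lab'} -- keys absent from key_map are dropped and
#   truthy values are coerced to str.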
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_snmp_location(module):
location = {}
    location_regex = r'.*snmp-server\slocation\s(?P<location>\S+).*'
command = 'show run snmp'
body = execute_show_command(command, module, command_type='cli_show_ascii')
try:
match_location = re.match(location_regex, body[0], re.DOTALL)
group_location = match_location.groupdict()
location['location'] = group_location["location"]
except (AttributeError, TypeError):
location = {}
return location
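# For illustration only (sample output, not captured from a real device): a
# 'show run snmp' body containing the line "snmp-server location Lab-Row-3"
# makes get_snmp_location() return {'location': 'Lab-Row-3'}; when no location
# is configured the regex does not match and an empty dict is returned instead.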
def main():
argument_spec = dict(
location=dict(required=True, type='str'),
state=dict(choices=['absent', 'present'],
default='present')
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
location = module.params['location']
state = module.params['state']
existing = get_snmp_location(module)
changed = False
commands = []
proposed = dict(location=location)
end_state = existing
if state == 'absent':
if existing and existing['location'] == location:
commands.append('no snmp-server location')
elif state == 'present':
if not existing or existing['location'] != location:
commands.append('snmp-server location {0}'.format(location))
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
changed = True
load_config(module, cmds)
end_state = get_snmp_location(module)
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
results['updates'] = cmds
results['changed'] = changed
results['warnings'] = warnings
module.exit_json(**results)
from ansible.module_utils.basic import *
if __name__ == "__main__":
main()
| tszym/ansible | lib/ansible/modules/network/nxos/nxos_snmp_location.py | Python | gpl-3.0 | 5,795 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in optimizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Optimizer classes.
from tensorflow.contrib.keras.python.keras.optimizers import Adadelta
from tensorflow.contrib.keras.python.keras.optimizers import Adagrad
from tensorflow.contrib.keras.python.keras.optimizers import Adam
from tensorflow.contrib.keras.python.keras.optimizers import Adamax
from tensorflow.contrib.keras.python.keras.optimizers import Nadam
from tensorflow.contrib.keras.python.keras.optimizers import Optimizer
from tensorflow.contrib.keras.python.keras.optimizers import RMSprop
from tensorflow.contrib.keras.python.keras.optimizers import SGD
# Auxiliary utils.
# pylint: disable=g-bad-import-order
from tensorflow.contrib.keras.python.keras.optimizers import deserialize
from tensorflow.contrib.keras.python.keras.optimizers import serialize
from tensorflow.contrib.keras.python.keras.optimizers import get
del absolute_import
del division
del print_function
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/contrib/keras/api/keras/optimizers/__init__.py | Python | bsd-2-clause | 1,718 |
#!/usr/bin/python
#
# Copyright (c) 2011 The Chromium OS Authors.
#
# See file CREDITS for list of people who contributed to this
# project.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
"""See README for more information"""
from optparse import OptionParser
import os
import re
import sys
import unittest
# Our modules
import checkpatch
import command
import gitutil
import patchstream
import terminal
import test
parser = OptionParser()
parser.add_option('-H', '--full-help', action='store_true', dest='full_help',
default=False, help='Display the README file')
parser.add_option('-c', '--count', dest='count', type='int',
default=-1, help='Automatically create patches from top n commits')
parser.add_option('-i', '--ignore-errors', action='store_true',
dest='ignore_errors', default=False,
help='Send patches email even if patch errors are found')
parser.add_option('-n', '--dry-run', action='store_true', dest='dry_run',
default=False, help="Do a try run (create but don't email patches)")
parser.add_option('-s', '--start', dest='start', type='int',
default=0, help='Commit to start creating patches from (0 = HEAD)')
parser.add_option('-t', '--test', action='store_true', dest='test',
default=False, help='run tests')
parser.add_option('-v', '--verbose', action='store_true', dest='verbose',
default=False, help='Verbose output of errors and warnings')
parser.add_option('--cc-cmd', dest='cc_cmd', type='string', action='store',
default=None, help='Output cc list for patch file (used by git)')
parser.add_option('--no-tags', action='store_false', dest='process_tags',
    default=True, help="Don't process subject tags as aliases")
parser.usage = """patman [options]
Create patches from commits in a branch, check them and email them as
specified by tags you place in the commits. Use -n to do a dry run first."""
(options, args) = parser.parse_args()
# Run our meagre tests
if options.test:
import doctest
sys.argv = [sys.argv[0]]
suite = unittest.TestLoader().loadTestsFromTestCase(test.TestPatch)
result = unittest.TestResult()
suite.run(result)
suite = doctest.DocTestSuite('gitutil')
suite.run(result)
# TODO: Surely we can just 'print' result?
print result
for test, err in result.errors:
print err
for test, err in result.failures:
print err
# Called from git with a patch filename as argument
# Printout a list of additional CC recipients for this patch
elif options.cc_cmd:
fd = open(options.cc_cmd, 'r')
re_line = re.compile('(\S*) (.*)')
for line in fd.readlines():
match = re_line.match(line)
if match and match.group(1) == args[0]:
for cc in match.group(2).split(', '):
cc = cc.strip()
if cc:
print cc
fd.close()
elif options.full_help:
pager = os.getenv('PAGER')
if not pager:
pager = 'more'
fname = os.path.join(os.path.dirname(sys.argv[0]), 'README')
command.Run(pager, fname)
# Process commits, produce patches files, check them, email them
else:
gitutil.Setup()
if options.count == -1:
# Work out how many patches to send if we can
options.count = gitutil.CountCommitsToBranch() - options.start
col = terminal.Color()
if not options.count:
str = 'No commits found to process - please use -c flag'
print col.Color(col.RED, str)
sys.exit(1)
# Read the metadata from the commits
if options.count:
series = patchstream.GetMetaData(options.start, options.count)
cover_fname, args = gitutil.CreatePatches(options.start, options.count,
series)
# Fix up the patch files to our liking, and insert the cover letter
series = patchstream.FixPatches(series, args)
if series and cover_fname and series.get('cover'):
patchstream.InsertCoverLetter(cover_fname, series, options.count)
# Do a few checks on the series
series.DoChecks()
# Check the patches, and run them through 'git am' just to be sure
ok = checkpatch.CheckPatches(options.verbose, args)
if not gitutil.ApplyPatches(options.verbose, args,
options.count + options.start):
ok = False
# Email the patches out (giving the user time to check / cancel)
cmd = ''
if ok or options.ignore_errors:
cc_file = series.MakeCcFile(options.process_tags)
cmd = gitutil.EmailPatches(series, cover_fname, args,
options.dry_run, cc_file)
os.remove(cc_file)
# For a dry run, just show our actions as a sanity check
if options.dry_run:
series.ShowActions(args, cmd, options.process_tags)
| MarvellEmbeddedProcessors/u-boot-armada38x | tools/patman/patman.py | Python | gpl-2.0 | 5,394 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for third_party.tensorflow.contrib.ffmpeg.encode_audio_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
from tensorflow.contrib import ffmpeg
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
class EncodeAudioOpTest(test.TestCase):
def _compareWavFiles(self, original, encoded):
"""Compares the important bits of two WAV files.
Some encoders will create a slightly different header to the WAV file.
This compares only the important bits of the header as well as the contents.
Args:
original: Contents of the original .wav file.
encoded: Contents of the new, encoded .wav file.
"""
self.assertLess(44, len(original))
self.assertLess(44, len(encoded))
self.assertEqual(original[:4], encoded[:4])
# Skip file size
self.assertEqual(original[8:16], encoded[8:16])
# Skip header size
self.assertEqual(original[20:36], encoded[20:36])
# Skip extra bits inserted by ffmpeg.
self.assertEqual(original[original.find(b'data'):],
encoded[encoded.find(b'data'):])
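  # Reference sketch of why those offsets are compared (assuming the canonical
  # RIFF/WAVE layout): bytes 0-4 hold 'RIFF', 4-8 the total chunk size (skipped
  # above), 8-16 'WAVE' plus 'fmt ', 16-20 the fmt-chunk size (skipped), and
  # 20-36 the sample-format fields; everything from the 'data' marker onward is
  # the audio payload.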
def testRoundTrip(self):
"""Reads a wav file, writes it, and compares them."""
with self.test_session():
path = os.path.join(resource_loader.get_data_files_path(),
'testdata/mono_10khz.wav')
with open(path, 'rb') as f:
original_contents = f.read()
audio_op = ffmpeg.decode_audio(
original_contents,
file_format='wav',
samples_per_second=10000,
channel_count=1)
encode_op = ffmpeg.encode_audio(
audio_op, file_format='wav', samples_per_second=10000)
encoded_contents = encode_op.eval()
self._compareWavFiles(original_contents, encoded_contents)
if __name__ == '__main__':
test.main()
| npuichigo/ttsflow | third_party/tensorflow/tensorflow/contrib/ffmpeg/encode_audio_op_test.py | Python | apache-2.0 | 2,597 |
from __future__ import unicode_literals
import sys
from django.conf import settings
from django.template import Library, Node, TemplateSyntaxError, Variable
from django.template.base import TOKEN_TEXT, TOKEN_VAR, render_value_in_context
from django.template.defaulttags import token_kwargs
from django.utils import six, translation
register = Library()
class GetAvailableLanguagesNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = [(k, translation.ugettext(v)) for k, v in settings.LANGUAGES]
return ''
class GetLanguageInfoNode(Node):
def __init__(self, lang_code, variable):
self.lang_code = lang_code
self.variable = variable
def render(self, context):
lang_code = self.lang_code.resolve(context)
context[self.variable] = translation.get_language_info(lang_code)
return ''
class GetLanguageInfoListNode(Node):
def __init__(self, languages, variable):
self.languages = languages
self.variable = variable
def get_language_info(self, language):
# ``language`` is either a language code string or a sequence
# with the language code as its first item
if len(language[0]) > 1:
return translation.get_language_info(language[0])
else:
return translation.get_language_info(str(language))
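    # Illustrative inputs: a settings.LANGUAGES style pair such as
    # ('de', 'German') is resolved through its code 'de', while a bare code
    # string like 'de' falls through to the str() branch above.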
def render(self, context):
langs = self.languages.resolve(context)
context[self.variable] = [self.get_language_info(lang) for lang in langs]
return ''
class GetCurrentLanguageNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language()
return ''
class GetCurrentLanguageBidiNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language_bidi()
return ''
class TranslateNode(Node):
def __init__(self, filter_expression, noop, asvar=None,
message_context=None):
self.noop = noop
self.asvar = asvar
self.message_context = message_context
self.filter_expression = filter_expression
if isinstance(self.filter_expression.var, six.string_types):
self.filter_expression.var = Variable("'%s'" %
self.filter_expression.var)
def render(self, context):
self.filter_expression.var.translate = not self.noop
if self.message_context:
self.filter_expression.var.message_context = (
self.message_context.resolve(context))
output = self.filter_expression.resolve(context)
value = render_value_in_context(output, context)
if self.asvar:
context[self.asvar] = value
return ''
else:
return value
class BlockTranslateNode(Node):
def __init__(self, extra_context, singular, plural=None, countervar=None,
counter=None, message_context=None, trimmed=False, asvar=None):
self.extra_context = extra_context
self.singular = singular
self.plural = plural
self.countervar = countervar
self.counter = counter
self.message_context = message_context
self.trimmed = trimmed
self.asvar = asvar
def render_token_list(self, tokens):
result = []
vars = []
for token in tokens:
if token.token_type == TOKEN_TEXT:
result.append(token.contents.replace('%', '%%'))
elif token.token_type == TOKEN_VAR:
result.append('%%(%s)s' % token.contents)
vars.append(token.contents)
msg = ''.join(result)
if self.trimmed:
msg = translation.trim_whitespace(msg)
return msg, vars
def render(self, context, nested=False):
if self.message_context:
message_context = self.message_context.resolve(context)
else:
message_context = None
tmp_context = {}
for var, val in self.extra_context.items():
tmp_context[var] = val.resolve(context)
# Update() works like a push(), so corresponding context.pop() is at
# the end of function
context.update(tmp_context)
singular, vars = self.render_token_list(self.singular)
if self.plural and self.countervar and self.counter:
count = self.counter.resolve(context)
context[self.countervar] = count
plural, plural_vars = self.render_token_list(self.plural)
if message_context:
result = translation.npgettext(message_context, singular,
plural, count)
else:
result = translation.ungettext(singular, plural, count)
vars.extend(plural_vars)
else:
if message_context:
result = translation.pgettext(message_context, singular)
else:
result = translation.ugettext(singular)
default_value = context.template.engine.string_if_invalid
def render_value(key):
if key in context:
val = context[key]
else:
val = default_value % key if '%s' in default_value else default_value
return render_value_in_context(val, context)
data = {v: render_value(v) for v in vars}
context.pop()
try:
result = result % data
except (KeyError, ValueError):
if nested:
# Either string is malformed, or it's a bug
raise TemplateSyntaxError("'blocktrans' is unable to format "
"string returned by gettext: %r using %r" % (result, data))
with translation.override(None):
result = self.render(context, nested=True)
if self.asvar:
context[self.asvar] = result
return ''
else:
return result
class LanguageNode(Node):
def __init__(self, nodelist, language):
self.nodelist = nodelist
self.language = language
def render(self, context):
with translation.override(self.language.resolve(context)):
output = self.nodelist.render(context)
return output
@register.tag("get_available_languages")
def do_get_available_languages(parser, token):
"""
This will store a list of available languages
in the context.
Usage::
{% get_available_languages as languages %}
{% for language in languages %}
...
{% endfor %}
This will just pull the LANGUAGES setting from
your setting file (or the default settings) and
put it into the named variable.
"""
    # token.split_contents() isn't useful here because this tag doesn't accept variables as arguments
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_available_languages' requires 'as variable' (got %r)" % args)
return GetAvailableLanguagesNode(args[2])
@register.tag("get_language_info")
def do_get_language_info(parser, token):
"""
This will store the language information dictionary for the given language
code in a context variable.
Usage::
{% get_language_info for LANGUAGE_CODE as l %}
{{ l.code }}
{{ l.name }}
{{ l.name_translated }}
{{ l.name_local }}
{{ l.bidi|yesno:"bi-directional,uni-directional" }}
"""
args = token.split_contents()
if len(args) != 5 or args[1] != 'for' or args[3] != 'as':
raise TemplateSyntaxError("'%s' requires 'for string as variable' (got %r)" % (args[0], args[1:]))
return GetLanguageInfoNode(parser.compile_filter(args[2]), args[4])
@register.tag("get_language_info_list")
def do_get_language_info_list(parser, token):
"""
This will store a list of language information dictionaries for the given
language codes in a context variable. The language codes can be specified
either as a list of strings or a settings.LANGUAGES style list (or any
sequence of sequences whose first items are language codes).
Usage::
{% get_language_info_list for LANGUAGES as langs %}
{% for l in langs %}
{{ l.code }}
{{ l.name }}
{{ l.name_translated }}
{{ l.name_local }}
{{ l.bidi|yesno:"bi-directional,uni-directional" }}
{% endfor %}
"""
args = token.split_contents()
if len(args) != 5 or args[1] != 'for' or args[3] != 'as':
raise TemplateSyntaxError("'%s' requires 'for sequence as variable' (got %r)" % (args[0], args[1:]))
return GetLanguageInfoListNode(parser.compile_filter(args[2]), args[4])
@register.filter
def language_name(lang_code):
return translation.get_language_info(lang_code)['name']
@register.filter
def language_name_translated(lang_code):
english_name = translation.get_language_info(lang_code)['name']
return translation.ugettext(english_name)
@register.filter
def language_name_local(lang_code):
return translation.get_language_info(lang_code)['name_local']
@register.filter
def language_bidi(lang_code):
return translation.get_language_info(lang_code)['bidi']
@register.tag("get_current_language")
def do_get_current_language(parser, token):
"""
This will store the current language in the context.
Usage::
{% get_current_language as language %}
This will fetch the currently active language and
    put its value into the ``language`` context
variable.
"""
    # token.split_contents() isn't useful here because this tag doesn't accept variables as arguments
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageNode(args[2])
@register.tag("get_current_language_bidi")
def do_get_current_language_bidi(parser, token):
"""
This will store the current language layout in the context.
Usage::
{% get_current_language_bidi as bidi %}
This will fetch the currently active language's layout and
    put its value into the ``bidi`` context variable.
True indicates right-to-left layout, otherwise left-to-right
"""
    # token.split_contents() isn't useful here because this tag doesn't accept variables as arguments
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language_bidi' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageBidiNode(args[2])
@register.tag("trans")
def do_translate(parser, token):
"""
This will mark a string for translation and will
translate the string for the current language.
Usage::
{% trans "this is a test" %}
This will mark the string for translation so it will
be pulled out by mark-messages.py into the .po files
and will run the string through the translation engine.
There is a second form::
{% trans "this is a test" noop %}
This will only mark for translation, but will return
the string unchanged. Use it when you need to store
values into forms that should be translated later on.
You can use variables instead of constant strings
to translate stuff you marked somewhere else::
{% trans variable %}
This will just try to translate the contents of
the variable ``variable``. Make sure that the string
in there is something that is in the .po file.
It is possible to store the translated string into a variable::
{% trans "this is a test" as var %}
{{ var }}
Contextual translations are also supported::
{% trans "this is a test" context "greeting" %}
This is equivalent to calling pgettext instead of (u)gettext.
"""
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError("'%s' takes at least one argument" % bits[0])
message_string = parser.compile_filter(bits[1])
remaining = bits[2:]
noop = False
asvar = None
message_context = None
seen = set()
invalid_context = {'as', 'noop'}
while remaining:
option = remaining.pop(0)
if option in seen:
raise TemplateSyntaxError(
"The '%s' option was specified more than once." % option,
)
elif option == 'noop':
noop = True
elif option == 'context':
try:
value = remaining.pop(0)
except IndexError:
msg = "No argument provided to the '%s' tag for the context option." % bits[0]
six.reraise(TemplateSyntaxError, TemplateSyntaxError(msg), sys.exc_info()[2])
if value in invalid_context:
raise TemplateSyntaxError(
"Invalid argument '%s' provided to the '%s' tag for the context option" % (value, bits[0]),
)
message_context = parser.compile_filter(value)
elif option == 'as':
try:
value = remaining.pop(0)
except IndexError:
msg = "No argument provided to the '%s' tag for the as option." % bits[0]
six.reraise(TemplateSyntaxError, TemplateSyntaxError(msg), sys.exc_info()[2])
asvar = value
else:
raise TemplateSyntaxError(
"Unknown argument for '%s' tag: '%s'. The only options "
"available are 'noop', 'context' \"xxx\", and 'as VAR'." % (
bits[0], option,
)
)
seen.add(option)
return TranslateNode(message_string, noop, asvar, message_context)
@register.tag("blocktrans")
def do_block_translate(parser, token):
"""
This will translate a block of text with parameters.
Usage::
{% blocktrans with bar=foo|filter boo=baz|filter %}
This is {{ bar }} and {{ boo }}.
{% endblocktrans %}
Additionally, this supports pluralization::
{% blocktrans count count=var|length %}
There is {{ count }} object.
{% plural %}
There are {{ count }} objects.
{% endblocktrans %}
This is much like ngettext, only in template syntax.
The "var as value" legacy format is still supported::
{% blocktrans with foo|filter as bar and baz|filter as boo %}
{% blocktrans count var|length as count %}
The translated string can be stored in a variable using `asvar`::
{% blocktrans with bar=foo|filter boo=baz|filter asvar var %}
This is {{ bar }} and {{ boo }}.
{% endblocktrans %}
{{ var }}
Contextual translations are supported::
{% blocktrans with bar=foo|filter context "greeting" %}
This is {{ bar }}.
{% endblocktrans %}
This is equivalent to calling pgettext/npgettext instead of
(u)gettext/(u)ngettext.
"""
bits = token.split_contents()
options = {}
remaining_bits = bits[1:]
asvar = None
while remaining_bits:
option = remaining_bits.pop(0)
if option in options:
raise TemplateSyntaxError('The %r option was specified more '
'than once.' % option)
if option == 'with':
value = token_kwargs(remaining_bits, parser, support_legacy=True)
if not value:
raise TemplateSyntaxError('"with" in %r tag needs at least '
'one keyword argument.' % bits[0])
elif option == 'count':
value = token_kwargs(remaining_bits, parser, support_legacy=True)
if len(value) != 1:
raise TemplateSyntaxError('"count" in %r tag expected exactly '
'one keyword argument.' % bits[0])
elif option == "context":
try:
value = remaining_bits.pop(0)
value = parser.compile_filter(value)
except Exception:
msg = (
'"context" in %r tag expected '
'exactly one argument.') % bits[0]
six.reraise(TemplateSyntaxError, TemplateSyntaxError(msg), sys.exc_info()[2])
elif option == "trimmed":
value = True
elif option == "asvar":
try:
value = remaining_bits.pop(0)
except IndexError:
msg = "No argument provided to the '%s' tag for the asvar option." % bits[0]
six.reraise(TemplateSyntaxError, TemplateSyntaxError(msg), sys.exc_info()[2])
asvar = value
else:
raise TemplateSyntaxError('Unknown argument for %r tag: %r.' %
(bits[0], option))
options[option] = value
if 'count' in options:
countervar, counter = list(options['count'].items())[0]
else:
countervar, counter = None, None
if 'context' in options:
message_context = options['context']
else:
message_context = None
extra_context = options.get('with', {})
trimmed = options.get("trimmed", False)
singular = []
plural = []
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
singular.append(token)
else:
break
if countervar and counter:
if token.contents.strip() != 'plural':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags inside it")
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
plural.append(token)
else:
break
if token.contents.strip() != 'endblocktrans':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags (seen %r) inside it" % token.contents)
return BlockTranslateNode(extra_context, singular, plural, countervar,
counter, message_context, trimmed=trimmed,
asvar=asvar)
@register.tag
def language(parser, token):
"""
This will enable the given language just for this block.
Usage::
{% language "de" %}
This is {{ bar }} and {{ boo }}.
{% endlanguage %}
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument (language)" % bits[0])
language = parser.compile_filter(bits[1])
nodelist = parser.parse(('endlanguage',))
parser.delete_first_token()
return LanguageNode(nodelist, language)
| vitaly4uk/django | django/templatetags/i18n.py | Python | bsd-3-clause | 18,976 |
a = "two"
"one %s three" % a | idea4bsd/idea4bsd | python/testData/refactoring/introduceVariable/substringInExpressionStatement.after.py | Python | apache-2.0 | 28 |
# test equality for classes/instances to other types
class A:
pass
class B:
pass
class C(A):
pass
print(A == None)
print(None == A)
print(A == A)
print(A() == A)
print(A() == A())
print(A == B)
print(A() == B)
print(A() == B())
print(A == C)
print(A() == C)
print(A() == C())
| mhoffma/micropython | tests/basics/equal_class.py | Python | mit | 295 |
#!/usr/bin/env python3
def __safe_call(f, a):
"""Call a function and capture all exceptions."""
try:
return f(a)
except Exception as e:
return "{}@{}({})".format(a.__class__.__name__, id(a), e)
def __safe_error(msg, a, b):
"""Generate the error message for assert_XX without causing an error."""
return "{} ({}) {} {} ({})".format(
__safe_call(str, a),
__safe_call(repr, a),
msg,
__safe_call(str, b),
__safe_call(repr, b),
)
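# Example of the message shape this helper produces (illustrative operands):
#   __safe_error("!=", 1, 2) -> "1 (1) != 2 (2)"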
def assert_eq(a, b, msg=None):
"""Assert equal with better error message."""
assert a == b, msg or __safe_error("!=", a, b)
def assert_not_in(needle, haystack, msg=None):
"""Assert equal with better error message."""
assert needle not in haystack, msg or __safe_error(
"already in", needle, haystack
)
def assert_is(a, b):
"""Assert is with better error message."""
assert a is b, __safe_error("is not", a, b)
def assert_type(
obj, cls, msg="{obj} ({obj!r}) should be a {cls}, not {objcls}"
):
"""Raise a type error if obj is not an instance of cls."""
if not isinstance(obj, cls):
raise TypeError(msg.format(obj=obj, objcls=type(obj), cls=cls))
def assert_type_or_none(obj, classes):
"""Raise a type error if obj is not an instance of cls or None."""
if obj is not None:
assert_type(obj, classes)
def assert_len_eq(lists):
"""Check all lists in a list are equal length"""
# Sanity check
max_len = max(len(p) for p in lists)
for i, p in enumerate(lists):
assert len(
p
) == max_len, "Length check failed!\nl[{}] has {} elements != {} ({!r})\n{!r}".format(
i, len(p), max_len, p, lists
)
| SymbiFlow/symbiflow-arch-defs | utils/lib/asserts.py | Python | isc | 1,746 |
from carapace import config
from carapace.sdk import registry
config.updateConfig()
registry.registerConfig(config)
| oubiwann/carapace | carapace/app/__init__.py | Python | mit | 118 |
logparser = r'(\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}:\d{2},\d{3})\s+' \
r'(DEBUG|ERROR|INFO)\s+\[(\w+):(\w+):?(\w+)?\]\s+(.+)$'
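# A line this pattern is meant to match might look like (illustrative values):
#   2013-08-14 12:02:31,123 ERROR [ClassName:methodName:42] something went wrong
# capturing the timestamp, the level, the three bracketed fields (the third one
# optional) and the trailing message.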
| the-zebulan/CodeWars | katas/kyu_6/parse_the_log.py | Python | mit | 134 |
'''dossier.fc Feature Collections
:class:`dossier.fc.FeatureCollection` provides
convenience methods for working with collections of features
such as :class:`dossier.fc.StringCounter`.
A feature collection provides ``__add__`` and ``__sub__`` so that
adding/subtracting FeatureCollections does the right thing for its
constituent features.
.. This software is released under an MIT/X11 open source license.
Copyright 2012-2015 Diffeo, Inc.
'''
from __future__ import absolute_import, division, print_function
try:
from collections import Counter
except ImportError:
from backport_collections import Counter
from collections import MutableMapping, Sequence
import copy
from itertools import chain, ifilter, imap
import logging
import operator
import cbor
import pkg_resources
import streamcorpus
from dossier.fc.exceptions import ReadOnlyException, SerializationError
from dossier.fc.feature_tokens import FeatureTokens, FeatureTokensSerializer
from dossier.fc.geocoords import GeoCoords, GeoCoordsSerializer
from dossier.fc.string_counter import StringCounterSerializer, StringCounter, \
NestedStringCounter, NestedStringCounterSerializer
from dossier.fc.vector import SparseVector, DenseVector
logger = logging.getLogger(__name__)
class FeatureCollectionChunk(streamcorpus.CborChunk):
'''
A :class:`FeatureCollectionChunk` provides a way to serialize
    a collection of feature collections to a single blob of data.
Here's an example that writes two feature collections to an existing
file handle::
fc1 = FeatureCollection({'NAME': {'foo': 2, 'baz': 1}})
fc2 = FeatureCollection({'NAME': {'foo': 4, 'baz': 2}})
fh = StringIO()
chunk = FeatureCollectionChunk(file_obj=fh, mode='wb')
chunk.add(fc1)
chunk.add(fc2)
chunk.flush()
And the blob created inside the buffer can be read from to produce
an iterable of the feature collections that were written::
fh = StringIO(fh.getvalue())
chunk = FeatureCollectionChunk(file_obj=fh, mode='rb')
rfc1, rfc2 = list(chunk)
assert fc1 == rfc1
assert fc2 == rfc2
'''
def __init__(self, *args, **kwargs):
kwargs['write_wrapper'] = FeatureCollection.to_dict
kwargs['read_wrapper'] = FeatureCollection.from_dict
kwargs['message'] = lambda x: x
super(FeatureCollectionChunk, self).__init__(*args, **kwargs)
class FeatureCollection(MutableMapping):
'''
A collection of features.
This is a dictionary from feature name to a
:class:`collections.Counter` or similar object. In typical use
callers will not try to instantiate individual dictionary elements,
but will fall back on the collection's default-value behavior::
fc = FeatureCollection()
fc['NAME']['John Smith'] += 1
The default default feature type is
:class:`~dossier.fc.StringCounter`.
**Feature collection construction and serialization:**
.. automethod:: __init__
.. automethod:: loads
.. automethod:: dumps
.. automethod:: from_dict
.. automethod:: to_dict
.. automethod:: register_serializer
**Feature collection values and attributes:**
.. autoattribute:: read_only
.. autoattribute:: generation
.. autoattribute:: DISPLAY_PREFIX
.. autoattribute:: EPHEMERAL_PREFIX
**Feature collection computation:**
.. automethod:: __add__
.. automethod:: __sub__
.. automethod:: __mul__
.. automethod:: __imul__
.. automethod:: total
.. automethod:: merge_with
'''
__slots__ = ['_features', '_read_only']
DISPLAY_PREFIX = '#'
'''Prefix on names of features that are human-readable.
Processing may convert a feature ``name`` to a similar feature
``#name`` that is human-readable, while converting the original
feature to a form that is machine-readable only; for instance,
replacing strings with integers for faster comparison.
'''
EPHEMERAL_PREFIX = '_'
'''Prefix on names of features that are not persisted.
:meth:`to_dict` and :meth:`dumps` will not write out features
that begin with this character.
'''
TOKEN_PREFIX = '@'
'''Prefix on names of features that contain tokens.'''
GEOCOORDS_PREFIX = '!'
'''Prefix on names of features that contain geocoords.'''
NESTED_PREFIX = '%'
'''Prefix on names of features based on NestedStringCounters'''
@staticmethod
def register_serializer(feature_type, obj):
'''
This is a **class** method that lets you define your own
        feature type serializers. ``feature_type`` should be the name of
        the feature type that you want to define serialization
        for. Currently, the valid values are ``StringCounter``,
        ``Unicode``, ``SparseVector``, ``DenseVector``,
        ``FeatureTokens``, ``GeoCoords`` and ``NestedStringCounter``.
Note that this function is not thread safe.
``obj`` must be an object with three attributes defined.
``obj.loads`` is a function that takes a CBOR created
Python data structure and returns a new feature counter.
``obj.dumps`` is a function that takes a feature counter
and returns a Python data structure that can be serialized
by CBOR.
``obj.constructor`` is a function with no parameters that
returns the Python ``type`` that can be used to construct
new features. It should be possible to call
``obj.constructor()`` to get a new and empty feature
counter.
'''
registry.add(feature_type, obj)
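    # A hedged sketch of what a serializer object can look like (names here are
    # hypothetical; the type name must be one of the registered names above):
    #
    #     class MyStringCounterSerializer(object):
    #         constructor = StringCounter
    #         dumps = staticmethod(lambda sc: dict(sc))
    #         loads = staticmethod(lambda data: StringCounter(data))
    #
    #     FeatureCollection.register_serializer('StringCounter',
    #                                           MyStringCounterSerializer)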
def __init__(self, data=None, read_only=False):
'''
Creates a new empty feature collection.
If ``data`` is a dictionary-like object with a structure similar
to that of a feature collection (i.e., a dict of multisets),
then it is used to initialize the feature collection.
'''
self._features = {}
self.read_only = read_only
if data is not None:
self._from_dict_update(data)
@classmethod
def loads(cls, data):
'''Create a feature collection from a CBOR byte string.'''
rep = cbor.loads(data)
if not isinstance(rep, Sequence):
raise SerializationError('expected a CBOR list')
if len(rep) != 2:
raise SerializationError('expected a CBOR list of 2 items')
metadata = rep[0]
if 'v' not in metadata:
raise SerializationError('no version in CBOR metadata')
if metadata['v'] != 'fc01':
raise SerializationError('invalid CBOR version {!r} '
'(expected "fc01")'
.format(metadata['v']))
read_only = metadata.get('ro', False)
contents = rep[1]
return cls.from_dict(contents, read_only=read_only)
def dumps(self):
'''Create a CBOR byte string from a feature collection.'''
metadata = {'v': 'fc01'}
if self.read_only:
metadata['ro'] = 1
rep = [metadata, self.to_dict()]
return cbor.dumps(rep)
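    # Round-trip sketch (values illustrative): because the blob is just a CBOR
    # list of [metadata, contents],
    #     fc = FeatureCollection({u'NAME': StringCounter({u'foo': 1})})
    #     assert FeatureCollection.loads(fc.dumps()) == fc
    # is expected to hold for serializable feature values.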
@classmethod
def from_dict(cls, data, read_only=False):
'''Recreate a feature collection from a dictionary.
The dictionary is of the format dumped by :meth:`to_dict`.
Additional information, such as whether the feature collection
should be read-only, is not included in this dictionary, and
is instead passed as parameters to this function.
'''
fc = cls(read_only=read_only)
fc._features = {}
fc._from_dict_update(data)
return fc
def _from_dict_update(self, data):
for name, feat in data.iteritems():
if not isinstance(name, unicode):
name = name.decode('utf-8')
if name.startswith(self.EPHEMERAL_PREFIX):
self._features[name] = feat
continue
if isinstance(feat, cbor.Tag):
if feat.tag not in cbor_tags_to_names:
raise SerializationError(
'Unknown CBOR value (tag id: %d): %r'
% (feat.tag, feat.value))
loads = registry.get(cbor_tags_to_names[feat.tag]).loads
feat = feat.value
elif is_native_string_counter(feat):
loads = registry.get('StringCounter').loads
elif isinstance(feat, unicode):
# A Unicode string.
loads = registry.get('Unicode').loads
elif registry.is_serializeable(feat):
loads = lambda x: x
else:
raise SerializationError(
'Unknown CBOR value (type: %r): %r' % (type(feat), feat))
value = loads(feat)
if hasattr(value, 'read_only'):
value.read_only = self.read_only
self._features[name] = value
def to_dict(self):
'''Dump a feature collection's features to a dictionary.
This does not include additional data, such as whether
or not the collection is read-only. The returned dictionary
is suitable for serialization into JSON, CBOR, or similar
data formats.
'''
def is_non_native_sc(ty, encoded):
return (ty == 'StringCounter'
and not is_native_string_counter(encoded))
fc = {}
native = ('StringCounter', 'Unicode')
for name, feat in self._features.iteritems():
if name.startswith(self.EPHEMERAL_PREFIX):
continue
if not isinstance(name, unicode):
name = name.decode('utf-8')
tyname = registry.feature_type_name(name, feat)
encoded = registry.get(tyname).dumps(feat)
# This tomfoolery is to support *native untagged* StringCounters.
if tyname not in native or is_non_native_sc(tyname, encoded):
encoded = cbor.Tag(cbor_names_to_tags[tyname], encoded)
fc[name] = encoded
return fc
@property
def generation(self):
'''Get the generation number for this feature collection.
This is the highest generation number across all counters
in the collection, if the counters support generation
numbers. This collection has not changed if the generation
number has not changed.
'''
return max(getattr(collection, 'generation', 0)
for collection in self._features.itervalues())
def __repr__(self):
return 'FeatureCollection(%r)' % self._features
def __missing__(self, key):
default_tyname = FeatureTypeRegistry.get_default_feature_type_name(key)
if self.read_only:
            # Return a read-only instance of the default type; a single
            # shared instance suffices precisely because it is read-only.
return registry.get_read_only(default_tyname)
if key.startswith(self.EPHEMERAL_PREFIX):
# When the feature name starts with an ephemeral prefix, then
# we have no idea what it should be---anything goes. Therefore,
# the caller must set the initial value themselves (and we must
# be careful not to get too eager with relying on __missing__).
raise KeyError(key)
default_value = registry.get_constructor(default_tyname)()
self[key] = default_value
return default_value
def __contains__(self, key):
'''
Returns ``True`` if the feature named ``key`` is in this
FeatureCollection.
:type key: unicode
'''
return key in self._features
def merge_with(self, other, multiset_op, other_op=None):
'''Merge this feature collection with another.
Merges two feature collections using the given ``multiset_op``
on each corresponding multiset and returns a new
:class:`FeatureCollection`. The contents of the two original
feature collections are not modified.
For each feature name in both feature sets, if either feature
collection being merged has a :class:`collections.Counter`
instance as its value, then the two values are merged by
calling `multiset_op` with both values as parameters. If
either feature collection has something other than a
:class:`collections.Counter`, and `other_op` is not
:const:`None`, then `other_op` is called with both values to
merge them. If `other_op` is :const:`None` and a feature
is not present in either feature collection with a counter
value, then the feature will not be present in the result.
:param other: The feature collection to merge into ``self``.
:type other: :class:`FeatureCollection`
:param multiset_op: Function to merge two counters
:type multiset_op: fun(Counter, Counter) -> Counter
:param other_op: Function to merge two non-counters
:type other_op: fun(object, object) -> object
:rtype: :class:`FeatureCollection`
'''
result = FeatureCollection()
for ms_name in set(self._counters()) | set(other._counters()):
c1 = self.get(ms_name, None)
c2 = other.get(ms_name, None)
if c1 is None and c2 is not None:
c1 = c2.__class__()
if c2 is None and c1 is not None:
c2 = c1.__class__()
result[ms_name] = multiset_op(c1, c2)
if other_op is not None:
for o_name in (set(self._not_counters()) |
set(other._not_counters())):
v = other_op(self.get(o_name, None), other.get(o_name, None))
if v is not None:
result[o_name] = v
return result
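    # Usage sketch mirroring the ``__add__`` example below: with
    # fc1 = FeatureCollection({'foo': Counter('ab')}) and
    # fc2 = FeatureCollection({'foo': Counter('bc')}),
    # fc1.merge_with(fc2, operator.add)['foo'] counts {'a': 1, 'b': 2, 'c': 1}.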
def __add__(self, other):
'''
Add features from two FeatureCollections.
>>> fc1 = FeatureCollection({'foo': Counter('abbb')})
>>> fc2 = FeatureCollection({'foo': Counter('bcc')})
>>> fc1 + fc2
FeatureCollection({'foo': Counter({'b': 4, 'c': 2, 'a': 1})})
Note that if a feature in either of the collections is not an
instance of :class:`collections.Counter`, then it is ignored.
'''
return self.merge_with(other, operator.add)
def __iadd__(self, other):
'''
In-place add features from two FeatureCollections.
>>> fc1 = FeatureCollection({'foo': Counter('abbb')})
>>> fc2 = FeatureCollection({'foo': Counter('bcc')})
>>> fc1 += fc2
FeatureCollection({'foo': Counter({'b': 4, 'c': 2, 'a': 1})})
Note that if a feature in either of the collections is not an
instance of :class:`collections.Counter`, then it is ignored.
'''
if self.read_only:
raise ReadOnlyException()
fc = self.merge_with(other, operator.iadd)
self._features = fc._features
return self
def __sub__(self, other):
'''
Subtract features from two FeatureCollections.
>>> fc1 = FeatureCollection({'foo': Counter('abbb')})
>>> fc2 = FeatureCollection({'foo': Counter('bcc')})
>>> fc1 - fc2
FeatureCollection({'foo': Counter({'b': 2, 'a': 1})})
Note that if a feature in either of the collections is not an
instance of :class:`collections.Counter`, then it is ignored.
'''
return self.merge_with(other, operator.sub)
def __isub__(self, other):
'''
In-place subtract features from two FeatureCollections.
>>> fc1 = FeatureCollection({'foo': Counter('abbb')})
>>> fc2 = FeatureCollection({'foo': Counter('bcc')})
>>> fc1 -= fc2
FeatureCollection({'foo': Counter({'b': 2, 'a': 1})})
Note that if a feature in either of the collections is not an
instance of :class:`collections.Counter`, then it is ignored.
'''
if self.read_only:
raise ReadOnlyException()
fc = self.merge_with(other, operator.isub)
self._features = fc._features
return self
def __mul__(self, coef):
fc = copy.deepcopy(self)
fc *= coef
return fc
def __imul__(self, coef):
'''In-place multiplication by a scalar.'''
if self.read_only:
raise ReadOnlyException()
if coef == 1:
return self
for name in self._counters():
self[name] *= coef
return self
def total(self):
'''
Returns sum of all counts in all features that are multisets.
'''
feats = imap(lambda name: self[name], self._counters())
return sum(chain(*map(lambda mset: map(abs, mset.values()), feats)))
def __getitem__(self, key):
'''
Returns the feature named ``key``. If ``key`` does not
exist, then a new empty :class:`StringCounter` is created.
Alternatively, a second optional positional argument can
be given that is used as a default value.
Note that traditional indexing is supported too, e.g.::
fc = FeatureCollection({'NAME': {'foo': 1}})
assert fc['NAME'] == StringCounter({'foo': 1})
:type key: unicode
'''
v = self._features.get(key)
if v is not None:
return v
return self.__missing__(key)
def get(self, key, default=None):
if key not in self:
return default
else:
return self._features[key]
def __setitem__(self, key, value):
if self.read_only:
raise ReadOnlyException()
if not isinstance(key, unicode):
if isinstance(key, str):
key = unicode(key)
else:
raise TypeError(key)
if hasattr(value, 'read_only'):
value.read_only = self.read_only
self._features[key] = value
def __delitem__(self, key):
'''
Deletes the feature named ``key`` from this feature collection.
:type key: unicode
'''
if self.read_only:
raise ReadOnlyException()
if key in self._features:
del self._features[key]
else:
raise KeyError(key)
def _counters(self):
'''
Returns a generator of ``feature_name`` where ``feature_name``
corresponds to a feature that is an instance of
:class:`collections.Counter`. This method is useful when you
need to perform operations only on features that are multisets.
'''
return ifilter(lambda n: is_counter(self[n]), self)
def _not_counters(self):
'''
Returns a generator of ``feature_name`` where ``feature_name``
corresponds to a feature that is **not** an instance of
:class:`collections.Counter`. This method is useful when you
need to perform operations only on features that are **not**
multisets.
'''
return ifilter(lambda n: not is_counter(self[n]), self)
def __iter__(self):
return self._features.iterkeys()
def __len__(self):
'''
Returns the number of features in this collection.
'''
return len(set(iter(self)))
@property
def read_only(self):
'''Flag if this feature collection is read-only.
When a feature collection is read-only, no part of it
can be modified. Individual feature counters cannot
be added, deleted, or changed. This attribute is
preserved across serialization and deserialization.
'''
return self._read_only
@read_only.setter
def read_only(self, ro):
self._read_only = ro
for (k, v) in self._features.iteritems():
if hasattr(v, 'read_only'):
v.read_only = ro
class FeatureTypeRegistry (object):
'''
This is a pretty bogus class that has exactly one instance. Its
purpose is to guarantee the correct lazy loading of entry points
into the registry.
'''
ENTRY_POINT_GROUP = 'dossier.fc.feature_types'
DEFAULT_FEATURE_TYPE_NAME = 'StringCounter'
DEFAULT_FEATURE_TYPE_PREFIX_NAMES = {
FeatureCollection.DISPLAY_PREFIX: 'StringCounter',
FeatureCollection.EPHEMERAL_PREFIX: 'StringCounter',
FeatureCollection.TOKEN_PREFIX: 'FeatureTokens',
FeatureCollection.GEOCOORDS_PREFIX: 'GeoCoords',
FeatureCollection.NESTED_PREFIX: 'NestedStringCounter',
}
@classmethod
def get_default_feature_type_name(cls, feature_name):
by_prefix = cls.DEFAULT_FEATURE_TYPE_PREFIX_NAMES.get(feature_name[0])
if by_prefix is None:
return cls.DEFAULT_FEATURE_TYPE_NAME
else:
return by_prefix
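    # For example (feature names here are illustrative): a plain name such as
    # u'NAME' maps to 'StringCounter', u'!coords' to 'GeoCoords', u'@tokens' to
    # 'FeatureTokens' and u'%nested' to 'NestedStringCounter', per the prefix
    # table above.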
def __init__(self):
self._registry = {}
self._inverse = {}
self._entry_points = False
def add(self, name, obj):
'''Register a new feature serializer.
The feature type should be one of the fixed set of feature
        representations, and `name` should be one of the registered type
        names (the keys of ``cbor_names_to_tags``). `obj` is a describing
object with three fields: `constructor` is a callable that
creates an empty instance of the representation; `dumps` is
a callable that takes an instance of the representation and
returns a JSON-compatible form made of purely primitive
objects (lists, dictionaries, strings, numbers); and `loads`
is a callable that takes the response from `dumps` and recreates
the original representation.
Note that ``obj.constructor()`` *must* return an
object that is an instance of one of the following
        types (collected in ``ALLOWED_FEATURE_TYPES``): ``unicode``,
        ``StringCounter``, ``SparseVector``, ``DenseVector``,
        ``FeatureTokens``, ``GeoCoords`` or
        ``NestedStringCounter``. If it isn't, a
:exc:`ValueError` is raised.
'''
ro = obj.constructor()
if name not in cbor_names_to_tags:
print(name)
raise ValueError(
'Unsupported feature type name: "%s". '
'Allowed feature type names: %r'
% (name, cbor_names_to_tags.keys()))
if not is_valid_feature_instance(ro):
raise ValueError(
'Constructor for "%s" returned "%r" which has an unknown '
'sub type "%r". (mro: %r). Object must be an instance of '
'one of the allowed types: %r'
% (name, ro, type(ro), type(ro).mro(), ALLOWED_FEATURE_TYPES))
self._registry[name] = {'obj': obj, 'ro': obj.constructor()}
self._inverse[obj.constructor] = name
def feature_type_name(self, feat_name, obj):
ty = type(obj)
if ty not in self._inverse:
raise SerializationError(
'Python type "%s" is not in the feature type registry: %r\n\n'
'The offending feature (%s): %r'
% (ty, self._inverse, feat_name, obj))
return self._inverse[ty]
def is_serializeable(self, obj):
return type(obj) in self._inverse
def _get(self, type_name):
self._load_entry_points()
if type_name not in self._registry:
raise SerializationError(
'Feature type %r not in feature type registry: %r'
% (type_name, self._registry))
return self._registry[type_name]
def get(self, type_name):
return self._get(type_name)['obj']
def get_constructor(self, type_name):
return self._get(type_name)['obj'].constructor
def get_read_only(self, type_name):
return self._get(type_name)['ro']
def types(self):
self._load_entry_points()
return self._registry.keys()
def _load_entry_points(self):
if self._entry_points:
return
self._entry_points = True
for epoint in pkg_resources.iter_entry_points(self.ENTRY_POINT_GROUP):
try:
obj = epoint.load()
except (ImportError, pkg_resources.DistributionNotFound):
import traceback
logger.warn(traceback.format_exc())
continue
self.add(epoint.name, obj)
def _reset(self):
self._entry_points = False
self._registry = {}
self._inverse = {}
self.add('StringCounter', StringCounterSerializer)
self.add('Unicode', UnicodeSerializer)
self.add('GeoCoords', GeoCoordsSerializer)
self.add('FeatureTokens', FeatureTokensSerializer)
self.add('NestedStringCounter', NestedStringCounterSerializer)
def __enter__(self):
return self
def __exit__(self, *args):
self._reset()
class UnicodeSerializer(object):
def __init__(self):
raise NotImplementedError()
# cbor natively supports Unicode, so we can use the identity function.
loads = staticmethod(lambda x: x)
dumps = staticmethod(lambda x: x)
constructor = unicode
cbor_names_to_tags = {
    # Tagged just in case someone wants to change the binary format.
# By default, FeatureCollection will deserialize it as a special case
# into a dict of {unicode |--> int} with no tag.
'StringCounter': 55800,
# Not tagged because CBOR supports it natively.
'Unicode': None,
# These are *always* tagged.
'SparseVector': 55801,
'DenseVector': 55802,
# 55803 was FeatureOffsets
'FeatureTokens': 55804,
'GeoCoords': 55805,
'NestedStringCounter': 55806,
}
cbor_tags_to_names = {}
for k, v in cbor_names_to_tags.items():
if v:
cbor_tags_to_names[v] = k
ALLOWED_FEATURE_TYPES = (
unicode, StringCounter, SparseVector, DenseVector,
FeatureTokens,
GeoCoords, NestedStringCounter,
)
def is_native_string_counter(cbor_data):
return isinstance(cbor_data, dict)
def is_counter(obj):
return isinstance(obj, Counter) \
or getattr(obj, 'is_counter', False)
def is_valid_feature_instance(obj):
return isinstance(obj, ALLOWED_FEATURE_TYPES)
registry = FeatureTypeRegistry()
registry._reset()
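def _registry_usage_example():  # pragma: no cover
    """Illustrative sketch added for documentation; not part of the original API.

    Shows how the constructor/dumps/loads contract described in
    :meth:`FeatureTypeRegistry.add` is typically exercised for a type that
    is already registered. The feature value below is made up.
    """
    serializer = registry.get('StringCounter')
    counter = serializer.constructor()        # empty StringCounter instance
    counter['hello'] += 1                     # behaves like collections.Counter
    primitives = serializer.dumps(counter)    # primitive, CBOR/JSON-friendly form
    return serializer.loads(primitives)       # round-trips the original value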
| dossier/dossier.fc | python/dossier/fc/feature_collection.py | Python | mit | 26,206 |
#!/usr/local/bin/python3
import argparse
import gym
import logging
import tensorflow as tf
from dqn_agent import DQNAgent
from network import Network
from replay_memory import ReplayMemory
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--log_level', default='INFO', help='Log verbosity')
# Environment
parser.add_argument('--env', default='CartPole-v0',
help='OpenAI Gym environment name')
parser.add_argument('--monitor', action='store_true',
help='Turn on OpenAI Gym monitor')
parser.add_argument('--monitor_path', default='/tmp/gym',
help='Path for OpenAI Gym monitor logs')
parser.add_argument('--disable_video', action='store_true',
                      help='Disable video recording while running the monitor')
# Network
parser.add_argument('--network', default='simple', choices=['simple', 'cnn'],
help='Network architecture type')
parser.add_argument('--lr', default=0.001, type=float, help='Learning rate')
parser.add_argument('--reg_param', default=0.001, type=float,
help='Regularization param')
parser.add_argument('--optimizer', default='sgd',
choices=['adadelta', 'adagrad', 'adam', 'ftrl', 'sgd', 'momentum', 'rmsprop'],
help='Type of optimizer for gradient descent')
parser.add_argument('--momentum', default=0.9, type=float,
help='Momentum value for MomentumOptimizer')
parser.add_argument('--rmsprop_decay', default=0.95, type=float,
help='Decay for RMSPropOptimizer')
# Agent
parser.add_argument('--num_episodes', default=10000, type=int,
help='Number of episodes to train')
parser.add_argument('--max_steps_per_episode', default=1000, type=int,
help='Max steps to train for each episode')
parser.add_argument('--minibatch_size', default=30, type=int,
help='Minibatch size for each training step')
parser.add_argument('--frames_per_state', default=1, type=int,
help='Number of consecutive frames that form a state')
parser.add_argument('--resize_width', default=0, type=int,
help='Resized screen width for frame pre-processing (0 for no resizing)')
parser.add_argument('--resize_height', default=0, type=int,
help='Resized screen height for frame pre-processing (0 for no resizing)')
parser.add_argument('--reward_discount', default=0.9, type=float,
help='Discount factor for future rewards')
parser.add_argument('--replay_memory_capacity', default=10000, type=int,
help='Max size of the memory for experience replay')
parser.add_argument('--replay_start_size', default=0, type=int,
                      help='Size to prefill the replay memory to using random actions')
parser.add_argument('--init_random_action_prob', default=0.9, type=float,
help='Initial probability for choosing random actions')
parser.add_argument('--min_random_action_prob', default=0.1, type=float,
help='Threshold at which to stop decaying random action probability')
parser.add_argument('--random_action_explore_steps', default=10000, type=int,
help='Number of steps over which to decay the random action probability')
parser.add_argument('--update_freq', default=1, type=int,
help='Number of actions by the agent between successive learning updates')
parser.add_argument('--target_update_freq', default=10000, type=int,
help='Target network update frequency in terms of number of training steps')
# Distribution
parser.add_argument('--ps_hosts', default='',
help='Parameter Servers. Comma separated list of host:port pairs')
parser.add_argument('--worker_hosts', default='localhost:0',
help='Worker hosts. Comma separated list of host:port pairs')
parser.add_argument('--job', default='worker', choices=['ps', 'worker'],
help='Whether this instance is a param server or a worker')
parser.add_argument('--task_id', default=0, type=int,
help='Index of this task within the job')
parser.add_argument('--gpu_id', default=0, type=int,
help='Index of the GPU to run the training on')
parser.add_argument('--sync', action='store_true',
help='Whether to perform synchronous training')
parser.add_argument('--disable_cpu_param_pinning', action='store_true',
                      help='If set, param server will not pin the variables to CPU, allowing '
'TensorFlow to default to GPU:0 if the host has GPU devices')
parser.add_argument('--disable_target_replication', action='store_true',
help='Unless set, the target params will be replicated on each GPU. '
'Setting the flag defaults to a single set of target params managed '
'by the param server.')
# Summary
parser.add_argument('--logdir', default='/tmp/train_logs',
help='Directory for training summary and logs')
parser.add_argument('--summary_freq', default=100, type=int,
                      help='Frequency for writing summary (in terms of global steps)')
return parser.parse_args()
def run_worker(cluster, server, args):
env = gym.make(args.env)
worker_job = args.job
num_workers = len(cluster.job_tasks(worker_job))
ps_device = None
if num_workers > 1 and not args.disable_cpu_param_pinning:
# If in a distributed setup, have param servers pin params to CPU.
# Otherwise, in a multi-GPU setup, /gpu:0 would be used for params
# by default, and it becomes a communication bottleneck.
ps_device = '/cpu'
# If no GPU devices are found, then allow_soft_placement in the
# config below results in falling back to CPU.
worker_device = '/job:%s/task:%d/gpu:%d' % \
(worker_job, args.task_id, args.gpu_id)
replica_setter = tf.train.replica_device_setter(
worker_device=worker_device,
cluster=cluster,
)
with tf.device(replica_setter):
network = Network.create_network(
config=args,
input_shape=DQNAgent.get_input_shape(env, args),
num_actions=env.action_space.n,
num_replicas=num_workers,
ps_device=ps_device,
worker_device=worker_device,
)
init_op = tf.initialize_all_variables()
# Designate the first worker task as the chief
is_chief = (args.task_id == 0)
# Create a Supervisor that oversees training and co-ordination of workers
sv = tf.train.Supervisor(
is_chief=is_chief,
logdir=args.logdir,
init_op=init_op,
global_step=network.global_step,
summary_op=None, # Explicitly disable as DQNAgent handles summaries
recovery_wait_secs=3,
)
# Start the gym monitor if needed
video = False if args.disable_video else None
if args.monitor:
env.monitor.start(args.monitor_path, resume=True, video_callable=video)
# Initialize memory for experience replay
replay_memory = ReplayMemory(args.replay_memory_capacity)
# Start the session and kick-off the train loop
config=tf.ConfigProto(log_device_placement=True, allow_soft_placement=True)
with sv.managed_session(server.target, config=config) as session:
dqn_agent = DQNAgent(
env, network, session, replay_memory, args,
enable_summary=is_chief, # Log summaries only from the chief worker
)
dqn_agent.train(args.num_episodes, args.max_steps_per_episode, sv)
# Close the gym monitor
if args.monitor:
env.monitor.close()
# Stop all other services
sv.stop()
if __name__ == '__main__':
args = parse_args()
logging.getLogger().setLevel(args.log_level)
ps_hosts = args.ps_hosts.split(',') if args.ps_hosts else []
worker_hosts = args.worker_hosts.split(',') if args.worker_hosts else []
cluster = tf.train.ClusterSpec({'ps': ps_hosts, 'worker': worker_hosts})
# Create a TensorFlow server that acts either as a param server or
# as a worker. For non-distributed setup, we still create a single
# instance cluster without any --ps_hosts and one item in --worker_hosts
# that corresponds to localhost.
server = tf.train.Server(
cluster,
job_name=args.job,
task_index=args.task_id
)
if args.job == 'ps':
# Param server
server.join()
elif args.job == 'worker':
# Start the worker and run the train loop
run_worker(cluster, server, args)
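# ---------------------------------------------------------------------------
# Illustrative invocations (added sketch; host names and flag values are
# assumptions, not recommended settings).
#
# Single-machine run, using the default single-item cluster built above:
#   python main.py --env CartPole-v0 --network simple --num_episodes 5000
#
# Distributed run with one parameter server and two workers:
#   python main.py --job ps --task_id 0 \
#       --ps_hosts host0:2222 --worker_hosts host1:2223,host2:2224
#   python main.py --job worker --task_id 0 \
#       --ps_hosts host0:2222 --worker_hosts host1:2223,host2:2224
#   python main.py --job worker --task_id 1 \
#       --ps_hosts host0:2222 --worker_hosts host1:2223,host2:2224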
| viswanathgs/dist-dqn | src/main.py | Python | mit | 7,997 |
import asynctest
from asynctest.mock import patch
import json
class TestRundeckJob(asynctest.TestCase):
def setUp(self):
patcher1 = patch('charlesbot_rundeck.rundeck_job.http_get_request')
self.addCleanup(patcher1.stop)
self.mock_http_get_request = patcher1.start()
from charlesbot_rundeck.rundeck_job import RundeckJob
self.rd_job = RundeckJob()
def test_invalid_rundeck_json_response(self):
self.mock_http_get_request.side_effect = ["{}"]
success = yield from self.rd_job.retrieve_rundeck_job_info(
"token",
"baseurl",
"project name",
"job name"
)
self.assertFalse(success)
def test_empty_rundeck_response(self):
self.mock_http_get_request.side_effect = ["[]"]
success = yield from self.rd_job.retrieve_rundeck_job_info(
"token",
"baseurl",
"project name",
"job name"
)
self.assertFalse(success)
def test_single_rundeck_response(self):
response = [
{
"id": "rd1",
"name": "rundeckone",
}
]
self.mock_http_get_request.side_effect = [json.dumps(response)]
success = yield from self.rd_job.retrieve_rundeck_job_info(
"token",
"baseurl",
"project name",
"job name"
)
self.assertTrue(success)
self.assertEqual(self.rd_job.id, "rd1")
self.assertEqual(self.rd_job.name, "rundeckone")
self.assertEqual(self.rd_job.friendly_name, "")
def test_multiple_rundeck_responses(self):
response = [
{
"id": "rd1",
"name": "rundeckone",
},
{
"id": "rd2",
"name": "rundecktwo",
}
]
self.mock_http_get_request.side_effect = [json.dumps(response)]
success = yield from self.rd_job.retrieve_rundeck_job_info(
"token",
"baseurl",
"project name",
"job name"
)
self.assertFalse(success)
| marvinpinto/charlesbot-rundeck | tests/test_rundeck_job.py | Python | mit | 2,175 |
import re
import struct
from traitlets import Bytes, Unicode, TraitError
# reference to https://stackoverflow.com/a/385597/1338797
float_re = r'''
(?:
[-+]? # optional sign
(?:
(?: \d* \. \d+ ) # .1 .12 .123 etc 9.1 etc 98.1 etc.
|
(?: \d+ \.? ) # 1. 12. 123. etc 1 12 123 etc.
)
# followed by optional exponent part if desired
(?: [Ee] [+-]? \d+ ) ?
)
'''
stl_re = r'''
solid .* \n # header
(?:
\s* facet \s normal (?: \s ''' + float_re + r''' ){3}
\s* outer \s loop
(?:
\s* vertex (?: \s ''' + float_re + r''' ){3}
){3}
\s* endloop
\s* endfacet
) + # at least 1 facet.
\s* endsolid (?: .*)?
\s* $ # allow trailing WS
'''
ascii_stl = re.compile(stl_re, re.VERBOSE)
class AsciiStlData(Unicode):
def validate(self, owner, stl):
stl = super(AsciiStlData, self).validate(owner, stl)
if ascii_stl.match(stl) is None:
raise TraitError('Given string is not valid ASCII STL data.')
return stl
class BinaryStlData(Bytes):
HEADER = 80
COUNT_SIZE = 4
FACET_SIZE = 50
def validate(self, owner, stl):
stl = super(BinaryStlData, self).validate(owner, stl)
if len(stl) < self.HEADER + self.COUNT_SIZE:
raise TraitError(
'Given bytestring is too short ({}) for Binary STL data.'
.format(len(stl))
)
(num_facets,) = struct.unpack('<I', stl[self.HEADER : self.HEADER + self.COUNT_SIZE])
expected_size = self.HEADER + self.COUNT_SIZE + num_facets * self.FACET_SIZE
if len(stl) != expected_size:
raise TraitError(
'Given bytestring has wrong length ({}) for Binary STL data. '
'For {} facets {} bytes were expected.'
.format(len(stl), num_facets, expected_size)
)
return stl
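def _minimal_binary_stl(num_facets=0):
    """Illustrative helper added as a sketch; not used by the validators above.

    Builds the smallest byte string accepted by :class:`BinaryStlData`'s size
    check: an 80-byte header, a little-endian facet count, and ``num_facets``
    zeroed 50-byte facet records.
    """
    header = b'\x00' * BinaryStlData.HEADER
    count = struct.pack('<I', num_facets)
    facets = b'\x00' * (BinaryStlData.FACET_SIZE * num_facets)
    return header + count + facets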
| K3D-tools/K3D-jupyter | k3d/validation/stl.py | Python | mit | 1,932 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Sphinx configuration."""
from __future__ import print_function
import os
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Do not warn on external images.
suppress_warnings = ['image.nonlocal_uri']
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
extlinks = {
'source': ('https://github.com/inveniosoftware/invenio-search-ui/tree/master/%s', 'source')
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Invenio-Search-UI'
copyright = u'2015, CERN'
author = u'CERN'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Get the version string. Cannot be done with import!
g = {}
with open(os.path.join(os.path.dirname(__file__), '..',
'invenio_search_ui', 'version.py'),
'rt') as fp:
exec(fp.read(), g)
version = g['__version__']
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
html_theme = 'alabaster'
html_theme_options = {
'description': 'UI for Invenio-Search.',
'github_user': 'inveniosoftware',
'github_repo': 'invenio-search-ui',
'github_button': False,
'github_banner': True,
'show_powered_by': False,
'extra_nav_links': {
'invenio-search-ui@GitHub': 'https://github.com/inveniosoftware/invenio-search-ui',
'invenio-search-ui@PyPI': 'https://pypi.python.org/pypi/invenio-search-ui/',
}
}
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'invenio-search-ui_namedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'invenio-search-ui.tex', u'invenio-search-ui Documentation',
u'CERN', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'invenio-search-ui', u'invenio-search-ui Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'invenio-search-ui', u'Invenio-Search-UI Documentation',
author, 'invenio-search-ui', 'UI for Invenio-Search.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# Autodoc configuraton.
autoclass_content = 'both'
| inveniosoftware/invenio-search-ui | docs/conf.py | Python | mit | 10,173 |
# -*- coding: utf-8 -*-
"""
Send many mails
===============
Send mail to students. The script considers two columns from
one spreadsheet. The first one is about the receivers,
the second one about the customized message for each of them.
"""
#########################################
# logging and imports
import os
import warnings
from pyquickhelper.loghelper import fLOG
fLOG(OutputPrint=True)
from ensae_teaching_cs.automation_students import enumerate_feedback, enumerate_send_email
import pymmails
#########################################
# Define the email content.
cc = []
sujet = "Question de code - ENSAE 1A - projet de programmation"
col_name = None # "mail"
col_mail = "mail"
columns = ["sujet", "Question de code"]
col_group = "groupe"
delay_sending = False
test_first_mail = False  # preview the first email before sending them
skip = 0 # to start again after a failure
only = None
skiprows = 1
folder = os.path.normpath(os.path.abspath(os.path.join(
*([os.path.dirname(__file__)] + ([".."] * 5) + ["_data", "ecole", "ENSAE", "2017-2018", "1A_projet"]))))
student = os.path.join(folder, "ENSAE 1A - projet python.xlsx")
if not os.path.exists(student):
raise FileNotFoundError(student)
##############################
# common opening
begin = """
Bonjour,
""" + \
"""
Bonjour,
Voici les questions de code. Celles-ci sont
extraites des programmes que vous m'avez transmis.
Les noms de fonctions que j'utilise y font référence
quand je ne recopie pas le code. La réponse est souvent
liée à la performance.
""".replace("\n", " ")
#####################
# common closing
end = """
""".replace("\n", " ") + \
"""
Xavier
"""
###################################
# Read the Excel spreadsheet
import pandas
df = pandas.read_excel(student, sheet_name=0,
skiprows=skiprows, engine='openpyxl')
if len(df.columns) < 4:
raise ValueError("Probably an issue while reading the spreadsheet:\n{0}\n{1}".format(
df.columns, df.head()))
if len(" ".join(df.columns).split("Unnamed")) > 4:
raise ValueError("Probably an issue while reading the spreadsheet:\n{0}\n{1}".format(
df.columns, df.head()))
fLOG("shape", df.shape)
fLOG("columns", df.columns)
###################################
# password
from pyquickhelper.loghelper import get_password
user = get_password("gmail", "ensae_teaching_cs,user")
pwd = get_password("gmail", "ensae_teaching_cs,pwd")
###################################
# Send the emails.
# The delay_sending parameter returns a function which only needs to be
# called to actually send the email.
# If mailbox is None, the function prints the results but does nothing.
fLOG("connect", user)
mailbox = pymmails.sender.create_smtp_server("gmail", user, pwd)
fLOG("send")
# replace mailbox with None to preview the first email without sending it
mails = enumerate_send_email(mailbox if not test_first_mail else None,
sujet, user + "@gmail.com",
df, exc=True, fLOG=fLOG, delay_sending=delay_sending,
begin=begin, end=end, skip=skip,
cc=cc, only=only, col_group=col_group,
col_name=col_name, col_mail=col_mail,
cols=columns)
for i, m in enumerate(mails):
fLOG("------------", m)
###################################
# end
mailbox.close()
fLOG("Done")
| sdpython/ensae_teaching_cs | _doc/examples/automation/send_mails.py | Python | mit | 3,434 |
from __future__ import unicode_literals
from django import template
from django.utils.html import mark_safe
from .. import conf
from ..models import Icon
register = template.Library()
@register.simple_tag
def svg_icons_icon(name, **kwargs):
filters = {'name__iexact': name}
value = ''
if kwargs.get('group'):
filters['group__name__iexact'] = kwargs.get('group')
try:
icon = Icon.objects.get(**filters)
value = icon.source
except Icon.DoesNotExist:
if conf.settings.DEBUG is True:
value = ('<span class="svg-icon-error">'
'<b>template tag svg_icons_icon: </b>'
'no icon by the name "{0}"</span>').format(name)
else:
value = ''
except Icon.MultipleObjectsReturned:
if conf.settings.DEBUG is True:
value = ('<span class="svg-icon-error">'
'<b>template tag svg_icons_icon: </b>'
'there are more than one '
'icon by the name "{0}" '
'try to use the "group" parameter</span>').format(name)
else:
icon = Icon.objects.filter(**filters)[:1].get()
value = icon.source
return mark_safe(value)
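# ---------------------------------------------------------------------------
# Illustrative template usage (added sketch; the icon and group names are
# made up):
#
#   {% load svg_icons_tags %}
#   {% svg_icons_icon "arrow-left" %}
#   {% svg_icons_icon "arrow-left" group="navigation" %}
#
# When several icons share a name and DEBUG is off, the tag falls back to the
# first match; passing ``group`` narrows the lookup instead.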
| rouxcode/django-svg-icons | svg_icons/templatetags/svg_icons_tags.py | Python | mit | 1,258 |
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
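# Illustrative example (added; values are placeholders) of the same setting
# filled in for a local SQLite test database:
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': 'test.db',
#     }
# }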
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '2=ejgw$ytij5oh#i$eeg237okk8m3nkm@5a!j%&1(s2j7yl2xm'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'auth_functional.RequestFixtureMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = ''
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
'tests/templates',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'django_paginator_ext',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| anler/django-paginator-ext | tests/settings.py | Python | mit | 5,356 |
import re, sys, os
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
sys.argv[0] = __file__
sys.exit(run())
| commitmachine/pg-hoffserver | pghoffserver/gunicorn_python.py | Python | mit | 131 |
from random import random
from functools import partial
import csv
import json
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.spinner import Spinner
from kivy.graphics import Color, Line, Ellipse
import tensorflow as tf
import numpy as np
maxLen = 60
class Gesture:
def __init__(self):
self.classId = 0
self.points = []
def from_points(self, input):
for i in range(2, len(input), 2):
self.points.append((input[i] - input[i-2], input[i+1]-input[i-1]))
def to_dict(self):
dict = {
"classId" : self.classId,
"points" : self.points
}
return dict
def from_dict(dict):
g = Gesture()
g.classId = dict["classId"]
g.points = dict["points"]
return g
def to_tensor(self):
return self.points[:maxLen] + [self.points[-1]]*(maxLen - len(self.points))
class GestureBase:
def __init__(self):
self.gestures = []
self.gestureIds = {'None': 0}
def save(self, path):
print("Gestures %d" % len(self.gestures))
with open(path, 'w') as file:
data = {
"classes": self.gestureIds,
"gestures": [g.to_dict() for g in self.gestures],
}
json.dump(data, file, indent=2)
def load(self, path):
with open(path, 'r') as file:
data = json.load(file)
self.gestureIds = data["classes"]
self.gestures = [Gesture.from_dict(g) for g in data["gestures"]]
def get_classes_in_order(self):
items = sorted(self.gestureIds.items(), key=lambda p: p[1])
return [i[0] for i in items]
def add_gesture(self, className, points):
gesture = Gesture()
if className not in self.gestureIds:
self.gestureIds[className] = gesture.classId = len(self.gestureIds)
else:
gesture.classId = self.gestureIds[className]
gesture.from_points(points)
self.gestures.append(gesture)
def to_tensor(self):
return np.array([g.to_tensor() for g in self.gestures])
def classes_to_tensor(self):
ret = []
for g in self.gestures:
list = [0] * 10
list[g.classId] = 1
ret.append(list)
return np.array(ret)
def lengths_to_tensor(self):
ret = [len(g.points) for g in self.gestures]
return np.array(ret)
#return [100 for g in self.gestures]
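# ---------------------------------------------------------------------------
# Illustrative sketch (added; the point values are made up): from_points turns
# absolute screen coordinates into successive deltas, and to_tensor pads the
# delta list to maxLen by repeating the last delta, e.g.
#
#   g = Gesture()
#   g.from_points([0, 0, 10, 0, 10, 5])   # deltas: [(10, 0), (0, 5)]
#   len(g.to_tensor()) == maxLen          # True; (0, 5) repeated 58 more times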
class GestureRecognizer:
numberOfExamples = None #dynamic
sampleVectorLen = 2 # x, y coords
numMemCells = 24
def __init__(self):
self.inputData = tf.placeholder(tf.float32, [None, maxLen, self.sampleVectorLen])
self.expectedClasses = tf.placeholder(tf.float32, [None, 10])
self.inputLengths = tf.placeholder(tf.int32, [None])
cell = tf.contrib.rnn.LSTMCell(self.numMemCells, state_is_tuple=True)
cellOut, cellState = tf.nn.dynamic_rnn(
cell, self.inputData, dtype=tf.float32, sequence_length=self.inputLengths)
batchSize = tf.shape(cellOut)[0]
index = tf.range(0, batchSize) * maxLen + (self.inputLengths - 1)
flat = tf.reshape(cellOut, [-1, self.numMemCells])
last = tf.gather(flat, index)
print(last.get_shape())
weight = tf.Variable(tf.truncated_normal([self.numMemCells, int(self.expectedClasses.get_shape()[1])], stddev = 0.1))
bias = tf.Variable(tf.constant(0.1, shape=[self.expectedClasses.get_shape()[1]]))
#prediction = tf.nn.softmax(tf.matmul(last, weight) + bias)
prediction = tf.matmul(last, weight) + bias
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=self.expectedClasses))
#cross_entropy = -tf.reduce_sum(self.expectedClasses * tf.log(tf.clip_by_value(prediction,1e-10,1.0)))
optimizer = tf.train.GradientDescentOptimizer(0.1)
self.trainer = optimizer.minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(prediction,1), tf.argmax(self.expectedClasses,1))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
self.predictionMax = tf.argmax(prediction, 1)
self.classifier = tf.nn.softmax(prediction)
self.sess = tf.Session()
init_op = tf.global_variables_initializer()
self.sess.run(init_op)
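    # -----------------------------------------------------------------------
    # Illustrative note on the gather trick above (added sketch; the lengths
    # are made up): with maxLen == 60, a batch of two gestures of lengths 3
    # and 5 flattens cellOut to shape [2 * 60, numMemCells]; the computed
    # indices are [0*60 + 2, 1*60 + 4] = [2, 64], i.e. the last valid output
    # of each sequence is selected for classification.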
def train(self, base):
examples = base.to_tensor()
labels = base.classes_to_tensor()
lengths = base.lengths_to_tensor()
feed = {self.inputData: examples,
self.expectedClasses: labels,
self.inputLengths: lengths}
for i in range(1000):
self.sess.run(self.trainer, feed)
if i % 10 == 0:
print("Epoch %d" % i)
print(self.sess.run(self.accuracy, feed))
print("Trained")
def classify(self, points):
gesture = Gesture()
gesture.from_points(points)
feed = {self.inputData: [gesture.to_tensor()],
self.inputLengths: [len(gesture.points)]}
index, prob = self.sess.run([self.predictionMax, self.classifier], feed)
index = index[0]
print("Class Id %d" % index)
prob = prob[0][index]
print("Probability {:.1%}".format(prob))
class ToolBar(BoxLayout):
def __init__(self, **kwargs):
self.controller = kwargs.pop("controller")
super(ToolBar, self).__init__(**kwargs)
self.add_widget(Label(text='Class:'))
self.classesSpinner = Spinner(values=['None'], text='None')
self.classesSpinner.bind(text=self.select_class)
self.add_widget(self.classesSpinner)
self.add_widget(Label(text='Add new'))
self.classInput = TextInput(multiline=False)
self.classInput.bind(on_text_validate=self.add_class)
self.add_widget(self.classInput)
self.saveButton = Button(text='Save Gestures')
self.saveButton.bind(on_release=self.save_gestures)
self.add_widget(self.saveButton)
self.loadButton = Button(text='Load Gestures')
self.loadButton.bind(on_release=self.load_gestures)
self.add_widget(self.loadButton)
self.learnButton = Button(text='Learn')
self.learnButton.bind(on_release=self.learn_gestures)
self.add_widget(self.learnButton)
self.toggleModeButton = Button(text='Adding')
self.toggleModeButton.bind(on_release=self.toggle_mode)
self.add_widget(self.toggleModeButton)
def save_gestures(self, button):
self.controller.save()
def load_gestures(self, button):
self.controller.load()
def learn_gestures(self, button):
self.controller.learn()
def classify_gestures(self, button):
self.controller.classify()
def toggle_mode(self, button):
button.text = self.controller.toggle_mode()
def add_class(self, text):
self.classesSpinner.values.append(text.text)
text.text = ''
def select_class(self, spinner, text):
self.controller.className = text
class MyPaintWidget(Widget):
previousLine = None
controller = None
def __init__(self, **kwargs):
super(MyPaintWidget, self).__init__(**kwargs)
def on_touch_down(self, touch):
self.p = (touch.x, touch.y)
if self.collide_point(*self.p):
self.canvas.clear()
color = (random(), 1, 1)
with self.canvas:
Color(*color, mode='hsv')
touch.ud['line'] = Line(points=[touch.x, touch.y])
Ellipse(pos=[touch.x - 2, touch.y - 2], size=[4, 4])
def on_touch_move(self, touch):
if 'line' in touch.ud:
p = [touch.x, touch.y]
if ((np.linalg.norm(np.subtract(p, self.p))) > 20):
with self.canvas:
Ellipse(pos=[touch.x - 2, touch.y - 2], size=[4, 4])
line = touch.ud['line']
line.points += p
self.p = p
def on_touch_up(self, touch):
if 'line' in touch.ud:
line = touch.ud['line']
self.previousLine = line
self.controller.handle_gesture(line.points)
print(len(line.points))
class GestureApp(App):
def build(self):
layout = BoxLayout(orientation='vertical')
self.toolBar = ToolBar(size_hint=(1, None), height=40, controller=self)
layout.add_widget(self.toolBar)
mainArea = MyPaintWidget(size_hint=(1, 1))
mainArea.controller = self
layout.add_widget(mainArea)
return layout
def clear_canvas(self, obj):
self.painter.canvas.clear()
def __init__(self, **kwargs):
super(GestureApp, self).__init__(**kwargs)
self.handle_gesture = self.add_gesture_from_points
self.gestureRecognizer = GestureRecognizer()
self.base = GestureBase()
self.className = 'None'
def add_gesture_from_points(self, points):
self.base.add_gesture(self.className, points)
def save(self):
self.base.save("Gestures.json")
def load(self):
self.base.load("Gestures.json")
asd = self.base.get_classes_in_order()
print(asd)
self.toolBar.classesSpinner.values = self.base.get_classes_in_order()
def learn(self):
self.gestureRecognizer.train(self.base)
def classify(self, points):
self.gestureRecognizer.classify(points)
def toggle_mode(self):
if(self.handle_gesture == self.add_gesture_from_points):
self.handle_gesture = self.classify
return "Classifying"
else:
self.handle_gesture = self.add_gesture_from_points
return "Adding"
if __name__ == '__main__':
GestureApp().run() | romanchom/GestureRecognitionVR | MouseGestures/App.py | Python | mit | 8,792 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-02-22 15:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='deployment',
name='platform_id',
),
migrations.RemoveField(
model_name='deployment',
name='platform_ip',
),
migrations.AlterField(
model_name='deployment',
name='sensor_id',
field=models.TextField(unique=True),
),
]
| cloudcomputinghust/IoT | co-ordinator/api/migrations/0002_auto_20170222_1514.py | Python | mit | 664 |
import datetime
from django.conf import settings
from django import template
from django.core.urlresolvers import reverse
from django.utils.dateformat import format
from schedule.conf.settings import CHECK_PERMISSION_FUNC
from schedule.models import Calendar
from schedule.periods import weekday_names, weekday_abbrs, Month
register = template.Library()
@register.inclusion_tag("schedule/_month_table.html", takes_context=True)
def month_table(context, calendar, month, size="regular", shift=None):
if shift:
if shift == -1:
month = month.prev()
if shift == 1:
month = month.next()
if size == "small":
context['day_names'] = weekday_abbrs
else:
context['day_names'] = weekday_names
context['calendar'] = calendar
context['month'] = month
context['size'] = size
return context
@register.inclusion_tag("schedule/_day_cell.html", takes_context=True)
def day_cell(context, calendar, day, month, size="regular" ):
context.update({
'calendar' : calendar,
'day' : day,
'month' : month,
'size' : size
})
return context
@register.inclusion_tag("schedule/_daily_table.html", takes_context=True)
def daily_table( context, day, width, width_slot, height, start=8, end=20, increment=30):
"""
Display a nice table with occurrences and action buttons.
Arguments:
width - width of the table (px)
width_slot - width of the slot column (px)
height - height of the table
start - hour at which the day starts
end - hour at which the day ends
increment - size of a time slot (in minutes)
"""
user = context['request'].user
context['addable'] = CHECK_PERMISSION_FUNC(None, user)
width_occ = width - width_slot
day_part = day.get_time_slot(day.start + datetime.timedelta(hours=start), day.start + datetime.timedelta(hours=end))
occurrences = day_part.get_occurrences()
occurrences = _cook_occurrences(day_part, occurrences, width_occ, height)
# get slots to display on the left
slots = _cook_slots(day_part, increment, width, height)
context['occurrences'] = occurrences
context['slots'] = slots
context['width'] = width
context['width_slot'] = width_slot
context['width_occ'] = width_occ
context['height'] = height
return context
@register.inclusion_tag("schedule/_event_title.html", takes_context=True)
def title(context, occurrence ):
context.update({
'occurrence' : occurrence,
})
return context
@register.inclusion_tag("schedule/_event_options.html", takes_context=True)
def options(context, occurrence ):
context.update({
'occurrence' : occurrence,
'MEDIA_URL' : getattr(settings, "MEDIA_URL"),
})
context['view_occurrence'] = occurrence.get_absolute_url()
user = context['request'].user
if CHECK_PERMISSION_FUNC(occurrence.event, user):
context['edit_occurrence'] = occurrence.get_edit_url()
print context['edit_occurrence']
context['cancel_occurrence'] = occurrence.get_cancel_url()
context['delete_event'] = reverse('delete_event', args=(occurrence.event.id,))
context['edit_event'] = reverse('edit_event', args=(occurrence.event.calendar.slug, occurrence.event.id,))
else:
context['edit_event'] = context['delete_event'] = ''
return context
@register.inclusion_tag("schedule/_create_event_options.html", takes_context=True)
def create_event_url(context, calendar, slot ):
context.update ( {
'calendar' : calendar,
'MEDIA_URL' : getattr(settings, "MEDIA_URL"),
})
lookup_context = {
'calendar_slug': calendar.slug,
}
context['create_event_url'] ="%s%s" % (
reverse( "calendar_create_event", kwargs=lookup_context),
querystring_for_date(slot))
return context
class CalendarNode(template.Node):
def __init__(self, content_object, distinction, context_var, create=False):
self.content_object = template.Variable(content_object)
self.distinction = distinction
self.context_var = context_var
def render(self, context):
calendar = Calendar.objects.get_calendar_for_object(self.content_object.resolve(context), self.distinction)
context[self.context_var] = Calendar.objects.get_calendar_for_object(self.content_object.resolve(context), self.distinction)
return ''
def do_get_calendar_for_object(parser, token):
contents = token.split_contents()
if len(contents) == 4:
tag_name, content_object, _, context_var = contents
distinction = None
elif len(contents) == 5:
tag_name, content_object, distinction, _, context_var = token.split_contents()
else:
raise template.TemplateSyntaxError, "%r tag follows form %r <content_object> as <context_var>" % (token.contents.split()[0], token.contents.split()[0])
return CalendarNode(content_object, distinction, context_var)
class CreateCalendarNode(template.Node):
def __init__(self, content_object, distinction, context_var, name):
self.content_object = template.Variable(content_object)
self.distinction = distinction
self.context_var = context_var
self.name = name
def render(self, context):
context[self.context_var] = Calendar.objects.get_or_create_calendar_for_object(self.content_object.resolve(context), self.distinction, name = self.name)
return ''
def do_get_or_create_calendar_for_object(parser, token):
contents = token.split_contents()
if len(contents) > 2:
tag_name = contents[0]
obj = contents[1]
if 'by' in contents:
by_index = contents.index('by')
distinction = contents[by_index+1]
else:
distinction = None
if 'named' in contents:
named_index = contents.index('named')
name = contents[named_index+1]
if name[0] == name[-1]:
name = name[1:-1]
else:
name = None
if 'as' in contents:
as_index = contents.index('as')
context_var = contents[as_index+1]
else:
raise template.TemplateSyntaxError, "%r tag requires an a context variable: %r <content_object> [named <calendar name>] [by <distinction>] as <context_var>" % (token.split_contents()[0], token.split_contents()[0])
else:
raise template.TemplateSyntaxError, "%r tag follows form %r <content_object> [named <calendar name>] [by <distinction>] as <context_var>" % (token.split_contents()[0], token.split_contents()[0])
return CreateCalendarNode(obj, distinction, context_var, name)
register.tag('get_calendar', do_get_calendar_for_object)
register.tag('get_or_create_calendar', do_get_or_create_calendar_for_object)
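# ---------------------------------------------------------------------------
# Illustrative template usage for the two tags registered above (added
# sketch; the object and calendar names are made up):
#
#   {% get_calendar object as calendar %}
#   {% get_or_create_calendar object named "Team calendar" by "owner" as calendar %}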
@register.simple_tag
def querystring_for_date(date, num=6):
query_string = '?'
qs_parts = ['year=%d', 'month=%d', 'day=%d', 'hour=%d', 'minute=%d', 'second=%d']
qs_vars = (date.year, date.month, date.day, date.hour, date.minute, date.second)
query_string += '&'.join(qs_parts[:num]) % qs_vars[:num]
return query_string
@register.simple_tag
def prev_url(target, slug, period):
return '%s%s' % (
reverse(target, kwargs=dict(calendar_slug=slug)),
querystring_for_date(period.prev().start))
@register.simple_tag
def next_url(target, slug, period):
return '%s%s' % (
reverse(target, kwargs=dict(calendar_slug=slug)),
querystring_for_date(period.next().start))
@register.inclusion_tag("schedule/_prevnext.html")
def prevnext( target, slug, period, fmt=None):
if fmt is None:
fmt = settings.DATE_FORMAT
context = {
'slug' : slug,
'period' : period,
'period_name': format(period.start, fmt),
'target':target,
'MEDIA_URL': settings.MEDIA_URL,
}
return context
@register.inclusion_tag("schedule/_detail.html")
def detail( occurrence ):
context = {
'occurrence' : occurrence,
'MEDIA_URL': settings.MEDIA_URL,
}
return context
def _period_duration(period):
"Return the number of seconds of specified period"
duration = period.end - period.start
return (duration.days * 24 * 60 * 60) + duration.seconds
def _cook_occurrences(period, occs, width, height):
""" Prepare occurrences to be displayed.
Calculate dimensions and position (in px) for each occurrence.
The algorithm tries to fit overlapping occurrences so that they require a minimum
number of "columns".
Arguments:
period - time period for the whole series
occs - occurrences to be displayed
increment - slot size in minutes
width - width of the occurrences column (px)
height - height of the table (px)
"""
last = {}
# find out which occurrences overlap
for o in occs[:]:
o.data = period.classify_occurrence(o)
if not o.data:
occs.remove(o)
continue
o.level = -1
o.max = 0
if not last:
last[0] = o
o.level = 0
else:
for k in sorted(last.keys()):
if last[k].end <= o.start:
o.level = k
last[k] = o
break
if o.level == -1:
k = k + 1
last[k] = o
o.level = k
# calculate position and dimensions
for o in occs:
# number of overlapping occurrences
o.max = len([n for n in occs if not(n.end<=o.start or n.start>=o.end)])
for o in occs:
o.cls = o.data['class']
o.real_start = max(o.start, period.start)
o.real_end = min(o.end, period.end)
# number of "columns" is a minimum number of overlaps for each overlapping group
o.max = min([n.max for n in occs if not(n.end<=o.start or n.start>=o.end)] or [1])
w = int(width / (o.max))
o.width = w - 2
o.left = w * o.level
duration_seconds = _period_duration(period)
o.top = int(height * (float((o.real_start - period.start).seconds) / duration_seconds))
o.height = int(height * (float((o.real_end - o.real_start).seconds) / duration_seconds))
o.height = min(o.height, height - o.top) # trim what extends beyond the area
return occs
def _cook_slots(period, increment, width, height):
"""
Prepare slots to be displayed on the left hand side
calculate dimensions (in px) for each slot.
Arguments:
period - time period for the whole series
increment - slot size in minutes
width - width of the slot column (px)
height - height of the table (px)
"""
tdiff = datetime.timedelta(minutes=increment)
num = _period_duration(period)/tdiff.seconds
s = period.start
slots = []
for i in range(num):
sl = period.get_time_slot(s, s + tdiff)
sl.top = int(height / float(num)) * i
sl.height = int(height / float(num))
slots.append(sl)
s = s + tdiff
return slots
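# ---------------------------------------------------------------------------
# Illustrative arithmetic (added sketch): with daily_table's defaults
# (start=8, end=20, increment=30) the displayed period spans 12 hours, i.e.
# 43200 seconds, so _cook_slots produces 43200 / 1800 = 24 slots, each
# int(height / 24.0) pixels tall.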
@register.simple_tag
def hash_occurrence(occ):
return '%s_%s' % (occ.start.strftime('%Y%m%d%H%M%S'), occ.event.id)
| tscholze/py-hasi-home-analytical-system-interface | hasi/schedule/templatetags/scheduletags.py | Python | mit | 11,258 |
# """Test for views creation and link to html pages."""
# from pyramid import testing
# from pyramid_learning_journal.models import (
# Entry,
# get_tm_session,
# )
# from pyramid_learning_journal.models.meta import Base
# from pyramid_learning_journal.views.notfound import notfound_view
# from pyramid_learning_journal.views.default import (
# list_view,
# create_view,
# detail_view,
# edit_view
# )
# from pyramid.config import Configurator
# from pyramid.httpexceptions import HTTPNotFound, HTTPFound
# from faker import Faker
# import pytest
# import datetime
# import transaction
# import os
# FAKE_STUFF = Faker()
# FAKE_ENTRIES = [Entry(
# title=FAKE_STUFF.text(20),
# body=FAKE_STUFF.text(250),
# creation_date=datetime.datetime.now(),
# ) for x in range(25)]
# @pytest.fixture
# def dummy_request(db_session):
# """Make a fake HTTP request."""
# return testing.DummyRequest(dbsession=db_session)
# @pytest.fixture
# def add_models(dummy_request):
# """Add entries to a dummy request."""
# dummy_request.dbsession.add_all(FAKE_ENTRIES)
# @pytest.fixture(scope="session")
# def configuration(request):
# """Set up a Configurator instance."""
# config = testing.setUp(settings={
# 'sqlalchemy.url': os.environ.get('TEST_DATABASE')
# })
# config.include('pyramid_learning_journal.models')
# config.include('pyramid_learning_journal.routes')
# def teardown():
# testing.tearDown()
# request.addfinalizer(teardown)
# return config
# @pytest.fixture
# def db_session(configuration, request):
# """Create a session for interacting with the test database."""
# SessionFactory = configuration.registry['dbsession_factory']
# session = SessionFactory()
# engine = session.bind
# Base.metadata.create_all(engine)
# def teardown():
# session.transaction.rollback()
# Base.metadata.drop_all(engine)
# request.addfinalizer(teardown)
# return session
# @pytest.fixture(scope="session")
# def testapp(request):
# """Create a test application to use for functional tests."""
# from webtest import TestApp
# def main(global_config, **settings):
# """Function returns a fake Pyramid WSGI application."""
# settings['sqlalchemy.url'] = os.environ.get('TEST_DATABASE')
# config = Configurator(settings=settings)
# config.include('pyramid_jinja2')
# config.include('pyramid_learning_journal.models')
# config.include('pyramid_learning_journal.routes')
# config.add_static_view(name='static',
# path='pyramid_learning_journal:static')
# config.scan()
# return config.make_wsgi_app()
# app = main({})
# testapp = TestApp(app)
# SessionFactory = app.registry['dbsession_factory']
# engine = SessionFactory().bind
# Base.metadata.create_all(bind=engine)
# def teardown():
# Base.metadata.drop_all(bind=engine)
# request.addfinalizer(teardown)
# return testapp
# @pytest.fixture
# def fill_test_db(testapp):
# """Set fake entries to the db for a session."""
# SessionFactory = testapp.app.registry['dbsession_factory']
# with transaction.manager:
# dbsession = get_tm_session(SessionFactory, transaction.manager)
# dbsession.add_all(FAKE_ENTRIES)
# # import pdb; pdb.set_trace()
# return dbsession
# @pytest.fixture
# def reset_db(testapp):
# """Clear and start a new DB."""
# SessionFactory = testapp.app.registry['dbsession_factory']
# engine = SessionFactory().bind
# Base.metadata.drop_all(bind=engine)
# Base.metadata.create_all(bind=engine)
# @pytest.fixture
# def post_request(dummy_request):
# """Make a fake HTTP POST request."""
# dummy_request.method = "POST"
# return dummy_request
# # # ----- Unit Tests ----- #
# def test_filling_fake_db(fill_test_db, db_session):
# """Check for entries added to db."""
# assert len(db_session.query(Entry).all()) == 25
# # def test_list_view_returns_dict(dummy_request):
# # """Test list view returns a dict when called."""
# # assert type(list_view(dummy_request)) == dict
# # def test_detail_view_with_id_raises_except(dummy_request):
# # """Test proper error raising with non matching id on detail view."""
# # dummy_request.matchdict['id'] = '9000'
# # with pytest.raises(HTTPNotFound):
# # detail_view(dummy_request)
# # def test_detail_view_returns_dict_with_db(db_session, dummy_request):
# # """Test detail view returns a dict when called."""
# # fake = Entry(
# # title=u'Stuff',
# # body=u'Some thing goes here.',
# # creation_date=datetime.datetime.now(),
# # )
# # db_session.add(fake)
# # fakeid = str(db_session.query(Entry)[0].id)
# # dummy_request.matchdict['id'] = fakeid
# # response = detail_view(dummy_request)
# # assert type(response) == dict
# # def test_create_view_returns_dict(dummy_request):
# # """Test create view returns a dict when called."""
# # assert type(create_view(dummy_request)) == dict
# # def test_edit_view_returns_dict_with_db(testapp, db_session):
# # """Test edit view returns a dict when called with a db."""
# # fake = Entry(
# # title=u'Stuff',
# # body=u'Some thing goes here.',
# # creation_date=datetime.datetime.now(),
# # )
# # db_session.add(fake)
# # fakeid = str(db_session.query(Entry)[0].id)
# # dummy_request.matchdict['id'] = fakeid
# # response = testapp.get('/journal/1/edit-entry')
# # ---------------------------
# # response = edit_view(dummy_request)
# # assert type(response) == dict
# # def test_db_gets_new_entry_with_content(dummy_request, db_session):
# # """Test db gets entry with proper content."""
# # fake = Entry(
# # title=u'Stuff',
# # body=u'Some thing goes here.',
# # creation_date=datetime.datetime.now(),
# # )
# # db_session.add(fake)
# # fakeid = str(db_session.query(Entry)[0].id)
# # dummy_request.matchdict['id'] = fakeid
# # response = detail_view(dummy_request)
# # assert len(db_session.query(Entry).all()) == 1
# # assert fake.title in response['entry'].title
# # assert fake.body in response['entry'].body
# # def test_edit_view_with_id_raises_except(dummy_request):
# # """Test proper error raising with non matching id on edit view."""
# # dummy_request.matchdict['id'] = '9000'
# # with pytest.raises(HTTPNotFound):
# # edit_view(dummy_request)
# # def test_list_view_returns_empty_without_db(dummy_request):
# # """Test list view returns a dict when called."""
# # response = list_view(dummy_request)
# # assert len(response['posts']) == 0
# # #----- Functional Tests ----- #
# # def test_home_route_has_home_contents(testapp, db_session):
# # """Test list view is routed to home page."""
# # response = testapp.get('/')
# # assert '<h1 class="blog-title">The Pyramid Blog</h1>' in response
# # def test_home_view_returns_200(testapp, db_session):
# # """."""
# # response = testapp.get('/')
# # assert response.status_code == 200
# # # def test_home_route_has_list_of_entries(testapp, db_session):
# # # """Test if there are the right amount of entries on the home page."""
# # # response = testapp.get('/')
# # # num_posts = len(response.html.find_all('h2'))
# # # print(response)
# # # assert num_posts == 25
# # # def test_new_entry_view_returns_proper_content(testapp, db_session):
# # # """New entry view returns the actual content from the html."""
# # # response = testapp.get('/journal/new-entry')
# # # # html = response.html
# # # # expected_text = '<h1 class="blog-title">Create New Entry!</h1>'
# # # print(response)
# # # # assert expected_text in str(html)
# # # # response = testapp.get('/')
# # # # assert '<h1 class="blog-title">The Pyramid Blog</h1>' in response
# # #<h1 class="blog-title">Entry View</h1>
# # # def test_detail_view_has_single_entry(testapp, db_session, fill_test_db):
# # # """Test that the detail page only brings up one entry."""
# # # response = testapp.get('/journal/1')
# # # html = response.html
# # # assert html.find()
# # # num_list_items = (len(html.find_all('h3')))
# # # assert num_list_items == 1
# # # def test_detail_view_returns_proper_content(testapp, db_session, fill_test_db):
# # # """Entry view returns a Response object when given a request."""
# # # # import pdb; pdb.set_trace()
# # # response = testapp.get('/journal/1')
# # # html = response.html
# # # assert html.find()
# # # expected_text = '<div class="entries">'
# # # assert expected_text in str(html)
# # # def test_edit_view_has_single_entry(testapp, db_session, fill_test_db):
# # # """Test that the detail page only brings up one entry."""
# # # response = testapp.get('/journal/1/edit-entry')
# # # html = response.html
# # # assert html.find()
# # # num_list_items = (len(html.find_all('h3')))
# # # assert num_list_items == 1
# # # def test_edit_view_returns_proper_content(testapp, db_session, fill_test_db):
# # # """Entry view returns a Response object when given a request."""
# # # response = testapp.get('/journal/1/edit-entry')
# # # assert '<div class="titlearea">' in response.html.text
# # # def test_detail_view_with_bad_id(testapp, db_session, fill_test_db):
# # # """."""
# # # response = testapp.get('/journal/9001', status=404)
# # # assert "These are not the pages you're looking for!" in response.text
# # # def test_edit_view_with_bad_id(testapp, db_session, fill_test_db):
# # # """."""
# # # response = testapp.get('/journal/9001/edit-entry', status=404)
# # # assert "These are not the pages you're looking for!" in response.text | endere/pyramid-learning-journal | pyramid_learning_journal/pyramid_learning_journal/tests.py | Python | mit | 9,988 |
# -*- test-case-name: xmantissa.test.test_sharing -*-
"""
This module provides various abstractions for sharing public data in Axiom.
"""
import os
import warnings
from zope.interface import implementedBy, directlyProvides, Interface
from twisted.python.reflect import qual, namedAny
from twisted.protocols.amp import Argument, Box, parseString
from epsilon.structlike import record
from axiom import userbase
from axiom.item import Item
from axiom.attributes import reference, text, AND
from axiom.upgrade import registerUpgrader
ALL_IMPLEMENTED_DB = u'*'
ALL_IMPLEMENTED = object()
class NoSuchShare(Exception):
"""
    User requested an object that doesn't exist or was not allowed.
"""
class ConflictingNames(Exception):
"""
The same name was defined in two separate interfaces.
"""
class RoleRelationship(Item):
"""
RoleRelationship is a bridge record linking member roles with group roles
that they are members of.
"""
schemaVersion = 1
typeName = 'sharing_relationship'
member = reference(
doc="""
This is a reference to a L{Role} which is a member of my 'group' attribute.
""")
group = reference(
doc="""
This is a reference to a L{Role} which represents a group that my 'member'
attribute is a member of.
""")
def _entuple(r):
"""
Convert a L{record} to a tuple.
"""
return tuple(getattr(r, n) for n in r.__names__)
class Identifier(record('shareID localpart domain')):
"""
A fully-qualified identifier for an entity that can participate in a
message either as a sender or a receiver.
"""
@classmethod
def fromSharedItem(cls, sharedItem):
"""
Return an instance of C{cls} derived from the given L{Item} that has
been shared.
Note that this API does not provide any guarantees of which result it
will choose. If there are are multiple possible return values, it will
select and return only one. Items may be shared under multiple
L{shareID}s. A user may have multiple valid account names. It is
sometimes impossible to tell from context which one is appropriate, so
if your application has another way to select a specific shareID you
should use that instead.
@param sharedItem: an L{Item} that should be shared.
@return: an L{Identifier} describing the C{sharedItem} parameter.
@raise L{NoSuchShare}: if the given item is not shared or its store
does not contain any L{LoginMethod} items which would identify a user.
"""
localpart = None
for (localpart, domain) in userbase.getAccountNames(sharedItem.store):
break
if localpart is None:
raise NoSuchShare()
for share in sharedItem.store.query(Share,
Share.sharedItem == sharedItem):
break
else:
raise NoSuchShare()
return cls(
shareID=share.shareID,
localpart=localpart, domain=domain)
def __cmp__(self, other):
"""
Compare this L{Identifier} to another object.
"""
# Note - might be useful to have this usable by arbitrary L{record}
# objects. It can't be the default, but perhaps a mixin?
if not isinstance(other, Identifier):
return NotImplemented
return cmp(_entuple(self), _entuple(other))
class IdentifierArgument(Argument):
"""
An AMP argument which can serialize and deserialize an L{Identifier}.
"""
def toString(self, obj):
"""
Convert the given L{Identifier} to a string.
"""
return Box(shareID=obj.shareID.encode('utf-8'),
localpart=obj.localpart.encode('utf-8'),
domain=obj.domain.encode('utf-8')).serialize()
def fromString(self, inString):
"""
Convert the given string to an L{Identifier}.
"""
box = parseString(inString)[0]
return Identifier(shareID=box['shareID'].decode('utf-8'),
localpart=box['localpart'].decode('utf-8'),
domain=box['domain'].decode('utf-8'))
class Role(Item):
"""
A Role is an identifier for a group or individual which has certain
permissions.
Items shared within the sharing system are always shared with a particular
role.
"""
schemaVersion = 1
typeName = 'sharing_role'
externalID = text(
doc="""
This is the external identifier which the role is known by. This field is
used to associate users with their primary role. If a user logs in as
[email protected], the sharing system will associate his primary role with
the pre-existing role with the externalID of '[email protected]', or
    'Everyone' if no such role exists.
For group roles, the externalID is not currently used except as a
display identifier. Group roles should never have an '@' character in
them, however, to avoid confusion with user roles.
""", allowNone=False)
# XXX TODO: In addition to the externalID, we really need to have something
# that identifies what protocol the user for the role is expected to log in
# as, and a way to identify the way that their role was associated with
# their login. For example, it might be acceptable for some security
# applications (e.g. spam prevention) to simply use an HTTP cookie. For
# others (accounting database manipulation) it should be possible to
# require more secure methods of authentication, like a signed client
# certificate.
description = text(
doc="""
This is a free-form descriptive string for use by users to explain the
purpose of the role. Since the externalID is used by security logic
and must correspond to a login identifier, this can be used to hold a
user's real name.
""")
def becomeMemberOf(self, groupRole):
"""
Instruct this (user or group) Role to become a member of a group role.
@param groupRole: The role that this group should become a member of.
"""
self.store.findOrCreate(RoleRelationship,
group=groupRole,
member=self)
def allRoles(self, memo=None):
"""
Identify all the roles that this role is authorized to act as.
@param memo: used only for recursion. Do not pass this.
@return: an iterator of all roles that this role is a member of,
including itself.
"""
if memo is None:
memo = set()
elif self in memo:
# this is bad, but we have successfully detected and prevented the
# only really bad symptom, an infinite loop.
return
memo.add(self)
yield self
for groupRole in self.store.query(Role,
AND(RoleRelationship.member == self,
RoleRelationship.group == Role.storeID)):
for roleRole in groupRole.allRoles(memo):
yield roleRole
def shareItem(self, sharedItem, shareID=None, interfaces=ALL_IMPLEMENTED):
"""
Share an item with this role. This provides a way to expose items to
users for later retrieval with L{Role.getShare}.
@param sharedItem: an item to be shared.
@param shareID: a unicode string. If provided, specify the ID under which
the shared item will be shared.
@param interfaces: a list of Interface objects which specify the methods
and attributes accessible to C{toRole} on C{sharedItem}.
@return: a L{Share} which records the ability of the given role to
access the given item.
"""
if shareID is None:
shareID = genShareID(sharedItem.store)
return Share(store=self.store,
shareID=shareID,
sharedItem=sharedItem,
sharedTo=self,
sharedInterfaces=interfaces)
def getShare(self, shareID):
"""
Retrieve a proxy object for a given shareID, previously shared with
this role or one of its group roles via L{Role.shareItem}.
@return: a L{SharedProxy}. This is a wrapper around the shared item
which only exposes those interfaces explicitly allowed for the given
role.
@raise: L{NoSuchShare} if there is no item shared to the given role for
the given shareID.
"""
shares = list(
self.store.query(Share,
AND(Share.shareID == shareID,
Share.sharedTo.oneOf(self.allRoles()))))
interfaces = []
for share in shares:
interfaces += share.sharedInterfaces
if shares:
return SharedProxy(shares[0].sharedItem,
interfaces,
shareID)
raise NoSuchShare()
def asAccessibleTo(self, query):
"""
@param query: An Axiom query describing the Items to retrieve, which this
role can access.
@type query: an L{iaxiom.IQuery} provider.
@return: an iterable which yields the shared proxies that are available
to the given role, from the given query.
"""
# XXX TODO #2371: this method really *should* be returning an L{IQuery}
# provider as well, but that is kind of tricky to do. Currently, doing
# queries leaks authority, because the resulting objects have stores
# and "real" items as part of their interface; having this be a "real"
# query provider would obviate the need to escape the L{SharedProxy}
# security constraints in order to do any querying.
allRoles = list(self.allRoles())
count = 0
unlimited = query.cloneQuery(limit=None)
for result in unlimited:
allShares = list(query.store.query(
Share,
AND(Share.sharedItem == result,
Share.sharedTo.oneOf(allRoles))))
interfaces = []
for share in allShares:
interfaces += share.sharedInterfaces
if allShares:
count += 1
yield SharedProxy(result, interfaces, allShares[0].shareID)
if count == query.limit:
return
class _really(object):
"""
A dynamic proxy for dealing with 'private' attributes on L{SharedProxy},
which overrides C{__getattribute__} itself. This is pretty syntax to avoid
ugly references to __dict__ and super and object.__getattribute__() in
dynamic proxy implementations.
"""
def __init__(self, orig):
"""
Create a _really object with a dynamic proxy.
@param orig: an object that overrides __getattribute__, probably
L{SharedProxy}.
"""
self.orig = orig
def __setattr__(self, name, value):
"""
Set an attribute on my original, unless my original has not yet been set,
in which case set it on me.
"""
try:
orig = object.__getattribute__(self, 'orig')
except AttributeError:
object.__setattr__(self, name, value)
else:
object.__setattr__(orig, name, value)
def __getattribute__(self, name):
"""
Get an attribute present on my original using L{object.__getattribute__},
not the overridden version.
"""
return object.__getattribute__(object.__getattribute__(self, 'orig'),
name)
ALLOWED_ON_PROXY = ['__provides__', '__dict__']
class SharedProxy(object):
"""
    A shared proxy is a dynamic proxy which exposes the methods and
attributes declared by shared interfaces on a given Item. These are
returned from L{Role.getShare} and yielded from L{Role.asAccessibleTo}.
Shared proxies are unlike regular items because they do not have 'storeID'
or 'store' attributes (unless explicitly exposed). They are designed
to make security easy to implement: if you have a shared proxy, you can
access any attribute or method on it without having to do explicit
permission checks.
If you *do* want to perform an explicit permission check, for example, to
render some UI associated with a particular permission, it can be performed
as a functionality check instead. For example, C{if getattr(proxy,
'feature', None) is None:} or, more formally,
C{IFeature.providedBy(proxy)}. If your interfaces are all declared and
implemented properly everywhere, these checks will work both with shared
proxies and with the original Items that they represent (but of course, the
original Items will always provide all of their features).
(Note that object.__getattribute__ still lets you reach inside any object,
so don't imagine this makes you bulletproof -- you have to cooperate with
it.)
"""
def __init__(self, sharedItem, sharedInterfaces, shareID):
"""
Create a shared proxy for a given item.
@param sharedItem: The original item that was shared.
@param sharedInterfaces: a list of interfaces which C{sharedItem}
implements that this proxy should allow access to.
@param shareID: the external identifier that the shared item was shared
as.
"""
rself = _really(self)
rself._sharedItem = sharedItem
rself._shareID = shareID
rself._adapterCache = {}
# Drop all duplicate shared interfaces.
uniqueInterfaces = list(sharedInterfaces)
# XXX there _MUST_ Be a better algorithm for this
for left in sharedInterfaces:
for right in sharedInterfaces:
if left.extends(right) and right in uniqueInterfaces:
uniqueInterfaces.remove(right)
for eachInterface in uniqueInterfaces:
if not eachInterface.providedBy(sharedItem):
impl = eachInterface(sharedItem, None)
if impl is not None:
rself._adapterCache[eachInterface] = impl
rself._sharedInterfaces = uniqueInterfaces
# Make me look *exactly* like the item I am proxying for, at least for
# the purposes of adaptation
# directlyProvides(self, providedBy(sharedItem))
directlyProvides(self, uniqueInterfaces)
def __repr__(self):
"""
Return a pretty string representation of this shared proxy.
"""
rself = _really(self)
return 'SharedProxy(%r, %r, %r)' % (
rself._sharedItem,
rself._sharedInterfaces,
rself._shareID)
def __getattribute__(self, name):
"""
@return: attributes from my shared item, present in the shared interfaces
list for this proxy.
@param name: the name of the attribute to retrieve.
@raise AttributeError: if the attribute was not found or access to it was
denied.
"""
if name in ALLOWED_ON_PROXY:
return object.__getattribute__(self, name)
rself = _really(self)
if name == 'sharedInterfaces':
return rself._sharedInterfaces
elif name == 'shareID':
return rself._shareID
for iface in rself._sharedInterfaces:
if name in iface:
if iface in rself._adapterCache:
return getattr(rself._adapterCache[iface], name)
return getattr(rself._sharedItem, name)
raise AttributeError("%r has no attribute %r" % (self, name))
def __setattr__(self, name, value):
"""
Set an attribute on the shared item. If the name of the attribute is in
L{ALLOWED_ON_PROXY}, set it on this proxy instead.
@param name: the name of the attribute to set
@param value: the value of the attribute to set
@return: None
"""
if name in ALLOWED_ON_PROXY:
self.__dict__[name] = value
else:
raise AttributeError("unsettable: "+repr(name))
def _interfacesToNames(interfaces):
"""
Convert from a list of interfaces to a unicode string of names suitable for
storage in the database.
@param interfaces: an iterable of Interface objects.
@return: a unicode string, a comma-separated list of names of interfaces.
@raise ConflictingNames: if any of the names conflict: see
L{_checkConflictingNames}.
"""
if interfaces is ALL_IMPLEMENTED:
names = ALL_IMPLEMENTED_DB
else:
_checkConflictingNames(interfaces)
names = u','.join(map(qual, interfaces))
return names
class Share(Item):
"""
A Share is a declaration that users with a given role can access a given
set of functionality, as described by an Interface object.
They should be created with L{Role.shareItem} and retrieved with
L{Role.asAccessibleTo} and L{Role.getShare}.
"""
schemaVersion = 2
typeName = 'sharing_share'
shareID = text(
doc="""
The shareID is the externally-visible identifier for this share. It is
free-form text, which users may enter to access this share.
Currently the only concrete use of this attribute is in HTTP[S] URLs, but
in the future it will be used in menu entries.
""",
allowNone=False)
sharedItem = reference(
doc="""
The sharedItem attribute is a reference to the item which is being
provided.
""",
allowNone=False,
whenDeleted=reference.CASCADE)
sharedTo = reference(
doc="""
The sharedTo attribute is a reference to the Role which this item is shared
with.
""",
allowNone=False)
sharedInterfaceNames = text(
doc="""
This is an internal implementation detail of the sharedInterfaces
attribute.
""",
allowNone=False)
def __init__(self, **kw):
"""
Create a share.
Consider this interface private; use L{shareItem} instead.
"""
# XXX TODO: All I really want to do here is to have enforcement of
# allowNone happen at the _end_ of __init__; axiom should probably do
# that by default, since there are several __init__s like this which
# don't really do anything scattered throughout the codebase.
kw['sharedInterfaceNames'] = _interfacesToNames(kw.pop('sharedInterfaces'))
super(Share, self).__init__(**kw)
def sharedInterfaces():
"""
This attribute is the public interface for code which wishes to discover
the list of interfaces allowed by this Share. It is a list of
Interface objects.
"""
def get(self):
if not self.sharedInterfaceNames:
return ()
if self.sharedInterfaceNames == ALL_IMPLEMENTED_DB:
I = implementedBy(self.sharedItem.__class__)
L = list(I)
T = tuple(L)
return T
else:
return tuple(map(namedAny, self.sharedInterfaceNames.split(u',')))
def set(self, newValue):
            # Store under the version-2 attribute name read back by get() above.
            self.sharedInterfaceNames = _interfacesToNames(newValue)
return get, set
sharedInterfaces = property(
doc=sharedInterfaces.__doc__,
*sharedInterfaces())
def upgradeShare1to2(oldShare):
"Upgrader from Share version 1 to version 2."
sharedInterfaces = []
attrs = set(oldShare.sharedAttributeNames.split(u','))
for iface in implementedBy(oldShare.sharedItem.__class__):
if set(iface) == attrs or attrs == set('*'):
sharedInterfaces.append(iface)
newShare = oldShare.upgradeVersion('sharing_share', 1, 2,
shareID=oldShare.shareID,
sharedItem=oldShare.sharedItem,
sharedTo=oldShare.sharedTo,
sharedInterfaces=sharedInterfaces)
return newShare
registerUpgrader(upgradeShare1to2, 'sharing_share', 1, 2)
def genShareID(store):
"""
Generate a new, randomized share-ID for use as the default of shareItem, if
none is specified.
@return: a random share-ID.
@rtype: unicode.
"""
return unicode(os.urandom(16).encode('hex'), 'ascii')
def getEveryoneRole(store):
"""
Get a base 'Everyone' role for this store, which is the role that every
user, including the anonymous user, has.
"""
return store.findOrCreate(Role, externalID=u'Everyone')
def getAuthenticatedRole(store):
"""
Get the base 'Authenticated' role for this store, which is the role that is
given to every user who is explicitly identified by a non-anonymous
username.
"""
def tx():
def addToEveryone(newAuthenticatedRole):
newAuthenticatedRole.becomeMemberOf(getEveryoneRole(store))
return newAuthenticatedRole
return store.findOrCreate(Role, addToEveryone, externalID=u'Authenticated')
return store.transact(tx)
def getPrimaryRole(store, primaryRoleName, createIfNotFound=False):
"""
    Get the Role object corresponding to an identifier name. If the role name
    passed is the empty string, it is assumed that the user is not
    authenticated, and the 'Everyone' role is primary. If the role name
    passed is non-empty but has no corresponding role, the 'Authenticated'
    role - which is a member of 'Everyone' - is primary. Finally, a specific
    role is primary if one exists for the user's given credentials; such a role
    will automatically always be a member of 'Authenticated', and by extension,
    of 'Everyone'.
@param primaryRoleName: a unicode string identifying the role to be
retrieved. This corresponds to L{Role}'s externalID attribute.
@param createIfNotFound: a boolean. If True, create a role for the given
primary role name if no exact match is found. The default, False, will
instead retrieve the 'nearest match' role, which can be Authenticated or
Everybody depending on whether the user is logged in or not.
@return: a L{Role}.
"""
if not primaryRoleName:
return getEveryoneRole(store)
ff = store.findUnique(Role, Role.externalID == primaryRoleName, default=None)
if ff is not None:
return ff
authRole = getAuthenticatedRole(store)
if createIfNotFound:
role = Role(store=store,
externalID=primaryRoleName)
role.becomeMemberOf(authRole)
return role
return authRole
def getSelfRole(store):
"""
Retrieve the Role which corresponds to the user to whom the given store
belongs.
"""
return getAccountRole(store, userbase.getAccountNames(store))
def getAccountRole(store, accountNames):
"""
    Retrieve the first Role in the given store which corresponds to an account
name in C{accountNames}.
Note: the implementation currently ignores all of the values in
C{accountNames} except for the first.
@param accountNames: A C{list} of two-tuples of account local parts and
domains.
@raise ValueError: If C{accountNames} is empty.
@rtype: L{Role}
"""
for (localpart, domain) in accountNames:
return getPrimaryRole(store, u'%s@%s' % (localpart, domain),
createIfNotFound=True)
raise ValueError("Cannot get named role for unnamed account.")
def shareItem(sharedItem, toRole=None, toName=None, shareID=None,
interfaces=ALL_IMPLEMENTED):
"""
Share an item with a given role. This provides a way to expose items to
users for later retrieval with L{Role.getShare}.
This API is slated for deprecation. Prefer L{Role.shareItem} in new code.
@param sharedItem: an item to be shared.
@param toRole: a L{Role} instance which represents the group that has
access to the given item. May not be specified if toName is also
specified.
@param toName: a unicode string which uniquely identifies a L{Role} in the
same store as the sharedItem.
@param shareID: a unicode string. If provided, specify the ID under which
the shared item will be shared.
@param interfaces: a list of Interface objects which specify the methods
and attributes accessible to C{toRole} on C{sharedItem}.
@return: a L{Share} which records the ability of the given role to access
the given item.
"""
warnings.warn("Use Role.shareItem() instead of sharing.shareItem().",
PendingDeprecationWarning,
stacklevel=2)
if toRole is None:
if toName is not None:
toRole = getPrimaryRole(sharedItem.store, toName, True)
else:
toRole = getEveryoneRole(sharedItem.store)
return toRole.shareItem(sharedItem, shareID, interfaces)
def _linearize(interface):
"""
Return a list of all the bases of a given interface in depth-first order.
@param interface: an Interface object.
    @return: a L{list} of Interface objects, the input and all its bases, in
subclass-to-base-class, depth-first order.
"""
L = [interface]
for baseInterface in interface.__bases__:
if baseInterface is not Interface:
L.extend(_linearize(baseInterface))
return L
def _commonParent(zi1, zi2):
"""
Locate the common parent of two Interface objects.
@param zi1: a zope Interface object.
@param zi2: another Interface object.
@return: the rightmost common parent of the two provided Interface objects,
or None, if they have no common parent other than Interface itself.
"""
shorter, longer = sorted([_linearize(x)[::-1] for x in zi1, zi2],
key=len)
for n in range(len(shorter)):
if shorter[n] != longer[n]:
if n == 0:
return None
return shorter[n-1]
return shorter[-1]
def _checkConflictingNames(interfaces):
"""
Raise an exception if any of the names present in the given interfaces
conflict with each other.
@param interfaces: a list of Zope Interface objects.
@return: None
@raise ConflictingNames: if any of the attributes of the provided
interfaces are the same, and they do not have a common base interface which
provides that name.
"""
names = {}
for interface in interfaces:
for name in interface:
if name in names:
otherInterface = names[name]
parent = _commonParent(interface, otherInterface)
if parent is None or name not in parent:
raise ConflictingNames("%s conflicts with %s over %s" % (
interface, otherInterface, name))
names[name] = interface
def getShare(store, role, shareID):
"""
Retrieve the accessible facet of an Item previously shared with
L{shareItem}.
This method is pending deprecation, and L{Role.getShare} should be
preferred in new code.
@param store: an axiom store (XXX must be the same as role.store)
@param role: a L{Role}, the primary role for a user attempting to retrieve
the given item.
@return: a L{SharedProxy}. This is a wrapper around the shared item which
only exposes those interfaces explicitly allowed for the given role.
@raise: L{NoSuchShare} if there is no item shared to the given role for the
given shareID.
"""
warnings.warn("Use Role.getShare() instead of sharing.getShare().",
PendingDeprecationWarning,
stacklevel=2)
return role.getShare(shareID)
def asAccessibleTo(role, query):
"""
Return an iterable which yields the shared proxies that are available to
the given role, from the given query.
This method is pending deprecation, and L{Role.asAccessibleTo} should be
preferred in new code.
@param role: The role to retrieve L{SharedProxy}s for.
@param query: An Axiom query describing the Items to retrieve, which this
role can access.
@type query: an L{iaxiom.IQuery} provider.
"""
warnings.warn(
"Use Role.asAccessibleTo() instead of sharing.asAccessibleTo().",
PendingDeprecationWarning,
stacklevel=2)
return role.asAccessibleTo(query)
def itemFromProxy(obj):
"""
Retrieve the real, underlying Item based on a L{SharedProxy} object, so
that you can access all of its attributes and methods.
This function is provided because sometimes it's hard to figure out how to
cleanly achieve some behavior, especially running a query which relates to
a shared proxy which you have retrieved. However, if you find yourself
calling it a lot, that's a very bad sign: calling this method is implicitly
a breach of the security that the sharing system tries to provide.
Normally, if your code is acting as an agent of role X, it has access to a
L{SharedProxy} that only provides interfaces explicitly allowed to X. If
you make a mistake and call a method that the user is not supposed to be
able to access, the user will receive an exception rather than be allowed
to violate the system's security constraints.
However, once you have retrieved the underlying item, all bets are off, and
you have to perform your own security checks. This is error-prone, and
should be avoided. We suggest, instead, adding explicitly allowed methods
for performing any queries which your objects need.
@param obj: a L{SharedProxy} instance
@return: the underlying Item instance of the given L{SharedProxy}, with all
of its methods and attributes exposed.
"""
return object.__getattribute__(obj, '_sharedItem')
def unShare(sharedItem):
"""
Remove all instances of this item from public or shared view.
"""
sharedItem.store.query(Share, Share.sharedItem == sharedItem).deleteFromStore()
def randomEarlyShared(store, role):
"""
If there are no explicitly-published public index pages to display, find a
    shared item to present to the user first.
"""
for r in role.allRoles():
share = store.findFirst(Share, Share.sharedTo == r,
sort=Share.storeID.ascending)
if share is not None:
return share.sharedItem
raise NoSuchShare("Why, that user hasn't shared anything at all!")
| twisted/mantissa | xmantissa/sharing.py | Python | mit | 30,864 |
# -*- coding: utf-8 -*-
from jinja2 import Environment, FileSystemLoader, Template, TemplateNotFound
import jinja2.ext
import os
class TemplateProvider(object):
_template_dirs = []
def __init__(self, *args, **kwargs):
self._env = self._create_env(*args, **kwargs)
def _create_env(self, *args, **kwargs):
raise NotImplementedError()
def _get_template(self, *args, **kwargs):
raise NotImplementedError()
def render_to_string(self, *args, **kwargs):
raise NotImplementedError()
class JinjaTemplateProvider(TemplateProvider):
def __init__(self, template_dirs, **extra_options):
super(JinjaTemplateProvider, self).__init__(template_dirs,
**extra_options)
def _create_env(self, template_dirs, **extra_options):
env = Environment(loader=FileSystemLoader(template_dirs),
extensions=[jinja2.ext.i18n, jinja2.ext.with_],
autoescape=True, **extra_options)
        # No-op translation callables; ngettext must accept the
        # (singular, plural, n) signature expected by jinja2.ext.i18n.
        env.install_gettext_callables(
            gettext=lambda s: s,
            ngettext=lambda singular, plural, n: singular if n == 1 else plural)
return env
def get_template(self, template, globals=None, env=None):
env = env or self._env
if isinstance(template, Template):
return template
elif isinstance(template, basestring):
try:
return env.get_template(template, globals=globals)
except TemplateNotFound, e:
raise TemplateNotFound(str(e))
for t in template:
try:
return env.get_template(t, globals=globals)
except TemplateNotFound:
continue
raise TemplateNotFound(template)
def render_to_string(self, template, context=None):
template = self.get_template(template)
context = dict(context or {})
return template.render(context)
def _register_function(self, tag_type, function, name):
assert tag_type in ['global', 'filter']
if name is None:
name = function.__name__
getattr(self._env, {'filter': 'filters',
'global': 'globals'}[tag_type])[name] = function
def register_filter(self, function, name=None):
self._register_function('filter', function, name)
def register_global(self, function, name=None):
self._register_function('global', function, name)
def register_template_dir(self, template_dir):
        new_dirs = set(os.listdir(template_dir))
        for existing in self._template_dirs:
            # Report the actual overlap with the already-registered directory,
            # not the new directory intersected with itself.
            overlap = new_dirs.intersection(set(os.listdir(existing)))
            if overlap:
                raise ValueError(
                    'There are overlapping directories '
                    'with existing templates: %s' % overlap)
self._template_dirs.append(template_dir)
self._env.loader = FileSystemLoader(self._template_dirs)
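# Illustrative usage sketch, not part of the original module.  Rendering a
# Template instance avoids touching the filesystem; the 'templates' directory
# is an assumed path and only matters once file-based templates are requested.
if __name__ == '__main__':
    provider = JinjaTemplateProvider(['templates'])
    provider.register_filter(lambda s: s.upper(), name='shout')
    print provider.render_to_string(Template(u'Hello {{ name }}!'),
                                    {'name': u'world'})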
| ra2er/templateit | templateit/providers.py | Python | mit | 3,011 |
#!/usr/bin/python
# load all opp file, store in a matrix, and print to files for significant interactions
import os, sys
import psyco
psyco.full()
class PW:
    def __init__(self):
        # pairwise energy table: conf1 -> conf2 -> (ele, vdw, raw, crt)
        self.pw = {}
        self.opps = []
        return
def get_opp_names(self):
fnames=os.listdir("./")
opp_names=[]
for fname in fnames:
            if fname.endswith(".opp"):
opp_names.append(fname)
self.opps = opp_names
return
def print_opp_names(self):
for fname in self.opps:
print fname
def load(self):
if not self.opps:
self.get_opp_names()
for fname in self.opps:
#ARG03A0020_010.opp
conf1=fname[:14]
lines = open(fname).readlines()
for line in lines:
if len(line) < 20: continue
fields = line.split()
conf2=fields[1]
ele = float(fields[2])
vdw = float(fields[3])
raw = float(fields[4])
crt = float(fields[5])
if self.pw.has_key(conf1):
self.pw[conf1][conf2]=(ele, vdw, raw, crt)
else:
self.pw[conf1] = {}
self.pw[conf1][conf2]= (ele, vdw, raw, crt)
    def filter(self, t):
        """Count conformer pairs whose |ele| and |vdw| both exceed threshold t."""
        count = 0
confs=self.pw.keys()
for conf1 in confs:
for conf2 in confs:
if abs(self.pw[conf1][conf2][0]) > t and abs(self.pw[conf1][conf2][1]) > t:
count += 1
return count
if __name__== "__main__":
pwtable = PW()
# pwtable.get_opp_names()
# pwtable.print_opp_names()
pwtable.load()
t=float(sys.argv[1])
print "Number of entries with ele or/and vdw greater than %f is %d" % (t, pwtable.filter(t))
| MarilyGunnersLab/MCCE | mcce_stable/bin/thinopp.py | Python | mit | 1,775 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerLoadBalancingRulesOperations(object):
"""LoadBalancerLoadBalancingRulesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.LoadBalancerLoadBalancingRuleListResult"]
"""Gets all the load balancing rules in a load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerLoadBalancingRuleListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_12_01.models.LoadBalancerLoadBalancingRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerLoadBalancingRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerLoadBalancingRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/loadBalancingRules'} # type: ignore
def get(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
load_balancing_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.LoadBalancingRule"
"""Gets the specified load balancer load balancing rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param load_balancing_rule_name: The name of the load balancing rule.
:type load_balancing_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LoadBalancingRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_12_01.models.LoadBalancingRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancingRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'loadBalancingRuleName': self._serialize.url("load_balancing_rule_name", load_balancing_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LoadBalancingRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/loadBalancingRules/{loadBalancingRuleName}'} # type: ignore
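# ----------------------------------------------------------------------------
# Illustrative usage sketch, not part of the generated client.  It assumes the
# operations group is reached through azure.mgmt.network.NetworkManagementClient
# and that azure-identity is available; the resource names are placeholders.
def _example_list_load_balancing_rules(subscription_id, resource_group_name, load_balancer_name):
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient
    client = NetworkManagementClient(DefaultAzureCredential(), subscription_id)
    # `load_balancer_load_balancing_rules` exposes the list/get operations
    # defined in this module.
    for rule in client.load_balancer_load_balancing_rules.list(
            resource_group_name, load_balancer_name):
        print(rule.name)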
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_12_01/operations/_load_balancer_load_balancing_rules_operations.py | Python | mit | 8,924 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import beanstalkc
from flask import Blueprint, current_app, jsonify, g, request
import config
api = Blueprint('api', __name__)
@api.before_request
def before_request():
g.beanstalk = beanstalkc.Connection(
host=config.BEANSTALKD_HOST, port=config.BEANSTALKD_PORT)
@api.route('/push', methods=['POST'])
def push_jobs():
jobs = request.json
for job in jobs:
if job['app_name'] not in config.APPS:
ret = dict(
error='unknown_app_name',
detail='Unknown app name %s' % job['app_name'])
return jsonify(ret), 400
if len(jobs) < 5:
for job in jobs:
if job['app_name'] not in config.APPS:
continue
g.beanstalk.use(config.PUSH_TUBE % job['app_name'])
priority = config.PRIORITIES.get(job.get('priority', 'low'))
delay = job.get('delay', 0)
g.beanstalk.put(json.dumps(job), priority=priority, delay=delay)
else:
g.beanstalk.use(config.BATCH_PUSH_TUBE)
g.beanstalk.put(json.dumps(jobs))
current_app.logger.info(jobs)
return jsonify(dict())
@api.route('/push_stats', methods=['GET'])
def push_stats():
ret = g.beanstalk.stats()
ret['tubes'] = []
for app_name in config.APPS.keys():
try:
ret['tubes'].append(
g.beanstalk.stats_tube(config.PUSH_TUBE % app_name))
except beanstalkc.CommandFailed:
continue
return jsonify(ret)
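# Illustrative client call, not part of the original module; the host, port and
# blueprint mount point depend on how `api` is registered, and "some-app" must
# be a key in config.APPS:
#
#   curl -X POST http://localhost:5000/push \
#        -H 'Content-Type: application/json' \
#        -d '[{"app_name": "some-app", "priority": "low", "delay": 0}]'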
| BeanYoung/push-turbo | src/api.py | Python | mit | 1,548 |
'''
Created on 1.12.2016
@author: Darren
'''
'''
A group of two or more people wants to meet and minimize the total travel distance. You are given a 2D grid of values 0 or 1, where each 1 marks the home of someone in the group. The distance is calculated using Manhattan Distance, where distance(p1, p2) = |p2.x - p1.x| + |p2.y - p1.y|.
For example, given three people living at (0,0), (0,4), and (2,2):
1 - 0 - 0 - 0 - 1
| | | | |
0 - 0 - 0 - 0 - 0
| | | | |
0 - 0 - 1 - 0 - 0
The point (0,2) is an ideal meeting point, as the total travel distance of 2+2+2=6 is minimal. So return 6.
'''
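# A minimal implementation sketch, not part of the original file; the class and
# method names follow the usual LeetCode convention and are assumptions here.
# The total Manhattan distance splits into independent row and column terms,
# and each one-dimensional term is minimised at the median of the occupied
# coordinates.
class Solution(object):
    def minTotalDistance(self, grid):
        """Return the minimal total travel distance, e.g. 6 for the grid above."""
        rows = [i for i, row in enumerate(grid) for v in row if v == 1]
        cols = sorted(j for row in grid for j, v in enumerate(row) if v == 1)
        def distance(points):  # `points` must be sorted; `rows` already is
            median = points[len(points) // 2]
            return sum(abs(p - median) for p in points)
        return distance(rows) + distance(cols) if rows else 0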
| darrencheng0817/AlgorithmLearning | Python/leetcode/BestMeetingPoint.py | Python | mit | 610 |
# -*- coding: utf-8 -*-
import math
# Constants taken from http://cesiumjs.org/2013/04/25/Horizon-culling/
radiusX = 6378137.0
radiusY = 6378137.0
radiusZ = 6356752.3142451793
# Stolen from https://github.com/bistromath/gr-air-modes/blob/master/python/mlat.py
# WGS84 reference ellipsoid constants
# http://en.wikipedia.org/wiki/Geodetic_datum#Conversion_calculations
# http://en.wikipedia.org/wiki/File%3aECEF.png
wgs84_a = radiusX # Semi-major axis
wgs84_b = radiusZ # Semi-minor axis
wgs84_e2 = 0.0066943799901975848 # First eccentricity squared
wgs84_a2 = wgs84_a ** 2 # To speed things up a bit
wgs84_b2 = wgs84_b ** 2
def LLH2ECEF(lon, lat, alt):
lat *= (math.pi / 180.0)
lon *= (math.pi / 180.0)
def n(x):
return wgs84_a / math.sqrt(1 - wgs84_e2 * (math.sin(x) ** 2))
x = (n(lat) + alt) * math.cos(lat) * math.cos(lon)
y = (n(lat) + alt) * math.cos(lat) * math.sin(lon)
z = (n(lat) * (1 - wgs84_e2) + alt) * math.sin(lat)
return [x, y, z]
# alt is in meters
def ECEF2LLH(x, y, z):
ep = math.sqrt((wgs84_a2 - wgs84_b2) / wgs84_b2)
p = math.sqrt(x ** 2 + y ** 2)
th = math.atan2(wgs84_a * z, wgs84_b * p)
lon = math.atan2(y, x)
lat = math.atan2(
z + ep ** 2 * wgs84_b * math.sin(th) ** 3,
p - wgs84_e2 * wgs84_a * math.cos(th) ** 3
)
N = wgs84_a / math.sqrt(1 - wgs84_e2 * math.sin(lat) ** 2)
alt = p / math.cos(lat) - N
r = 180 / math.pi
lon *= r
lat *= r
return [lon, lat, alt]
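# Illustrative round-trip check, not part of the original module; the sample
# coordinates are arbitrary and the tolerances are loose because ECEF2LLH uses
# a single-pass approximate inversion.
if __name__ == '__main__':
    lon, lat, alt = 7.43861, 46.95108, 552.0
    x, y, z = LLH2ECEF(lon, lat, alt)
    lon2, lat2, alt2 = ECEF2LLH(x, y, z)
    assert abs(lon2 - lon) < 1e-6
    assert abs(lat2 - lat) < 1e-6
    assert abs(alt2 - alt) < 1e-2
    print([x, y, z], [lon2, lat2, alt2])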
| loicgasser/quantized-mesh-tile | quantized_mesh_tile/llh_ecef.py | Python | mit | 1,533 |
from uiot import *
d("led", "blue", onboardled, "off", "on")
d("button", "forward", d3, 0, 1)
d("button", "back", d4, 0, 1)
run()
| ulno/micropython-extra-ulno | examples/presentation_remote/remote1/copy/autostart.py | Python | mit | 132 |
import unittest
from Hero import Hero
class HeroTest(unittest.TestCase):
def setUp(self):
self.hero = Hero(10, 0)
def test_hero_default(self):
self.assertEqual(
(self.hero.health,
self.hero.magic,
self.hero.x,
self.hero.y), (10, 0, None, None))
def test_setHealth(self):
self.hero.setHealth(-1)
self.assertEqual(self.hero.health, 0)
self.hero.setHealth(5)
self.assertEqual(self.hero.health, 5)
if __name__ == '__main__':
unittest.main()
| vpachedzhi/Hero_Game_Qt | tests.py | Python | mit | 561 |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.animation as anm
#plt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg'
plt.close('all')
data = np.loadtxt('solar_system.dat')
data2 = data[:,0:15]
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.set_xlim3d([np.min(data2[:,0::3]), np.max(data2[:,0::3])])
ax.set_xlabel('X')
ax.set_ylim3d([np.min(data2[:,1::3]), np.max(data2[:,1::3])])
ax.set_ylabel('Y')
ax.set_zlim3d([np.min(data2[:,2::3]), np.max(data2[:,2::3])])
ax.set_zlabel('Z')
# choose a different color for each trajectory
colors = plt.cm.jet(np.linspace(0, 1, np.size(data2[0,:])/3))
# set up lines and points
lines = sum([ax.plot([], [], [], '-', c=c)
for c in colors], [])
pts = sum([ax.plot([], [], [], 'o', c=c)
for c in colors], [])
ax.view_init(30, 0)
data3 = np.reshape(data2,(np.size(data2[0,:])/3,np.size(data2[:,0]),3))
n = 0
for i in np.arange(0,int(np.size(data2[0,:])/3),1):
data3[i,:,0:3] = data2[:,i+n:i+n+3]
n = n + 2
def init():
for line, pt in zip(lines, pts):
line.set_data([], [])
line.set_3d_properties([])
pt.set_data([], [])
pt.set_3d_properties([])
    return pts + lines
def animate(i):
    # Optionally step two time-steps per frame (currently disabled below).
#i = (2 * i) % data3.shape[1]
for line, pt, xi in zip(lines, pts, data3):
x, y, z = xi[:i,0:3].T
line.set_data(x, y)
line.set_3d_properties(z)
pt.set_data(x[-1:], y[-1:])
pt.set_3d_properties(z[-1:])
ax.view_init(30, 0.3 * i)
fig.canvas.draw()
return pts + lines
anim = anm.FuncAnimation(fig, animate, init_func=init,
frames=int(np.size(data2[:,0])), interval=1, blit=True)
writer = anm.writers['ffmpeg'](fps=30)
anim.save('inner_sol_sys.mp4', writer = writer)#, 'ffmpeg_file', fps=15, extra_args=['-vcodec', 'libx264']
| dcelisgarza/applied_math | solar_system/animatep2.py | Python | mit | 1,987 |
#!/usr/bin/env python3
__author__ = "Jeremy Brown"
__copyright__ = "2014 Jeremy Brown"
__license__ = "MIT"
"""
Improved Eurler's Method solver
Inspired by my math homework, which showed me that Euler's Method is a very
repetitive process. I couldn't find a tool that would easily let me solve
using this method and I didn't want to enter a few very similar forumlas 10
times with different values, so I wrote one myself. I also prefer coding to
doing my math homework so this is a compromise.
-----------------------------------------------------------------------------
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
-----------------------------------------------------------------------------
Uses the simpleeval library by Daniel Fairhead for parsing equations.
https://github.com/danthedeckie/simpleeval
Distributed under the MIT License
-----------------------------------------------------------------------------
Usage:
The script prompts for:
- the ODE
- initial value of x
- initial value of y
- value to calculate up to (last value of the table)
- step value (h), the increment of x for each result
and outputs a quick 2-column table of results.
Supported functions:
- Floor
- Ceiling
- Exponential
- Logarithm (natural, base 10, any base)
- Power
- Square root
- Sin, cos, tan
- asin, acos, atan
- sinh, cosh, tanh
- asinh, acosh, atanh
"""
"""
no y' = xy^2 - y/x
"""
from simpleeval.simpleeval import SimpleEval
import math
def function(x, y, formula_string):
    """Evaluate the passed formula at the current point (x, y)."""
    # Update the evaluator's variables so each call uses the current (x, y)
    # rather than the initial values captured when the evaluator was created.
    evaluator.names["x"] = x
    evaluator.names["y"] = y
    return evaluator.eval(formula_string)
def func_y_star(x, y, h, formula_string):
"""Calculates the y*(n+1) using the formula and passed variables."""
return y + h * function(x, y, formula_string)
def func_y(x, y, h, formula_string):
"""Calculates the y(n+1) using the formula and passed variables."""
return y + h * (function(x, y, formula_string) + function(x + h, func_y_star(x, y, h, formula_string), formula_string)) / 2
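# The two helpers above implement the improved Euler (Heun) scheme:
#   predictor:  y*_(n+1) = y_n + h * f(x_n, y_n)
#   corrector:  y_(n+1)  = y_n + (h / 2) * (f(x_n, y_n) + f(x_(n+1), y*_(n+1)))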
def print_table(results):
"""Prints the presults to the console."""
print("\n---RESULTS---\n")
for r in results:
print(r[0], "\t", r[1])
print()
def prompt_value(message):
"""Prompts the user for a value and converts it to a float"""
val = input(message)
while not val or not (val.isdigit() or is_float(val)):
if not (val.isdigit() or is_float(val)):
print("Invalid input, please enter a valid number")
val = input(message)
return float(val)
def is_float(value):
"""Checks if the specified value is a float"""
try:
float(value)
return True
except ValueError:
return False
supported_functions = {"ceil": math.ceil,
"floor": math.floor,
"factorial": math.factorial,
"exp": math.exp,
"ln": math.log,
"log": math.log,
"log10": math.log10,
"pow": math.pow,
"sqrt": math.sqrt,
"sin": math.sin,
"cos": math.cos,
"tan": math.tan,
"asin": math.asin,
"acos": math.acos,
"atan": math.atan,
"sinh": math.sinh,
"cosh": math.cosh,
"tanh": math.tanh,
"asinh": math.asinh,
"acosh": math.acosh,
"atanh": math.atanh}
print("\nImproved Euler's Method ODE solver\nCopyright 2014 Jeremy Brown")
formula_string = str(input("\nEnter an ODE (with all operators, incl. *) to be solved: "))
x = prompt_value("Enter an initial x: ")
y = prompt_value("Enter an initial y: ")
MAX = prompt_value("Enter the value to calculate up to: ")
h = prompt_value("Enter the step value (h) to use for the calculation: ")
results = []
results.append([x, y])
evaluator = SimpleEval(names={"x": x, "y": y, "pi": math.pi, "e": math.e}, functions=supported_functions)
while x <= MAX:
y = func_y(x, y, h, formula_string)
x += h
vals = [float("{0:.4f}".format(x)), float("{0:.4f}".format(y))]
results.append(vals)
print_table(results)
| j-bro/improved-eulers-method | euler.py | Python | mit | 4,813 |
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# these are system modules
import commands
import numpy
import os
import sys
# these are my local modules
from env import gidgetConfigVars
import miscIO
import miscTCGA
import path
import tsvIO
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
NA_VALUE = -999999
debugON = 0
## debugON = 1
# NOTE: this is a modified script that handles ONLY the microRNAseq data
# from BCGSC
platformStrings = [
'bcgsc.ca/illuminaga_mirnaseq/mirnaseq/',
'bcgsc.ca/illuminahiseq_mirnaseq/mirnaseq/']
dataTypeDict = {}
dataTypeDict["IlluminaGA_miRNASeq"] = ["N", "MIRN"]
dataTypeDict["IlluminaHiSeq_miRNASeq"] = ["N", "MIRN"]
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# from Timo's resegmentation code:
class AutoVivification(dict):
"""Implementation of perl's autovivification feature."""
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
value = self[item] = type(self)()
return value
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def getLastBit(aName):
ii = len(aName) - 1
while (aName[ii] != '/'):
ii -= 1
# print ' <%s> <%s> ' % ( aName, aName[ii+1:] )
return (aName[ii + 1:])
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def loadNameMap(mapFilename):
metaData = {}
fh = file(mapFilename)
for aLine in fh:
aLine = aLine.strip()
tokenList = aLine.split('\t')
metaData[tokenList[1]] = tokenList[0]
fh.close()
return (metaData)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# hsa-let-7a-2 MIMAT0010195 N:MIRN:hsa-let-7a-2:::::MIMAT0010195
def makeFeatureName(tok0, tok1, metaData):
try:
featName = "N:MIRN:" + metaData[tok1] + ":::::" + tok1
print " all good : ", tok0, tok1, featName
except:
featName = "N:MIRN:" + tok0 + ":::::" + tok1
print " BAD ??? ", tok0, tok1, featName
return (featName)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def makeOutputFilename(outDir, tumorList, zString, outSuffix):
if (len(tumorList) == 1):
zCancer = tumorList[0]
else:
tumorList.sort()
zCancer = tumorList[0]
for aCancer in tumorList[1:]:
zCancer = zCancer + '_' + aCancer
print " --> combined multi-cancer name : <%s> " % zCancer
# start by pasting together the outDir, cancer sub-dir, then '/'
# and then the cancer name again, followed by a '.'
outFilename = outDir + zCancer + "/" + zCancer + "."
# now we are just going to assume that we are writing to the current
# working directory (21dec12)
outFilename = outDir + zCancer + "."
# next we want to replace all '/' in the platform string with '__'
i1 = 0
while (i1 >= 0):
i2 = zString.find('/', i1)
if (i1 > 0 and i2 > 0):
outFilename += "__"
if (i2 > 0):
outFilename += zString[i1:i2]
i1 = i2 + 1
else:
i1 = i2
# and finally we add on the suffix (usually something like '25jun')
if (not outSuffix.startswith(".")):
outFilename += "."
outFilename += outSuffix
return (outFilename)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
if __name__ == "__main__":
# list of cancer directory names
cancerDirNames = [
'acc', 'blca', 'brca', 'cesc', 'cntl', 'coad', 'dlbc', 'esca', 'gbm',
'hnsc', 'kich', 'kirc', 'kirp', 'laml', 'lcll', 'lgg', 'lihc', 'lnnh',
'luad', 'lusc', 'ov', 'paad', 'prad', 'read', 'sarc', 'skcm', 'stad',
'thca', 'ucec', 'lcml', 'pcpg', 'meso', 'tgct', 'ucs' ]
if (1):
if (len(sys.argv) < 4):
print " Usage: %s <outSuffix> <platformID> <tumorType#1> [tumorType#2 ...] [snapshot-name]"
print " currently supported platforms : ", platformStrings
print " currently supported tumor types : ", cancerDirNames
print " ERROR -- bad command line arguments "
sys.exit(-1)
else:
# output suffix ...
outSuffix = sys.argv[1]
# specified platform ...
platformID = sys.argv[2]
if (platformID[-1] != '/'):
platformID += '/'
if (platformID not in platformStrings):
print " platform <%s> is not supported " % platformID
print " currently supported platforms are: ", platformStrings
sys.exit(-1)
platformStrings = [platformID]
# assume that the default snapshotName is "dcc-snapshot"
snapshotName = "dcc-snapshot"
# specified tumor type(s) ...
argList = sys.argv[3:]
# print argList
tumorList = []
for aType in argList:
tumorType = aType.lower()
if (tumorType in cancerDirNames):
tumorList += [tumorType]
elif (tumorType.find("snap") >= 0):
snapshotName = tumorType
print " using this snapshot : <%s> " % snapshotName
else:
print " ERROR ??? tumorType <%s> not in list of known tumors ??? " % tumorType
print cancerDirNames
if (len(tumorList) < 1):
print " ERROR ??? have no tumor types in list ??? ", tumorList
sys.exit(-1)
print " tumor type(s) list : ", tumorList
# --------------------------------------
# HERE is where the real work starts ...
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# now we need to get set up for writing the output ...
# NEW: 21dec12 ... assuming that we will write to current working directory
outDir = "./"
outFilename = makeOutputFilename(
outDir, tumorList, platformID, outSuffix)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# initialize a bunch of things ...
sampleList = []
gotFiles = []
geneList = []
numGenes = 0
numProc = 0
iS = 0
# and then loop over tumor types ...
for zCancer in tumorList:
print ' '
print ' ********************************** '
print ' LOOP over %d CANCER TYPES ... %s ' % (len(tumorList), zCancer)
# piece together the directory name ...
## topDir = gidgetConfigVars['TCGAFMP_DCC_REPOSITORIES'] + "/dcc-snapshot/public/tumor/" + zCancer + "/cgcc/" + platformID
topDir = gidgetConfigVars['TCGAFMP_DCC_REPOSITORIES'] + "/" + \
snapshotName + "/public/tumor/" + zCancer + "/cgcc/" + platformID
print ' starting from top-level directory ', topDir
dMatch = "Level_3"
if (not os.path.exists(topDir)):
print ' --> <%s> does not exist ' % topDir
continue
d1 = path.path(topDir)
for dName in d1.dirs():
print dName
if (dName.find(dMatch) >= 0):
print ' '
print ' found a <%s> directory : <%s> ' % (dMatch, dName)
archiveName = getLastBit(dName)
print ' archiveName : ', archiveName
if (dName.find("IlluminaHiSeq") > 0):
zPlat = "IlluminaHiSeq_miRNASeq"
elif (dName.find("IlluminaGA") > 0):
zPlat = "IlluminaGA_miRNASeq"
else:
print " not a valid platform: %s ??? !!! " % (dName)
sys.exit(-1)
cmdString = "%s/shscript/expression_matrix_mimat.pl " % gidgetConfigVars['TCGAFMP_ROOT_DIR']
cmdString += "-m " + gidgetConfigVars['TCGAFMP_DCC_REPOSITORIES'] + "/mirna_bcgsc/tcga_mirna_bcgsc_hg19.adf "
cmdString += "-o %s " % outDir
cmdString += "-p %s " % topDir
cmdString += "-n %s " % zPlat
print " "
print cmdString
print " "
(status, output) = commands.getstatusoutput(cmdString)
normMatFilename = outDir + "/expn_matrix_mimat_norm_%s.txt" % (zPlat)
print " normMatFilename = <%s> " % normMatFilename
# make sure that we can open this file ...
try:
fh = file(normMatFilename, 'r')
gotFiles += [normMatFilename]
fh.close()
except:
print " "
print " Not able to open expn_matrix_mimat_norm file ??? "
print " "
sys.exit(-1)
print " "
print " "
if (len(gotFiles) == 0):
print " ERROR in new_Level3_miRNAseq ... no data files found "
sys.exit(-1)
if (len(gotFiles) > 1):
print " ERROR ??? we should have only one file at this point "
print gotFiles
sys.exit(-1)
# if we get this far, we should make sure that the output directory we
# want exists
print " --> testing that we have an output directory ... <%s> " % outDir
tsvIO.createDir(outDir)
print " output file name will be called <%s> " % outFilename
# we also need to read in the mapping file ...
metaData = loadNameMap(
gidgetConfigVars['TCGAFMP_DCC_REPOSITORIES'] + "/mirna_bcgsc/mature.fa.flat.human.mirbase_v19.txt")
if (1):
fh = file(gotFiles[0], 'r')
numRow = miscIO.num_lines(fh) - 1
numCol = miscIO.num_cols(fh, '\t') - 1
rowLabels = []
dataMatrix = [0] * numRow
for iR in range(numRow):
dataMatrix[iR] = [0] * numCol
hdrLine = fh.readline()
hdrLine = hdrLine.strip()
hdrTokens = hdrLine.split('\t')
if (len(hdrTokens) != (numCol + 1)):
print " ERROR #1 ??? "
sys.exit(-1)
done = 0
iR = 0
numNA = 0
while (not done):
aLine = fh.readline()
aLine = aLine.strip()
tokenList = aLine.split('\t')
if (len(tokenList) != (numCol + 1)):
done = 1
else:
aLabel = tokenList[0]
# print " label = <%s> " % aLabel
labelTokens = aLabel.split('.')
# print labelTokens
featName = makeFeatureName(
labelTokens[0], labelTokens[1], metaData)
# print featName
rowLabels += [featName]
for iC in range(numCol):
try:
fVal = float(tokenList[iC + 1])
dataMatrix[iR][iC] = fVal
except:
dataMatrix[iR][iC] = NA_VALUE
numNA += 1
iR += 1
print " iR=%d numNA=%d " % (iR, numNA)
dataD = {}
dataD['rowLabels'] = rowLabels
dataD['colLabels'] = hdrTokens[1:]
dataD['dataMatrix'] = dataMatrix
dataD['dataType'] = "N:MIRN"
print ' writing out data matrix to ', outFilename
newFeatureName = "C:SAMP:mirnPlatform:::::seq"
newFeatureValue = zPlat
dataD = tsvIO.addConstFeature(dataD, newFeatureName, newFeatureValue)
sortRowFlag = 0
sortColFlag = 0
tsvIO.writeTSV_dataMatrix(
dataD, sortRowFlag, sortColFlag, outFilename)
print ' '
print ' DONE !!! '
print ' '
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
| cancerregulome/gidget | commands/feature_matrix_construction/main/new_Level3_miRNAseq.py | Python | mit | 11,860 |
# -*- coding: utf-8 -*-
from datetime import timedelta
from workalendar.core import WesternCalendar, ChristianMixin
class Cyprus(WesternCalendar, ChristianMixin):
"Cyprus"
include_epiphany = True
include_clean_monday = True
include_good_friday = True
include_easter_saturday = True
include_easter_sunday = True
include_easter_monday = True
include_whit_monday = True
whit_monday_label = 'Pentecost Monday'
include_christmas_eve = True
include_christmas_day = True
include_boxing_day = True
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(3, 25, "Greek Independence Day"),
(4, 1, "Cyprus National Day"),
(5, 1, "Labour Day"),
(7, 15, "Dormition of the Theotokos"),
(10, 1, "Cyprus Independence Day"),
(10, 28, "Greek National Day"),
)
def get_variable_days(self, year):
days = super(Cyprus, self).get_variable_days(year)
days.append((self.get_easter_monday(year) +
timedelta(days=1), "Easter Tuesday"))
return days
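# Illustrative usage sketch (not part of the original module): listing the
# Cyprus holidays computed by this calendar for one (arbitrarily chosen) year.
def _example_cyprus_holidays(year=2015):
    return Cyprus().holidays(year)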
| gregn610/workalendar | workalendar/europe/cyprus.py | Python | mit | 1,077 |
import psycopg2
def pg_connect(
host,
port,
user,
password,
database):
"""
    Small connection helper.
    If no password is supplied we try the default postgres user
    and expect a configured pg_ident mapping or something similar.
"""
#if DEBUG: keep("Model().pg_connect()")
try:
if password:
conn = psycopg2.connect(
database=database,
user=user,
port=port,
password=str(password),
host=host
#connection_factory = psycopg2.extras.DictConnection
)
else:
conn = psycopg2.connect(
database=database,
user=user,
port=port,
password="",
host=None
#connection_factory = psycopg2.extras.DictConnection
)
except psycopg2.Error, psy_err:
print "The connection is not possible!"
print psy_err
print psycopg2.Error
if host is None:
raise psy_err
conn.set_isolation_level(0)
return conn
def pg_get_data(connection, query):
    ''' Just a general method to fetch the data for different queries '''
#if DEBUG: keep("Model().pg_get_data()")
cur = connection.cursor()
cur.execute(query)
data = cur.fetchall()
column_headers = [desc[0] for desc in cur.description]
return column_headers, data
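# Illustrative usage sketch (not part of the original module). The host,
# credentials and query below are placeholder assumptions, shown only to
# demonstrate how pg_connect() and pg_get_data() fit together.
def _example_usage():
    conn = pg_connect(host="localhost", port=5432, user="postgres",
                      password=None, database="postgres")
    headers, rows = pg_get_data(conn, "SELECT datname FROM pg_database;")
    conn.close()
    return headers, rows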
| ragged/yapgt | utils/connect.py | Python | mit | 1,481 |
from binascii import hexlify
from struct import pack, unpack
import hashlib
import time
import sys
import traceback
import electrum_dgb as electrum
from electrum_dgb.bitcoin import EncodeBase58Check, DecodeBase58Check, TYPE_ADDRESS, int_to_hex, var_int
from electrum_dgb.i18n import _
from electrum_dgb.plugins import BasePlugin, hook
from electrum_dgb.keystore import Hardware_KeyStore, parse_xpubkey
from ..hw_wallet import HW_PluginBase
from electrum_dgb.util import format_satoshis_plain, print_error
try:
import hid
from btchip.btchipComm import HIDDongleHIDAPI, DongleWait
from btchip.btchip import btchip
from btchip.btchipUtils import compress_public_key,format_transaction, get_regular_input_script, get_p2sh_input_script
from btchip.bitcoinTransaction import bitcoinTransaction
from btchip.btchipFirmwareWizard import checkFirmware, updateFirmware
from btchip.btchipException import BTChipException
btchip.setAlternateCoinVersions = setAlternateCoinVersions
BTCHIP = True
BTCHIP_DEBUG = False
except ImportError:
BTCHIP = False
class Ledger_Client():
def __init__(self, hidDevice):
self.dongleObject = btchip(hidDevice)
self.preflightDone = False
def is_pairable(self):
return True
def close(self):
self.dongleObject.dongle.close()
def timeout(self, cutoff):
pass
def is_initialized(self):
return True
def label(self):
return ""
def i4b(self, x):
return pack('>I', x)
def get_xpub(self, bip32_path):
self.checkDevice()
# bip32_path is of the form 44'/0'/1'
# S-L-O-W - we don't handle the fingerprint directly, so compute
# it manually from the previous node
# This only happens once so it's bearable
#self.get_client() # prompt for the PIN before displaying the dialog if necessary
#self.handler.show_message("Computing master public key")
try:
splitPath = bip32_path.split('/')
if splitPath[0] == 'm':
splitPath = splitPath[1:]
bip32_path = bip32_path[2:]
fingerprint = 0
if len(splitPath) > 1:
prevPath = "/".join(splitPath[0:len(splitPath) - 1])
nodeData = self.dongleObject.getWalletPublicKey(prevPath)
publicKey = compress_public_key(nodeData['publicKey'])
h = hashlib.new('ripemd160')
h.update(hashlib.sha256(publicKey).digest())
fingerprint = unpack(">I", h.digest()[0:4])[0]
nodeData = self.dongleObject.getWalletPublicKey(bip32_path)
publicKey = compress_public_key(nodeData['publicKey'])
depth = len(splitPath)
lastChild = splitPath[len(splitPath) - 1].split('\'')
if len(lastChild) == 1:
childnum = int(lastChild[0])
else:
childnum = 0x80000000 | int(lastChild[0])
xpub = "0488B21E".decode('hex') + chr(depth) + self.i4b(fingerprint) + self.i4b(childnum) + str(nodeData['chainCode']) + str(publicKey)
except Exception, e:
#self.give_error(e, True)
return None
finally:
#self.handler.clear_dialog()
pass
return EncodeBase58Check(xpub)
def has_detached_pin_support(self, client):
try:
client.getVerifyPinRemainingAttempts()
return True
except BTChipException, e:
if e.sw == 0x6d00:
return False
raise e
def is_pin_validated(self, client):
try:
# Invalid SET OPERATION MODE to verify the PIN status
client.dongle.exchange(bytearray([0xe0, 0x26, 0x00, 0x00, 0x01, 0xAB]))
except BTChipException, e:
if (e.sw == 0x6982):
return False
if (e.sw == 0x6A80):
return True
raise e
def perform_hw1_preflight(self):
try:
firmware = self.dongleObject.getFirmwareVersion()['version'].split(".")
if not checkFirmware(firmware):
self.dongleObject.dongle.close()
raise Exception("HW1 firmware version too old. Please update at https://www.ledgerwallet.com")
try:
self.dongleObject.getOperationMode()
except BTChipException, e:
if (e.sw == 0x6985):
self.dongleObject.dongle.close()
self.handler.get_setup( )
# Acquire the new client on the next run
else:
raise e
if self.has_detached_pin_support(self.dongleObject) and not self.is_pin_validated(self.dongleObject) and (self.handler <> None):
remaining_attempts = self.dongleObject.getVerifyPinRemainingAttempts()
if remaining_attempts <> 1:
msg = "Enter your Ledger PIN - remaining attempts : " + str(remaining_attempts)
else:
msg = "Enter your Ledger PIN - WARNING : LAST ATTEMPT. If the PIN is not correct, the dongle will be wiped."
confirmed, p, pin = self.password_dialog(msg)
if not confirmed:
raise Exception('Aborted by user - please unplug the dongle and plug it again before retrying')
pin = pin.encode()
self.dongleObject.verifyPin(pin)
except BTChipException, e:
if (e.sw == 0x6faa):
raise Exception("Dongle is temporarily locked - please unplug it and replug it again")
if ((e.sw & 0xFFF0) == 0x63c0):
raise Exception("Invalid PIN - please unplug the dongle and plug it again before retrying")
raise e
def checkDevice(self):
if not self.preflightDone:
try:
self.perform_hw1_preflight()
except BTChipException as e:
if (e.sw == 0x6d00):
raise BaseException("Device not in Bitcoin mode")
raise e
self.preflightDone = True
def password_dialog(self, msg=None):
response = self.handler.get_word(msg)
if response is None:
return False, None, None
return True, response, response
class Ledger_KeyStore(Hardware_KeyStore):
hw_type = 'ledger'
device = 'Ledger'
def __init__(self, d):
Hardware_KeyStore.__init__(self, d)
# Errors and other user interaction is done through the wallet's
# handler. The handler is per-window and preserved across
# device reconnects
self.force_watching_only = False
self.signing = False
self.cfg = d.get('cfg', {'mode':0,'pair':''})
def dump(self):
obj = Hardware_KeyStore.dump(self)
obj['cfg'] = self.cfg
return obj
def get_derivation(self):
return self.derivation
def get_client(self):
return self.plugin.get_client(self)
def give_error(self, message, clear_client = False):
print_error(message)
if not self.signing:
self.handler.show_error(message)
else:
self.signing = False
if clear_client:
self.client = None
raise Exception(message)
def address_id_stripped(self, address):
# Strip the leading "m/"
change, index = self.get_address_index(address)
derivation = self.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
return address_path[2:]
def decrypt_message(self, pubkey, message, password):
raise RuntimeError(_('Encryption and decryption are currently not supported for %s') % self.device)
def sign_message(self, sequence, message, password):
self.signing = True
# prompt for the PIN before displaying the dialog if necessary
client = self.get_client()
address_path = self.get_derivation()[2:] + "/%d/%d"%sequence
self.handler.show_message("Signing message ...")
try:
info = self.get_client().signMessagePrepare(address_path, message)
pin = ""
if info['confirmationNeeded']:
pin = self.handler.get_auth( info ) # does the authenticate dialog and returns pin
if not pin:
raise UserWarning(_('Cancelled by user'))
pin = str(pin).encode()
signature = self.get_client().signMessageSign(pin)
except BTChipException, e:
if e.sw == 0x6a80:
self.give_error("Unfortunately, this message cannot be signed by the Ledger wallet. Only alphanumerical messages shorter than 140 characters are supported. Please remove any extra characters (tab, carriage return) and retry.")
else:
self.give_error(e, True)
except UserWarning:
self.handler.show_error(_('Cancelled by user'))
return ''
except Exception, e:
self.give_error(e, True)
finally:
self.handler.clear_dialog()
self.signing = False
# Parse the ASN.1 signature
rLength = signature[3]
r = signature[4 : 4 + rLength]
sLength = signature[4 + rLength + 1]
s = signature[4 + rLength + 2:]
if rLength == 33:
r = r[1:]
if sLength == 33:
s = s[1:]
r = str(r)
s = str(s)
# And convert it
return chr(27 + 4 + (signature[0] & 0x01)) + r + s
def sign_transaction(self, tx, password):
if tx.is_complete():
return
client = self.get_client()
self.signing = True
inputs = []
inputsPaths = []
pubKeys = []
chipInputs = []
redeemScripts = []
signatures = []
preparedTrustedInputs = []
changePath = ""
changeAmount = None
output = None
outputAmount = None
p2shTransaction = False
reorganize = False
pin = ""
self.get_client() # prompt for the PIN before displaying the dialog if necessary
# Fetch inputs of the transaction to sign
derivations = self.get_tx_derivations(tx)
for txin in tx.inputs():
if txin.get('is_coinbase'):
self.give_error("Coinbase not supported") # should never happen
if len(txin['pubkeys']) > 1:
p2shTransaction = True
for i, x_pubkey in enumerate(txin['x_pubkeys']):
if x_pubkey in derivations:
signingPos = i
s = derivations.get(x_pubkey)
hwAddress = "%s/%d/%d" % (self.get_derivation()[2:], s[0], s[1])
break
else:
self.give_error("No matching x_key for sign_transaction") # should never happen
inputs.append([txin['prev_tx'].raw, txin['prevout_n'], txin.get('redeemScript'), txin['prevout_hash'], signingPos ])
inputsPaths.append(hwAddress)
pubKeys.append(txin['pubkeys'])
# Sanity check
if p2shTransaction:
for txinput in tx.inputs():
if len(txinput['pubkeys']) < 2:
self.give_error("P2SH / regular input mixed in same transaction not supported") # should never happen
txOutput = var_int(len(tx.outputs()))
for txout in tx.outputs():
output_type, addr, amount = txout
txOutput += int_to_hex(amount, 8)
script = tx.pay_script(output_type, addr)
txOutput += var_int(len(script)/2)
txOutput += script
txOutput = txOutput.decode('hex')
# Recognize outputs - only one output and one change is authorized
if not p2shTransaction:
if len(tx.outputs()) > 2: # should never happen
self.give_error("Transaction with more than 2 outputs not supported")
for _type, address, amount in tx.outputs():
assert _type == TYPE_ADDRESS
info = tx.output_info.get(address)
if info is not None:
index, xpubs, m = info
changePath = self.get_derivation()[2:] + "/%d/%d"%index
changeAmount = amount
else:
output = address
outputAmount = amount
self.handler.show_message(_("Confirm Transaction on your Ledger device..."))
try:
# Get trusted inputs from the original transactions
for utxo in inputs:
if not p2shTransaction:
txtmp = bitcoinTransaction(bytearray(utxo[0].decode('hex')))
chipInputs.append(self.get_client().getTrustedInput(txtmp, utxo[1]))
redeemScripts.append(txtmp.outputs[utxo[1]].script)
else:
tmp = utxo[3].decode('hex')[::-1].encode('hex')
tmp += int_to_hex(utxo[1], 4)
chipInputs.append({'value' : tmp.decode('hex')})
redeemScripts.append(bytearray(utxo[2].decode('hex')))
# Sign all inputs
firstTransaction = True
inputIndex = 0
rawTx = tx.serialize()
self.get_client().enableAlternate2fa(False)
while inputIndex < len(inputs):
self.get_client().startUntrustedTransaction(firstTransaction, inputIndex,
chipInputs, redeemScripts[inputIndex])
if not p2shTransaction:
outputData = self.get_client().finalizeInput(output, format_satoshis_plain(outputAmount),
format_satoshis_plain(tx.get_fee()), changePath, bytearray(rawTx.decode('hex')))
reorganize = True
else:
outputData = self.get_client().finalizeInputFull(txOutput)
outputData['outputData'] = txOutput
if firstTransaction:
transactionOutput = outputData['outputData']
if outputData['confirmationNeeded']:
outputData['address'] = output
self.handler.clear_dialog()
pin = self.handler.get_auth( outputData ) # does the authenticate dialog and returns pin
if not pin:
raise UserWarning()
if pin != 'paired':
self.handler.show_message(_("Confirmed. Signing Transaction..."))
else:
# Sign input with the provided PIN
inputSignature = self.get_client().untrustedHashSign(inputsPaths[inputIndex], pin)
inputSignature[0] = 0x30 # force for 1.4.9+
signatures.append(inputSignature)
inputIndex = inputIndex + 1
if pin != 'paired':
firstTransaction = False
except UserWarning:
self.handler.show_error(_('Cancelled by user'))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.give_error(e, True)
finally:
self.handler.clear_dialog()
# Reformat transaction
inputIndex = 0
while inputIndex < len(inputs):
if p2shTransaction:
signaturesPack = [signatures[inputIndex]] * len(pubKeys[inputIndex])
inputScript = get_p2sh_input_script(redeemScripts[inputIndex], signaturesPack)
preparedTrustedInputs.append([ ("\x00" * 4) + chipInputs[inputIndex]['value'], inputScript ])
else:
inputScript = get_regular_input_script(signatures[inputIndex], pubKeys[inputIndex][0].decode('hex'))
preparedTrustedInputs.append([ chipInputs[inputIndex]['value'], inputScript ])
inputIndex = inputIndex + 1
updatedTransaction = format_transaction(transactionOutput, preparedTrustedInputs)
updatedTransaction = hexlify(updatedTransaction)
if reorganize:
tx.update(updatedTransaction)
else:
tx.update_signatures(updatedTransaction)
self.signing = False
class LedgerPlugin(HW_PluginBase):
libraries_available = BTCHIP
keystore_class = Ledger_KeyStore
client = None
DEVICE_IDS = [
(0x2581, 0x1807), # HW.1 legacy btchip
(0x2581, 0x2b7c), # HW.1 transitional production
(0x2581, 0x3b7c), # HW.1 ledger production
(0x2581, 0x4b7c), # HW.1 ledger test
(0x2c97, 0x0000), # Blue
(0x2c97, 0x0001) # Nano-S
]
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
if self.libraries_available:
self.device_manager().register_devices(self.DEVICE_IDS)
def btchip_is_connected(self, keystore):
try:
self.get_client(keystore).getFirmwareVersion()
except Exception as e:
return False
return True
def get_btchip_device(self, device):
ledger = False
if (device.product_key[0] == 0x2581 and device.product_key[1] == 0x3b7c) or (device.product_key[0] == 0x2581 and device.product_key[1] == 0x4b7c) or (device.product_key[0] == 0x2c97):
ledger = True
dev = hid.device()
dev.open_path(device.path)
dev.set_nonblocking(True)
return HIDDongleHIDAPI(dev, ledger, BTCHIP_DEBUG)
def create_client(self, device, handler):
self.handler = handler
client = self.get_btchip_device(device)
if client <> None:
client = Ledger_Client(client)
return client
def setup_device(self, device_info, wizard):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
#client.handler = wizard
client.handler = self.create_handler(wizard)
#client.get_xpub('m')
client.get_xpub("m/44'/0'") # TODO replace by direct derivation once Nano S > 1.1
def get_xpub(self, device_id, derivation, wizard):
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
#client.handler = wizard
client.handler = self.create_handler(wizard)
client.checkDevice()
xpub = client.get_xpub(derivation)
return xpub
def get_client(self, wallet, force_pair=True, noPin=False):
aborted = False
client = self.client
if not client or client.bad:
try:
d = getDongle(BTCHIP_DEBUG)
client = btchip(d)
ver = client.getFirmwareVersion()
firmware = ver['version'].split(".")
wallet.canAlternateCoinVersions = (ver['specialVersion'] >= 0x20 and
map(int, firmware) >= [1, 0, 1])
if not checkFirmware(firmware):
d.close()
try:
updateFirmware()
except Exception, e:
aborted = True
raise e
d = getDongle(BTCHIP_DEBUG)
client = btchip(d)
try:
client.getOperationMode()
except BTChipException, e:
if (e.sw == 0x6985):
d.close()
dialog = StartBTChipPersoDialog()
dialog.exec_()
# Then fetch the reference again as it was invalidated
d = getDongle(BTCHIP_DEBUG)
client = btchip(d)
else:
raise e
if not noPin:
# Immediately prompts for the PIN
remaining_attempts = client.getVerifyPinRemainingAttempts()
if remaining_attempts <> 1:
msg = "Enter your Ledger PIN - remaining attempts : " + str(remaining_attempts)
else:
msg = "Enter your Ledger PIN - WARNING : LAST ATTEMPT. If the PIN is not correct, the dongle will be wiped."
confirmed, p, pin = wallet.password_dialog(msg)
if not confirmed:
aborted = True
raise Exception('Aborted by user - please unplug the dongle and plug it again before retrying')
pin = pin.encode()
client.verifyPin(pin)
if wallet.canAlternateCoinVersions:
client.setAlternateCoinVersions(30, 5)
except BTChipException, e:
try:
client.dongle.close()
except:
pass
client = None
if (e.sw == 0x6faa):
raise Exception("Dongle is temporarily locked - please unplug it and replug it again")
if ((e.sw & 0xFFF0) == 0x63c0):
raise Exception("Invalid PIN - please unplug the dongle and plug it again before retrying")
raise e
except Exception, e:
try:
client.dongle.close()
except:
pass
client = None
if not aborted:
raise Exception("Could not connect to your Ledger wallet. Please verify access permissions, PIN, or unplug the dongle and plug it again")
else:
raise e
client.bad = False
wallet.device_checked = False
wallet.proper_device = False
self.client = client
return self.client | protonn/Electrum-Cash | plugins/ledger/ledger.py | Python | mit | 22,169 |
# -*- coding: utf-8 -*-
"""
Facade patterns
"""
class Power(object):
"""
SubSystem
"""
@staticmethod
def start():
print "Power on!"
class Display(object):
"""
Subsystem
"""
@staticmethod
def start():
print "Display on!"
class Client(object):
"""
Facade
"""
def __init__(self):
self.power = Power()
self.display = Display()
self.components = [self.power, self.display]
def start_all(self):
for _component in self.components:
_component.start()
if __name__ == '__main__':
client = Client()
client.start_all()
| xuwei0455/design_patterns | Facade.py | Python | mit | 645 |
#!/usr/bin/env python
import obd_io
import serial
import platform
import obd_sensors
from datetime import datetime
import time
from obd_utils import scanSerial
class OBD_Capture():
def __init__(self):
self.supportedSensorList = []
self.port = None
localtime = time.localtime(time.time())
def connect(self):
portnames = scanSerial()
print portnames
for port in portnames:
self.port = obd_io.OBDPort(port, None, 2, 2)
if(self.port.State == 0):
self.port.close()
self.port = None
else:
break
if(self.port):
print "Connected to "+self.port.port.name
def is_connected(self):
return self.port
def getSupportedSensorList(self):
return self.supportedSensorList
def capture_data(self):
text = ""
#Find supported sensors - by getting PIDs from OBD
# its a string of binary 01010101010101
# 1 means the sensor is supported
self.supp = self.port.sensor(0)[1]
self.supportedSensorList = []
self.unsupportedSensorList = []
# loop through PIDs binary
for i in range(0, len(self.supp)):
if self.supp[i] == "1":
# store index of sensor and sensor object
self.supportedSensorList.append([i+1, obd_sensors.SENSORS[i+1]])
else:
self.unsupportedSensorList.append([i+1, obd_sensors.SENSORS[i+1]])
for supportedSensor in self.supportedSensorList:
text += "supported sensor index = " + str(supportedSensor[0]) + " " + str(supportedSensor[1].shortname) + "\n"
time.sleep(3)
if(self.port is None):
return None
#Loop until Ctrl C is pressed
localtime = datetime.now()
current_time = str(localtime.hour)+":"+str(localtime.minute)+":"+str(localtime.second)+"."+str(localtime.microsecond)
#log_string = current_time + "\n"
text = current_time + "\n"
results = {}
for supportedSensor in self.supportedSensorList:
sensorIndex = supportedSensor[0]
(name, value, unit) = self.port.sensor(sensorIndex)
text += name + " = " + str(value) + " " + str(unit) + "\n"
results[obd_sensors.SENSORS[sensorIndex].shortname] = str(value)+" "+str(unit);
self.allSensorData = results
return text
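# Illustrative sketch (not part of the original module): how the PID-support
# bitstring described in capture_data() maps onto sensor indices. The example
# string below is a made-up value, not real OBD output.
def _example_supported_indices():
    supp = "1010"  # hypothetical answer from self.port.sensor(0)[1]
    return [i + 1 for i in range(len(supp)) if supp[i] == "1"]  # -> [1, 3]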
if __name__ == "__main__":
o = OBD_Capture()
o.connect()
time.sleep(3)
if not o.is_connected():
print "Not connected"
else:
o.capture_data()
| iclwy524/asuradaProject | sensor/pyobd/obd_capture.py | Python | mit | 2,641 |
def mutate_string(string, position, character):
    p = list(string)
    p[int(position)] = character
    p = ''.join(p)
    return p
if __name__ == '__main__':
s = input()
i, c = input().split()
s_new = mutate_string(s, int(i), c)
print(s_new)
| manishbisht/Competitive-Programming | Hackerrank/Practice/Python/3.string/17.Mutations.py | Python | mit | 268 |
#!/usr/bin/env python
import pywikibot
from pywikibot.data import api
site = pywikibot.Site()
reason = '[[WP:CSD#U2|U2]]: Userpage or subpage of a nonexistent user'
def user_does_not_exist(name):
req = api.Request(action='query', list='users', ususers=name)
data = req.submit()
return 'missing' in data['query']['users'][0]
countok = 0
countbad = 0
gen = site.newpages(showBot=True, namespaces=[3], returndict=True, user='EdwardsBot')
for page, pagedict in gen:
username = page.title(withNamespace=False)
if user_does_not_exist(username):
print page
page.delete(reason, prompt=False)
countbad += 1
else:
countok += 1
print 'BAD: {0}'.format(countbad)
print 'OK: {0}'.format(countok)
| legoktm/pywikipedia-scripts | clean_up_mess.py | Python | mit | 747 |
import os
from coverage_reporter.options import Option, OptionList
DEFAULT_PLUGIN_DIRS = [ os.path.dirname(__file__) + '/collectors/',
os.path.dirname(__file__) + '/filters/',
os.path.dirname(__file__) + '/reports/',
]
DEFAULT_PLUGINS = [ 'figleaf_collector',
'coverage_collector',
'patch',
'exclude',
'minimum',
'summarize',
'annotate',
]
DEFAULT_CONFIG_PATHS = ['/etc/coverage_reporter', os.path.expanduser('~/.coverage_reporter'), '.coverage_reporter']
DEFAULT_OPTIONS = OptionList([ Option('skip_default_config', 'boolean'),
Option('config_file', 'list', default=[]),
Option('plugin_dir', 'list', default=[]),
Option('plugin', 'list', default=[]),
Option('disable_plugin', 'list', default=[]) ])
| dugan/coverage-reporter | coverage_reporter/defaults.py | Python | mit | 1,043 |
# flake8: noqa
"""
Tools for object-modeling OSC responses received from ``scsynth``.
"""
from .bases import Request, RequestBundle, Requestable, Response
from .buffers import (
BufferAllocateReadChannelRequest,
BufferAllocateReadRequest,
BufferAllocateRequest,
BufferCloseRequest,
BufferCopyRequest,
BufferFillRequest,
BufferFreeRequest,
BufferGenerateRequest,
BufferGetContiguousRequest,
BufferGetRequest,
BufferInfoResponse,
BufferNormalizeRequest,
BufferQueryRequest,
BufferReadChannelRequest,
BufferReadRequest,
BufferSetContiguousRequest,
BufferSetContiguousResponse,
BufferSetRequest,
BufferSetResponse,
BufferWriteRequest,
BufferZeroRequest,
)
from .buses import (
ControlBusFillRequest,
ControlBusGetContiguousRequest,
ControlBusGetRequest,
ControlBusSetContiguousRequest,
ControlBusSetContiguousResponse,
ControlBusSetRequest,
ControlBusSetResponse,
)
from .groups import (
GroupDeepFreeRequest,
GroupFreeAllRequest,
GroupNewRequest,
GroupQueryTreeRequest,
ParallelGroupNewRequest,
QueryTreeResponse,
)
from .movement import (
GroupHeadRequest,
GroupTailRequest,
MoveRequest,
NodeAfterRequest,
NodeBeforeRequest,
)
from .nodes import (
NodeFreeRequest,
NodeInfoResponse,
NodeMapToAudioBusRequest,
NodeMapToControlBusRequest,
NodeQueryRequest,
NodeRunRequest,
NodeSetContiguousResponse,
NodeSetRequest,
NodeSetResponse,
)
from .server import (
ClearScheduleRequest,
DoneResponse,
DumpOscRequest,
FailResponse,
NothingRequest,
NotifyRequest,
QuitRequest,
StatusRequest,
StatusResponse,
SyncRequest,
SyncedResponse,
)
from .synthdefs import (
SynthDefFreeAllRequest,
SynthDefFreeRequest,
SynthDefLoadDirectoryRequest,
SynthDefLoadRequest,
SynthDefReceiveRequest,
SynthDefRemovedResponse,
)
from .synths import SynthNewRequest, TriggerResponse
__all__ = [
"BufferAllocateReadChannelRequest",
"BufferAllocateReadRequest",
"BufferAllocateRequest",
"BufferCloseRequest",
"BufferCopyRequest",
"BufferFillRequest",
"BufferFreeRequest",
"BufferGenerateRequest",
"BufferGetContiguousRequest",
"BufferGetRequest",
"BufferInfoResponse",
"BufferNormalizeRequest",
"BufferQueryRequest",
"BufferReadChannelRequest",
"BufferReadRequest",
"BufferSetContiguousRequest",
"BufferSetContiguousResponse",
"BufferSetRequest",
"BufferSetResponse",
"BufferWriteRequest",
"BufferZeroRequest",
"ClearScheduleRequest",
"ControlBusFillRequest",
"ControlBusGetContiguousRequest",
"ControlBusGetRequest",
"ControlBusSetContiguousRequest",
"ControlBusSetContiguousResponse",
"ControlBusSetRequest",
"ControlBusSetResponse",
"DoneResponse",
"DumpOscRequest",
"FailResponse",
"GroupDeepFreeRequest",
"GroupDumpTreeRequest",
"GroupFreeAllRequest",
"GroupHeadRequest",
"GroupNewRequest",
"GroupQueryTreeRequest",
"GroupTailRequest",
"MoveRequest",
"NodeAfterRequest",
"NodeBeforeRequest",
"NodeCommandRequest",
"NodeFillRequest",
"NodeFreeRequest",
"NodeInfoResponse",
"NodeMapToAudioBusRequest",
"NodeMapToControlBusRequest",
"NodeOrderRequest",
"NodeQueryRequest",
"NodeRunRequest",
"NodeSetContiguousRequest",
"NodeSetContiguousResponse",
"NodeSetRequest",
"NodeSetResponse",
"NodeTraceRequest",
"NothingRequest",
"NotifyRequest",
"ParallelGroupNewRequest",
"QueryTreeResponse",
"QuitRequest",
"Request",
"RequestBundle",
"Requestable",
"Response",
"StatusRequest",
"StatusResponse",
"SyncRequest",
"SyncedResponse",
"SynthDefFreeAllRequest",
"SynthDefFreeRequest",
"SynthDefLoadDirectoryRequest",
"SynthDefLoadRequest",
"SynthDefReceiveRequest",
"SynthDefRemovedResponse",
"SynthNewRequest",
"TriggerResponse",
]
| josiah-wolf-oberholtzer/supriya | supriya/commands/__init__.py | Python | mit | 4,060 |
from Qt import QtWidgets, QtCore, QtGui
import math
from . import resource
from .exc import *
class ControlLayout(QtWidgets.QGridLayout):
def __init__(self, columns=1, parent=None):
super(ControlLayout, self).__init__(parent)
self._columns = columns
self.setContentsMargins(20, 20, 20, 20)
self.setHorizontalSpacing(20)
self.setRowStretch(1000, 1)
self.widgets = []
@property
def columns(self):
return self._columns
@columns.setter
def columns(self, value):
self._columns = value
widgets = list(self.widgets)
for w in widgets:
self.takeWidget(w)
for w in widgets:
self.addWidget(w)
@property
def count(self):
return len(self.widgets)
def takeWidget(self, widget):
if widget not in self.widgets:
return None
self.widgets.pop(self.widgets.index(widget))
self.takeAt(self.indexOf(widget))
return widget
def addWidget(self, widget):
count = self.count
        row = int(math.floor(count / self.columns))
column = (count % self.columns)
super(ControlLayout, self).addWidget(widget, row, column)
self.widgets.append(widget)
class FormWidget(QtWidgets.QWidget):
def __init__(self, name, columns=1, layout_horizontal=False, parent=None):
super(FormWidget, self).__init__(parent)
self.name = name
self.controls = {}
self.forms = {}
self.parent = parent
self.layout = QtWidgets.QVBoxLayout()
self.layout.setContentsMargins(0, 0, 0, 0)
self.layout.setSpacing(0)
self.setLayout(self.layout)
if layout_horizontal:
self.form_layout = QtWidgets.QBoxLayout(
QtWidgets.QBoxLayout.LeftToRight
)
else:
self.form_layout = QtWidgets.QBoxLayout(
QtWidgets.QBoxLayout.TopToBottom
)
self.form_layout.setContentsMargins(0, 0, 0, 0)
self.form_layout.setSpacing(0)
self.control_layout = ControlLayout(columns=columns)
self.form_layout.addLayout(self.control_layout)
self.layout.addLayout(self.form_layout)
self.setProperty('form', True)
self.setAttribute(QtCore.Qt.WA_StyledBackground, True)
@property
def valid(self):
is_valid = []
for name, control in self.controls.iteritems():
control.validate()
is_valid.append(control.valid)
for name, form in self.forms.iteritems():
is_valid.append(form.valid)
return all(is_valid)
def get_value(self, flatten=False):
'''Get the value of this forms fields and subforms fields.
:param flatten: If set to True, return a flattened dict
'''
form_data = {}
for name, control in self.controls.iteritems():
form_data[name] = control.get_value()
for name, form in self.forms.iteritems():
form_value = form.get_value(flatten=flatten)
if flatten:
form_data.update(form_value)
else:
form_data[name] = form_value
return form_data
def set_value(self, strict=True, **data):
'''Set the value of all the forms subforms and fields. You can pass
an additional keyword argument strict to False to ignore mismatched
names and subforms.
:param strict: raise exceptions for any invalid names in data
:param data: Field data used to set the values of the form
usage::
myform.set_value(
strict=True,
**{
'strfield': 'ABCDEFG',
'intfield': 1,
'subform': {
'subform_strfield': 'BCDEFGH',
'subform_intfield': 2,}},
)
'''
for name, value in data.iteritems():
if isinstance(value, dict):
try:
self.forms[name].set_value(**value)
except KeyError:
if strict:
raise FormNotFound(name + ' does not exist')
continue
try:
self.controls[name].set_value(value)
except KeyError:
if strict:
raise FieldNotFound(name + ' does not exist')
def add_header(self, title, description=None, icon=None):
'''Add a header'''
self.header = Header(title, description, icon, self)
self.layout.insertWidget(0, self.header)
def add_form(self, name, form):
'''Add a subform'''
self.form_layout.addWidget(form)
self.forms[name] = form
setattr(self, name, form)
def add_control(self, name, control):
'''Add a control'''
self.control_layout.addWidget(control.main_widget)
self.controls[name] = control
setattr(self, name, control)
class FormDialog(QtWidgets.QDialog):
def __init__(self, widget, *args, **kwargs):
super(FormDialog, self).__init__(*args, **kwargs)
self.widget = widget
self.cancel_button = QtWidgets.QPushButton('&cancel')
self.accept_button = QtWidgets.QPushButton('&accept')
self.cancel_button.clicked.connect(self.reject)
self.accept_button.clicked.connect(self.on_accept)
self.layout = QtWidgets.QGridLayout()
self.layout.setContentsMargins(0, 0, 0, 0)
self.layout.setRowStretch(1, 1)
self.setLayout(self.layout)
self.button_layout = QtWidgets.QHBoxLayout()
self.button_layout.setContentsMargins(20, 20, 20, 20)
self.button_layout.addWidget(self.cancel_button)
self.button_layout.addWidget(self.accept_button)
self.layout.addWidget(self.widget, 0, 0)
self.layout.addLayout(self.button_layout, 2, 0)
def __getattr__(self, attr):
try:
return getattr(self.widget, attr)
except AttributeError:
raise AttributeError('FormDialog has no attr: {}'.format(attr))
def on_accept(self):
if self.widget.valid:
self.accept()
return
class FormGroup(QtWidgets.QWidget):
toggled = QtCore.Signal(bool)
def __init__(self, widget, *args, **kwargs):
super(FormGroup, self).__init__(*args, **kwargs)
self.widget = widget
self.widget.setProperty('groupwidget', True)
self.layout = QtWidgets.QVBoxLayout()
self.layout.setContentsMargins(0, 0, 0, 0)
self.layout.setStretch(1, 0)
self.layout.setSpacing(0)
self.setLayout(self.layout)
self.title = QtWidgets.QPushButton(self.widget.name)
icon = QtGui.QIcon()
icon.addPixmap(
QtGui.QPixmap(':/icons/plus'),
QtGui.QIcon.Normal,
QtGui.QIcon.Off
)
icon.addPixmap(
QtGui.QPixmap(':/icons/minus'),
QtGui.QIcon.Normal,
QtGui.QIcon.On
)
self.title.setIcon(icon)
self.title.setProperty('grouptitle', True)
self.title.setCheckable(True)
self.title.setChecked(True)
self.title.toggled.connect(self.toggle_collapsed)
self.layout.addWidget(self.title)
self.layout.addWidget(self.widget)
def set_enabled(self, value):
self.title.blockSignals(True)
self.title.setChecked(value)
self.widget.setVisible(value)
self.title.blockSignals(False)
def toggle_collapsed(self, collapsed):
self.toggled.emit(collapsed)
self.widget.setVisible(self.title.isChecked())
def __getattr__(self, attr):
try:
return getattr(self.widget, attr)
except AttributeError:
raise AttributeError('FormDialog has no attr: {}'.format(attr))
class Header(QtWidgets.QWidget):
def __init__(self, title, description=None, icon=None, parent=None):
super(Header, self).__init__(parent)
self.grid = QtWidgets.QGridLayout()
self.setLayout(self.grid)
self.title = QtWidgets.QLabel(title)
self.title.setProperty('title', True)
if description:
self.descr = QtWidgets.QLabel(description)
self.descr.setProperty('description', True)
self.descr.setAlignment(QtCore.Qt.AlignCenter)
if icon:
self.icon = QtWidgets.QLabel()
self.icon.setPixmap(icon)
self.grid.addWidget(self.icon, 0, 0)
self.grid.addWidget(self.title, 0, 1)
self.grid.addWidget(self.descr, 1, 0, 1, 2)
else:
self.grid.addWidget(self.title, 0, 0)
self.grid.addWidget(self.descr, 1, 0)
self.title.setAlignment(QtCore.Qt.AlignCenter)
self.setProperty('header', True)
self.setAttribute(QtCore.Qt.WA_StyledBackground, True)
self._mouse_button = None
self._mouse_last_pos = None
def mousePressEvent(self, event):
self._mouse_button = event.button()
super(Header, self).mousePressEvent(event)
self._window = self.window()
def mouseMoveEvent(self, event):
'''Click + Dragging moves window'''
if self._mouse_button == QtCore.Qt.LeftButton:
if self._mouse_last_pos:
p = self._window.pos()
v = event.globalPos() - self._mouse_last_pos
self._window.move(p + v)
self._mouse_last_pos = event.globalPos()
super(Header, self).mouseMoveEvent(event)
def mouseReleaseEvent(self, event):
self._mouse_button = None
self._mouse_last_pos = None
self._window = None
super(Header, self).mouseReleaseEvent(event)
class ScalingImage(QtWidgets.QLabel):
__images = {}
def __init__(self, image=None, parent=None):
super(ScalingImage, self).__init__(parent)
self.images = self.__images
if not image:
image = ':/images/noimg'
self.set_image(image)
self.setSizePolicy(QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
def set_image(self, image):
if image not in self.images:
if not isinstance(image, QtGui.QImage):
if not QtCore.QFile.exists(image):
return
self.img = QtGui.QImage(image)
self.images[image] = self.img
else:
self.img = self.images[image]
self.setMinimumSize(227, 128)
self.scale_pixmap()
self.repaint()
def scale_pixmap(self):
scaled_image = self.img.scaled(
self.width(),
self.height(),
QtCore.Qt.KeepAspectRatioByExpanding,
QtCore.Qt.FastTransformation)
self.pixmap = QtGui.QPixmap(scaled_image)
def resizeEvent(self, event):
self.do_resize = True
super(ScalingImage, self).resizeEvent(event)
def paintEvent(self, event):
if self.do_resize:
self.scale_pixmap()
self.do_resize = False
offsetX = -((self.pixmap.width() - self.width()) * 0.5)
offsetY = -((self.pixmap.height() - self.height()) * 0.5)
painter = QtGui.QPainter(self)
painter.drawPixmap(offsetX, offsetY, self.pixmap)
class IconButton(QtWidgets.QPushButton):
'''A button with an icon.
:param icon: path to icon file or resource
:param tip: tooltip text
:param name: object name
:param size: width, height tuple (default: (32, 32))
'''
def __init__(self, icon, tip, name, size=(32, 32), *args, **kwargs):
super(IconButton, self).__init__(*args, **kwargs)
self.setObjectName(name)
self.setIcon(QtGui.QIcon(icon))
self.setIconSize(QtCore.QSize(*size))
self.setSizePolicy(
QtWidgets.QSizePolicy.Fixed,
QtWidgets.QSizePolicy.Fixed)
self.setFixedHeight(size[0])
self.setFixedWidth(size[1])
self.setToolTip(tip)
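# Illustrative usage sketch (not part of the original module): creating an
# IconButton as documented above. The ':/icons/plus' resource is reused from
# FormGroup; the tooltip, object name and size are placeholder assumptions.
def _example_icon_button():
    return IconButton(':/icons/plus', tip='Add item', name='add_button', size=(24, 24))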
| danbradham/psforms | psforms/widgets.py | Python | mit | 12,100 |
import re
_regex_default = re.compile(r'\S+')
def parse_save_file(filedata):
detectors, sources = [], []
for line in filedata:
if line.startswith('[SOURCE]'):
data = _regex_default.findall(line[len('[SOURCE]'):])
name, isotope, m, d_cover, mat_cover, pos, activity = data
pos = (0, 0, float(pos))
if m.lower() == 'none':
m = None
activity = float(activity)
else:
activity = None
m = float(m)
d_cover = float(d_cover)
args = name, isotope, m, d_cover, mat_cover, pos, activity
sources.append(args)
elif line.startswith('[DETECTOR]'):
data = _regex_default.findall(line[len('[DETECTOR]'):])
name, pos, q, d, mat, d_cover, mat_cover = data
pos = (0, 0, float(pos))
q = float(q)
d = float(d)
d_cover = float(d_cover)
args = name, pos, q, d, mat, d_cover, mat_cover
detectors.append(args)
return detectors, sources
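# Illustrative note (not part of the original module): parse_save_file() expects
# whitespace-separated lines roughly like the placeholders below (every name and
# number here is an assumption, not real data):
#   [SOURCE] src1 Cs137 10.0 0.5 steel 15.0 none
#   [DETECTOR] det1 30.0 0.95 5.0 NaI 0.5 aluminum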
def parse_isotope_file(filedata):
const_time = {'s': 1., 'm': 60., 'h': 3600., 'd': 86400., 'y': 31577600.}
SPECTRUM = []
try:
header = _regex_default.findall(filedata.pop(0))
NAME, MOLAR, HALF_LIFE = header[:3]
FLAGS = header[3:]
if HALF_LIFE[-1] in const_time:
HALF_LIFE = const_time[HALF_LIFE[-1]] * float(HALF_LIFE[:-1])
else:
HALF_LIFE = float(HALF_LIFE)
for line in filedata:
band = _regex_default.findall(line)
e, p = band[:2]
SPECTRUM.append((float(e) * 0.001, float(p) * 0.01))
return NAME, FLAGS, float(MOLAR), float(HALF_LIFE), SPECTRUM
except Exception as err:
print(err)
return None
def parse_material_file(filedata):
try:
NAME, DENSITY = _regex_default.findall(filedata.pop(0))
ATTENUATIONS = []
k_ph = 0
for line in filedata:
_l = _regex_default.findall(line)
e, k = float(_l[0]), float(_l[1])
if len(_l) > 2:
k_ph = float(_l[2])
ATTENUATIONS.append((e, k, k_ph))
_v1 = (0, ATTENUATIONS[0][1], ATTENUATIONS[0][2])
_v2 = (200000, 0, 0)
ATTENUATIONS = [_v1] + ATTENUATIONS + [_v2]
return NAME, float(DENSITY), ATTENUATIONS
except Exception as err:
print(err)
return None
| industrialsynthfreak/xr3 | nist.py | Python | mit | 2,046 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='waypoint_event',
name='name',
),
]
| flipjack/misrutas | project/app/migrations/0002_remove_waypoint_event_name.py | Python | mit | 343 |
# import jinja2
import flask as fk
from corrdb.common.core import setup_app
import tarfile
from StringIO import StringIO
from io import BytesIO
import zipfile
import json
import time
import boto3
import traceback
app = setup_app(__name__)
s3 = boto3.resource('s3')
# Templates
# loader = jinja2.PackageLoader('cloud', 'templates')
# template_env = jinja2.Environment(autoescape=True, loader=loader)
# template_env.globals.update(url_for=fk.url_for)
# template_env.globals.update(get_flashed_messages=fk.get_flashed_messages)
#Remove templates
#include admin power everywhere here.
# Stormpath
from flask.ext.stormpath import StormpathManager
stormpath_manager = StormpathManager(app)
from datetime import date, timedelta
from functools import update_wrapper
def get_week_days(year, week):
d = date(year,1,1)
if(d.weekday()>3):
d = d+timedelta(7-d.weekday())
else:
d = d - timedelta(d.weekday())
dlt = timedelta(days = (week-1)*7)
return d + dlt, d + dlt + timedelta(days=6)
def find_week_days(year, current):
index = 1
while True:
if index == 360:
break
interval = get_week_days(year, index)
if current > interval[0] and current < interval[1]:
return interval
index +=1
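# Illustrative usage sketch (not part of the original module): get_week_days()
# returns the first and last day of the requested week; the year/week numbers
# below are arbitrary assumptions.
def _example_week_days():
    start, end = get_week_days(2015, 1)
    return start, end  # a 7-day window around the first week of 2015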
class InMemoryZip(object):
def __init__(self):
# Create the in-memory file-like object
self.in_memory_zip = StringIO()
def append(self, filename_in_zip, file_contents):
'''Appends a file with name filename_in_zip and contents of
file_contents to the in-memory zip.'''
# Get a handle to the in-memory zip in append mode
zf = zipfile.ZipFile(self.in_memory_zip, "a", zipfile.ZIP_DEFLATED, False)
# Write the file to the in-memory zip
zf.writestr(filename_in_zip, file_contents)
# Mark the files as having been created on Windows so that
# Unix permissions are not inferred as 0000
for zfile in zf.filelist:
zfile.create_system = 0
return self
def read(self):
'''Returns a string with the contents of the in-memory zip.'''
self.in_memory_zip.seek(0)
return self.in_memory_zip.read()
def writetofile(self, filename):
'''Writes the in-memory zip to a file.'''
f = file(filename, "w")
f.write(self.read())
f.close()
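# Illustrative usage sketch (not part of the original module): building a small
# in-memory archive with InMemoryZip. The entry names and contents are arbitrary
# placeholder assumptions.
def _example_in_memory_zip():
    imz = InMemoryZip()
    imz.append("hello.txt", "hello world").append("notes/readme.txt", "a nested entry")
    return imz.read()  # raw bytes of the resulting zip archive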
def crossdomain(origin=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, basestring):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and fk.request.method == 'OPTIONS':
resp = app.make_default_options_response()
else:
resp = fk.make_response(f(*args, **kwargs))
if not attach_to_all and fk.request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
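# Illustrative usage sketch (not part of the original module, kept commented out
# so it does not register a real route): how the crossdomain() decorator above
# would typically wrap a Flask view. The route and payload are assumptions.
# @app.route('/ping', methods=['GET', 'OPTIONS'])
# @crossdomain(origin='*')
# def ping():
#     return fk.jsonify(status='ok')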
def load_bundle(record):
# Include record files later.
memory_file = BytesIO()
with zipfile.ZipFile(memory_file, 'w') as zf:
try:
bundle_buffer = StringIO()
# with open(record.container.image['location'], 'rb') as fh:
# image_buffer.write(fh.read())
# res = key.get_contents_to_filename(record.container.image['location'])
# s3_client = boto3.client('s3')
# res = s3_client.get_object(Bucket='ddsm-bucket', Key=record.container.image['location'])
obj = s3.Object(bucket_name='reproforge-bundles', key=record.environment.bundle['location'])
res = obj.get()
bundle_buffer.write(res['Body'].read())
bundle_buffer.seek(0)
data = zipfile.ZipInfo("%s"%(record.project.name, record.environment.bundle['location'].split('_')))
data.date_time = time.localtime(time.time())[:6]
data.compress_type = zipfile.ZIP_DEFLATED
data.external_attr |= 0777 << 16L # -rwx-rwx-rwx
zf.writestr(data, bundle_buffer.read())
except:
print traceback.print_exc()
try:
json_buffer = StringIO()
json_buffer.write(record.to_json())
json_buffer.seek(0)
data = zipfile.ZipInfo("%s_%s.json"%(record.project.name, str(record.id)))
data.date_time = time.localtime(time.time())[:6]
data.compress_type = zipfile.ZIP_DEFLATED
data.external_attr |= 0777 << 16L # -rwx-rwx-rwx
zf.writestr(data, json_buffer.read())
except:
print traceback.print_exc()
memory_file.seek(0)
# imz.append('record.tar', image_buffer.read()).append("record.json", json_buffer.read())
# print record.container.image['location'].split("/")[-1].split(".")[0]+".zip"
return [memory_file, record.environment.bundle['location'].split("/")[-1].split(".")[0]+".zip"]
def delete_project_files(project):
s3_files = s3.Bucket('reproforge-pictures')
s3_bundles = s3.Bucket('reproforge-bundles')
from corrdb.common.models import ProjectModel
from corrdb.common.models import RecordModel
from corrdb.common.models import EnvironmentModel
from corrdb.common.models import FileModel
for record in project.records:
for _file in record.files:
s3_files.delete_key(_file.location)
for environment_id in project.history:
_environment = EnvironmentModel.objects.with_id(environment_id)
if _environment.bundle["scope"] == "local":
s3_bundles.delete_key(_environment.bundle["location"])
del _environment
def delete_record_files(record):
s3_files = s3.Bucket('reproforge-pictures')
from corrdb.common.models import RecordModel
from corrdb.common.models import FileModel
    for _file in record.files:
        s3_files.delete_key(_file.location)
def delete_record_file(record_file):
s3_files = s3.Bucket('reproforge-pictures')
s3_files.delete_key(record_file.location)
def load_file(file_model):
file_buffer = StringIO()
obj = s3.Object(bucket_name='reproforge-files', key=file_model.location)
res = obj.get()
file_buffer.write(res['Body'].read())
file_buffer.seek(0)
return [file_buffer, file_model.location.split('_')[1]]
def load_picture(profile):
picture_buffer = StringIO()
obj = s3.Object(bucket_name='reproforge-pictures', key=profile.picture['location'])
res = obj.get()
picture_buffer.write(res['Body'].read())
picture_buffer.seek(0)
    return [picture_buffer, profile.picture['location']]
def upload_bundle(current_user, environment, file_obj):
dest_filename = str(current_user.id)+"-"+str(environment.id)+"_%s"%file_obj.filename #kind is record| signature
print "About to write..."
try:
s3.Bucket('reproforge-bundles').put_object(Key=str(current_user.id)+"-"+str(environment.id)+"_%s"%file_obj.filename, Body=file_obj.read())
environment.bundle['location'] = dest_filename
environment.bundle['size'] = file_obj.tell()
print "%s saving..."%file_obj.filename
environment.save()
return True
except:
        print traceback.print_exc()
        return False
def upload_file(current_user, file_model, file_obj):
dest_filename = str(current_user.id)+"-"+str(file_model.record.id)+"_%s"%file_obj.filename #kind is record| signature
print "About to write..."
try:
s3.Bucket('reproforge-files').put_object(Key=str(current_user.id)+"-"+str(record.id)+"_%s"%file_obj.filename, Body=file_obj.read())
file_model.location = dest_filename
file_model.size = file_obj.tell()
print "%s saving..."%file_obj.filename
file_model.save()
return True
except:
        print traceback.print_exc()
        return False
def upload_picture(current_user, file_obj):
dest_filename = str(current_user.id) #kind is record| signature
print "About to write..."
try:
s3.Bucket('reproforge-pictures').put_object(Key=str(current_user.id)+"."+file_obj.filename.split('.')[-1], Body=file_obj.read())
print "%s saving..."%file_obj.filename
return True
except:
        print traceback.print_exc()
        return False
CLOUD_VERSION = 1
CLOUD_URL = '/cloud/v{0}'.format(CLOUD_VERSION)
from . import views
from corrdb.common import models
from . import filters
| wd15/corr | corr-cloud/cloud/__init__.py | Python | mit | 9,419 |
import random, binascii
class HashOutput(bytearray):
def hexdigest(self) -> str:
return binascii.hexlify(self).decode('ascii')
class PearsonHasher:
def __init__(self, length: int, seed = 'ΑΓΕΩΜΕΤΡΗΤΟΣ ΜΗΔΕΙΣ ΕΙΣΙΤΩ'):
self.length = length
generator = random.Random()
generator.seed(seed)
self.table = list(range(256))
generator.shuffle(self.table)
def hash(self, data: bytes) -> HashOutput:
result = HashOutput()
for byte in range(self.length):
h = self.table[(data[0] + byte) % 256]
for c in data[1:]:
h = self.table[h ^ c]
result.append(h)
return result
if __name__ == '__main__':
for L in (2, 4, 10):
print(L, 'bytes')
hasher = PearsonHasher(L)
for s in ('Lorem ipsum dolor sit amet', 'Morem ipsum dolor sit amet', 'Lorem ipsum dolor sit amet!', 'a', 'b'):
print(s, '\t', hasher.hash(s.encode('utf-8')).hexdigest())
print()
| ze-phyr-us/pearhash | pearhash.py | Python | mit | 908 |