repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses 991) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses 15) |
---|---|---|---|---|---|
namecoin/namecore | test/functional/wallet_implicitsegwit.py | 48 | 2424 | #!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet implicit segwit feature."""
import test_framework.address as address
from test_framework.test_framework import BitcoinTestFramework
# TODO: Might be nice to test p2pk here too
address_types = ('legacy', 'bech32', 'p2sh-segwit')
def key_to_address(key, address_type):
if address_type == 'legacy':
return address.key_to_p2pkh(key)
elif address_type == 'p2sh-segwit':
return address.key_to_p2sh_p2wpkh(key)
elif address_type == 'bech32':
return address.key_to_p2wpkh(key)
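# send_a_to_b: for each address type, receive_node hands out a fresh address;
# the corresponding pubkey is then re-encoded as every address type in turn,
# and send_node sends 1 coin to each of those encodings.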
def send_a_to_b(receive_node, send_node):
keys = {}
for a in address_types:
a_address = receive_node.getnewaddress(address_type=a)
pubkey = receive_node.getaddressinfo(a_address)['pubkey']
keys[a] = pubkey
for b in address_types:
b_address = key_to_address(pubkey, b)
send_node.sendtoaddress(address=b_address, amount=1)
return keys
def check_implicit_transactions(implicit_keys, implicit_node):
# The implicit segwit node allows conversion in all possible ways
txs = implicit_node.listtransactions(None, 99999)
for a in address_types:
pubkey = implicit_keys[a]
for b in address_types:
b_address = key_to_address(pubkey, b)
assert(('receive', b_address) in tuple((tx['category'], tx['address']) for tx in txs))
class ImplicitSegwitTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.log.info("Manipulating addresses and sending transactions to all variations")
implicit_keys = send_a_to_b(self.nodes[0], self.nodes[1])
self.sync_all()
self.log.info("Checking that transactions show up correctly without a restart")
check_implicit_transactions(implicit_keys, self.nodes[0])
self.log.info("Checking that transactions still show up correctly after a restart")
self.restart_node(0)
self.restart_node(1)
check_implicit_transactions(implicit_keys, self.nodes[0])
if __name__ == '__main__':
ImplicitSegwitTest().main()
| mit |
humberos/android_kernel_samsung_smdk4412 | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | 2058 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
jalavik/harvesting-kit | harvestingkit/scripts/fix_marc_record.py | 3 | 9033 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##
## This file is part of Harvesting Kit.
## Copyright (C) 2014 CERN.
##
## Harvesting Kit is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Harvesting Kit is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Harvesting Kit; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from __future__ import print_function
import re
import sys
import getopt
import os
import codecs
from xml.dom.minidom import parse
XML_PATH = "/afs/cern.ch/project/inspire/conf-proceedings/contentratechnologies/CONFERENCE_PROCEEDINGs/"
BUFSIZE = 4096
BOMLEN = len(codecs.BOM_UTF8)
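# strip_bom: if the file starts with a UTF-8 byte-order mark, rewrite the file
# in place by shifting the remaining contents back over the BOM one BUFSIZE
# chunk at a time, then truncate the leftover BOMLEN bytes at the end.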
def strip_bom(path):
with open(path, "r+b") as fp:
chunk = fp.read(BUFSIZE)
if chunk.startswith(codecs.BOM_UTF8):
i = 0
chunk = chunk[BOMLEN:]
while chunk:
fp.seek(i)
fp.write(chunk)
i += len(chunk)
fp.seek(BOMLEN, os.SEEK_CUR)
chunk = fp.read(BUFSIZE)
fp.seek(-BOMLEN, os.SEEK_CUR)
fp.truncate()
def collapse_initials(name):
""" Removes the space between initials.
e.g. T. A. --> T.A."""
if len(name.split()) > 1:
name = re.sub(r'([A-Z]\.) +(?=[A-Z]\.)', r'\1', name)
return name
def fix_name_capitalization(lastname, givennames):
""" Converts capital letters to lower keeps first letter capital. """
lastnames = lastname.split()
if len(lastnames) == 1:
if '-' in lastname:
names = lastname.split('-')
names = map(lambda a: a[0] + a[1:].lower(), names)
lastname = '-'.join(names)
else:
lastname = lastname[0] + lastname[1:].lower()
else:
names = []
for name in lastnames:
if re.search(r'[A-Z]\.', name):
names.append(name)
else:
names.append(name[0] + name[1:].lower())
lastname = ' '.join(names)
lastname = collapse_initials(lastname)
names = []
for name in givennames:
if re.search(r'[A-Z]\.', name):
names.append(name)
else:
names.append(name[0] + name[1:].lower())
givennames = ' '.join(names)
return lastname, givennames
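# fix_title_capitalization: lowercase words written entirely in capitals
# (mixed-case words are left untouched), then capitalize the first character
# of the title.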
def fix_title_capitalization(title):
words = []
for word in title.split():
if word.upper() != word:
words.append(word)
else:
words.append(word.lower())
title = " ".join(words)
title = title[0].upper() + title[1:]
return title
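# amend_initials: append a missing period to any bare single-letter initial
# in a name, leaving initials that already end in '.' unchanged.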
def amend_initials(name):
def repl(match):
try:
if name[match.end()] != '.':
return match.group() + '.'
except IndexError:
## We have reached the end of the string
return match.group() + '.'
return match.group()
return re.sub(r"(\b\w\b)", repl, name)
def fix_authors(marcxml):
datafields = marcxml.getElementsByTagName('datafield')
# fix author names
author_tags = []
for tag in datafields:
if tag.getAttribute('tag') in ['100', '700']:
author_tags.append(tag)
for tag in author_tags:
for subfield in tag.getElementsByTagName('subfield'):
if subfield.getAttribute('code') == 'a':
author = ''
for child in subfield.childNodes:
if child.nodeType == child.TEXT_NODE:
author += child.nodeValue
if author:
author = author.replace(', Rapporteur', '')
if author.find(',') >= 0:
author = amend_initials(author)
lastname, givennames = author.split(',')
lastname = lastname.strip()
givennames = givennames.strip()
initials = r'([A-Z]\.)'
if re.search(initials, lastname) and not \
re.search(initials, givennames):
lastname, givennames = givennames, lastname
lastname, givennames = fix_name_capitalization(
lastname, givennames.split()
)
givennames = collapse_initials(givennames)
subfield.firstChild.nodeValue = "%s, %s" %\
(lastname, givennames)
else:
names = author.split()
lastname, givennames = names[-1], names[:-1]
lastname, givennames = fix_name_capitalization(
lastname, givennames
)
givennames = collapse_initials(givennames)
subfield.firstChild.nodeValue = "%s, %s" %\
(lastname, givennames)
return marcxml
def fix_title(marcxml):
datafields = marcxml.getElementsByTagName('datafield')
title_tags = []
for tag in datafields:
if tag.getAttribute('tag') in ['242', '245', '246', '247']:
title_tags.append(tag)
for tag in title_tags:
for subfield in tag.getElementsByTagName('subfield'):
if subfield.getAttribute('code') in ['a', 'b']:
for child in subfield.childNodes:
if child.nodeType == child.TEXT_NODE:
title = child.nodeValue
title = fix_title_capitalization(title)
child.nodeValue = title.replace(u"—", "-")
return marcxml
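# fix_fft: rewrite the 'a' subfield of FFT datafields so that Windows-style
# relative paths become forward-slash paths prefixed with XML_PATH.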
def fix_fft(marcxml):
datafields = marcxml.getElementsByTagName('datafield')
fft_tags = []
for tag in datafields:
if tag.getAttribute('tag') in ['FFT']:
fft_tags.append(tag)
for tag in fft_tags:
for subfield in tag.getElementsByTagName('subfield'):
if subfield.getAttribute('code') in ['a']:
for child in subfield.childNodes:
if child.nodeType == child.TEXT_NODE:
child.nodeValue = XML_PATH + child.nodeValue.replace("\\", "/")
return marcxml
def main():
usage = """
save to file:
python fix_marc_record.py marc_file*.xml >> result_file.xml
print to terminal:
python fix_marc_record.py marc_file*.xml
options:
--recid -r
fix the record with the given record id from https://inspireheptest.cern.ch
e.g. python fix_marc_record.py --recid=1291107
--site -s
specify a different site useful only when option --recid or -r enabled
e.g. python fix_marc_record.py -r 1291107 -s http://inspirehep.net
"""
try:
opts, args = getopt.getopt(sys.argv[1:], "r:s:", ["recid=", "site="])
options = map(lambda a: a[0], opts)
if not args and not ('-r' in options or '--recid' in options):
raise getopt.GetoptError("Missing argument record to fix")
except getopt.GetoptError as err:
print(str(err)) # will print something like "option -a not recognized"
print(usage)
sys.exit(2)
if '-r' in options or '--recid' in options:
from invenio.invenio_connector import InvenioConnector
from xml.dom.minidom import parseString
site = "http://inspireheptest.cern.ch/"
for o, a in opts:
if o in ['-s', '--site']:
site = a
if o in ['-r', '--recid']:
recid = a
inspiretest = InvenioConnector(site)
record = inspiretest.search(p='001:%s' % recid, of='xm')
marcxml = parseString(record)
try:
marcxml = marcxml.getElementsByTagName('record')[0]
except IndexError:
print("Record not found")
sys.exit(2)
marcxml = fix_authors(marcxml)
marcxml = fix_title(marcxml)
marcxml = fix_fft(marcxml)
sys.stdout.write(marcxml.toxml().encode('utf8'))
else:
print("<collection>")
for filename in args:
try:
strip_bom(filename)
marcxml = parse(filename)
marcxml = fix_authors(marcxml)
marcxml = fix_title(marcxml)
marcxml = fix_fft(marcxml)
sys.stdout.write(marcxml.toxml().encode('utf8'))
except Exception as err:
print("ERROR with file %s: %s. Skipping file...." % (filename, err), file=sys.stderr)
print("</collection>")
if __name__ == '__main__':
main()
| gpl-2.0 |
mfherbst/spack | var/spack/repos/builtin/packages/r-gridextra/package.py | 5 | 1825 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RGridextra(RPackage):
"""Provides a number of user-level functions to work with "grid" graphics,
notably to arrange multiple grid-based plots on a page, and draw tables."""
homepage = "https://cran.r-project.org/package=gridExtra"
url = "https://cran.r-project.org/src/contrib/gridExtra_2.2.1.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/gridExtras"
version('2.3', '01e0ea88610756a0fd3b260e83c9bd43')
version('2.2.1', '7076c2122d387c7ef3add69a1c4fc1b2')
depends_on('r-gtable', type=('build', 'run'))
| lgpl-2.1 |
Yuriy-Leonov/nova | nova/virt/vmwareapi/vmware_images.py | 8 | 8147 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility functions for Image transfer.
"""
import os
from nova import exception
from nova.image import glance
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt.vmwareapi import io_util
from nova.virt.vmwareapi import read_write_util
LOG = logging.getLogger(__name__)
QUEUE_BUFFER_SIZE = 10
def start_transfer(context, read_file_handle, data_size,
write_file_handle=None, image_service=None, image_id=None,
image_meta=None):
"""Start the data transfer from the reader to the writer.
Reader writes to the pipe and the writer reads from the pipe. This means
that the total transfer time boils down to the slower of the read/write
and not the addition of the two times.
"""
if not image_meta:
image_meta = {}
# The pipe that acts as an intermediate store of data for reader to write
# to and writer to grab from.
thread_safe_pipe = io_util.ThreadSafePipe(QUEUE_BUFFER_SIZE, data_size)
# The read thread. In case of glance it is the instance of the
# GlanceFileRead class. The glance client read returns an iterator
# and this class wraps that iterator to provide datachunks in calls
# to read.
read_thread = io_util.IOThread(read_file_handle, thread_safe_pipe)
# In case of Glance - VMware transfer, we just need a handle to the
# HTTP Connection that is to send transfer data to the VMware datastore.
if write_file_handle:
write_thread = io_util.IOThread(thread_safe_pipe, write_file_handle)
# In case of VMware - Glance transfer, we relinquish VMware HTTP file read
# handle to Glance Client instance, but to be sure of the transfer we need
# to be sure of the status of the image on glance changing to active.
# The GlanceWriteThread handles the same for us.
elif image_service and image_id:
write_thread = io_util.GlanceWriteThread(context, thread_safe_pipe,
image_service, image_id, image_meta)
# Start the read and write threads.
read_event = read_thread.start()
write_event = write_thread.start()
try:
# Wait on the read and write events to signal their end
read_event.wait()
write_event.wait()
except Exception as exc:
# In case of any of the reads or writes raising an exception,
# stop the threads so that we don't unnecessarily keep the other one
# waiting.
read_thread.stop()
write_thread.stop()
# Log and raise the exception.
LOG.exception(exc)
raise exception.NovaException(exc)
finally:
# No matter what, try closing the read and write handles, if it so
# applies.
read_file_handle.close()
if write_file_handle:
write_file_handle.close()
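# upload_iso_to_datastore: stream a local ISO file to the VMware datastore
# through a VMwareHTTPWriteFile handle, reading and writing 64 KiB blocks
# until the file is exhausted.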
def upload_iso_to_datastore(iso_path, instance, **kwargs):
LOG.debug(_("Uploading iso %s to datastore") % iso_path,
instance=instance)
with open(iso_path, 'r') as iso_file:
write_file_handle = read_write_util.VMwareHTTPWriteFile(
kwargs.get("host"),
kwargs.get("data_center_name"),
kwargs.get("datastore_name"),
kwargs.get("cookies"),
kwargs.get("file_path"),
os.fstat(iso_file.fileno()).st_size)
LOG.debug(_("Uploading iso of size : %s ") %
os.fstat(iso_file.fileno()).st_size)
block_size = 0x10000
data = iso_file.read(block_size)
while len(data) > 0:
write_file_handle.write(data)
data = iso_file.read(block_size)
write_file_handle.close()
LOG.debug(_("Uploaded iso %s to datastore") % iso_path,
instance=instance)
def fetch_image(context, image, instance, **kwargs):
"""Download image from the glance image server."""
LOG.debug(_("Downloading image %s from glance image server") % image,
instance=instance)
(image_service, image_id) = glance.get_remote_image_service(context, image)
metadata = image_service.show(context, image_id)
file_size = int(metadata['size'])
read_iter = image_service.download(context, image_id)
read_file_handle = read_write_util.GlanceFileRead(read_iter)
write_file_handle = read_write_util.VMwareHTTPWriteFile(
kwargs.get("host"),
kwargs.get("data_center_name"),
kwargs.get("datastore_name"),
kwargs.get("cookies"),
kwargs.get("file_path"),
file_size)
start_transfer(context, read_file_handle, file_size,
write_file_handle=write_file_handle)
LOG.debug(_("Downloaded image %s from glance image server") % image,
instance=instance)
def upload_image(context, image, instance, **kwargs):
"""Upload the snapshotted vm disk file to Glance image server."""
LOG.debug(_("Uploading image %s to the Glance image server") % image,
instance=instance)
read_file_handle = read_write_util.VMwareHTTPReadFile(
kwargs.get("host"),
kwargs.get("data_center_name"),
kwargs.get("datastore_name"),
kwargs.get("cookies"),
kwargs.get("file_path"))
file_size = read_file_handle.get_size()
(image_service, image_id) = glance.get_remote_image_service(context, image)
metadata = image_service.show(context, image_id)
# The properties and other fields that we need to set for the image.
image_metadata = {"disk_format": "vmdk",
"is_public": "false",
"name": metadata['name'],
"status": "active",
"container_format": "bare",
"size": file_size,
"properties": {"vmware_adaptertype":
kwargs.get("adapter_type"),
"vmware_ostype": kwargs.get("os_type"),
"vmware_image_version":
kwargs.get("image_version"),
"owner_id": instance['project_id']}}
start_transfer(context, read_file_handle, file_size,
image_service=image_service,
image_id=image_id, image_meta=image_metadata)
LOG.debug(_("Uploaded image %s to the Glance image server") % image,
instance=instance)
def get_vmdk_size_and_properties(context, image, instance):
"""
Get size of the vmdk file that is to be downloaded for attach in spawn.
Need this to create the dummy virtual disk for the meta-data file. The
geometry of the disk created depends on the size.
"""
LOG.debug(_("Getting image size for the image %s") % image,
instance=instance)
(image_service, image_id) = glance.get_remote_image_service(context, image)
meta_data = image_service.show(context, image_id)
size, properties = meta_data["size"], meta_data["properties"]
LOG.debug(_("Got image size of %(size)s for the image %(image)s"),
{'size': size, 'image': image}, instance=instance)
return size, properties
| apache-2.0 |
exercism/xpython | exercises/rest-api/example.py | 3 | 3151 | import json
class RestAPI:
def __init__(self, database=None):
self.database = database or {'users': []}
def update(self):
for user in self.database['users']:
owed_by = user['owed_by']
owes = user['owes']
for debtor in list(owed_by.keys()):
if debtor in owes:
diff = 0
if debtor in owes:
diff = owes[debtor]
del owes[debtor]
if debtor in owed_by:
diff -= owed_by[debtor]
del owed_by[debtor]
if diff > 0:
owes[debtor] = diff
elif diff < 0:
owed_by[debtor] = -diff
user['balance'] = sum(owed_by.values()) - sum(owes.values())
def get(self, url, payload=None):
if payload is not None:
payload = json.loads(payload)
if url == '/users':
if payload is None:
return json.dumps(self.database)
else:
return json.dumps({
'users': [
u for u in self.database['users']
if u['name'] in payload['users']
]
})
def post(self, url, payload=None):
result = None
if payload is not None:
payload = json.loads(payload)
if url == '/add':
if payload is not None:
name = payload['user']
users = self.database['users']
user = None
for u in users:
if u['name'] == name:
user = u
break
if user is None:
new_user = {
'name': name,
'owes': {},
'owed_by': {},
'balance': 0
}
users.append(new_user)
self.update()
result = json.dumps(new_user)
elif url == '/iou':
if payload is not None:
lender_name = payload['lender']
borrower_name = payload['borrower']
amount = payload['amount']
lender = borrower = None
for u in self.database['users']:
if u['name'] == lender_name:
lender = u
elif u['name'] == borrower_name:
borrower = u
if lender is not None and borrower is not None:
lender['owed_by'].setdefault(borrower_name, 0)
lender['owed_by'][borrower_name] += amount
borrower['owes'].setdefault(lender_name, 0)
borrower['owes'][lender_name] += amount
self.update()
result = self.get(
'/users',
json.dumps({'users': [lender_name, borrower_name]})
)
return result
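# A minimal usage sketch (the user names are made up for illustration): every
# request and response is a JSON string, so payloads go through json.dumps.
if __name__ == '__main__':
    api = RestAPI({'users': []})
    api.post('/add', json.dumps({'user': 'Ada'}))
    api.post('/add', json.dumps({'user': 'Bob'}))
    api.post('/iou', json.dumps({'lender': 'Ada', 'borrower': 'Bob', 'amount': 5.0}))
    print(api.get('/users', json.dumps({'users': ['Ada', 'Bob']})))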
| mit |
rec/DMXIS | Macros/Python/pickletools.py | 5 | 76619 | '''"Executable documentation" for the pickle module.
Extensive comments about the pickle protocols and pickle-machine opcodes
can be found here. Some functions meant for external use:
genops(pickle)
Generate all the opcodes in a pickle, as (opcode, arg, position) triples.
dis(pickle, out=None, memo=None, indentlevel=4)
Print a symbolic disassembly of a pickle.
'''
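# An illustrative sketch of how the two entry points above might be used; the
# helper name below is hypothetical and only exists to show the calls.
def _example_usage():
    """Disassemble a tiny pickle using dis() and genops() (defined below)."""
    import pickle
    p = pickle.dumps([1, 2], 0)    # a small protocol-0 pickle
    dis(p)                         # print a symbolic disassembly to stdout
    return [opcode.name for opcode, arg, pos in genops(p)]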
__all__ = ['dis', 'genops', 'optimize']
# Other ideas:
#
# - A pickle verifier: read a pickle and check it exhaustively for
# well-formedness. dis() does a lot of this already.
#
# - A protocol identifier: examine a pickle and return its protocol number
# (== the highest .proto attr value among all the opcodes in the pickle).
# dis() already prints this info at the end.
#
# - A pickle optimizer: for example, tuple-building code is sometimes more
# elaborate than necessary, catering for the possibility that the tuple
# is recursive. Or lots of times a PUT is generated that's never accessed
# by a later GET.
"""
"A pickle" is a program for a virtual pickle machine (PM, but more accurately
called an unpickling machine). It's a sequence of opcodes, interpreted by the
PM, building an arbitrarily complex Python object.
For the most part, the PM is very simple: there are no looping, testing, or
conditional instructions, no arithmetic and no function calls. Opcodes are
executed once each, from first to last, until a STOP opcode is reached.
The PM has two data areas, "the stack" and "the memo".
Many opcodes push Python objects onto the stack; e.g., INT pushes a Python
integer object on the stack, whose value is gotten from a decimal string
literal immediately following the INT opcode in the pickle bytestream. Other
opcodes take Python objects off the stack. The result of unpickling is
whatever object is left on the stack when the final STOP opcode is executed.
The memo is simply an array of objects, or it can be implemented as a dict
mapping little integers to objects. The memo serves as the PM's "long term
memory", and the little integers indexing the memo are akin to variable
names. Some opcodes pop a stack object into the memo at a given index,
and others push a memo object at a given index onto the stack again.
At heart, that's all the PM has. Subtleties arise for these reasons:
+ Object identity. Objects can be arbitrarily complex, and subobjects
may be shared (for example, the list [a, a] refers to the same object a
twice). It can be vital that unpickling recreate an isomorphic object
graph, faithfully reproducing sharing.
+ Recursive objects. For example, after "L = []; L.append(L)", L is a
list, and L[0] is the same list. This is related to the object identity
point, and some sequences of pickle opcodes are subtle in order to
get the right result in all cases.
+ Things pickle doesn't know everything about. Examples of things pickle
does know everything about are Python's builtin scalar and container
types, like ints and tuples. They generally have opcodes dedicated to
them. For things like module references and instances of user-defined
classes, pickle's knowledge is limited. Historically, many enhancements
have been made to the pickle protocol in order to do a better (faster,
and/or more compact) job on those.
+ Backward compatibility and micro-optimization. As explained below,
pickle opcodes never go away, not even when better ways to do a thing
get invented. The repertoire of the PM just keeps growing over time.
For example, protocol 0 had two opcodes for building Python integers (INT
and LONG), protocol 1 added three more for more-efficient pickling of short
integers, and protocol 2 added two more for more-efficient pickling of
long integers (before protocol 2, the only ways to pickle a Python long
took time quadratic in the number of digits, for both pickling and
unpickling). "Opcode bloat" isn't so much a subtlety as a source of
wearying complication.
Pickle protocols:
For compatibility, the meaning of a pickle opcode never changes. Instead new
pickle opcodes get added, and each version's unpickler can handle all the
pickle opcodes in all protocol versions to date. So old pickles continue to
be readable forever. The pickler can generally be told to restrict itself to
the subset of opcodes available under previous protocol versions too, so that
users can create pickles under the current version readable by older
versions. However, a pickle does not contain its version number embedded
within it. If an older unpickler tries to read a pickle using a later
protocol, the result is most likely an exception due to seeing an unknown (in
the older unpickler) opcode.
The original pickle used what's now called "protocol 0", and what was called
"text mode" before Python 2.3. The entire pickle bytestream is made up of
printable 7-bit ASCII characters, plus the newline character, in protocol 0.
That's why it was called text mode. Protocol 0 is small and elegant, but
sometimes painfully inefficient.
The second major set of additions is now called "protocol 1", and was called
"binary mode" before Python 2.3. This added many opcodes with arguments
consisting of arbitrary bytes, including NUL bytes and unprintable "high bit"
bytes. Binary mode pickles can be substantially smaller than equivalent
text mode pickles, and sometimes faster too; e.g., BININT represents a 4-byte
int as 4 bytes following the opcode, which is cheaper to unpickle than the
(perhaps) 11-character decimal string attached to INT. Protocol 1 also added
a number of opcodes that operate on many stack elements at once (like APPENDS
and SETITEMS), and "shortcut" opcodes (like EMPTY_DICT and EMPTY_TUPLE).
The third major set of additions came in Python 2.3, and is called "protocol
2". This added:
- A better way to pickle instances of new-style classes (NEWOBJ).
- A way for a pickle to identify its protocol (PROTO).
- Time- and space- efficient pickling of long ints (LONG{1,4}).
- Shortcuts for small tuples (TUPLE{1,2,3}}.
- Dedicated opcodes for bools (NEWTRUE, NEWFALSE).
- The "extension registry", a vector of popular objects that can be pushed
efficiently by index (EXT{1,2,4}). This is akin to the memo and GET, but
the registry contents are predefined (there's nothing akin to the memo's
PUT).
Another independent change with Python 2.3 is the abandonment of any
pretense that it might be safe to load pickles received from untrusted
parties -- no sufficient security analysis has been done to guarantee
this and there isn't a use case that warrants the expense of such an
analysis.
To this end, all tests for __safe_for_unpickling__ or for
copy_reg.safe_constructors are removed from the unpickling code.
References to these variables in the descriptions below are to be seen
as describing unpickling in Python 2.2 and before.
"""
# Meta-rule: Descriptions are stored in instances of descriptor objects,
# with plain constructors. No meta-language is defined from which
# descriptors could be constructed. If you want, e.g., XML, write a little
# program to generate XML from the objects.
##############################################################################
# Some pickle opcodes have an argument, following the opcode in the
# bytestream. An argument is of a specific type, described by an instance
# of ArgumentDescriptor. These are not to be confused with arguments taken
# off the stack -- ArgumentDescriptor applies only to arguments embedded in
# the opcode stream, immediately following an opcode.
# Represents the number of bytes consumed by an argument delimited by the
# next newline character.
UP_TO_NEWLINE = -1
# Represents the number of bytes consumed by a two-argument opcode where
# the first argument gives the number of bytes in the second argument.
TAKEN_FROM_ARGUMENT1 = -2 # num bytes is 1-byte unsigned int
TAKEN_FROM_ARGUMENT4 = -3 # num bytes is 4-byte signed little-endian int
class ArgumentDescriptor(object):
__slots__ = (
# name of descriptor record, also a module global name; a string
'name',
# length of argument, in bytes; an int; UP_TO_NEWLINE and
# TAKEN_FROM_ARGUMENT{1,4} are negative values for variable-length
# cases
'n',
# a function taking a file-like object, reading this kind of argument
# from the object at the current position, advancing the current
# position by n bytes, and returning the value of the argument
'reader',
# human-readable docs for this arg descriptor; a string
'doc',
)
def __init__(self, name, n, reader, doc):
assert isinstance(name, str)
self.name = name
assert isinstance(n, int) and (n >= 0 or
n in (UP_TO_NEWLINE,
TAKEN_FROM_ARGUMENT1,
TAKEN_FROM_ARGUMENT4))
self.n = n
self.reader = reader
assert isinstance(doc, str)
self.doc = doc
from struct import unpack as _unpack
def read_uint1(f):
r"""
>>> import StringIO
>>> read_uint1(StringIO.StringIO('\xff'))
255
"""
data = f.read(1)
if data:
return ord(data)
raise ValueError("not enough data in stream to read uint1")
uint1 = ArgumentDescriptor(
name='uint1',
n=1,
reader=read_uint1,
doc="One-byte unsigned integer.")
def read_uint2(f):
r"""
>>> import StringIO
>>> read_uint2(StringIO.StringIO('\xff\x00'))
255
>>> read_uint2(StringIO.StringIO('\xff\xff'))
65535
"""
data = f.read(2)
if len(data) == 2:
return _unpack("<H", data)[0]
raise ValueError("not enough data in stream to read uint2")
uint2 = ArgumentDescriptor(
name='uint2',
n=2,
reader=read_uint2,
doc="Two-byte unsigned integer, little-endian.")
def read_int4(f):
r"""
>>> import StringIO
>>> read_int4(StringIO.StringIO('\xff\x00\x00\x00'))
255
>>> read_int4(StringIO.StringIO('\x00\x00\x00\x80')) == -(2**31)
True
"""
data = f.read(4)
if len(data) == 4:
return _unpack("<i", data)[0]
raise ValueError("not enough data in stream to read int4")
int4 = ArgumentDescriptor(
name='int4',
n=4,
reader=read_int4,
doc="Four-byte signed integer, little-endian, 2's complement.")
def read_stringnl(f, decode=True, stripquotes=True):
r"""
>>> import StringIO
>>> read_stringnl(StringIO.StringIO("'abcd'\nefg\n"))
'abcd'
>>> read_stringnl(StringIO.StringIO("\n"))
Traceback (most recent call last):
...
ValueError: no string quotes around ''
>>> read_stringnl(StringIO.StringIO("\n"), stripquotes=False)
''
>>> read_stringnl(StringIO.StringIO("''\n"))
''
>>> read_stringnl(StringIO.StringIO('"abcd"'))
Traceback (most recent call last):
...
ValueError: no newline found when trying to read stringnl
Embedded escapes are undone in the result.
>>> read_stringnl(StringIO.StringIO(r"'a\n\\b\x00c\td'" + "\n'e'"))
'a\n\\b\x00c\td'
"""
data = f.readline()
if not data.endswith('\n'):
raise ValueError("no newline found when trying to read stringnl")
data = data[:-1] # lose the newline
if stripquotes:
for q in "'\"":
if data.startswith(q):
if not data.endswith(q):
raise ValueError("strinq quote %r not found at both "
"ends of %r" % (q, data))
data = data[1:-1]
break
else:
raise ValueError("no string quotes around %r" % data)
# I'm not sure when 'string_escape' was added to the std codecs; it's
# crazy not to use it if it's there.
if decode:
data = data.decode('string_escape')
return data
stringnl = ArgumentDescriptor(
name='stringnl',
n=UP_TO_NEWLINE,
reader=read_stringnl,
doc="""A newline-terminated string.
This is a repr-style string, with embedded escapes, and
bracketing quotes.
""")
def read_stringnl_noescape(f):
return read_stringnl(f, decode=False, stripquotes=False)
stringnl_noescape = ArgumentDescriptor(
name='stringnl_noescape',
n=UP_TO_NEWLINE,
reader=read_stringnl_noescape,
doc="""A newline-terminated string.
This is a str-style string, without embedded escapes,
or bracketing quotes. It should consist solely of
printable ASCII characters.
""")
def read_stringnl_noescape_pair(f):
r"""
>>> import StringIO
>>> read_stringnl_noescape_pair(StringIO.StringIO("Queue\nEmpty\njunk"))
'Queue Empty'
"""
return "%s %s" % (read_stringnl_noescape(f), read_stringnl_noescape(f))
stringnl_noescape_pair = ArgumentDescriptor(
name='stringnl_noescape_pair',
n=UP_TO_NEWLINE,
reader=read_stringnl_noescape_pair,
doc="""A pair of newline-terminated strings.
These are str-style strings, without embedded
escapes, or bracketing quotes. They should
consist solely of printable ASCII characters.
The pair is returned as a single string, with
a single blank separating the two strings.
""")
def read_string4(f):
r"""
>>> import StringIO
>>> read_string4(StringIO.StringIO("\x00\x00\x00\x00abc"))
''
>>> read_string4(StringIO.StringIO("\x03\x00\x00\x00abcdef"))
'abc'
>>> read_string4(StringIO.StringIO("\x00\x00\x00\x03abcdef"))
Traceback (most recent call last):
...
ValueError: expected 50331648 bytes in a string4, but only 6 remain
"""
n = read_int4(f)
if n < 0:
raise ValueError("string4 byte count < 0: %d" % n)
data = f.read(n)
if len(data) == n:
return data
raise ValueError("expected %d bytes in a string4, but only %d remain" %
(n, len(data)))
string4 = ArgumentDescriptor(
name="string4",
n=TAKEN_FROM_ARGUMENT4,
reader=read_string4,
doc="""A counted string.
The first argument is a 4-byte little-endian signed int giving
the number of bytes in the string, and the second argument is
that many bytes.
""")
def read_string1(f):
r"""
>>> import StringIO
>>> read_string1(StringIO.StringIO("\x00"))
''
>>> read_string1(StringIO.StringIO("\x03abcdef"))
'abc'
"""
n = read_uint1(f)
assert n >= 0
data = f.read(n)
if len(data) == n:
return data
raise ValueError("expected %d bytes in a string1, but only %d remain" %
(n, len(data)))
string1 = ArgumentDescriptor(
name="string1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_string1,
doc="""A counted string.
The first argument is a 1-byte unsigned int giving the number
of bytes in the string, and the second argument is that many
bytes.
""")
def read_unicodestringnl(f):
r"""
>>> import StringIO
>>> read_unicodestringnl(StringIO.StringIO("abc\uabcd\njunk"))
u'abc\uabcd'
"""
data = f.readline()
if not data.endswith('\n'):
raise ValueError("no newline found when trying to read "
"unicodestringnl")
data = data[:-1] # lose the newline
return unicode(data, 'raw-unicode-escape')
unicodestringnl = ArgumentDescriptor(
name='unicodestringnl',
n=UP_TO_NEWLINE,
reader=read_unicodestringnl,
doc="""A newline-terminated Unicode string.
This is raw-unicode-escape encoded, so consists of
printable ASCII characters, and may contain embedded
escape sequences.
""")
def read_unicodestring4(f):
r"""
>>> import StringIO
>>> s = u'abcd\uabcd'
>>> enc = s.encode('utf-8')
>>> enc
'abcd\xea\xaf\x8d'
>>> n = chr(len(enc)) + chr(0) * 3 # little-endian 4-byte length
>>> t = read_unicodestring4(StringIO.StringIO(n + enc + 'junk'))
>>> s == t
True
>>> read_unicodestring4(StringIO.StringIO(n + enc[:-1]))
Traceback (most recent call last):
...
ValueError: expected 7 bytes in a unicodestring4, but only 6 remain
"""
n = read_int4(f)
if n < 0:
raise ValueError("unicodestring4 byte count < 0: %d" % n)
data = f.read(n)
if len(data) == n:
return unicode(data, 'utf-8')
raise ValueError("expected %d bytes in a unicodestring4, but only %d "
"remain" % (n, len(data)))
unicodestring4 = ArgumentDescriptor(
name="unicodestring4",
n=TAKEN_FROM_ARGUMENT4,
reader=read_unicodestring4,
doc="""A counted Unicode string.
The first argument is a 4-byte little-endian signed int
giving the number of bytes in the string, and the second
argument-- the UTF-8 encoding of the Unicode string --
contains that many bytes.
""")
def read_decimalnl_short(f):
r"""
>>> import StringIO
>>> read_decimalnl_short(StringIO.StringIO("1234\n56"))
1234
>>> read_decimalnl_short(StringIO.StringIO("1234L\n56"))
Traceback (most recent call last):
...
ValueError: trailing 'L' not allowed in '1234L'
"""
s = read_stringnl(f, decode=False, stripquotes=False)
if s.endswith("L"):
raise ValueError("trailing 'L' not allowed in %r" % s)
# It's not necessarily true that the result fits in a Python short int:
# the pickle may have been written on a 64-bit box. There's also a hack
# for True and False here.
if s == "00":
return False
elif s == "01":
return True
try:
return int(s)
except OverflowError:
return long(s)
def read_decimalnl_long(f):
r"""
>>> import StringIO
>>> read_decimalnl_long(StringIO.StringIO("1234\n56"))
Traceback (most recent call last):
...
ValueError: trailing 'L' required in '1234'
Someday the trailing 'L' will probably go away from this output.
>>> read_decimalnl_long(StringIO.StringIO("1234L\n56"))
1234L
>>> read_decimalnl_long(StringIO.StringIO("123456789012345678901234L\n6"))
123456789012345678901234L
"""
s = read_stringnl(f, decode=False, stripquotes=False)
if not s.endswith("L"):
raise ValueError("trailing 'L' required in %r" % s)
return long(s)
decimalnl_short = ArgumentDescriptor(
name='decimalnl_short',
n=UP_TO_NEWLINE,
reader=read_decimalnl_short,
doc="""A newline-terminated decimal integer literal.
This never has a trailing 'L', and the integer fit
in a short Python int on the box where the pickle
was written -- but there's no guarantee it will fit
in a short Python int on the box where the pickle
is read.
""")
decimalnl_long = ArgumentDescriptor(
name='decimalnl_long',
n=UP_TO_NEWLINE,
reader=read_decimalnl_long,
doc="""A newline-terminated decimal integer literal.
This has a trailing 'L', and can represent integers
of any size.
""")
def read_floatnl(f):
r"""
>>> import StringIO
>>> read_floatnl(StringIO.StringIO("-1.25\n6"))
-1.25
"""
s = read_stringnl(f, decode=False, stripquotes=False)
return float(s)
floatnl = ArgumentDescriptor(
name='floatnl',
n=UP_TO_NEWLINE,
reader=read_floatnl,
doc="""A newline-terminated decimal floating literal.
In general this requires 17 significant digits for roundtrip
identity, and pickling then unpickling infinities, NaNs, and
minus zero doesn't work across boxes, or on some boxes even
on itself (e.g., Windows can't read the strings it produces
for infinities or NaNs).
""")
def read_float8(f):
r"""
>>> import StringIO, struct
>>> raw = struct.pack(">d", -1.25)
>>> raw
'\xbf\xf4\x00\x00\x00\x00\x00\x00'
>>> read_float8(StringIO.StringIO(raw + "\n"))
-1.25
"""
data = f.read(8)
if len(data) == 8:
return _unpack(">d", data)[0]
raise ValueError("not enough data in stream to read float8")
float8 = ArgumentDescriptor(
name='float8',
n=8,
reader=read_float8,
doc="""An 8-byte binary representation of a float, big-endian.
The format is unique to Python, and shared with the struct
module (format string '>d') "in theory" (the struct and cPickle
implementations don't share the code -- they should). It's
strongly related to the IEEE-754 double format, and, in normal
cases, is in fact identical to the big-endian 754 double format.
On other boxes the dynamic range is limited to that of a 754
double, and "add a half and chop" rounding is used to reduce
the precision to 53 bits. However, even on a 754 box,
infinities, NaNs, and minus zero may not be handled correctly
(may not survive roundtrip pickling intact).
""")
# Protocol 2 formats
from pickle import decode_long
def read_long1(f):
r"""
>>> import StringIO
>>> read_long1(StringIO.StringIO("\x00"))
0L
>>> read_long1(StringIO.StringIO("\x02\xff\x00"))
255L
>>> read_long1(StringIO.StringIO("\x02\xff\x7f"))
32767L
>>> read_long1(StringIO.StringIO("\x02\x00\xff"))
-256L
>>> read_long1(StringIO.StringIO("\x02\x00\x80"))
-32768L
"""
n = read_uint1(f)
data = f.read(n)
if len(data) != n:
raise ValueError("not enough data in stream to read long1")
return decode_long(data)
long1 = ArgumentDescriptor(
name="long1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_long1,
doc="""A binary long, little-endian, using 1-byte size.
This first reads one byte as an unsigned size, then reads that
many bytes and interprets them as a little-endian 2's-complement long.
If the size is 0, that's taken as a shortcut for the long 0L.
""")
def read_long4(f):
r"""
>>> import StringIO
>>> read_long4(StringIO.StringIO("\x02\x00\x00\x00\xff\x00"))
255L
>>> read_long4(StringIO.StringIO("\x02\x00\x00\x00\xff\x7f"))
32767L
>>> read_long4(StringIO.StringIO("\x02\x00\x00\x00\x00\xff"))
-256L
>>> read_long4(StringIO.StringIO("\x02\x00\x00\x00\x00\x80"))
-32768L
>>> read_long4(StringIO.StringIO("\x00\x00\x00\x00"))
0L
"""
n = read_int4(f)
if n < 0:
raise ValueError("long4 byte count < 0: %d" % n)
data = f.read(n)
if len(data) != n:
raise ValueError("not enough data in stream to read long4")
return decode_long(data)
long4 = ArgumentDescriptor(
name="long4",
n=TAKEN_FROM_ARGUMENT4,
reader=read_long4,
doc="""A binary representation of a long, little-endian.
This first reads four bytes as a signed size (but requires the
size to be >= 0), then reads that many bytes and interprets them
as a little-endian 2's-complement long. If the size is 0, that's taken
as a shortcut for the long 0L, although LONG1 should really be used
then instead (and in any case where # of bytes < 256).
""")
##############################################################################
# Object descriptors. The stack used by the pickle machine holds objects,
# and in the stack_before and stack_after attributes of OpcodeInfo
# descriptors we need names to describe the various types of objects that can
# appear on the stack.
class StackObject(object):
__slots__ = (
# name of descriptor record, for info only
'name',
# type of object, or tuple of type objects (meaning the object can
# be of any type in the tuple)
'obtype',
# human-readable docs for this kind of stack object; a string
'doc',
)
def __init__(self, name, obtype, doc):
assert isinstance(name, str)
self.name = name
assert isinstance(obtype, type) or isinstance(obtype, tuple)
if isinstance(obtype, tuple):
for contained in obtype:
assert isinstance(contained, type)
self.obtype = obtype
assert isinstance(doc, str)
self.doc = doc
def __repr__(self):
return self.name
pyint = StackObject(
name='int',
obtype=int,
doc="A short (as opposed to long) Python integer object.")
pylong = StackObject(
name='long',
obtype=long,
doc="A long (as opposed to short) Python integer object.")
pyinteger_or_bool = StackObject(
name='int_or_bool',
obtype=(int, long, bool),
doc="A Python integer object (short or long), or "
"a Python bool.")
pybool = StackObject(
name='bool',
obtype=(bool,),
doc="A Python bool object.")
pyfloat = StackObject(
name='float',
obtype=float,
doc="A Python float object.")
pystring = StackObject(
name='str',
obtype=str,
doc="A Python string object.")
pyunicode = StackObject(
name='unicode',
obtype=unicode,
doc="A Python Unicode string object.")
pynone = StackObject(
name="None",
obtype=type(None),
doc="The Python None object.")
pytuple = StackObject(
name="tuple",
obtype=tuple,
doc="A Python tuple object.")
pylist = StackObject(
name="list",
obtype=list,
doc="A Python list object.")
pydict = StackObject(
name="dict",
obtype=dict,
doc="A Python dict object.")
anyobject = StackObject(
name='any',
obtype=object,
doc="Any kind of object whatsoever.")
markobject = StackObject(
name="mark",
obtype=StackObject,
doc="""'The mark' is a unique object.
Opcodes that operate on a variable number of objects
generally don't embed the count of objects in the opcode,
or pull it off the stack. Instead the MARK opcode is used
to push a special marker object on the stack, and then
some other opcodes grab all the objects from the top of
the stack down to (but not including) the topmost marker
object.
""")
stackslice = StackObject(
name="stackslice",
obtype=StackObject,
doc="""An object representing a contiguous slice of the stack.
This is used in conjunction with markobject, to represent all
of the stack following the topmost markobject. For example,
the POP_MARK opcode changes the stack from
[..., markobject, stackslice]
to
[...]
No matter how many objects are on the stack after the topmost
markobject, POP_MARK gets rid of all of them (including the
topmost markobject too).
""")
##############################################################################
# Descriptors for pickle opcodes.
class OpcodeInfo(object):
__slots__ = (
# symbolic name of opcode; a string
'name',
# the code used in a bytestream to represent the opcode; a
# one-character string
'code',
# If the opcode has an argument embedded in the byte string, an
# instance of ArgumentDescriptor specifying its type. Note that
# arg.reader(s) can be used to read and decode the argument from
# the bytestream s, and arg.doc documents the format of the raw
# argument bytes. If the opcode doesn't have an argument embedded
# in the bytestream, arg should be None.
'arg',
# what the stack looks like before this opcode runs; a list
'stack_before',
# what the stack looks like after this opcode runs; a list
'stack_after',
# the protocol number in which this opcode was introduced; an int
'proto',
# human-readable docs for this opcode; a string
'doc',
)
def __init__(self, name, code, arg,
stack_before, stack_after, proto, doc):
assert isinstance(name, str)
self.name = name
assert isinstance(code, str)
assert len(code) == 1
self.code = code
assert arg is None or isinstance(arg, ArgumentDescriptor)
self.arg = arg
assert isinstance(stack_before, list)
for x in stack_before:
assert isinstance(x, StackObject)
self.stack_before = stack_before
assert isinstance(stack_after, list)
for x in stack_after:
assert isinstance(x, StackObject)
self.stack_after = stack_after
assert isinstance(proto, int) and 0 <= proto <= 2
self.proto = proto
assert isinstance(doc, str)
self.doc = doc
I = OpcodeInfo
opcodes = [
# Ways to spell integers.
I(name='INT',
code='I',
arg=decimalnl_short,
stack_before=[],
stack_after=[pyinteger_or_bool],
proto=0,
doc="""Push an integer or bool.
The argument is a newline-terminated decimal literal string.
The intent may have been that this always fit in a short Python int,
but INT can be generated in pickles written on a 64-bit box that
require a Python long on a 32-bit box. The difference between this
and LONG then is that INT skips a trailing 'L', and produces a short
int whenever possible.
Another difference arises because, when bool was introduced as a
distinct type in 2.3, builtin names True and False were also added to
2.2.2, mapping to ints 1 and 0. For compatibility in both directions,
True gets pickled as INT + "I01\\n", and False as INT + "I00\\n".
Leading zeroes are never produced for a genuine integer. The 2.3
(and later) unpicklers special-case these and return bool instead;
earlier unpicklers ignore the leading "0" and return the int.
"""),
I(name='BININT',
code='J',
arg=int4,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a four-byte signed integer.
This handles the full range of Python (short) integers on a 32-bit
box, directly as binary bytes (1 for the opcode and 4 for the integer).
If the integer is non-negative and fits in 1 or 2 bytes, pickling via
BININT1 or BININT2 saves space.
"""),
I(name='BININT1',
code='K',
arg=uint1,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a one-byte unsigned integer.
This is a space optimization for pickling very small non-negative ints,
in range(256).
"""),
I(name='BININT2',
code='M',
arg=uint2,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a two-byte unsigned integer.
This is a space optimization for pickling small positive ints, in
range(256, 2**16). Integers in range(256) can also be pickled via
BININT2, but BININT1 instead saves a byte.
"""),
I(name='LONG',
code='L',
arg=decimalnl_long,
stack_before=[],
stack_after=[pylong],
proto=0,
doc="""Push a long integer.
The same as INT, except that the literal ends with 'L', and always
unpickles to a Python long. There doesn't seem to be a real purpose to the
trailing 'L'.
Note that LONG takes time quadratic in the number of digits when
unpickling (this is simply due to the nature of decimal->binary
conversion). Proto 2 added linear-time (in C; still quadratic-time
in Python) LONG1 and LONG4 opcodes.
"""),
I(name="LONG1",
code='\x8a',
arg=long1,
stack_before=[],
stack_after=[pylong],
proto=2,
doc="""Long integer using one-byte length.
A more efficient encoding of a Python long; the long1 encoding
says it all."""),
I(name="LONG4",
code='\x8b',
arg=long4,
stack_before=[],
stack_after=[pylong],
proto=2,
doc="""Long integer using found-byte length.
A more efficient encoding of a Python long; the long4 encoding
says it all."""),
# Ways to spell strings (8-bit, not Unicode).
I(name='STRING',
code='S',
arg=stringnl,
stack_before=[],
stack_after=[pystring],
proto=0,
doc="""Push a Python string object.
The argument is a repr-style string, with bracketing quote characters,
and perhaps embedded escapes. The argument extends until the next
newline character.
"""),
I(name='BINSTRING',
code='T',
arg=string4,
stack_before=[],
stack_after=[pystring],
proto=1,
doc="""Push a Python string object.
There are two arguments: the first is a 4-byte little-endian signed int
giving the number of bytes in the string, and the second is that many
bytes, which are taken literally as the string content.
"""),
I(name='SHORT_BINSTRING',
code='U',
arg=string1,
stack_before=[],
stack_after=[pystring],
proto=1,
doc="""Push a Python string object.
There are two arguments: the first is a 1-byte unsigned int giving
the number of bytes in the string, and the second is that many bytes,
which are taken literally as the string content.
"""),
# Ways to spell None.
I(name='NONE',
code='N',
arg=None,
stack_before=[],
stack_after=[pynone],
proto=0,
doc="Push None on the stack."),
# Ways to spell bools, starting with proto 2. See INT for how this was
# done before proto 2.
I(name='NEWTRUE',
code='\x88',
arg=None,
stack_before=[],
stack_after=[pybool],
proto=2,
doc="""True.
Push True onto the stack."""),
I(name='NEWFALSE',
code='\x89',
arg=None,
stack_before=[],
stack_after=[pybool],
proto=2,
doc="""True.
Push False onto the stack."""),
# Ways to spell Unicode strings.
I(name='UNICODE',
code='V',
arg=unicodestringnl,
stack_before=[],
stack_after=[pyunicode],
proto=0, # this may be pure-text, but it's a later addition
doc="""Push a Python Unicode string object.
The argument is a raw-unicode-escape encoding of a Unicode string,
and so may contain embedded escape sequences. The argument extends
until the next newline character.
"""),
I(name='BINUNICODE',
code='X',
arg=unicodestring4,
stack_before=[],
stack_after=[pyunicode],
proto=1,
doc="""Push a Python Unicode string object.
There are two arguments: the first is a 4-byte little-endian signed int
giving the number of bytes in the string. The second is that many
bytes, and is the UTF-8 encoding of the Unicode string.
"""),
# Ways to spell floats.
I(name='FLOAT',
code='F',
arg=floatnl,
stack_before=[],
stack_after=[pyfloat],
proto=0,
doc="""Newline-terminated decimal float literal.
The argument is repr(a_float), and in general requires 17 significant
digits for roundtrip conversion to be an identity (this is so for
IEEE-754 double precision values, which is what Python float maps to
on most boxes).
In general, FLOAT cannot be used to transport infinities, NaNs, or
minus zero across boxes (or even on a single box, if the platform C
library can't read the strings it produces for such things -- Windows
is like that), but may do less damage than BINFLOAT on boxes with
greater precision or dynamic range than IEEE-754 double.
"""),
I(name='BINFLOAT',
code='G',
arg=float8,
stack_before=[],
stack_after=[pyfloat],
proto=1,
doc="""Float stored in binary form, with 8 bytes of data.
This generally requires less than half the space of FLOAT encoding.
In general, BINFLOAT cannot be used to transport infinities, NaNs, or
minus zero, raises an exception if the exponent exceeds the range of
an IEEE-754 double, and retains no more than 53 bits of precision (if
there are more than that, "add a half and chop" rounding is used to
cut it back to 53 significant bits).
"""),
# Ways to build lists.
I(name='EMPTY_LIST',
code=']',
arg=None,
stack_before=[],
stack_after=[pylist],
proto=1,
doc="Push an empty list."),
I(name='APPEND',
code='a',
arg=None,
stack_before=[pylist, anyobject],
stack_after=[pylist],
proto=0,
doc="""Append an object to a list.
Stack before: ... pylist anyobject
Stack after: ... pylist+[anyobject]
although pylist is really extended in-place.
"""),
I(name='APPENDS',
code='e',
arg=None,
stack_before=[pylist, markobject, stackslice],
stack_after=[pylist],
proto=1,
doc="""Extend a list by a slice of stack objects.
Stack before: ... pylist markobject stackslice
Stack after: ... pylist+stackslice
although pylist is really extended in-place.
"""),
I(name='LIST',
code='l',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pylist],
proto=0,
doc="""Build a list out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python list, which single list object replaces all of the
stack from the topmost markobject onward. For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... [1, 2, 3, 'abc']
"""),
# Ways to build tuples.
I(name='EMPTY_TUPLE',
code=')',
arg=None,
stack_before=[],
stack_after=[pytuple],
proto=1,
doc="Push an empty tuple."),
I(name='TUPLE',
code='t',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pytuple],
proto=0,
doc="""Build a tuple out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python tuple, which single tuple object replaces all of the
stack from the topmost markobject onward. For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... (1, 2, 3, 'abc')
"""),
I(name='TUPLE1',
code='\x85',
arg=None,
stack_before=[anyobject],
stack_after=[pytuple],
proto=2,
doc="""One-tuple.
This code pops one value off the stack and pushes a tuple of
length 1 whose one item is that value back onto it. IOW:
stack[-1] = tuple(stack[-1:])
"""),
I(name='TUPLE2',
code='\x86',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[pytuple],
proto=2,
doc="""One-tuple.
This code pops two values off the stack and pushes a tuple
of length 2 whose items are those values back onto it. IOW:
stack[-2:] = [tuple(stack[-2:])]
"""),
I(name='TUPLE3',
code='\x87',
arg=None,
stack_before=[anyobject, anyobject, anyobject],
stack_after=[pytuple],
proto=2,
doc="""One-tuple.
This code pops three values off the stack and pushes a tuple
of length 3 whose items are those values back onto it. IOW:
stack[-3:] = [tuple(stack[-3:])]
"""),
# Ways to build dicts.
I(name='EMPTY_DICT',
code='}',
arg=None,
stack_before=[],
stack_after=[pydict],
proto=1,
doc="Push an empty dict."),
I(name='DICT',
code='d',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pydict],
proto=0,
doc="""Build a dict out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python dict, which single dict object replaces all of the
stack from the topmost markobject onward. The stack slice alternates
key, value, key, value, .... For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... {1: 2, 3: 'abc'}
"""),
I(name='SETITEM',
code='s',
arg=None,
stack_before=[pydict, anyobject, anyobject],
stack_after=[pydict],
proto=0,
doc="""Add a key+value pair to an existing dict.
Stack before: ... pydict key value
Stack after: ... pydict
where pydict has been modified via pydict[key] = value.
"""),
I(name='SETITEMS',
code='u',
arg=None,
stack_before=[pydict, markobject, stackslice],
stack_after=[pydict],
proto=1,
doc="""Add an arbitrary number of key+value pairs to an existing dict.
The slice of the stack following the topmost markobject is taken as
an alternating sequence of keys and values, added to the dict
immediately under the topmost markobject. Everything at and after the
topmost markobject is popped, leaving the mutated dict at the top
of the stack.
Stack before: ... pydict markobject key_1 value_1 ... key_n value_n
Stack after: ... pydict
where pydict has been modified via pydict[key_i] = value_i for i in
1, 2, ..., n, and in that order.
"""),
# Stack manipulation.
I(name='POP',
code='0',
arg=None,
stack_before=[anyobject],
stack_after=[],
proto=0,
doc="Discard the top stack item, shrinking the stack by one item."),
I(name='DUP',
code='2',
arg=None,
stack_before=[anyobject],
stack_after=[anyobject, anyobject],
proto=0,
doc="Push the top stack item onto the stack again, duplicating it."),
I(name='MARK',
code='(',
arg=None,
stack_before=[],
stack_after=[markobject],
proto=0,
doc="""Push markobject onto the stack.
markobject is a unique object, used by other opcodes to identify a
region of the stack containing a variable number of objects for them
to work on. See markobject.doc for more detail.
"""),
I(name='POP_MARK',
code='1',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[],
proto=1,
doc="""Pop all the stack objects at and above the topmost markobject.
When an opcode using a variable number of stack objects is done,
POP_MARK is used to remove those objects, and to remove the markobject
that delimited their starting position on the stack.
"""),
# Memo manipulation. There are really only two operations (get and put),
# each in all-text, "short binary", and "long binary" flavors.
I(name='GET',
code='g',
arg=decimalnl_short,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Read an object from the memo and push it on the stack.
    The index of the memo object to push is given by the newline-terminated
decimal string following. BINGET and LONG_BINGET are space-optimized
versions.
"""),
I(name='BINGET',
code='h',
arg=uint1,
stack_before=[],
stack_after=[anyobject],
proto=1,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the 1-byte unsigned
integer following.
"""),
I(name='LONG_BINGET',
code='j',
arg=int4,
stack_before=[],
stack_after=[anyobject],
proto=1,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the 4-byte signed
little-endian integer following.
"""),
I(name='PUT',
code='p',
arg=decimalnl_short,
stack_before=[],
stack_after=[],
proto=0,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the newline-
terminated decimal string following. BINPUT and LONG_BINPUT are
space-optimized versions.
"""),
I(name='BINPUT',
code='q',
arg=uint1,
stack_before=[],
stack_after=[],
proto=1,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the 1-byte
unsigned integer following.
"""),
I(name='LONG_BINPUT',
code='r',
arg=int4,
stack_before=[],
stack_after=[],
proto=1,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the 4-byte
signed little-endian integer following.
"""),
# Access the extension registry (predefined objects). Akin to the GET
# family.
I(name='EXT1',
code='\x82',
arg=uint1,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
This code and the similar EXT2 and EXT4 allow using a registry
of popular objects that are pickled by name, typically classes.
It is envisioned that through a global negotiation and
registration process, third parties can set up a mapping between
ints and object names.
In order to guarantee pickle interchangeability, the extension
code registry ought to be global, although a range of codes may
be reserved for private use.
EXT1 has a 1-byte integer argument. This is used to index into the
extension registry, and the object at that index is pushed on the stack.
"""),
I(name='EXT2',
code='\x83',
arg=uint2,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
See EXT1. EXT2 has a two-byte integer argument.
"""),
I(name='EXT4',
code='\x84',
arg=int4,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
See EXT1. EXT4 has a four-byte integer argument.
"""),
# Push a class object, or module function, on the stack, via its module
# and name.
I(name='GLOBAL',
code='c',
arg=stringnl_noescape_pair,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Push a global object (module.attr) on the stack.
Two newline-terminated strings follow the GLOBAL opcode. The first is
taken as a module name, and the second as a class name. The class
object module.class is pushed on the stack. More accurately, the
object returned by self.find_class(module, class) is pushed on the
stack, so unpickling subclasses can override this form of lookup.
"""),
# Ways to build objects of classes pickle doesn't know about directly
# (user-defined classes). I despair of documenting this accurately
# and comprehensibly -- you really have to read the pickle code to
# find all the special cases.
I(name='REDUCE',
code='R',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=0,
doc="""Push an object built from a callable and an argument tuple.
The opcode is named to remind of the __reduce__() method.
Stack before: ... callable pytuple
Stack after: ... callable(*pytuple)
The callable and the argument tuple are the first two items returned
by a __reduce__ method. Applying the callable to the argtuple is
supposed to reproduce the original object, or at least get it started.
If the __reduce__ method returns a 3-tuple, the last component is an
argument to be passed to the object's __setstate__, and then the REDUCE
opcode is followed by code to create setstate's argument, and then a
BUILD opcode to apply __setstate__ to that argument.
If type(callable) is not ClassType, REDUCE complains unless the
callable has been registered with the copy_reg module's
safe_constructors dict, or the callable has a magic
'__safe_for_unpickling__' attribute with a true value. I'm not sure
why it does this, but I've sure seen this complaint often enough when
I didn't want to <wink>.
"""),
I(name='BUILD',
code='b',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=0,
doc="""Finish building an object, via __setstate__ or dict update.
Stack before: ... anyobject argument
Stack after: ... anyobject
where anyobject may have been mutated, as follows:
If the object has a __setstate__ method,
anyobject.__setstate__(argument)
is called.
Else the argument must be a dict, the object must have a __dict__, and
the object is updated via
anyobject.__dict__.update(argument)
This may raise RuntimeError in restricted execution mode (which
disallows access to __dict__ directly); in that case, the object
is updated instead via
for k, v in argument.items():
anyobject[k] = v
"""),
I(name='INST',
code='i',
arg=stringnl_noescape_pair,
stack_before=[markobject, stackslice],
stack_after=[anyobject],
proto=0,
doc="""Build a class instance.
This is the protocol 0 version of protocol 1's OBJ opcode.
INST is followed by two newline-terminated strings, giving a
module and class name, just as for the GLOBAL opcode (and see
GLOBAL for more details about that). self.find_class(module, name)
is used to get a class object.
In addition, all the objects on the stack following the topmost
markobject are gathered into a tuple and popped (along with the
topmost markobject), just as for the TUPLE opcode.
Now it gets complicated. If all of these are true:
+ The argtuple is empty (markobject was at the top of the stack
at the start).
+ It's an old-style class object (the type of the class object is
ClassType).
+ The class object does not have a __getinitargs__ attribute.
then we want to create an old-style class instance without invoking
its __init__() method (pickle has waffled on this over the years; not
calling __init__() is current wisdom). In this case, an instance of
an old-style dummy class is created, and then we try to rebind its
__class__ attribute to the desired class object. If this succeeds,
the new instance object is pushed on the stack, and we're done. In
restricted execution mode it can fail (assignment to __class__ is
disallowed), and I'm not really sure what happens then -- it looks
like the code ends up calling the class object's __init__ anyway,
via falling into the next case.
Else (the argtuple is not empty, it's not an old-style class object,
or the class object does have a __getinitargs__ attribute), the code
first insists that the class object have a __safe_for_unpickling__
attribute. Unlike as for the __safe_for_unpickling__ check in REDUCE,
it doesn't matter whether this attribute has a true or false value, it
only matters whether it exists (XXX this is a bug; cPickle
requires the attribute to be true). If __safe_for_unpickling__
doesn't exist, UnpicklingError is raised.
Else (the class object does have a __safe_for_unpickling__ attr),
the class object obtained from INST's arguments is applied to the
argtuple obtained from the stack, and the resulting instance object
is pushed on the stack.
NOTE: checks for __safe_for_unpickling__ went away in Python 2.3.
"""),
I(name='OBJ',
code='o',
arg=None,
stack_before=[markobject, anyobject, stackslice],
stack_after=[anyobject],
proto=1,
doc="""Build a class instance.
This is the protocol 1 version of protocol 0's INST opcode, and is
very much like it. The major difference is that the class object
is taken off the stack, allowing it to be retrieved from the memo
repeatedly if several instances of the same class are created. This
can be much more efficient (in both time and space) than repeatedly
embedding the module and class names in INST opcodes.
Unlike INST, OBJ takes no arguments from the opcode stream. Instead
the class object is taken off the stack, immediately above the
topmost markobject:
Stack before: ... markobject classobject stackslice
Stack after: ... new_instance_object
As for INST, the remainder of the stack above the markobject is
gathered into an argument tuple, and then the logic seems identical,
except that no __safe_for_unpickling__ check is done (XXX this is
a bug; cPickle does test __safe_for_unpickling__). See INST for
the gory details.
NOTE: In Python 2.3, INST and OBJ are identical except for how they
get the class object. That was always the intent; the implementations
had diverged for accidental reasons.
"""),
I(name='NEWOBJ',
code='\x81',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=2,
doc="""Build an object instance.
The stack before should be thought of as containing a class
object followed by an argument tuple (the tuple being the stack
top). Call these cls and args. They are popped off the stack,
and the value returned by cls.__new__(cls, *args) is pushed back
onto the stack.
"""),
# Machine control.
I(name='PROTO',
code='\x80',
arg=uint1,
stack_before=[],
stack_after=[],
proto=2,
doc="""Protocol version indicator.
For protocol 2 and above, a pickle must start with this opcode.
The argument is the protocol version, an int in range(2, 256).
"""),
I(name='STOP',
code='.',
arg=None,
stack_before=[anyobject],
stack_after=[],
proto=0,
doc="""Stop the unpickling machine.
Every pickle ends with this opcode. The object at the top of the stack
is popped, and that's the result of unpickling. The stack should be
empty then.
"""),
# Ways to deal with persistent IDs.
I(name='PERSID',
code='P',
arg=stringnl_noescape,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Push an object identified by a persistent ID.
The pickle module doesn't define what a persistent ID means. PERSID's
argument is a newline-terminated str-style (no embedded escapes, no
bracketing quote characters) string, which *is* "the persistent ID".
The unpickler passes this string to self.persistent_load(). Whatever
object that returns is pushed on the stack. There is no implementation
of persistent_load() in Python's unpickler: it must be supplied by an
unpickler subclass.
"""),
I(name='BINPERSID',
code='Q',
arg=None,
stack_before=[anyobject],
stack_after=[anyobject],
proto=1,
doc="""Push an object identified by a persistent ID.
Like PERSID, except the persistent ID is popped off the stack (instead
of being a string embedded in the opcode bytestream). The persistent
ID is passed to self.persistent_load(), and whatever object that
returns is pushed on the stack. See PERSID for more detail.
"""),
]
del I
# Verify uniqueness of .name and .code members.
name2i = {}
code2i = {}
for i, d in enumerate(opcodes):
if d.name in name2i:
raise ValueError("repeated name %r at indices %d and %d" %
(d.name, name2i[d.name], i))
if d.code in code2i:
raise ValueError("repeated code %r at indices %d and %d" %
(d.code, code2i[d.code], i))
name2i[d.name] = i
code2i[d.code] = i
del name2i, code2i, i, d
##############################################################################
# Build a code2op dict, mapping opcode characters to OpcodeInfo records.
# Also ensure we've got the same stuff as pickle.py, although the
# introspection here is dicey.
code2op = {}
for d in opcodes:
code2op[d.code] = d
del d
def assure_pickle_consistency(verbose=False):
import pickle, re
copy = code2op.copy()
for name in pickle.__all__:
if not re.match("[A-Z][A-Z0-9_]+$", name):
if verbose:
print "skipping %r: it doesn't look like an opcode name" % name
continue
picklecode = getattr(pickle, name)
if not isinstance(picklecode, str) or len(picklecode) != 1:
if verbose:
print ("skipping %r: value %r doesn't look like a pickle "
"code" % (name, picklecode))
continue
if picklecode in copy:
if verbose:
print "checking name %r w/ code %r for consistency" % (
name, picklecode)
d = copy[picklecode]
if d.name != name:
raise ValueError("for pickle code %r, pickle.py uses name %r "
"but we're using name %r" % (picklecode,
name,
d.name))
# Forget this one. Any left over in copy at the end are a problem
# of a different kind.
del copy[picklecode]
else:
raise ValueError("pickle.py appears to have a pickle opcode with "
"name %r and code %r, but we don't" %
(name, picklecode))
if copy:
msg = ["we appear to have pickle opcodes that pickle.py doesn't have:"]
for code, d in copy.items():
msg.append(" name %r with code %r" % (d.name, code))
raise ValueError("\n".join(msg))
assure_pickle_consistency()
del assure_pickle_consistency
##############################################################################
# A pickle opcode generator.
def genops(pickle):
"""Generate all the opcodes in a pickle.
'pickle' is a file-like object, or string, containing the pickle.
Each opcode in the pickle is generated, from the current pickle position,
stopping after a STOP opcode is delivered. A triple is generated for
each opcode:
opcode, arg, pos
opcode is an OpcodeInfo record, describing the current opcode.
If the opcode has an argument embedded in the pickle, arg is its decoded
value, as a Python object. If the opcode doesn't have an argument, arg
is None.
If the pickle has a tell() method, pos was the value of pickle.tell()
before reading the current opcode. If the pickle is a string object,
it's wrapped in a StringIO object, and the latter's tell() result is
used. Else (the pickle doesn't have a tell(), and it's not obvious how
to query its current position) pos is None.
"""
import cStringIO as StringIO
if isinstance(pickle, str):
pickle = StringIO.StringIO(pickle)
if hasattr(pickle, "tell"):
getpos = pickle.tell
else:
getpos = lambda: None
while True:
pos = getpos()
code = pickle.read(1)
opcode = code2op.get(code)
if opcode is None:
if code == "":
raise ValueError("pickle exhausted before seeing STOP")
else:
raise ValueError("at position %s, opcode %r unknown" % (
pos is None and "<unknown>" or pos,
code))
if opcode.arg is None:
arg = None
else:
arg = opcode.arg.reader(pickle)
yield opcode, arg, pos
if code == '.':
assert opcode.name == 'STOP'
break
##############################################################################
# A pickle optimizer.
def optimize(p):
'Optimize a pickle string by removing unused PUT opcodes'
gets = set() # set of args used by a GET opcode
puts = [] # (arg, startpos, stoppos) for the PUT opcodes
prevpos = None # set to pos if previous opcode was a PUT
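    # A PUT's end offset isn't known until the next opcode is read, so each
    # PUT is recorded one iteration late, once the following opcode's
    # position is available.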
for opcode, arg, pos in genops(p):
if prevpos is not None:
puts.append((prevarg, prevpos, pos))
prevpos = None
if 'PUT' in opcode.name:
prevarg, prevpos = arg, pos
elif 'GET' in opcode.name:
gets.add(arg)
# Copy the pickle string except for PUTS without a corresponding GET
s = []
i = 0
for arg, start, stop in puts:
j = stop if (arg in gets) else start
s.append(p[i:j])
i = stop
s.append(p[i:])
return ''.join(s)
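# Example (illustrative): optimize(pickle.dumps([1, 2], 0)) drops the single
# PUT opcode, because nothing in that pickle ever GETs the memoized list.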
##############################################################################
# A symbolic pickle disassembler.
def dis(pickle, out=None, memo=None, indentlevel=4):
"""Produce a symbolic disassembly of a pickle.
'pickle' is a file-like object, or string, containing a (at least one)
pickle. The pickle is disassembled from the current position, through
the first STOP opcode encountered.
Optional arg 'out' is a file-like object to which the disassembly is
printed. It defaults to sys.stdout.
Optional arg 'memo' is a Python dict, used as the pickle's memo. It
may be mutated by dis(), if the pickle contains PUT or BINPUT opcodes.
Passing the same memo object to another dis() call then allows disassembly
to proceed across multiple pickles that were all created by the same
pickler with the same memo. Ordinarily you don't need to worry about this.
Optional arg indentlevel is the number of blanks by which to indent
a new MARK level. It defaults to 4.
In addition to printing the disassembly, some sanity checks are made:
+ All embedded opcode arguments "make sense".
+ Explicit and implicit pop operations have enough items on the stack.
+ When an opcode implicitly refers to a markobject, a markobject is
actually on the stack.
+ A memo entry isn't referenced before it's defined.
+ The markobject isn't stored in the memo.
+ A memo entry isn't redefined.
"""
# Most of the hair here is for sanity checks, but most of it is needed
# anyway to detect when a protocol 0 POP takes a MARK off the stack
# (which in turn is needed to indent MARK blocks correctly).
stack = [] # crude emulation of unpickler stack
if memo is None:
        memo = {} # crude emulation of unpickler memo
maxproto = -1 # max protocol number seen
markstack = [] # bytecode positions of MARK opcodes
indentchunk = ' ' * indentlevel
errormsg = None
for opcode, arg, pos in genops(pickle):
if pos is not None:
print >> out, "%5d:" % pos,
line = "%-4s %s%s" % (repr(opcode.code)[1:-1],
indentchunk * len(markstack),
opcode.name)
maxproto = max(maxproto, opcode.proto)
before = opcode.stack_before # don't mutate
after = opcode.stack_after # don't mutate
numtopop = len(before)
# See whether a MARK should be popped.
markmsg = None
if markobject in before or (opcode.name == "POP" and
stack and
stack[-1] is markobject):
assert markobject not in after
if __debug__:
if markobject in before:
assert before[-1] is stackslice
if markstack:
markpos = markstack.pop()
if markpos is None:
markmsg = "(MARK at unknown opcode offset)"
else:
markmsg = "(MARK at %d)" % markpos
# Pop everything at and after the topmost markobject.
while stack[-1] is not markobject:
stack.pop()
stack.pop()
# Stop later code from popping too much.
try:
numtopop = before.index(markobject)
except ValueError:
assert opcode.name == "POP"
numtopop = 0
else:
errormsg = markmsg = "no MARK exists on stack"
# Check for correct memo usage.
if opcode.name in ("PUT", "BINPUT", "LONG_BINPUT"):
assert arg is not None
if arg in memo:
errormsg = "memo key %r already defined" % arg
elif not stack:
errormsg = "stack is empty -- can't store into memo"
elif stack[-1] is markobject:
errormsg = "can't store markobject in the memo"
else:
memo[arg] = stack[-1]
elif opcode.name in ("GET", "BINGET", "LONG_BINGET"):
if arg in memo:
assert len(after) == 1
after = [memo[arg]] # for better stack emulation
else:
errormsg = "memo key %r has never been stored into" % arg
if arg is not None or markmsg:
# make a mild effort to align arguments
line += ' ' * (10 - len(opcode.name))
if arg is not None:
line += ' ' + repr(arg)
if markmsg:
line += ' ' + markmsg
print >> out, line
if errormsg:
# Note that we delayed complaining until the offending opcode
# was printed.
raise ValueError(errormsg)
# Emulate the stack effects.
if len(stack) < numtopop:
raise ValueError("tries to pop %d items from stack with "
"only %d items" % (numtopop, len(stack)))
if numtopop:
del stack[-numtopop:]
if markobject in after:
assert markobject not in before
markstack.append(pos)
stack.extend(after)
print >> out, "highest protocol among opcodes =", maxproto
if stack:
raise ValueError("stack not empty after STOP: %r" % stack)
# For use in the doctest, simply as an example of a class to pickle.
class _Example:
def __init__(self, value):
self.value = value
_dis_test = r"""
>>> import pickle
>>> x = [1, 2, (3, 4), {'abc': u"def"}]
>>> pkl = pickle.dumps(x, 0)
>>> dis(pkl)
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: I INT 1
8: a APPEND
9: I INT 2
12: a APPEND
13: ( MARK
14: I INT 3
17: I INT 4
20: t TUPLE (MARK at 13)
21: p PUT 1
24: a APPEND
25: ( MARK
26: d DICT (MARK at 25)
27: p PUT 2
30: S STRING 'abc'
37: p PUT 3
40: V UNICODE u'def'
45: p PUT 4
48: s SETITEM
49: a APPEND
50: . STOP
highest protocol among opcodes = 0
Try again with a "binary" pickle.
>>> pkl = pickle.dumps(x, 1)
>>> dis(pkl)
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: K BININT1 1
6: K BININT1 2
8: ( MARK
9: K BININT1 3
11: K BININT1 4
13: t TUPLE (MARK at 8)
14: q BINPUT 1
16: } EMPTY_DICT
17: q BINPUT 2
19: U SHORT_BINSTRING 'abc'
24: q BINPUT 3
26: X BINUNICODE u'def'
34: q BINPUT 4
36: s SETITEM
37: e APPENDS (MARK at 3)
38: . STOP
highest protocol among opcodes = 1
Exercise the INST/OBJ/BUILD family.
>>> import pickletools
>>> dis(pickle.dumps(pickletools.dis, 0))
0: c GLOBAL 'pickletools dis'
17: p PUT 0
20: . STOP
highest protocol among opcodes = 0
>>> from pickletools import _Example
>>> x = [_Example(42)] * 2
>>> dis(pickle.dumps(x, 0))
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: ( MARK
6: i INST 'pickletools _Example' (MARK at 5)
28: p PUT 1
31: ( MARK
32: d DICT (MARK at 31)
33: p PUT 2
36: S STRING 'value'
45: p PUT 3
48: I INT 42
52: s SETITEM
53: b BUILD
54: a APPEND
55: g GET 1
58: a APPEND
59: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(x, 1))
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: ( MARK
5: c GLOBAL 'pickletools _Example'
27: q BINPUT 1
29: o OBJ (MARK at 4)
30: q BINPUT 2
32: } EMPTY_DICT
33: q BINPUT 3
35: U SHORT_BINSTRING 'value'
42: q BINPUT 4
44: K BININT1 42
46: s SETITEM
47: b BUILD
48: h BINGET 2
50: e APPENDS (MARK at 3)
51: . STOP
highest protocol among opcodes = 1
Try "the canonical" recursive-object test.
>>> L = []
>>> T = L,
>>> L.append(T)
>>> L[0] is T
True
>>> T[0] is L
True
>>> L[0][0] is L
True
>>> T[0][0] is T
True
>>> dis(pickle.dumps(L, 0))
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: ( MARK
6: g GET 0
9: t TUPLE (MARK at 5)
10: p PUT 1
13: a APPEND
14: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(L, 1))
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: h BINGET 0
6: t TUPLE (MARK at 3)
7: q BINPUT 1
9: a APPEND
10: . STOP
highest protocol among opcodes = 1
Note that, in the protocol 0 pickle of the recursive tuple, the disassembler
has to emulate the stack in order to realize that the POP opcode at 16 gets
rid of the MARK at 0.
>>> dis(pickle.dumps(T, 0))
0: ( MARK
1: ( MARK
2: l LIST (MARK at 1)
3: p PUT 0
6: ( MARK
7: g GET 0
10: t TUPLE (MARK at 6)
11: p PUT 1
14: a APPEND
15: 0 POP
16: 0 POP (MARK at 0)
17: g GET 1
20: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(T, 1))
0: ( MARK
1: ] EMPTY_LIST
2: q BINPUT 0
4: ( MARK
5: h BINGET 0
7: t TUPLE (MARK at 4)
8: q BINPUT 1
10: a APPEND
11: 1 POP_MARK (MARK at 0)
12: h BINGET 1
14: . STOP
highest protocol among opcodes = 1
Try protocol 2.
>>> dis(pickle.dumps(L, 2))
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: h BINGET 0
7: \x85 TUPLE1
8: q BINPUT 1
10: a APPEND
11: . STOP
highest protocol among opcodes = 2
>>> dis(pickle.dumps(T, 2))
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: h BINGET 0
7: \x85 TUPLE1
8: q BINPUT 1
10: a APPEND
11: 0 POP
12: h BINGET 1
14: . STOP
highest protocol among opcodes = 2
"""
_memo_test = r"""
>>> import pickle
>>> from StringIO import StringIO
>>> f = StringIO()
>>> p = pickle.Pickler(f, 2)
>>> x = [1, 2, 3]
>>> p.dump(x)
>>> p.dump(x)
>>> f.seek(0)
>>> memo = {}
>>> dis(f, memo=memo)
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 1
8: K BININT1 2
10: K BININT1 3
12: e APPENDS (MARK at 5)
13: . STOP
highest protocol among opcodes = 2
>>> dis(f, memo=memo)
14: \x80 PROTO 2
16: h BINGET 0
18: . STOP
highest protocol among opcodes = 2
"""
__test__ = {'disassembler_test': _dis_test,
'disassembler_memo_test': _memo_test,
}
def _test():
import doctest
return doctest.testmod()
if __name__ == "__main__":
_test()
| artistic-2.0 |
TurboTurtle/sos | sos/report/plugins/snmp.py | 5 | 1026 | # Copyright (C) 2007 Sadique Puthen <[email protected]>
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.report.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class Snmp(Plugin):
short_desc = 'Simple network management protocol'
plugin_name = "snmp"
profiles = ('system', 'sysmgmt')
files = ('/etc/snmp/snmpd.conf',)
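    # sos consults this trigger file (together with the per-distro package
    # lists below) when deciding whether to enable the plugin on a host.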
def setup(self):
self.add_copy_spec("/etc/snmp")
class RedHatSnmp(Snmp, RedHatPlugin):
packages = ('net-snmp',)
def setup(self):
super(RedHatSnmp, self).setup()
class DebianSnmp(Snmp, DebianPlugin, UbuntuPlugin):
packages = ('snmp',)
def setup(self):
super(DebianSnmp, self).setup()
# vim: set et ts=4 sw=4 :
| gpl-2.0 |
Paul-Ezell/cinder-1 | cinder/tests/unit/test_volume_utils.py | 9 | 40038 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests For miscellaneous util methods used with volume."""
import datetime
import io
import mock
import six
from oslo_concurrency import processutils
from oslo_config import cfg
from cinder import context
from cinder import exception
from cinder import test
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder import utils
from cinder.volume import throttling
from cinder.volume import utils as volume_utils
CONF = cfg.CONF
class NotifyUsageTestCase(test.TestCase):
@mock.patch('cinder.volume.utils._usage_from_volume')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_volume_usage(self, mock_rpc, mock_conf, mock_usage):
mock_conf.host = 'host1'
output = volume_utils.notify_about_volume_usage(mock.sentinel.context,
mock.sentinel.volume,
'test_suffix')
self.assertIsNone(output)
mock_usage.assert_called_once_with(mock.sentinel.context,
mock.sentinel.volume)
mock_rpc.get_notifier.assert_called_once_with('volume', 'host1')
mock_rpc.get_notifier.return_value.info.assert_called_once_with(
mock.sentinel.context,
'volume.test_suffix',
mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_volume')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_volume_usage_with_kwargs(self, mock_rpc, mock_conf,
mock_usage):
mock_conf.host = 'host1'
output = volume_utils.notify_about_volume_usage(
mock.sentinel.context,
mock.sentinel.volume,
'test_suffix',
extra_usage_info={'a': 'b', 'c': 'd'},
host='host2')
self.assertIsNone(output)
mock_usage.assert_called_once_with(mock.sentinel.context,
mock.sentinel.volume, a='b', c='d')
mock_rpc.get_notifier.assert_called_once_with('volume', 'host2')
mock_rpc.get_notifier.return_value.info.assert_called_once_with(
mock.sentinel.context,
'volume.test_suffix',
mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_volume')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_replication_usage(self, mock_rpc,
mock_conf, mock_usage):
mock_conf.host = 'host1'
output = volume_utils.notify_about_replication_usage(
mock.sentinel.context,
mock.sentinel.volume,
'test_suffix')
self.assertIsNone(output)
mock_usage.assert_called_once_with(mock.sentinel.context,
mock.sentinel.volume)
mock_rpc.get_notifier.assert_called_once_with('replication', 'host1')
mock_rpc.get_notifier.return_value.info.assert_called_once_with(
mock.sentinel.context,
'replication.test_suffix',
mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_volume')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_replication_usage_with_kwargs(self, mock_rpc,
mock_conf, mock_usage):
mock_conf.host = 'host1'
output = volume_utils.notify_about_replication_usage(
mock.sentinel.context,
mock.sentinel.volume,
'test_suffix',
extra_usage_info={'a': 'b', 'c': 'd'},
host='host2')
self.assertIsNone(output)
mock_usage.assert_called_once_with(mock.sentinel.context,
mock.sentinel.volume,
a='b', c='d')
mock_rpc.get_notifier.assert_called_once_with('replication', 'host2')
mock_rpc.get_notifier.return_value.info.assert_called_once_with(
mock.sentinel.context,
'replication.test_suffix',
mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_volume')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_replication_error(self, mock_rpc,
mock_conf, mock_usage):
mock_conf.host = 'host1'
output = volume_utils.notify_about_replication_error(
mock.sentinel.context,
mock.sentinel.volume,
'test_suffix')
self.assertIsNone(output)
mock_usage.assert_called_once_with(mock.sentinel.context,
mock.sentinel.volume)
mock_rpc.get_notifier.assert_called_once_with('replication', 'host1')
mock_rpc.get_notifier.return_value.error.assert_called_once_with(
mock.sentinel.context,
'replication.test_suffix',
mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_volume')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_replication_error_with_kwargs(self, mock_rpc,
mock_conf, mock_usage):
mock_conf.host = 'host1'
output = volume_utils.notify_about_replication_error(
mock.sentinel.context,
mock.sentinel.volume,
'test_suffix',
extra_error_info={'a': 'b', 'c': 'd'},
host='host2')
self.assertIsNone(output)
mock_usage.assert_called_once_with(mock.sentinel.context,
mock.sentinel.volume,
a='b', c='d')
mock_rpc.get_notifier.assert_called_once_with('replication', 'host2')
mock_rpc.get_notifier.return_value.error.assert_called_once_with(
mock.sentinel.context,
'replication.test_suffix',
mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_snapshot')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_snapshot_usage(self, mock_rpc,
mock_conf, mock_usage):
mock_conf.host = 'host1'
output = volume_utils.notify_about_snapshot_usage(
mock.sentinel.context,
mock.sentinel.snapshot,
'test_suffix')
self.assertIsNone(output)
mock_usage.assert_called_once_with(mock.sentinel.snapshot)
mock_rpc.get_notifier.assert_called_once_with('snapshot', 'host1')
mock_rpc.get_notifier.return_value.info.assert_called_once_with(
mock.sentinel.context,
'snapshot.test_suffix',
mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_snapshot')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_snapshot_usage_with_kwargs(self, mock_rpc, mock_conf,
mock_usage):
mock_conf.host = 'host1'
output = volume_utils.notify_about_snapshot_usage(
mock.sentinel.context,
mock.sentinel.snapshot,
'test_suffix',
extra_usage_info={'a': 'b', 'c': 'd'},
host='host2')
self.assertIsNone(output)
mock_usage.assert_called_once_with(mock.sentinel.snapshot,
a='b', c='d')
mock_rpc.get_notifier.assert_called_once_with('snapshot', 'host2')
mock_rpc.get_notifier.return_value.info.assert_called_once_with(
mock.sentinel.context,
'snapshot.test_suffix',
mock_usage.return_value)
@mock.patch('cinder.objects.Volume.get_by_id')
def test_usage_from_snapshot(self, volume_get_by_id):
raw_volume = {
'id': '55614621',
'availability_zone': 'nova'
}
ctxt = context.get_admin_context()
volume_obj = fake_volume.fake_volume_obj(ctxt, **raw_volume)
volume_get_by_id.return_value = volume_obj
raw_snapshot = {
'project_id': '12b0330ec2584a',
'user_id': '158cba1b8c2bb6008e',
'volume': volume_obj,
'volume_id': '55614621',
'volume_size': 1,
'id': '343434a2',
'display_name': '11',
'created_at': '2014-12-11T10:10:00',
'status': 'pause',
'deleted': '',
'snapshot_metadata': [{'key': 'fake_snap_meta_key',
'value': 'fake_snap_meta_value'}],
'expected_attrs': ['metadata'],
}
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctxt, **raw_snapshot)
usage_info = volume_utils._usage_from_snapshot(snapshot_obj)
expected_snapshot = {
'tenant_id': '12b0330ec2584a',
'user_id': '158cba1b8c2bb6008e',
'availability_zone': 'nova',
'volume_id': '55614621',
'volume_size': 1,
'snapshot_id': '343434a2',
'display_name': '11',
'created_at': 'DONTCARE',
'status': 'pause',
'deleted': '',
'metadata': six.text_type({'fake_snap_meta_key':
u'fake_snap_meta_value'}),
}
self.assertDictMatch(expected_snapshot, usage_info)
@mock.patch('cinder.db.volume_glance_metadata_get')
@mock.patch('cinder.db.volume_attachment_get_used_by_volume_id')
def test_usage_from_volume(self, mock_attachment, mock_image_metadata):
mock_image_metadata.return_value = {'image_id': 'fake_image_id'}
mock_attachment.return_value = [{'instance_uuid': 'fake_instance_id'}]
raw_volume = {
'project_id': '12b0330ec2584a',
'user_id': '158cba1b8c2bb6008e',
'host': 'fake_host',
'availability_zone': 'nova',
'volume_type_id': 'fake_volume_type_id',
'id': 'fake_volume_id',
'size': 1,
'display_name': 'test_volume',
'created_at': datetime.datetime(2015, 1, 1, 1, 1, 1),
'launched_at': datetime.datetime(2015, 1, 1, 1, 1, 1),
'snapshot_id': None,
'replication_status': None,
'replication_extended_status': None,
'replication_driver_data': None,
'status': 'available',
'volume_metadata': {'fake_metadata_key': 'fake_metadata_value'},
}
usage_info = volume_utils._usage_from_volume(
mock.sentinel.context,
raw_volume)
expected_volume = {
'tenant_id': '12b0330ec2584a',
'user_id': '158cba1b8c2bb6008e',
'host': 'fake_host',
'availability_zone': 'nova',
'volume_type': 'fake_volume_type_id',
'volume_id': 'fake_volume_id',
'size': 1,
'display_name': 'test_volume',
'created_at': '2015-01-01T01:01:01',
'launched_at': '2015-01-01T01:01:01',
'snapshot_id': None,
'replication_status': None,
'replication_extended_status': None,
'replication_driver_data': None,
'status': 'available',
'metadata': {'fake_metadata_key': 'fake_metadata_value'},
'glance_metadata': {'image_id': 'fake_image_id'},
'volume_attachment': [{'instance_uuid': 'fake_instance_id'}],
}
self.assertEqual(expected_volume, usage_info)
@mock.patch('cinder.volume.utils._usage_from_consistencygroup')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_consistencygroup_usage(self, mock_rpc,
mock_conf, mock_usage):
mock_conf.host = 'host1'
output = volume_utils.notify_about_consistencygroup_usage(
mock.sentinel.context,
mock.sentinel.consistencygroup,
'test_suffix')
self.assertIsNone(output)
mock_usage.assert_called_once_with(mock.sentinel.consistencygroup)
mock_rpc.get_notifier.assert_called_once_with('consistencygroup',
'host1')
mock_rpc.get_notifier.return_value.info.assert_called_once_with(
mock.sentinel.context,
'consistencygroup.test_suffix',
mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_consistencygroup')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_consistencygroup_usage_with_kwargs(self, mock_rpc,
mock_conf,
mock_usage):
mock_conf.host = 'host1'
output = volume_utils.notify_about_consistencygroup_usage(
mock.sentinel.context,
mock.sentinel.consistencygroup,
'test_suffix',
extra_usage_info={'a': 'b', 'c': 'd'},
host='host2')
self.assertIsNone(output)
mock_usage.assert_called_once_with(mock.sentinel.consistencygroup,
a='b', c='d')
mock_rpc.get_notifier.assert_called_once_with('consistencygroup',
'host2')
mock_rpc.get_notifier.return_value.info.assert_called_once_with(
mock.sentinel.context,
'consistencygroup.test_suffix',
mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_cgsnapshot')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_cgsnapshot_usage(self, mock_rpc,
mock_conf, mock_usage):
mock_conf.host = 'host1'
output = volume_utils.notify_about_cgsnapshot_usage(
mock.sentinel.context,
mock.sentinel.cgsnapshot,
'test_suffix')
self.assertIsNone(output)
mock_usage.assert_called_once_with(mock.sentinel.cgsnapshot)
mock_rpc.get_notifier.assert_called_once_with('cgsnapshot', 'host1')
mock_rpc.get_notifier.return_value.info.assert_called_once_with(
mock.sentinel.context,
'cgsnapshot.test_suffix',
mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_cgsnapshot')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_cgsnapshot_usage_with_kwargs(self, mock_rpc,
mock_conf, mock_usage):
mock_conf.host = 'host1'
output = volume_utils.notify_about_cgsnapshot_usage(
mock.sentinel.context,
mock.sentinel.cgsnapshot,
'test_suffix',
extra_usage_info={'a': 'b', 'c': 'd'},
host='host2')
self.assertIsNone(output)
mock_usage.assert_called_once_with(mock.sentinel.cgsnapshot,
a='b', c='d')
mock_rpc.get_notifier.assert_called_once_with('cgsnapshot', 'host2')
mock_rpc.get_notifier.return_value.info.assert_called_once_with(
mock.sentinel.context,
'cgsnapshot.test_suffix',
mock_usage.return_value)
def test_usage_from_backup(self):
raw_backup = {
'project_id': '12b0330ec2584a',
'user_id': '158cba1b8c2bb6008e',
'availability_zone': 'nova',
'id': 'fake_id',
'host': 'fake_host',
'display_name': 'test_backup',
'created_at': '2014-12-11T10:10:00',
'status': 'available',
'volume_id': 'fake_volume_id',
'size': 1,
'service_metadata': None,
'service': 'cinder.backup.drivers.swift',
'fail_reason': None,
'parent_id': 'fake_parent_id',
'num_dependent_backups': 0,
}
# Make it easier to find out differences between raw and expected.
expected_backup = raw_backup.copy()
expected_backup['tenant_id'] = expected_backup.pop('project_id')
expected_backup['backup_id'] = expected_backup.pop('id')
usage_info = volume_utils._usage_from_backup(raw_backup)
self.assertEqual(expected_backup, usage_info)
class LVMVolumeDriverTestCase(test.TestCase):
def test_convert_blocksize_option(self):
# Test valid volume_dd_blocksize
bs, count = volume_utils._calculate_count(1024, '10M')
self.assertEqual('10M', bs)
self.assertEqual(103, count)
bs, count = volume_utils._calculate_count(1024, '1xBBB')
self.assertEqual('1M', bs)
self.assertEqual(1024, count)
# Test 'volume_dd_blocksize' with fraction
bs, count = volume_utils._calculate_count(1024, '1.3M')
self.assertEqual('1M', bs)
self.assertEqual(1024, count)
# Test zero-size 'volume_dd_blocksize'
bs, count = volume_utils._calculate_count(1024, '0M')
self.assertEqual('1M', bs)
self.assertEqual(1024, count)
# Test negative 'volume_dd_blocksize'
bs, count = volume_utils._calculate_count(1024, '-1M')
self.assertEqual('1M', bs)
self.assertEqual(1024, count)
# Test non-digital 'volume_dd_blocksize'
bs, count = volume_utils._calculate_count(1024, 'ABM')
self.assertEqual('1M', bs)
self.assertEqual(1024, count)
class OdirectSupportTestCase(test.TestCase):
@mock.patch('cinder.utils.execute')
def test_check_for_odirect_support(self, mock_exec):
output = volume_utils.check_for_odirect_support('/dev/abc', '/dev/def')
self.assertTrue(output)
mock_exec.assert_called_once_with('dd', 'count=0', 'if=/dev/abc',
'of=/dev/def', 'oflag=direct',
run_as_root=True)
mock_exec.reset_mock()
output = volume_utils.check_for_odirect_support('/dev/abc', '/dev/def',
'iflag=direct')
self.assertTrue(output)
mock_exec.assert_called_once_with('dd', 'count=0', 'if=/dev/abc',
'of=/dev/def', 'iflag=direct',
run_as_root=True)
@mock.patch('cinder.utils.execute',
side_effect=processutils.ProcessExecutionError)
def test_check_for_odirect_support_error(self, mock_exec):
output = volume_utils.check_for_odirect_support('/dev/abc', '/dev/def')
self.assertFalse(output)
mock_exec.assert_called_once_with('dd', 'count=0', 'if=/dev/abc',
'of=/dev/def', 'oflag=direct',
run_as_root=True)
class ClearVolumeTestCase(test.TestCase):
@mock.patch('cinder.volume.utils.copy_volume', return_value=None)
@mock.patch('cinder.volume.utils.CONF')
def test_clear_volume_conf(self, mock_conf, mock_copy):
mock_conf.volume_clear = 'zero'
mock_conf.volume_clear_size = 0
mock_conf.volume_dd_blocksize = '1M'
mock_conf.volume_clear_ionice = '-c3'
output = volume_utils.clear_volume(1024, 'volume_path')
self.assertIsNone(output)
mock_copy.assert_called_once_with('/dev/zero', 'volume_path', 1024,
'1M', sync=True,
execute=utils.execute, ionice='-c3',
throttle=None, sparse=False)
@mock.patch('cinder.volume.utils.copy_volume', return_value=None)
@mock.patch('cinder.volume.utils.CONF')
def test_clear_volume_args(self, mock_conf, mock_copy):
mock_conf.volume_clear = 'shred'
mock_conf.volume_clear_size = 0
mock_conf.volume_dd_blocksize = '1M'
mock_conf.volume_clear_ionice = '-c3'
output = volume_utils.clear_volume(1024, 'volume_path', 'zero', 1,
'-c0')
self.assertIsNone(output)
mock_copy.assert_called_once_with('/dev/zero', 'volume_path', 1,
'1M', sync=True,
execute=utils.execute, ionice='-c0',
throttle=None, sparse=False)
@mock.patch('cinder.utils.execute')
@mock.patch('cinder.volume.utils.CONF')
def test_clear_volume_shred(self, mock_conf, mock_exec):
mock_conf.volume_clear = 'shred'
mock_conf.volume_clear_size = 1
mock_conf.volume_clear_ionice = None
output = volume_utils.clear_volume(1024, 'volume_path')
self.assertIsNone(output)
mock_exec.assert_called_once_with(
'shred', '-n3', '-s1MiB', "volume_path", run_as_root=True)
@mock.patch('cinder.utils.execute')
@mock.patch('cinder.volume.utils.CONF')
def test_clear_volume_shred_not_clear_size(self, mock_conf, mock_exec):
mock_conf.volume_clear = 'shred'
mock_conf.volume_clear_size = None
mock_conf.volume_clear_ionice = None
output = volume_utils.clear_volume(1024, 'volume_path')
self.assertIsNone(output)
mock_exec.assert_called_once_with(
'shred', '-n3', "volume_path", run_as_root=True)
@mock.patch('cinder.volume.utils.CONF')
def test_clear_volume_invalid_opt(self, mock_conf):
mock_conf.volume_clear = 'non_existent_volume_clearer'
mock_conf.volume_clear_size = 0
mock_conf.volume_clear_ionice = None
self.assertRaises(exception.InvalidConfigurationValue,
volume_utils.clear_volume,
1024, "volume_path")
class CopyVolumeTestCase(test.TestCase):
@mock.patch('cinder.volume.utils._calculate_count',
return_value=(1234, 5678))
@mock.patch('cinder.volume.utils.check_for_odirect_support',
return_value=True)
@mock.patch('cinder.utils.execute')
@mock.patch('cinder.volume.utils.CONF')
def test_copy_volume_dd_iflag_and_oflag(self, mock_conf, mock_exec,
mock_support, mock_count):
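        # With O_DIRECT reported as supported, dd is expected to be invoked
        # with iflag=direct/oflag=direct and without conv=fdatasync.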
fake_throttle = throttling.Throttle(['fake_throttle'])
output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1,
sync=True, execute=utils.execute,
ionice=None, throttle=fake_throttle)
self.assertIsNone(output)
mock_exec.assert_called_once_with('fake_throttle', 'dd',
'if=/dev/zero',
'of=/dev/null', 'count=5678',
'bs=1234', 'iflag=direct',
'oflag=direct', run_as_root=True)
mock_exec.reset_mock()
output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1,
sync=False, execute=utils.execute,
ionice=None, throttle=fake_throttle)
self.assertIsNone(output)
mock_exec.assert_called_once_with('fake_throttle', 'dd',
'if=/dev/zero',
'of=/dev/null', 'count=5678',
'bs=1234', 'iflag=direct',
'oflag=direct', run_as_root=True)
@mock.patch('cinder.volume.utils._calculate_count',
return_value=(1234, 5678))
@mock.patch('cinder.volume.utils.check_for_odirect_support',
return_value=False)
@mock.patch('cinder.utils.execute')
def test_copy_volume_dd_no_iflag_or_oflag(self, mock_exec,
mock_support, mock_count):
fake_throttle = throttling.Throttle(['fake_throttle'])
output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1,
sync=True, execute=utils.execute,
ionice=None, throttle=fake_throttle)
self.assertIsNone(output)
mock_exec.assert_called_once_with('fake_throttle', 'dd',
'if=/dev/zero',
'of=/dev/null', 'count=5678',
'bs=1234', 'conv=fdatasync',
run_as_root=True)
mock_exec.reset_mock()
output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1,
sync=False, execute=utils.execute,
ionice=None, throttle=fake_throttle)
self.assertIsNone(output)
mock_exec.assert_called_once_with('fake_throttle', 'dd',
'if=/dev/zero',
'of=/dev/null', 'count=5678',
'bs=1234', run_as_root=True)
@mock.patch('cinder.volume.utils._calculate_count',
return_value=(1234, 5678))
@mock.patch('cinder.volume.utils.check_for_odirect_support',
return_value=False)
@mock.patch('cinder.utils.execute')
def test_copy_volume_dd_no_throttle(self, mock_exec, mock_support,
mock_count):
output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1,
sync=True, execute=utils.execute,
ionice=None)
self.assertIsNone(output)
mock_exec.assert_called_once_with('dd', 'if=/dev/zero', 'of=/dev/null',
'count=5678', 'bs=1234',
'conv=fdatasync', run_as_root=True)
@mock.patch('cinder.volume.utils._calculate_count',
return_value=(1234, 5678))
@mock.patch('cinder.volume.utils.check_for_odirect_support',
return_value=False)
@mock.patch('cinder.utils.execute')
def test_copy_volume_dd_with_ionice(self, mock_exec,
mock_support, mock_count):
output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1,
sync=True, execute=utils.execute,
ionice='-c3')
self.assertIsNone(output)
mock_exec.assert_called_once_with('ionice', '-c3', 'dd',
'if=/dev/zero', 'of=/dev/null',
'count=5678', 'bs=1234',
'conv=fdatasync', run_as_root=True)
@mock.patch('cinder.volume.utils._calculate_count',
return_value=(1234, 5678))
@mock.patch('cinder.volume.utils.check_for_odirect_support',
return_value=False)
@mock.patch('cinder.utils.execute')
def test_copy_volume_dd_with_sparse(self, mock_exec,
mock_support, mock_count):
output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1,
sync=True, execute=utils.execute,
sparse=True)
self.assertIsNone(output)
mock_exec.assert_called_once_with('dd', 'if=/dev/zero', 'of=/dev/null',
'count=5678', 'bs=1234',
'conv=fdatasync,sparse',
run_as_root=True)
@mock.patch('cinder.volume.utils._calculate_count',
return_value=(1234, 5678))
@mock.patch('cinder.volume.utils.check_for_odirect_support',
return_value=True)
@mock.patch('cinder.utils.execute')
def test_copy_volume_dd_with_sparse_iflag_and_oflag(self, mock_exec,
mock_support,
mock_count):
output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1,
sync=True, execute=utils.execute,
sparse=True)
self.assertIsNone(output)
mock_exec.assert_called_once_with('dd', 'if=/dev/zero', 'of=/dev/null',
'count=5678', 'bs=1234',
'iflag=direct', 'oflag=direct',
'conv=sparse', run_as_root=True)
@mock.patch('cinder.volume.utils._copy_volume_with_file')
def test_copy_volume_handles(self, mock_copy):
handle1 = io.RawIOBase()
handle2 = io.RawIOBase()
output = volume_utils.copy_volume(handle1, handle2, 1024, 1)
self.assertIsNone(output)
mock_copy.assert_called_once_with(handle1, handle2, 1024)
@mock.patch('cinder.volume.utils._transfer_data')
@mock.patch('cinder.volume.utils._open_volume_with_path')
def test_copy_volume_handle_transfer(self, mock_open, mock_transfer):
handle = io.RawIOBase()
output = volume_utils.copy_volume('/foo/bar', handle, 1024, 1)
self.assertIsNone(output)
mock_transfer.assert_called_once_with(mock.ANY, mock.ANY,
1073741824, mock.ANY)
class VolumeUtilsTestCase(test.TestCase):
def test_null_safe_str(self):
self.assertEqual('', volume_utils.null_safe_str(None))
self.assertEqual('', volume_utils.null_safe_str(False))
self.assertEqual('', volume_utils.null_safe_str(0))
self.assertEqual('', volume_utils.null_safe_str([]))
self.assertEqual('', volume_utils.null_safe_str(()))
self.assertEqual('', volume_utils.null_safe_str({}))
self.assertEqual('', volume_utils.null_safe_str(set()))
self.assertEqual('a', volume_utils.null_safe_str('a'))
self.assertEqual('1', volume_utils.null_safe_str(1))
self.assertEqual('True', volume_utils.null_safe_str(True))
@mock.patch('cinder.utils.get_root_helper')
@mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning')
def test_supports_thin_provisioning(self, mock_supports_thin, mock_helper):
self.assertEqual(mock_supports_thin.return_value,
volume_utils.supports_thin_provisioning())
mock_helper.assert_called_once_with()
@mock.patch('cinder.utils.get_root_helper')
@mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes')
def test_get_all_physical_volumes(self, mock_get_vols, mock_helper):
self.assertEqual(mock_get_vols.return_value,
volume_utils.get_all_physical_volumes())
mock_helper.assert_called_once_with()
@mock.patch('cinder.utils.get_root_helper')
@mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_volume_groups')
def test_get_all_volume_groups(self, mock_get_groups, mock_helper):
self.assertEqual(mock_get_groups.return_value,
volume_utils.get_all_volume_groups())
mock_helper.assert_called_once_with()
def test_generate_password(self):
password = volume_utils.generate_password()
self.assertTrue(any(c for c in password if c in '23456789'))
self.assertTrue(any(c for c in password
if c in 'abcdefghijkmnopqrstuvwxyz'))
self.assertTrue(any(c for c in password
if c in 'ABCDEFGHJKLMNPQRSTUVWXYZ'))
self.assertEqual(16, len(password))
self.assertEqual(10, len(volume_utils.generate_password(10)))
@mock.patch('cinder.volume.utils.generate_password')
def test_generate_username(self, mock_gen_pass):
output = volume_utils.generate_username()
self.assertEqual(mock_gen_pass.return_value, output)
def test_extract_host(self):
host = 'Host'
# default level is 'backend'
self.assertEqual(host,
volume_utils.extract_host(host))
self.assertEqual(host,
volume_utils.extract_host(host, 'host'))
self.assertEqual(host,
volume_utils.extract_host(host, 'backend'))
# default_pool_name doesn't work for level other than 'pool'
self.assertEqual(host,
volume_utils.extract_host(host, 'host', True))
self.assertEqual(host,
volume_utils.extract_host(host, 'host', False))
self.assertEqual(host,
volume_utils.extract_host(host, 'backend', True))
self.assertEqual(host,
volume_utils.extract_host(host, 'backend', False))
self.assertEqual(None,
volume_utils.extract_host(host, 'pool'))
self.assertEqual('_pool0',
volume_utils.extract_host(host, 'pool', True))
host = 'Host@Backend'
self.assertEqual('Host@Backend',
volume_utils.extract_host(host))
self.assertEqual('Host',
volume_utils.extract_host(host, 'host'))
self.assertEqual(host,
volume_utils.extract_host(host, 'backend'))
self.assertEqual(None,
volume_utils.extract_host(host, 'pool'))
self.assertEqual('_pool0',
volume_utils.extract_host(host, 'pool', True))
host = 'Host@Backend#Pool'
pool = 'Pool'
self.assertEqual('Host@Backend',
volume_utils.extract_host(host))
self.assertEqual('Host',
volume_utils.extract_host(host, 'host'))
self.assertEqual('Host@Backend',
volume_utils.extract_host(host, 'backend'))
self.assertEqual(pool,
volume_utils.extract_host(host, 'pool'))
self.assertEqual(pool,
volume_utils.extract_host(host, 'pool', True))
host = 'Host#Pool'
self.assertEqual('Host',
volume_utils.extract_host(host))
self.assertEqual('Host',
volume_utils.extract_host(host, 'host'))
self.assertEqual('Host',
volume_utils.extract_host(host, 'backend'))
self.assertEqual(pool,
volume_utils.extract_host(host, 'pool'))
self.assertEqual(pool,
volume_utils.extract_host(host, 'pool', True))
def test_append_host(self):
host = 'Host'
pool = 'Pool'
expected = 'Host#Pool'
self.assertEqual(expected,
volume_utils.append_host(host, pool))
pool = None
expected = 'Host'
self.assertEqual(expected,
volume_utils.append_host(host, pool))
host = None
pool = 'pool'
expected = None
self.assertEqual(expected,
volume_utils.append_host(host, pool))
host = None
pool = None
expected = None
self.assertEqual(expected,
volume_utils.append_host(host, pool))
def test_compare_hosts(self):
host_1 = 'fake_host@backend1'
host_2 = 'fake_host@backend1#pool1'
self.assertTrue(volume_utils.hosts_are_equivalent(host_1, host_2))
host_2 = 'fake_host@backend1'
self.assertTrue(volume_utils.hosts_are_equivalent(host_1, host_2))
host_2 = 'fake_host2@backend1'
self.assertFalse(volume_utils.hosts_are_equivalent(host_1, host_2))
def test_check_managed_volume_already_managed(self):
mock_db = mock.Mock()
result = volume_utils.check_already_managed_volume(
mock_db, 'volume-d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1')
self.assertTrue(result)
@mock.patch('cinder.volume.utils.CONF')
def test_check_already_managed_with_vol_id_vol_pattern(self, conf_mock):
mock_db = mock.Mock()
conf_mock.volume_name_template = 'volume-%s-volume'
result = volume_utils.check_already_managed_volume(
mock_db, 'volume-d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1-volume')
self.assertTrue(result)
@mock.patch('cinder.volume.utils.CONF')
def test_check_already_managed_with_id_vol_pattern(self, conf_mock):
mock_db = mock.Mock()
conf_mock.volume_name_template = '%s-volume'
result = volume_utils.check_already_managed_volume(
mock_db, 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1-volume')
self.assertTrue(result)
def test_check_managed_volume_not_managed_cinder_like_name(self):
mock_db = mock.Mock()
mock_db.volume_get = mock.Mock(
side_effect=exception.VolumeNotFound(
'volume-d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1'))
result = volume_utils.check_already_managed_volume(
mock_db, 'volume-d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1')
self.assertFalse(result)
def test_check_managed_volume_not_managed(self):
mock_db = mock.Mock()
result = volume_utils.check_already_managed_volume(
mock_db, 'test-volume')
self.assertFalse(result)
def test_check_managed_volume_not_managed_id_like_uuid(self):
mock_db = mock.Mock()
result = volume_utils.check_already_managed_volume(
mock_db, 'volume-d8cd1fe')
self.assertFalse(result)
def test_convert_config_string_to_dict(self):
test_string = "{'key-1'='val-1' 'key-2'='val-2' 'key-3'='val-3'}"
expected_dict = {'key-1': 'val-1', 'key-2': 'val-2', 'key-3': 'val-3'}
self.assertEqual(
expected_dict,
volume_utils.convert_config_string_to_dict(test_string))
def test_process_reserve_over_quota(self):
ctxt = context.get_admin_context()
ctxt.project_id = 'fake'
overs_one = ['gigabytes']
over_two = ['snapshots']
usages = {'gigabytes': {'reserved': 1, 'in_use': 9},
'snapshots': {'reserved': 1, 'in_use': 9}}
quotas = {'gigabytes': 10, 'snapshots': 10}
size = 1
self.assertRaises(exception.VolumeSizeExceedsAvailableQuota,
volume_utils.process_reserve_over_quota,
ctxt, overs_one, usages, quotas, size)
self.assertRaises(exception.SnapshotLimitExceeded,
volume_utils.process_reserve_over_quota,
ctxt, over_two, usages, quotas, size)
| apache-2.0 |
xrg/openerp-server | bin/pychart/zap.py | 15 | 2380 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2000-2005 by Yasushi Saito ([email protected])
#
# Jockey is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2, or (at your option) any
# later version.
#
# Jockey is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
import fill_style
import line_style
import copy
def _draw_zap(can, p1, p2, style, pat):
x = copy.deepcopy(p1)
x.extend(p2)
can.polygon(None, pat, x)
can.lines(style, p1)
can.lines(style, p2)
def zap_horizontally(can, style, pat, x1, y1, x2, y2, xsize, ysize):
"""Draw a horizontal "zapping" symbol on the canvas that shows
that a graph is ripped in the middle.
    Parameter <style> specifies the line style for the zig-zag lines.
PAT specifies the pattern with which the area is filled.
The symbol is drawn in the rectangle (<x1>, <y1>) - (<x2>, <y2>).
Each "zigzag" has the width <xsize>, height <ysize>."""
assert isinstance(style, line_style.T)
assert isinstance(pat, fill_style.T)
points = []
points2 = []
x = x1
y = y1
while x < x2:
points.append((x, y))
points2.append((x, y + (y2-y1)))
x += xsize
if y == y1:
y += ysize
else:
y -= ysize
points2.reverse()
_draw_zap(can, points, points2, style, pat)
def zap_vertically(can, style, pat, x1, y1, x2, y2, xsize, ysize):
"""Draw a vertical "zapping" symbol on the canvas that shows
that a graph is ripped in the middle.
    Parameter <style> specifies the line style for the zig-zag lines.
PAT specifies the pattern with which the area is filled.
The symbol is drawn in the rectangle (<x1>, <y1>) - (<x2>, <y2>).
Each "zigzag" has the width <xsize>, height <ysize>."""
points = []
points2 = []
x = x1
y = y1
while y < y2:
points.append((x, y))
points2.append((x + (x2-x1), y))
y += ysize
if x == x1:
x += xsize
else:
x -= xsize
points2.reverse()
_draw_zap(can, points, points2, style, pat)
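# Rough usage sketch (hedged: the canvas/style names below are the usual
# pychart defaults and are assumptions, not taken from this module):
#
#     from pychart import canvas, line_style, fill_style
#     can = canvas.default_canvas()
#     zap_horizontally(can, line_style.default, fill_style.default,
#                      100, 50, 200, 60, 5, 5)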
| agpl-3.0 |
kirca/odoo | addons/stock/procurement.py | 16 | 21333 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
from openerp import SUPERUSER_ID
from dateutil.relativedelta import relativedelta
from datetime import datetime
import openerp
class procurement_group(osv.osv):
_inherit = 'procurement.group'
_columns = {
'partner_id': fields.many2one('res.partner', 'Partner')
}
class procurement_rule(osv.osv):
_inherit = 'procurement.rule'
def _get_action(self, cr, uid, context=None):
result = super(procurement_rule, self)._get_action(cr, uid, context=context)
return result + [('move', _('Move From Another Location'))]
def _get_rules(self, cr, uid, ids, context=None):
res = []
for route in self.browse(cr, uid, ids):
res += [x.id for x in route.pull_ids]
return res
_columns = {
'location_id': fields.many2one('stock.location', 'Procurement Location'),
'location_src_id': fields.many2one('stock.location', 'Source Location',
help="Source location is action=move"),
'route_id': fields.many2one('stock.location.route', 'Route',
help="If route_id is False, the rule is global"),
'procure_method': fields.selection([('make_to_stock', 'Take From Stock'), ('make_to_order', 'Create Procurement')], 'Move Supply Method', required=True,
help="""Determines the procurement method of the stock move that will be generated: whether it will need to 'take from the available stock' in its source location or needs to ignore its stock and create a procurement over there."""),
'route_sequence': fields.related('route_id', 'sequence', string='Route Sequence',
store={
'stock.location.route': (_get_rules, ['sequence'], 10),
'procurement.rule': (lambda self, cr, uid, ids, c={}: ids, ['route_id'], 10),
}),
'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type',
help="Picking Type determines the way the picking should be shown in the view, reports, ..."),
'delay': fields.integer('Number of Days'),
'partner_address_id': fields.many2one('res.partner', 'Partner Address'),
'propagate': fields.boolean('Propagate cancel and split', help='If checked, when the previous move of the move (which was generated by a next procurement) is cancelled or split, the move generated by this move will too'),
'warehouse_id': fields.many2one('stock.warehouse', 'Served Warehouse', help='The warehouse this rule is for'),
'propagate_warehouse_id': fields.many2one('stock.warehouse', 'Warehouse to Propagate', help="The warehouse to propagate on the created move/procurement, which can be different of the warehouse this rule is for (e.g for resupplying rules from another warehouse)"),
}
_defaults = {
'procure_method': 'make_to_stock',
'propagate': True,
'delay': 0,
}
class procurement_order(osv.osv):
_inherit = "procurement.order"
_columns = {
'location_id': fields.many2one('stock.location', 'Procurement Location'), # not required because task may create procurements that aren't linked to a location with project_mrp
'partner_dest_id': fields.many2one('res.partner', 'Customer Address', help="In case of dropshipping, we need to know the destination address more precisely"),
'move_ids': fields.one2many('stock.move', 'procurement_id', 'Moves', help="Moves created by the procurement"),
'move_dest_id': fields.many2one('stock.move', 'Destination Move', help="Move which caused (created) the procurement"),
'route_ids': fields.many2many('stock.location.route', 'stock_location_route_procurement', 'procurement_id', 'route_id', 'Preferred Routes', help="Preferred route to be followed by the procurement order. Usually copied from the generating document (SO) but could be set up manually."),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', help="Warehouse to consider for the route selection"),
'orderpoint_id': fields.many2one('stock.warehouse.orderpoint', 'Minimum Stock Rule'),
}
def propagate_cancel(self, cr, uid, procurement, context=None):
if procurement.rule_id.action == 'move' and procurement.move_ids:
self.pool.get('stock.move').action_cancel(cr, uid, [m.id for m in procurement.move_ids], context=context)
def cancel(self, cr, uid, ids, context=None):
if context is None:
context = {}
to_cancel_ids = self.get_cancel_ids(cr, uid, ids, context=context)
ctx = context.copy()
#set the context for the propagation of the procurement cancelation
ctx['cancel_procurement'] = True
for procurement in self.browse(cr, uid, to_cancel_ids, context=ctx):
self.propagate_cancel(cr, uid, procurement, context=ctx)
return super(procurement_order, self).cancel(cr, uid, to_cancel_ids, context=ctx)
def _find_parent_locations(self, cr, uid, procurement, context=None):
location = procurement.location_id
res = [location.id]
while location.location_id:
location = location.location_id
res.append(location.id)
return res
def change_warehouse_id(self, cr, uid, ids, warehouse_id, context=None):
if warehouse_id:
warehouse = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id, context=context)
return {'value': {'location_id': warehouse.lot_stock_id.id}}
return {}
def _search_suitable_rule(self, cr, uid, procurement, domain, context=None):
'''we try to first find a rule among the ones defined on the procurement order group and if none is found, we try on the routes defined for the product, and finally we fallback on the default behavior'''
pull_obj = self.pool.get('procurement.rule')
warehouse_route_ids = []
if procurement.warehouse_id:
domain += ['|', ('warehouse_id', '=', procurement.warehouse_id.id), ('warehouse_id', '=', False)]
warehouse_route_ids = [x.id for x in procurement.warehouse_id.route_ids]
product_route_ids = [x.id for x in procurement.product_id.route_ids + procurement.product_id.categ_id.total_route_ids]
procurement_route_ids = [x.id for x in procurement.route_ids]
res = pull_obj.search(cr, uid, domain + [('route_id', 'in', procurement_route_ids)], order='route_sequence, sequence', context=context)
if not res:
res = pull_obj.search(cr, uid, domain + [('route_id', 'in', product_route_ids)], order='route_sequence, sequence', context=context)
if not res:
res = warehouse_route_ids and pull_obj.search(cr, uid, domain + [('route_id', 'in', warehouse_route_ids)], order='route_sequence, sequence', context=context) or []
if not res:
res = pull_obj.search(cr, uid, domain + [('route_id', '=', False)], order='sequence', context=context)
return res
def _find_suitable_rule(self, cr, uid, procurement, context=None):
rule_id = super(procurement_order, self)._find_suitable_rule(cr, uid, procurement, context=context)
if not rule_id:
#a rule defined on 'Stock' is suitable for a procurement in 'Stock\Bin A'
all_parent_location_ids = self._find_parent_locations(cr, uid, procurement, context=context)
rule_id = self._search_suitable_rule(cr, uid, procurement, [('location_id', 'in', all_parent_location_ids)], context=context)
rule_id = rule_id and rule_id[0] or False
return rule_id
def _run_move_create(self, cr, uid, procurement, context=None):
''' Returns a dictionary of values that will be used to create a stock move from a procurement.
This function assumes that the given procurement has a rule (action == 'move') set on it.
:param procurement: browse record
:rtype: dictionary
'''
newdate = (datetime.strptime(procurement.date_planned, '%Y-%m-%d %H:%M:%S') - relativedelta(days=procurement.rule_id.delay or 0)).strftime('%Y-%m-%d %H:%M:%S')
group_id = False
if procurement.rule_id.group_propagation_option == 'propagate':
group_id = procurement.group_id and procurement.group_id.id or False
elif procurement.rule_id.group_propagation_option == 'fixed':
group_id = procurement.rule_id.group_id and procurement.rule_id.group_id.id or False
#it is possible that we've already got some move done, so check for the done qty and create
#a new move with the correct qty
already_done_qty = 0
already_done_qty_uos = 0
for move in procurement.move_ids:
already_done_qty += move.product_uom_qty if move.state == 'done' else 0
already_done_qty_uos += move.product_uos_qty if move.state == 'done' else 0
qty_left = max(procurement.product_qty - already_done_qty, 0)
qty_uos_left = max(procurement.product_uos_qty - already_done_qty_uos, 0)
vals = {
'name': procurement.name,
'company_id': procurement.rule_id.company_id.id or procurement.rule_id.location_src_id.company_id.id or procurement.rule_id.location_id.company_id.id or procurement.company_id.id,
'product_id': procurement.product_id.id,
'product_uom': procurement.product_uom.id,
'product_uom_qty': qty_left,
'product_uos_qty': (procurement.product_uos and qty_uos_left) or qty_left,
'product_uos': (procurement.product_uos and procurement.product_uos.id) or procurement.product_uom.id,
'partner_id': procurement.rule_id.partner_address_id.id or (procurement.group_id and procurement.group_id.partner_id.id) or False,
'location_id': procurement.rule_id.location_src_id.id,
'location_dest_id': procurement.rule_id.location_id.id,
'move_dest_id': procurement.move_dest_id and procurement.move_dest_id.id or False,
'procurement_id': procurement.id,
'rule_id': procurement.rule_id.id,
'procure_method': procurement.rule_id.procure_method,
'origin': procurement.origin,
'picking_type_id': procurement.rule_id.picking_type_id.id,
'group_id': group_id,
'route_ids': [(4, x.id) for x in procurement.route_ids],
'warehouse_id': procurement.rule_id.propagate_warehouse_id.id or procurement.rule_id.warehouse_id.id,
'date': newdate,
'date_expected': newdate,
'propagate': procurement.rule_id.propagate,
}
return vals
def _run(self, cr, uid, procurement, context=None):
if procurement.rule_id and procurement.rule_id.action == 'move':
if not procurement.rule_id.location_src_id:
self.message_post(cr, uid, [procurement.id], body=_('No source location defined!'), context=context)
return False
move_obj = self.pool.get('stock.move')
move_dict = self._run_move_create(cr, uid, procurement, context=context)
#create the move as SUPERUSER because the current user may not have the rights to do it (mto product launched by a sale for example)
move_obj.create(cr, SUPERUSER_ID, move_dict, context=context)
return True
return super(procurement_order, self)._run(cr, uid, procurement, context=context)
def run(self, cr, uid, ids, context=None):
res = super(procurement_order, self).run(cr, uid, ids, context=context)
#after all the procurements are run, check if some created a draft stock move that needs to be confirmed
        #(we do this in batch because batching speeds up the picking assignment and the picking state computation)
move_to_confirm_ids = []
for procurement in self.browse(cr, uid, ids, context=context):
if procurement.state == "running" and procurement.rule_id and procurement.rule_id.action == "move":
move_to_confirm_ids += [m.id for m in procurement.move_ids if m.state == 'draft']
if move_to_confirm_ids:
self.pool.get('stock.move').action_confirm(cr, uid, move_to_confirm_ids, context=context)
return res
def _check(self, cr, uid, procurement, context=None):
''' Implement the procurement checking for rules of type 'move'. The procurement will be satisfied only if all related
moves are done/cancel and if the requested quantity is moved.
'''
if procurement.rule_id and procurement.rule_id.action == 'move':
uom_obj = self.pool.get('product.uom')
done_test_list = []
done_cancel_test_list = []
qty_done = 0
for move in procurement.move_ids:
done_test_list.append(move.state == 'done')
done_cancel_test_list.append(move.state in ('done', 'cancel'))
qty_done += move.product_qty if move.state == 'done' else 0
qty_done = uom_obj._compute_qty(cr, uid, procurement.product_id.uom_id.id, qty_done, procurement.product_uom.id)
at_least_one_done = any(done_test_list)
all_done_or_cancel = all(done_cancel_test_list)
if not all_done_or_cancel:
return False
elif all_done_or_cancel and procurement.product_qty == qty_done:
return True
elif at_least_one_done:
#some move cancelled and some validated
self.message_post(cr, uid, [procurement.id], body=_('Some stock moves have been cancelled for this procurement. Run the procurement again to trigger a move for the remaining quantity or change the procurement quantity to finish it directly'), context=context)
else:
#all move are cancelled
self.message_post(cr, uid, [procurement.id], body=_('All stock moves have been cancelled for this procurement.'), context=context)
self.write(cr, uid, [procurement.id], {'state': 'exception'}, context=context)
return False
return super(procurement_order, self)._check(cr, uid, procurement, context)
def do_view_pickings(self, cr, uid, ids, context=None):
'''
        This function returns an action that displays the pickings of the procurements
        belonging to the same procurement group as the given ids.
'''
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'stock', 'do_view_pickings')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
group_ids = set([proc.group_id.id for proc in self.browse(cr, uid, ids, context=context) if proc.group_id])
result['domain'] = "[('group_id','in',[" + ','.join(map(str, list(group_ids))) + "])]"
return result
def run_scheduler(self, cr, uid, use_new_cursor=False, context=None):
'''
Call the scheduler in order to check the running procurements (super method), to check the minimum stock rules
and the availability of moves. This function is intended to be run for all the companies at the same time, so
we run functions as SUPERUSER to avoid intercompanies and access rights issues.
@param self: The object pointer
@param cr: The current row, from the database cursor,
@param uid: The current user ID for security checks
@param ids: List of selected IDs
@param use_new_cursor: False or the dbname
@param context: A standard dictionary for contextual values
@return: Dictionary of values
'''
super(procurement_order, self).run_scheduler(cr, uid, use_new_cursor=use_new_cursor, context=context)
if context is None:
context = {}
try:
if use_new_cursor:
cr = openerp.registry(use_new_cursor).cursor()
move_obj = self.pool.get('stock.move')
#Minimum stock rules
company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
self._procure_orderpoint_confirm(cr, SUPERUSER_ID, use_new_cursor=False, company_id=company.id, context=context)
#Search all confirmed stock_moves and try to assign them
confirmed_ids = move_obj.search(cr, uid, [('state', '=', 'confirmed')], limit=None, order='picking_priority desc, date_expected asc', context=context)
for x in xrange(0, len(confirmed_ids), 100):
move_obj.action_assign(cr, uid, confirmed_ids[x:x + 100], context=context)
if use_new_cursor:
cr.commit()
if use_new_cursor:
cr.commit()
finally:
if use_new_cursor:
try:
cr.close()
except Exception:
pass
return {}
def _get_orderpoint_date_planned(self, cr, uid, orderpoint, start_date, context=None):
date_planned = start_date
return date_planned.strftime(DEFAULT_SERVER_DATE_FORMAT)
def _prepare_orderpoint_procurement(self, cr, uid, orderpoint, product_qty, context=None):
return {
'name': orderpoint.name,
'date_planned': self._get_orderpoint_date_planned(cr, uid, orderpoint, datetime.today(), context=context),
'product_id': orderpoint.product_id.id,
'product_qty': product_qty,
'company_id': orderpoint.company_id.id,
'product_uom': orderpoint.product_uom.id,
'location_id': orderpoint.location_id.id,
'origin': orderpoint.name,
'warehouse_id': orderpoint.warehouse_id.id,
'orderpoint_id': orderpoint.id,
'group_id': orderpoint.group_id.id,
}
def _product_virtual_get(self, cr, uid, order_point):
product_obj = self.pool.get('product.product')
return product_obj._product_available(cr, uid,
[order_point.product_id.id],
context={'location': order_point.location_id.id})[order_point.product_id.id]['virtual_available']
def _procure_orderpoint_confirm(self, cr, uid, use_new_cursor=False, company_id=False, context=None):
'''
Create procurement based on Orderpoint
use_new_cursor: False or the dbname
@return: Dictionary of values
"""
'''
if context is None:
context = {}
if use_new_cursor:
cr = openerp.registry(use_new_cursor).db.cursor()
orderpoint_obj = self.pool.get('stock.warehouse.orderpoint')
procurement_obj = self.pool.get('procurement.order')
offset = 0
ids = [1]
while ids:
ids = orderpoint_obj.search(cr, uid, [('company_id', '=', company_id)], offset=offset, limit=100)
for op in orderpoint_obj.browse(cr, uid, ids, context=context):
prods = self._product_virtual_get(cr, uid, op)
if prods is None:
continue
if prods < op.product_min_qty:
qty = max(op.product_min_qty, op.product_max_qty) - prods
reste = qty % op.qty_multiple
if reste > 0:
qty += op.qty_multiple - reste
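                    # Worked example (hypothetical numbers, for illustration only):
                    # min=10, max=50, virtual stock=7 -> qty = 50 - 7 = 43;
                    # with qty_multiple=12: reste = 43 % 12 = 7, so qty = 43 + (12 - 7) = 48.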
if qty <= 0:
continue
qty -= orderpoint_obj.subtract_procurements(cr, uid, op, context=context)
if qty > 0:
proc_id = procurement_obj.create(cr, uid,
self._prepare_orderpoint_procurement(cr, uid, op, qty, context=context),
context=context)
self.check(cr, uid, [proc_id])
self.run(cr, uid, [proc_id])
offset += len(ids)
if use_new_cursor:
cr.commit()
if use_new_cursor:
cr.commit()
cr.close()
return {}
| agpl-3.0 |
balthamos/plover | plover/gui/util.py | 7 | 1868 | # Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
import sys
if sys.platform.startswith('win32'):
import win32gui
GetForegroundWindow = win32gui.GetForegroundWindow
SetForegroundWindow = win32gui.SetForegroundWindow
def SetTopApp():
# Nothing else is necessary for windows.
pass
elif sys.platform.startswith('darwin'):
from Foundation import NSAppleScript
from AppKit import NSApp, NSApplication
def GetForegroundWindow():
return NSAppleScript.alloc().initWithSource_("""
tell application "System Events"
return unix id of first process whose frontmost = true
end tell""").executeAndReturnError_(None)[0].int32Value()
def SetForegroundWindow(pid):
NSAppleScript.alloc().initWithSource_("""
tell application "System Events"
set the frontmost of first process whose unix id is %d to true
end tell""" % pid).executeAndReturnError_(None)
def SetTopApp():
NSApplication.sharedApplication()
NSApp().activateIgnoringOtherApps_(True)
elif sys.platform.startswith('linux'):
from subprocess import call, check_output, CalledProcessError
def GetForegroundWindow():
try:
output = check_output(['xprop', '-root', '_NET_ACTIVE_WINDOW'])
return output.split()[-1]
except CalledProcessError:
return None
def SetForegroundWindow(w):
try:
call(['wmctrl', '-i', '-a', w])
except CalledProcessError:
pass
def SetTopApp():
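        # NOTE: TITLE is not defined or imported in this module; it is assumed to
        # be the application's window title, provided by the surrounding GUI code.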
try:
call(['wmctrl', '-a', TITLE])
except CalledProcessError:
pass
else:
# These functions are optional so provide a non-functional default
# implementation.
def GetForegroundWindow():
return None
def SetForegroundWindow(w):
pass
def SetTopApp():
pass
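# Rough usage sketch (hypothetical caller code, not part of this module):
#
#     previous = GetForegroundWindow()
#     SetTopApp()                      # bring the application to the front
#     ...                              # interact with it
#     if previous is not None:
#         SetForegroundWindow(previous)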
| gpl-2.0 |
duoduo369/leaning-frontend | node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/common_test.py | 2542 | 1970 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the common.py file."""
import gyp.common
import unittest
import sys
class TestTopologicallySorted(unittest.TestCase):
def test_Valid(self):
"""Test that sorting works on a valid graph with one possible order."""
graph = {
'a': ['b', 'c'],
'b': [],
'c': ['d'],
'd': ['b'],
}
def GetEdge(node):
return tuple(graph[node])
self.assertEqual(
gyp.common.TopologicallySorted(graph.keys(), GetEdge),
['a', 'c', 'd', 'b'])
def test_Cycle(self):
"""Test that an exception is thrown on a cyclic graph."""
graph = {
'a': ['b'],
'b': ['c'],
'c': ['d'],
'd': ['a'],
}
def GetEdge(node):
return tuple(graph[node])
self.assertRaises(
gyp.common.CycleError, gyp.common.TopologicallySorted,
graph.keys(), GetEdge)
class TestGetFlavor(unittest.TestCase):
"""Test that gyp.common.GetFlavor works as intended"""
original_platform = ''
def setUp(self):
self.original_platform = sys.platform
def tearDown(self):
sys.platform = self.original_platform
def assertFlavor(self, expected, argument, param):
sys.platform = argument
self.assertEqual(expected, gyp.common.GetFlavor(param))
def test_platform_default(self):
self.assertFlavor('freebsd', 'freebsd9' , {})
self.assertFlavor('freebsd', 'freebsd10', {})
self.assertFlavor('openbsd', 'openbsd5' , {})
    self.assertFlavor('solaris', 'sunos5' , {})
    self.assertFlavor('solaris', 'sunos' , {})
    self.assertFlavor('linux' , 'linux2' , {})
    self.assertFlavor('linux' , 'linux3' , {})
def test_param(self):
self.assertFlavor('foobar', 'linux2' , {'flavor': 'foobar'})
if __name__ == '__main__':
unittest.main()
| mit |
pjenvey/hellanzb | Hellanzb/Daemon.py | 2 | 28607 | """
Daemon (aka Ziplick) - Filesystem queue daemon functions. They're all called from inside
the twisted reactor loop, except for initialization functions
(c) Copyright 2005 Ben Bangert, Philip Jenvey
[See end of file]
"""
import os, re, sys, time, Hellanzb, PostProcessor, PostProcessorUtil
from shutil import copy, move, rmtree
from twisted.internet import reactor
from Hellanzb.HellaXMLRPC import initXMLRPCServer, HellaXMLRPCServer
from Hellanzb.Log import *
from Hellanzb.Logging import prettyException, LogOutputStream
from Hellanzb.NZBQueue import dequeueNZBs, recoverStateFromDisk, parseNZB, \
scanQueueDir, writeStateXML
from Hellanzb.Util import archiveName, daemonize, ensureDirs, getMsgId, hellaRename, \
isWindows, prettyElapsed, prettySize, touch, validNZB, IDPool
__id__ = '$Id$'
def ensureDaemonDirs():
""" Ensure that all the required directories exist and are writable, otherwise attempt to
create them """
dirNames = {}
for arg in dir(Hellanzb):
if arg.endswith("_DIR") and arg == arg.upper():
dirName = getattr(Hellanzb, arg)
if dirName is None:
raise FatalError('Required directory not defined in config file: Hellanzb.' + arg)
dirNames[arg] = dirName
ensureDirs(dirNames)
if hasattr(Hellanzb, 'QUEUE_LIST'):
if not hasattr(Hellanzb, 'STATE_XML_FILE'):
Hellanzb.STATE_XML_FILE = Hellanzb.QUEUE_LIST
if not hasattr(Hellanzb, 'STATE_XML_FILE'):
raise FatalError('Hellanzb.STATE_XML_FILE not defined in config file')
elif os.path.isfile(Hellanzb.STATE_XML_FILE) and not os.access(Hellanzb.STATE_XML_FILE, os.W_OK):
raise FatalError('hellanzb does not have write access to the Hellanzb.STATE_XML_FILE file')
def ensureCleanDir(dirName):
""" Nuke and recreate the specified directory """
# Clear out the old dir and create a fresh one
if os.path.exists(dirName):
if not os.access(dirName, os.W_OK):
raise FatalError('Cannot continue: hellanzb needs write access to ' + dirName)
rmtree(dirName)
# ensureDaemonDirs already guaranteed us write access to the parent TEMP_DIR
os.makedirs(dirName)
def ensureCleanDirs():
""" This must be called just after the XMLRPCServer initialization, thus it's separated
from ensureDaemonDirs(). We don't want to touch/nuke these kinds of dirs until we know
we are the only queue daemon running (if we aren't, initXMLRPCServer will throw an
exception) """
for var, dirName in {'DOWNLOAD_TEMP_DIR': 'download-tmp',
'DEQUEUED_NZBS_DIR': 'dequeued-nzbs'}.iteritems():
fullPath = os.path.join(Hellanzb.TEMP_DIR, dirName)
setattr(Hellanzb, var, fullPath)
try:
ensureCleanDir(fullPath)
except FatalError:
# del the var so Core.shutdown() does not attempt to rmtree() the dir
delattr(Hellanzb, var)
raise
except OSError, ose:
if var != 'DOWNLOAD_TEMP_DIR':
raise
def initDaemon():
""" Start the daemon """
Hellanzb.isDaemon = True
Hellanzb.nzbQueue = []
Hellanzb.queueDirIgnore = []
Hellanzb.loggedIdleMessage = True
try:
ensureDaemonDirs()
initXMLRPCServer()
ensureCleanDirs() # needs to be called AFTER initXMLRPCServer
except FatalError, fe:
error('Exiting', fe)
from Hellanzb.Core import shutdownAndExit
shutdownAndExit(1)
reactor.callLater(0, info, 'hellanzb - Now monitoring queue...')
reactor.callLater(0, notify, 'Queue', 'hellanzb', 'Now monitoring queue..', False)
# Twisted does not guarantee callLater(0, first); callLater(0, second) will run in
# that order: http://twistedmatrix.com/trac/ticket/1396
# This is especially problematic on some platforms (cygwin):
# http://hellanzb.com/trac/hellanzb/ticket/196
def recoverStateAndBegin():
recoverStateFromDisk()
resumePostProcessors()
scanQueueDir(True)
reactor.callLater(0, recoverStateAndBegin)
if not isWindows() and Hellanzb.DAEMONIZE:
daemonize()
if not isWindows() and hasattr(Hellanzb, 'UMASK'):
# umask here, as daemonize() might have just reset the value
os.umask(Hellanzb.UMASK)
if hasattr(Hellanzb, 'HELLAHELLA_CONFIG'):
initHellaHella(Hellanzb.HELLAHELLA_CONFIG)
from Hellanzb.NZBLeecher import initNZBLeecher, startNZBLeecher
initNZBLeecher()
startNZBLeecher()
def initHellaHella(configFile, verbose=False):
""" Initialize hellahella, the web UI """
Hellanzb.HELLAHELLA_PORT = 8750
try:
        import cgi
        import urlparse
from paste.deploy import loadapp
from twisted.web2.server import Request
def _parseURL(self):
if self.uri[0] == '/':
# Can't use urlparse for request_uri because urlparse
# wants to be given an absolute or relative URI, not just
# an abs_path, and thus gets '//foo' wrong.
self.scheme = self.host = self.path = self.params = self.querystring = ''
if '?' in self.uri:
self.path, self.querystring = self.uri.split('?', 1)
else:
self.path = self.uri
if ';' in self.path:
self.path, self.params = self.path.split(';', 1)
else:
# It is an absolute uri, use standard urlparse
(self.scheme, self.host, self.path,
self.params, self.querystring, fragment) = urlparse.urlparse(self.uri)
if self.querystring:
self.args = cgi.parse_qs(self.querystring, True)
else:
self.args = {}
####path = map(unquote, self.path[1:].split('/'))
path = self.path[1:].split('/')
if self._initialprepath:
# We were given an initial prepath -- this is for supporting
# CGI-ish applications where part of the path has already
# been processed
####prepath = map(unquote, self._initialprepath[1:].split('/'))
prepath = self._initialprepath[1:].split('/')
if path[:len(prepath)] == prepath:
self.prepath = prepath
self.postpath = path[len(prepath):]
else:
self.prepath = []
self.postpath = path
else:
self.prepath = []
self.postpath = path
Request._parseURL = _parseURL
twistedWeb01 = False
from twisted.application.service import Application
try:
# twistedWeb 0.1
from twisted.web2.http import HTTPFactory
twistedWeb01 = True
except ImportError:
# twistedWeb 0.2
from twisted.web2.channel import HTTPFactory
from twisted.web2.log import LogWrapperResource, DefaultCommonAccessLoggingObserver
from twisted.web2.server import Site
from twisted.web2.wsgi import FileWrapper, InputStream, ErrorStream, WSGIHandler, \
WSGIResource
# Munge the SCRIPT_NAME to '' when web2 makes it '/'
from twisted.web2.twcgi import createCGIEnvironment
if twistedWeb01:
def setupEnvironment(self, ctx, request):
# Called in IO thread
env = createCGIEnvironment(ctx, request)
if re.compile('\/+').search(env['SCRIPT_NAME']):
env['SCRIPT_NAME'] = ''
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = env['REQUEST_SCHEME']
env['wsgi.input'] = InputStream(request.stream)
env['wsgi.errors'] = ErrorStream()
env['wsgi.multithread'] = True
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
env['wsgi.file_wrapper'] = FileWrapper
self.environment = env
else:
def setupEnvironment(self, request):
# Called in IO thread
env = createCGIEnvironment(request)
if re.compile('\/+').search(env['SCRIPT_NAME']):
env['SCRIPT_NAME'] = ''
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = env['REQUEST_SCHEME']
env['wsgi.input'] = InputStream(request.stream)
env['wsgi.errors'] = ErrorStream()
env['wsgi.multithread'] = True
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
env['wsgi.file_wrapper'] = FileWrapper
self.environment = env
WSGIHandler.setupEnvironment = setupEnvironment
# incase pylons raises deprecation warnings during loadapp, redirect them to the
# debug log
oldStderr = sys.stderr
sys.stderr = LogOutputStream(debug)
# Load the wsgi app via paste
wsgiApp = loadapp('config:' + configFile)
sys.stderr = oldStderr
if verbose:
lwr = LogWrapperResource(WSGIResource(wsgiApp))
DefaultCommonAccessLoggingObserver().start()
Hellanzb.hhHTTPFactory = HTTPFactory(Site(lwr))
else:
Hellanzb.hhHTTPFactory = HTTPFactory(Site(WSGIResource(wsgiApp)))
reactor.listenTCP(Hellanzb.HELLAHELLA_PORT, Hellanzb.hhHTTPFactory)
except Exception, e:
error('Unable to load hellahella', e)
def resumePostProcessors():
""" Pickup left off Post Processors that were cancelled via CTRL-C """
# FIXME: with the new queue, could kill the processing dir sym links (for windows)
from Hellanzb.NZBLeecher.NZBModel import NZB
for archiveDirName in os.listdir(Hellanzb.PROCESSING_DIR):
if archiveDirName[0] == '.':
continue
archive = NZB.fromStateXML('processing', archiveDirName)
troll = PostProcessor.PostProcessor(archive)
info('Resuming post processor: ' + archiveName(archiveDirName))
troll.start()
def beginDownload(nzb=None):
""" Initialize the download. Notify the downloaders to begin their work, etc """
# BEGIN
Hellanzb.loggedIdleMessage = False
writeStateXML()
now = time.time()
if nzb:
nzb.downloadStartTime = now
# The scroll level will flood the console with constantly updating
# statistics -- the logging system can interrupt this scroll
# temporarily (after scrollBegin)
scrollBegin()
# Scan the queue dir intermittently during downloading. Reset the scanner delayed call
# if it's already going
if Hellanzb.downloadScannerID is not None and \
not Hellanzb.downloadScannerID.cancelled and \
not Hellanzb.downloadScannerID.called:
Hellanzb.downloadScannerID.cancel()
Hellanzb.downloadScannerID = reactor.callLater(5, scanQueueDir, False, True)
for nsf in Hellanzb.nsfs:
nsf.beginDownload()
Hellanzb.downloading = True
def endDownload():
""" Finished downloading """
Hellanzb.ht.rate = 0
sessionStartTime = None
sessionReadBytes = 0
for nsf in Hellanzb.nsfs:
sessionReadBytes += nsf.sessionReadBytes
if nsf.fillServerPriority == 0:
sessionStartTime = nsf.sessionStartTime
nsf.endDownload()
Hellanzb.downloading = False
Hellanzb.totalSpeed = 0
Hellanzb.scroller.currentLog = None
scrollEnd()
Hellanzb.downloadScannerID.cancel()
Hellanzb.totalArchivesDownloaded += 1
writeStateXML()
if not len(Hellanzb.queue.currentNZBs()):
# END
return
currentNZB = Hellanzb.queue.currentNZBs()[0]
downloadTime = time.time() - currentNZB.downloadStartTime
speed = sessionReadBytes / 1024.0 / downloadTime
info('Transferred %s in %s at %.1fKB/s (%s)' % \
(prettySize(sessionReadBytes), prettyElapsed(downloadTime), speed,
currentNZB.archiveName))
if not currentNZB.isParRecovery:
currentNZB.downloadTime = downloadTime
else:
currentNZB.downloadTime += downloadTime
# END
def disconnectUnAntiIdleFactories():
""" Disconnect antiIdle==0 factories when there's nothing left to download """
if len(Hellanzb.queue.nzbs):
return
# Nothing left to download. Immediately disconnect antiIdleTimeout factories
for nsf in Hellanzb.nsfs:
debug('Empty NZB queue: disconnecting %s (antiIdle is 0)' % nsf.serverPoolName)
if nsf.antiIdleTimeout == 0:
for client in nsf.clients:
client.transport.loseConnection()
# NOTE: WEIRD: after pool-coop branch, I have to force this to prevent
# fetchNextNZBSegment from re-calling the fetch loop (it gets called
# twice. the parseNZB->beginDownload->fetchNext call is made before
# the client gets to call connectionLost). or has this problem always
# existed??? See r403
client.isLoggedIn = False
client.deactivate()
def handleNZBDone(nzb):
""" Hand-off from the downloader -- make a dir for the NZB with its contents, then post
process it in a separate thread"""
disconnectUnAntiIdleFactories()
if nzb.downloadStartTime:
downloadAndDecodeTime = time.time() - nzb.downloadStartTime
if not nzb.isParRecovery:
nzb.downloadAndDecodeTime = downloadAndDecodeTime
else:
nzb.downloadAndDecodeTime += downloadAndDecodeTime
# Make our new directory, minus the .nzb
processingDir = os.path.join(Hellanzb.PROCESSING_DIR, nzb.archiveName)
# Move our nzb contents to their new location for post processing
hellaRename(processingDir)
move(Hellanzb.WORKING_DIR, processingDir)
nzb.destDir = processingDir
nzb.archiveDir = processingDir
nzbFileName = os.path.join(processingDir, os.path.basename(nzb.nzbFileName))
# We may have downloaded an NZB file of the same name:
# http://hellanzb.com/trac/hellanzb/ticket/425
hellaRename(nzbFileName)
move(nzb.nzbFileName, nzbFileName)
nzb.nzbFileName = nzbFileName
os.mkdir(Hellanzb.WORKING_DIR)
# The list of skipped pars is maintained in the state XML as only the subjects of the
# nzbFiles. PostProcessor only knows to look at the NZB.skippedParSubjects list,
# created here
nzb.skippedParSubjects = nzb.getSkippedParSubjects()
# Finally unarchive/process the directory in another thread, and continue
# nzbing
troll = PostProcessor.PostProcessor(nzb)
# Give NZBLeecher some time (another reactor loop) to killHistory() & scrollEnd()
# without any logging interference from PostProcessor
reactor.callLater(0, troll.start)
reactor.callLater(0, writeStateXML)
reactor.callLater(0, scanQueueDir)
def postProcess(options, isQueueDaemon=False):
""" Call the post processor via twisted """
from Hellanzb.Core import shutdown
if not os.path.isdir(options.postProcessDir):
error('Unable to process, not a directory: ' + options.postProcessDir)
shutdown()
return
if not os.access(options.postProcessDir, os.R_OK):
error('Unable to process, no read access to directory: ' + options.postProcessDir)
shutdown()
return
rarPassword = None
if options.rarPassword:
rarPassword = options.rarPassword
# UNIX: realpath
# FIXME: I don't recall why realpath is even necessary
dirName = os.path.realpath(options.postProcessDir)
archive = PostProcessorUtil.Archive(dirName, rarPassword=rarPassword)
troll = Hellanzb.PostProcessor.PostProcessor(archive, background=False)
reactor.callLater(0, info, '')
reactor.callLater(0, info, 'Starting post processor')
reactor.callLater(0, reactor.callInThread, troll.run)
if isQueueDaemon:
reactor.callLater(0, writeStateXML)
def isActive():
""" Whether or not we're actively downloading """
return len(Hellanzb.queue.currentNZBs()) > 0
def cancelCurrent():
""" Cancel the current d/l, remove the nzb. return False if there was nothing to cancel
"""
if not isActive():
return True
canceled = False
for nzb in Hellanzb.queue.currentNZBs():
# FIXME: should GC here
canceled = True
nzb.cancel()
os.remove(nzb.nzbFileName)
info('Canceling download: ' + nzb.archiveName)
Hellanzb.queue.cancel()
try:
hellaRename(os.path.join(Hellanzb.TEMP_DIR, 'canceled_WORKING_DIR'))
move(Hellanzb.WORKING_DIR, os.path.join(Hellanzb.TEMP_DIR, 'canceled_WORKING_DIR'))
os.mkdir(Hellanzb.WORKING_DIR)
rmtree(os.path.join(Hellanzb.TEMP_DIR, 'canceled_WORKING_DIR'))
except Exception, e:
error('Problem while canceling WORKING_DIR', e)
if not canceled:
debug('ERROR: isActive was True but canceled nothing (no active nzbs!??)')
for nsf in Hellanzb.nsfs:
clients = nsf.activeClients.copy()
for client in clients:
client.transport.loseConnection()
# NOTE: WEIRD: after pool-coop branch, I have to force this to prevent
# fetchNextNZBSegment from re-calling the fetch loop (it gets called
# twice. the parseNZB->beginDownload->fetchNext call is made before the client
# gets to call connectionLost). or has this problem always existed??? See r403
client.isLoggedIn = False
client.deactivate()
writeStateXML()
reactor.callLater(0, scanQueueDir)
return canceled
def pauseCurrent():
""" Pause the current download """
Hellanzb.downloadPaused = True
for nsf in Hellanzb.nsfs:
for client in nsf.clients:
client.transport.stopReading()
info('Pausing downloader')
return True
def continueCurrent():
""" Continue an already paused download """
if not Hellanzb.downloadPaused:
return True
resetConnections = 0
for nsf in Hellanzb.nsfs:
connectionCount = nsf.connectionCount
# XXX:
debug('%s: connectionCount: %i' % (nsf.serverPoolName, connectionCount))
for client in nsf.clients:
# When we pause a download, we simply stop reading from the socket. That
# causes the connection to become lost fairly quickly. When that happens a new
# client is created with the flag pauseReconnected=True. This new client acts
# normally (anti idles the connection, etc) except it does not enter the
# fetchNextNZBSegment loop. Thus when we encounter these clients we simply
# tell them to begin downloading
if client.pauseReconnected:
debug(str(client) + ' pauseReconnect')
client.pauseReconnected = False
reactor.callLater(0, client.fetchNextNZBSegment)
else:
# Otherwise this was a short pause, the connection hasn't been lost, and
# we can simply continue reading from the socket
debug(str(client) + ' startReading')
client.transport.startReading()
connectionCount -= 1
if nsf.clients:
resetConnections += connectionCount
# XXX:
debug('resetConnections(%s): %i: added %i' % (nsf.serverPoolName, resetConnections,
connectionCount))
# Reconnect (antiIdleTimeout == 0) NZBLeechers
if nsf.antiIdleTimeout == 0:
reconnecting = []
for connector in nsf.leecherConnectors:
# Reconnect if it's the main (fillserver==0) factory, or the
# client explicitly idled out during the connection
# (pauseIdledOut)
if nsf.fillServerPriority == 0 or getattr(connector, 'pauseIdledOut',
False):
debug(str(connector) + ' pauseIdledOut')
connector.pauseIdledOut = False
connector.connect()
reconnecting.append(connector)
for connector in reconnecting:
nsf.leecherConnectors.remove(connector)
resetConnections += len(reconnecting)
# XXX:
debug('>resetConnections(%s): %i: added %i' % (nsf.serverPoolName, resetConnections,
len(reconnecting)))
Hellanzb.downloadPaused = False
if resetConnections:
info('Continuing downloader (%i connections were reset)' % resetConnections)
else:
info('Continuing downloader')
def redoAssembly(nzbFile):
nzbFile.tryAssemble()
nzbFile.interruptedAssembly = False
for nzb in Hellanzb.queue.currentNZBs():
for nzbFile in nzb.nzbFiles:
if nzbFile.interruptedAssembly:
reactor.callInThread(redoAssembly, nzbFile)
return True
def clearCurrent(andCancel):
""" Clear the queue -- optionally clear what's currently being downloaded (cancel it) """
info('Clearing queue')
dequeueNZBs([nzb.id for nzb in Hellanzb.nzbQueue], quiet=True)
if andCancel:
cancelCurrent()
return True
def getRate():
""" Return the current MAX_RATE value """
return Hellanzb.ht.readLimit / 1024
def maxRate(rate):
""" Change the MAX_RATE value. Return the new value """
if rate == 'None' or rate is None:
rate = 0
else:
try:
rate = int(rate)
except:
return getRate()
if rate < 0:
rate = 0
info('Resetting MAX_RATE to: ' + str(rate) + 'KB/s')
rate = rate * 1024
restartCheckRead = False
if rate == 0:
if Hellanzb.ht.unthrottleReadsID is not None and \
not Hellanzb.ht.unthrottleReadsID.cancelled and \
not Hellanzb.ht.unthrottleReadsID.called:
Hellanzb.ht.unthrottleReadsID.cancel()
if Hellanzb.ht.checkReadBandwidthID is not None and \
not Hellanzb.ht.checkReadBandwidthID.cancelled:
Hellanzb.ht.checkReadBandwidthID.cancel()
Hellanzb.ht.unthrottleReads()
elif Hellanzb.ht.readLimit == 0 and rate > 0:
restartCheckRead = True
Hellanzb.ht.readLimit = rate
if restartCheckRead:
Hellanzb.ht.readThisSecond = 0 # nobody's been resetting this value
reactor.callLater(1, Hellanzb.ht.checkReadBandwidth)
return getRate()
def setRarPassword(nzbId, rarPassword):
""" Set the rarPassword on the specified NZB or NZB archive """
try:
nzbId = int(nzbId)
except:
debug('Invalid ID: ' + str(nzbId))
return False
# Find the nzbId in the queued list, processing list, or currently downloading nzb
found = None
for collection in (Hellanzb.queue.currentNZBs(), Hellanzb.postProcessors,
Hellanzb.nzbQueue):
for nzbOrArchive in collection:
if nzbOrArchive.id == nzbId:
found = nzbOrArchive
break
if found:
found.rarPassword = rarPassword
writeStateXML()
return True
return False
def forceNZBId(nzbId):
""" Interrupt the current download, if necessary, to start the specified nzb in the queue
"""
try:
nzbId = int(nzbId)
except:
debug('Invalid ID: ' + str(nzbId))
return False
foundNZB = None
for nzb in Hellanzb.nzbQueue:
if nzb.id == nzbId:
foundNZB = nzb
if not foundNZB:
return False
forceNZB(foundNZB.nzbFileName)
def forceNZB(nzbfilename, notification='Forcing download'):
""" Interrupt the current download, if necessary, to start the specified nzb """
if not validNZB(nzbfilename):
return
if not len(Hellanzb.queue.nzbs):
# No need to actually 'force'
from Hellanzb.NZBLeecher.NZBModel import NZB
return parseNZB(NZB(nzbfilename))
# postpone the current NZB download
for nzb in Hellanzb.queue.currentNZBs():
try:
info('Interrupting: ' + nzb.archiveName)
nzb.postpone()
# remove what we've forced with from the old queue, if it exists
nzb = None
for n in Hellanzb.nzbQueue:
if os.path.normpath(n.nzbFileName) == os.path.normpath(nzbfilename):
nzb = n
if nzb is None:
from Hellanzb.NZBLeecher.NZBModel import NZB
nzb = NZB(nzbfilename)
else:
Hellanzb.nzbQueue.remove(nzb)
# Copy the specified NZB, unless it's already in the queue dir (move it
# instead)
if os.path.normpath(os.path.dirname(nzbfilename)) != os.path.normpath(Hellanzb.QUEUE_DIR):
copy(nzbfilename, os.path.join(Hellanzb.CURRENT_DIR, os.path.basename(nzbfilename)))
else:
move(nzbfilename, os.path.join(Hellanzb.CURRENT_DIR, os.path.basename(nzbfilename)))
nzbfilename = os.path.join(Hellanzb.CURRENT_DIR, os.path.basename(nzbfilename))
nzb.nzbFileName = nzbfilename
# delete everything from the queue. priority will be reset
Hellanzb.queue.postpone()
# load the new file
reactor.callLater(0, parseNZB, nzb, notification)
except NameError, ne:
# GC beat us. that should mean there is either a free spot open, or the next
# nzb in the queue needs to be interrupted????
debug('forceNZB: NAME ERROR', ne)
reactor.callLater(0, scanQueueDir)
def forceNZBParRecover(nzb):
""" Immediately begin (force) downloading recovery blocks (only the nzb.neededBlocks
amount) for the specified NZB """
nzb.isParRecovery = True
if not len(Hellanzb.nzbQueue) and not len(Hellanzb.queue.currentNZBs()):
new = os.path.join(Hellanzb.CURRENT_DIR, os.path.basename(nzb.nzbFileName))
move(nzb.nzbFileName, new)
nzb.nzbFileName = new
# FIXME: Would be nice to include the number of needed recovery blocks in the
# growl notification this triggers
if Hellanzb.downloadScannerID is not None and \
not Hellanzb.downloadScannerID.cancelled and \
not Hellanzb.downloadScannerID.called:
Hellanzb.downloadScannerID.cancel()
nzb.destDir = Hellanzb.WORKING_DIR
parseNZB(nzb, 'Downloading recovery pars')
else:
Hellanzb.nzbQueue.insert(0, nzb)
forceNZB(nzb.nzbFileName, 'Forcing par recovery download')
"""
Copyright (c) 2005 Ben Bangert <[email protected]>
Philip Jenvey <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author or contributors may not be used to endorse or
promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
$Id$
"""
| bsd-3-clause |
havatv/QGIS | python/plugins/MetaSearch/dialogs/xmldialog.py | 30 | 1350 | # -*- coding: utf-8 -*-
###############################################################################
#
# CSW Client
# ---------------------------------------------------------
# QGIS Catalog Service client.
#
# Copyright (C) 2014 Tom Kralidis ([email protected])
#
# This source is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This code is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
###############################################################################
from qgis.PyQt.QtWidgets import QDialog
from MetaSearch.util import get_ui_class
BASE_CLASS = get_ui_class('xmldialog.ui')
class XMLDialog(QDialog, BASE_CLASS):
"""Raw XML Dialogue"""
def __init__(self):
"""init"""
QDialog.__init__(self)
self.setupUi(self)
| gpl-2.0 |
sysalexis/kbengine | kbe/src/lib/python/Lib/idlelib/tabbedpages.py | 86 | 18418 | """An implementation of tabbed pages using only standard Tkinter.
Originally developed for use in IDLE. Based on tabpage.py.
Classes exported:
TabbedPageSet -- A Tkinter implementation of a tabbed-page widget.
TabSet -- A widget containing tabs (buttons) in one or more rows.
"""
from tkinter import *
class InvalidNameError(Exception): pass
class AlreadyExistsError(Exception): pass
class TabSet(Frame):
"""A widget containing tabs (buttons) in one or more rows.
Only one tab may be selected at a time.
"""
def __init__(self, page_set, select_command,
tabs=None, n_rows=1, max_tabs_per_row=5,
expand_tabs=False, **kw):
"""Constructor arguments:
select_command -- A callable which will be called when a tab is
selected. It is called with the name of the selected tab as an
argument.
tabs -- A list of strings, the names of the tabs. Should be specified in
the desired tab order. The first tab will be the default and first
active tab. If tabs is None or empty, the TabSet will be initialized
empty.
n_rows -- Number of rows of tabs to be shown. If n_rows <= 0 or is
None, then the number of rows will be decided by TabSet. See
_arrange_tabs() for details.
max_tabs_per_row -- Used for deciding how many rows of tabs are needed,
when the number of rows is not constant. See _arrange_tabs() for
details.
"""
Frame.__init__(self, page_set, **kw)
self.select_command = select_command
self.n_rows = n_rows
self.max_tabs_per_row = max_tabs_per_row
self.expand_tabs = expand_tabs
self.page_set = page_set
self._tabs = {}
self._tab2row = {}
if tabs:
self._tab_names = list(tabs)
else:
self._tab_names = []
self._selected_tab = None
self._tab_rows = []
self.padding_frame = Frame(self, height=2,
borderwidth=0, relief=FLAT,
background=self.cget('background'))
self.padding_frame.pack(side=TOP, fill=X, expand=False)
self._arrange_tabs()
def add_tab(self, tab_name):
"""Add a new tab with the name given in tab_name."""
if not tab_name:
raise InvalidNameError("Invalid Tab name: '%s'" % tab_name)
if tab_name in self._tab_names:
raise AlreadyExistsError("Tab named '%s' already exists" %tab_name)
self._tab_names.append(tab_name)
self._arrange_tabs()
def remove_tab(self, tab_name):
"""Remove the tab named <tab_name>"""
if not tab_name in self._tab_names:
raise KeyError("No such Tab: '%s" % tab_name)
self._tab_names.remove(tab_name)
self._arrange_tabs()
def set_selected_tab(self, tab_name):
"""Show the tab named <tab_name> as the selected one"""
if tab_name == self._selected_tab:
return
if tab_name is not None and tab_name not in self._tabs:
raise KeyError("No such Tab: '%s" % tab_name)
# deselect the current selected tab
if self._selected_tab is not None:
self._tabs[self._selected_tab].set_normal()
self._selected_tab = None
if tab_name is not None:
# activate the tab named tab_name
self._selected_tab = tab_name
tab = self._tabs[tab_name]
tab.set_selected()
# move the tab row with the selected tab to the bottom
tab_row = self._tab2row[tab]
tab_row.pack_forget()
tab_row.pack(side=TOP, fill=X, expand=0)
def _add_tab_row(self, tab_names, expand_tabs):
if not tab_names:
return
tab_row = Frame(self)
tab_row.pack(side=TOP, fill=X, expand=0)
self._tab_rows.append(tab_row)
for tab_name in tab_names:
tab = TabSet.TabButton(tab_name, self.select_command,
tab_row, self)
if expand_tabs:
tab.pack(side=LEFT, fill=X, expand=True)
else:
tab.pack(side=LEFT)
self._tabs[tab_name] = tab
self._tab2row[tab] = tab_row
# tab is the last one created in the above loop
tab.is_last_in_row = True
def _reset_tab_rows(self):
while self._tab_rows:
tab_row = self._tab_rows.pop()
tab_row.destroy()
self._tab2row = {}
def _arrange_tabs(self):
"""
Arrange the tabs in rows, in the order in which they were added.
If n_rows >= 1, this will be the number of rows used. Otherwise the
number of rows will be calculated according to the number of tabs and
max_tabs_per_row. In this case, the number of rows may change when
adding/removing tabs.
"""
# remove all tabs and rows
while self._tabs:
self._tabs.popitem()[1].destroy()
self._reset_tab_rows()
if not self._tab_names:
return
if self.n_rows is not None and self.n_rows > 0:
n_rows = self.n_rows
else:
# calculate the required number of rows
n_rows = (len(self._tab_names) - 1) // self.max_tabs_per_row + 1
# not expanding the tabs with more than one row is very ugly
expand_tabs = self.expand_tabs or n_rows > 1
i = 0 # index in self._tab_names
for row_index in range(n_rows):
# calculate required number of tabs in this row
n_tabs = (len(self._tab_names) - i - 1) // (n_rows - row_index) + 1
tab_names = self._tab_names[i:i + n_tabs]
i += n_tabs
self._add_tab_row(tab_names, expand_tabs)
# re-select selected tab so it is properly displayed
selected = self._selected_tab
self.set_selected_tab(None)
if selected in self._tab_names:
self.set_selected_tab(selected)
class TabButton(Frame):
"""A simple tab-like widget."""
bw = 2 # borderwidth
def __init__(self, name, select_command, tab_row, tab_set):
"""Constructor arguments:
name -- The tab's name, which will appear in its button.
select_command -- The command to be called upon selection of the
tab. It is called with the tab's name as an argument.
"""
Frame.__init__(self, tab_row, borderwidth=self.bw, relief=RAISED)
self.name = name
self.select_command = select_command
self.tab_set = tab_set
self.is_last_in_row = False
self.button = Radiobutton(
self, text=name, command=self._select_event,
padx=5, pady=1, takefocus=FALSE, indicatoron=FALSE,
highlightthickness=0, selectcolor='', borderwidth=0)
self.button.pack(side=LEFT, fill=X, expand=True)
self._init_masks()
self.set_normal()
def _select_event(self, *args):
"""Event handler for tab selection.
With TabbedPageSet, this calls TabbedPageSet.change_page, so that
selecting a tab changes the page.
Note that this does -not- call set_selected -- it will be called by
TabSet.set_selected_tab, which should be called when whatever the
tabs are related to changes.
"""
self.select_command(self.name)
return
def set_selected(self):
"""Assume selected look"""
self._place_masks(selected=True)
def set_normal(self):
"""Assume normal look"""
self._place_masks(selected=False)
def _init_masks(self):
page_set = self.tab_set.page_set
background = page_set.pages_frame.cget('background')
# mask replaces the middle of the border with the background color
self.mask = Frame(page_set, borderwidth=0, relief=FLAT,
background=background)
# mskl replaces the bottom-left corner of the border with a normal
# left border
self.mskl = Frame(page_set, borderwidth=0, relief=FLAT,
background=background)
self.mskl.ml = Frame(self.mskl, borderwidth=self.bw,
relief=RAISED)
self.mskl.ml.place(x=0, y=-self.bw,
width=2*self.bw, height=self.bw*4)
# mskr replaces the bottom-right corner of the border with a normal
# right border
self.mskr = Frame(page_set, borderwidth=0, relief=FLAT,
background=background)
self.mskr.mr = Frame(self.mskr, borderwidth=self.bw,
relief=RAISED)
def _place_masks(self, selected=False):
height = self.bw
if selected:
height += self.bw
self.mask.place(in_=self,
relx=0.0, x=0,
rely=1.0, y=0,
relwidth=1.0, width=0,
relheight=0.0, height=height)
self.mskl.place(in_=self,
relx=0.0, x=-self.bw,
rely=1.0, y=0,
relwidth=0.0, width=self.bw,
relheight=0.0, height=height)
page_set = self.tab_set.page_set
if selected and ((not self.is_last_in_row) or
(self.winfo_rootx() + self.winfo_width() <
page_set.winfo_rootx() + page_set.winfo_width())
):
# for a selected tab, if its rightmost edge isn't on the
# rightmost edge of the page set, the right mask should be one
# borderwidth shorter (vertically)
height -= self.bw
self.mskr.place(in_=self,
relx=1.0, x=0,
rely=1.0, y=0,
relwidth=0.0, width=self.bw,
relheight=0.0, height=height)
self.mskr.mr.place(x=-self.bw, y=-self.bw,
width=2*self.bw, height=height + self.bw*2)
# finally, lower the tab set so that all of the frames we just
# placed hide it
self.tab_set.lower()
class TabbedPageSet(Frame):
"""A Tkinter tabbed-pane widget.
    Contains a set of 'pages' (or 'panes') with tabs above for selecting which
page is displayed. Only one page will be displayed at a time.
Pages may be accessed through the 'pages' attribute, which is a dictionary
of pages, using the name given as the key. A page is an instance of a
subclass of Tk's Frame widget.
The page widgets will be created (and destroyed when required) by the
TabbedPageSet. Do not call the page's pack/place/grid/destroy methods.
Pages may be added or removed at any time using the add_page() and
remove_page() methods.
"""
class Page(object):
"""Abstract base class for TabbedPageSet's pages.
Subclasses must override the _show() and _hide() methods.
"""
uses_grid = False
def __init__(self, page_set):
self.frame = Frame(page_set, borderwidth=2, relief=RAISED)
def _show(self):
raise NotImplementedError
def _hide(self):
raise NotImplementedError
class PageRemove(Page):
"""Page class using the grid placement manager's "remove" mechanism."""
uses_grid = True
def _show(self):
self.frame.grid(row=0, column=0, sticky=NSEW)
def _hide(self):
self.frame.grid_remove()
class PageLift(Page):
"""Page class using the grid placement manager's "lift" mechanism."""
uses_grid = True
def __init__(self, page_set):
super(TabbedPageSet.PageLift, self).__init__(page_set)
self.frame.grid(row=0, column=0, sticky=NSEW)
self.frame.lower()
def _show(self):
self.frame.lift()
def _hide(self):
self.frame.lower()
class PagePackForget(Page):
"""Page class using the pack placement manager's "forget" mechanism."""
def _show(self):
self.frame.pack(fill=BOTH, expand=True)
def _hide(self):
self.frame.pack_forget()
def __init__(self, parent, page_names=None, page_class=PageLift,
n_rows=1, max_tabs_per_row=5, expand_tabs=False,
**kw):
"""Constructor arguments:
page_names -- A list of strings, each will be the dictionary key to a
page's widget, and the name displayed on the page's tab. Should be
specified in the desired page order. The first page will be the default
and first active page. If page_names is None or empty, the
TabbedPageSet will be initialized empty.
n_rows, max_tabs_per_row -- Parameters for the TabSet which will
manage the tabs. See TabSet's docs for details.
page_class -- Pages can be shown/hidden using three mechanisms:
* PageLift - All pages will be rendered one on top of the other. When
a page is selected, it will be brought to the top, thus hiding all
other pages. Using this method, the TabbedPageSet will not be resized
when pages are switched. (It may still be resized when pages are
added/removed.)
* PageRemove - When a page is selected, the currently showing page is
hidden, and the new page shown in its place. Using this method, the
TabbedPageSet may resize when pages are changed.
* PagePackForget - This mechanism uses the pack placement manager.
When a page is shown it is packed, and when it is hidden it is
unpacked (i.e. pack_forget). This mechanism may also cause the
TabbedPageSet to resize when the page is changed.
"""
Frame.__init__(self, parent, **kw)
self.page_class = page_class
self.pages = {}
self._pages_order = []
self._current_page = None
self._default_page = None
self.columnconfigure(0, weight=1)
self.rowconfigure(1, weight=1)
self.pages_frame = Frame(self)
self.pages_frame.grid(row=1, column=0, sticky=NSEW)
if self.page_class.uses_grid:
self.pages_frame.columnconfigure(0, weight=1)
self.pages_frame.rowconfigure(0, weight=1)
# the order of the following commands is important
self._tab_set = TabSet(self, self.change_page, n_rows=n_rows,
max_tabs_per_row=max_tabs_per_row,
expand_tabs=expand_tabs)
if page_names:
for name in page_names:
self.add_page(name)
self._tab_set.grid(row=0, column=0, sticky=NSEW)
self.change_page(self._default_page)
def add_page(self, page_name):
"""Add a new page with the name given in page_name."""
if not page_name:
raise InvalidNameError("Invalid TabPage name: '%s'" % page_name)
if page_name in self.pages:
raise AlreadyExistsError(
"TabPage named '%s' already exists" % page_name)
self.pages[page_name] = self.page_class(self.pages_frame)
self._pages_order.append(page_name)
self._tab_set.add_tab(page_name)
if len(self.pages) == 1: # adding first page
self._default_page = page_name
self.change_page(page_name)
def remove_page(self, page_name):
"""Destroy the page whose name is given in page_name."""
        if page_name not in self.pages:
            raise KeyError("No such TabPage: '%s'" % page_name)
self._pages_order.remove(page_name)
# handle removing last remaining, default, or currently shown page
if len(self._pages_order) > 0:
if page_name == self._default_page:
# set a new default page
self._default_page = self._pages_order[0]
else:
self._default_page = None
if page_name == self._current_page:
self.change_page(self._default_page)
self._tab_set.remove_tab(page_name)
page = self.pages.pop(page_name)
page.frame.destroy()
def change_page(self, page_name):
"""Show the page whose name is given in page_name."""
if self._current_page == page_name:
return
if page_name is not None and page_name not in self.pages:
raise KeyError("No such TabPage: '%s'" % page_name)
if self._current_page is not None:
self.pages[self._current_page]._hide()
self._current_page = None
if page_name is not None:
self._current_page = page_name
self.pages[page_name]._show()
self._tab_set.set_selected_tab(page_name)
def _tabbed_pages(parent):
# test dialog
root=Tk()
width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
root.geometry("+%d+%d"%(x, y + 175))
root.title("Test tabbed pages")
tabPage=TabbedPageSet(root, page_names=['Foobar','Baz'], n_rows=0,
expand_tabs=False,
)
tabPage.pack(side=TOP, expand=TRUE, fill=BOTH)
Label(tabPage.pages['Foobar'].frame, text='Foo', pady=20).pack()
Label(tabPage.pages['Foobar'].frame, text='Bar', pady=20).pack()
Label(tabPage.pages['Baz'].frame, text='Baz').pack()
entryPgName=Entry(root)
buttonAdd=Button(root, text='Add Page',
command=lambda:tabPage.add_page(entryPgName.get()))
buttonRemove=Button(root, text='Remove Page',
command=lambda:tabPage.remove_page(entryPgName.get()))
labelPgName=Label(root, text='name of page to add/remove:')
buttonAdd.pack(padx=5, pady=5)
buttonRemove.pack(padx=5, pady=5)
labelPgName.pack(padx=5)
entryPgName.pack(padx=5)
root.mainloop()
if __name__ == '__main__':
from idlelib.idle_test.htest import run
run(_tabbed_pages)
| lgpl-3.0 |
arunkgupta/gramps | gramps/gen/filters/rules/person/_hasalternatename.py | 1 | 1794 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2010 Gramps
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# gen.filters.rules/Person/_HasAlternateName.py
# $Id$
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....ggettext import gettext as _
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .. import Rule
#-------------------------------------------------------------------------
#
# HasAlternateName
#
#-------------------------------------------------------------------------
class HasAlternateName(Rule):
"""Rule that checks an alternate name"""
name = _('People with an alternate name')
description = _("Matches people with an alternate name")
category = _('General filters')
def apply(self, db, person):
if person.get_alternate_names():
return True
else:
return False
| gpl-2.0 |
pfalcon/ScratchABlock | xform_inter.py | 1 | 2708 | # ScratchABlock - Program analysis and decompilation framework
#
# Copyright (c) 2015-2018 Paul Sokolovsky
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Interprocedural transformation passes"""
from graph import Graph
from core import is_addr
import progdb
from utils import maybesorted
import utils
def build_callgraph():
"Build program callgraph from progdb."
callgraph = Graph()
for addr, props in progdb.FUNC_DB_BY_ADDR.items():
callgraph.add_node(props["label"])
for addr, props in progdb.FUNC_DB_BY_ADDR.items():
for callee in props.get("calls", []):
if callee in callgraph:
callgraph.add_edge(props["label"], callee)
callgraph.number_postorder_forest()
return callgraph
def calc_callsites_live_out(cg, callee):
"""Calculate function's callsites_live_out property.
Go thru function's callers (using callgraph), and union their
calls_live_out information pertinent to this function.
"""
callers = maybesorted(cg.pred(callee))
# If there're no callers, will return empty set, which
# is formally correct - if there're no callers, the
# function is dead. However, realistically that means
# that callers aren't known, and we should treat that
# specially.
call_lo_union = set()
for c in callers:
clo = progdb.FUNC_DB[c].get("calls_live_out", [])
#print(" %s: calls_live_out: %s" % (c, utils.repr_stable(clo)))
for bbaddr, callee_expr, live_out in clo:
if is_addr(callee_expr) and callee_expr.addr == callee:
print(" %s: calls_live_out[%s]: %s" % (c, callee, utils.repr_stable((bbaddr, callee_expr, live_out))))
call_lo_union.update(live_out)
progdb.FUNC_DB[callee]["callsites_live_out"] = call_lo_union
return call_lo_union
def collect_returns():
import progdb
import arch
for addr, props in progdb.FUNC_DB.items():
if "modifieds" in props and "callsites_live_out" in props:
props["returns"] = arch.ret_filter(set(props["modifieds"]) & set(props["callsites_live_out"]))
| gpl-3.0 |
Parsl/parsl | parsl/tests/configs/comet.py | 1 | 1124 | from parsl.config import Config
from parsl.launchers import SrunLauncher
from parsl.providers import SlurmProvider
from parsl.executors import HighThroughputExecutor
from .user_opts import user_opts
def fresh_config():
return Config(
executors=[
HighThroughputExecutor(
label='Comet_HTEX_multinode',
max_workers=1,
provider=SlurmProvider(
'debug',
launcher=SrunLauncher(),
# string to prepend to #SBATCH blocks in the submit
# script to the scheduler
scheduler_options=user_opts['comet']['scheduler_options'],
# Command to be run before starting a worker, such as:
# 'module load Anaconda; source activate parsl_env'.
worker_init=user_opts['comet']['worker_init'],
walltime='00:10:00',
init_blocks=1,
max_blocks=1,
nodes_per_block=2,
),
)
]
)
config = fresh_config()
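# A minimal usage sketch (hedged: assumes user_opts is filled in for Comet and
# that a parsl runtime is available):
#
#   import parsl
#   parsl.load(fresh_config())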
| apache-2.0 |
visdesignlab/TulipPaths | tulippaths/connectivityMatrix.py | 2 | 8648 | from . import utils
from .path import *
from .pathFinder import *
import copy
class ConnectivityMatrix:
def __init__(self, graph):
self._graph = graph
self._paths = []
self._nodes = []
self._initialMatrix = []
self._matrix = []
self._rowLabels = []
self._colLabels = []
self._sourcesCollapsed = False
self._targetsCollapsed = False
def _activateMatrix(self, nodes):
self._nodes = list(set(nodes))
for i in range(0, len(self._nodes)):
row = []
for j in range(0, len(self._nodes)):
row.append([])
self._matrix.append(row)
for node in self._nodes:
self._rowLabels.append(utils.getNodeId(node, self._graph))
self._colLabels.append(utils.getNodeId(node, self._graph))
def _addPathToMatrix(self, path):
source = path.nodes[0]
target = path.nodes[len(path.nodes) - 1]
assert source in self._nodes and target in self._nodes, 'Found a path that is not in matrix. WTF?'
self._paths.append(path)
sourceIndex = self._getNodeIndex(source)
targetIndex = self._getNodeIndex(target)
self._matrix[sourceIndex][targetIndex].append(len(self._paths) - 1)
def _getNodeIndex(self, node):
return self._nodes.index(node)
def _getPathAsIndexes(self, path):
pathIndexes = []
for i in range(0, len(path.nodes)):
pathIndexes.append(int(path.nodes[i].id))
if i < len(path.edges):
pathIndexes.append(int(path.edges[i].id))
return pathIndexes
def _getUsedColIndexes(self):
usedColIndexes = []
for i in range(0, len(self._matrix)):
row = self._matrix[i]
for j in range(0, len(row)):
col = row[j]
                if len(col) != 0 and j not in usedColIndexes:
usedColIndexes.append(j)
return usedColIndexes
def _getUsedRowIndexes(self):
usedRowIndexes = []
for i in range(0, len(self._matrix)):
row = self._matrix[i]
for j in range(0, len(row)):
col = row[j]
                if len(col) != 0 and i not in usedRowIndexes:
usedRowIndexes.append(i)
return usedRowIndexes
def activate(self, nodeConstraints, edgeConstraints):
"""
Create a connectivity matrix for the given path node and edge constraints.
        _matrix[i][j] holds indexes to paths from _nodes[i] to _nodes[j]
The paths for these indexes can be found in _paths
:param nodeConstraints - regex of all nodes
:param edgeConstraints - regex of all edges
"""
# Matrix is N x N where N is the number of sources and targets.
sources = utils.getNodesByTypeRegex(nodeConstraints[0], self._graph)
targets = utils.getNodesByTypeRegex(nodeConstraints[len(nodeConstraints) - 1], self._graph)
nodes = sources + targets
self._activateMatrix(nodes)
# Find paths for each source. Shove them into the matrix.
for node in sources:
pathFinder = PathFinder(self._graph)
pathFinder.findRegexConstrainedPathsFromSource(node, edgeConstraints, nodeConstraints)
for path in pathFinder.valid:
self._addPathToMatrix(path)
# Cache the initial matrix.
self._initialMatrix = copy.deepcopy(self._matrix)
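    # A minimal usage sketch (hedged: the regex constraint strings are
    # hypothetical and must match node/edge types present in the Tulip graph;
    # two node regexes plus one edge regex describe a one-edge path):
    #
    #   matrix = ConnectivityMatrix(graph)
    #   matrix.activate(['CBb.*', 'GC'], ['Ribbon Synapse'])
    #   matrix.collapseTargets()
    #   result = matrix.getAsJsonObject(removeEmptyGridCells=True)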
def collapseSources(self):
"""
Updates _matrix s.t. all rows of the same label get collapsed to a single row.
"""
        if self._sourcesCollapsed:
            return
        # mark as collapsed so repeated calls are no-ops (mirrors collapseTargets)
        self._sourcesCollapsed = True
sourceTypes = utils.getNodesTypes(self._nodes, self._graph)
newMatrix = []
newCols = []
for node in self._nodes:
newCols.append(utils.getNodeId(node, self._graph))
for sourceType in sourceTypes:
newRow = [[] for node in self._nodes]
for i in range(0, len(self._matrix)):
rowType = utils.getNodeType(self._nodes[i], self._graph)
row = self._matrix[i]
if rowType == sourceType:
for j in range(0, len(row)):
col = row[j]
if len(col) > 0:
newRow[j] += col
newMatrix.append(newRow)
self._matrix = copy.deepcopy(newMatrix)
self._colLabels = newCols
self._rowLabels = sourceTypes
def collapseTargets(self):
"""
Updates _matrix s.t. all cols of the same label get collapsed to a single col.
"""
if self._targetsCollapsed:
return
self._targetsCollapsed = True
targetTypes = utils.getNodesTypes(self._nodes, self._graph)
newMatrix = []
# Initialize newMatrix
for i in range(0, len(self._matrix)):
newRow = []
for j in range(0, len(targetTypes)):
newRow.append([])
newMatrix.append(newRow)
# Populate new matrix
for i in range(0, len(self._matrix)):
row = self._matrix[i]
for j in range(0, len(row)):
col = row[j]
if len(col) > 0:
colType = utils.getNodeType(self._nodes[j], self._graph)
colIndex = targetTypes.index(colType)
newMatrix[i][colIndex] += col
self._matrix = copy.deepcopy(newMatrix)
self._colLabels = targetTypes
def getAsJsonObject(self, removeEmptyGridCells=False, replaceCellIdsWithIndexes=False):
newMatrix = []
rowLabels = []
colLabels = []
if replaceCellIdsWithIndexes and (self._targetsCollapsed or self._sourcesCollapsed):
assert False, "Cannot replace ids with indexes if sources or targets are collapsed"
if removeEmptyGridCells:
usedRows = self._getUsedRowIndexes()
usedCols = self._getUsedColIndexes()
newMatrix = []
for i in range(0, len(usedRows)):
row = []
for j in range(0, len(usedCols)):
row.append([])
newMatrix.append(row)
for i in range(0, len(self._matrix)):
if i in usedRows:
newRowIndex = usedRows.index(i)
row = self._matrix[i]
for j in range(0, len(row)):
if j in usedCols:
newColIndex = usedCols.index(j)
col = row[j]
pathList = []
for k in range(0, len(col)):
pathList.append(self._getPathAsIndexes(self._paths[col[k]]))
newMatrix[newRowIndex][newColIndex] = pathList
for rowIndex in usedRows:
rowLabels.append(self._rowLabels[rowIndex])
for colIndex in usedCols:
colLabels.append(self._colLabels[colIndex])
else:
for row in self._matrix:
newRow = []
for col in row:
pathList = []
for k in range(0, len(col)):
pathList.append(self._getPathAsIndexes(self._paths[col[k]]))
newRow.append(pathList)
newMatrix.append(newRow)
rowLabels = self._rowLabels
colLabels = self._colLabels
if replaceCellIdsWithIndexes:
newRowLabels = []
for label in rowLabels:
newRowLabels.append(int(utils.getNodeById(int(label), self._graph).id))
rowLabels = newRowLabels
newColLabels = []
for label in colLabels:
newColLabels.append(int(utils.getNodeById(int(label), self._graph).id))
colLabels = newColLabels
jsonObject = {}
jsonObject['row_labels'] = rowLabels
jsonObject['col_labels'] = colLabels
jsonObject['matrix'] = newMatrix
return jsonObject
def getPathAt(self, index):
return self._paths[index]
def reset(self):
self._matrix = copy.deepcopy(self._initialMatrix)
self._rowLabels = []
self._colLabels = []
for node in self._nodes:
self._rowLabels.append(utils.getNodeId(node, self._graph))
self._colLabels.append(utils.getNodeId(node, self._graph))
self._targetsCollapsed = False
self._sourcesCollapsed = False
| mit |
jeremiahmarks/sl4a | python-build/python-libs/gdata/src/gdata/alt/app_engine.py | 136 | 3386 | #!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides functions to persist serialized auth tokens in the datastore.
The get_token and set_token functions should be used in conjunction with
gdata.gauth's token_from_blob and token_to_blob to allow auth token objects
to be reused across requests. It is up to your own code to ensure that the
token keys are unique.
"""
__author__ = '[email protected] (Jeff Scudder)'
from google.appengine.ext import db
from google.appengine.api import memcache
class Token(db.Model):
"""Datastore Model which stores a serialized auth token."""
t = db.BlobProperty()
def get_token(unique_key):
"""Searches for a stored token with the desired key.
Checks memcache and then the datastore if required.
Args:
unique_key: str which uniquely identifies the desired auth token.
Returns:
A string encoding the auth token data. Use gdata.gauth.token_from_blob to
convert back into a usable token object. None if the token was not found
in memcache or the datastore.
"""
token_string = memcache.get(unique_key)
if token_string is None:
# The token wasn't in memcache, so look in the datastore.
token = Token.get_by_key_name(unique_key)
if token is None:
return None
return token.t
return token_string
def set_token(unique_key, token_str):
"""Saves the serialized auth token in the datastore.
The token is also stored in memcache to speed up retrieval on a cache hit.
Args:
unique_key: The unique name for this token as a string. It is up to your
code to ensure that this token value is unique in your application.
      Previous values will be silently overwritten.
token_str: A serialized auth token as a string. I expect that this string
will be generated by gdata.gauth.token_to_blob.
Returns:
    True if the token was stored successfully, False if the token could not be
safely cached (if an old value could not be cleared). If the token was
set in memcache, but not in the datastore, this function will return None.
However, in that situation an exception will likely be raised.
Raises:
Datastore exceptions may be raised from the App Engine SDK in the event of
failure.
"""
# First try to save in memcache.
result = memcache.set(unique_key, token_str)
# If memcache fails to save the value, clear the cached value.
if not result:
result = memcache.delete(unique_key)
# If we could not clear the cached value for this token, refuse to save.
if result == 0:
return False
# Save to the datastore.
if Token(key_name=unique_key, t=token_str).put():
return True
return None
def delete_token(unique_key):
# Clear from memcache.
memcache.delete(unique_key)
# Clear from the datastore.
Token(key_name=unique_key).delete()
| apache-2.0 |
tianzhidao28/pyspider | tests/test_fetcher.py | 63 | 12804 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<[email protected]>
# http://binux.me
# Created on 2014-02-15 22:10:35
import os
import json
import copy
import time
import umsgpack
import subprocess
import unittest2 as unittest
from multiprocessing import Queue
import logging
import logging.config
logging.config.fileConfig("pyspider/logging.conf")
try:
from six.moves import xmlrpc_client
except ImportError:
import xmlrpclib as xmlrpc_client
from pyspider.libs import utils
from pyspider.libs.response import rebuild_response
from pyspider.fetcher.tornado_fetcher import Fetcher
class TestFetcher(unittest.TestCase):
sample_task_http = {
'taskid': 'taskid',
'project': 'project',
'url': '',
'fetch': {
'method': 'GET',
'headers': {
'Cookie': 'a=b',
'a': 'b'
},
'cookies': {
'c': 'd',
},
'timeout': 60,
'save': 'abc',
},
'process': {
'callback': 'callback',
'save': [1, 2, 3],
},
}
@classmethod
def setUpClass(self):
import tests.data_test_webpage
import httpbin
self.httpbin_thread = utils.run_in_subprocess(httpbin.app.run, port=14887)
self.httpbin = 'http://127.0.0.1:14887'
self.inqueue = Queue(10)
self.outqueue = Queue(10)
self.fetcher = Fetcher(self.inqueue, self.outqueue)
self.fetcher.phantomjs_proxy = '127.0.0.1:25555'
self.rpc = xmlrpc_client.ServerProxy('http://localhost:%d' % 24444)
self.xmlrpc_thread = utils.run_in_thread(self.fetcher.xmlrpc_run, port=24444)
self.thread = utils.run_in_thread(self.fetcher.run)
self.proxy_thread = subprocess.Popen(['pyproxy', '--username=binux',
'--password=123456', '--port=14830',
'--debug'], close_fds=True)
self.proxy = '127.0.0.1:14830'
try:
self.phantomjs = subprocess.Popen(['phantomjs',
os.path.join(os.path.dirname(__file__),
'../pyspider/fetcher/phantomjs_fetcher.js'),
'25555'])
except OSError:
self.phantomjs = None
time.sleep(0.5)
@classmethod
def tearDownClass(self):
self.proxy_thread.terminate()
self.proxy_thread.wait()
self.httpbin_thread.terminate()
self.httpbin_thread.join()
if self.phantomjs:
self.phantomjs.kill()
self.phantomjs.wait()
self.rpc._quit()
self.thread.join()
time.sleep(1)
def test_10_http_get(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/get'
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 200, result)
self.assertEqual(response.orig_url, request['url'])
self.assertEqual(response.save, request['fetch']['save'])
self.assertIsNotNone(response.json, response.content)
self.assertEqual(response.json['headers'].get('A'), 'b', response.json)
self.assertIn('c=d', response.json['headers'].get('Cookie'), response.json)
self.assertIn('a=b', response.json['headers'].get('Cookie'), response.json)
def test_15_http_post(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/post'
request['fetch']['method'] = 'POST'
request['fetch']['data'] = 'binux'
request['fetch']['cookies'] = {'c': 'd'}
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.orig_url, request['url'])
self.assertEqual(response.save, request['fetch']['save'])
self.assertIsNotNone(response.json, response.content)
self.assertEqual(response.json['form'].get('binux'), '')
self.assertEqual(response.json['headers'].get('A'), 'b', response.json)
self.assertIn('c=d', response.json['headers'].get('Cookie'), response.json)
self.assertIn('a=b', response.json['headers'].get('Cookie'), response.json)
def test_20_dataurl_get(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = 'data:,hello'
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.text, 'hello')
def test_30_with_queue(self):
request= copy.deepcopy(self.sample_task_http)
request['url'] = 'data:,hello'
self.inqueue.put(request)
task, result = self.outqueue.get()
response = rebuild_response(result)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.text, 'hello')
def test_40_with_rpc(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = 'data:,hello'
result = umsgpack.unpackb(self.rpc.fetch(request).data)
response = rebuild_response(result)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.text, 'hello')
def test_50_base64_data(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/post'
request['fetch']['method'] = 'POST'
        # the UTF-8 encoding of u'中文' ("Chinese")
request['fetch']['data'] = "[BASE64-DATA]5Lit5paH[/BASE64-DATA]"
self.inqueue.put(request)
task, result = self.outqueue.get()
response = rebuild_response(result)
self.assertEqual(response.status_code, 200, response.error)
self.assertIsNotNone(response.json, response.content)
self.assertIn(u'中文', response.json['form'], response.json)
def test_55_base64_data(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/post'
request['fetch']['method'] = 'POST'
        # the GBK encoding of u'中文' ("Chinese")
request['fetch']['data'] = "[BASE64-DATA]1tDOxA==[/BASE64-DATA]"
self.inqueue.put(request)
task, result = self.outqueue.get()
response = rebuild_response(result)
self.assertEqual(response.status_code, 200, response.error)
self.assertIsNotNone(response.json, response.content)
def test_60_timeout(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/delay/5'
request['fetch']['timeout'] = 3
start_time = time.time()
self.inqueue.put(request)
task, result = self.outqueue.get()
end_time = time.time()
self.assertGreater(end_time - start_time, 1.5)
self.assertLess(end_time - start_time, 4.5)
def test_65_418(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/status/418'
self.inqueue.put(request)
task, result = self.outqueue.get()
response = rebuild_response(result)
self.assertEqual(response.status_code, 418)
self.assertIn('teapot', response.text)
def test_70_phantomjs_url(self):
if not self.phantomjs:
raise unittest.SkipTest('no phantomjs')
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin + '/get'
request['fetch']['fetch_type'] = 'js'
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 200, result)
self.assertEqual(response.orig_url, request['url'])
self.assertEqual(response.save, request['fetch']['save'])
data = json.loads(response.doc('pre').text())
self.assertIsNotNone(data, response.content)
self.assertEqual(data['headers'].get('A'), 'b', response.json)
self.assertEqual(data['headers'].get('Cookie'), 'c=d', response.json)
def test_80_phantomjs_timeout(self):
if not self.phantomjs:
raise unittest.SkipTest('no phantomjs')
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/delay/5'
request['fetch']['fetch_type'] = 'js'
request['fetch']['timeout'] = 3
start_time = time.time()
result = self.fetcher.sync_fetch(request)
end_time = time.time()
self.assertGreater(end_time - start_time, 2)
self.assertLess(end_time - start_time, 5)
def test_90_phantomjs_js_script(self):
if not self.phantomjs:
raise unittest.SkipTest('no phantomjs')
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin + '/html'
request['fetch']['fetch_type'] = 'js'
request['fetch']['js_script'] = 'function() { document.write("binux") }'
result = self.fetcher.sync_fetch(request)
self.assertEqual(result['status_code'], 200)
self.assertIn('binux', result['content'])
def test_a100_phantomjs_sharp_url(self):
if not self.phantomjs:
raise unittest.SkipTest('no phantomjs')
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/pyspider/ajax.html'
request['fetch']['fetch_type'] = 'js'
request['fetch']['headers']['User-Agent'] = 'pyspider-test'
result = self.fetcher.sync_fetch(request)
self.assertEqual(result['status_code'], 200)
self.assertNotIn('loading', result['content'])
self.assertIn('done', result['content'])
self.assertIn('pyspider-test', result['content'])
def test_a110_dns_error(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = 'http://www.not-exists-site.com/'
result = self.fetcher.sync_fetch(request)
self.assertEqual(result['status_code'], 599)
self.assertIn('error', result)
self.assertIn('resolve', result['error'])
self.inqueue.put(request)
task, result = self.outqueue.get()
self.assertEqual(result['status_code'], 599)
self.assertIn('error', result)
self.assertIn('resolve', result['error'])
def test_a120_http_get_with_proxy_fail(self):
self.fetcher.proxy = self.proxy
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/get'
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 403, result)
self.fetcher.proxy = None
def test_a130_http_get_with_proxy_ok(self):
self.fetcher.proxy = self.proxy
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/get?username=binux&password=123456'
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 200, result)
self.assertEqual(response.orig_url, request['url'])
self.assertEqual(response.save, request['fetch']['save'])
self.assertIsNotNone(response.json, response.content)
self.assertEqual(response.json['headers'].get('A'), 'b', response.json)
self.assertIn('c=d', response.json['headers'].get('Cookie'), response.json)
self.assertIn('a=b', response.json['headers'].get('Cookie'), response.json)
self.fetcher.proxy = None
def test_a140_redirect(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/redirect-to?url=/get'
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 200, result)
self.assertEqual(response.orig_url, request['url'])
self.assertEqual(response.url, self.httpbin+'/get')
def test_a150_too_much_redirect(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/redirect/10'
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 599, result)
self.assertIn('redirects followed', response.error)
def test_a160_cookie(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/cookies/set?k1=v1&k2=v2'
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 200, result)
self.assertEqual(response.cookies, {'a': 'b', 'k1': 'v1', 'k2': 'v2', 'c': 'd'}, result)
| apache-2.0 |
kenshay/ImageScript | ProgramData/Android/ADB/platform-tools/systrace/catapult/telemetry/third_party/web-page-replay/third_party/dns/namedict.py | 248 | 2107 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS name dictionary"""
import dns.name
class NameDict(dict):
"""A dictionary whose keys are dns.name.Name objects.
@ivar max_depth: the maximum depth of the keys that have ever been
added to the dictionary.
@type max_depth: int
"""
def __init__(self, *args, **kwargs):
super(NameDict, self).__init__(*args, **kwargs)
self.max_depth = 0
def __setitem__(self, key, value):
if not isinstance(key, dns.name.Name):
raise ValueError('NameDict key must be a name')
depth = len(key)
if depth > self.max_depth:
self.max_depth = depth
super(NameDict, self).__setitem__(key, value)
def get_deepest_match(self, name):
"""Find the deepest match to I{name} in the dictionary.
The deepest match is the longest name in the dictionary which is
a superdomain of I{name}.
@param name: the name
@type name: dns.name.Name object
@rtype: (key, value) tuple
"""
depth = len(name)
if depth > self.max_depth:
depth = self.max_depth
for i in xrange(-depth, 0):
n = dns.name.Name(name[i:])
if self.has_key(n):
return (n, self[n])
v = self[dns.name.empty]
return (dns.name.empty, v)
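# A minimal behavioural sketch of get_deepest_match (hedged: the zone names
# are hypothetical):
#
#   d = NameDict()
#   d[dns.name.from_text('example.com.')] = 'example-zone'
#   d[dns.name.empty] = 'root'
#   key, value = d.get_deepest_match(dns.name.from_text('www.example.com.'))
#   # key == dns.name.from_text('example.com.'), value == 'example-zone'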
| gpl-3.0 |
nan86150/ImageFusion | lib/python2.7/site-packages/matplotlib/tests/__init__.py | 17 | 2578 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import difflib
import os
from matplotlib import rcParams, rcdefaults, use
_multiprocess_can_split_ = True
# Check that the test directories exist
if not os.path.exists(os.path.join(
os.path.dirname(__file__), 'baseline_images')):
raise IOError(
'The baseline image directory does not exist. '
'This is most likely because the test data is not installed. '
'You may need to install matplotlib from source to get the '
'test data.')
def setup():
# The baseline images are created in this locale, so we should use
# it during all of the tests.
import locale
import warnings
from matplotlib.backends import backend_agg, backend_pdf, backend_svg
try:
locale.setlocale(locale.LC_ALL, str('en_US.UTF-8'))
except locale.Error:
try:
locale.setlocale(locale.LC_ALL, str('English_United States.1252'))
except locale.Error:
warnings.warn(
"Could not set locale to English/United States. "
"Some date-related tests may fail")
use('Agg', warn=False) # use Agg backend for these tests
# These settings *must* be hardcoded for running the comparison
# tests and are not necessarily the default values as specified in
# rcsetup.py
rcdefaults() # Start with all defaults
rcParams['font.family'] = 'Bitstream Vera Sans'
rcParams['text.hinting'] = False
rcParams['text.hinting_factor'] = 8
# Clear the font caches. Otherwise, the hinting mode can travel
# from one test to another.
backend_agg.RendererAgg._fontd.clear()
backend_pdf.RendererPdf.truetype_font_cache.clear()
backend_svg.RendererSVG.fontd.clear()
def assert_str_equal(reference_str, test_str,
format_str=('String {str1} and {str2} do not '
'match:\n{differences}')):
"""
Assert the two strings are equal. If not, fail and print their
diffs using difflib.
"""
if reference_str != test_str:
diff = difflib.unified_diff(reference_str.splitlines(1),
test_str.splitlines(1),
'Reference', 'Test result',
'', '', 0)
raise ValueError(format_str.format(str1=reference_str,
str2=test_str,
differences=''.join(diff)))
| mit |
rnder/data-science-from-scratch | code/ch15_multiple_regression.py | 60 | 8590 | from __future__ import division
from collections import Counter
from functools import partial
from linear_algebra import dot, vector_add
from statistics import median, standard_deviation
from probability import normal_cdf
from gradient_descent import minimize_stochastic
from simple_linear_regression import total_sum_of_squares
import math, random
def predict(x_i, beta):
return dot(x_i, beta)
def error(x_i, y_i, beta):
return y_i - predict(x_i, beta)
def squared_error(x_i, y_i, beta):
return error(x_i, y_i, beta) ** 2
def squared_error_gradient(x_i, y_i, beta):
"""the gradient corresponding to the ith squared error term"""
return [-2 * x_ij * error(x_i, y_i, beta)
for x_ij in x_i]
def estimate_beta(x, y):
beta_initial = [random.random() for x_i in x[0]]
return minimize_stochastic(squared_error,
squared_error_gradient,
x, y,
beta_initial,
0.001)
def multiple_r_squared(x, y, beta):
sum_of_squared_errors = sum(error(x_i, y_i, beta) ** 2
for x_i, y_i in zip(x, y))
return 1.0 - sum_of_squared_errors / total_sum_of_squares(y)
def bootstrap_sample(data):
"""randomly samples len(data) elements with replacement"""
return [random.choice(data) for _ in data]
def bootstrap_statistic(data, stats_fn, num_samples):
"""evaluates stats_fn on num_samples bootstrap samples from data"""
return [stats_fn(bootstrap_sample(data))
for _ in range(num_samples)]
def estimate_sample_beta(sample):
x_sample, y_sample = zip(*sample) # magic unzipping trick
return estimate_beta(x_sample, y_sample)
def p_value(beta_hat_j, sigma_hat_j):
if beta_hat_j > 0:
return 2 * (1 - normal_cdf(beta_hat_j / sigma_hat_j))
else:
return 2 * normal_cdf(beta_hat_j / sigma_hat_j)
#
# REGULARIZED REGRESSION
#
# alpha is a *hyperparameter* controlling how harsh the penalty is
# sometimes it's called "lambda" but that already means something in Python
def ridge_penalty(beta, alpha):
return alpha * dot(beta[1:], beta[1:])
def squared_error_ridge(x_i, y_i, beta, alpha):
"""estimate error plus ridge penalty on beta"""
return error(x_i, y_i, beta) ** 2 + ridge_penalty(beta, alpha)
def ridge_penalty_gradient(beta, alpha):
"""gradient of just the ridge penalty"""
return [0] + [2 * alpha * beta_j for beta_j in beta[1:]]
def squared_error_ridge_gradient(x_i, y_i, beta, alpha):
"""the gradient corresponding to the ith squared error term
including the ridge penalty"""
return vector_add(squared_error_gradient(x_i, y_i, beta),
ridge_penalty_gradient(beta, alpha))
def estimate_beta_ridge(x, y, alpha):
"""use gradient descent to fit a ridge regression
with penalty alpha"""
beta_initial = [random.random() for x_i in x[0]]
return minimize_stochastic(partial(squared_error_ridge, alpha=alpha),
partial(squared_error_ridge_gradient,
alpha=alpha),
x, y,
beta_initial,
0.001)
def lasso_penalty(beta, alpha):
return alpha * sum(abs(beta_i) for beta_i in beta[1:])
if __name__ == "__main__":
x = [[1,49,4,0],[1,41,9,0],[1,40,8,0],[1,25,6,0],[1,21,1,0],[1,21,0,0],[1,19,3,0],[1,19,0,0],[1,18,9,0],[1,18,8,0],[1,16,4,0],[1,15,3,0],[1,15,0,0],[1,15,2,0],[1,15,7,0],[1,14,0,0],[1,14,1,0],[1,13,1,0],[1,13,7,0],[1,13,4,0],[1,13,2,0],[1,12,5,0],[1,12,0,0],[1,11,9,0],[1,10,9,0],[1,10,1,0],[1,10,1,0],[1,10,7,0],[1,10,9,0],[1,10,1,0],[1,10,6,0],[1,10,6,0],[1,10,8,0],[1,10,10,0],[1,10,6,0],[1,10,0,0],[1,10,5,0],[1,10,3,0],[1,10,4,0],[1,9,9,0],[1,9,9,0],[1,9,0,0],[1,9,0,0],[1,9,6,0],[1,9,10,0],[1,9,8,0],[1,9,5,0],[1,9,2,0],[1,9,9,0],[1,9,10,0],[1,9,7,0],[1,9,2,0],[1,9,0,0],[1,9,4,0],[1,9,6,0],[1,9,4,0],[1,9,7,0],[1,8,3,0],[1,8,2,0],[1,8,4,0],[1,8,9,0],[1,8,2,0],[1,8,3,0],[1,8,5,0],[1,8,8,0],[1,8,0,0],[1,8,9,0],[1,8,10,0],[1,8,5,0],[1,8,5,0],[1,7,5,0],[1,7,5,0],[1,7,0,0],[1,7,2,0],[1,7,8,0],[1,7,10,0],[1,7,5,0],[1,7,3,0],[1,7,3,0],[1,7,6,0],[1,7,7,0],[1,7,7,0],[1,7,9,0],[1,7,3,0],[1,7,8,0],[1,6,4,0],[1,6,6,0],[1,6,4,0],[1,6,9,0],[1,6,0,0],[1,6,1,0],[1,6,4,0],[1,6,1,0],[1,6,0,0],[1,6,7,0],[1,6,0,0],[1,6,8,0],[1,6,4,0],[1,6,2,1],[1,6,1,1],[1,6,3,1],[1,6,6,1],[1,6,4,1],[1,6,4,1],[1,6,1,1],[1,6,3,1],[1,6,4,1],[1,5,1,1],[1,5,9,1],[1,5,4,1],[1,5,6,1],[1,5,4,1],[1,5,4,1],[1,5,10,1],[1,5,5,1],[1,5,2,1],[1,5,4,1],[1,5,4,1],[1,5,9,1],[1,5,3,1],[1,5,10,1],[1,5,2,1],[1,5,2,1],[1,5,9,1],[1,4,8,1],[1,4,6,1],[1,4,0,1],[1,4,10,1],[1,4,5,1],[1,4,10,1],[1,4,9,1],[1,4,1,1],[1,4,4,1],[1,4,4,1],[1,4,0,1],[1,4,3,1],[1,4,1,1],[1,4,3,1],[1,4,2,1],[1,4,4,1],[1,4,4,1],[1,4,8,1],[1,4,2,1],[1,4,4,1],[1,3,2,1],[1,3,6,1],[1,3,4,1],[1,3,7,1],[1,3,4,1],[1,3,1,1],[1,3,10,1],[1,3,3,1],[1,3,4,1],[1,3,7,1],[1,3,5,1],[1,3,6,1],[1,3,1,1],[1,3,6,1],[1,3,10,1],[1,3,2,1],[1,3,4,1],[1,3,2,1],[1,3,1,1],[1,3,5,1],[1,2,4,1],[1,2,2,1],[1,2,8,1],[1,2,3,1],[1,2,1,1],[1,2,9,1],[1,2,10,1],[1,2,9,1],[1,2,4,1],[1,2,5,1],[1,2,0,1],[1,2,9,1],[1,2,9,1],[1,2,0,1],[1,2,1,1],[1,2,1,1],[1,2,4,1],[1,1,0,1],[1,1,2,1],[1,1,2,1],[1,1,5,1],[1,1,3,1],[1,1,10,1],[1,1,6,1],[1,1,0,1],[1,1,8,1],[1,1,6,1],[1,1,4,1],[1,1,9,1],[1,1,9,1],[1,1,4,1],[1,1,2,1],[1,1,9,1],[1,1,0,1],[1,1,8,1],[1,1,6,1],[1,1,1,1],[1,1,1,1],[1,1,5,1]]
daily_minutes_good = [68.77,51.25,52.08,38.36,44.54,57.13,51.4,41.42,31.22,34.76,54.01,38.79,47.59,49.1,27.66,41.03,36.73,48.65,28.12,46.62,35.57,32.98,35,26.07,23.77,39.73,40.57,31.65,31.21,36.32,20.45,21.93,26.02,27.34,23.49,46.94,30.5,33.8,24.23,21.4,27.94,32.24,40.57,25.07,19.42,22.39,18.42,46.96,23.72,26.41,26.97,36.76,40.32,35.02,29.47,30.2,31,38.11,38.18,36.31,21.03,30.86,36.07,28.66,29.08,37.28,15.28,24.17,22.31,30.17,25.53,19.85,35.37,44.6,17.23,13.47,26.33,35.02,32.09,24.81,19.33,28.77,24.26,31.98,25.73,24.86,16.28,34.51,15.23,39.72,40.8,26.06,35.76,34.76,16.13,44.04,18.03,19.65,32.62,35.59,39.43,14.18,35.24,40.13,41.82,35.45,36.07,43.67,24.61,20.9,21.9,18.79,27.61,27.21,26.61,29.77,20.59,27.53,13.82,33.2,25,33.1,36.65,18.63,14.87,22.2,36.81,25.53,24.62,26.25,18.21,28.08,19.42,29.79,32.8,35.99,28.32,27.79,35.88,29.06,36.28,14.1,36.63,37.49,26.9,18.58,38.48,24.48,18.95,33.55,14.24,29.04,32.51,25.63,22.22,19,32.73,15.16,13.9,27.2,32.01,29.27,33,13.74,20.42,27.32,18.23,35.35,28.48,9.08,24.62,20.12,35.26,19.92,31.02,16.49,12.16,30.7,31.22,34.65,13.13,27.51,33.2,31.57,14.1,33.42,17.44,10.12,24.42,9.82,23.39,30.93,15.03,21.67,31.09,33.29,22.61,26.89,23.48,8.38,27.81,32.35,23.84]
random.seed(0)
beta = estimate_beta(x, daily_minutes_good) # [30.63, 0.972, -1.868, 0.911]
print "beta", beta
print "r-squared", multiple_r_squared(x, daily_minutes_good, beta)
print
print "digression: the bootstrap"
# 101 points all very close to 100
close_to_100 = [99.5 + random.random() for _ in range(101)]
# 101 points, 50 of them near 0, 50 of them near 200
far_from_100 = ([99.5 + random.random()] +
[random.random() for _ in range(50)] +
[200 + random.random() for _ in range(50)])
print "bootstrap_statistic(close_to_100, median, 100):"
print bootstrap_statistic(close_to_100, median, 100)
print "bootstrap_statistic(far_from_100, median, 100):"
print bootstrap_statistic(far_from_100, median, 100)
print
random.seed(0) # so that you get the same results as me
bootstrap_betas = bootstrap_statistic(zip(x, daily_minutes_good),
estimate_sample_beta,
100)
bootstrap_standard_errors = [
standard_deviation([beta[i] for beta in bootstrap_betas])
for i in range(4)]
print "bootstrap standard errors", bootstrap_standard_errors
print
print "p_value(30.63, 1.174)", p_value(30.63, 1.174)
print "p_value(0.972, 0.079)", p_value(0.972, 0.079)
print "p_value(-1.868, 0.131)", p_value(-1.868, 0.131)
print "p_value(0.911, 0.990)", p_value(0.911, 0.990)
print
print "regularization"
random.seed(0)
for alpha in [0.0, 0.01, 0.1, 1, 10]:
beta = estimate_beta_ridge(x, daily_minutes_good, alpha=alpha)
print "alpha", alpha
print "beta", beta
print "dot(beta[1:],beta[1:])", dot(beta[1:], beta[1:])
print "r-squared", multiple_r_squared(x, daily_minutes_good, beta)
print
| unlicense |
destijl/grr | grr/lib/flows/general/windows_vsc_test.py | 2 | 3543 | #!/usr/bin/env python
"""Tests for Windows Volume Shadow Copy flow."""
import stat
from grr.lib import aff4
from grr.lib import flags
from grr.lib import test_lib
# needed for ListVolumeShadowCopies pylint: disable=unused-import
from grr.lib.flows.general import windows_vsc
# pylint: enable=unused-import
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import paths as rdf_paths
from grr.lib.rdfvalues import protodict as rdf_protodict
class TestClient(object):
"""A test client mock."""
_RESPONSES = {
"Caption": "None",
"ClientAccessible": "True",
"Count": "1",
"Description": "None",
"DeviceObject": r"\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy3",
"Differential": "True",
"ExposedLocally": "False",
"ExposedName": "None",
"ExposedPath": "None",
"ExposedRemotely": "False",
"HardwareAssisted": "False",
"ID": "{4F1D1E03-C7C1-4023-8CE9-5FF4D16E133D}",
"Imported": "False",
"InstallDate": "20130430022911.144000-420",
"Name": "None",
"NoAutoRelease": "True",
"NotSurfaced": "False",
"NoWriters": "False",
"OriginatingMachine": "mic-PC",
"Persistent": "True",
"Plex": "False",
"ProviderID": "{B5946137-7B9F-4925-AF80-51ABD60B20D5}",
"ServiceMachine": "mic-PC",
"SetID": "{9419738B-113C-4ACC-BD64-DADDD3B88381}",
"State": "12",
"Status": "None",
"Transportable": "False",
"VolumeName": r"\\?\Volume{f2180d84-7eb0-11e1-bed0-806e6f6e6963}",
}
def WmiQuery(self, query):
expected_query = "SELECT * FROM Win32_ShadowCopy"
if query.query != expected_query:
raise RuntimeError("Received unexpected query.")
return [rdf_protodict.Dict(**self._RESPONSES)]
def ListDirectory(self, list_directory_request):
"""A mock list directory."""
pathspec = list_directory_request.pathspec
if not pathspec:
raise RuntimeError("Missing pathspec.")
if (pathspec.path != r"\\.\HarddiskVolumeShadowCopy3" or
pathspec.pathtype != rdf_paths.PathSpec.PathType.OS):
raise RuntimeError("Invalid pathspec.")
if not pathspec.nested_path:
raise RuntimeError("Missing nested pathspec.")
if (pathspec.nested_path.path != "/" or
pathspec.nested_path.pathtype != rdf_paths.PathSpec.PathType.TSK):
raise RuntimeError("Invalid nested pathspec.")
result = []
for i in range(10):
mock_pathspec = pathspec.Copy()
mock_pathspec.last.path = "/file %s" % i
result.append(
rdf_client.StatEntry(
pathspec=mock_pathspec, st_mode=stat.S_IFDIR))
return result
class TestListVolumeShadowCopies(test_lib.FlowTestsBaseclass):
"""Test the list Volume Shadow Copies flow."""
def testListVolumeShadowCopies(self):
"""Test the list Volume Shadow Copies flow."""
flow_name = "ListVolumeShadowCopies"
# Run the flow in the simulated way
for _ in test_lib.TestFlowHelper(
flow_name, TestClient(), token=self.token, client_id=self.client_id):
pass
fd = aff4.FACTORY.Open(
self.client_id.Add("fs/tsk/\\\\.\\HarddiskVolumeShadowCopy3"),
token=self.token)
children = list(fd.ListChildren())
self.assertEqual(len(children), 10)
self.assertEqual([x.Basename() for x in sorted(children)],
["file %s" % i for i in range(10)])
def main(argv):
# Run the full test suite
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
| apache-2.0 |
XiaodunServerGroup/xiaodun-platform | i18n/execute.py | 6 | 1552 | import os, subprocess, logging
from i18n.config import BASE_DIR
LOG = logging.getLogger(__name__)
def execute(command, working_directory=BASE_DIR):
"""
Executes shell command in a given working_directory.
Command is a string to pass to the shell.
Output is ignored.
"""
LOG.info("Executing in %s ...", working_directory)
LOG.info(command)
subprocess.check_call(command, cwd=working_directory, stderr=subprocess.STDOUT, shell=True)
def call(command, working_directory=BASE_DIR):
"""
Executes shell command in a given working_directory.
Command is a list of strings to execute as a command line.
Returns a tuple of two strings: (stdout, stderr)
"""
LOG.info(command)
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=working_directory, shell=True)
out, err = p.communicate()
return (out, err)
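# A minimal usage sketch (hedged: the command strings are illustrative):
#
#   execute("git status")                  # runs in BASE_DIR, raises on a non-zero exit
#   out, err = call("git status --short")  # captures stdout/stderr instead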
def create_dir_if_necessary(pathname):
dirname = os.path.dirname(pathname)
if not os.path.exists(dirname):
os.makedirs(dirname)
def remove_file(filename, verbose=True):
"""
Attempt to delete filename.
log is boolean. If true, removal is logged.
Log a warning if file does not exist.
Logging filenames are releative to BASE_DIR to cut down on noise in output.
"""
if verbose:
LOG.info('Deleting file %s' % os.path.relpath(filename, BASE_DIR))
if not os.path.exists(filename):
LOG.warn("File does not exist: %s" % os.path.relpath(filename, BASE_DIR))
else:
os.remove(filename)
| agpl-3.0 |
artscoop/django-extensions | django_extensions/management/commands/dumpscript.py | 24 | 29506 | # -*- coding: UTF-8 -*-
"""
Title: Dumpscript management command
Project: Hardytools (queryset-refactor version)
Author: Will Hardy (http://willhardy.com.au)
Date: June 2008
Usage: python manage.py dumpscript appname > scripts/scriptname.py
$Revision: 217 $
Description:
Generates a Python script that will repopulate the database using objects.
The advantage of this approach is that it is easy to understand, and more
flexible than directly populating the database, or using XML.
* It also allows for new defaults to take effect and only transfers what is
needed.
* If a new database schema has a NEW ATTRIBUTE, it is simply not
populated (using a default value will make the transition smooth :)
* If a new database schema REMOVES AN ATTRIBUTE, it is simply ignored
and the data moves across safely (I'm assuming we don't want this
attribute anymore.)
* Problems may only occur if there is a new model which is now a required
ForeignKey for an existing model. But this is easy to fix by editing the
populate script. Half of the job is already done as all ForeignKey
lookups occur through the locate_object() function in the generated script.
Improvements:
See TODOs and FIXMEs scattered throughout :-)
"""
import datetime
import sys
from optparse import make_option
import django
import six
# conditional import, force_unicode was renamed in Django 1.5
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import BaseCommand
from django.db.models import (
AutoField, BooleanField, DateField, DateTimeField, FileField, ForeignKey,
)
from django_extensions.management.utils import signalcommand
try:
from django.utils.encoding import smart_unicode, force_unicode # NOQA
except ImportError:
from django.utils.encoding import smart_text as smart_unicode, force_text as force_unicode # NOQA
def orm_item_locator(orm_obj):
"""
This function is called every time an object that will not be exported is required.
Where orm_obj is the referred object.
We postpone the lookup to locate_object() which will be run on the generated script
"""
the_class = orm_obj._meta.object_name
original_class = the_class
pk_name = orm_obj._meta.pk.name
original_pk_name = pk_name
pk_value = getattr(orm_obj, pk_name)
while hasattr(pk_value, "_meta") and hasattr(pk_value._meta, "pk") and hasattr(pk_value._meta.pk, "name"):
the_class = pk_value._meta.object_name
pk_name = pk_value._meta.pk.name
pk_value = getattr(pk_value, pk_name)
clean_dict = make_clean_dict(orm_obj.__dict__)
for key in clean_dict:
v = clean_dict[key]
if v is not None and not isinstance(v, (six.string_types, six.integer_types, float, datetime.datetime)):
clean_dict[key] = six.u("%s" % v)
output = """ importer.locate_object(%s, "%s", %s, "%s", %s, %s ) """ % (
original_class, original_pk_name,
the_class, pk_name, pk_value, clean_dict
)
return output
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--autofield', action='store_false', dest='skip_autofield',
default=True, help='Include Autofields (like pk fields)'),
)
help = 'Dumps the data as a customised python script.'
args = '[appname ...]'
@signalcommand
def handle(self, *app_labels, **options):
# Get the models we want to export
models = get_models(app_labels)
# A dictionary is created to keep track of all the processed objects,
# so that foreign key references can be made using python variable names.
# This variable "context" will be passed around like the town bicycle.
context = {}
# Create a dumpscript object and let it format itself as a string
script = Script(
models=models,
context=context,
stdout=self.stdout,
stderr=self.stderr,
options=options,
)
self.stdout.write(str(script))
self.stdout.write("\n")
def get_models(app_labels):
""" Gets a list of models for the given app labels, with some exceptions.
TODO: If a required model is referenced, it should also be included.
Or at least discovered with a get_or_create() call.
"""
from django.db.models import get_app, get_apps, get_model
from django.db.models import get_models as get_all_models
# These models are not to be output, e.g. because they can be generated automatically
# TODO: This should be "appname.modelname" string
EXCLUDED_MODELS = (ContentType, )
models = []
# If no app labels are given, return all
if not app_labels:
for app in get_apps():
models += [m for m in get_all_models(app) if m not in EXCLUDED_MODELS]
return models
# Get all relevant apps
for app_label in app_labels:
# If a specific model is mentioned, get only that model
if "." in app_label:
app_label, model_name = app_label.split(".", 1)
models.append(get_model(app_label, model_name))
# Get all models for a given app
else:
models += [m for m in get_all_models(get_app(app_label)) if m not in EXCLUDED_MODELS]
return models
class Code(object):
""" A snippet of python script.
This keeps track of import statements and can be output to a string.
In the future, other features such as custom indentation might be included
in this class.
"""
def __init__(self, indent=-1, stdout=None, stderr=None):
if not stdout:
stdout = sys.stdout
if not stderr:
stderr = sys.stderr
self.indent = indent
self.stdout = stdout
self.stderr = stderr
def __str__(self):
""" Returns a string representation of this script.
"""
if self.imports:
self.stderr.write(repr(self.import_lines))
return flatten_blocks([""] + self.import_lines + [""] + self.lines, num_indents=self.indent)
else:
return flatten_blocks(self.lines, num_indents=self.indent)
def get_import_lines(self):
""" Takes the stored imports and converts them to lines
"""
if self.imports:
return ["from %s import %s" % (value, key) for key, value in self.imports.items()]
else:
return []
import_lines = property(get_import_lines)
class ModelCode(Code):
" Produces a python script that can recreate data for a given model class. "
def __init__(self, model, context=None, stdout=None, stderr=None, options=None):
super(ModelCode, self).__init__(indent=0, stdout=stdout, stderr=stderr)
self.model = model
if context is None:
context = {}
self.context = context
self.options = options
self.instances = []
def get_imports(self):
""" Returns a dictionary of import statements, with the variable being
defined as the key.
"""
return {self.model.__name__: smart_unicode(self.model.__module__)}
imports = property(get_imports)
def get_lines(self):
""" Returns a list of lists or strings, representing the code body.
Each list is a block, each string is a statement.
"""
code = []
for counter, item in enumerate(self.model._default_manager.all()):
instance = InstanceCode(instance=item, id=counter + 1, context=self.context, stdout=self.stdout, stderr=self.stderr, options=self.options)
self.instances.append(instance)
if instance.waiting_list:
code += instance.lines
# After each instance has been processed, try again.
# This allows self referencing fields to work.
for instance in self.instances:
if instance.waiting_list:
code += instance.lines
return code
lines = property(get_lines)
class InstanceCode(Code):
" Produces a python script that can recreate data for a given model instance. "
def __init__(self, instance, id, context=None, stdout=None, stderr=None, options=None):
""" We need the instance in question and an id """
super(InstanceCode, self).__init__(indent=0, stdout=stdout, stderr=stderr)
self.imports = {}
self.options = options
self.instance = instance
self.model = self.instance.__class__
if context is None:
context = {}
self.context = context
self.variable_name = "%s_%s" % (self.instance._meta.db_table, id)
self.skip_me = None
self.instantiated = False
self.waiting_list = list(self.model._meta.fields)
self.many_to_many_waiting_list = {}
for field in self.model._meta.many_to_many:
self.many_to_many_waiting_list[field] = list(getattr(self.instance, field.name).all())
def get_lines(self, force=False):
""" Returns a list of lists or strings, representing the code body.
Each list is a block, each string is a statement.
force (True or False): if an attribute object cannot be included,
it is usually skipped to be processed later. With 'force' set, there
will be no waiting: a get_or_create() call is written instead.
"""
code_lines = []
# Don't return anything if this is an instance that should be skipped
if self.skip():
return []
# Initialise our new object
# e.g. model_name_35 = Model()
code_lines += self.instantiate()
# Add each field
# e.g. model_name_35.field_one = 1034.91
# model_name_35.field_two = "text"
code_lines += self.get_waiting_list()
if force:
# TODO: Check that M2M are not affected
code_lines += self.get_waiting_list(force=force)
# Print the save command for our new object
# e.g. model_name_35.save()
if code_lines:
code_lines.append("%s = importer.save_or_locate(%s)\n" % (self.variable_name, self.variable_name))
code_lines += self.get_many_to_many_lines(force=force)
return code_lines
lines = property(get_lines)
def skip(self):
""" Determine whether or not this object should be skipped.
If this model instance is a parent of a single subclassed
instance, skip it. The subclassed instance will create this
parent instance for us.
TODO: Allow the user to force its creation?
"""
if self.skip_me is not None:
return self.skip_me
def get_skip_version():
""" Return which version of the skip code should be run
Django's deletion code was refactored in r14507 which
was just two days before 1.3 alpha 1 (r14519)
"""
if not hasattr(self, '_SKIP_VERSION'):
version = django.VERSION
# no, it isn't lisp. I swear.
self._SKIP_VERSION = (
version[0] > 1 or ( # django 2k... someday :)
version[0] == 1 and ( # 1.x
version[1] >= 4 or # 1.4+
version[1] == 3 and not ( # 1.3.x
(version[3] == 'alpha' and version[1] == 0)
)
)
)
) and 2 or 1 # NOQA
return self._SKIP_VERSION
if get_skip_version() == 1:
try:
# Django trunk since r7722 uses CollectedObjects instead of dict
from django.db.models.query import CollectedObjects
sub_objects = CollectedObjects()
except ImportError:
# previous versions don't have CollectedObjects
sub_objects = {}
self.instance._collect_sub_objects(sub_objects)
sub_objects = sub_objects.keys()
elif get_skip_version() == 2:
from django.db.models.deletion import Collector
from django.db import router
cls = self.instance.__class__
using = router.db_for_write(cls, instance=self.instance)
collector = Collector(using=using)
collector.collect([self.instance], collect_related=False)
# collector stores its instances in two places. I *think* we
# only need collector.data, but using the batches is needed
# to perfectly emulate the old behaviour
# TODO: check if batches are really needed. If not, remove them.
sub_objects = sum([list(i) for i in collector.data.values()], [])
if hasattr(collector, 'batches'):
# Django 1.6 removed batches for being dead code
# https://github.com/django/django/commit/a170c3f755351beb35f8166ec3c7e9d524d9602
for batch in collector.batches.values():
# batch.values can be sets, which must be converted to lists
sub_objects += sum([list(i) for i in batch.values()], [])
sub_objects_parents = [so._meta.parents for so in sub_objects]
if [self.model in p for p in sub_objects_parents].count(True) == 1:
# since this instance isn't explicitly created, its variable name
# can't be referenced in the script, so record None in context dict
pk_name = self.instance._meta.pk.name
key = '%s_%s' % (self.model.__name__, getattr(self.instance, pk_name))
self.context[key] = None
self.skip_me = True
else:
self.skip_me = False
return self.skip_me
def instantiate(self):
" Write lines for instantiation "
# e.g. model_name_35 = Model()
code_lines = []
if not self.instantiated:
code_lines.append("%s = %s()" % (self.variable_name, self.model.__name__))
self.instantiated = True
# Store our variable name for future foreign key references
pk_name = self.instance._meta.pk.name
key = '%s_%s' % (self.model.__name__, getattr(self.instance, pk_name))
self.context[key] = self.variable_name
return code_lines
def get_waiting_list(self, force=False):
" Add lines for any waiting fields that can be completed now. "
code_lines = []
skip_autofield = self.options.get('skip_autofield', True)
# Process normal fields
for field in list(self.waiting_list):
try:
# Find the value, add the line, remove from waiting list and move on
value = get_attribute_value(self.instance, field, self.context, force=force, skip_autofield=skip_autofield)
code_lines.append('%s.%s = %s' % (self.variable_name, field.name, value))
self.waiting_list.remove(field)
except SkipValue:
# Remove from the waiting list and move on
self.waiting_list.remove(field)
continue
except DoLater:
# Move on, maybe next time
continue
return code_lines
def get_many_to_many_lines(self, force=False):
""" Generates lines that define many to many relations for this instance. """
lines = []
for field, rel_items in self.many_to_many_waiting_list.items():
for rel_item in list(rel_items):
try:
pk_name = rel_item._meta.pk.name
key = '%s_%s' % (rel_item.__class__.__name__, getattr(rel_item, pk_name))
value = "%s" % self.context[key]
lines.append('%s.%s.add(%s)' % (self.variable_name, field.name, value))
self.many_to_many_waiting_list[field].remove(rel_item)
except KeyError:
if force:
item_locator = orm_item_locator(rel_item)
self.context["__extra_imports"][rel_item._meta.object_name] = rel_item.__module__
lines.append('%s.%s.add( %s )' % (self.variable_name, field.name, item_locator))
self.many_to_many_waiting_list[field].remove(rel_item)
if lines:
lines.append("")
return lines
class Script(Code):
" Produces a complete python script that can recreate data for the given apps. "
def __init__(self, models, context=None, stdout=None, stderr=None, options=None):
super(Script, self).__init__(stdout=stdout, stderr=stderr)
self.imports = {}
self.models = models
if context is None:
context = {}
self.context = context
self.context["__avaliable_models"] = set(models)
self.context["__extra_imports"] = {}
self.options = options
def _queue_models(self, models, context):
""" Works an an appropriate ordering for the models.
This isn't essential, but makes the script look nicer because
more instances can be defined on their first try.
"""
# Max number of cycles allowed before we call it an infinite loop.
MAX_CYCLES = 5
model_queue = []
number_remaining_models = len(models)
allowed_cycles = MAX_CYCLES
while number_remaining_models > 0:
previous_number_remaining_models = number_remaining_models
model = models.pop(0)
# If the model is ready to be processed, add it to the list
if check_dependencies(model, model_queue, context["__avaliable_models"]):
model_class = ModelCode(model=model, context=context, stdout=self.stdout, stderr=self.stderr, options=self.options)
model_queue.append(model_class)
# Otherwise put the model back at the end of the list
else:
models.append(model)
# Check for infinite loops.
# This means there is a cyclic foreign key structure
# That cannot be resolved by re-ordering
number_remaining_models = len(models)
if number_remaining_models == previous_number_remaining_models:
allowed_cycles -= 1
if allowed_cycles <= 0:
# Add the remaining models, but do not remove them from the model list
missing_models = [ModelCode(model=m, context=context, stdout=self.stdout, stderr=self.stderr, options=self.options) for m in models]
model_queue += missing_models
# Replace the models with the model class objects
# (sure, this is a little bit of hackery)
models[:] = missing_models
break
else:
allowed_cycles = MAX_CYCLES
return model_queue
def get_lines(self):
""" Returns a list of lists or strings, representing the code body.
Each list is a block, each string is a statement.
"""
code = [self.FILE_HEADER.strip()]
# Queue and process the required models
for model_class in self._queue_models(self.models, context=self.context):
msg = 'Processing model: %s\n' % model_class.model.__name__
self.stderr.write(msg)
code.append(" # " + msg)
code.append(model_class.import_lines)
code.append("")
code.append(model_class.lines)
# Process left over foreign keys from cyclic models
for model in self.models:
msg = 'Re-processing model: %s\n' % model.model.__name__
self.stderr.write(msg)
code.append(" # " + msg)
for instance in model.instances:
if instance.waiting_list or instance.many_to_many_waiting_list:
code.append(instance.get_lines(force=True))
code.insert(1, " # Initial Imports")
code.insert(2, "")
for key, value in self.context["__extra_imports"].items():
code.insert(2, " from %s import %s" % (value, key))
return code
lines = property(get_lines)
# A user-friendly file header
FILE_HEADER = """
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file has been automatically generated.
# Instead of changing it, create a file called import_helper.py
# and put a class called ImportHelper(object) in it.
#
# This class will be specially casted so that instead of extending object,
# it will actually extend the class BasicImportHelper()
#
# That means you just have to overload the methods you want to
# change, leaving the other ones intact.
#
# Something that you might want to do is use transactions, for example.
#
# Also, don't forget to add the necessary Django imports.
#
# This file was generated with the following command:
# %s
#
# to restore it, run
# manage.py runscript module_name.this_script_name
#
# example: if manage.py is at ./manage.py
# and the script is at ./some_folder/some_script.py
# you must make sure ./some_folder/__init__.py exists
# and run ./manage.py runscript some_folder.some_script
import os, sys
from django.db import transaction
class BasicImportHelper(object):
def pre_import(self):
pass
# You probably want to uncomment one of these two lines
# @transaction.atomic # Django 1.6
# @transaction.commit_on_success # Django <1.6
def run_import(self, import_data):
import_data()
def post_import(self):
pass
def locate_similar(self, current_object, search_data):
# You will probably want to call this method from save_or_locate()
# Example:
# new_obj = self.locate_similar(the_obj, {"national_id": the_obj.national_id } )
the_obj = current_object.__class__.objects.get(**search_data)
return the_obj
def locate_object(self, original_class, original_pk_name, the_class, pk_name, pk_value, obj_content):
# You may change this function to do specific lookup for specific objects
#
# original_class class of the django orm's object that needs to be located
# original_pk_name the primary key of original_class
# the_class parent class of original_class which contains obj_content
# pk_name the primary key of original_class
# pk_value value of the primary_key
# obj_content content of the object which was not exported.
#
# You should use obj_content to locate the object on the target db
#
# An example where original_class and the_class are different is
# when original_class is Farmer and the_class is Person. The table
# may refer to a Farmer but you will actually need to locate Person
# in order to instantiate that Farmer
#
# Example:
# if the_class == SurveyResultFormat or the_class == SurveyType or the_class == SurveyState:
# pk_name="name"
# pk_value=obj_content[pk_name]
# if the_class == StaffGroup:
# pk_value=8
search_data = { pk_name: pk_value }
the_obj = the_class.objects.get(**search_data)
#print(the_obj)
return the_obj
def save_or_locate(self, the_obj):
# Change this if you want to locate the object in the database
try:
the_obj.save()
except:
print("---------------")
print("Error saving the following object:")
print(the_obj.__class__)
print(" ")
print(the_obj.__dict__)
print(" ")
print(the_obj)
print(" ")
print("---------------")
raise
return the_obj
importer = None
try:
import import_helper
# We need this so ImportHelper can extend BasicImportHelper, although import_helper.py
# has no knowledge of this class
importer = type("DynamicImportHelper", (import_helper.ImportHelper, BasicImportHelper ) , {} )()
except ImportError as e:
# From Python 3.3 we can check e.name - string match is for backward compatibility.
if 'import_helper' in str(e):
importer = BasicImportHelper()
else:
raise
import datetime
from decimal import Decimal
from django.contrib.contenttypes.models import ContentType
try:
import dateutil.parser
except ImportError:
print("Please install python-dateutil")
sys.exit(os.EX_USAGE)
def run():
importer.pre_import()
importer.run_import(import_data)
importer.post_import()
def import_data():
""" % " ".join(sys.argv)
# HELPER FUNCTIONS
#-------------------------------------------------------------------------------
def flatten_blocks(lines, num_indents=-1):
""" Takes a list (block) or string (statement) and flattens it into a string
with indentation.
"""
# The standard indent is four spaces
INDENTATION = " " * 4
if not lines:
return ""
# If this is a string, add the indentation and finish here
if isinstance(lines, six.string_types):
return INDENTATION * num_indents + lines
# If this is not a string, join the lines and recurse
return "\n".join([flatten_blocks(line, num_indents + 1) for line in lines])
def get_attribute_value(item, field, context, force=False, skip_autofield=True):
""" Gets a string version of the given attribute's value, like repr() might. """
# Find the value of the field, catching any database issues
try:
value = getattr(item, field.name)
except ObjectDoesNotExist:
raise SkipValue('Could not find object for %s.%s, ignoring.\n' % (item.__class__.__name__, field.name))
# AutoField: We don't include the auto fields, they'll be automatically recreated
if skip_autofield and isinstance(field, AutoField):
raise SkipValue()
# Some databases (eg MySQL) might store boolean values as 0/1, this needs to be cast as a bool
elif isinstance(field, BooleanField) and value is not None:
return repr(bool(value))
# Post file-storage-refactor, repr() on File/ImageFields no longer returns the path
elif isinstance(field, FileField):
return repr(force_unicode(value))
# ForeignKey fields, link directly using our stored python variable name
elif isinstance(field, ForeignKey) and value is not None:
# Special case for contenttype foreign keys: no need to output any
# content types in this script, as they can be generated again
# automatically.
# NB: Not sure if "is" will always work
if field.rel.to is ContentType:
return 'ContentType.objects.get(app_label="%s", model="%s")' % (value.app_label, value.model)
# Generate an identifier (key) for this foreign object
pk_name = value._meta.pk.name
key = '%s_%s' % (value.__class__.__name__, getattr(value, pk_name))
if key in context:
variable_name = context[key]
# If the context value is set to None, this should be skipped.
# This identifies models that have been skipped (inheritance)
if variable_name is None:
raise SkipValue()
# Return the variable name listed in the context
return "%s" % variable_name
elif value.__class__ not in context["__avaliable_models"] or force:
context["__extra_imports"][value._meta.object_name] = value.__module__
item_locator = orm_item_locator(value)
return item_locator
else:
raise DoLater('(FK) %s.%s\n' % (item.__class__.__name__, field.name))
elif isinstance(field, (DateField, DateTimeField)) and value is not None:
return "dateutil.parser.parse(\"%s\")" % value.isoformat()
# A normal field (e.g. a python built-in)
else:
return repr(value)
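# --- Illustrative note (added; example values are hypothetical) ---
# Typical strings emitted by get_attribute_value():
#   BooleanField stored as 0/1   -> "False" / "True"
#   DateTimeField                -> 'dateutil.parser.parse("2015-01-01T00:00:00")'
#   ForeignKey whose target was already written by this script
#                                -> the variable name recorded in context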
def make_clean_dict(the_dict):
if "_state" in the_dict:
clean_dict = the_dict.copy()
del clean_dict["_state"]
return clean_dict
return the_dict
def check_dependencies(model, model_queue, avaliable_models):
" Check that all the depenedencies for this model are already in the queue. "
# A list of allowed links: existing fields, itself and the special case ContentType
allowed_links = [m.model.__name__ for m in model_queue] + [model.__name__, 'ContentType']
# For each ForeignKey or ManyToMany field, check that a link is possible
for field in model._meta.fields:
if field.rel and field.rel.to.__name__ not in allowed_links:
if field.rel.to not in avaliable_models:
continue
return False
for field in model._meta.many_to_many:
if field.rel and field.rel.to.__name__ not in allowed_links:
return False
return True
# EXCEPTIONS
#-------------------------------------------------------------------------------
class SkipValue(Exception):
""" Value could not be parsed or should simply be skipped. """
class DoLater(Exception):
""" Value could not be parsed or should simply be skipped. """
| mit |
david-lassonde-adsk/pymel | pymel/util/namedtuple.py | 8 | 5952 | """ Cookbook recipe 500261, Raymond Hettinger, planned for inclusion in 2.6 :
http://docs.python.org/dev/library/collections.html#collections.namedtuple """
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
import sys as _sys
__all__ = ['namedtuple']
def namedtuple(typename, field_names, docAppend="", verbose=False):
""" Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', 'x, y')
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
docAppend = docAppend.encode('string_escape')
# Parse and validate the field names. Validation serves two purposes,
# generating informative error messages and preventing template injection attacks.
if isinstance(field_names, basestring):
field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
field_names = tuple(field_names)
for name in (typename,) + field_names:
if not min(c.isalnum() or c == '_' for c in name):
raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a keyword: %r' % name)
if name[0].isdigit():
raise ValueError('Type names and field names cannot start with a number: %r' % name)
seen_names = set()
for name in field_names:
if name.startswith('_'):
raise ValueError('Field names cannot start with an underscore: %r' % name)
if name in seen_names:
raise ValueError('Encountered duplicate field name: %r' % name)
seen_names.add(name)
# Create and fill-in the class template
# note in 2.6 collections use
# def __getnewargs__(self):
# return tuple(self)
# instead of __reduce__ to provide unpickling capabilities
numfields = len(field_names)
argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes
reprtxt = ', '.join('%s=%%r' % name for name in field_names)
dicttxt = ', '.join('%r: t[%d]' % (name, pos) for pos, name in enumerate(field_names))
template = '''class %(typename)s(tuple):
'%(typename)s(%(argtxt)s)%(docAppend)s' \n
__slots__ = () \n
_fields = %(field_names)r \n
def __new__(cls, %(argtxt)s):
return tuple.__new__(cls, (%(argtxt)s)) \n
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new %(typename)s object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != %(numfields)d:
raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
return result \n
def __repr__(self):
return '%(typename)s(%(reprtxt)s)' %% self \n
def _asdict(t):
'Return a new dict which maps field names to their values'
return {%(dicttxt)s} \n
def _replace(self, **kwds):
'Return a new %(typename)s object replacing specified fields with new values'
result = self._make(map(kwds.pop, %(field_names)r, self))
if kwds:
raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
return result \n
def __reduce__(self) :
return ( self.__class__, tuple(self) ) \n\n''' % locals()
for i, name in enumerate(field_names):
template += ' %s = property(itemgetter(%d))\n' % (name, i)
if verbose:
print template
# Execute the template string in a temporary namespace
namespace = dict(itemgetter=_itemgetter)
try:
exec template in namespace
except SyntaxError, e:
raise SyntaxError(e.message + ':\n' + template)
result = namespace[typename]
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example).
if hasattr(_sys, '_getframe'):
result.__module__ = _sys._getframe(1).f_globals['__name__']
return result
if __name__ == '__main__':
# verify that instances can be pickled
from cPickle import loads, dumps
Point = namedtuple('Point', 'x, y', verbose=True)  # third positional arg is docAppend, so pass verbose by keyword
p = Point(x=10, y=20)
assert p == loads(dumps(p))
# test and demonstrate ability to override methods
class Point(namedtuple('Point', 'x y')):
@property
def hypot(self):
return (self.x ** 2 + self.y ** 2) ** 0.5
def __str__(self):
return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot)
for p in Point(3, 4), Point(14, 5), Point(9. / 7, 6):
print p
class Point(namedtuple('Point', 'x y')):
'Point class with optimized _make() and _replace() without error-checking'
_make = classmethod(tuple.__new__)
def _replace(self, _map=map, **kwds):
return self._make(_map(kwds.get, ('x', 'y'), self))
print Point(11, 22)._replace(x=100)
import doctest
TestResults = namedtuple('TestResults', 'failed attempted')
print TestResults(*doctest.testmod())
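# --- Illustrative usage sketch (appended; the names below are only for
# demonstration). The pymel variant adds a docAppend argument that is
# appended verbatim to the generated class docstring:
if __name__ == '__main__':
    Vector = namedtuple('Vector', 'x y z', docAppend=' - a simple 3D vector')
    assert Vector.__doc__ == 'Vector(x, y, z) - a simple 3D vector'
    assert Vector(1, 2, 3)._replace(z=0) == Vector(1, 2, 0)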
| bsd-3-clause |
onceuponatimeforever/oh-mainline | vendor/packages/kombu/examples/complete_send.py | 31 | 1152 | """
Example producer that sends a single message and exits.
You can use `complete_receive.py` to receive the message sent.
"""
from kombu import Connection, Producer, Exchange, Queue
#: By default messages sent to exchanges are persistent (delivery_mode=2),
#: and queues and exchanges are durable.
exchange = Exchange('kombu_demo', type='direct')
queue = Queue('kombu_demo', exchange, routing_key='kombu_demo')
with Connection('amqp://guest:guest@localhost:5672//') as connection:
#: Producers are used to publish messages.
#: a default exchange and routing key can also be specified
#: as arguments to the Producer, but we'd rather specify this explicitly
#: at the publish call.
producer = Producer(connection)
#: Publish the message using the json serializer (which is the default),
#: and zlib compression. The kombu consumer will automatically detect
#: encoding, serialization and compression used and decode accordingly.
producer.publish({'hello': 'world'},
exchange=exchange,
routing_key='kombu_demo',
serializer='json', compression='zlib')
| agpl-3.0 |
ahua/pythondotorg | pages/management/commands/fix_success_story_images.py | 14 | 2607 | import re
import os
import requests
from urllib.parse import urlparse
from django.core.management.base import BaseCommand
from django.conf import settings
from django.core.files import File
from ...models import Page, Image, page_image_path
class Command(BaseCommand):
""" Fix success story page images """
def get_success_pages(self):
return Page.objects.filter(path__startswith='about/success/')
def image_url(self, path):
"""
Given a full filesystem path to an image, return the proper media
url for it
"""
new_url = path.replace(settings.MEDIA_ROOT, settings.MEDIA_URL)
return new_url.replace('//', '/')
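# NOTE (added for clarity): the replace('//', '/') above collapses the
# doubled slash that can appear when the MEDIA_ROOT prefix is swapped for
# MEDIA_URL; it assumes MEDIA_URL is a path such as '/media/', not a full
# 'http://...' URL, whose scheme separator would otherwise be mangled.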
def fix_image(self, path, page):
url = 'http://legacy.python.org{}'.format(path)
# Retrieve the image
r = requests.get(url)
if r.status_code != 200:
print("ERROR Couldn't load {}".format(url))
return
# Create new associated image and generate ultimate path
img = Image()
img.page = page
filename = os.path.basename(urlparse(url).path)
output_path = page_image_path(img, filename)
# Make sure our directories exist
directory = os.path.dirname(output_path)
if not os.path.exists(directory):
os.makedirs(directory)
# Write image data to our location
with open(output_path, 'wb') as f:
f.write(r.content)
# Re-open the image as a Django File object
reopen = open(output_path, 'rb')
new_file = File(reopen)
img.image.save(filename, new_file, save=True)
return self.image_url(output_path)
def find_image_paths(self, page):
content = page.content.raw
paths = set(re.findall(r'(/files/success.*)\b', content))
if paths:
print("Found {} matches in {}".format(len(paths), page.path))
return paths
def process_success_story(self, page):
""" Process an individual success story """
image_paths = self.find_image_paths(page)
for path in image_paths:
new_url = self.fix_image(path, page)
print(" Fixing {} -> {}".format(path, new_url))
content = page.content.raw
new_content = content.replace(path, new_url)
page.content = new_content
page.save()
def handle(self, *args, **kwargs):
self.pages = self.get_success_pages()
print("Found {} success pages".format(len(self.pages)))
for p in self.pages:
self.process_success_story(p)
| apache-2.0 |
andreugrimalt/Theano-Tutorials | 5_convolutional_net.py | 1 | 3899 | import theano
from theano import tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import numpy as np
from load import mnist
from theano.tensor.nnet.conv import conv2d
from theano.tensor.signal.downsample import max_pool_2d
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
import cPickle
srng = RandomStreams()
def floatX(X):
return np.asarray(X, dtype=theano.config.floatX)
def init_weights(shape):
return theano.shared(floatX(np.random.randn(*shape) * 0.01))
def rectify(X):
return T.maximum(X, 0.)
def softmax(X):
e_x = T.exp(X - X.max(axis=1).dimshuffle(0, 'x'))
return e_x / e_x.sum(axis=1).dimshuffle(0, 'x')
def dropout(X, p=0.):
if p > 0:
retain_prob = 1 - p
X *= srng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX)
X /= retain_prob
return X
def RMSprop(cost, params, lr=0.001, rho=0.9, epsilon=1e-6):
grads = T.grad(cost=cost, wrt=params)
updates = []
for p, g in zip(params, grads):
acc = theano.shared(p.get_value() * 0.)
acc_new = rho * acc + (1 - rho) * g ** 2
gradient_scaling = T.sqrt(acc_new + epsilon)
g = g / gradient_scaling
updates.append((acc, acc_new))
updates.append((p, p - lr * g))
return updates
def model(X, w, w2, w3, w4, p_drop_conv, p_drop_hidden):
l1a = rectify(conv2d(X, w, border_mode='full'))
l1 = max_pool_2d(l1a, (2, 2))
l1 = dropout(l1, p_drop_conv)
l2a = rectify(conv2d(l1, w2))
l2 = max_pool_2d(l2a, (2, 2))
l2 = dropout(l2, p_drop_conv)
l3a = rectify(conv2d(l2, w3))
l3b = max_pool_2d(l3a, (2, 2))
l3 = T.flatten(l3b, outdim=2)
l3 = dropout(l3, p_drop_conv)
l4 = rectify(T.dot(l3, w4))
l4 = dropout(l4, p_drop_hidden)
pyx = softmax(T.dot(l4, w_o))
return l1, l2, l3, l4, pyx
trX, teX, trY, teY = mnist(onehot=True)
trX = trX.reshape(-1, 1, 28, 28)
teX = teX.reshape(-1, 1, 28, 28)
X = T.ftensor4()
Y = T.fmatrix()
w = init_weights((32, 1, 3, 3))
w2 = init_weights((64, 32, 3, 3))
w3 = init_weights((128, 64, 3, 3))
w4 = init_weights((128 * 3 * 3, 625))
w_o = init_weights((625, 10))
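# Shape bookkeeping (added for clarity): the 28x28 input goes through a
# 'full' 3x3 conv (-> 30x30), 2x2 max-pooling (-> 15x15), a 'valid' 3x3
# conv (-> 13x13), pooling (-> 7x7), another 'valid' 3x3 conv (-> 5x5) and
# pooling (-> 3x3), so the flattened input to w4 has 128 * 3 * 3 features.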
noise_l1, noise_l2, noise_l3, noise_l4, noise_py_x = model(X, w, w2, w3, w4, 0.2, 0.5)
l1, l2, l3, l4, py_x = model(X, w, w2, w3, w4, 0., 0.)
y_x = T.argmax(py_x, axis=1)
cost = T.mean(T.nnet.categorical_crossentropy(noise_py_x, Y))
params = [w, w2, w3, w4, w_o]
updates = RMSprop(cost, params, lr=0.001)
train = theano.function(inputs=[X, Y], outputs=cost, updates=updates, allow_input_downcast=True)
predict = theano.function(inputs=[X], outputs=y_x, allow_input_downcast=True)
for i in range(50):
for start, end in zip(range(0, len(trX), 128), range(128, len(trX), 128)):
cost = train(trX[start:end], trY[start:end])
print np.mean(np.argmax(teY, axis=1) == predict(teX))
f = file('objects.save', 'wb')
for obj in [l1, l2, l3, py_x]:
cPickle.dump(obj, f, protocol=cPickle.HIGHEST_PROTOCOL)
f.close()
def fillim(c):
# extract the c-th first-layer 3x3 filter as a 2D image for display
im = w[c, 0, :, :].eval()*50
im.shape = 3,3
return im
def plotWeights():
fig = plt.figure(1, (5., 5.))
grid = ImageGrid(fig, 111, # similar to subplot(111)
nrows_ncols = (2, 16), # creates a 2x16 grid of axes
axes_pad=0.1, # pad between axes in inch.
)
for c in range(32):
grid[c].imshow(fillim(c),cmap=plt.cm.gray)
plt.show()
#todo: refactor
def plotConvImage():
input=floatX(trX[0:784])
out=conv2d(input, w, border_mode='full')
out=out[0,0,0:28,0:28].eval()
fig = plt.figure(1, (5., 5.))
grid = ImageGrid(fig, 111, # similar to subplot(111)
nrows_ncols = (2, 16), # creates a 2x16 grid of axes
axes_pad=0.1, # pad between axes in inch.
)
grid[0].imshow(out,cmap=plt.cm.gray)
plt.show()
| mit |
prechelt/pyth | pyth/encodings/symbol.py | 1 | 4338 | """
Maps Symbol typeface to Unicode, extracted from http://en.wikipedia.org/wiki/Symbol_(typeface)
"""
from __future__ import absolute_import
import codecs
import six
from six.moves import map
decodeTable = {
32: 32,
33: 33, 34: 8704, 35: 35, 36: 8707, 37: 37, 38: 38, 39: 8717, 40: 40, 41: 41, 42: 42, 43: 43, 44: 44, 45: 45, 46: 46,
47: 47, 48: 48, 49: 49, 50: 50, 51: 51, 52: 52, 53: 53, 54: 54, 55: 55, 56: 56, 57: 57, 58: 58, 59: 59, 60: 60, 61: 61,
62: 62, 63: 63, 64: 8773, 65: 913, 66: 914, 67: 935, 68: 916, 69: 917, 70: 934, 71: 915, 72: 919, 73: 921, 74: 977,
75: 922, 76: 923, 77: 924, 78: 925, 79: 927, 80: 928, 81: 920, 82: 929, 83: 931, 84: 932, 85: 933, 86: 962, 87: 937,
88: 926, 89: 936, 90: 918, 91: 91, 92: 8756, 93: 93, 94: 8869, 95: 95, 96: 63717, 97: 945, 98: 946, 99: 967, 100: 948,
101: 949, 102: 981, 103: 947, 104: 951, 105: 953, 106: 966, 107: 954, 108: 955, 109: 956, 110: 957, 111: 959, 112: 960,
113: 952, 114: 961, 115: 963, 116: 964, 117: 965, 118: 982, 119: 969, 120: 958, 121: 968, 122: 950, 123: 123, 124: 124,
125: 125, 126: 126, 160: 8364, 161: 978, 162: 697, 163: 8804, 164: 8260, 165: 8734, 166: 402, 167: 9827, 168: 9830,
169: 9829, 170: 9824, 171: 8596, 172: 8592, 173: 8593, 174: 8594, 175: 8595, 176: 176, 177: 177, 178: 698, 179: 8805,
180: 215, 181: 8733, 182: 8706, 183: 8226, 184: 247, 185: 8800, 186: 8801, 187: 8776, 188: 8230, 189: 9168, 190: 9135,
191: 8629, 192: 8501, 193: 8465, 194: 8476, 195: 8472, 196: 8855, 197: 8853, 198: 8709, 199: 8745, 200: 8746, 201: 8835,
202: 8839, 203: 8836, 204: 8834, 205: 8838, 206: 8712, 207: 8713, 208: 8736, 209: 8711, 210: 174, 211: 169, 212: 8482,
213: 8719, 214: 8730, 215: 8901, 216: 172, 217: 8743, 218: 8744, 219: 8660, 220: 8656, 221: 8657, 222: 8658, 223: 8659,
224: 9674, 225: 12296, 226: 174, 227: 169, 228: 8482, 229: 8721, 230: 9115, 231: 9116, 232: 9117, 233: 9121, 234: 9122,
235: 9123, 236: 9127, 237: 9128, 238: 9129, 239: 9130, 241: 12297, 242: 8747, 243: 8992, 244: 9134, 245: 8993, 246: 9118,
247: 9119, 248: 9120, 249: 9124, 250: 9125, 251: 9126, 252: 9131, 253: 9132, 254: 9133}
encodeTable = dict((v, k) for (k, v) in six.iteritems(decodeTable))
ERROR_STRING = "Ordinal not in known range 32-254"
def symbol_decode(input, errors='strict'):
chars = []
#for (i, c) in enumerate(input):
for i in range(len(input)):
try:
chars.append(decodeTable[six.indexbytes(input, i)])
except KeyError:
if errors == 'replace':
chars.append(ord(u'?'))
else:
raise UnicodeDecodeError("symbol", input, i, i+1, ERROR_STRING)
return (u"".join(map(six.unichr, chars)), len(input))
def symbol_encode(input, errors='strict'):
chars = []
for (i, c) in enumerate(input):
try:
chars.append(encodeTable[ord(c)])
except KeyError:
if errors == 'replace':
chars.append(ord('?'))
else:
raise UnicodeEncodeError("symbol", input, i, i+1, ERROR_STRING)
return ("".join(map(chr, chars)), len(input))
### Codec APIs
class Codec(codecs.Codec):
def encode(self, input,errors='strict'):
return symbol_encode(input, errors)
def decode(self, input,errors='strict'):
return symbol_decode(input, errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
try:
return symbol_encode(input)[0]
except UnicodeEncodeError:
raise ValueError(ERROR_STRING)
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
try:
return symbol_decode(input)[0]
except UnicodeDecodeError:
raise ValueError(ERROR_STRING)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
info = codecs.CodecInfo(
name='symbol',
encode=symbol_encode,
decode=symbol_decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
def search(name):
if name == info.name:
return info
return None
codecs.register(search)
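# --- Illustrative usage sketch (appended; assumes Python 2, which this
# module targets) ---
if __name__ == '__main__':
    # Greek capital Alpha (U+0391) is stored as byte 65 ('A') in the Symbol font.
    assert codecs.encode(u'\u0391', 'symbol') == 'A'
    assert codecs.decode('A', 'symbol') == u'\u0391'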
| mit |
hobarrera/django | tests/template_tests/syntax_tests/test_numpy.py | 353 | 1429 | import warnings
from unittest import skipIf
from django.test import SimpleTestCase
from ..utils import setup
try:
import numpy
except ImportError:
numpy = False
@skipIf(numpy is False, "Numpy must be installed to run these tests.")
class NumpyTests(SimpleTestCase):
# Ignore numpy deprecation warnings (#23890)
warnings.filterwarnings(
"ignore",
"Using a non-integer number instead of an "
"integer will result in an error in the future",
DeprecationWarning
)
@setup({'numpy-array-index01': '{{ var.1 }}'})
def test_numpy_array_index01(self):
"""
Numpy's array-index syntax allows a template to access a certain
item of a subscriptable object.
"""
output = self.engine.render_to_string(
'numpy-array-index01',
{'var': numpy.array(["first item", "second item"])},
)
self.assertEqual(output, 'second item')
@setup({'numpy-array-index02': '{{ var.5 }}'})
def test_numpy_array_index02(self):
"""
Fail silently when the array index is out of range.
"""
output = self.engine.render_to_string(
'numpy-array-index02',
{'var': numpy.array(["first item", "second item"])},
)
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
| bsd-3-clause |
hronoses/vispy | vispy/app/inputhook.py | 21 | 2446 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Support for interactive mode to allow VisPy's event loop to be run alongside
a console terminal, without using threads. This code relies on inputhooks
built-in to the Python interpreter, and supports IPython too. The underlying
inputhook implementation is from IPython 3.x.
Note that IPython notebook integration is not supported, as the browser does
not use Python's PyOS_InputHook functionality.
"""
from ..ext.ipy_inputhook import inputhook_manager, InputHookBase, stdin_ready
def set_interactive(enabled=True, app=None):
"""Activate the IPython hook for VisPy. If the app is not specified, the
default is used.
"""
if enabled:
inputhook_manager.enable_gui('vispy', app)
else:
inputhook_manager.disable_gui()
@inputhook_manager.register('vispy')
class VisPyInputHook(InputHookBase):
"""Implementation of an IPython 3.x InputHook for VisPy. This is loaded
by default when you call vispy.app.run() in a console-based interactive
session, but you can also trigger it manually by importing this module
then typing:
>>> %enable_gui vispy
"""
def enable(self, app=None):
"""Activate event loop integration with this VisPy application.
Parameters
----------
app : instance of Application
The VisPy application that's being used. If None, then the
default application is retrieved.
Notes
-----
This methods sets the ``PyOS_InputHook`` to this implementation,
which allows Vispy to integrate with terminal-based applications
running in interactive mode (Python or IPython).
"""
from .. import app as _app
self.app = app or _app.use_app()
self.manager.set_inputhook(self._vispy_inputhook)
return app
def _vispy_inputhook(self):
try:
while not stdin_ready():
self.app.process_events()
# we need to wait out on the event loop to prevent CPU stress
# but not wait too much, to maintain fluidity.
# refer https://github.com/vispy/vispy/issues/945
# for more context.
self.app.sleep(duration_sec=0.03)
except KeyboardInterrupt:
pass
return 0
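# --- Illustrative note (added) ---
# In a plain (non-IPython) interactive session the hook can also be toggled
# directly; a hypothetical session:
#
#   >>> from vispy.app.inputhook import set_interactive
#   >>> set_interactive()        # VisPy events are now processed while idle
#   >>> set_interactive(False)   # restore the default input hook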
| bsd-3-clause |
mbauskar/tele-frappe | frappe/model/docfield.py | 61 | 1451 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""docfield utililtes"""
import frappe
def rename(doctype, fieldname, newname):
"""rename docfield"""
df = frappe.db.sql("""select * from tabDocField where parent=%s and fieldname=%s""",
(doctype, fieldname), as_dict=1)
if not df:
return
df = df[0]
if frappe.db.get_value('DocType', doctype, 'issingle'):
update_single(df, newname)
else:
update_table(df, newname)
update_parent_field(df, newname)
def update_single(f, new):
"""update in tabSingles"""
frappe.db.begin()
frappe.db.sql("""update tabSingles set field=%s where doctype=%s and field=%s""",
(new, f['parent'], f['fieldname']))
frappe.db.commit()
def update_table(f, new):
"""update table"""
query = get_change_column_query(f, new)
if query:
frappe.db.sql(query)
def update_parent_field(f, new):
"""update 'parentfield' in tables"""
if f['fieldtype']=='Table':
frappe.db.begin()
frappe.db.sql("""update `tab%s` set parentfield=%s where parentfield=%s""" \
% (f['options'], '%s', '%s'), (new, f['fieldname']))
frappe.db.commit()
def get_change_column_query(f, new):
"""generate change fieldname query"""
desc = frappe.db.sql("desc `tab%s`" % f['parent'])
for d in desc:
if d[0]== f['fieldname']:
return 'alter table `tab%s` change `%s` `%s` %s' % \
(f['parent'], f['fieldname'], new, d[1]) | mit |
BellScurry/gem5-fault-injection | configs/ruby/MESI_Three_Level.py | 18 | 12369 | # Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009,2015 Advanced Micro Devices, Inc.
# Copyright (c) 2013 Mark D. Hill and David A. Wood
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann
# Nilay Vaish
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology
from Ruby import send_evicts
#
# Declare caches used by the protocol
#
class L0Cache(RubyCache): pass
class L1Cache(RubyCache): pass
class L2Cache(RubyCache): pass
def define_options(parser):
parser.add_option("--num-clusters", type = "int", default = 1,
help = "number of clusters in a design in which there are shared\
caches private to clusters")
return
def create_system(options, full_system, system, dma_ports, ruby_system):
if buildEnv['PROTOCOL'] != 'MESI_Three_Level':
fatal("This script requires the MESI_Three_Level protocol to be\
built.")
cpu_sequencers = []
#
# The ruby network creation expects the list of nodes in the system to be
# consistent with the NetDest list. Therefore the l1 controller nodes
# must be listed before the directory nodes and directory nodes before
# dma nodes, etc.
#
l0_cntrl_nodes = []
l1_cntrl_nodes = []
l2_cntrl_nodes = []
dir_cntrl_nodes = []
dma_cntrl_nodes = []
assert (options.num_cpus % options.num_clusters == 0)
num_cpus_per_cluster = options.num_cpus / options.num_clusters
assert (options.num_l2caches % options.num_clusters == 0)
num_l2caches_per_cluster = options.num_l2caches / options.num_clusters
l2_bits = int(math.log(num_l2caches_per_cluster, 2))
block_size_bits = int(math.log(options.cacheline_size, 2))
l2_index_start = block_size_bits + l2_bits
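# (added for clarity) the low block_size_bits of an address select the byte
# within a cache line and, by default, the next l2_bits select which L2 bank
# of the cluster owns the line, so the L2 set index starts at l2_index_start.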
#
# Must create the individual controllers before the network to ensure the
# controller constructors are called before the network constructor
#
for i in xrange(options.num_clusters):
for j in xrange(num_cpus_per_cluster):
#
# First create the Ruby objects associated with this cpu
#
l0i_cache = L0Cache(size = '4096B', assoc = 1, is_icache = True,
start_index_bit = block_size_bits,
replacement_policy = LRUReplacementPolicy())
l0d_cache = L0Cache(size = '4096B', assoc = 1, is_icache = False,
start_index_bit = block_size_bits,
replacement_policy = LRUReplacementPolicy())
# the ruby random tester reuses num_cpus to specify the
# number of cpu ports connected to the tester object, which
# is stored in system.cpu. because there is only ever one
# tester object, num_cpus is not necessarily equal to the
# size of system.cpu; therefore if len(system.cpu) == 1
# we use system.cpu[0] to set the clk_domain, thereby ensuring
# we don't index off the end of the cpu list.
if len(system.cpu) == 1:
clk_domain = system.cpu[0].clk_domain
else:
clk_domain = system.cpu[i].clk_domain
l0_cntrl = L0Cache_Controller(
version = i * num_cpus_per_cluster + j, Icache = l0i_cache,
Dcache = l0d_cache, send_evictions = send_evicts(options),
clk_domain = clk_domain, ruby_system = ruby_system)
cpu_seq = RubySequencer(version = i * num_cpus_per_cluster + j,
icache = l0i_cache,
clk_domain = clk_domain,
dcache = l0d_cache,
ruby_system = ruby_system)
l0_cntrl.sequencer = cpu_seq
l1_cache = L1Cache(size = options.l1d_size,
assoc = options.l1d_assoc,
start_index_bit = block_size_bits,
is_icache = False)
l1_cntrl = L1Cache_Controller(
version = i * num_cpus_per_cluster + j,
cache = l1_cache, l2_select_num_bits = l2_bits,
cluster_id = i, ruby_system = ruby_system)
exec("ruby_system.l0_cntrl%d = l0_cntrl"
% ( i * num_cpus_per_cluster + j))
exec("ruby_system.l1_cntrl%d = l1_cntrl"
% ( i * num_cpus_per_cluster + j))
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(cpu_seq)
l0_cntrl_nodes.append(l0_cntrl)
l1_cntrl_nodes.append(l1_cntrl)
# Connect the L0 and L1 controllers
l0_cntrl.mandatoryQueue = MessageBuffer()
l0_cntrl.bufferToL1 = MessageBuffer(ordered = True)
l1_cntrl.bufferFromL0 = l0_cntrl.bufferToL1
l0_cntrl.bufferFromL1 = MessageBuffer(ordered = True)
l1_cntrl.bufferToL0 = l0_cntrl.bufferFromL1
# Connect the L1 controllers and the network
l1_cntrl.requestToL2 = MessageBuffer()
l1_cntrl.requestToL2.master = ruby_system.network.slave
l1_cntrl.responseToL2 = MessageBuffer()
l1_cntrl.responseToL2.master = ruby_system.network.slave
l1_cntrl.unblockToL2 = MessageBuffer()
l1_cntrl.unblockToL2.master = ruby_system.network.slave
l1_cntrl.requestFromL2 = MessageBuffer()
l1_cntrl.requestFromL2.slave = ruby_system.network.master
l1_cntrl.responseFromL2 = MessageBuffer()
l1_cntrl.responseFromL2.slave = ruby_system.network.master
for j in xrange(num_l2caches_per_cluster):
l2_cache = L2Cache(size = options.l2_size,
assoc = options.l2_assoc,
start_index_bit = l2_index_start)
l2_cntrl = L2Cache_Controller(
version = i * num_l2caches_per_cluster + j,
L2cache = l2_cache, cluster_id = i,
transitions_per_cycle = options.ports,
ruby_system = ruby_system)
exec("ruby_system.l2_cntrl%d = l2_cntrl"
% (i * num_l2caches_per_cluster + j))
l2_cntrl_nodes.append(l2_cntrl)
# Connect the L2 controllers and the network
l2_cntrl.DirRequestFromL2Cache = MessageBuffer()
l2_cntrl.DirRequestFromL2Cache.master = ruby_system.network.slave
l2_cntrl.L1RequestFromL2Cache = MessageBuffer()
l2_cntrl.L1RequestFromL2Cache.master = ruby_system.network.slave
l2_cntrl.responseFromL2Cache = MessageBuffer()
l2_cntrl.responseFromL2Cache.master = ruby_system.network.slave
l2_cntrl.unblockToL2Cache = MessageBuffer()
l2_cntrl.unblockToL2Cache.slave = ruby_system.network.master
l2_cntrl.L1RequestToL2Cache = MessageBuffer()
l2_cntrl.L1RequestToL2Cache.slave = ruby_system.network.master
l2_cntrl.responseToL2Cache = MessageBuffer()
l2_cntrl.responseToL2Cache.slave = ruby_system.network.master
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
# Run each of the ruby memory controllers at a ratio of the frequency of
# the ruby system
# clk_divider value is a fix to pass regression.
ruby_system.memctrl_clk_domain = DerivedClockDomain(
clk_domain = ruby_system.clk_domain, clk_divider = 3)
for i in xrange(options.num_dirs):
#
# Create the Ruby objects associated with the directory controller
#
dir_size = MemorySize('0B')
dir_size.value = mem_module_size
dir_cntrl = Directory_Controller(version = i,
directory = RubyDirectoryMemory(version = i, size = dir_size),
transitions_per_cycle = options.ports,
ruby_system = ruby_system)
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
# Connect the directory controllers and the network
dir_cntrl.requestToDir = MessageBuffer()
dir_cntrl.requestToDir.slave = ruby_system.network.master
dir_cntrl.responseToDir = MessageBuffer()
dir_cntrl.responseToDir.slave = ruby_system.network.master
dir_cntrl.responseFromDir = MessageBuffer()
dir_cntrl.responseFromDir.master = ruby_system.network.slave
dir_cntrl.responseFromMemory = MessageBuffer()
for i, dma_port in enumerate(dma_ports):
#
# Create the Ruby objects associated with the dma controller
#
dma_seq = DMASequencer(version = i, ruby_system = ruby_system)
dma_cntrl = DMA_Controller(version = i,
dma_sequencer = dma_seq,
transitions_per_cycle = options.ports,
ruby_system = ruby_system)
exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
dma_cntrl_nodes.append(dma_cntrl)
# Connect the dma controller to the network
dma_cntrl.mandatoryQueue = MessageBuffer()
dma_cntrl.responseFromDir = MessageBuffer(ordered = True)
dma_cntrl.responseFromDir.slave = ruby_system.network.master
dma_cntrl.requestToDir = MessageBuffer()
dma_cntrl.requestToDir.master = ruby_system.network.slave
all_cntrls = l0_cntrl_nodes + \
l1_cntrl_nodes + \
l2_cntrl_nodes + \
dir_cntrl_nodes + \
dma_cntrl_nodes
# Create the io controller and the sequencer
if full_system:
io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system)
ruby_system._io_port = io_seq
io_controller = DMA_Controller(version = len(dma_ports),
dma_sequencer = io_seq,
ruby_system = ruby_system)
ruby_system.io_controller = io_controller
# Connect the dma controller to the network
io_controller.mandatoryQueue = MessageBuffer()
io_controller.responseFromDir = MessageBuffer(ordered = True)
io_controller.responseFromDir.slave = ruby_system.network.master
io_controller.requestToDir = MessageBuffer()
io_controller.requestToDir.master = ruby_system.network.slave
all_cntrls = all_cntrls + [io_controller]
ruby_system.network.number_of_virtual_networks = 3
topology = create_topology(all_cntrls, options)
return (cpu_sequencers, dir_cntrl_nodes, topology)
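# (added note) The triple returned here - per-CPU sequencers, directory
# controllers and the network topology - is what the generic Ruby setup code
# consumes when it instantiates this protocol.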
| bsd-3-clause |
sparsebase/stromx | opencv/methodgenerator.py | 1 | 61358 | # -*- coding: utf-8 -*-
import document
import interface
import package
import rulegenerator
import testgenerator
class ArgumentVisitorBase(interface.ArgumentVisitor):
def visitReturnValue(self, retValue):
self.visitAllocation(retValue)
class SingleArgumentVisitor(ArgumentVisitorBase):
"""
Visitor which handles compound arguments and calls the accept methods
of each component of the compound arguments, i.e. derived visitors do not
have to care about compound arguments; they only have to consider "single"
arguments.
"""
def visitCompound(self, compound):
for arg in compound.args:
if not arg is None:
arg.accept(self)
class CollectVisitor(SingleArgumentVisitor):
"""
Visitor which stores all arguments it visits in the common set self.args.
"""
def __init__(self):
self.args = set()
def visitInput(self, inputArg):
self.args.add(inputArg)
def visitParameter(self, parameter):
self.args.add(parameter)
def visitConstant(self, const):
self.args.add(const)
def visitRefInput(self, refInput):
self.args.add(refInput)
def visitAllocation(self, allocation):
self.args.add(allocation)
def visitInputOutput(self, inputOutput):
self.args.add(inputOutput)
def visitOutput(self, output):
self.args.add(output)
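# Note (added for clarity): CollectVisitor only gathers every argument it
# sees into the set above; MethodGenerator.visitAll() then removes
# duplicates by identifier before handing the arguments to other visitors.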
class MethodGenerator(object):
"""
Abstract base class of all generators which output files depending on
operators. It provides utility functions used by the derived classes.
"""
class CollectParametersVisitor(SingleArgumentVisitor):
def __init__(self):
self.params = []
def visitParameter(self, parameter):
self.params.append(parameter)
class DocVisitor(SingleArgumentVisitor):
"""
Visitor which holds a document.
"""
def __init__(self, doc):
self.doc = doc
def __init__(self):
self.p = None
self.m = None
self.doc = None
def save(self, package, method, printResult = False):
"""
Writes the output of the generator for the input method to the current
document and optionally prints it to the standard output.
"""
self.p = package
self.m = method
self.doc = document.Document()
self.optionParam = self.createOptionParameter()
self.generate()
if printResult:
print self.doc.string()
def createOptionParameter(self):
"""
Creates and returns an enum parameter which provides one value for
each option of the method.
"""
p = package.EnumParameter("dataFlow", "Data flow")
p.isInit = True
for opt in self.m.options:
desc = package.EnumDescription(opt.ident.constant(), str(opt.name))
desc.name = opt.name
p.descriptions.append(desc)
return p
def visitAll(self, visitor, visitOptionParam = True):
"""
Collects all arguments of all options and removes duplicates (i.e.
arguments with common identifier). Then the visitor visits all
remaining arguments and the option parameter if the corresponding flag is
set to true.
"""
v = CollectVisitor()
for opt in self.m.options:
for arg in opt.args:
arg.accept(v)
args = v.args
argIdents = set()
filteredArgs = set()
for arg in args:
if str(arg.ident) not in argIdents:
argIdents.add(str(arg.ident))
filteredArgs.add(arg)
for arg in sorted(filteredArgs, key=lambda arg: str(arg.ident)):
arg.accept(visitor)
if visitOptionParam and self.optionParam:
self.optionParam.accept(visitor)
def visitOption(self, opt, visitor):
"""
The visitor visits all arguments of the given option.
"""
for arg in opt.args:
arg.accept(visitor)
def namespaceEnter(self):
"""
Enters the namespace of the package the method belongs to.
"""
self.doc.namespaceEnter("stromx")
self.doc.namespaceEnter(self.p.ident)
def namespaceExit(self):
"""
Exits the package namespace.
"""
self.doc.namespaceExit(self.p.ident)
self.doc.namespaceExit("stromx")
self.doc.blank()
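# Note (added for clarity): concrete generators are driven through save(),
# e.g. (hypothetically) OpHeaderGenerator().save(somePackage, someMethod)
# writes stromx/<package>/<Method>.h for that method.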
class OpHeaderGenerator(MethodGenerator):
"""
Generates the header of a method operator.
"""
class ConnectorEnumVisitor(SingleArgumentVisitor):
"""
Exports the enumeration of the IDs of all visited input and output
connectors.
"""
def __init__(self):
self.connectors = set()
def visitRefInput(self, refInputArg):
self.connectors.add(refInputArg)
def visitInput(self, inputArg):
self.connectors.add(inputArg)
def visitInputOutput(self, arg):
self.connectors.add(arg)
def visitOutput(self, output):
self.connectors.add(output)
def visitAllocation(self, allocation):
self.connectors.add(allocation)
def export(self, doc):
connectorIds = [i.ident.constant() for i in self.connectors]
doc.enum("ConnectorId", set(connectorIds))
class ParameterEnumVisitor(MethodGenerator.CollectParametersVisitor):
"""
Exports the enumeration of the parameter IDs of all visited parameters.
"""
def export(self, doc):
paramIds = [p.ident.constant() for p in self.params]
doc.enum("ParameterId", set(paramIds))
class DataMemberVisitor(MethodGenerator.DocVisitor):
"""
Exports class members for the values of all visited parameters.
"""
def visitParameter(self, parameter):
l = "{0} {1};".format(parameter.dataType.concreteTypeId(),
parameter.ident.attribute())
self.doc.line(l)
class DescriptionsVisitor(MethodGenerator.DocVisitor):
"""
Exports class members for the parameter description of all visited
parameters.
"""
def visitParameter(self, parameter):
if parameter.argType == package.ArgType.PLAIN:
self.doc.line(("runtime::Parameter* m_{0}Parameter;"
).format(parameter.ident))
elif parameter.argType == package.ArgType.ENUM:
self.doc.line(("runtime::EnumParameter* m_{0}Parameter;"
).format(parameter.ident))
elif parameter.argType == package.ArgType.NUMERIC:
self.doc.line(("runtime::NumericParameter<{1}>* m_{0}Parameter;"
).format(parameter.ident,
parameter.dataType.typeId()))
elif parameter.argType == package.ArgType.MATRIX:
self.doc.line(("runtime::MatrixParameter* m_{0}Parameter;"
).format(parameter.ident))
else:
assert(False)
def visitOutput(self, arg):
self.visitInput(arg)
def visitInputOutput(self, arg):
self.visitInput(arg)
def visitAllocation(self, arg):
self.visitInput(arg)
def visitRefInput(self, arg):
self.visitInput(arg)
def visitInput(self, arg):
if arg.argType == package.ArgType.MATRIX:
self.doc.line((
"runtime::MatrixDescription* m_{0}Description;"
).format(arg.ident))
else:
self.doc.line((
"runtime::Description* m_{0}Description;"
).format(arg.ident))
class EnumParameterIdVisitor(MethodGenerator.DocVisitor):
"""
Exports enumerations for the IDs of all visited enumeration parameters.
"""
def visitParameter(self, parameter):
if parameter.argType != package.ArgType.ENUM:
return
keys = []
for desc in parameter.descriptions:
keys.append(desc.ident)
enumName = "{0}Id".format(parameter.ident.className())
self.doc.enum(enumName, keys)
class EnumConversionDeclVisitor(MethodGenerator.DocVisitor):
"""
Exports declarations of conversion functions for each visited
enumeration parameter.
"""
def visitParameter(self, parameter):
if parameter.argType != package.ArgType.ENUM:
return
name = parameter.ident.className()
l = "int convert{0}(const runtime::Enum & value);".format(name)
self.doc.line(l)
def generate(self):
self.__includeGuardEnter()
self.__includes()
self.namespaceEnter()
self.__classEnter()
self.__public()
v = OpHeaderGenerator.EnumParameterIdVisitor(self.doc)
self.visitAll(v)
v = OpHeaderGenerator.ConnectorEnumVisitor()
self.visitAll(v)
v.export(self.doc)
v = OpHeaderGenerator.ParameterEnumVisitor()
self.visitAll(v)
v.export(self.doc)
self.__constructor()
self.__kernelOverloads()
self.__private()
self.__statics()
self.__setupFunctions()
v = OpHeaderGenerator.EnumConversionDeclVisitor(self.doc)
self.visitAll(v, False)
self.doc.blank()
v = OpHeaderGenerator.DataMemberVisitor(self.doc)
self.visitAll(v)
v = OpHeaderGenerator.DescriptionsVisitor(self.doc)
self.visitAll(v)
self.__classExit()
self.namespaceExit()
self.__includeGuardExit()
filename = "stromx/{0}/{1}.h".format(self.p.ident,
self.m.ident.className())
with file(filename, "w") as f:
f.write(self.doc.string())
def __includes(self):
self.doc.line('#include "stromx/{0}/Config.h"'.format(self.p.ident))
self.doc.line('#include <stromx/cvsupport/Matrix.h>')
self.doc.line('#include <stromx/runtime/Enum.h>')
self.doc.line('#include <stromx/runtime/EnumParameter.h>')
self.doc.line('#include <stromx/runtime/List.h>')
self.doc.line('#include <stromx/runtime/MatrixDescription.h>')
self.doc.line('#include <stromx/runtime/MatrixParameter.h>')
self.doc.line('#include <stromx/runtime/NumericParameter.h>')
self.doc.line('#include <stromx/runtime/OperatorException.h>')
self.doc.line('#include <stromx/runtime/OperatorKernel.h>')
self.doc.line('#include <stromx/runtime/Primitive.h>')
self.doc.blank()
def __includeGuardEnter(self):
self.doc.line("#ifndef {0}".format(self.__includeGuard()))
self.doc.line("#define {0}".format(self.__includeGuard()))
self.doc.blank()
def __classEnter(self):
self.doc.line("class {0} {1} : public runtime::OperatorKernel".format(
self.__apiDecl(), self.m.ident.className()))
self.doc.line("{")
self.doc.increaseIndent()
def __public(self):
self.doc.label("public")
def __constructor(self):
self.doc.line("{0}();".format(self.m.ident.className()))
def __kernelOverloads(self):
self.doc.line("virtual OperatorKernel* clone() const "
"{{ return new {0}; }}".format(self.m.ident.className()))
self.doc.line("virtual void setParameter(const unsigned int id, "
"const runtime::Data& value);")
self.doc.line("virtual const runtime::DataRef getParameter("
"const unsigned int id) const;")
self.doc.line("void initialize();")
self.doc.line("virtual void execute(runtime::DataProvider& provider);")
self.doc.blank()
def __private(self):
self.doc.label("private")
def __statics(self):
self.doc.line("static const std::string PACKAGE;")
self.doc.line("static const runtime::Version VERSION;")
self.doc.line("static const std::string TYPE;")
self.doc.blank()
def __setupFunctions(self):
self.doc.line("const std::vector<const runtime::Parameter*> "
"setupInitParameters();")
self.doc.line("const std::vector<const runtime::Parameter*> "
"setupParameters();")
self.doc.line("const std::vector<const runtime::Description*> "
"setupInputs();")
self.doc.line("const std::vector<const runtime::Description*> "
"setupOutputs();")
self.doc.blank()
def __classExit(self):
self.doc.decreaseIndent()
self.doc.line("};")
def __includeGuardExit(self):
self.doc.line("#endif // {0}".format(self.__includeGuard()))
def __includeGuard(self):
return "STROMX_{0}_{1}_H".format(self.p.ident.upper(),
self.m.ident.upper())
def __apiDecl(self):
return "STROMX_{0}_API".format(self.p.ident.upper())
class OpImplGenerator(MethodGenerator):
"""
Generates the implementation of a method operator.
"""
class ParameterInitVisitor(MethodGenerator.CollectParametersVisitor):
"""
Exports the constructor initialization for all visited parameter data
members .
"""
def export(self, doc):
for i, p in enumerate(self.params):
defaultValue = p.default if p.default != None else ""
defaultValue = document.pythonToCpp(defaultValue)
init = "{0}({1})".format(p.ident.attribute(), defaultValue)
if i != len(self.params) - 1:
doc.line("{0},".format(init))
else:
doc.line(init)
class GetParametersVisitor(MethodGenerator.DocVisitor):
"""
Exports case sections which return the values of all visited
parameters.
"""
def visitParameter(self, parameter):
self.doc.label("case {0}".format(parameter.ident.constant()))
self.doc.line("return {0};".format(parameter.ident.attribute()))
class SetParametersVisitor(MethodGenerator.DocVisitor):
"""
Exports case sections which set the values of all visited parameters.
"""
def visitParameter(self, parameter):
l = ""
if parameter.argType == package.ArgType.PLAIN:
pass
elif parameter.argType == package.ArgType.ENUM:
l = ("cvsupport::checkEnumValue(castedValue, {0}Parameter, *this);"
).format(parameter.ident.attribute())
elif parameter.argType == package.ArgType.NUMERIC:
l = ("cvsupport::checkNumericValue(castedValue, {0}Parameter, *this);"
).format(parameter.ident.attribute())
elif parameter.argType == package.ArgType.MATRIX:
l = ("cvsupport::checkMatrixValue(castedValue, {0}Parameter, *this);"
).format(parameter.ident.attribute())
else:
assert(False)
self.__setParameterWithCheck(parameter, l)
def __setParameterWithCheck(self, parameter, check):
self.doc.label("case {0}".format(parameter.ident.constant()))
self.doc.scopeEnter()
self.doc.line(("const {0} & castedValue = runtime::data_cast<{1}>(value);"
).format(parameter.dataType.typeId(),
parameter.dataType.typeId()))
l = ("if(! castedValue.variant().isVariant({0}))".format(
parameter.dataType.variant()))
self.doc.line(l)
self.doc.scopeEnter()
l = 'throw runtime::WrongParameterType(parameter(id), *this);'
self.doc.line(l)
self.doc.scopeExit()
if check != "":
self.doc.line(check)
checkParams = rulegenerator.CheckParameterVisitor(self.doc,
parameter)
for rule in parameter.rules:
rule.accept(checkParams)
self.doc.line(("{0} = castedValue;"
).format(parameter.ident.attribute()))
self.doc.scopeExit()
self.doc.line("break;")
class SetupParametersVisitor(MethodGenerator.DocVisitor):
"""
Exports the allocation of the descriptions of all visited parameters.
"""
def __init__(self, doc, isInit = False):
super(OpImplGenerator.SetupParametersVisitor, self).__init__(doc)
self.isInit = isInit
def visitParameter(self, parameter):
if parameter.argType == package.ArgType.PLAIN:
self.__visitPlainParameter(parameter)
elif parameter.argType == package.ArgType.ENUM:
self.__visitEnumParameter(parameter)
elif parameter.argType == package.ArgType.MATRIX:
self.__visitMatrixParameter(parameter)
elif parameter.argType == package.ArgType.NUMERIC:
self.__visitNumericParameter(parameter)
else:
assert(False)
def __visitPlainParameter(self, parameter):
ident = "m_{0}Parameter".format(parameter.ident)
l = "{0} = new runtime::Parameter({1}, {2});"\
.format(ident, parameter.ident.constant(),
parameter.dataType.variant())
self.doc.line(l)
self.__accessMode(ident)
l = '{0}->setTitle(L_("{1}"));'.format(ident, parameter.name)
self.doc.line(l)
l = "parameters.push_back({0});".format(ident)
self.doc.line(l)
self.doc.blank()
def __visitEnumParameter(self, parameter):
ident = "m_{0}Parameter".format(parameter.ident)
l = ("{0} = new runtime::EnumParameter({1});"
).format(ident, parameter.ident.constant())
self.doc.line(l)
self.__accessMode(ident)
l = '{0}->setTitle(L_("{1}"));'.format(ident, parameter.name)
self.doc.line(l)
for desc in parameter.descriptions:
d = 'runtime::Enum({0})'.format(desc.ident)
l = '{0}->add(runtime::EnumDescription({1}, L_("{2}")));'\
.format(ident, d, desc.name)
self.doc.line(l)
l = "parameters.push_back({0});".format(ident)
self.doc.line(l)
self.doc.blank()
def __visitMatrixParameter(self, parameter):
ident = "m_{0}Parameter".format(parameter.ident)
l = "{0} = new runtime::MatrixParameter({1}, {2});"\
.format(ident, parameter.ident.constant(),
parameter.dataType.variant())
self.doc.line(l)
self.__accessMode(ident)
l = '{0}->setTitle(L_("{1}"));'.format(ident, parameter.name)
self.doc.line(l)
self.doc.line("{0}->setRows({1});".format(ident, parameter.rows))
self.doc.line("{0}->setCols({1});".format(ident, parameter.cols))
l = "parameters.push_back({0});".format(ident)
self.doc.line(l)
self.doc.blank()
def __visitNumericParameter(self, parameter):
ident = "m_{0}Parameter".format(parameter.ident)
l = ("{0} = new runtime::NumericParameter<{2}>({1});"
).format(ident, parameter.ident.constant(),
parameter.dataType.typeId())
self.doc.line(l)
self.__accessMode(ident)
l = '{0}->setTitle(L_("{1}"));'\
.format(ident, parameter.name)
self.doc.line(l)
if parameter.maxValue != None:
l = "{0}->setMax({1});".format(ident,
parameter.dataType.cast(parameter.maxValue))
self.doc.line(l)
if parameter.minValue != None:
l = "{0}->setMin({1});".format(ident,
parameter.dataType.cast(parameter.minValue))
self.doc.line(l)
if parameter.step != None:
l = "{0}->setStep({1});".format(ident,
parameter.dataType.cast(parameter.step))
self.doc.line(l)
l = "parameters.push_back({0});".format(ident)
self.doc.line(l)
self.doc.blank()
def __accessMode(self, ident):
if self.isInit:
accessMode = "NONE_WRITE"
else:
accessMode = "ACTIVATED_WRITE"
l = "{0}->setAccessMode(runtime::Parameter::{1});"\
.format(ident, accessMode)
self.doc.line(l)
class SetupOutputsVistor(MethodGenerator.DocVisitor):
"""
Exports the allocation of the descriptions of all visited outputs.
"""
def visitInputOutput(self, arg):
self.visitOutput(arg)
def visitOutput(self, output):
if output.argType == package.ArgType.PLAIN:
self.__setupDescription(output)
elif output.argType == package.ArgType.MATRIX:
self.__setupMatrixDescription(output)
else:
assert(False)
def visitAllocation(self, allocation):
self.visitOutput(allocation)
def __setupDescription(self, arg):
l = "runtime::Description* {0} = new runtime::Description({1}, {2});"\
.format(arg.ident, arg.ident.constant(),
arg.dataType.variant())
self.doc.line(l)
l = '{0}->setTitle(L_("{1}"));'.format(arg.ident, arg.name)
self.doc.line(l)
l = "outputs.push_back({0});".format(arg.ident)
self.doc.line(l)
self.doc.blank()
def __setupMatrixDescription(self, arg):
l = "runtime::MatrixDescription* {0} = new runtime::MatrixDescription({1}, {2});"\
.format(arg.ident, arg.ident.constant(),
arg.dataType.variant())
self.doc.line(l)
l = '{0}->setTitle(L_("{1}"));'.format(arg.ident, arg.name)
self.doc.line(l)
l = '{0}->setRows({1});'.format(arg.ident, arg.rows)
self.doc.line(l)
l = '{0}->setCols({1});'.format(arg.ident, arg.cols)
self.doc.line(l)
l = "outputs.push_back({0});".format(arg.ident)
self.doc.line(l)
self.doc.blank()
class SetupInputsVisitor(MethodGenerator.DocVisitor):
"""
Exports the allocation of the descriptions of all visited inputs.
"""
def visitOutput(self, arg):
if arg.argType == package.ArgType.PLAIN:
self.__setupDescription(arg, True)
elif arg.argType == package.ArgType.MATRIX:
self.__setupMatrixDescription(arg, True)
else:
assert(False)
def visitInput(self, arg):
if arg.argType == package.ArgType.PLAIN:
self.__setupDescription(arg, False)
elif arg.argType == package.ArgType.MATRIX:
self.__setupMatrixDescription(arg, False)
else:
assert(False)
def visitInputOutput(self, arg):
self.visitInput(arg)
def __setupDescription(self, arg, isOutput):
description = "{0}Description".format(arg.ident.attribute())
l = "{0} = new runtime::Description({1}, {2});"\
.format(description, arg.ident.constant(),
self.__getVariant(arg, isOutput))
self.doc.line(l)
l = '{0}->setTitle(L_("{1}"));'\
.format(description, arg.name)
self.doc.line(l)
l = "inputs.push_back({0});".format(description)
self.doc.line(l)
self.doc.blank()
def __setupMatrixDescription(self, arg, isOutput):
description = "{0}Description".format(arg.ident.attribute())
l = (
"{0} = new "
"runtime::MatrixDescription({1}, {2});"
).format(description, arg.ident.constant(),
self.__getVariant(arg, isOutput))
self.doc.line(l)
l = '{0}->setTitle("{1}");'.format(description, arg.name)
self.doc.line(l)
l = '{0}->setRows({1});'.format(description, arg.rows)
self.doc.line(l)
l = '{0}->setCols({1});'.format(description, arg.cols)
self.doc.line(l)
l = "inputs.push_back({0});".format(description)
self.doc.line(l)
self.doc.blank()
def __getVariant(self, arg, isOutput):
if isOutput:
return arg.dataType.canBeCreatedFromVariant()
else:
return arg.dataType.variant()
class InputMapperVisitor(MethodGenerator.DocVisitor):
"""
Exports input mappers for all visited inputs and outputs.
"""
def visitInput(self, arg):
self.__visit(arg)
def visitOutput(self, arg):
self.__visit(arg)
def visitInputOutput(self, arg):
self.__visit(arg)
def __visit(self, arg):
ident = arg.ident
constant = arg.ident.constant()
l = "runtime::Id2DataPair {0}InMapper({1});".format(ident, constant)
self.doc.line(l)
class ReceiveInputDataVisitor(SingleArgumentVisitor):
"""
Exports the receive input command for all visited inputs and outputs.
"""
def __init__(self):
self.line = ""
def visitInput(self, inputArg):
self.__visit(inputArg)
def visitOutput(self, output):
self.__visit(output)
def visitInputOutput(self, arg):
self.__visit(arg)
def export(self, doc):
if self.line != "":
doc.line("provider.receiveInputData({0});".format(self.line))
def __visit(self, arg):
if self.line == "":
self.line = "{0}InMapper".format(arg.ident)
else:
self.line += " && {0}InMapper".format(arg.ident)
class InDataVisitor(MethodGenerator.DocVisitor):
"""
Exports stromx::Data* variables for all visited inputs and outputs.
"""
def visitInput(self, inputArg):
self.doc.line(("const runtime::Data* "
"{0}Data = 0;").format(inputArg.ident))
def visitInputOutput(self, arg):
self.visitOutput(arg)
def visitOutput(self, output):
self.doc.line("runtime::Data* {0}Data = 0;".format(output.ident))
class AccessVisitor(MethodGenerator.DocVisitor):
"""
Exports data accessors for all visited inputs and outputs.
"""
def visitInput(self, inputArg):
self.doc.line(("runtime::ReadAccess "
"{0}ReadAccess;").format(inputArg.ident))
def visitInputOutput(self, arg):
self.visitOutput(arg)
def visitOutput(self, output):
mapper = "{0}InMapper".format(output.ident)
data = "{0}Data".format(output.ident)
self.doc.line(("runtime::DataContainer inContainer = "
"{0}.data();").format(mapper))
self.doc.line("runtime::WriteAccess writeAccess(inContainer);")
self.doc.line("{0} = &writeAccess.get();".format(data))
class CopyWriteAccessVisitor(SingleArgumentVisitor):
"""
Exports the if-conditions which either create a read access or
reference an existing write access to read each visited input.
"""
def __init__(self):
self.output = None
self.inputs = []
def visitInput(self, inputArg):
self.inputs.append(inputArg)
def visitInputOutput(self, arg):
self.visitOutput(arg)
def visitOutput(self, output):
assert(self.output == None)
self.output = output
def export(self, doc):
# no danger of reading a write access if there is no output (i.e.
# no write access)
if self.output == None:
for i in self.inputs:
l = ("{0}ReadAccess = runtime::ReadAccess("
"{0}InMapper.data());").format(i.ident)
doc.line(l)
l = "{0}Data = &{0}ReadAccess.get();".format(i.ident)
doc.line(l)
doc.blank()
return
# check if a read access refers to the same data as the write
            # access and handle this situation accordingly
for i in self.inputs:
l = "if({0}InMapper.data() == inContainer)".format(i.ident)
doc.line(l)
doc.scopeEnter()
if i.inPlace:
doc.line("srcData = &writeAccess.get();")
else:
message = '"Can not operate in place."'
ex = (
"throw runtime::InputError({0}, *this, {1});"
).format(i.ident.constant(), message)
doc.line(ex)
doc.scopeExit()
doc.line("else")
doc.scopeEnter()
l = ("{0}ReadAccess = runtime::ReadAccess("
"{0}InMapper.data());").format(i.ident)
doc.line(l)
l = "{0}Data = &{0}ReadAccess.get();".format(i.ident)
doc.line(l)
doc.scopeExit()
doc.blank()
class CheckVariantVisitor(MethodGenerator.DocVisitor):
"""
Exports the variant check for each visited input.
"""
def visitInput(self, inputArg):
self.__visit(inputArg)
def visitInputOutput(self, arg):
self.__visit(arg)
def visitOutput(self, output):
self.__visit(output)
def __visit(self, arg):
l = (
"if(! {0}Data->variant().isVariant({1}Description->variant()))"
).format(arg.ident, arg.ident.attribute())
self.doc.line(l)
self.doc.scopeEnter()
l = (
'throw runtime::InputError({0}, *this, "Wrong input data '
'variant.");'
).format(arg.ident.constant())
self.doc.line(l)
self.doc.scopeExit()
class CastedDataVisitor(MethodGenerator.DocVisitor):
"""
Exports the cast to a concrete stromx data type for each visited
input and output.
"""
def visitInput(self, inputArg):
l = ("const {1}* {0}CastedData = "
"runtime::data_cast<{1}>({0}Data);").format(inputArg.ident,
inputArg.dataType.typeId())
self.doc.line(l)
def visitInputOutput(self, arg):
self.visitOutput(arg)
def visitOutput(self, output):
l = ("{1} * {0}CastedData = "
"runtime::data_cast<{1}>({0}Data);").format(output.ident,
output.dataType.typeId())
self.doc.line(l)
class CheckCastedDataVisitor(MethodGenerator.DocVisitor):
"""
        Exports the data check for the casted data of each visited input.
"""
def visitInput(self, inputArg):
self.__visit(inputArg)
def visitInputOutput(self, arg):
self.__visit(arg)
def visitOutput(self, output):
self.__visit(output)
def __visit(self, arg):
if arg.argType == package.ArgType.MATRIX:
l = (
"cvsupport::checkMatrixValue(*{0}CastedData, {1}Description, *this);"
).format(arg.ident, arg.ident.attribute())
self.doc.line(l)
else:
pass
class InitInVisitor(MethodGenerator.DocVisitor):
"""
Exports the initialization of the argument before the OpenCV
function is called.
"""
def visitConstant(self, arg):
self.__visit(arg)
def visitInputOutput(self, arg):
self.__visit(arg)
def visitOutput(self, output):
self.__visit(output)
def __visit(self, arg):
self.doc.document(arg.initIn)
class CvDataVisitor(MethodGenerator.DocVisitor):
"""
Exports the conversion to a native or OpenCV data type for each visited
argument.
"""
def visitInput(self, inputArg):
cvData = "{0} {1}CvData".format(inputArg.cvType.typeId(),
inputArg.ident)
castedData = "*{0}CastedData".format(inputArg.ident)
cast = inputArg.cvType.cast(castedData)
l = "{0} = {1};".format(cvData, cast)
self.doc.line(l)
def visitInputOutput(self, arg):
self.visitOutput(arg)
def visitOutput(self, inputArg):
cvData = "{0} {1}CvData".format(inputArg.cvType.typeId(),
inputArg.ident)
castedData = "*{0}CastedData".format(inputArg.ident)
cast = inputArg.cvType.cast(castedData)
l = "{0} = {1};".format(cvData, cast)
self.doc.line(l)
def visitAllocation(self, allocation):
cvData = "{0} {1}CvData;".format(allocation.cvType.typeId(),
allocation.ident)
self.doc.line(cvData)
def visitParameter(self, parameter):
if parameter.argType == package.ArgType.ENUM:
self.__visitEnumParameter(parameter)
else:
cvData = "{0} {1}CvData".format(parameter.cvType.typeId(),
parameter.ident)
castedData = parameter.cvType.cast(parameter.ident.attribute())
self.doc.line("{0} = {1};".format(cvData, castedData))
def __visitEnumParameter(self, parameter):
ident = parameter.ident
cvData = "{0} {1}CvData".format(parameter.cvType.typeId(),
ident)
castedData = "convert{0}({1})".format(ident.className(),
ident.attribute())
self.doc.line("{0} = {1};".format(cvData, castedData))
def visitRefInput(self, refInput):
cvData = "{0} {1}CvData".format(refInput.cvType.typeId(),
refInput.ident)
rhs = "{0}CvData".format(refInput.refArg.ident)
self.doc.line("{0} = {1};".format(cvData, rhs))
class MethodArgumentVisitor(ArgumentVisitorBase):
"""
Exports the argument of the OpenCV function for each visited argument.
"""
def __init__(self):
self.args = []
def visitInput(self, inputArg):
self.visit(inputArg)
def visitInputOutput(self, arg):
self.visitOutput(arg)
def visitOutput(self, output):
self.visit(output)
def visitAllocation(self, allocation):
self.visit(allocation)
def visitParameter(self, parameter):
self.visit(parameter)
def visitConstant(self, constant):
value = constant.value
value = document.pythonToCpp(value)
self.args.append(str(value))
def visitRefInput(self, refInput):
self.visit(refInput)
def visitReturnValue(self, retValue):
pass
def visit(self, arg):
self.args.append("{0}CvData".format(arg.ident))
def visitCompound(self, compound):
self.args.append(compound.create())
def export(self):
argStr = ""
for i, arg in enumerate(self.args):
argStr += arg
if i < len(self.args) - 1:
argStr += ", "
return argStr
class MethodReturnValueVisitor(ArgumentVisitorBase):
"""
        Exports the assignment of the OpenCV function's return value for each visited argument.
"""
def __init__(self):
self.returnValue = ""
def visitReturnValue(self, retVal):
self.returnValue = "{0}CvData = ".format(retVal.ident)
def export(self):
return self.returnValue
class OutDataVisitor(MethodGenerator.DocVisitor):
"""
Exports the wrapping of the result data into a data container for
each visited output or allocation.
"""
def visitInputOutput(self, arg):
self.visitOutput(arg)
def visitOutput(self, output):
l = "runtime::DataContainer {0}OutContainer = inContainer;".format(output.ident)
self.doc.line(l)
l = ("runtime::Id2DataPair {0}OutMapper({1}, "
"{0}OutContainer);").format(output.ident, output.ident.constant());
self.doc.line(l)
def visitAllocation(self, allocation):
dataType = allocation.dataType.typeId()
ident = allocation.ident
cvData = "{0}CvData".format(ident)
newObject = allocation.dataType.allocate(cvData)
l = "{0}* {1}CastedData = {2};".format(dataType, ident, newObject)
self.doc.line(l)
l = ("runtime::DataContainer {0}OutContainer = "
"runtime::DataContainer({0}CastedData);").format(ident)
self.doc.line(l)
l = ("runtime::Id2DataPair {0}OutMapper({1}, "
"{0}OutContainer);").format(ident, allocation.ident.constant())
self.doc.line(l)
class InitOutVisitor(MethodGenerator.DocVisitor):
"""
Exports the initialization of the output argument after the OpenCV
function is called.
"""
def visitAllocation(self, allocation):
self.doc.document(allocation.initOut)
class SendOutputDataVisitor(SingleArgumentVisitor):
"""
Exports the send output command for all visited outputs.
"""
def __init__(self):
self.line = ""
def visitAllocation(self, output):
self.__visit(output)
def visitOutput(self, output):
self.__visit(output)
def visitInputOutput(self, arg):
self.__visit(arg)
def export(self, doc):
if self.line != "":
doc.line("provider.sendOutputData({0});".format(self.line))
def __visit(self, arg):
if self.line == "":
self.line = "{0}OutMapper".format(arg.ident)
else:
self.line += " && {0}OutMapper".format(arg.ident)
class EnumConversionDefVisitor(MethodGenerator.DocVisitor):
"""
Exports the function which converts an enumeration value to its
OpenCV value for each visited enumeration parameter.
"""
def __init__(self, doc, m):
super(OpImplGenerator.EnumConversionDefVisitor, self).__init__(doc)
self.m = m
def visitParameter(self, parameter):
if parameter.argType != package.ArgType.ENUM:
return
name = parameter.ident.className()
l = ("int {1}::convert{0}(const runtime::Enum & value)"
).format(name, self.m.ident.className())
self.doc.line(l)
self.doc.scopeEnter()
self.doc.line("switch(int(value))")
self.doc.scopeEnter()
for desc in parameter.descriptions:
self.doc.label("case {0}".format(desc.ident))
self.doc.line("return {0};".format(desc.cvIdent))
self.doc.label("default")
self.doc.line(("throw runtime::WrongParameterValue(parameter({0}),"
" *this);").format(parameter.ident.constant()))
self.doc.scopeExit()
self.doc.scopeExit()
self.doc.blank()
def generate(self):
self.__includes()
self.namespaceEnter()
self.__statics()
self.__constructor()
self.__getParameter()
self.__setParameter()
self.__setupInitParameters()
self.__setupParameters()
self.__setupInputs()
self.__setupOutputs()
self.__initialize()
self.__execute()
self.__convertEnumValues()
self.namespaceExit()
filename = "stromx/{0}/{1}.cpp".format(self.p.ident,
self.m.ident.className())
with file(filename, "w") as f:
f.write(self.doc.string())
def __includes(self):
cvModule = str(self.p.ident)[2:]
self.doc.line('#include "stromx/{0}/{1}.h"'\
.format(self.p.ident, self.m.ident.className()))
self.doc.blank()
self.doc.line('#include "stromx/{0}/Locale.h"'.format(self.p.ident))
self.doc.line('#include "stromx/{0}/Utility.h"'.format(self.p.ident))
self.doc.line('#include <stromx/cvsupport/Image.h>')
self.doc.line('#include <stromx/cvsupport/Matrix.h>')
self.doc.line('#include <stromx/cvsupport/Utilities.h>')
self.doc.line('#include <stromx/runtime/DataContainer.h>')
self.doc.line('#include <stromx/runtime/DataProvider.h>')
self.doc.line('#include <stromx/runtime/Id2DataComposite.h>')
self.doc.line('#include <stromx/runtime/Id2DataPair.h>')
self.doc.line('#include <stromx/runtime/ReadAccess.h>')
self.doc.line('#include <stromx/runtime/VariantComposite.h>')
self.doc.line('#include <stromx/runtime/WriteAccess.h>')
self.doc.line('#include <opencv2/{0}/{0}.hpp>'.format(cvModule))
self.doc.blank()
def __statics(self):
method = self.m.ident.className()
package = self.p.ident.upper()
self.doc.line(("const std::string {0}::PACKAGE(STROMX_{1}_PACKAGE_"
"NAME);").format(method, package))
self.doc.line(("const runtime::Version {0}::VERSION("
"STROMX_{1}_VERSION_MAJOR, STROMX_{1}_VERSION_MINOR, "
"STROMX_{1}_VERSION_PATCH);".format(method, package)))
self.doc.line('const std::string {0}::TYPE("{0}");'.format(method))
self.doc.blank()
def __constructor(self):
self.doc.line("{0}::{0}()".format(self.m.ident.className()))
self.doc.line(" : runtime::OperatorKernel(TYPE, PACKAGE, VERSION, "
"setupInitParameters()),")
self.doc.increaseIndent()
v = OpImplGenerator.ParameterInitVisitor()
self.visitAll(v)
v.export(self.doc)
self.doc.decreaseIndent()
self.doc.scopeEnter()
self.doc.scopeExit()
self.doc.blank()
def __getParameter(self):
self.doc.line("const runtime::DataRef {0}::getParameter"
"(unsigned int id) const"\
.format(self.m.ident.className()))
self.doc.scopeEnter()
self.doc.line("switch(id)")
self.doc.scopeEnter()
v = OpImplGenerator.GetParametersVisitor(self.doc)
self.visitAll(v)
self.doc.label("default")
self.doc.line("throw runtime::WrongParameterId(id, *this);")
self.doc.scopeExit()
self.doc.scopeExit()
self.doc.blank()
def __setParameter(self):
self.doc.line("void {0}::setParameter"
"(unsigned int id, const runtime::Data& value)"\
.format(self.m.ident.className()))
self.doc.scopeEnter()
self.doc.line("try")
self.doc.scopeEnter()
self.doc.line("switch(id)")
self.doc.scopeEnter()
v = OpImplGenerator.SetParametersVisitor(self.doc)
self.visitAll(v)
self.doc.label("default")
self.doc.line("throw runtime::WrongParameterId(id, *this);")
self.doc.scopeExit()
self.doc.scopeExit()
self.doc.line("catch(runtime::BadCast&)")
self.doc.scopeEnter()
self.doc.line("throw runtime::WrongParameterType(parameter(id), *this);")
self.doc.scopeExit()
self.doc.scopeExit()
self.doc.blank()
def __setupInitParameters(self):
self.doc.line("const std::vector<const runtime::Parameter*> "
"{0}::setupInitParameters()"\
.format(self.m.ident.className()))
self.doc.scopeEnter()
self.doc.line("std::vector<const runtime::Parameter*> parameters;")
self.doc.blank()
if len(self.m.options) > 1:
v = OpImplGenerator.SetupParametersVisitor(self.doc, isInit = True)
self.optionParam.accept(v)
self.doc.line("return parameters;")
self.doc.scopeExit()
self.doc.blank()
def __setupParameters(self):
self.doc.line("const std::vector<const runtime::Parameter*> "
"{0}::setupParameters()"\
.format(self.m.ident.className()))
self.doc.scopeEnter()
self.doc.line("std::vector<const runtime::Parameter*> parameters;")
self.doc.blank()
self.doc.line("switch(int({0}))".format(
self.optionParam.ident.attribute()))
self.doc.scopeEnter()
for o in self.m.options:
self.doc.label("case({0})".format(o.ident.constant()))
self.doc.scopeEnter()
v = OpImplGenerator.SetupParametersVisitor(self.doc)
for arg in o.args:
arg.accept(v)
self.doc.scopeExit()
self.doc.line("break;")
self.doc.scopeExit()
self.doc.blank()
self.doc.line("return parameters;")
self.doc.scopeExit()
self.doc.blank()
def __setupInputs(self):
self.doc.line("const std::vector<const runtime::Description*> "
"{0}::setupInputs()"\
.format(self.m.ident.className()))
self.doc.scopeEnter()
self.doc.line("std::vector<const runtime::Description*> inputs;")
self.doc.blank()
self.doc.line("switch(int({0}))".format(
self.optionParam.ident.attribute()))
self.doc.scopeEnter()
for o in self.m.options:
self.doc.label("case({0})".format(o.ident.constant()))
self.doc.scopeEnter()
v = OpImplGenerator.SetupInputsVisitor(self.doc)
for arg in o.args:
arg.accept(v)
self.doc.scopeExit()
self.doc.line("break;")
self.doc.scopeExit()
self.doc.blank()
self.doc.line("return inputs;")
self.doc.scopeExit()
self.doc.blank()
def __setupOutputs(self):
self.doc.line("const std::vector<const runtime::Description*> "
"{0}::setupOutputs()"\
.format(self.m.ident.className()))
self.doc.scopeEnter()
self.doc.line("std::vector<const runtime::Description*> outputs;")
self.doc.blank()
self.doc.line("switch(int({0}))".format(
self.optionParam.ident.attribute()))
self.doc.scopeEnter()
for o in self.m.options:
self.doc.label("case({0})".format(o.ident.constant()))
self.doc.scopeEnter()
v = OpImplGenerator.SetupOutputsVistor(self.doc)
self.visitOption(o, v)
self.doc.scopeExit()
self.doc.line("break;")
self.doc.scopeExit()
self.doc.blank()
self.doc.line("return outputs;")
self.doc.scopeExit()
self.doc.blank()
def __initialize(self):
self.doc.line("void {0}::initialize()"\
.format(self.m.ident.className()))
self.doc.scopeEnter()
self.doc.line("runtime::OperatorKernel::initialize(setupInputs(), "
"setupOutputs(), setupParameters());")
self.doc.scopeExit()
self.doc.blank()
def __execute(self):
self.doc.line("void {0}::execute(runtime::DataProvider & provider)"\
.format(self.m.ident.className()))
self.doc.scopeEnter()
self.doc.line("switch(int({0}))".format(
self.optionParam.ident.attribute()))
self.doc.scopeEnter()
for o in self.m.options:
self.doc.label("case({0})".format(o.ident.constant()))
self.doc.scopeEnter()
v = OpImplGenerator.InputMapperVisitor(self.doc)
self.visitOption(o, v)
self.doc.blank()
v = OpImplGenerator.ReceiveInputDataVisitor()
self.visitOption(o, v)
v.export(self.doc)
self.doc.blank()
v = OpImplGenerator.InDataVisitor(self.doc)
self.visitOption(o, v)
self.doc.blank()
v = OpImplGenerator.AccessVisitor(self.doc)
self.visitOption(o, v)
self.doc.blank()
v = OpImplGenerator.CopyWriteAccessVisitor()
self.visitOption(o, v)
v.export(self.doc)
v = OpImplGenerator.CheckVariantVisitor(self.doc)
self.visitOption(o, v)
self.doc.blank()
v = OpImplGenerator.CastedDataVisitor(self.doc)
self.visitOption(o, v)
v = OpImplGenerator.CheckCastedDataVisitor(self.doc)
self.visitOption(o, v)
self.doc.blank()
if o.inputCheck != None:
self.doc.document(o.inputCheck)
self.doc.blank()
v = OpImplGenerator.InitInVisitor(self.doc)
self.visitOption(o, v)
self.doc.blank()
v = OpImplGenerator.CvDataVisitor(self.doc)
self.visitOption(o, v)
self.doc.blank()
v = OpImplGenerator.MethodReturnValueVisitor()
self.visitOption(o, v)
retVal = v.export()
v = OpImplGenerator.MethodArgumentVisitor()
self.visitOption(o, v)
argStr = v.export()
namespace = ""
if self.m.namespace != "":
namespace = "{0}::".format(self.m.namespace)
self.doc.line("{3}{2}{0}({1});".format(self.m.ident, argStr,
namespace, retVal))
if o.postCall != None:
self.doc.document(o.postCall)
self.doc.blank()
v = OpImplGenerator.OutDataVisitor(self.doc)
self.visitOption(o, v)
self.doc.blank()
v = OpImplGenerator.InitOutVisitor(self.doc)
self.visitOption(o, v)
v = OpImplGenerator.SendOutputDataVisitor()
self.visitOption(o, v)
v.export(self.doc)
self.doc.scopeExit()
self.doc.line("break;")
self.doc.scopeExit()
self.doc.scopeExit()
self.doc.blank()
def __convertEnumValues(self):
v = OpImplGenerator.EnumConversionDefVisitor(self.doc, self.m)
self.visitAll(v, False)
class OpTestGenerator(object):
"""
Abstract base class of all generators which output operator tests.
"""
def testNames(self):
l = []
for o in self.m.options:
for i in range(len(o.tests)):
l.append("test{0}{1}".format(o.ident.className(), i))
return l
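        # Example: an option whose ident class name is 'Manual' and which defines
        # two tests yields ['testManual0', 'testManual1'] (names are hypothetical).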
class OpTestHeaderGenerator(MethodGenerator, OpTestGenerator):
"""
Generates the header of an operator test.
"""
def generate(self):
self.__includeGuardEnter()
self.__includes()
self.namespaceEnter()
self.__classEnter()
self.__testSuite()
self.doc.blank()
self.doc.label("public")
self.__constructor()
self.doc.line("void setUp();")
self.doc.line("void tearDown();")
self.doc.blank()
self.doc.label("protected")
self.__testMethods()
self.doc.blank()
self.doc.label("private")
self.doc.line("runtime::OperatorTester* m_operator;")
self.__classExit()
self.namespaceExit()
self.__includeGuardExit()
filename = "stromx/{0}/test/{1}Test.h".format(self.p.ident,
self.m.ident.className())
with file(filename, "w") as f:
f.write(self.doc.string())
def __includeGuardEnter(self):
self.doc.line("#ifndef {0}".format(self.__includeGuard()))
self.doc.line("#define {0}".format(self.__includeGuard()))
self.doc.blank()
def __includes(self):
self.doc.line('#include "stromx/{0}/Config.h"'.format(self.p.ident))
self.doc.blank()
self.doc.line('#include <cppunit/extensions/HelperMacros.h>')
self.doc.line('#include <cppunit/TestFixture.h>')
self.doc.blank()
self.doc.line('#include "stromx/runtime/OperatorTester.h"')
self.doc.blank()
def __includeGuardExit(self):
self.doc.line("#endif // {0}".format(self.__includeGuard()))
def __includeGuard(self):
return "STROMX_{0}_{1}TEST_H".format(self.p.ident.upper(),
self.m.ident.upper())
def __classEnter(self):
self.doc.line((
"class {0}Test : public CPPUNIT_NS::TestFixture"
).format(self.m.ident.className()))
self.doc.line("{")
self.doc.increaseIndent()
def __testSuite(self):
self.doc.line((
"CPPUNIT_TEST_SUITE({0}Test);"
).format(self.m.ident.className()))
for test in self.testNames():
self.doc.line("CPPUNIT_TEST({0});".format(test))
self.doc.line("CPPUNIT_TEST_SUITE_END();")
def __constructor(self):
self.doc.line((
"{0}Test() : m_operator(0) {{}}"
).format(self.m.ident.className()))
def __testMethods(self):
for test in self.testNames():
self.doc.line("void {0}();".format(test))
def __classExit(self):
self.doc.decreaseIndent()
self.doc.line("};")
class OpTestImplGenerator(MethodGenerator, OpTestGenerator):
"""
Generates the implementation of an operator test.
"""
def __includes(self):
self.doc.line((
'#include "stromx/{0}/test/{1}Test.h"'
).format(self.p.ident, self.m.ident.className()))
self.doc.blank()
self.doc.line('#include <boost/lexical_cast.hpp>')
self.doc.line('#include <stromx/runtime/OperatorException.h>')
self.doc.line('#include <stromx/runtime/ReadAccess.h>')
self.doc.line('#include "stromx/cvsupport/Image.h"')
self.doc.line((
'#include "stromx/{0}/{1}.h"'
).format(self.p.ident, self.m.ident.className()))
self.doc.blank()
def __testSuite(self):
self.doc.line((
"CPPUNIT_TEST_SUITE_REGISTRATION (stromx::{0}::{1}Test);"
).format(self.p.ident, self.m.ident.className()))
self.doc.blank()
def __setUp(self):
className = self.m.ident.className()
self.doc.line("void {0}Test::setUp()".format(className))
self.doc.scopeEnter()
self.doc.line((
"m_operator = new stromx::runtime::OperatorTester(new {0});"
).format(self.m.ident.className()))
self.doc.scopeExit()
self.doc.blank()
def __tearDown(self):
className = self.m.ident.className()
self.doc.line("void {0}Test::tearDown()".format(className))
self.doc.scopeEnter()
self.doc.line("delete m_operator;")
self.doc.scopeExit()
self.doc.blank()
def __testMethods(self):
className = self.m.ident.className()
for o in self.m.options:
for i, test in enumerate(o.tests):
testName = "test{0}{1}".format(o.ident.className(), i)
self.doc.line(
"void {0}Test::{1}()".format(className, testName)
)
self.doc.scopeEnter()
if len(self.m.options) > 1:
index = "{0}::DATA_FLOW".format(self.m.ident.className())
value = (
"runtime::Enum({0}::{1})"
).format(self.m.ident.className(), o.ident.constant())
l = "m_operator->setParameter({0}, {1});".format(index, value)
self.doc.line(l)
self.doc.line("m_operator->initialize();")
self.doc.line("m_operator->activate();")
self.doc.blank();
testgenerator.generate(self.doc, self.m, o.args,
test, testName)
self.doc.scopeExit()
self.doc.blank()
def generate(self):
self.__includes()
self.__testSuite()
self.namespaceEnter()
self.__setUp()
self.__tearDown()
self.__testMethods()
self.namespaceExit()
filename = "stromx/{0}/test/{1}Test.cpp".format(self.p.ident,
self.m.ident.className())
with file(filename, "w") as f:
f.write(self.doc.string())
def generateMethodFiles(package, method):
"""
Generates the operator and the operator tests for the given method.
"""
g = OpHeaderGenerator()
g.save(package, method)
g = OpImplGenerator()
g.save(package, method)
g = OpTestHeaderGenerator()
g.save(package, method)
g = OpTestImplGenerator()
g.save(package, method)
if __name__ == "__main__":
import doctest
doctest.testmod() | apache-2.0 |
nmercier/linux-cross-gcc | linux/lib/python2.7/rlcompleter.py | 21 | 5991 | """Word completion for GNU readline.
The completer completes keywords, built-ins and globals in a selectable
namespace (which defaults to __main__); when completing NAME.NAME..., it
evaluates (!) the expression up to the last dot and completes its attributes.
It's very cool to do "import sys" type "sys.", hit the completion key (twice),
and see the list of names defined by the sys module!
Tip: to use the tab key as the completion key, call
readline.parse_and_bind("tab: complete")
Notes:
- Exceptions raised by the completer function are *ignored* (and generally cause
the completion to fail). This is a feature -- since readline sets the tty
device in raw (or cbreak) mode, printing a traceback wouldn't work well
without some complicated hoopla to save, reset and restore the tty state.
- The evaluation of the NAME.NAME... form may cause arbitrary application
defined code to be executed if an object with a __getattr__ hook is found.
Since it is the responsibility of the application (or the user) to enable this
feature, I consider this an acceptable risk. More complicated expressions
(e.g. function calls or indexing operations) are *not* evaluated.
- GNU readline is also used by the built-in functions input() and
raw_input(), and thus these also benefit/suffer from the completer
features. Clearly an interactive application can benefit by
specifying its own completer function and using raw_input() for all
its input.
- When the original stdin is not a tty device, GNU readline is never
used, and this module (and the readline module) are silently inactive.
"""
import __builtin__
import __main__
__all__ = ["Completer"]
class Completer:
def __init__(self, namespace = None):
"""Create a new completer for the command line.
Completer([namespace]) -> completer instance.
If unspecified, the default namespace where completions are performed
is __main__ (technically, __main__.__dict__). Namespaces should be
given as dictionaries.
Completer instances should be used as the completion mechanism of
readline via the set_completer() call:
readline.set_completer(Completer(my_namespace).complete)
"""
if namespace and not isinstance(namespace, dict):
raise TypeError,'namespace must be a dictionary'
# Don't bind to namespace quite yet, but flag whether the user wants a
# specific namespace or to use __main__.__dict__. This will allow us
# to bind to __main__.__dict__ at completion time, not now.
if namespace is None:
self.use_main_ns = 1
else:
self.use_main_ns = 0
self.namespace = namespace
def complete(self, text, state):
"""Return the next possible completion for 'text'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'.
"""
if self.use_main_ns:
self.namespace = __main__.__dict__
if state == 0:
if "." in text:
self.matches = self.attr_matches(text)
else:
self.matches = self.global_matches(text)
try:
return self.matches[state]
except IndexError:
return None
def _callable_postfix(self, val, word):
if hasattr(val, '__call__'):
word = word + "("
return word
def global_matches(self, text):
"""Compute matches when text is a simple name.
Return a list of all keywords, built-in functions and names currently
defined in self.namespace that match.
"""
import keyword
matches = []
seen = {"__builtins__"}
n = len(text)
for word in keyword.kwlist:
if word[:n] == text:
seen.add(word)
matches.append(word)
for nspace in [self.namespace, __builtin__.__dict__]:
for word, val in nspace.items():
if word[:n] == text and word not in seen:
seen.add(word)
matches.append(self._callable_postfix(val, word))
return matches
def attr_matches(self, text):
"""Compute matches when text contains a dot.
Assuming the text is of the form NAME.NAME....[NAME], and is
evaluable in self.namespace, it will be evaluated and its attributes
(as revealed by dir()) are used as possible completions. (For class
instances, class members are also considered.)
WARNING: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated.
"""
import re
m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
if not m:
return []
expr, attr = m.group(1, 3)
try:
thisobject = eval(expr, self.namespace)
except Exception:
return []
# get the content of the object, except __builtins__
words = set(dir(thisobject))
words.discard("__builtins__")
if hasattr(thisobject, '__class__'):
words.add('__class__')
words.update(get_class_members(thisobject.__class__))
matches = []
n = len(attr)
for word in words:
if word[:n] == attr:
try:
val = getattr(thisobject, word)
except Exception:
continue # Exclude properties that are not set
word = self._callable_postfix(val, "%s.%s" % (expr, word))
matches.append(word)
matches.sort()
return matches
def get_class_members(klass):
ret = dir(klass)
if hasattr(klass,'__bases__'):
for base in klass.__bases__:
ret = ret + get_class_members(base)
return ret
try:
import readline
except ImportError:
pass
else:
readline.set_completer(Completer().complete)
| bsd-3-clause |
kxliugang/edx-platform | lms/lib/courseware_search/lms_filter_generator.py | 58 | 5634 | """
This file contains an implementation override of SearchFilterGenerator which will allow
* Filtering by all courses in which the user is enrolled
"""
from microsite_configuration import microsite
from student.models import CourseEnrollment
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.django import modulestore
from search.filter_generator import SearchFilterGenerator
from openedx.core.djangoapps.user_api.partition_schemes import RandomUserPartitionScheme
from openedx.core.djangoapps.course_groups.partition_scheme import CohortPartitionScheme
from courseware.access import get_user_role
INCLUDE_SCHEMES = [CohortPartitionScheme, RandomUserPartitionScheme, ]
SCHEME_SUPPORTS_ASSIGNMENT = [RandomUserPartitionScheme, ]
class LmsSearchFilterGenerator(SearchFilterGenerator):
""" SearchFilterGenerator for LMS Search """
_user_enrollments = {}
def _enrollments_for_user(self, user):
""" Return the specified user's course enrollments """
if user not in self._user_enrollments:
self._user_enrollments[user] = CourseEnrollment.enrollments_for_user(user)
return self._user_enrollments[user]
def filter_dictionary(self, **kwargs):
""" LMS implementation, adds filtering by user partition, course id and user """
def get_group_for_user_partition(user_partition, course_key, user):
""" Returns the specified user's group for user partition """
if user_partition.scheme in SCHEME_SUPPORTS_ASSIGNMENT:
return user_partition.scheme.get_group_for_user(
course_key,
user,
user_partition,
assign=False,
)
else:
return user_partition.scheme.get_group_for_user(
course_key,
user,
user_partition,
)
def get_group_ids_for_user(course, user):
""" Collect user partition group ids for user for this course """
partition_groups = []
for user_partition in course.user_partitions:
if user_partition.scheme in INCLUDE_SCHEMES:
group = get_group_for_user_partition(user_partition, course.id, user)
if group:
partition_groups.append(group)
partition_group_ids = [unicode(partition_group.id) for partition_group in partition_groups]
return partition_group_ids if partition_group_ids else None
filter_dictionary = super(LmsSearchFilterGenerator, self).filter_dictionary(**kwargs)
if 'user' in kwargs:
user = kwargs['user']
if 'course_id' in kwargs and kwargs['course_id']:
try:
course_key = CourseKey.from_string(kwargs['course_id'])
except InvalidKeyError:
course_key = SlashSeparatedCourseKey.from_deprecated_string(kwargs['course_id'])
# Staff user looking at course as staff user
if get_user_role(user, course_key) in ('instructor', 'staff'):
return filter_dictionary
# Need to check course exist (if course gets deleted enrollments don't get cleaned up)
course = modulestore().get_course(course_key)
if course:
filter_dictionary['content_groups'] = get_group_ids_for_user(course, user)
else:
user_enrollments = self._enrollments_for_user(user)
content_groups = []
for enrollment in user_enrollments:
course = modulestore().get_course(enrollment.course_id)
if course:
enrollment_group_ids = get_group_ids_for_user(course, user)
if enrollment_group_ids:
content_groups.extend(enrollment_group_ids)
filter_dictionary['content_groups'] = content_groups if content_groups else None
return filter_dictionary
def field_dictionary(self, **kwargs):
""" add course if provided otherwise add courses in which the user is enrolled in """
field_dictionary = super(LmsSearchFilterGenerator, self).field_dictionary(**kwargs)
if not kwargs.get('user'):
field_dictionary['course'] = []
elif not kwargs.get('course_id'):
user_enrollments = self._enrollments_for_user(kwargs['user'])
field_dictionary['course'] = [unicode(enrollment.course_id) for enrollment in user_enrollments]
# if we have an org filter, only include results for this org filter
course_org_filter = microsite.get_value('course_org_filter')
if course_org_filter:
field_dictionary['org'] = course_org_filter
return field_dictionary
def exclude_dictionary(self, **kwargs):
""" If we are not on a microsite, then exclude any microsites that are defined """
exclude_dictionary = super(LmsSearchFilterGenerator, self).exclude_dictionary(**kwargs)
course_org_filter = microsite.get_value('course_org_filter')
# If we have a course filter we are ensuring that we only get those courses above
if not course_org_filter:
org_filter_out_set = microsite.get_all_orgs()
if org_filter_out_set:
exclude_dictionary['org'] = list(org_filter_out_set)
return exclude_dictionary
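# A rough usage sketch (illustrative only; 'request.user' and 'course_id' are
# assumed example values -- in practice the edx-search library invokes these hooks):
#
#     generator = LmsSearchFilterGenerator()
#     fields = generator.field_dictionary(user=request.user)
#     filters = generator.filter_dictionary(user=request.user, course_id=course_id)
#     excludes = generator.exclude_dictionary(user=request.user)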
| agpl-3.0 |
foobert/ansible-modules-core | cloud/amazon/ec2_ami_find.py | 77 | 9793 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_ami_find
version_added: '2.0'
short_description: Searches for AMIs to obtain the AMI ID and other information
description:
- Returns list of matching AMIs with AMI ID, along with other useful information
- Can search AMIs with different owners
- Can search by matching tag(s), by AMI name and/or other criteria
- Results can be sorted and sliced
author: "Tom Bamford (@tombamford)"
notes:
- This module is not backwards compatible with the previous version of the ec2_search_ami module which worked only for Ubuntu AMIs listed on cloud-images.ubuntu.com.
- See the example below for a suggestion of how to search by distro/release.
options:
region:
description:
- The AWS region to use.
required: true
aliases: [ 'aws_region', 'ec2_region' ]
owner:
description:
- Search AMIs owned by the specified owner
- Can specify an AWS account ID, or one of the special IDs 'self', 'amazon' or 'aws-marketplace'
- If not specified, all EC2 AMIs in the specified region will be searched.
- You can include wildcards in many of the search options. An asterisk (*) matches zero or more characters, and a question mark (?) matches exactly one character. You can escape special characters using a backslash (\) before the character. For example, a value of \*amazon\?\\ searches for the literal string *amazon?\.
required: false
default: null
ami_id:
description:
- An AMI ID to match.
default: null
required: false
ami_tags:
description:
- A hash/dictionary of tags to match for the AMI.
default: null
required: false
architecture:
description:
- An architecture type to match (e.g. x86_64).
default: null
required: false
hypervisor:
description:
      - A hypervisor type to match (e.g. xen).
default: null
required: false
is_public:
description:
- Whether or not the image(s) are public.
choices: ['yes', 'no']
default: null
required: false
name:
description:
- An AMI name to match.
default: null
required: false
platform:
description:
- Platform type to match.
default: null
required: false
sort:
description:
      - Optional attribute with which to sort the results.
- If specifying 'tag', the 'tag_name' parameter is required.
choices: ['name', 'description', 'tag']
default: null
required: false
sort_tag:
description:
- Tag name with which to sort results.
- Required when specifying 'sort=tag'.
default: null
required: false
sort_order:
description:
- Order in which to sort results.
- Only used when the 'sort' parameter is specified.
choices: ['ascending', 'descending']
default: 'ascending'
required: false
sort_start:
description:
- Which result to start with (when sorting).
- Corresponds to Python slice notation.
default: null
required: false
sort_end:
description:
- Which result to end with (when sorting).
- Corresponds to Python slice notation.
default: null
required: false
state:
description:
- AMI state to match.
default: 'available'
required: false
virtualization_type:
description:
- Virtualization type to match (e.g. hvm).
default: null
required: false
no_result_action:
description:
- What to do when no results are found.
- "'success' reports success and returns an empty array"
- "'fail' causes the module to report failure"
choices: ['success', 'fail']
default: 'success'
required: false
requirements:
- "python >= 2.6"
- boto
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Search for the AMI tagged "project:website"
- ec2_ami_find:
owner: self
ami_tags:
project: website
no_result_action: fail
register: ami_find
# Search for the latest Ubuntu 14.04 AMI
- ec2_ami_find:
name: "ubuntu/images/ebs/ubuntu-trusty-14.04-amd64-server-*"
owner: 099720109477
sort: name
sort_order: descending
sort_end: 1
register: ami_find
# Launch an EC2 instance
- ec2:
image: "{{ ami_find.results[0].ami_id }}"
instance_type: m3.medium
key_name: mykey
wait: yes
'''
try:
import boto.ec2
HAS_BOTO=True
except ImportError:
HAS_BOTO=False
import json
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
region = dict(required=True,
aliases = ['aws_region', 'ec2_region']),
owner = dict(required=False, default=None),
ami_id = dict(required=False),
ami_tags = dict(required=False, type='dict',
aliases = ['search_tags', 'image_tags']),
architecture = dict(required=False),
hypervisor = dict(required=False),
is_public = dict(required=False),
name = dict(required=False),
platform = dict(required=False),
sort = dict(required=False, default=None,
choices=['name', 'description', 'tag']),
sort_tag = dict(required=False),
sort_order = dict(required=False, default='ascending',
choices=['ascending', 'descending']),
sort_start = dict(required=False),
sort_end = dict(required=False),
state = dict(required=False, default='available'),
virtualization_type = dict(required=False),
no_result_action = dict(required=False, default='success',
choices = ['success', 'fail']),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module, install via pip or your package manager')
ami_id = module.params.get('ami_id')
ami_tags = module.params.get('ami_tags')
architecture = module.params.get('architecture')
hypervisor = module.params.get('hypervisor')
is_public = module.params.get('is_public')
name = module.params.get('name')
owner = module.params.get('owner')
platform = module.params.get('platform')
sort = module.params.get('sort')
sort_tag = module.params.get('sort_tag')
sort_order = module.params.get('sort_order')
sort_start = module.params.get('sort_start')
sort_end = module.params.get('sort_end')
state = module.params.get('state')
virtualization_type = module.params.get('virtualization_type')
no_result_action = module.params.get('no_result_action')
filter = {'state': state}
if ami_id:
filter['image_id'] = ami_id
if ami_tags:
for tag in ami_tags:
filter['tag:'+tag] = ami_tags[tag]
if architecture:
filter['architecture'] = architecture
if hypervisor:
filter['hypervisor'] = hypervisor
if is_public:
filter['is_public'] = is_public
if name:
filter['name'] = name
if platform:
filter['platform'] = platform
if virtualization_type:
filter['virtualization_type'] = virtualization_type
ec2 = ec2_connect(module)
images_result = ec2.get_all_images(owners=owner, filters=filter)
if no_result_action == 'fail' and len(images_result) == 0:
module.fail_json(msg="No AMIs matched the attributes: %s" % json.dumps(filter))
results = []
for image in images_result:
data = {
'ami_id': image.id,
'architecture': image.architecture,
'description': image.description,
'is_public': image.is_public,
'name': image.name,
'owner_id': image.owner_id,
'platform': image.platform,
'root_device_name': image.root_device_name,
'root_device_type': image.root_device_type,
'state': image.state,
'tags': image.tags,
'virtualization_type': image.virtualization_type,
}
if image.kernel_id:
data['kernel_id'] = image.kernel_id
if image.ramdisk_id:
data['ramdisk_id'] = image.ramdisk_id
results.append(data)
if sort == 'tag':
if not sort_tag:
module.fail_json(msg="'sort_tag' option must be given with 'sort=tag'")
results.sort(key=lambda e: e['tags'][sort_tag], reverse=(sort_order=='descending'))
elif sort:
results.sort(key=lambda e: e[sort], reverse=(sort_order=='descending'))
try:
if sort and sort_start and sort_end:
results = results[int(sort_start):int(sort_end)]
elif sort and sort_start:
results = results[int(sort_start):]
elif sort and sort_end:
results = results[:int(sort_end)]
except TypeError:
module.fail_json(msg="Please supply numeric values for sort_start and/or sort_end")
module.exit_json(results=results)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
aleisalem/Aion | data_generation/stimulation/Garfield.py | 1 | 20528 | #!/usr/bin/python
# Python modules
import sys, os, shutil, glob, io
# Aion modules
from Aion.utils.graphics import *
from Aion.utils.data import *
from Aion.shared.constants import *
# Third-party libraries
from androguard.session import Session
from androguard.misc import AXMLPrinter
class Garfield():
""" Garfield is a lazy stimulation engine based on fuzzing via Monkey(runner) and Genymotion """
def __init__(self, pathToAPK, APKType="goodware"):
if not os.path.exists(pathToAPK):
prettyPrint("Could not find the APK \"%s\"" % pathToAPK, "warning")
return None
self.APKPath = pathToAPK
self.APK, self.DEX, self.VMAnalysis = None, None, None
self.activitiesInfo, self.servicesInfo, self.receiversInfo = {}, {}, {}
self.runnerScript = ""
self.APKType = APKType
def analyzeAPK(self):
""" Uses androguard to retrieve metadata about the app e.g. activities, permissions, intent filters, etc. """
try:
prettyPrint("Analyzing app")
logEvent("Analyzing app: \"%s\"" % self.APKPath)
# 1. Load the APK using androguard
analysisSession = Session()
analysisSession.add(self.APKPath, open(self.APKPath).read())
# 2. Retrieve handles to APK and its dex code
self.APK = analysisSession.analyzed_apk.values()[0]
self.DEX = analysisSession.analyzed_dex.values()[0][0]
self.VMAnalysis = analysisSession.analyzed_dex.values()[0][1]
# 3. Retrieve information for each activity
prettyPrint("Analyzing activities")
self.activitiesInfo = analyzeActivities(self.APK, self.DEX)
# 4. Do the same for services and broadcast receivers
prettyPrint("Analyzing services")
self.servicesInfo = analyzeServices(self.APK, self.DEX)
prettyPrint("Analyzing broadcast receivers")
self.receiversInfo = analyzeReceivers(self.APK, self.DEX)
except Exception as e:
prettyPrintError(e)
return False
prettyPrint("Success")
return True
def generateRunnerScript(self, scriptPath="", runningTime=60):
"""Generates a python script to be run by Monkeyrunner"""
try:
# Check whether the APK has been analyzed first
if not self.APK:
prettyPrint("APK needs to be analyzed first", "warning")
return False
self.runnerScript = "%s/files/scripts/%s.py" % (getProjectDir(), getRandomAlphaNumeric()) if scriptPath == "" else scriptPath
print self.runnerScript
monkeyScript = open(self.runnerScript, "w")
# Preparation
monkeyScript.write("#!/usr/bin/python\n\n")
monkeyScript.write("from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice\n")
monkeyScript.write("import time, os, random\n\n")
monkeyScript.write("keyEvents = %s\n" % keyEvents)
monkeyScript.write("keyEventTypes = [MonkeyDevice.UP, MonkeyDevice.DOWN, MonkeyDevice.DOWN_AND_UP]\n")
monkeyScript.write("activityActions = %s\n" % activityActions)
monkeyScript.write("activities = %s\n" % self.activitiesInfo)
monkeyScript.write("services = %s\n" % self.servicesInfo)
monkeyScript.write("receivers = %s\n\n" % self.receiversInfo)
# Connect to the current device and install package
monkeyScript.write("print \"[*] Connecting to device.\"\n")
monkeyScript.write("device = MonkeyRunner.waitForConnection(\"[ANDROID_VIRTUAL_DEVICE_ID]\")\n")
monkeyScript.write("package = '%s'\n" % self.APK.package)
monkeyScript.write("print \"[*] Uninstalling package %s (if exists)\"\n" % self.APK.package)
monkeyScript.write("device.removePackage(package)\n")
monkeyScript.write("print \"[*] Installing package %s\"\n" % self.APK.package)
monkeyScript.write("device.installPackage('%s')\n" % self.APKPath)
# Configure introspy for hooking and monitoring
monkeyScript.write("print \"[*] Configuring Introspy\"\n")
monkeyScript.write("device.shell(\"echo 'GENERAL CRYPTO,KEY,HASH,FS,IPC,PREF,URI,WEBVIEW,SSL' > /data/data/%s/introspy.config\" % package)\n")
monkeyScript.write("device.shell(\"chmod 664 /data/data/%s/introspy.config\" % package)\n")
# Get a handle to a file to store the commands issued during runtime
monkeyScript.write("commandsFile = open(\"%s/files/scripts/%s_%s.command\", \"w\")\n" % (getProjectDir(), self.APK.package.replace('.','_'), getRandomAlphaNumeric()))
# Start app
#monkeyScript.write("mainActivity = '%s'\n" % APK.APK.get_main_activity())
#monkeyScript.write("device.startActivity(component=package + '/' + mainActivity)\n")
            # Starting the fuzzing phase for [runningTime] seconds
monkeyScript.write("endTime = time.time() + %s\n" % runningTime)
monkeyScript.write("print \"[*] Fuzzing app for %s seconds\"\n" % runningTime)
monkeyScript.write("while time.time() < endTime:\n")
# 1. Choose a random component
monkeyScript.write("\tcomponentType = [\"activity\", \"service\", \"receiver\"][random.randint(0,2)]\n")
# 2.a. Activities
monkeyScript.write("\tif componentType == \"activity\":\n")
monkeyScript.write("\t\tcurrentActivity = activities.keys()[random.randint(0,len(activities)-1)]\n")
monkeyScript.write("\t\tprint \"[*] Starting activity: %s\" % currentActivity\n")
monkeyScript.write("\t\tdevice.startActivity(component=package + '/' + currentActivity)\n")
monkeyScript.write("\t\tcommandsFile.write(\"device.startActivity('%s/%s')\\n\" % (package, currentActivity))\n")
# Choose an action
monkeyScript.write("\t\tcurrentAction = activityActions[random.randint(0,len(activityActions)-1)]\n")
monkeyScript.write("\t\tprint \"[*] Current action: %s\" % currentAction\n")
# Touch in a random X,Y position on the screen
monkeyScript.write("\t\tif currentAction == \"touch\":\n")
monkeyScript.write("\t\t\twidth, height = int(device.getProperty(\"display.width\")), int(device.getProperty(\"display.height\"))\n")
monkeyScript.write("\t\t\tX, Y = random.randint(0, width-1), random.randint(0, height-1)\n")
monkeyScript.write("\t\t\tprint \"[*] Touching screen at (%s,%s)\" % (X,Y)\n")
monkeyScript.write("\t\t\teventType = keyEventTypes[random.randint(0,2)]\n")
monkeyScript.write("\t\t\tdevice.touch(X, Y, eventType)\n")
monkeyScript.write("\t\t\tcommandsFile.write(\"device.touch(%s, %s, %s)\\n\" % (X, Y, eventType))\n")
# Type something random
monkeyScript.write("\t\telif currentAction == \"type\":\n")
monkeyScript.write("\t\t\ttext = \"%s\"\n" % getRandomString(random.randint(0,100)))
monkeyScript.write("\t\t\tprint \"[*] Typing %s\" % text\n")
monkeyScript.write("\t\t\tdevice.type(text)\n")
monkeyScript.write("\t\t\tcommandsFile.write(\"device.type('%s')\\n\" % text)\n")
# Press a random key up/down
monkeyScript.write("\t\telif currentAction == \"press\":\n")
monkeyScript.write("\t\t\taction = keyEvents[random.randint(0, len(keyEvents)-1)]\n")
monkeyScript.write("\t\t\taType = keyEventTypes[random.randint(0,2)]\n")
monkeyScript.write("\t\t\tprint \"[*] Pressing: %s as %s\" % (action, aType)\n")
monkeyScript.write("\t\t\tdevice.press(action, aType)\n")
monkeyScript.write("\t\t\tcommandsFile.write(\"device.press(%s, %s)\\n\" % (action, aType)) \n")
# Randomly drag the screen
monkeyScript.write("\t\telif currentAction == \"drag\":\n")
monkeyScript.write("\t\t\twidth, height = int(device.getProperty(\"display.width\")), int(device.getProperty(\"display.height\"))\n")
monkeyScript.write("\t\t\tstart = (random.randint(0, width-1), random.randint(0, height-1))\n")
monkeyScript.write("\t\t\tend = (random.randint(0, width-1), random.randint(0, height-1))\n")
monkeyScript.write("\t\t\tprint \"[*] Dragging screen from %s to %s\" % (start, end)\n")
monkeyScript.write("\t\t\tdevice.drag(start, end)\n")
monkeyScript.write("\t\t\tcommandsFile.write(\"device.drag(%s, %s)\\n\" % (start, end))\n")
# 2.b.Services
monkeyScript.write("\telif componentType == \"service\":\n")
monkeyScript.write("\t\tcurrentService = services.keys()[random.randint(0, len(services)-1)]\n")
monkeyScript.write("\t\tprint \"[*] Starting Service: %s\" % currentService\n")
monkeyScript.write("\t\tif \"intent-filters\" in services[currentService].keys():\n")
monkeyScript.write("\t\t\tif \"action\" in services[currentService][\"intent-filters\"].keys():\n")
monkeyScript.write("\t\t\t\tintentAction = services[currentService][\"intent-filters\"][\"action\"][0]\n")
monkeyScript.write("\t\t\t\tprint \"[*] Broadcasting intent: %s\" % intentAction\n")
monkeyScript.write("\t\t\t\tdevice.broadcastIntent(currentService, intentAction)\n")
monkeyScript.write("\t\t\t\tcommandsFile.write(\"device.broadcastIntent('%s', '%s')\\n\" % (currentService, intentAction)) \n")
# 2.c. Broadcast receivers
monkeyScript.write("\telif componentType == \"receiver\":\n")
monkeyScript.write("\t\tcurrentReceiver = receivers.keys()[random.randint(0, len(receivers)-1)]\n")
monkeyScript.write("\t\tprint \"[*] Starting Receiver: %s\" % currentReceiver\n")
monkeyScript.write("\t\tif \"intent-filters\" in receivers[currentReceiver].keys():\n")
monkeyScript.write("\t\t\tif \"action\" in receivers[currentReceiver][\"intent-filters\"].keys():\n")
monkeyScript.write("\t\t\t\tintentAction = receivers[currentReceiver][\"intent-filters\"][\"action\"][0]\n")
monkeyScript.write("\t\t\t\tprint \"[*] Broadcasting intent: %s\" % intentAction\n")
monkeyScript.write("\t\t\t\tdevice.broadcastIntent(currentReceiver, intentAction)\n")
monkeyScript.write("\t\t\t\tcommandsFile.write(\"device.broadcastIntent('%s', '%s')\\n\" % (currentReceiver, intentAction))\n")
		# Sleep for one second between fuzzing iterations
monkeyScript.write("\ttime.sleep(1)\n")
# Uninstall package (Still need to fetch the introspy.db file from app directory before uninstallation)
#monkeyScript.write("device.removePackage(package)\n")
monkeyScript.write("commandsFile.close()")
except Exception as e:
prettyPrintError(e)
return False
return True
def analyzeActivities(APK, DEX):
""" Analyzes the passed APK and DEX objects to retrieve the elements within every activity """
try:
info = {}
for activity in APK.get_activities():
info[activity] = {}
# 1. Add the intent filters
info[activity]["intent-filters"] = APK.get_intent_filters("activity", activity)
# 2. Get all classes belonging to current activity
allClasses, tempList, layoutFiles = DEX.get_classes(), [], []
# 2.a. Get all classes that inherit class "Activity" i.e. corresponding to an activity
for c in allClasses:
if c.get_superclassname().lower().find("activity") != -1:
tempList.append(c)
# 2.b. Get classes belonging to CURRENT activity
info[activity]["classes"] = []
for c in tempList:
if c.get_name()[1:-1].replace('/','.') == activity:
info[activity]["classes"].append(c)
if loggingON():
prettyPrint("Activity: %s, class: %s" % (activity, c), "debug")
# 3. Get UI elements in every activity
# 3.a. Identify the layout file's ID in the class' setContentView function call
if len(info[activity]["classes"]) < 1:
prettyPrint("Could not retrieve any Activity classes. Skipping", "warning")
continue
source = info[activity]["classes"][0].get_source()
info[activity].pop("classes") # TODO: Do we really need a reference to the class object?
index1 = source.find("void onCreate(")
index2 = source.find("setContentView(", index1) + len("setContentView(")
layoutID = ""
while str.isdigit(source[index2]):
layoutID += source[index2]
index2 += 1
# layoutID retrieved?
if len(layoutID) < 1:
prettyPrint("Could not retrieve layout ID from activity class. Skipping", "warning")
continue
# 3.b. Look for the corresponding layout name in the R$layout file
layoutClass = DEX.get_class(str("L%s/R$layout;" % APK.package.replace('.','/')))
if layoutClass:
layoutContent = layoutClass.get_source()
eIndex = layoutContent.find(layoutID)
sIndex = layoutContent.rfind("int", 0, eIndex)
layoutName = layoutContent[sIndex+len("int"):eIndex].replace(' ','').replace('=','')
else:
# No layout class was found: Check the public.xml file
prettyPrint("Could not find a \"R$layout\" class. Checking \"public.xml\"", "warning")
apkResources = APK.get_android_resources()
publicResources = apkResources.get_public_resources(APK.package).split('\n')
layoutIDHex = hex(int(layoutID))
for line in publicResources:
if line.find(layoutIDHex) != -1:
sIndex = line.find("name=\"") + len("name=\"")
eIndex = line.find("\"", sIndex)
layoutName = line[sIndex:eIndex]
# 3.c. Retrieve layout file and get XML object
if len(layoutName) < 1:
prettyPrint("Could not retrieve a layout file for \"%s\". Skipping" % activity, "warning")
else:
if loggingON():
prettyPrint("Retrieving UI elements from %s.xml" % layoutName, "debug")
info[activity]["elements"] = _parseActivityLayout("res/layout/%s.xml" % layoutName, APK)
except Exception as e:
prettyPrintError(e)
return {}
return info
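# Illustrative sketch (not from the original code): the dict returned by
# analyzeActivities() is roughly shaped as below for an app with a single
# activity whose layout declares one button; all names are made-up examples.
#
#   {"com.example.app.MainActivity": {
#        "intent-filters": {...},  # whatever APK.get_intent_filters() returned
#        "elements": {"@id/btn_ok": {"type": "Button", "onclick": "onOkClicked"}}}}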
def analyzeServices(APK, DEX):
""" Analyzes the passed APK and DEX objects to retrieve information about an app's services """
try:
info = {}
for service in APK.get_services():
info[service] = {}
info[service]["intent-filters"] = APK.get_intent_filters("service", service)
except Exception as e:
prettyPrintError(e)
return {}
return info
def analyzeReceivers(APK, DEX):
""" Analyzes the passed APK and DEX objects to retrieve information about an app's broadcast receivers """
try:
info = {}
for receiver in APK.get_receivers():
info[receiver] = {}
info[receiver]["intent-filters"] = APK.get_intent_filters("receiver", receiver)
except Exception as e:
prettyPrintError(e)
return {}
return info
def _parseActivityLayout(layoutFilePath, APK):
""" Parses an XML layout file of an activity and returns information about the found elements """
try:
elements = {}
# Read the contents of the layout file
activityXML = AXMLPrinter(APK.get_file(layoutFilePath)).get_xml_obj()
logEvent("Parsing the XML layout %s" % layoutFilePath)
# Iterate over the elements and parse them
for currentNode in activityXML.firstChild.childNodes:
if currentNode.nodeName == "Button" or currentNode.nodeName == "ImageButton" or currentNode.nodeName == "RadioButton":
# Handling buttons
attr = {}
eID = currentNode.attributes["android:id"].value
attr["type"] = currentNode.nodeName
if "android:onClick" in currentNode.attributes.keys():
attr["onclick"] = currentNode.attributes["android:onClick"].value
if "android:visibility" in currentNode.attributes.keys():
attr["visibility"] = currentNode.attributes["android:visibility"].value
if "android:clickable" in currentNode.attributes.keys():
attr["clickable"] = currentNode.attributes["android:clickable"].value
if "android:longClickable" in currentNode.attributes.keys():
attr["longclickable"] = currentNode.attributes["android:longClickable"].value
elements[eID] = attr
elif currentNode.nodeName == "CheckBox" or currentNode.nodeName == "CheckedTextView":
# Handling checkbox-like elements
attr = {}
eID = currentNode.attributes["android:id"].value
attr["type"] = currentNode.nodeName
if "android:onClick" in currentNode.attributes.keys():
attr["onclick"] = currentNode.attributes["android:onClick"].value
if "android:visibility" in currentNode.attributes.keys():
attr["visibility"] = currentNode.attributes["android:visibility"].value
if "android:checked" in currentNode.attributes.keys():
attr["checked"] = currentNode.attributes["android:checked"].value
elements[eID] = attr
elif currentNode.nodeName == "DatePicker":
# Handling date pickers
attr = {}
eID = currentNode.attributes["android:id"].value
attr["type"] = currentNode.nodeName
if "android:minDate" in currentNode.attributes.keys():
attr["mindate"] = currentNode.attributes["android:minDate"]
if "android:maxDate" in currentNode.attributes.keys():
attr["maxDate"] = currentNode.attributes["android:maxDate"]
elements[eID] = attr
elif currentNode.nodeName == "EditText":
# Handling edit texts
attr = {}
eID = currentNode.attributes["android:id"].value
attr["type"] = currentNode.nodeName
if "android:editable" in currentNode.attributes.keys():
attr["editable"] = currentNode.attributes["android:editable"]
if "android:inputType" in currentNode.attributes.keys():
attr["inputtype"] = currentNode.attributes["android:inputType"]
elements[eID] = attr
#elif currentNode.nodeName == "NumberPicker":
elif currentNode.nodeName == "RadioGroup":
# Handle radio group
# 1. Get radio buttons
buttons = currentNode.childNodes
for button in buttons:
attr = {}
					eID = button.attributes["android:id"].value
					attr["type"] = button.nodeName
					if "android:onClick" in button.attributes.keys():
						attr["onclick"] = button.attributes["android:onClick"].value
					if "android:visibility" in button.attributes.keys():
						attr["visibility"] = button.attributes["android:visibility"].value
					if "android:clickable" in button.attributes.keys():
						attr["clickable"] = button.attributes["android:clickable"].value
					if "android:longClickable" in button.attributes.keys():
						attr["longclickable"] = button.attributes["android:longClickable"].value
elements[eID] = attr
#elif currentNode.nodeName == "Spinner":
except Exception as e:
prettyPrintError(e)
return {}
return elements
| gpl-3.0 |
tkruse/rosinstall | src/rosinstall/simple_checkout.py | 1 | 2400 | # Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import vcstools
from wstool.config_yaml import get_path_spec_from_yaml
def checkout_rosinstall(rosinstall_data, verbose=False):
"""
:param rosinstall_data: yaml dict in rosinstall format
    :raises: rosinstall.common.MultiProjectException for invalid yaml
"""
for frag in rosinstall_data:
path_spec = get_path_spec_from_yaml(frag)
if verbose:
print(path_spec.get_scmtype(),
path_spec.get_path(),
path_spec.get_uri(),
path_spec.get_version())
vcs_client = vcstools.get_vcs_client(path_spec.get_scmtype(),
path_spec.get_path())
vcs_client.checkout(path_spec.get_uri(),
path_spec.get_version())
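# Minimal usage sketch (illustrative, not part of the original module); the
# repository entry below is a made-up example of a rosinstall yaml fragment:
#
#   rosinstall_data = [
#       {'git': {'local-name': 'vcstools',
#                'uri': 'https://github.com/vcstools/vcstools.git',
#                'version': 'master'}},
#   ]
#   checkout_rosinstall(rosinstall_data, verbose=True)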
| bsd-3-clause |
dnet/suds | suds/sax/__init__.py | 200 | 3253 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
The sax module contains a collection of classes that provide a
(D)ocument (O)bject (M)odel representation of an XML document.
The goal is to provide an easy, intuitive interface for managing XML
documents. Although the term DOM is used above, this model is
B{far} better.
XML namespaces in suds are represented using a (2) element tuple
containing the prefix and the URI. Eg: I{('tns', 'http://myns')}
@var encoder: A I{pluggable} XML special character processor used to
encode/decode strings.
@type encoder: L{Encoder}
"""
from suds.sax.enc import Encoder
#
# pluggable XML special character encoder.
#
encoder = Encoder()
def splitPrefix(name):
"""
Split the name into a tuple (I{prefix}, I{name}). The first element in
    the tuple is I{None} when the name doesn't have a prefix.
@param name: A node name containing an optional prefix.
@type name: basestring
@return: A tuple containing the (2) parts of I{name}
@rtype: (I{prefix}, I{name})
"""
if isinstance(name, basestring) \
and ':' in name:
return tuple(name.split(':', 1))
else:
return (None, name)
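# Illustrative examples (not part of the original module):
#
#   splitPrefix('tns:Envelope')  ->  ('tns', 'Envelope')
#   splitPrefix('Envelope')      ->  (None, 'Envelope')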
class Namespace:
"""
The namespace class represents XML namespaces.
"""
default = (None, None)
xmlns = ('xml', 'http://www.w3.org/XML/1998/namespace')
xsdns = ('xs', 'http://www.w3.org/2001/XMLSchema')
xsins = ('xsi', 'http://www.w3.org/2001/XMLSchema-instance')
all = (xsdns, xsins)
@classmethod
def create(cls, p=None, u=None):
return (p, u)
@classmethod
def none(cls, ns):
return ( ns == cls.default )
@classmethod
def xsd(cls, ns):
try:
return cls.w3(ns) and ns[1].endswith('XMLSchema')
except:
pass
return False
@classmethod
def xsi(cls, ns):
try:
return cls.w3(ns) and ns[1].endswith('XMLSchema-instance')
except:
pass
return False
@classmethod
def xs(cls, ns):
return ( cls.xsd(ns) or cls.xsi(ns) )
@classmethod
def w3(cls, ns):
try:
return ns[1].startswith('http://www.w3.org')
except:
pass
return False
@classmethod
def isns(cls, ns):
try:
return isinstance(ns, tuple) and len(ns) == len(cls.default)
except:
pass
return False
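# Illustrative examples (not part of the original module):
#
#   Namespace.xsd(('xs', 'http://www.w3.org/2001/XMLSchema'))            -> True
#   Namespace.xsi(('xsi', 'http://www.w3.org/2001/XMLSchema-instance'))  -> True
#   Namespace.isns(('tns', 'http://myns'))                               -> True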
| lgpl-3.0 |
Stavitsky/nova | nova/servicegroup/drivers/db.py | 23 | 3982 | # Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
import six
from nova.i18n import _, _LE
from nova.servicegroup import api
from nova.servicegroup.drivers import base
CONF = cfg.CONF
CONF.import_opt('service_down_time', 'nova.service')
LOG = logging.getLogger(__name__)
class DbDriver(base.Driver):
def __init__(self, *args, **kwargs):
self.service_down_time = CONF.service_down_time
def join(self, member, group, service=None):
"""Add a new member to a service group.
:param member: the joined member ID/name
:param group: the group ID/name, of the joined member
:param service: a `nova.service.Service` object
"""
LOG.debug('DB_Driver: join new ServiceGroup member %(member)s to '
'the %(group)s group, service = %(service)s',
{'member': member, 'group': group,
'service': service})
if service is None:
raise RuntimeError(_('service is a mandatory argument for DB based'
' ServiceGroup driver'))
report_interval = service.report_interval
if report_interval:
service.tg.add_timer(report_interval, self._report_state,
api.INITIAL_REPORTING_DELAY, service)
def is_up(self, service_ref):
"""Moved from nova.utils
Check whether a service is up based on last heartbeat.
"""
# Keep checking 'updated_at' if 'last_seen_up' isn't set.
# Should be able to use only 'last_seen_up' in the M release
last_heartbeat = (service_ref.get('last_seen_up') or
service_ref['updated_at'] or service_ref['created_at'])
if isinstance(last_heartbeat, six.string_types):
# NOTE(russellb) If this service_ref came in over rpc via
# conductor, then the timestamp will be a string and needs to be
# converted back to a datetime.
last_heartbeat = timeutils.parse_strtime(last_heartbeat)
else:
# Objects have proper UTC timezones, but the timeutils comparison
# below does not (and will fail)
last_heartbeat = last_heartbeat.replace(tzinfo=None)
# Timestamps in DB are UTC.
elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow())
is_up = abs(elapsed) <= self.service_down_time
if not is_up:
LOG.debug('Seems service is down. Last heartbeat was %(lhb)s. '
'Elapsed time is %(el)s',
{'lhb': str(last_heartbeat), 'el': str(elapsed)})
return is_up
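    # Illustrative example (not from the original source): assuming the default
    # service_down_time of 60 seconds, a service whose last heartbeat
    # (last_seen_up/updated_at) is 45 seconds old is reported as up, while one
    # whose heartbeat is 90 seconds old is reported as down.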
def _report_state(self, service):
"""Update the state of this service in the datastore."""
try:
service.service_ref.report_count += 1
service.service_ref.save()
# TODO(termie): make this pattern be more elegant.
if getattr(service, 'model_disconnected', False):
service.model_disconnected = False
LOG.error(_LE('Recovered model server connection!'))
# TODO(vish): this should probably only catch connection errors
except Exception:
if not getattr(service, 'model_disconnected', False):
service.model_disconnected = True
LOG.exception(_LE('model server went away'))
| apache-2.0 |
youdonghai/intellij-community | python/helpers/pycharm/noserunner.py | 30 | 2391 | import sys
import os
helpers_dir = os.getenv("PYCHARM_HELPERS_DIR", sys.path[0])
if sys.path[0] != helpers_dir:
sys.path.insert(0, helpers_dir)
from nose_utils import TeamcityPlugin
from pycharm_run_utils import debug, import_system_module
from pycharm_run_utils import adjust_sys_path
adjust_sys_path(False)
shlex = import_system_module("shlex")
try:
from nose.core import TestProgram
from nose.config import Config
from nose.plugins.manager import DefaultPluginManager
except:
raise NameError("Please, install nosetests")
teamcity_plugin = TeamcityPlugin()
class MyConfig(Config):
def __init__(self, **kw):
super(MyConfig, self).__init__(**kw)
def __setstate__(self, state):
super(MyConfig, self).__setstate__(state)
self.plugins.addPlugin(teamcity_plugin)
def process_args():
tests = []
opts = None
if sys.argv[-1].startswith("-"):
test_names = sys.argv[1:-1]
opts = sys.argv[-1]
else:
test_names = sys.argv[1:]
for arg in test_names:
arg = arg.strip()
if len(arg) == 0:
return
a = arg.split("::")
if len(a) == 1:
# From module or folder
a_splitted = a[0].split(";")
if len(a_splitted) != 1:
# means we have pattern to match against
if a_splitted[0].endswith("/"):
debug("/ from folder " + a_splitted[0] + ". Use pattern: " + a_splitted[1])
tests.append(a_splitted[0])
else:
if a[0].endswith("/"):
debug("/ from folder " + a[0])
tests.append(a[0])
else:
debug("/ from module " + a[0])
tests.append(a[0])
elif len(a) == 2:
# From testcase
debug("/ from testcase " + a[1] + " in " + a[0])
tests.append(a[0] + ":" + a[1])
else:
# From method in class or from function
debug("/ from method " + a[2] + " in testcase " + a[1] + " in " + a[0])
if a[1] == "":
# test function, not method
tests.append(a[0] + ":" + a[2])
else:
tests.append(a[0] + ":" + a[1] + "." + a[2])
argv = ['nosetests']
argv.extend(tests)
if opts:
options = shlex.split(opts)
argv.extend(options)
manager = DefaultPluginManager()
manager.addPlugin(teamcity_plugin)
config = MyConfig(plugins=manager)
config.configure(argv)
TestProgram(argv=argv, config=config, exit=False)
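# Illustrative mapping (not part of the original helper) from the "::"-separated
# test specs parsed above to the nose test ids that are appended to argv:
#
#   "tests/test_foo.py"                     -> "tests/test_foo.py"
#   "tests/test_foo.py::FooTest"            -> "tests/test_foo.py:FooTest"
#   "tests/test_foo.py::FooTest::test_bar"  -> "tests/test_foo.py:FooTest.test_bar"
#   "tests/test_foo.py::::test_baz"         -> "tests/test_foo.py:test_baz"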
if __name__ == "__main__":
process_args() | apache-2.0 |
leesavide/pythonista-docs | Documentation/matplotlib/mpl_examples/pylab_examples/contourf_log.py | 9 | 1350 | '''
Demonstrate use of a log color scale in contourf
'''
from matplotlib import pyplot as P
import numpy as np
from numpy import ma
from matplotlib import colors, ticker, cm
from matplotlib.mlab import bivariate_normal
N = 100
x = np.linspace(-3.0, 3.0, N)
y = np.linspace(-2.0, 2.0, N)
X, Y = np.meshgrid(x, y)
# A low hump with a spike coming out of the top right.
# Needs to have z/colour axis on a log scale so we see both hump and spike.
# A linear scale only shows the spike.
z = (bivariate_normal(X, Y, 0.1, 0.2, 1.0, 1.0)
+ 0.1 * bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0))
# Put in some negative values (lower left corner) to cause trouble with logs:
z[:5, :5] = -1
# The following is not strictly essential, but it will eliminate
# a warning. Comment it out to see the warning.
z = ma.masked_where(z <= 0, z)
# Automatic selection of levels works; setting the
# log locator tells contourf to use a log scale:
cs = P.contourf(X, Y, z, locator=ticker.LogLocator(), cmap=cm.PuBu_r)
# Alternatively, you can manually set the levels
# and the norm:
#lev_exp = np.arange(np.floor(np.log10(z.min())-1),
# np.ceil(np.log10(z.max())+1))
#levs = np.power(10, lev_exp)
#cs = P.contourf(X, Y, z, levs, norm=colors.LogNorm())
#The 'extend' kwarg does not work yet with a log scale.
cbar = P.colorbar()
P.show()
| apache-2.0 |
jhjguxin/PyCDC | obp.lovelykarrigell/module/Karrigell_QuickForm.py | 2 | 18595 | #@ignore
#@language python
"""
Package: Karrigell_QuickForm-1.0.1-alpha
Requirements: Karrigell HTTP Server - http://karrigell.sourceforge.net
Description: - A simple class that generates html forms with some basic javascript validations.
- It is similar to HTML_QuickForm from PEAR (http://pear.php.net).
Author: Marcelo Santos Araujo <[email protected]>
Date: 23 November 2005
Version: $Revision Karrigell_QuickForm-1.0.1-alfa
Credits: Special thanks to Pierre Quentel and Karrigell's developers.
Contributor: Zoom.Quiet <[email protected]>
- Chinese handbook
- addCheckboxGrp()
- addRadioList()
- add JSValidation support:
addJSValidation()
addJSRule()
saveJSRule()
"""
class Karrigell_QuickForm:
"""Simple HTML Forms Generation - Karrigell_QuickForm"""
def __init__(self,name,method,action,legend):
"""Form name, request method, file target,legend - Karrigell_QuickForm('contact_form','POST','contact.py','form legend')"""
self.JSvXMLtmpl="""<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE validation-config SYSTEM "validation-config.dtd">
<validation-config lang="auto">
<form id="%s" show-error="errorMessage" onfail=""
show-type="first">
%s
</form>
</validation-config>
"""
self.JSvMXLnode = """
<field name="%s"
display-name="%s" onfail="">
<depend name="required" />
</field>
"""
self.JSvRules = []
self.form_list = []
self.css_list = []
self.js_list =[]
self.name = name
self.legend_name = legend
self.method = method
self.action = action
self.form_list.append("<div><form method='"+self.method+"' name='"+self.name+"' action='"+self.action+"' id='"+self.name+"' enctype='multipart/form-data'><fieldset><legend><b>"+self.legend_name+"</b></legend><table>")
def addElement(self,element,name,options=None):
"""add a form element: text,file,image,submit,reset,header
- addElement('text'
,'full_name'
,{'size':20
,'maxlength':'40'})
available elements:
text, checkbox, submit, reset, file,image,header
"""
if element == 'text':
size = options['size']
size = str(size)
maxlength = options['maxlength']
maxlength = str(maxlength)
self.form_list.append("<tr><td align='right' valign='top'><b>"+name.title().replace('_',' ')+":</b></td><td valign='top' align='left'><input type='"+element+"' name='"+name+"' size='"+size+"' maxlength='"+maxlength+"'><td></tr>")
elif element == 'node':
self.form_list.append(name)
elif element == 'checkbox':
self.form_list.append("<tr><td align='right' valign='top'><b>"+name.title().replace('_',' ')+":</b></td> <td valign='top' align='left'>")
checkbox = ""
dic_check = options.items()
for a,b in dic_check:
checkbox = checkbox + "<input type='checkbox' name='"+name+"' value='"+a+"'>"+"<label><font face=verdana size=2> "+b+"</font></label><br>"
self.form_list.append(checkbox+"</td>")
self.form_list.append("</tr>")
elif element == 'submit':
value = options['value']
self.form_list.append("<tr><td align='right' valign='top'><b></b></td><td valign='top' align='left'><input type='"+element+"' name='"+name+"' value='"+value+"'><td></tr>")
elif element == 'reset':
value= options['value']
self.form_list.append("<tr><td align='right' valign='top'><b></b></td><td valign='top' align='left'><input type='"+element+"' name='"+name+"' value='"+value+"'></td></tr>")
elif element == 'file':
self.form_list.append("<tr><td align='right' valign='top'><b>"+name.title()+":</b></td><td valign='top' align='left'><input type='"+element+"' name='"+name+"'><td></tr>")
elif element == 'image':
self.form_list.append("<tr><td align='right' valign='top'><b></b></td><td valign='top' align='left'><img src='"+name+"'></td></tr>")
elif element == 'header':
self.form_list.append("<tr><td align='left' valign='top' colspan='2'><b><center>"+name.title()+"</center></b></td></tr>")
def addHtmNode(self,element,name,desc,options=None):
"""add a form element: text,file,image,submit,reset,header
- addElement('text'
,'object name'
,'print name'
,{'size':20
,'maxlength':'40'})
available elements:
text, checkbox, submit, reset, file,image,header
"""
if element == 'text':
size = options['size']
size = str(size)
maxlength = options['maxlength']
maxlength = str(maxlength)
htm = """<tr>
<td align='right' valign='top'>
<b>%s:</b></td>
<td valign='top' align='left'>
<input type='%s'
name='%s'
size='%s'
maxlength='%s'><td>
</tr>
"""
self.form_list.append(htm%(desc
,element
,name
,size
,maxlength)
)
#name.title().replace('_',' ')
elif element == 'node':
self.form_list.append(name)
elif element == 'checkbox':
self.form_list.append("<tr><td align='right' valign='top'><b>"+name.title().replace('_',' ')+":</b></td> <td valign='top' align='left'>")
checkbox = ""
dic_check = options.items()
for a,b in dic_check:
checkbox = checkbox + "<input type='checkbox' name='"+name+"' value='"+a+"'>"+"<label><font face=verdana size=2> "+b+"</font></label><br>"
self.form_list.append(checkbox+"</td>")
self.form_list.append("</tr>")
elif element == 'submit':
value = options['value']
self.form_list.append("<tr><td align='right' valign='top'><b></b></td><td valign='top' align='left'><input type='"+element+"' name='"+name+"' value='"+value+"'><td></tr>")
elif element == 'reset':
value= options['value']
self.form_list.append("<tr><td align='right' valign='top'><b></b></td><td valign='top' align='left'><input type='"+element+"' name='"+name+"' value='"+value+"'></td></tr>")
elif element == 'file':
self.form_list.append("<tr><td align='right' valign='top'><b>"+name.title()+":</b></td><td valign='top' align='left'><input type='"+element+"' name='"+name+"'><td></tr>")
elif element == 'image':
self.form_list.append("<tr><td align='right' valign='top'><b></b></td><td valign='top' align='left'><img src='"+name+"'></td></tr>")
elif element == 'header':
self.form_list.append("<tr><td align='left' valign='top' colspan='2'><b><center>"+name.title()+"</center></b></td></tr>")
def addTextArea(self,name,rows,cols):
""" add a textarea element - addTextArea('message','10','90')"""
self.form_list.append("<tr><td align='right' valign='top'><b>"+name.title().replace('_',' ')+":</b></td><td valign='top' align='left'><textarea name='"+name+"' cols='"+cols+"' rows='"+rows+"'></textarea><td></tr>")
def addGroup(self,*t):
""" add a button group
"""
htm = """<input
type='%s'
name='%s'
value='%s'
class='%s'>
"""
group = ""
self.form_list.append("<tr><td align='right' valign='top'><b></b></td><td valign='top' align='left'>")
for a,b,c,d in t:
group += htm%(a,b,c,d)
self.form_list.append(group+"</td></tr>")
def addComboBox(self,name,t):
""" add a combobox element
- addComboBox('fruits'
,{'apple':'Apple'
,'pear':'Pear'
,'orange':'Orange'})
"""
self.form_list.append("<tr><td align='right' valign='top'><b>"+name.title().replace('_',' ')+":</b></td> <td align='left' valign='top'><select name='"+name+"[]'>")
combo = ""
t= t.items()
for a,b in t:
combo = combo + "<option value='"+a+"'>"+b+"</option>"
self.form_list.append(combo)
self.form_list.append("</select></td></tr>")
def addRadioGroup(self,name,value):
"""add a radio element export in TABLE
- addRadioGroup('genre'
,{'male':'Male'
,'female':'Female'})
"""
self.form_list.append("<tr><td align='right' valign='top'><b>"+name.title().replace('_',' ')+":</b></td> <td valign='top' align='left'>")
radio = ""
t = value.items()
for a,b in t:
radio = radio + "<input type='radio' name='"+name+"' value='"+a+"'>"+"<label><font face=verdana size=2>"+b+"</font></label><br>"
self.form_list.append(radio+"</td>")
self.form_list.append("</tr>")
def addRadioList(self,name,desc,value,id=""):
"""add a radio element export as UL LI group
- addRadioGroup('name','desc'
,{'male':'Male'
,'female':'Female'})
"""
htm = """
<li id='%s'><b>%s:</b>
<ul>"""
self.form_list.append(htm%(id,desc))
radio = ""
t = value.items()
tmpl = """<li>
<input type='radio'
name='%s'
value='%s'>
<label>%s</label>
"""
for a,b in t:
radio = radio + tmpl%(name,a,b)
self.form_list.append(radio+"</ul></li>")
#self.form_list.append("</tr>")
def addRadioGrp(self,name,desc,value):
"""add a radio element in TABLE
- addRadioGroup('name','desc'
,{'male':'Male'
,'female':'Female'})
"""
htm = """
<tr><td align='right' valign='top'>
<b>%s:</b></td>
<td valign='top' align='left'>"""
self.form_list.append(htm%desc)
radio = ""
t = value.items()
tmpl = """<input type='radio'
name='%s'
value='%s'>
<label>%s</label>
<br>
"""
for a,b in t:
radio = radio + tmpl%(name,a,b)
self.form_list.append(radio+"</td>")
self.form_list.append("</tr>")
def addChkboxGrp(self,name,desc,value):
"""add a radio element Export in TABLE
- addRadioGroup('name','desc'
,{'male':'Male'
,'female':'Female'})
"""
htm = """
<tr><td align='right' valign='top'>
<b>%s:</b></td>
<td valign='top' align='left'>"""
self.form_list.append(htm%desc)
radio = ""
t = value.items()
tmpl = """<input type='checkbox'
name='%s[]'
value='%s'>
<label>%s</label>
<br>
"""
for a,b in t:
radio = radio + tmpl%(name,a,b)
self.form_list.append(radio+"</td>")
self.form_list.append("</tr>")
def showFormList(self):
""" returns the whole form list """
return self.form_list
def display(self):
""" displays the html form"""
self.form_list.append("</table></fieldset></form></div>")
self.js_list.append("<script type='text/javascript'>")
print self.js_list.pop()
print "function validate_%s(frm){"% self.name
print """var value='';
var errFlag=new Array();
var _qfGroups={};
_qfMsg='';"""
for k in self.js_list:
print k+"\n"
self.js_list.append("</script>")
final_js_function = """
if (_qfMsg != ''){
_qfMsg = 'Form is not correct!' + _qfMsg;
_qfMsg = _qfMsg+'<break>Please, checkout instructions above';
alert(_qfMsg);
return false;
}
return true; }
"""
print final_js_function.replace("<break>","\\n")
print self.js_list.pop()
for c in self.css_list:
print c+"\n"
for i in self.form_list:
print i+"\n"
def export(self):
""" export the html form code
so people can do something for them self
"""
exp = ""
self.form_list.append("</table></fieldset></form></div>")
self.js_list.append("<script type='text/javascript'>")
exp += self.js_list.pop()
exp += "function validate_%s(frm){"% self.name
exp += """var value='';
var errFlag=new Array();
var _qfGroups={};
_qfMsg='';"""
for k in self.js_list:
exp += k+"\n"
self.js_list.append("</script>")
final_js_function = """
if (_qfMsg != ''){
_qfMsg = 'Form is not correct!' + _qfMsg;
_qfMsg = _qfMsg+'<break>Please, checkout instructions above';
alert(_qfMsg);
return false;
}
return true; }
"""
exp += final_js_function.replace("<break>","\\n")
exp += self.js_list.pop()
for c in self.css_list:
exp += c+"\n"
for i in self.form_list:
exp += i+"\n"
return exp
def addStyleSheets(self,t):
"""add a basic stylesheet - simple CSS parameters"""
css = "<style type='text/css'>textarea { background-color:"+t['bgcolor']+";font-family:"+t['font']+"; font-size:"+t['size']+"px; border-style:solid;border-color:"+t['border-color']+";border-width:1px;} option { background-color:"+t['bgcolor']+";font-family:"+t['font']+";border-style:solid;border-color:"+t['border-color']+";border-width:1px;} input { background-color:"+t['bgcolor']+";font-family:"+t['font']+";border-style:solid;border-color:"+t['border-color']+";border-width:1px;} option { background-color:"+t['bgcolor']+";font-family:"+t['font']+";border-style:solid;border-color:"+t['border-color']+";border-width:1px;} select { background-color:"+t['bgcolor']+";font-family:"+t['font']+";border-style:solid;border-color:"+t['border-color']+";border-width:1px;} td { font-size:"+t['size']+"px; font-family:"+t['font']+"}</style>"
self.css_list.append(css)
def addRule(self,elem_name,rule_type,message):
"""add a javascript rule in order to validate a form field
- addRule('elem_name','required','Name is required!')
"""
orig = "enctype='multipart/form-data"
repl = """enctype='multipart/form-data'
onsubmit='try {
var myValidator = validate_%s;
}
catch(e) { return true; }
return myValidator(this);"""
if rule_type == "required":
begin_form=self.form_list[0].replace(orig
,repl%self.name)
self.form_list[0] = begin_form
js_string = """
obj = frm.elements['%s'];
//alert(obj.type);
value=frm.elements['%s'].value;
if(value==''&&!errFlag['%s']){
errFlag['%s']=true;
_qfMsg=_qfMsg + '<break>- %s';
}
""" % (elem_name
,elem_name
,elem_name
,elem_name
,message)
js_string = js_string.replace("<break>","\\n")
self.js_list.append(js_string)
else:
pass
def addJSValidation(self):
"""add a javascript rule in order to validate a form field
- addRule('elem_name','required','Name is required!')
"""
orig = "enctype='multipart/form-data'"
repl = """
onsubmit='return doValidate("%s");'
"""
begin_form=self.form_list[0].replace(orig
,repl%self.name)
self.form_list[0] = begin_form
def addJSRule(self,name,message):
"""add a xml rule for javascript checking
"""
exp = self.JSvMXLnode%(name,message)
self.JSvRules.append(exp)
def saveJSRule(self,xml):
"""exp and save a xml rule for javascript checking
"""
exp = ""
for node in self.JSvRules:
exp+= node
#exp = self.JSvXMLtmpl%(form,exp)
open(xml,'w').write(self.JSvXMLtmpl%(self.name
,exp)
)
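    # Minimal sketch (illustrative, not part of the original overview below) of
    # the JSValidation helpers; the form, field and file names are made up:
    #
    #   form = Karrigell_QuickForm('signup', 'POST', 'signup.py', 'Sign up')
    #   form.addElement('text', 'email', {'size': 40, 'maxlength': 60})
    #   form.addJSValidation()            # hook doValidate() into the form's onsubmit
    #   form.addJSRule('email', 'E-mail is required')
    #   form.saveJSRule('validation-config.xml')
    #   form.display()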
"""
Overview - Karrigell_QuickForm
p = Karrigell_QuickForm('teste','POST','teste.py','teste')
p.addElement('text','nome',{'size':80,'maxlength':20})
p.addElement('text','email',{'size':80,'maxlength':20})
p.addRule('nome','required','campo nome obrigario!')
p.addComboBox('combo',{'a':'A','b':'B'})
p.addElement('checkbox','fuda',{'a':'Letra A','b':'Letra B'})
p.addElement('image','python.gif')
p.addElement('file','foto')
p.addElement('submit','botao_enviar',{'value':'Enviar A'})
p.addComboBox('sexo',{'Masculino':'Masculino','Feminino':'Feminino'})
p.addTextArea('mensagem','20','80')
p.addGroup(["submit","botao_enviar","Enviar","button"],["reset","botao_limpar","Limpar","button"])
p.addStyleSheets({'bgcolor':'lightblue','font':'verdana','size':'12','border-color':'black'})
p.display()
"""
| gpl-3.0 |
arnaud-morvan/QGIS | scripts/pyuic-wrapper.py | 32 | 1138 | # -*- coding: utf-8 -*-
"""
***************************************************************************
pyuic-wrapper.py
---------------------
Date : March 2016
Copyright : (C) 2016 by Juergen E. Fischer
Email : jef at norbit dot de
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Juergen E. Fischer'
__date__ = 'March 2016'
__copyright__ = '(C) 2016, Juergen E. Fischer'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis.PyQt.uic.pyuic
| gpl-2.0 |
favll/pogom | pogom/pgoapi/protos/POGOProtos/Networking/Responses/CheckAwardedBadgesResponse_pb2.py | 16 | 3622 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Networking/Responses/CheckAwardedBadgesResponse.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from POGOProtos.Enums import BadgeType_pb2 as POGOProtos_dot_Enums_dot_BadgeType__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Networking/Responses/CheckAwardedBadgesResponse.proto',
package='POGOProtos.Networking.Responses',
syntax='proto3',
serialized_pb=_b('\n@POGOProtos/Networking/Responses/CheckAwardedBadgesResponse.proto\x12\x1fPOGOProtos.Networking.Responses\x1a POGOProtos/Enums/BadgeType.proto\"\x80\x01\n\x1a\x43heckAwardedBadgesResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x33\n\x0e\x61warded_badges\x18\x02 \x03(\x0e\x32\x1b.POGOProtos.Enums.BadgeType\x12\x1c\n\x14\x61warded_badge_levels\x18\x03 \x03(\x05\x62\x06proto3')
,
dependencies=[POGOProtos_dot_Enums_dot_BadgeType__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_CHECKAWARDEDBADGESRESPONSE = _descriptor.Descriptor(
name='CheckAwardedBadgesResponse',
full_name='POGOProtos.Networking.Responses.CheckAwardedBadgesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='success', full_name='POGOProtos.Networking.Responses.CheckAwardedBadgesResponse.success', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='awarded_badges', full_name='POGOProtos.Networking.Responses.CheckAwardedBadgesResponse.awarded_badges', index=1,
number=2, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='awarded_badge_levels', full_name='POGOProtos.Networking.Responses.CheckAwardedBadgesResponse.awarded_badge_levels', index=2,
number=3, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=136,
serialized_end=264,
)
_CHECKAWARDEDBADGESRESPONSE.fields_by_name['awarded_badges'].enum_type = POGOProtos_dot_Enums_dot_BadgeType__pb2._BADGETYPE
DESCRIPTOR.message_types_by_name['CheckAwardedBadgesResponse'] = _CHECKAWARDEDBADGESRESPONSE
CheckAwardedBadgesResponse = _reflection.GeneratedProtocolMessageType('CheckAwardedBadgesResponse', (_message.Message,), dict(
DESCRIPTOR = _CHECKAWARDEDBADGESRESPONSE,
__module__ = 'POGOProtos.Networking.Responses.CheckAwardedBadgesResponse_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Networking.Responses.CheckAwardedBadgesResponse)
))
_sym_db.RegisterMessage(CheckAwardedBadgesResponse)
# @@protoc_insertion_point(module_scope)
| mit |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/matplotlib/tests/test_patheffects.py | 10 | 5445 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from matplotlib.testing.decorators import image_comparison, cleanup
import matplotlib.pyplot as plt
import matplotlib.patheffects as path_effects
try:
# mock in python 3.3+
from unittest import mock
except ImportError:
import mock
from nose.tools import assert_equal
@image_comparison(baseline_images=['patheffect1'], remove_text=True)
def test_patheffect1():
ax1 = plt.subplot(111)
ax1.imshow([[1, 2], [2, 3]])
txt = ax1.annotate("test", (1., 1.), (0., 0),
arrowprops=dict(arrowstyle="->",
connectionstyle="angle3", lw=2),
size=20, ha="center",
path_effects=[path_effects.withStroke(linewidth=3,
foreground="w")])
txt.arrow_patch.set_path_effects([path_effects.Stroke(linewidth=5,
foreground="w"),
path_effects.Normal()])
ax1.grid(True, linestyle="-")
pe = [path_effects.withStroke(linewidth=3, foreground="w")]
for l in ax1.get_xgridlines() + ax1.get_ygridlines():
l.set_path_effects(pe)
@image_comparison(baseline_images=['patheffect2'], remove_text=True)
def test_patheffect2():
ax2 = plt.subplot(111)
arr = np.arange(25).reshape((5, 5))
ax2.imshow(arr)
cntr = ax2.contour(arr, colors="k")
plt.setp(cntr.collections,
path_effects=[path_effects.withStroke(linewidth=3,
foreground="w")])
clbls = ax2.clabel(cntr, fmt="%2.0f", use_clabeltext=True)
plt.setp(clbls,
path_effects=[path_effects.withStroke(linewidth=3,
foreground="w")])
@image_comparison(baseline_images=['patheffect3'])
def test_patheffect3():
p1, = plt.plot([1, 3, 5, 4, 3], 'o-b', lw=4)
p1.set_path_effects([path_effects.SimpleLineShadow(),
path_effects.Normal()])
plt.title(r'testing$^{123}$',
path_effects=[path_effects.withStroke(linewidth=1, foreground="r")])
leg = plt.legend([p1], [r'Line 1$^2$'], fancybox=True, loc=2)
leg.legendPatch.set_path_effects([path_effects.withSimplePatchShadow()])
text = plt.text(2, 3, 'Drop test', color='white',
bbox={'boxstyle': 'circle,pad=0.1', 'color': 'red'})
pe = [path_effects.Stroke(linewidth=3.75, foreground='k'),
path_effects.withSimplePatchShadow((6, -3), shadow_rgbFace='blue')]
text.set_path_effects(pe)
text.get_bbox_patch().set_path_effects(pe)
pe = [path_effects.PathPatchEffect(offset=(4, -4), hatch='xxxx',
facecolor='gray'),
path_effects.PathPatchEffect(edgecolor='white', facecolor='black',
lw=1.1)]
t = plt.gcf().text(0.02, 0.1, 'Hatch shadow', fontsize=75, weight=1000,
va='center')
t.set_path_effects(pe)
@cleanup
def test_PathEffect_get_proxy():
pe = path_effects.AbstractPathEffect()
fig = plt.gcf()
renderer = fig.canvas.get_renderer()
with mock.patch('matplotlib.cbook.deprecated') as dep:
proxy_renderer = pe.get_proxy_renderer(renderer)
assert_equal(proxy_renderer._renderer, renderer)
assert_equal(proxy_renderer._path_effects, [pe])
dep.assert_called()
@cleanup
def test_PathEffect_points_to_pixels():
fig = plt.figure(dpi=150)
p1, = plt.plot(range(10))
p1.set_path_effects([path_effects.SimpleLineShadow(),
path_effects.Normal()])
renderer = fig.canvas.get_renderer()
pe_renderer = path_effects.SimpleLineShadow().get_proxy_renderer(renderer)
assert isinstance(pe_renderer, path_effects.PathEffectRenderer), (
'Expected a PathEffectRendere instance, got '
'a {} instance.'.format(type(pe_renderer)))
# Confirm that using a path effects renderer maintains point sizes
# appropriately. Otherwise rendered font would be the wrong size.
assert_equal(renderer.points_to_pixels(15),
pe_renderer.points_to_pixels(15))
def test_SimplePatchShadow_offset_xy():
with mock.patch('matplotlib.cbook.deprecated') as dep:
pe = path_effects.SimplePatchShadow(offset_xy=(4, 5))
assert_equal(pe._offset, (4, 5))
dep.assert_called()
@image_comparison(baseline_images=['collection'])
def test_collection():
x, y = np.meshgrid(np.linspace(0, 10, 150), np.linspace(-5, 5, 100))
data = np.sin(x) + np.cos(y)
cs = plt.contour(data)
pe = [path_effects.PathPatchEffect(edgecolor='black', facecolor='none',
linewidth=12),
path_effects.Stroke(linewidth=5)]
for collection in cs.collections:
collection.set_path_effects(pe)
for text in plt.clabel(cs, colors='white'):
text.set_path_effects([path_effects.withStroke(foreground='k',
linewidth=3)])
text.set_bbox({'boxstyle': 'sawtooth', 'facecolor': 'none',
'edgecolor': 'blue'})
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
flamusdiu/oauthlib | tests/oauth2/rfc6749/clients/test_base.py | 15 | 8489 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from ....unittest import TestCase
import datetime
from oauthlib import common
from oauthlib.oauth2.rfc6749 import utils
from oauthlib.oauth2 import Client
from oauthlib.oauth2 import InsecureTransportError
from oauthlib.oauth2.rfc6749.clients import AUTH_HEADER, URI_QUERY, BODY
class ClientTest(TestCase):
client_id = "someclientid"
uri = "https://example.com/path?query=world"
body = "not=empty"
headers = {}
access_token = "token"
mac_key = "secret"
bearer_query = uri + "&access_token=" + access_token
bearer_header = {
"Authorization": "Bearer " + access_token
}
bearer_body = body + "&access_token=" + access_token
mac_00_header = {
"Authorization": 'MAC id="' + access_token + '", nonce="0:abc123",' +
' bodyhash="Yqyso8r3hR5Nm1ZFv+6AvNHrxjE=",' +
' mac="0X6aACoBY0G6xgGZVJ1IeE8dF9k="'
}
mac_01_header = {
"Authorization": 'MAC id="' + access_token + '", ts="123456789",' +
' nonce="abc123", mac="Xuk+9oqaaKyhitkgh1CD0xrI6+s="'
}
def test_add_bearer_token(self):
"""Test a number of bearer token placements"""
# Invalid token type
client = Client(self.client_id, token_type="invalid")
self.assertRaises(ValueError, client.add_token, self.uri)
# Case-insensitive token type
client = Client(self.client_id, access_token=self.access_token, token_type="bEAreR")
uri, headers, body = client.add_token(self.uri, body=self.body,
headers=self.headers)
self.assertURLEqual(uri, self.uri)
self.assertFormBodyEqual(body, self.body)
self.assertEqual(headers, self.bearer_header)
# Missing access token
client = Client(self.client_id)
self.assertRaises(ValueError, client.add_token, self.uri)
# The default token placement, bearer in auth header
client = Client(self.client_id, access_token=self.access_token)
uri, headers, body = client.add_token(self.uri, body=self.body,
headers=self.headers)
self.assertURLEqual(uri, self.uri)
self.assertFormBodyEqual(body, self.body)
self.assertEqual(headers, self.bearer_header)
# Setting default placements of tokens
client = Client(self.client_id, access_token=self.access_token,
default_token_placement=AUTH_HEADER)
uri, headers, body = client.add_token(self.uri, body=self.body,
headers=self.headers)
self.assertURLEqual(uri, self.uri)
self.assertFormBodyEqual(body, self.body)
self.assertEqual(headers, self.bearer_header)
client = Client(self.client_id, access_token=self.access_token,
default_token_placement=URI_QUERY)
uri, headers, body = client.add_token(self.uri, body=self.body,
headers=self.headers)
self.assertURLEqual(uri, self.bearer_query)
self.assertFormBodyEqual(body, self.body)
self.assertEqual(headers, self.headers)
client = Client(self.client_id, access_token=self.access_token,
default_token_placement=BODY)
uri, headers, body = client.add_token(self.uri, body=self.body,
headers=self.headers)
self.assertURLEqual(uri, self.uri)
self.assertFormBodyEqual(body, self.bearer_body)
self.assertEqual(headers, self.headers)
# Asking for specific placement in the add_token method
client = Client(self.client_id, access_token=self.access_token)
uri, headers, body = client.add_token(self.uri, body=self.body,
headers=self.headers, token_placement=AUTH_HEADER)
self.assertURLEqual(uri, self.uri)
self.assertFormBodyEqual(body, self.body)
self.assertEqual(headers, self.bearer_header)
client = Client(self.client_id, access_token=self.access_token)
uri, headers, body = client.add_token(self.uri, body=self.body,
headers=self.headers, token_placement=URI_QUERY)
self.assertURLEqual(uri, self.bearer_query)
self.assertFormBodyEqual(body, self.body)
self.assertEqual(headers, self.headers)
client = Client(self.client_id, access_token=self.access_token)
uri, headers, body = client.add_token(self.uri, body=self.body,
headers=self.headers, token_placement=BODY)
self.assertURLEqual(uri, self.uri)
self.assertFormBodyEqual(body, self.bearer_body)
self.assertEqual(headers, self.headers)
# Invalid token placement
client = Client(self.client_id, access_token=self.access_token)
self.assertRaises(ValueError, client.add_token, self.uri, body=self.body,
headers=self.headers, token_placement="invalid")
client = Client(self.client_id, access_token=self.access_token,
default_token_placement="invalid")
self.assertRaises(ValueError, client.add_token, self.uri, body=self.body,
headers=self.headers)
def test_add_mac_token(self):
# Missing access token
client = Client(self.client_id, token_type="MAC")
self.assertRaises(ValueError, client.add_token, self.uri)
# Invalid hash algorithm
client = Client(self.client_id, token_type="MAC",
access_token=self.access_token, mac_key=self.mac_key,
mac_algorithm="hmac-sha-2")
self.assertRaises(ValueError, client.add_token, self.uri)
orig_generate_timestamp = common.generate_timestamp
orig_generate_nonce = common.generate_nonce
orig_generate_age = utils.generate_age
self.addCleanup(setattr, common, 'generage_timestamp', orig_generate_timestamp)
self.addCleanup(setattr, common, 'generage_nonce', orig_generate_nonce)
self.addCleanup(setattr, utils, 'generate_age', orig_generate_age)
common.generate_timestamp = lambda: '123456789'
common.generate_nonce = lambda: 'abc123'
utils.generate_age = lambda *args: 0
# Add the Authorization header (draft 00)
client = Client(self.client_id, token_type="MAC",
access_token=self.access_token, mac_key=self.mac_key,
mac_algorithm="hmac-sha-1")
uri, headers, body = client.add_token(self.uri, body=self.body,
headers=self.headers, issue_time=datetime.datetime.now())
self.assertEqual(uri, self.uri)
self.assertEqual(body, self.body)
self.assertEqual(headers, self.mac_00_header)
# Add the Authorization header (draft 00)
client = Client(self.client_id, token_type="MAC",
access_token=self.access_token, mac_key=self.mac_key,
mac_algorithm="hmac-sha-1")
uri, headers, body = client.add_token(self.uri, body=self.body,
headers=self.headers, draft=1)
self.assertEqual(uri, self.uri)
self.assertEqual(body, self.body)
self.assertEqual(headers, self.mac_01_header)
def test_revocation_request(self):
client = Client(self.client_id)
url = 'https://example.com/revoke'
token = 'foobar'
# Valid request
u, h, b = client.prepare_token_revocation_request(url, token)
self.assertEqual(u, url)
self.assertEqual(h, {'Content-Type': 'application/x-www-form-urlencoded'})
self.assertEqual(b, 'token=%s&token_type_hint=access_token' % token)
# Non-HTTPS revocation endpoint
self.assertRaises(InsecureTransportError,
client.prepare_token_revocation_request,
'http://example.com/revoke', token)
u, h, b = client.prepare_token_revocation_request(
url, token, token_type_hint='refresh_token')
self.assertEqual(u, url)
self.assertEqual(h, {'Content-Type': 'application/x-www-form-urlencoded'})
self.assertEqual(b, 'token=%s&token_type_hint=refresh_token' % token)
# JSONP
u, h, b = client.prepare_token_revocation_request(
url, token, callback='hello.world')
self.assertURLEqual(u, url + '?callback=hello.world&token=%s&token_type_hint=access_token' % token)
self.assertEqual(h, {'Content-Type': 'application/x-www-form-urlencoded'})
self.assertEqual(b, '')
| bsd-3-clause |
martin31242/rccar | draft7/main.py | 1 | 6464 | import web
from web import form
import socket
import RPi.GPIO as GPIO
import time
from threading import Thread
from threading import Lock
localhost = "http://" + socket.gethostbyname(socket.gethostname()) + ":8080"
print(localhost)
global infrared, ultrasonic, servomotor, motor_L1, motor_L2, motor_R1, motor_R2, servo_turning_time, outputpin, carspeed, sensor_infrared, sensor_on, gpiodidstartup
global move_left,move_right,move_forward,move_backward,sensor_on,move_netural,move_stop,autostop
ultrasonic = 8
infrared = 11
servomotor = 12
motor_L1 = 19
motor_L2 = 21
motor_R1 = 24
motor_R2 = 26
servo_turning_time = 1
carspeed = 100
sensor_infrared = False
sensor_on = True
move_left = False
move_right = False
move_netural = False
move_forward = False
move_backward = False
sensor_on = False
move_stop = False
autostop = True
gpiodidstartup = False
urls = (
'/','Login',
'/control','Page_one',
'/left', 'Left',
'/right', 'Right',
'/forward', 'Forward',
'/start', 'Start',
'/stop', 'Stop',
'/backward', 'Backward',
'/about', 'About',
'/setting', 'Setting',
'/updatelog', 'Updatelog',
'/source_code', 'Sourcecode',
'/logoff', 'Logoff',
'/stopserver', 'Stopserver',
'/result_sensor_ultrasonic', 'Result_sensor_ultrasonic',
)
app = web.application(urls, globals())
loginform = form.Form(
form.Textbox("USERNAME",
form.notnull,
form.Validator('wrong', lambda x: x == "martin")),
form.Textbox("PASSWORD",
form.notnull,
form.Validator('wrong', lambda x: x == "12341234")),
form.Checkbox('I am not a robot'))
render = web.template.render('templates/')
def gpio_startup():
global a1,a2,b1,b2,gpiodidstartup, p
GPIO.setmode(GPIO.BOARD)
inputpin = [infrared,ultrasonic]
outputpin = [servomotor,motor_L1,motor_L2,motor_R1,motor_R2]
GPIO.setup(inputpin,GPIO.IN)
GPIO.setup(outputpin,GPIO.OUT,initial = GPIO.LOW)
a1 = GPIO.PWM(motor_L1, 50)
a2 = GPIO.PWM(motor_L2, 50)
b1 = GPIO.PWM(motor_R1, 50)
b2 = GPIO.PWM(motor_R2, 50)
gpiodidstartup = True
p = GPIO.PWM(servomotor,50)
def gpio_end():
GPIO.cleanup()
print("all GPIO pin is clean up")
def sensor_if():
global sensor_infrared
    if GPIO.input(infrared) == 1:
        sensor_infrared = True
    elif GPIO.input(infrared) == 0:
sensor_infrared = False
else:
print("infrared sensor error")
pass
def sensor_ultrasonic():
global detect_distance
GPIO.setup(ultrasonic, GPIO.OUT)
GPIO.output(ultrasonic, GPIO.HIGH)
time.sleep(0.00001)
GPIO.output(ultrasonic, GPIO.LOW)
start = time.time()
count = time.time()
GPIO.setup(ultrasonic, GPIO.IN)
while GPIO.input(ultrasonic) == 0 and time.time() - count < 0.1:
start = time.time()
count = time.time()
stop = count
while GPIO.input(ultrasonic) == 1 and time.time() - count < 0.1:
stop = time.time()
# Calculate pulse length
elapsed = stop - start
# Distance pulse travelled in that time is time
# multiplied by the speed of sound (cm/s)
distance = elapsed * 34000
# That was the distance there and back so halve the value
distance = distance / 2
detect_distance = distance
if detect_distance < 5:
motor_stop()
return detect_distance
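# Worked example (illustrative): an echo pulse of elapsed = 0.001 s gives
# 0.001 * 34000 / 2 = 17 cm; any reading under 5 cm triggers motor_stop() above.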
def motor_turn_left():
p.start(0)
p.ChangeDutyCycle(5)
time.sleep(servo_turning_time)
p.start(0)
def motor_turn_right():
p.start(0)
p.ChangeDutyCycle(10)
time.sleep(servo_turning_time)
p.start(0)
def motor_netural():
p.start(0)
p.ChangeDutyCycle(7.5)
time.sleep(servo_turning_time)
p.start(0)
def motor_stop():
a1.stop()
a2.stop()
b1.stop()
b2.stop()
def motor_forward(carspeed = carspeed):
a1.start(carspeed)
b1.start(carspeed)
def motor_backward(carspeed = carspeed):
a2.start(carspeed)
b2.start(carspeed)
def collision_prevention_system():
if detect_distance < 10:
a1.stop()
b1.stop()
a2.stop()
b2.stop()
class Login:
def GET(self):
return render.login(loginform)
def POST(self):
if not loginform.validates():
return render.login(loginform)
else:
gpio_startup()
return web.seeother('/control')
class Page_one:
def GET(self):
return render.page_one()
def POST(self):
return render.page_one()
class Left:
def GET(self):
motor_turn_left()
print("left")
return "left"
class Right:
def GET(self):
motor_turn_right()
print("right")
return "right"
class Forward:
def GET(self):
motor_stop()
t3 = Thread(target=motor_netural)
t5 = Thread(target=motor_forward)
t3.daemon = True
t5.daemon = True
t3.start()
t5.start()
t3.join()
t5.join()
print("forward")
return "forward"
class Start:
def GET(self):
print("start")
return "start"
class Backward:
def GET(self):
motor_stop()
motor_backward()
print("backward")
return "backward"
class Stop:
def GET(self):
motor_stop()
print("stop")
return "stop"
class About:
def GET(self):
motor_stop()
return render.about()
class Setting:
def GET(self):
motor_stop()
return render.setting()
class Updatelog:
def GET(self):
motor_stop()
return render.updatelog()
class Sourcecode:
def GET(self):
motor_stop()
return render.source_code()
class Logoff:
def GET(self):
motor_stop()
gpio_end()
return render.logoff()
class Stopserver:
def GET(self):
motor_stop()
return exit()
class Result_sensor_ultrasonic:
def GET(self):
return sensor_ultrasonic()
if __name__ == "__main__":
app.run() | mit |
ronakkhunt/kuma | vendor/packages/translate/lang/el.py | 24 | 2066 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2007-2009,2011 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""This module represents the Greek language.
.. seealso:: http://en.wikipedia.org/wiki/Greek_language
"""
import re
from translate.lang import common
class el(common.Common):
"""This class represents Greek."""
# Greek uses ; as question mark and the middot instead
sentenceend = u".!;…"
sentencere = re.compile(ur"""
(?s) # make . also match newlines
.*? # anything, but match non-greedy
        [%s]            # the punctuation for sentence ending
        \s+             # the spacing after the punctuation
(?=[^a-zά-ώ\d]) # lookahead that next part starts with caps
""" % sentenceend, re.VERBOSE | re.UNICODE)
puncdict = {
u"?": u";",
u";": u"·",
}
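    # Illustrative note (an assumption, not in the original module): this table is
    # what the toolkit's punctuation handling uses to localise marks, conceptually
    #   u"Why?"  ->  u"Why;"
    #   u"a; b"  ->  u"a· b"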
# Valid latin characters for use as accelerators
valid_latin_accel = u"abcdefghijklmnopqrstuvwxyz" + \
u"ABCDEFGHIJKLMNOPQRSTUVWXYZ" + \
u"1234567890"
# Valid greek characters for use as accelerators (accented characters
# and "ς" omitted)
valid_greek_accel = u"αβγδεζηθικλμνξοπρστυφχψω" + \
u"ΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩ"
# Valid accelerators
validaccel = u"".join([valid_latin_accel, valid_greek_accel])
| mpl-2.0 |
miing/mci_migo | identityprovider/tests/test_models_person.py | 1 | 2438 | # Copyright 2010 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
from random import randint
from identityprovider.models.account import Account, LPOpenIdIdentifier
from identityprovider.models.const import (
AccountCreationRationale,
AccountStatus,
)
from identityprovider.models.person import Person
from identityprovider.models.team import TeamParticipation
from identityprovider.tests.utils import SSOBaseTestCase
class PersonTestCase(SSOBaseTestCase):
def setUp(self):
super(PersonTestCase, self).setUp()
lp_account = randint(100, 10000)
LPOpenIdIdentifier.objects.create(identifier='oid',
lp_account=lp_account)
self.person1 = Person.objects.create(
displayname='Person', name='person', lp_account=lp_account)
self.person2 = Person.objects.create(displayname='Other', name='other')
self.team1 = Person.objects.create(name='team',
teamowner=self.person2.id)
self.team2 = Person.objects.create(name='other-team',
teamowner=self.person1.id)
def test_unicode(self):
self.assertEqual(unicode(self.person1), u'Person')
def test_in_team_no_team(self):
self.assertFalse(self.person1.in_team('no-team'))
def test_in_team(self):
TeamParticipation.objects.create(team=self.team1, person=self.person1)
self.assertTrue(self.person1.in_team('team'))
def test_in_team_object(self):
TeamParticipation.objects.create(team=self.team1, person=self.person1)
self.assertTrue(self.person1.in_team(self.team1))
def test_not_in_team(self):
self.assertFalse(self.person1.in_team('team'))
def test_in_team_no_teamparticipation_same_owner(self):
Person.objects.create(name='otherteam', teamowner=self.person1.id)
self.assertTrue(self.person1.in_team('otherteam'))
def test_account_when_no_account(self):
self.assertEqual(self.person1.account, None)
def test_account_when_account(self):
account = Account.objects.create(
creation_rationale=AccountCreationRationale.USER_CREATED,
status=AccountStatus.ACTIVE, displayname='Person',
openid_identifier='oid')
self.assertEqual(self.person1.account, account)
| agpl-3.0 |
biocore/qiime | tests/test_estimate_observation_richness.py | 15 | 26872 | #!/usr/bin/env python
from __future__ import division
__author__ = "Jai Ram Rideout"
__copyright__ = "Copyright 2013, The QIIME Project"
__credits__ = ["Jai Ram Rideout"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Jai Ram Rideout"
__email__ = "[email protected]"
"""Test suite for the estimate_observation_richness.py module."""
from collections import defaultdict
from StringIO import StringIO
from biom.parse import parse_biom_table
from biom.table import Table
from unittest import TestCase, main
from numpy.testing import assert_almost_equal
from numpy import asarray, array
from qiime.estimate_observation_richness import (AbstractPointEstimator,
Chao1MultinomialPointEstimator, EmptySampleError,
EmptyTableError, ObservationRichnessEstimator,
RichnessEstimatesResults)
class ObservationRichnessEstimatorTests(TestCase):
"""Tests for the ObservationRichnessEstimator class."""
def setUp(self):
"""Define some sample data that will be used by the tests."""
# Single sample, 6 observations, one of which isn't observed in sample.
self.biom_table1 = parse_biom_table(biom_table_str1)
self.estimator1 = ObservationRichnessEstimator(self.biom_table1,
Chao1MultinomialPointEstimator)
def test_constructor(self):
"""Test instantiating an ObservationRichnessEstimator."""
self.assertTrue(isinstance(self.estimator1,
ObservationRichnessEstimator))
def test_constructor_empty_table(self):
"""Test instantiating an estimator with an empty table."""
empty_table = Table(array([]), [], [])
self.assertRaises(EmptyTableError, ObservationRichnessEstimator,
empty_table, Chao1MultinomialPointEstimator)
def test_getSampleCount(self):
"""Test estimator returns correct number of samples."""
self.assertEqual(self.estimator1.getSampleCount(), 1)
def test_call_interpolate(self):
"""Test __call__ computes correct estimates (interpolation)."""
# Verified with iNEXT (http://glimmer.rstudio.com/tchsieh/inext/).
# SE estimates differ because they use a different technique. SE
# estimates have been verified against values in Colwell 2012 instead
# (in separate unit tests).
# Just reference.
obs = self.estimator1(start=15, stop=15, num_steps=1)
self.assertEqual(obs.getSampleCount(), 1)
assert_almost_equal(obs.getEstimates('S1'),
[(15, 5, 0.674199862463, 3.67859255119, 6.32140744881)])
# start=1 and reference.
obs = self.estimator1(start=1, stop=1, num_steps=1)
self.assertEqual(obs.getSampleCount(), 1)
assert_almost_equal(obs.getEstimates('S1'),
[(1, 1.0, 0.250252397843, 0.509514313183, 1.49048568682),
(15, 5, 0.674199862463, 3.67859255119, 6.32140744881)])
# Points in between start=1 and reference.
obs = self.estimator1(start=1, stop=15, num_steps=3)
self.assertEqual(obs.getSampleCount(), 1)
assert_almost_equal(obs.getEstimates('S1'),
[(1, 1.0, 0.250252397843, 0.509514313183, 1.49048568682),
(5, 3.40326340326, 0.655024590447, 2.119438797,
4.68708800953),
(9, 4.4001998002, 0.680106580075,
3.0672153976, 5.7331842028),
(13, 4.85714285714, 0.665379090563, 3.55302380357,
6.16126191071),
(15, 5, 0.674199862463, 3.67859255119, 6.32140744881)])
def test_call_extrapolate(self):
"""Test __call__ computes correct estimates (extrapolation)."""
# Verified with iNEXT. Differs slightly from their output because
# they've slightly modified Colwell 2012 equation 9, and we're using
# the original one. SE estimates differ because they use a different
# technique. SE estimates have been verified against values in Colwell
# 2012 instead (in separate unit tests).
obs = self.estimator1(start=15, stop=30, num_steps=1)
self.assertEqual(obs.getSampleCount(), 1)
assert_almost_equal(obs.getEstimates('S1'),
[(15, 5, 0.674199862463, 3.67859255119, 6.32140744881),
(30, 5.4415544562981095, 1.073911829557642, 3.33672594779,
7.5463829648)])
obs = self.estimator1(start=20, stop=30, num_steps=2)
self.assertEqual(obs.getSampleCount(), 1)
assert_almost_equal(obs.getEstimates('S1'),
[(15, 5, 0.674199862463, 3.67859255119, 6.32140744881),
(20, 5.2555272427983537, 0.77331345626875192, 3.73986071975,
6.77119376585),
(25, 5.38046614197245, 0.93220670591157662, 3.55337457224,
7.20755771171),
(30, 5.4415544562981095, 1.073911829557642, 3.33672594779,
7.5463829648)])
def test_get_points_to_estimate_invalid_input(self):
"""Raises an error on invalid input."""
# Invalid min.
self.assertRaises(ValueError, self.estimator1._get_points_to_estimate,
5, 0, 10, 1)
# Invalid num_steps.
self.assertRaises(ValueError, self.estimator1._get_points_to_estimate,
5, 1, 10, 0)
# max < min.
self.assertRaises(ValueError, self.estimator1._get_points_to_estimate,
5, 1, -1, 1)
def test_get_points_to_estimate(self):
"""Correctly calculates estimation points given range parameters."""
# Ref in range.
obs = self.estimator1._get_points_to_estimate(4, 1, 5, 4)
self.assertEqual(obs, [1, 2, 3, 4, 5])
# Ref not in range.
obs = self.estimator1._get_points_to_estimate(4, 5, 10, 2)
self.assertEqual(obs, [4, 5, 7, 9])
# stop not supplied.
obs = self.estimator1._get_points_to_estimate(5, 5, num_steps=2)
self.assertEqual(obs, [5, 17, 29])
class AbstractPointEstimatorTests(TestCase):
"""Tests for the AbstractPointEstimator class."""
def setUp(self):
"""Define some sample data that will be used by the tests."""
self.colwell_data1 = asarray(colwell_data1)
self.colwell_data2 = asarray(colwell_data2)
self.est1 = AbstractPointEstimator(asarray([0, 1, 2, 3, 4, 5]))
self.est2 = AbstractPointEstimator(self.colwell_data1)
self.est3 = AbstractPointEstimator(self.colwell_data2)
def test_constructor(self):
"""Test instantiating an AbstractPointEstimator instance."""
self.assertTrue(isinstance(self.est1, AbstractPointEstimator))
def test_constructor_empty_sample(self):
"""Test instantiating an estimator with a sample that has no obs."""
with self.assertRaises(EmptySampleError):
_ = AbstractPointEstimator(asarray([0, 0, 0, 0.0, 0, 0.0]))
def test_getTotalIndividualCount(self):
"""Returns correct total number of observed individuals."""
# Verified with iNEXT.
self.assertEqual(self.est1.getTotalIndividualCount(), 15)
# Verified against results in Colwell 2012 paper.
self.assertEqual(self.est2.getTotalIndividualCount(), 976)
self.assertEqual(self.est3.getTotalIndividualCount(), 237)
def test_getObservationCount(self):
"""Returns correct number of (observed) observations."""
# Verified with iNEXT.
self.assertEqual(self.est1.getObservationCount(), 5)
# Verified against results in Colwell 2012 paper.
self.assertEqual(self.est2.getObservationCount(), 140)
self.assertEqual(self.est3.getObservationCount(), 112)
def test_getAbundanceFrequencyCounts(self):
"""Returns correct abundance frequency counts."""
# Verified with iNEXT.
exp = defaultdict(int, {1: 1, 2: 1, 3: 1, 4: 1, 5: 1})
obs = self.est1.getAbundanceFrequencyCounts()
self.assertEqual(obs, exp)
# Verified against results in Colwell 2012 paper.
self.assertEqual(self.est2.getAbundanceFrequencyCounts(), colwell_fk1)
self.assertEqual(self.est3.getAbundanceFrequencyCounts(), colwell_fk2)
def test_call(self):
"""Test should raise error."""
with self.assertRaises(NotImplementedError):
self.est1(1)
class Chao1MultinomialPointEstimatorTests(TestCase):
"""Tests for the Chao1MultinomialPointEstimator class."""
def setUp(self):
"""Define some sample data that will be used by the tests."""
self.colwell_data1 = asarray(colwell_data1)
self.colwell_data2 = asarray(colwell_data2)
self.samp_data1 = asarray([1, 2, 3, 4, 5])
self.samp_data2 = asarray([1, 3, 4, 5])
self.estimator1 = Chao1MultinomialPointEstimator(self.colwell_data1)
self.estimator2 = Chao1MultinomialPointEstimator(self.colwell_data2)
self.estimator3 = Chao1MultinomialPointEstimator(self.samp_data1)
self.estimator4 = Chao1MultinomialPointEstimator(self.samp_data2)
def test_estimateUnobservedObservationCount(self):
"""Test returns correct Chao1 estimate of num unobserved obs."""
# Verified with iNEXT.
obs = self.estimator3.estimateUnobservedObservationCount()
assert_almost_equal(obs, 0.5)
def test_estimateFullRichness(self):
"""Test returns correct Chao1 full observation richness estimate."""
# Verified with iNEXT.
# f2 > 0
obs = self.estimator3.estimateFullRichness()
assert_almost_equal(obs, 5.5)
# f2 == 0
obs = self.estimator4.estimateFullRichness()
assert_almost_equal(obs, 4)
def test_call_interpolate(self):
"""Test computing S(m) using data from Colwell 2012 paper."""
# Verified against results in Colwell 2012 paper.
# Second-growth data.
# m = 1 (min)
# Note: Colwell 2012 list the std err as 0.00 in their table, but after
# extensive searching I'm not sure why. All other values match theirs,
# so I'm guessing they're treating 1 as a special case (since you can't
# have an observation count of less than one if you have exactly one
# individual).
obs = self.estimator1(1)
assert_almost_equal(obs, (1.0, 0.17638208235509734, 0.654297471066,
1.34570252893))
# m = 100
obs = self.estimator1(100)
assert_almost_equal(obs, (44.295771605749465, 4.3560838094150975,
35.7580042257, 52.8335389858))
# m = 800
obs = self.estimator1(800)
assert_almost_equal(obs, (126.7974481741264, 7.7007346056227375,
111.704285693, 141.890610656))
# m = 976 (max)
obs = self.estimator1(976)
assert_almost_equal(obs, (140, 8.4270097160038446, 123.483364459,
156.516635541))
# Old-growth data.
# m = 1 (min)
obs = self.estimator2(1)
assert_almost_equal(obs, (1.0, 0.20541870170521284, 0.597386742907,
1.40261325709))
# m = 20
obs = self.estimator2(20)
assert_almost_equal(obs, (15.891665207609165, 1.9486745986194465,
12.0723331767, 19.7109972385))
# m = 200
obs = self.estimator2(200)
assert_almost_equal(obs, (98.63181822376555, 8.147805938386115,
82.6624120315, 114.601224416))
# m = 237 (max)
obs = self.estimator2(237)
assert_almost_equal(obs, (112.00, 9.22019783913399, 93.928744305,
130.071255695))
def test_call_extrapolate(self):
"""Test computing S(n+m*) using data from Colwell 2012 paper."""
# Verified against results in Colwell 2012 paper.
# Second-growth data.
# m = 1076 (n+100)
obs = self.estimator1(1076)
assert_almost_equal(obs, (146.99829023479796, 8.8700520745653257,
129.613307628, 164.383272842))
# m = 1176 (n+200)
obs = self.estimator1(1176)
assert_almost_equal(obs, (153.6567465407886, 9.3364370482687296,
135.357666182, 171.955826899))
# m = 1976 (n+1000)
obs = self.estimator1(1976)
assert_almost_equal(obs, (196.51177687081162, 13.989113717395064,
169.093617809, 223.929935933))
# Old-growth data.
# m = 337 (n+100)
obs = self.estimator2(337)
assert_almost_equal(obs, (145.7369598336187, 12.20489285355208,
121.815809405, 169.658110262))
# m = 437 (n+200)
obs = self.estimator2(437)
assert_almost_equal(obs, (176.24777891095846, 15.382655350552035,
146.098328437, 206.397229385))
# m = 1237 (n+1000)
obs = self.estimator2(1237)
assert_almost_equal(obs, (335.67575295919767, 48.962273606327834,
239.71146009, 431.640045829))
def test_call_invalid_input(self):
"""Test error is raised on invalid input."""
with self.assertRaises(ValueError):
self.estimator1(42, confidence_level=0)
def test_call_na_samples(self):
"""Test on sample without any singletons or doubletons."""
est = Chao1MultinomialPointEstimator(asarray([4, 3, 4, 5]))
obs = est(42)
self.assertEqual(obs, (None, None, None, None))
def test_partial_derivative_f1(self):
"""Test computes correct partial derivative wrt f1."""
# Verified with Wolfram Alpha.
# f2 > 0
obs = self.estimator1._partial_derivative_f1(2, 3, 10, 42)
assert_almost_equal(obs, 1.22672908818)
# f2 == 0
obs = self.estimator1._partial_derivative_f1(2, 0, 10, 42)
assert_almost_equal(obs, 1.272173492918482)
# f1 == 0, f2 == 0
obs = self.estimator1._partial_derivative_f1(0, 0, 10, 42)
assert_almost_equal(obs, 1.2961664362634027)
def test_partial_derivative_f2(self):
"""Test computes correct partial derivative wrt f2."""
# Verified with Wolfram Alpha.
# f2 > 0
obs = self.estimator1._partial_derivative_f2(2, 3, 10, 42)
assert_almost_equal(obs, 0.9651585982441183)
# f2 == 0
obs = self.estimator1._partial_derivative_f2(2, 0, 10, 42)
assert_almost_equal(obs, 0.9208698803111386)
        # f1 == 0, f2 == 0
obs = self.estimator1._partial_derivative_f2(0, 0, 10, 42)
assert_almost_equal(obs, 1.0)
class RichnessEstimatesResultsTests(TestCase):
"""Tests for the RichnessEstimatesResults class."""
def setUp(self):
"""Define some sample data that will be used by the tests."""
self.res1 = RichnessEstimatesResults()
self.res2 = RichnessEstimatesResults()
self.res2.addSample('S2', 52)
self.res2.addSampleEstimate('S2', 1, 3, 0.4, 2.5, 3.5)
self.res2.addSample('S1', 42)
self.res2.addSampleEstimate('S1', 10, 20, 2.5, 2.5, 3.5)
self.res2.addSampleEstimate('S1', 20, 30, 3.5, 2.5, 3.5)
self.res2.addSampleEstimate('S1', 5, 21, 1.5, 2.5, 3.5)
def test_constructor(self):
"""Test instantiating a RichnessEstimatesResults instance."""
self.assertTrue(isinstance(self.res1, RichnessEstimatesResults))
self.assertEqual(self.res1.getSampleCount(), 0)
def test_getSampleCount(self):
"""Test getting the number of samples in the results container."""
self.assertEqual(self.res1.getSampleCount(), 0)
self.res1.addSample('S1', 42)
self.assertEqual(self.res1.getSampleCount(), 1)
self.res1.addSample('S2', 43)
self.assertEqual(self.res1.getSampleCount(), 2)
def test_getReferenceIndividualCount(self):
"""Test getting the original number of individuals in a sample."""
with self.assertRaises(ValueError):
self.res1.getReferenceIndividualCount('S1')
self.res1.addSample('S1', 42)
self.assertEqual(self.res1.getReferenceIndividualCount('S1'), 42)
def test_getEstimates(self):
"""Test getting the estimates for a sample."""
with self.assertRaises(ValueError):
self.res1.getEstimates('S1')
self.res1.addSample('S1', 42)
self.res1.addSampleEstimate('S1', 15, 30, 4.75, 2.5, 3.5)
self.res1.addSampleEstimate('S1', 10, 20, 2.5, 2.5, 3.5)
assert_almost_equal(self.res1.getEstimates('S1'),
[(10, 20, 2.5, 2.5, 3.5), (15, 30, 4.75, 2.5, 3.5)])
def test_addSample(self):
"""Test adding a new sample to the results container."""
self.res1.addSample('S1', 42)
self.assertEqual(self.res1.getSampleCount(), 1)
self.assertEqual(self.res1.getReferenceIndividualCount('S1'), 42)
with self.assertRaises(ValueError):
self.res1.addSample('S1', 45)
def test_addSampleEstimate(self):
"""Test adding a new estimate for a sample."""
with self.assertRaises(ValueError):
self.res1.addSampleEstimate('S1', 10, 20, 2.5, 2.5, 3.5)
self.res1.addSample('S1', 42)
self.res1.addSampleEstimate('S1', 10, 20, 2.5, 2.5, 3.5)
assert_almost_equal(self.res1.getEstimates('S1'),
[(10, 20, 2.5, 2.5, 3.5)])
with self.assertRaises(ValueError):
self.res1.addSampleEstimate('S1', 10, 35, 0.002, 2.5, 3.5)
def test_toTable(self):
"""Test writing results container to a table."""
# Empty results.
out_f = StringIO()
self.res1.toTable(out_f)
self.assertEqual(out_f.getvalue(),
"SampleID\tSize\tEstimate\tStd Err\tCI (lower)\tCI (upper)\n")
out_f.close()
# Results with multiple samples.
exp = """SampleID\tSize\tEstimate\tStd Err\tCI (lower)\tCI (upper)
S1\t5\t21\t1.5\t2.5\t3.5
S1\t10\t20\t2.5\t2.5\t3.5
S1\t20\t30\t3.5\t2.5\t3.5
S2\t1\t3\t0.4\t2.5\t3.5
"""
out_f = StringIO()
self.res2.toTable(out_f)
self.assertEqual(out_f.getvalue(), exp)
out_f.close()
# Custom header.
exp = """foo\tbar\tbaz\tbazaar\tbazaaar\tbazaaaar
S1\t5\t21\t1.5\t2.5\t3.5
"""
out_f = StringIO()
self.res1.addSample('S1', 42)
self.res1.addSampleEstimate('S1', 5, 21, 1.5, 2.5, 3.5)
self.res1.toTable(out_f,
header=['foo', 'bar', 'baz', 'bazaar', 'bazaaar', 'bazaaaar'])
self.assertEqual(out_f.getvalue(), exp)
out_f.close()
# Invalid header.
with self.assertRaises(ValueError):
out_f = StringIO()
self.res1.toTable(out_f, header=['foo'])
# Cells with None as their value.
exp = """SampleID\tSize\tEstimate\tStd Err\tCI (lower)\tCI (upper)
S1\t43\tN/A\tN/A\tN/A\tN/A
"""
out_f = StringIO()
res = RichnessEstimatesResults()
res.addSample('S1', 42)
res.addSampleEstimate('S1', 43, None, None, None, None)
res.toTable(out_f)
self.assertEqual(out_f.getvalue(), exp)
out_f.close()
# OTU ID S1 taxonomy
# OTU0 0 foo;bar;baz
# OTU1 1 foo;bar;bazz
# OTU2 2 foo;bar;bazzz
# OTU3 3 foo;bar;bazzzz
# OTU4 4 foo;bar;bazzzzz
# OTU5 5 foo;bar;bazzzzzz
biom_table_str1 = """{"id": "None","format": "Biological Observation Matrix 1.0.0","format_url": "http://biom-format.org","type": "OTU table","generated_by": "BIOM-Format 1.1.2","date": "2013-04-11T11:39:44.032365","matrix_type": "sparse","matrix_element_type": "float","shape": [6, 1],"data": [[1,0,1.0],[2,0,2.0],[3,0,3.0],[4,0,4.0],[5,0,5.0]],"rows": [{"id": "OTU0", "metadata": {"taxonomy": ["foo", "bar", "baz"]}},{"id": "OTU1", "metadata": {"taxonomy": ["foo", "bar", "bazz"]}},{"id": "OTU2", "metadata": {"taxonomy": ["foo", "bar", "bazzz"]}},{"id": "OTU3", "metadata": {"taxonomy": ["foo", "bar", "bazzzz"]}},{"id": "OTU4", "metadata": {"taxonomy": ["foo", "bar", "bazzzzz"]}},{"id": "OTU5", "metadata": {"taxonomy": ["foo", "bar", "bazzzzzz"]}}],"columns": [{"id": "S1", "metadata": null}]}"""
# Taken from Colwell 2012 Osa second growth sample (Table 1a). Added some zeros
# as these should be ignored.
colwell_data1 = [64,
1,
1,
1,
1,
0.0,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
2,
2,
2,
0,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
3,
3,
3,
3,
4,
4,
4,
4,
4,
5,
5,
5,
5,
5,
6,
6,
6,
6,
6,
7,
7,
7,
7,
7,
8,
8,
8,
9,
10,
10,
11,
11,
11,
12,
12,
77,
14,
14,
17,
19,
19,
20,
20,
20,
21,
24,
26,
40,
71,
57,
57,
60,
0]
colwell_fk1 = defaultdict(
int,
{1: 70,
2: 17,
3: 4,
4: 5,
5: 5,
6: 5,
7: 5,
8: 3,
9: 1,
10: 2,
11: 3,
12: 2,
14: 2,
17: 1,
19: 2,
20: 3,
21: 1,
24: 1,
26: 1,
40: 1,
57: 2,
60: 1,
64: 1,
71: 1,
77: 1})
# Taken from Colwell 2012 Osa old growth sample (Table 1b). Added some zeros as
# these should be ignored.
colwell_data2 = [0,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
0.0,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
0,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
3,
3,
3,
3,
4,
4,
4,
5,
5,
5,
5,
5,
6,
7,
7,
8,
42,
14]
colwell_fk2 = defaultdict(
int,
{1: 84,
2: 10,
3: 4,
4: 3,
5: 5,
6: 1,
7: 2,
8: 1,
14: 1,
42: 1})
if __name__ == "__main__":
main()
| gpl-2.0 |
mattjvincent/file2db | file2db/compat.py | 1 | 2099 | # -*- coding: utf-8 -*-
# taken from requests library: https://github.com/kennethreitz/requests
"""
pythoncompat
"""
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
#: Python 3.0.x
is_py30 = (is_py3 and _ver[1] == 0)
#: Python 3.1.x
is_py31 = (is_py3 and _ver[1] == 1)
#: Python 3.2.x
is_py32 = (is_py3 and _ver[1] == 2)
#: Python 3.3.x
is_py33 = (is_py3 and _ver[1] == 3)
#: Python 3.4.x
is_py34 = (is_py3 and _ver[1] == 4)
#: Python 2.7.x
is_py27 = (is_py2 and _ver[1] == 7)
#: Python 2.6.x
is_py26 = (is_py2 and _ver[1] == 6)
#: Python 2.5.x
is_py25 = (is_py2 and _ver[1] == 5)
#: Python 2.4.x
is_py24 = (is_py2 and _ver[1] == 4) # I'm assuming this is not by choice.
# ---------
# Platforms
# ---------
# Syntax sugar.
_ver = sys.version.lower()
is_pypy = ('pypy' in _ver)
is_jython = ('jython' in _ver)
is_ironpython = ('iron' in _ver)
# Assume CPython, if nothing else.
is_cpython = not any((is_pypy, is_jython, is_ironpython))
# Windows-based system.
is_windows = 'win32' in str(sys.platform).lower()
# Standard Linux 2+ system.
is_linux = ('linux' in str(sys.platform).lower())
is_osx = ('darwin' in str(sys.platform).lower())
is_hpux = ('hpux' in str(sys.platform).lower()) # Complete guess.
is_solaris = ('sunos' in str(sys.platform).lower())  # sys.platform is 'sunos5' on Solaris.
# ---------
# Specifics
# ---------
if is_py2:
from urllib import quote, unquote, urlencode
from urlparse import urlparse, urlunparse, urljoin, urlsplit, parse_qsl
from urllib2 import parse_http_list
import cookielib
from StringIO import StringIO
#bytes = str
#str = unicode
#basestring = basestring
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, parse_qsl
from urllib.request import parse_http_list
from http import cookiejar as cookielib
from http.cookies import SimpleCookie
from io import StringIO
#str = str
#bytes = bytes
#basestring = (str,bytes)
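# Illustrative usage (an assumption, not part of the original module): callers can
# stay version-agnostic by importing the shimmed names from here, e.g.
#   from file2db.compat import urlparse, StringIO
#   parts = urlparse("http://example.com/path")
#   buf = StringIO()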
| bsd-3-clause |
CydarLtd/ansible | test/units/modules/network/nxos/test_nxos_bgp_af.py | 9 | 3510 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_bgp_af
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosBgpAfModule(TestNxosModule):
module = nxos_bgp_af
def setUp(self):
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_bgp_af.load_config')
self.load_config = self.mock_load_config.start()
self.mock_get_config = patch('ansible.modules.network.nxos.nxos_bgp_af.get_config')
self.get_config = self.mock_get_config.start()
def tearDown(self):
self.mock_load_config.stop()
self.mock_get_config.stop()
def load_fixtures(self, commands=None):
self.get_config.return_value = load_fixture('nxos_bgp_config.cfg')
self.load_config.return_value = None
def test_nxos_bgp_af(self):
set_module_args(dict(asn=65535, afi='ipv4', safi='unicast'))
self.execute_module(
changed=True,
commands=['router bgp 65535', 'address-family ipv4 unicast']
)
def test_nxos_bgp_af_vrf(self):
set_module_args(dict(asn=65535, vrf='test', afi='ipv4', safi='unicast'))
self.execute_module(
changed=True,
commands=['router bgp 65535', 'vrf test', 'address-family ipv4 unicast']
)
def test_nxos_bgp_af_dampening_routemap(self):
set_module_args(dict(asn=65535, afi='ipv4', safi='unicast',
dampening_routemap='route-map-a'))
self.execute_module(
changed=True,
commands=['router bgp 65535', 'address-family ipv4 unicast',
'dampening route-map route-map-a']
)
def test_nxos_bgp_af_dampening_manual(self):
set_module_args(dict(asn=65535, afi='ipv4', safi='unicast',
dampening_half_time=5, dampening_suppress_time=2000,
dampening_reuse_time=1900, dampening_max_suppress_time=10))
self.execute_module(
changed=True,
commands=['router bgp 65535', 'address-family ipv4 unicast',
'dampening 5 1900 2000 10']
)
def test_nxos_bgp_af_dampening_mix(self):
set_module_args(dict(asn=65535, afi='ipv4', safi='unicast',
dampening_routemap='route-map-a',
dampening_half_time=5, dampening_suppress_time=2000,
dampening_reuse_time=1900, dampening_max_suppress_time=10))
result = self.execute_module(failed=True)
self.assertEqual(result['msg'], 'dampening_routemap cannot be used with the dampening_half_time param')
| gpl-3.0 |
EverlyWell/redash | redash/handlers/embed.py | 4 | 3617 | import logging
import time
import pystache
from flask import request
from authentication import current_org
from flask_login import current_user, login_required
from flask_restful import abort
from redash import models, utils
from redash.handlers import routes
from redash.handlers.base import (get_object_or_404, org_scoped_rule,
record_event)
from redash.handlers.query_results import collect_query_parameters
from redash.handlers.static import render_index
from redash.utils import gen_query_hash
#
# Run a parameterized query synchronously and return the result
# DISCLAIMER: Temporary solution to support parameters in queries. Should be
# removed once we refactor the query results API endpoints and handling
# on the client side. Please don't reuse in other API handlers.
#
def run_query_sync(data_source, parameter_values, query_text, max_age=0):
query_parameters = set(collect_query_parameters(query_text))
missing_params = set(query_parameters) - set(parameter_values.keys())
if missing_params:
raise Exception('Missing parameter value for: {}'.format(", ".join(missing_params)))
if query_parameters:
query_text = pystache.render(query_text, parameter_values)
if max_age <= 0:
query_result = None
else:
query_result = models.QueryResult.get_latest(data_source, query_text, max_age)
query_hash = gen_query_hash(query_text)
if query_result:
logging.info("Returning cached result for query %s" % query_hash)
return query_result.data
try:
started_at = time.time()
data, error = data_source.query_runner.run_query(query_text, current_user)
if error:
return None
# update cache
if max_age > 0:
run_time = time.time() - started_at
query_result, updated_query_ids = models.QueryResult.store_result(data_source.org_id, data_source.id,
query_hash, query_text, data,
run_time, utils.utcnow())
models.db.session.commit()
return data
except Exception:
if max_age > 0:
abort(404, message="Unable to get result from the database, and no cached query result found.")
else:
abort(503, message="Unable to get result from the database.")
return None
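# Illustrative call (an assumption, not part of the original handler):
#   data = run_query_sync(data_source, {"country": "US"},
#                         "SELECT count(*) FROM users WHERE country = '{{country}}'",
#                         max_age=60)
# The mustache-style placeholders are collected, rendered with pystache, and the
# result is served from cache when a fresh enough QueryResult already exists.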
@routes.route(org_scoped_rule('/embed/query/<query_id>/visualization/<visualization_id>'), methods=['GET'])
@login_required
def embed(query_id, visualization_id, org_slug=None):
record_event(current_org, current_user._get_current_object(), {
'action': 'view',
'object_id': visualization_id,
'object_type': 'visualization',
'query_id': query_id,
'embed': True,
'referer': request.headers.get('Referer')
})
return render_index()
@routes.route(org_scoped_rule('/public/dashboards/<token>'), methods=['GET'])
@login_required
def public_dashboard(token, org_slug=None):
if current_user.is_api_user():
dashboard = current_user.object
else:
api_key = get_object_or_404(models.ApiKey.get_by_api_key, token)
dashboard = api_key.object
record_event(current_org, current_user, {
'action': 'view',
'object_id': dashboard.id,
'object_type': 'dashboard',
'public': True,
'headless': 'embed' in request.args,
'referer': request.headers.get('Referer')
})
return render_index()
| bsd-2-clause |
vitaly4uk/django | tests/template_tests/tests.py | 183 | 4760 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
from django.contrib.auth.models import Group
from django.core import urlresolvers
from django.template import Context, Engine, TemplateSyntaxError
from django.template.base import UNKNOWN_SOURCE
from django.test import SimpleTestCase, override_settings
class TemplateTests(SimpleTestCase):
def test_string_origin(self):
template = Engine().from_string('string template')
self.assertEqual(template.origin.name, UNKNOWN_SOURCE)
self.assertEqual(template.origin.loader_name, None)
self.assertEqual(template.source, 'string template')
@override_settings(SETTINGS_MODULE=None)
def test_url_reverse_no_settings_module(self):
"""
#9005 -- url tag shouldn't require settings.SETTINGS_MODULE to
be set.
"""
t = Engine(debug=True).from_string('{% url will_not_match %}')
c = Context()
with self.assertRaises(urlresolvers.NoReverseMatch):
t.render(c)
def test_url_reverse_view_name(self):
"""
        #19827 -- url tag should keep the original stack trace when
        reraising the exception.
"""
t = Engine().from_string('{% url will_not_match %}')
c = Context()
try:
t.render(c)
except urlresolvers.NoReverseMatch:
tb = sys.exc_info()[2]
depth = 0
while tb.tb_next is not None:
tb = tb.tb_next
depth += 1
self.assertGreater(depth, 5,
"The traceback context was lost when reraising the traceback. See #19827")
def test_no_wrapped_exception(self):
"""
# 16770 -- The template system doesn't wrap exceptions, but annotates
them.
"""
engine = Engine(debug=True)
c = Context({"coconuts": lambda: 42 / 0})
t = engine.from_string("{{ coconuts }}")
with self.assertRaises(ZeroDivisionError) as e:
t.render(c)
debug = e.exception.template_debug
self.assertEqual(debug['start'], 0)
self.assertEqual(debug['end'], 14)
def test_invalid_block_suggestion(self):
"""
#7876 -- Error messages should include the unexpected block name.
"""
engine = Engine()
with self.assertRaises(TemplateSyntaxError) as e:
engine.from_string("{% if 1 %}lala{% endblock %}{% endif %}")
self.assertEqual(
e.exception.args[0],
"Invalid block tag: 'endblock', expected 'elif', 'else' or 'endif'",
)
def test_compile_filter_expression_error(self):
"""
        #19819 -- Make sure the correct token is highlighted for
FilterExpression errors.
"""
engine = Engine(debug=True)
msg = "Could not parse the remainder: '@bar' from 'foo@bar'"
with self.assertRaisesMessage(TemplateSyntaxError, msg) as e:
engine.from_string("{% if 1 %}{{ foo@bar }}{% endif %}")
debug = e.exception.template_debug
self.assertEqual((debug['start'], debug['end']), (10, 23))
self.assertEqual((debug['during']), '{{ foo@bar }}')
def test_compile_tag_error(self):
"""
Errors raised while compiling nodes should include the token
information.
"""
engine = Engine(
debug=True,
libraries={'bad_tag': 'template_tests.templatetags.bad_tag'},
)
with self.assertRaises(RuntimeError) as e:
engine.from_string("{% load bad_tag %}{% badtag %}")
self.assertEqual(e.exception.template_debug['during'], '{% badtag %}')
def test_super_errors(self):
"""
        #18169 -- NoReverseMatch should not be silenced in block.super.
"""
engine = Engine(app_dirs=True)
t = engine.get_template('included_content.html')
with self.assertRaises(urlresolvers.NoReverseMatch):
t.render(Context())
def test_debug_tag_non_ascii(self):
"""
#23060 -- Test non-ASCII model representation in debug output.
"""
group = Group(name="清風")
c1 = Context({"objs": [group]})
t1 = Engine().from_string('{% debug %}')
self.assertIn("清風", t1.render(c1))
def test_extends_generic_template(self):
"""
#24338 -- Allow extending django.template.backends.django.Template
objects.
"""
engine = Engine()
parent = engine.from_string('{% block content %}parent{% endblock %}')
child = engine.from_string(
'{% extends parent %}{% block content %}child{% endblock %}')
self.assertEqual(child.render(Context({'parent': parent})), 'child')
| bsd-3-clause |
Vagab0nd/SiCKRAGE | lib3/unidecode/x05c.py | 252 | 4612 | data = (
'Po ', # 0x00
'Feng ', # 0x01
'Zhuan ', # 0x02
'Fu ', # 0x03
'She ', # 0x04
'Ke ', # 0x05
'Jiang ', # 0x06
'Jiang ', # 0x07
'Zhuan ', # 0x08
'Wei ', # 0x09
'Zun ', # 0x0a
'Xun ', # 0x0b
'Shu ', # 0x0c
'Dui ', # 0x0d
'Dao ', # 0x0e
'Xiao ', # 0x0f
'Ji ', # 0x10
'Shao ', # 0x11
'Er ', # 0x12
'Er ', # 0x13
'Er ', # 0x14
'Ga ', # 0x15
'Jian ', # 0x16
'Shu ', # 0x17
'Chen ', # 0x18
'Shang ', # 0x19
'Shang ', # 0x1a
'Mo ', # 0x1b
'Ga ', # 0x1c
'Chang ', # 0x1d
'Liao ', # 0x1e
'Xian ', # 0x1f
'Xian ', # 0x20
'[?] ', # 0x21
'Wang ', # 0x22
'Wang ', # 0x23
'You ', # 0x24
'Liao ', # 0x25
'Liao ', # 0x26
'Yao ', # 0x27
'Mang ', # 0x28
'Wang ', # 0x29
'Wang ', # 0x2a
'Wang ', # 0x2b
'Ga ', # 0x2c
'Yao ', # 0x2d
'Duo ', # 0x2e
'Kui ', # 0x2f
'Zhong ', # 0x30
'Jiu ', # 0x31
'Gan ', # 0x32
'Gu ', # 0x33
'Gan ', # 0x34
'Tui ', # 0x35
'Gan ', # 0x36
'Gan ', # 0x37
'Shi ', # 0x38
'Yin ', # 0x39
'Chi ', # 0x3a
'Kao ', # 0x3b
'Ni ', # 0x3c
'Jin ', # 0x3d
'Wei ', # 0x3e
'Niao ', # 0x3f
'Ju ', # 0x40
'Pi ', # 0x41
'Ceng ', # 0x42
'Xi ', # 0x43
'Bi ', # 0x44
'Ju ', # 0x45
'Jie ', # 0x46
'Tian ', # 0x47
'Qu ', # 0x48
'Ti ', # 0x49
'Jie ', # 0x4a
'Wu ', # 0x4b
'Diao ', # 0x4c
'Shi ', # 0x4d
'Shi ', # 0x4e
'Ping ', # 0x4f
'Ji ', # 0x50
'Xie ', # 0x51
'Chen ', # 0x52
'Xi ', # 0x53
'Ni ', # 0x54
'Zhan ', # 0x55
'Xi ', # 0x56
'[?] ', # 0x57
'Man ', # 0x58
'E ', # 0x59
'Lou ', # 0x5a
'Ping ', # 0x5b
'Ti ', # 0x5c
'Fei ', # 0x5d
'Shu ', # 0x5e
'Xie ', # 0x5f
'Tu ', # 0x60
'Lu ', # 0x61
'Lu ', # 0x62
'Xi ', # 0x63
'Ceng ', # 0x64
'Lu ', # 0x65
'Ju ', # 0x66
'Xie ', # 0x67
'Ju ', # 0x68
'Jue ', # 0x69
'Liao ', # 0x6a
'Jue ', # 0x6b
'Shu ', # 0x6c
'Xi ', # 0x6d
'Che ', # 0x6e
'Tun ', # 0x6f
'Ni ', # 0x70
'Shan ', # 0x71
'[?] ', # 0x72
'Xian ', # 0x73
'Li ', # 0x74
'Xue ', # 0x75
'Nata ', # 0x76
'[?] ', # 0x77
'Long ', # 0x78
'Yi ', # 0x79
'Qi ', # 0x7a
'Ren ', # 0x7b
'Wu ', # 0x7c
'Han ', # 0x7d
'Shen ', # 0x7e
'Yu ', # 0x7f
'Chu ', # 0x80
'Sui ', # 0x81
'Qi ', # 0x82
'[?] ', # 0x83
'Yue ', # 0x84
'Ban ', # 0x85
'Yao ', # 0x86
'Ang ', # 0x87
'Ya ', # 0x88
'Wu ', # 0x89
'Jie ', # 0x8a
'E ', # 0x8b
'Ji ', # 0x8c
'Qian ', # 0x8d
'Fen ', # 0x8e
'Yuan ', # 0x8f
'Qi ', # 0x90
'Cen ', # 0x91
'Qian ', # 0x92
'Qi ', # 0x93
'Cha ', # 0x94
'Jie ', # 0x95
'Qu ', # 0x96
'Gang ', # 0x97
'Xian ', # 0x98
'Ao ', # 0x99
'Lan ', # 0x9a
'Dao ', # 0x9b
'Ba ', # 0x9c
'Zuo ', # 0x9d
'Zuo ', # 0x9e
'Yang ', # 0x9f
'Ju ', # 0xa0
'Gang ', # 0xa1
'Ke ', # 0xa2
'Gou ', # 0xa3
'Xue ', # 0xa4
'Bei ', # 0xa5
'Li ', # 0xa6
'Tiao ', # 0xa7
'Ju ', # 0xa8
'Yan ', # 0xa9
'Fu ', # 0xaa
'Xiu ', # 0xab
'Jia ', # 0xac
'Ling ', # 0xad
'Tuo ', # 0xae
'Pei ', # 0xaf
'You ', # 0xb0
'Dai ', # 0xb1
'Kuang ', # 0xb2
'Yue ', # 0xb3
'Qu ', # 0xb4
'Hu ', # 0xb5
'Po ', # 0xb6
'Min ', # 0xb7
'An ', # 0xb8
'Tiao ', # 0xb9
'Ling ', # 0xba
'Chi ', # 0xbb
'Yuri ', # 0xbc
'Dong ', # 0xbd
'Cem ', # 0xbe
'Kui ', # 0xbf
'Xiu ', # 0xc0
'Mao ', # 0xc1
'Tong ', # 0xc2
'Xue ', # 0xc3
'Yi ', # 0xc4
'Kura ', # 0xc5
'He ', # 0xc6
'Ke ', # 0xc7
'Luo ', # 0xc8
'E ', # 0xc9
'Fu ', # 0xca
'Xun ', # 0xcb
'Die ', # 0xcc
'Lu ', # 0xcd
'An ', # 0xce
'Er ', # 0xcf
'Gai ', # 0xd0
'Quan ', # 0xd1
'Tong ', # 0xd2
'Yi ', # 0xd3
'Mu ', # 0xd4
'Shi ', # 0xd5
'An ', # 0xd6
'Wei ', # 0xd7
'Hu ', # 0xd8
'Zhi ', # 0xd9
'Mi ', # 0xda
'Li ', # 0xdb
'Ji ', # 0xdc
'Tong ', # 0xdd
'Wei ', # 0xde
'You ', # 0xdf
'Sang ', # 0xe0
'Xia ', # 0xe1
'Li ', # 0xe2
'Yao ', # 0xe3
'Jiao ', # 0xe4
'Zheng ', # 0xe5
'Luan ', # 0xe6
'Jiao ', # 0xe7
'E ', # 0xe8
'E ', # 0xe9
'Yu ', # 0xea
'Ye ', # 0xeb
'Bu ', # 0xec
'Qiao ', # 0xed
'Qun ', # 0xee
'Feng ', # 0xef
'Feng ', # 0xf0
'Nao ', # 0xf1
'Li ', # 0xf2
'You ', # 0xf3
'Xian ', # 0xf4
'Hong ', # 0xf5
'Dao ', # 0xf6
'Shen ', # 0xf7
'Cheng ', # 0xf8
'Tu ', # 0xf9
'Geng ', # 0xfa
'Jun ', # 0xfb
'Hao ', # 0xfc
'Xia ', # 0xfd
'Yin ', # 0xfe
'Yu ', # 0xff
)
| gpl-3.0 |
chaluemwut/fbserver | venv/lib/python2.7/site-packages/sklearn/feature_extraction/text.py | 1 | 49725 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..externals import six
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
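# Illustrative example (not part of the library source):
#   strip_accents_unicode(u'\u00e9cole')  ->  u'ecole'
# NFKD splits each accented glyph into a base letter plus combining marks, and the
# combining marks are then dropped by the filter above.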
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
else: # assume it's a collection
return stop
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
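    # Illustrative example (not part of the library source): with
    # ngram_range=(1, 2), tokens ['please', 'call', 'back'] become
    # ['please', 'call', 'back', 'please call', 'call back'].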
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
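    # Illustrative example (not part of the library source): with
    # ngram_range=(2, 2) the document "ab cd" yields
    # [' a', 'ab', 'b ', ' c', 'cd', 'd '] -- each word is padded with a single
    # space so that n-grams never straddle a word boundary.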
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _check_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Parameters
----------
input: string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents: {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
an direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer: string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor: callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer: callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range: tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words: string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
lowercase: boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern: string
Regular expression denoting what constitutes a "token", only used
if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, optional, (2 ** 20) by default
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary: boolean, False by default.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, optional
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', charset=None, encoding='utf-8',
decode_error='strict', charset_error=None,
strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
if charset is not None:
warnings.warn("The charset parameter is deprecated as of version "
"0.14 and will be removed in 0.16. Use encoding "
"instead.",
DeprecationWarning)
self.encoding = charset
if charset_error is not None:
warnings.warn("The charset_error parameter is deprecated as of "
"version 0.14 and will be removed in 0.16. Use "
"decode_error instead.",
DeprecationWarning)
self.decode_error = charset_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
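# Illustrative usage (an assumption, not part of the library source):
#   hv = HashingVectorizer(n_features=2 ** 10)
#   X = hv.transform(["first document", "second and longer document"])
# X is then a (2, 1024) scipy.sparse matrix of l2-normalized (possibly signed)
# hashed token counts; the same call also works in streaming or parallel
# pipelines because the transformer keeps no vocabulary state between calls.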
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return np.bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
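# Illustrative example (not part of the library source): for a sparse matrix whose
# dense form is [[1, 0], [3, 2]], _document_frequency returns array([2, 1]) --
# feature 0 is non-zero in two documents, feature 1 in one.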
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
        if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, optional, 1.0 by default
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
        If float, the parameter represents a proportion of documents; if an
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, optional, 1 by default
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
        If float, the parameter represents a proportion of documents; if an
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : optional, None by default
        If not None, build a vocabulary that only considers the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, False by default.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
`vocabulary_` : dict
A mapping of terms to feature indices.
`stop_words_` : set
Terms that were ignored because
they occurred in either too many
(`max_df`) or in too few (`min_df`) documents.
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8', charset=None,
decode_error='strict', charset_error=None,
strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
if charset is not None:
warnings.warn("The charset parameter is deprecated as of version "
"0.14 and will be removed in 0.16. Use encoding "
"instead.",
DeprecationWarning)
self.encoding = charset
if charset_error is not None:
warnings.warn("The charset_error parameter is deprecated as of "
"version 0.14 and will be removed in 0.16. Use "
"decode_error instead.",
DeprecationWarning)
self.decode_error = charset_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df of min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
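    # Illustrative sketch: given a vocabulary built in insertion order such
    # as {'b': 0, 'a': 1}, _sort_features reorders the columns of X and
    # rewrites the dict in place so that 'a' maps to 0 and 'b' to 1
    # (hypothetical vectorizer and X below).
    # >>> X_sorted = vectorizer._sort_features(X, vocab)
    # >>> vocab['a'], vocab['b']
    # (0, 1)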
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
        Prune features that are non-zero in more documents than high or in
        fewer documents than low, modifying the vocabulary, and restricting
        it to at most the limit most frequent features.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
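    # Illustrative sketch: with document frequencies dfs = [1, 3, 2] and
    # high=2, low=1, the mask keeps columns 0 and 2; the term whose df is 3
    # is removed from the vocabulary and reported in the returned set
    # (hypothetical vectorizer, X and vocab below).
    # >>> X_new, removed = vectorizer._limit_features(X, vocab, high=2, low=1)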
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
# some Python/Scipy versions won't accept an array.array:
if j_indices:
j_indices = np.frombuffer(j_indices, dtype=np.intc)
else:
j_indices = np.array([], dtype=np.int32)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
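    # Illustrative sketch of the CSR layout built above: for two documents
    # analyzed as ['a', 'b', 'a'] and ['b'] with vocabulary {'a': 0, 'b': 1},
    # j_indices is [0, 1, 0, 1] and indptr is [0, 3, 4]; after
    # sum_duplicates() the matrix holds the counts [[2, 1], [0, 1]].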
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._check_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._check_vocabulary()
if not hasattr(self, 'vocabulary_') or len(self.vocabulary_) == 0:
raise ValueError("Vocabulary wasn't fitted or is empty!")
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
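    # Illustrative sketch:
    # >>> vec = CountVectorizer()
    # >>> X = vec.fit_transform(["apple banana", "banana cherry"])
    # >>> [list(terms) for terms in vec.inverse_transform(X)]
    # [['apple', 'banana'], ['banana', 'cherry']]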
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
if not hasattr(self, 'vocabulary_') or len(self.vocabulary_) == 0:
raise ValueError("Vocabulary wasn't fitted or is empty!")
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, optional
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, optional
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, optional
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
            # log(n/df) + 1 instead of plain log(n/df) makes sure terms with
            # zero idf (i.e. occurring in every document) don't get
            # suppressed entirely
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
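    # Illustrative sketch:
    # >>> import numpy as np
    # >>> counts = np.array([[3, 0], [2, 1], [1, 1]])
    # >>> tfidf = TfidfTransformer(smooth_idf=False).fit(counts)
    # >>> np.round(tfidf.idf_, 3).tolist()
    # [1.0, 1.405]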
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
if not hasattr(self, "_idf_diag"):
raise ValueError("idf vector not fitted")
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
            # in-place *= is not supported for scipy sparse matrices here,
            # so reassign the product
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that can
        be of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, optional, 1.0 by default
        When building the vocabulary ignore terms that have a document
        frequency strictly higher than the given threshold (corpus-specific
        stop words).
        If float, the parameter represents a proportion of documents; if an
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, optional, 1 by default
        When building the vocabulary ignore terms that have a document
        frequency strictly lower than the given threshold.
This value is also called cut-off in the literature.
        If float, the parameter represents a proportion of documents; if an
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : optional, None by default
        If not None, build a vocabulary that only considers the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, False by default.
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, optional
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, optional
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, optional
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
``idf_`` : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
See also
--------
CountVectorizer
        Tokenize the documents, count the occurrences of each token and
        return them as a sparse matrix.
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
"""
def __init__(self, input='content', encoding='utf-8', charset=None,
decode_error='strict', charset_error=None,
strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, charset=charset, charset_error=charset_error,
encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
| apache-2.0 |
michael-dev2rights/ansible | test/units/modules/network/nxos/test_nxos_ospf.py | 47 | 1964 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_ospf
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosOspfModule(TestNxosModule):
module = nxos_ospf
def setUp(self):
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_ospf.load_config')
self.load_config = self.mock_load_config.start()
self.mock_get_config = patch('ansible.modules.network.nxos.nxos_ospf.get_config')
self.get_config = self.mock_get_config.start()
def tearDown(self):
self.mock_load_config.stop()
self.mock_get_config.stop()
def load_fixtures(self, commands=None, device=''):
self.load_config.return_value = None
def test_nxos_ospf_present(self):
set_module_args(dict(ospf=1, state='present'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['router ospf 1'])
def test_nxos_ospf_absent(self):
set_module_args(dict(ospf=1, state='absent'))
result = self.execute_module(changed=False)
self.assertEqual(result['commands'], [])
| gpl-3.0 |
polimediaupv/edx-platform | common/djangoapps/student/tests/test_reset_password.py | 54 | 10790 | """
Test the various password reset flows
"""
import json
import re
import unittest
from django.core.cache import cache
from django.conf import settings
from django.test import TestCase
from django.test.client import RequestFactory
from django.contrib.auth.models import User
from django.contrib.auth.hashers import UNUSABLE_PASSWORD
from django.contrib.auth.tokens import default_token_generator
from django.utils.http import int_to_base36
from mock import Mock, patch
import ddt
from student.views import password_reset, password_reset_confirm_wrapper, SETTING_CHANGE_INITIATED
from student.tests.factories import UserFactory
from student.tests.test_email import mock_render_to_string
from util.testing import EventTestMixin
from test_microsite import fake_site_name
@ddt.ddt
class ResetPasswordTests(EventTestMixin, TestCase):
""" Tests that clicking reset password sends email, and doesn't activate the user
"""
request_factory = RequestFactory()
def setUp(self):
super(ResetPasswordTests, self).setUp('student.views.tracker')
self.user = UserFactory.create()
self.user.is_active = False
self.user.save()
self.token = default_token_generator.make_token(self.user)
self.uidb36 = int_to_base36(self.user.id)
self.user_bad_passwd = UserFactory.create()
self.user_bad_passwd.is_active = False
self.user_bad_passwd.password = UNUSABLE_PASSWORD
self.user_bad_passwd.save()
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_user_bad_password_reset(self):
"""Tests password reset behavior for user with password marked UNUSABLE_PASSWORD"""
bad_pwd_req = self.request_factory.post('/password_reset/', {'email': self.user_bad_passwd.email})
bad_pwd_resp = password_reset(bad_pwd_req)
# If they've got an unusable password, we return a successful response code
self.assertEquals(bad_pwd_resp.status_code, 200)
obj = json.loads(bad_pwd_resp.content)
self.assertEquals(obj, {
'success': True,
'value': "('registration/password_reset_done.html', [])",
})
self.assert_no_events_were_emitted()
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_nonexist_email_password_reset(self):
"""Now test the exception cases with of reset_password called with invalid email."""
bad_email_req = self.request_factory.post('/password_reset/', {'email': self.user.email + "makeItFail"})
bad_email_resp = password_reset(bad_email_req)
# Note: even if the email is bad, we return a successful response code
# This prevents someone potentially trying to "brute-force" find out which
# emails are and aren't registered with edX
self.assertEquals(bad_email_resp.status_code, 200)
obj = json.loads(bad_email_resp.content)
self.assertEquals(obj, {
'success': True,
'value': "('registration/password_reset_done.html', [])",
})
self.assert_no_events_were_emitted()
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_password_reset_ratelimited(self):
""" Try (and fail) resetting password 30 times in a row on an non-existant email address """
cache.clear()
for i in xrange(30):
good_req = self.request_factory.post('/password_reset/', {
'email': 'thisdoesnotexist{0}@foo.com'.format(i)
})
good_resp = password_reset(good_req)
self.assertEquals(good_resp.status_code, 200)
# then the rate limiter should kick in and give a HttpForbidden response
bad_req = self.request_factory.post('/password_reset/', {'email': '[email protected]'})
bad_resp = password_reset(bad_req)
self.assertEquals(bad_resp.status_code, 403)
self.assert_no_events_were_emitted()
cache.clear()
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch('django.core.mail.send_mail')
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_reset_password_email(self, send_email):
"""Tests contents of reset password email, and that user is not active"""
good_req = self.request_factory.post('/password_reset/', {'email': self.user.email})
good_req.user = self.user
good_resp = password_reset(good_req)
self.assertEquals(good_resp.status_code, 200)
obj = json.loads(good_resp.content)
self.assertEquals(obj, {
'success': True,
'value': "('registration/password_reset_done.html', [])",
})
(subject, msg, from_addr, to_addrs) = send_email.call_args[0]
self.assertIn("Password reset", subject)
self.assertIn("You're receiving this e-mail because you requested a password reset", msg)
self.assertEquals(from_addr, settings.DEFAULT_FROM_EMAIL)
self.assertEquals(len(to_addrs), 1)
self.assertIn(self.user.email, to_addrs)
self.assert_event_emitted(
SETTING_CHANGE_INITIATED, user_id=self.user.id, setting=u'password', old=None, new=None,
)
#test that the user is not active
self.user = User.objects.get(pk=self.user.pk)
self.assertFalse(self.user.is_active)
re.search(r'password_reset_confirm/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/', msg).groupdict()
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch('django.core.mail.send_mail')
@ddt.data((False, 'http://'), (True, 'https://'))
@ddt.unpack
def test_reset_password_email_https(self, is_secure, protocol, send_email):
"""
Tests that the right url protocol is included in the reset password link
"""
req = self.request_factory.post(
'/password_reset/', {'email': self.user.email}
)
req.is_secure = Mock(return_value=is_secure)
req.user = self.user
password_reset(req)
_, msg, _, _ = send_email.call_args[0]
expected_msg = "Please go to the following page and choose a new password:\n\n" + protocol
self.assertIn(expected_msg, msg)
self.assert_event_emitted(
SETTING_CHANGE_INITIATED, user_id=self.user.id, setting=u'password', old=None, new=None
)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch('django.core.mail.send_mail')
@ddt.data(('Crazy Awesome Site', 'Crazy Awesome Site'), (None, 'edX'))
@ddt.unpack
def test_reset_password_email_domain(self, domain_override, platform_name, send_email):
"""
Tests that the right url domain and platform name is included in
the reset password email
"""
with patch("django.conf.settings.PLATFORM_NAME", platform_name):
req = self.request_factory.post(
'/password_reset/', {'email': self.user.email}
)
req.get_host = Mock(return_value=domain_override)
req.user = self.user
password_reset(req)
_, msg, _, _ = send_email.call_args[0]
reset_msg = "you requested a password reset for your user account at {}"
if domain_override:
reset_msg = reset_msg.format(domain_override)
else:
reset_msg = reset_msg.format(settings.SITE_NAME)
self.assertIn(reset_msg, msg)
sign_off = "The {} Team".format(platform_name)
self.assertIn(sign_off, msg)
self.assert_event_emitted(
SETTING_CHANGE_INITIATED, user_id=self.user.id, setting=u'password', old=None, new=None
)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch("microsite_configuration.microsite.get_value", fake_site_name)
@patch('django.core.mail.send_mail')
def test_reset_password_email_microsite(self, send_email):
"""
Tests that the right url domain and platform name is included in
the reset password email
"""
req = self.request_factory.post(
'/password_reset/', {'email': self.user.email}
)
req.get_host = Mock(return_value=None)
req.user = self.user
password_reset(req)
_, msg, _, _ = send_email.call_args[0]
reset_msg = "you requested a password reset for your user account at openedx.localhost"
self.assertIn(reset_msg, msg)
self.assert_event_emitted(
SETTING_CHANGE_INITIATED, user_id=self.user.id, setting=u'password', old=None, new=None
)
@patch('student.views.password_reset_confirm')
def test_reset_password_bad_token(self, reset_confirm):
"""Tests bad token and uidb36 in password reset"""
bad_reset_req = self.request_factory.get('/password_reset_confirm/NO-OP/')
password_reset_confirm_wrapper(bad_reset_req, 'NO', 'OP')
confirm_kwargs = reset_confirm.call_args[1]
self.assertEquals(confirm_kwargs['uidb36'], 'NO')
self.assertEquals(confirm_kwargs['token'], 'OP')
self.user = User.objects.get(pk=self.user.pk)
self.assertFalse(self.user.is_active)
@patch('student.views.password_reset_confirm')
def test_reset_password_good_token(self, reset_confirm):
"""Tests good token and uidb36 in password reset"""
good_reset_req = self.request_factory.get('/password_reset_confirm/{0}-{1}/'.format(self.uidb36, self.token))
password_reset_confirm_wrapper(good_reset_req, self.uidb36, self.token)
confirm_kwargs = reset_confirm.call_args[1]
self.assertEquals(confirm_kwargs['uidb36'], self.uidb36)
self.assertEquals(confirm_kwargs['token'], self.token)
self.user = User.objects.get(pk=self.user.pk)
self.assertTrue(self.user.is_active)
@patch('student.views.password_reset_confirm')
def test_reset_password_with_reused_password(self, reset_confirm):
"""Tests good token and uidb36 in password reset"""
good_reset_req = self.request_factory.get('/password_reset_confirm/{0}-{1}/'.format(self.uidb36, self.token))
password_reset_confirm_wrapper(good_reset_req, self.uidb36, self.token)
confirm_kwargs = reset_confirm.call_args[1]
self.assertEquals(confirm_kwargs['uidb36'], self.uidb36)
self.assertEquals(confirm_kwargs['token'], self.token)
self.user = User.objects.get(pk=self.user.pk)
self.assertTrue(self.user.is_active)
| agpl-3.0 |
pjdelport/django | django/contrib/messages/tests/cookie.py | 9 | 5435 | import json
from django.contrib.messages import constants
from django.contrib.messages.tests.base import BaseTest
from django.contrib.messages.storage.cookie import (CookieStorage,
MessageEncoder, MessageDecoder)
from django.contrib.messages.storage.base import Message
from django.test.utils import override_settings
def set_cookie_data(storage, messages, invalid=False, encode_empty=False):
"""
Sets ``request.COOKIES`` with the encoded data and removes the storage
backend's loaded data cache.
"""
encoded_data = storage._encode(messages, encode_empty=encode_empty)
if invalid:
# Truncate the first character so that the hash is invalid.
encoded_data = encoded_data[1:]
storage.request.COOKIES = {CookieStorage.cookie_name: encoded_data}
if hasattr(storage, '_loaded_data'):
del storage._loaded_data
def stored_cookie_messages_count(storage, response):
"""
Returns an integer containing the number of messages stored.
"""
# Get a list of cookies, excluding ones with a max-age of 0 (because
# they have been marked for deletion).
cookie = response.cookies.get(storage.cookie_name)
if not cookie or cookie['max-age'] == 0:
return 0
data = storage._decode(cookie.value)
if not data:
return 0
if data[-1] == CookieStorage.not_finished:
data.pop()
return len(data)
@override_settings(SESSION_COOKIE_DOMAIN='.example.com')
class CookieTest(BaseTest):
storage_class = CookieStorage
def stored_messages_count(self, storage, response):
return stored_cookie_messages_count(storage, response)
def test_get(self):
storage = self.storage_class(self.get_request())
# Set initial data.
example_messages = ['test', 'me']
set_cookie_data(storage, example_messages)
# Test that the message actually contains what we expect.
self.assertEqual(list(storage), example_messages)
def test_domain(self):
"""
Ensure that CookieStorage honors SESSION_COOKIE_DOMAIN.
Refs #15618.
"""
# Test before the messages have been consumed
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'test')
storage.update(response)
self.assertTrue('test' in response.cookies['messages'].value)
self.assertEqual(response.cookies['messages']['domain'], '.example.com')
self.assertEqual(response.cookies['messages']['expires'], '')
# Test after the messages have been consumed
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'test')
for m in storage:
pass # Iterate through the storage to simulate consumption of messages.
storage.update(response)
self.assertEqual(response.cookies['messages'].value, '')
self.assertEqual(response.cookies['messages']['domain'], '.example.com')
self.assertEqual(response.cookies['messages']['expires'], 'Thu, 01-Jan-1970 00:00:00 GMT')
def test_get_bad_cookie(self):
request = self.get_request()
storage = self.storage_class(request)
# Set initial (invalid) data.
example_messages = ['test', 'me']
set_cookie_data(storage, example_messages, invalid=True)
# Test that the message actually contains what we expect.
self.assertEqual(list(storage), [])
def test_max_cookie_length(self):
"""
Tests that, if the data exceeds what is allowed in a cookie, older
messages are removed before saving (and returned by the ``update``
method).
"""
storage = self.get_storage()
response = self.get_response()
# When storing as a cookie, the cookie has constant overhead of approx
# 54 chars, and each message has a constant overhead of about 37 chars
# and a variable overhead of zero in the best case. We aim for a message
# size which will fit 4 messages into the cookie, but not 5.
# See also FallbackTest.test_session_fallback
msg_size = int((CookieStorage.max_cookie_size - 54) / 4.5 - 37)
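        # Illustrative arithmetic (assuming the usual max_cookie_size of
        # 4093 bytes): (4093 - 54) / 4.5 - 37 ~= 860, so each message is
        # about 860 characters and roughly 4 of them fit in one cookie.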
for i in range(5):
storage.add(constants.INFO, str(i) * msg_size)
unstored_messages = storage.update(response)
cookie_storing = self.stored_messages_count(storage, response)
self.assertEqual(cookie_storing, 4)
self.assertEqual(len(unstored_messages), 1)
self.assertTrue(unstored_messages[0].message == '0' * msg_size)
def test_json_encoder_decoder(self):
"""
Tests that a complex nested data structure containing Message
instances is properly encoded/decoded by the custom JSON
encoder/decoder classes.
"""
messages = [
{
'message': Message(constants.INFO, 'Test message'),
'message_list': [Message(constants.INFO, 'message %s') \
for x in range(5)] + [{'another-message': \
Message(constants.ERROR, 'error')}],
},
Message(constants.INFO, 'message %s'),
]
encoder = MessageEncoder(separators=(',', ':'))
value = encoder.encode(messages)
decoded_messages = json.loads(value, cls=MessageDecoder)
self.assertEqual(messages, decoded_messages)
| bsd-3-clause |
GoogleCloudPlatform/training-data-analyst | courses/machine_learning/deepdive2/end_to_end_ml/solutions/serving/application/lib/werkzeug/routing.py | 9 | 73410 | # -*- coding: utf-8 -*-
"""
werkzeug.routing
~~~~~~~~~~~~~~~~
When it comes to combining multiple controller or view functions (however
you want to call them) you need a dispatcher. A simple way would be
    applying regular expression tests on the ``PATH_INFO`` and then calling
    the registered callback functions, which return the value.
This module implements a much more powerful system than simple regular
expression matching because it can also convert values in the URLs and
build URLs.
    Here is a simple example that creates a URL map for an application with
two subdomains (www and kb) and some URL rules:
>>> m = Map([
... # Static URLs
... Rule('/', endpoint='static/index'),
... Rule('/about', endpoint='static/about'),
... Rule('/help', endpoint='static/help'),
... # Knowledge Base
... Subdomain('kb', [
... Rule('/', endpoint='kb/index'),
... Rule('/browse/', endpoint='kb/browse'),
... Rule('/browse/<int:id>/', endpoint='kb/browse'),
... Rule('/browse/<int:id>/<int:page>', endpoint='kb/browse')
... ])
... ], default_subdomain='www')
If the application doesn't use subdomains it's perfectly fine to not set
the default subdomain and not use the `Subdomain` rule factory. The endpoint
in the rules can be anything, for example import paths or unique
identifiers. The WSGI application can use those endpoints to get the
handler for that URL. It doesn't have to be a string at all but it's
recommended.
Now it's possible to create a URL adapter for one of the subdomains and
build URLs:
>>> c = m.bind('example.com')
>>> c.build("kb/browse", dict(id=42))
'http://kb.example.com/browse/42/'
>>> c.build("kb/browse", dict())
'http://kb.example.com/browse/'
>>> c.build("kb/browse", dict(id=42, page=3))
'http://kb.example.com/browse/42/3'
>>> c.build("static/about")
'/about'
>>> c.build("static/index", force_external=True)
'http://www.example.com/'
>>> c = m.bind('example.com', subdomain='kb')
>>> c.build("static/about")
'http://www.example.com/about'
The first argument to bind is the server name *without* the subdomain.
    By default it will assume that the script is mounted on the root, but
often that's not the case so you can provide the real mount point as
second argument:
>>> c = m.bind('example.com', '/applications/example')
The third argument can be the subdomain, if not given the default
subdomain is used. For more details about binding have a look at the
documentation of the `MapAdapter`.
And here is how you can match URLs:
>>> c = m.bind('example.com')
>>> c.match("/")
('static/index', {})
>>> c.match("/about")
('static/about', {})
>>> c = m.bind('example.com', '/', 'kb')
>>> c.match("/")
('kb/index', {})
>>> c.match("/browse/42/23")
('kb/browse', {'id': 42, 'page': 23})
If matching fails you get a `NotFound` exception, if the rule thinks
it's a good idea to redirect (for example because the URL was defined
to have a slash at the end but the request was missing that slash) it
will raise a `RequestRedirect` exception. Both are subclasses of the
`HTTPException` so you can use those errors as responses in the
application.
    If matching succeeded but the URL rule was incompatible with the given
method (for example there were only rules for `GET` and `HEAD` and
routing system tried to match a `POST` request) a `MethodNotAllowed`
exception is raised.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import ast
import difflib
import posixpath
import re
import uuid
from pprint import pformat
from threading import Lock
from ._compat import implements_to_string
from ._compat import iteritems
from ._compat import itervalues
from ._compat import native_string_result
from ._compat import string_types
from ._compat import text_type
from ._compat import to_bytes
from ._compat import to_unicode
from ._compat import wsgi_decoding_dance
from ._internal import _encode_idna
from ._internal import _get_environ
from .datastructures import ImmutableDict
from .datastructures import MultiDict
from .exceptions import BadHost
from .exceptions import HTTPException
from .exceptions import MethodNotAllowed
from .exceptions import NotFound
from .urls import _fast_url_quote
from .urls import url_encode
from .urls import url_join
from .urls import url_quote
from .utils import cached_property
from .utils import format_string
from .utils import redirect
from .wsgi import get_host
_rule_re = re.compile(
r"""
(?P<static>[^<]*) # static rule data
<
(?:
(?P<converter>[a-zA-Z_][a-zA-Z0-9_]*) # converter name
(?:\((?P<args>.*?)\))? # converter arguments
\: # variable delimiter
)?
(?P<variable>[a-zA-Z_][a-zA-Z0-9_]*) # variable name
>
""",
re.VERBOSE,
)
_simple_rule_re = re.compile(r"<([^>]+)>")
_converter_args_re = re.compile(
r"""
((?P<name>\w+)\s*=\s*)?
(?P<value>
True|False|
\d+.\d+|
\d+.|
\d+|
[\w\d_.]+|
[urUR]?(?P<stringval>"[^"]*?"|'[^']*')
)\s*,
""",
re.VERBOSE | re.UNICODE,
)
_PYTHON_CONSTANTS = {"None": None, "True": True, "False": False}
def _pythonize(value):
if value in _PYTHON_CONSTANTS:
return _PYTHON_CONSTANTS[value]
for convert in int, float:
try:
return convert(value)
except ValueError:
pass
if value[:1] == value[-1:] and value[0] in "\"'":
value = value[1:-1]
return text_type(value)
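# Illustrative sketch: string arguments are coerced to the matching Python
# literal before they are handed to a converter.
# >>> _pythonize('42'), _pythonize('1.5'), _pythonize('True'), _pythonize("'foo'")
# (42, 1.5, True, 'foo')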
def parse_converter_args(argstr):
argstr += ","
args = []
kwargs = {}
for item in _converter_args_re.finditer(argstr):
value = item.group("stringval")
if value is None:
value = item.group("value")
value = _pythonize(value)
if not item.group("name"):
args.append(value)
else:
name = item.group("name")
kwargs[name] = value
return tuple(args), kwargs
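# Illustrative sketch: positional and keyword converter arguments are split
# apart and pythonized.
# >>> parse_converter_args("2, minimum=1, allow_empty=False")
# ((2,), {'minimum': 1, 'allow_empty': False})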
def parse_rule(rule):
"""Parse a rule and return it as generator. Each iteration yields tuples
in the form ``(converter, arguments, variable)``. If the converter is
`None` it's a static url part, otherwise it's a dynamic one.
:internal:
"""
pos = 0
end = len(rule)
do_match = _rule_re.match
used_names = set()
while pos < end:
m = do_match(rule, pos)
if m is None:
break
data = m.groupdict()
if data["static"]:
yield None, None, data["static"]
variable = data["variable"]
converter = data["converter"] or "default"
if variable in used_names:
raise ValueError("variable name %r used twice." % variable)
used_names.add(variable)
yield converter, data["args"] or None, variable
pos = m.end()
if pos < end:
remaining = rule[pos:]
if ">" in remaining or "<" in remaining:
raise ValueError("malformed url rule: %r" % rule)
yield None, None, remaining
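# Illustrative sketch: static parts come back with converter None, dynamic
# parts with their converter name ('default' if none was given).
# >>> list(parse_rule('/browse/<int:id>/page/<page>'))
# [(None, None, '/browse/'), ('int', None, 'id'),
#  (None, None, '/page/'), ('default', None, 'page')]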
class RoutingException(Exception):
"""Special exceptions that require the application to redirect, notifying
about missing urls, etc.
:internal:
"""
class RequestRedirect(HTTPException, RoutingException):
"""Raise if the map requests a redirect. This is for example the case if
    `strict_slashes` is activated and a URL that requires a trailing slash
    was requested without one.
The attribute `new_url` contains the absolute destination url.
"""
code = 308
def __init__(self, new_url):
RoutingException.__init__(self, new_url)
self.new_url = new_url
def get_response(self, environ):
return redirect(self.new_url, self.code)
class RequestSlash(RoutingException):
"""Internal exception."""
class RequestAliasRedirect(RoutingException): # noqa: B903
"""This rule is an alias and wants to redirect to the canonical URL."""
def __init__(self, matched_values):
self.matched_values = matched_values
@implements_to_string
class BuildError(RoutingException, LookupError):
"""Raised if the build system cannot find a URL for an endpoint with the
values provided.
"""
def __init__(self, endpoint, values, method, adapter=None):
LookupError.__init__(self, endpoint, values, method)
self.endpoint = endpoint
self.values = values
self.method = method
self.adapter = adapter
@cached_property
def suggested(self):
return self.closest_rule(self.adapter)
def closest_rule(self, adapter):
def _score_rule(rule):
return sum(
[
0.98
* difflib.SequenceMatcher(
None, rule.endpoint, self.endpoint
).ratio(),
0.01 * bool(set(self.values or ()).issubset(rule.arguments)),
0.01 * bool(rule.methods and self.method in rule.methods),
]
)
if adapter and adapter.map._rules:
return max(adapter.map._rules, key=_score_rule)
def __str__(self):
message = []
message.append("Could not build url for endpoint %r" % self.endpoint)
if self.method:
message.append(" (%r)" % self.method)
if self.values:
message.append(" with values %r" % sorted(self.values.keys()))
message.append(".")
if self.suggested:
if self.endpoint == self.suggested.endpoint:
if self.method and self.method not in self.suggested.methods:
message.append(
" Did you mean to use methods %r?"
% sorted(self.suggested.methods)
)
missing_values = self.suggested.arguments.union(
set(self.suggested.defaults or ())
) - set(self.values.keys())
if missing_values:
message.append(
" Did you forget to specify values %r?" % sorted(missing_values)
)
else:
message.append(" Did you mean %r instead?" % self.suggested.endpoint)
return u"".join(message)
class ValidationError(ValueError):
"""Validation error. If a rule converter raises this exception the rule
does not match the current URL and the next URL is tried.
"""
class RuleFactory(object):
"""As soon as you have more complex URL setups it's a good idea to use rule
factories to avoid repetitive tasks. Some of them are builtin, others can
be added by subclassing `RuleFactory` and overriding `get_rules`.
"""
def get_rules(self, map):
"""Subclasses of `RuleFactory` have to override this method and return
an iterable of rules."""
raise NotImplementedError()
class Subdomain(RuleFactory):
"""All URLs provided by this factory have the subdomain set to a
specific domain. For example if you want to use the subdomain for
the current language this can be a good setup::
url_map = Map([
Rule('/', endpoint='#select_language'),
Subdomain('<string(length=2):lang_code>', [
Rule('/', endpoint='index'),
Rule('/about', endpoint='about'),
Rule('/help', endpoint='help')
])
])
All the rules except for the ``'#select_language'`` endpoint will now
listen on a two letter long subdomain that holds the language code
for the current request.
"""
def __init__(self, subdomain, rules):
self.subdomain = subdomain
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.subdomain = self.subdomain
yield rule
class Submount(RuleFactory):
"""Like `Subdomain` but prefixes the URL rule with a given string::
url_map = Map([
Rule('/', endpoint='index'),
Submount('/blog', [
Rule('/', endpoint='blog/index'),
Rule('/entry/<entry_slug>', endpoint='blog/show')
])
])
Now the rule ``'blog/show'`` matches ``/blog/entry/<entry_slug>``.
"""
def __init__(self, path, rules):
self.path = path.rstrip("/")
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.rule = self.path + rule.rule
yield rule
class EndpointPrefix(RuleFactory):
"""Prefixes all endpoints (which must be strings for this factory) with
another string. This can be useful for sub applications::
url_map = Map([
Rule('/', endpoint='index'),
EndpointPrefix('blog/', [Submount('/blog', [
Rule('/', endpoint='index'),
Rule('/entry/<entry_slug>', endpoint='show')
])])
])
"""
def __init__(self, prefix, rules):
self.prefix = prefix
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.endpoint = self.prefix + rule.endpoint
yield rule
class RuleTemplate(object):
"""Returns copies of the rules wrapped and expands string templates in
the endpoint, rule, defaults or subdomain sections.
Here a small example for such a rule template::
from werkzeug.routing import Map, Rule, RuleTemplate
resource = RuleTemplate([
Rule('/$name/', endpoint='$name.list'),
Rule('/$name/<int:id>', endpoint='$name.show')
])
url_map = Map([resource(name='user'), resource(name='page')])
When a rule template is called the keyword arguments are used to
replace the placeholders in all the string parameters.
"""
def __init__(self, rules):
self.rules = list(rules)
def __call__(self, *args, **kwargs):
return RuleTemplateFactory(self.rules, dict(*args, **kwargs))
class RuleTemplateFactory(RuleFactory):
"""A factory that fills in template variables into rules. Used by
`RuleTemplate` internally.
:internal:
"""
def __init__(self, rules, context):
self.rules = rules
self.context = context
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
new_defaults = subdomain = None
if rule.defaults:
new_defaults = {}
for key, value in iteritems(rule.defaults):
if isinstance(value, string_types):
value = format_string(value, self.context)
new_defaults[key] = value
if rule.subdomain is not None:
subdomain = format_string(rule.subdomain, self.context)
new_endpoint = rule.endpoint
if isinstance(new_endpoint, string_types):
new_endpoint = format_string(new_endpoint, self.context)
yield Rule(
format_string(rule.rule, self.context),
new_defaults,
subdomain,
rule.methods,
rule.build_only,
new_endpoint,
rule.strict_slashes,
)
def _prefix_names(src):
"""ast parse and prefix names with `.` to avoid collision with user vars"""
tree = ast.parse(src).body[0]
if isinstance(tree, ast.Expr):
tree = tree.value
for node in ast.walk(tree):
if isinstance(node, ast.Name):
node.id = "." + node.id
return tree
_CALL_CONVERTER_CODE_FMT = "self._converters[{elem!r}].to_url()"
_IF_KWARGS_URL_ENCODE_CODE = """\
if kwargs:
q = '?'
params = self._encode_query_vars(kwargs)
else:
q = params = ''
"""
_IF_KWARGS_URL_ENCODE_AST = _prefix_names(_IF_KWARGS_URL_ENCODE_CODE)
_URL_ENCODE_AST_NAMES = (_prefix_names("q"), _prefix_names("params"))
@implements_to_string
class Rule(RuleFactory):
"""A Rule represents one URL pattern. There are some options for `Rule`
that change the way it behaves and are passed to the `Rule` constructor.
Note that besides the rule-string all arguments *must* be keyword arguments
in order to not break the application on Werkzeug upgrades.
`string`
Rule strings basically are just normal URL paths with placeholders in
the format ``<converter(arguments):name>`` where the converter and the
arguments are optional. If no converter is defined the `default`
converter is used which means `string` in the normal configuration.
URL rules that end with a slash are branch URLs, others are leaves.
If you have `strict_slashes` enabled (which is the default), all
branch URLs that are matched without a trailing slash will trigger a
redirect to the same URL with the missing slash appended.
The converters are defined on the `Map`.
`endpoint`
The endpoint for this rule. This can be anything. A reference to a
function, a string, a number etc. The preferred way is using a string
because the endpoint is used for URL generation.
`defaults`
An optional dict with defaults for other rules with the same endpoint.
This is a bit tricky but useful if you want to have unique URLs::
url_map = Map([
Rule('/all/', defaults={'page': 1}, endpoint='all_entries'),
Rule('/all/page/<int:page>', endpoint='all_entries')
])
If a user now visits ``http://example.com/all/page/1`` he will be
redirected to ``http://example.com/all/``. If `redirect_defaults` is
disabled on the `Map` instance this will only affect the URL
generation.
`subdomain`
The subdomain rule string for this rule. If not specified the rule
only matches for the `default_subdomain` of the map. If the map is
not bound to a subdomain this feature is disabled.
Can be useful if you want to have user profiles on different subdomains
and all subdomains are forwarded to your application::
url_map = Map([
Rule('/', subdomain='<username>', endpoint='user/homepage'),
Rule('/stats', subdomain='<username>', endpoint='user/stats')
])
`methods`
A sequence of http methods this rule applies to. If not specified, all
methods are allowed. For example this can be useful if you want different
endpoints for `POST` and `GET`. If methods are defined and the path
matches but the method matched against is not in this list or in the
list of another rule for that path the error raised is of the type
`MethodNotAllowed` rather than `NotFound`. If `GET` is present in the
list of methods and `HEAD` is not, `HEAD` is added automatically.
.. versionchanged:: 0.6.1
`HEAD` is now automatically added to the methods if `GET` is
present. The reason for this is that existing code often did not
work properly in servers not rewriting `HEAD` to `GET`
automatically and it was not documented how `HEAD` should be
treated. This was considered a bug in Werkzeug because of that.
`strict_slashes`
Override the `Map` setting for `strict_slashes` only for this rule. If
not specified the `Map` setting is used.
`build_only`
Set this to True and the rule will never match but will create a URL
        that can be built. This is useful if you have resources on a subdomain
or folder that are not handled by the WSGI application (like static data)
`redirect_to`
If given this must be either a string or callable. In case of a
callable it's called with the url adapter that triggered the match and
the values of the URL as keyword arguments and has to return the target
for the redirect, otherwise it has to be a string with placeholders in
rule syntax::
def foo_with_slug(adapter, id):
# ask the database for the slug for the old id. this of
# course has nothing to do with werkzeug.
return 'foo/' + Foo.get_slug_for_id(id)
url_map = Map([
Rule('/foo/<slug>', endpoint='foo'),
Rule('/some/old/url/<slug>', redirect_to='foo/<slug>'),
Rule('/other/old/url/<int:id>', redirect_to=foo_with_slug)
])
When the rule is matched the routing system will raise a
`RequestRedirect` exception with the target for the redirect.
Keep in mind that the URL will be joined against the URL root of the
script so don't use a leading slash on the target URL unless you
really mean root of that domain.
`alias`
If enabled this rule serves as an alias for another rule with the same
endpoint and arguments.
`host`
If provided and the URL map has host matching enabled this can be
used to provide a match rule for the whole host. This also means
that the subdomain feature is disabled.
.. versionadded:: 0.7
The `alias` and `host` parameters were added.
"""
def __init__(
self,
string,
defaults=None,
subdomain=None,
methods=None,
build_only=False,
endpoint=None,
strict_slashes=None,
redirect_to=None,
alias=False,
host=None,
):
if not string.startswith("/"):
raise ValueError("urls must start with a leading slash")
self.rule = string
self.is_leaf = not string.endswith("/")
self.map = None
self.strict_slashes = strict_slashes
self.subdomain = subdomain
self.host = host
self.defaults = defaults
self.build_only = build_only
self.alias = alias
if methods is None:
self.methods = None
else:
if isinstance(methods, str):
raise TypeError("param `methods` should be `Iterable[str]`, not `str`")
self.methods = set([x.upper() for x in methods])
if "HEAD" not in self.methods and "GET" in self.methods:
self.methods.add("HEAD")
self.endpoint = endpoint
self.redirect_to = redirect_to
if defaults:
self.arguments = set(map(str, defaults))
else:
self.arguments = set()
self._trace = self._converters = self._regex = self._argument_weights = None
def empty(self):
"""
Return an unbound copy of this rule.
This can be useful if you want to reuse an already bound URL for another
map. See ``get_empty_kwargs`` to override what keyword arguments are
provided to the new copy.
"""
return type(self)(self.rule, **self.get_empty_kwargs())
def get_empty_kwargs(self):
"""
Provides kwargs for instantiating an empty copy with ``empty()``.
Use this method to provide custom keyword arguments to the subclass of
``Rule`` when calling ``some_rule.empty()``. Helpful when the subclass
has custom keyword arguments that are needed at instantiation.
Must return a ``dict`` that will be provided as kwargs to the new
instance of ``Rule``, following the initial ``self.rule`` value which
is always provided as the first, required positional argument.
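A minimal sketch of a hypothetical subclass that carries an extra
keyword argument through ``empty()``::
    class TaggedRule(Rule):
        def __init__(self, string, tag=None, **kwargs):
            super(TaggedRule, self).__init__(string, **kwargs)
            self.tag = tag
        def get_empty_kwargs(self):
            kwargs = super(TaggedRule, self).get_empty_kwargs()
            kwargs['tag'] = self.tag
            return kwargs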
"""
defaults = None
if self.defaults:
defaults = dict(self.defaults)
return dict(
defaults=defaults,
subdomain=self.subdomain,
methods=self.methods,
build_only=self.build_only,
endpoint=self.endpoint,
strict_slashes=self.strict_slashes,
redirect_to=self.redirect_to,
alias=self.alias,
host=self.host,
)
def get_rules(self, map):
yield self
def refresh(self):
"""Rebinds and refreshes the URL. Call this if you modified the
rule in place.
:internal:
"""
self.bind(self.map, rebind=True)
def bind(self, map, rebind=False):
"""Bind the url to a map and create a regular expression based on
the information from the rule itself and the defaults from the map.
:internal:
"""
if self.map is not None and not rebind:
raise RuntimeError("url rule %r already bound to map %r" % (self, self.map))
self.map = map
if self.strict_slashes is None:
self.strict_slashes = map.strict_slashes
if self.subdomain is None:
self.subdomain = map.default_subdomain
self.compile()
def get_converter(self, variable_name, converter_name, args, kwargs):
"""Looks up the converter for the given parameter.
.. versionadded:: 0.9
"""
if converter_name not in self.map.converters:
raise LookupError("the converter %r does not exist" % converter_name)
return self.map.converters[converter_name](self.map, *args, **kwargs)
def _encode_query_vars(self, query_vars):
return url_encode(
query_vars,
charset=self.map.charset,
sort=self.map.sort_parameters,
key=self.map.sort_key,
)
def compile(self):
"""Compiles the regular expression and stores it."""
assert self.map is not None, "rule not bound"
if self.map.host_matching:
domain_rule = self.host or ""
else:
domain_rule = self.subdomain or ""
self._trace = []
self._converters = {}
self._static_weights = []
self._argument_weights = []
regex_parts = []
def _build_regex(rule):
index = 0
for converter, arguments, variable in parse_rule(rule):
if converter is None:
regex_parts.append(re.escape(variable))
self._trace.append((False, variable))
for part in variable.split("/"):
if part:
self._static_weights.append((index, -len(part)))
else:
if arguments:
c_args, c_kwargs = parse_converter_args(arguments)
else:
c_args = ()
c_kwargs = {}
convobj = self.get_converter(variable, converter, c_args, c_kwargs)
regex_parts.append("(?P<%s>%s)" % (variable, convobj.regex))
self._converters[variable] = convobj
self._trace.append((True, variable))
self._argument_weights.append(convobj.weight)
self.arguments.add(str(variable))
index = index + 1
_build_regex(domain_rule)
regex_parts.append("\\|")
self._trace.append((False, "|"))
_build_regex(self.rule if self.is_leaf else self.rule.rstrip("/"))
if not self.is_leaf:
self._trace.append((False, "/"))
self._build = self._compile_builder(False).__get__(self, None)
self._build_unknown = self._compile_builder(True).__get__(self, None)
if self.build_only:
return
regex = r"^%s%s$" % (
u"".join(regex_parts),
(not self.is_leaf or not self.strict_slashes)
and "(?<!/)(?P<__suffix__>/?)"
or "",
)
self._regex = re.compile(regex, re.UNICODE)
def match(self, path, method=None):
"""Check if the rule matches a given path. Path is a string in the
form ``"subdomain|/path"`` and is assembled by the map. If
the map is doing host matching the subdomain part will be the host
instead.
If the rule matches a dict with the converted values is returned,
otherwise the return value is `None`.
:internal:
"""
if not self.build_only:
m = self._regex.search(path)
if m is not None:
groups = m.groupdict()
# we have a folder like part of the url without a trailing
# slash and strict slashes enabled. raise an exception that
# tells the map to redirect to the same url but with a
# trailing slash
if (
self.strict_slashes
and not self.is_leaf
and not groups.pop("__suffix__")
and (
method is None or self.methods is None or method in self.methods
)
):
raise RequestSlash()
# if we are not in strict slashes mode we have to remove
# a __suffix__
elif not self.strict_slashes:
del groups["__suffix__"]
result = {}
for name, value in iteritems(groups):
try:
value = self._converters[name].to_python(value)
except ValidationError:
return
result[str(name)] = value
if self.defaults:
result.update(self.defaults)
if self.alias and self.map.redirect_defaults:
raise RequestAliasRedirect(result)
return result
@staticmethod
def _get_func_code(code, name):
globs, locs = {}, {}
exec(code, globs, locs)
return locs[name]
def _compile_builder(self, append_unknown=True):
defaults = self.defaults or {}
dom_ops = []
url_ops = []
opl = dom_ops
for is_dynamic, data in self._trace:
if data == "|" and opl is dom_ops:
opl = url_ops
continue
# this seems like a silly case to ever come up but:
# if a default is given for a value that appears in the rule,
# resolve it to a constant ahead of time
if is_dynamic and data in defaults:
data = self._converters[data].to_url(defaults[data])
opl.append((False, data))
elif not is_dynamic:
opl.append(
(False, url_quote(to_bytes(data, self.map.charset), safe="/:|+"))
)
else:
opl.append((True, data))
def _convert(elem):
ret = _prefix_names(_CALL_CONVERTER_CODE_FMT.format(elem=elem))
ret.args = [ast.Name(str(elem), ast.Load())] # str for py2
return ret
def _parts(ops):
parts = [
_convert(elem) if is_dynamic else ast.Str(s=elem)
for is_dynamic, elem in ops
]
parts = parts or [ast.Str("")]
# constant fold
ret = [parts[0]]
for p in parts[1:]:
if isinstance(p, ast.Str) and isinstance(ret[-1], ast.Str):
ret[-1] = ast.Str(ret[-1].s + p.s)
else:
ret.append(p)
return ret
dom_parts = _parts(dom_ops)
url_parts = _parts(url_ops)
if not append_unknown:
body = []
else:
body = [_IF_KWARGS_URL_ENCODE_AST]
url_parts.extend(_URL_ENCODE_AST_NAMES)
def _join(parts):
if len(parts) == 1: # shortcut
return parts[0]
elif hasattr(ast, "JoinedStr"): # py36+
return ast.JoinedStr(parts)
else:
call = _prefix_names('"".join()')
call.args = [ast.Tuple(parts, ast.Load())]
return call
body.append(
ast.Return(ast.Tuple([_join(dom_parts), _join(url_parts)], ast.Load()))
)
# str is necessary for python2
pargs = [
str(elem)
for is_dynamic, elem in dom_ops + url_ops
if is_dynamic and elem not in defaults
]
kargs = [str(k) for k in defaults]
func_ast = _prefix_names("def _(): pass")
func_ast.name = "<builder:{!r}>".format(self.rule)
if hasattr(ast, "arg"): # py3
func_ast.args.args.append(ast.arg(".self", None))
for arg in pargs + kargs:
func_ast.args.args.append(ast.arg(arg, None))
func_ast.args.kwarg = ast.arg(".kwargs", None)
else:
func_ast.args.args.append(ast.Name(".self", ast.Param()))
for arg in pargs + kargs:
func_ast.args.args.append(ast.Name(arg, ast.Param()))
func_ast.args.kwarg = ".kwargs"
for _ in kargs:
func_ast.args.defaults.append(ast.Str(""))
func_ast.body = body
# use `ast.parse` instead of `ast.Module` for better portability
# python3.8 changes the signature of `ast.Module`
module = ast.parse("")
module.body = [func_ast]
# mark everything as on line 1, offset 0
# less error-prone than `ast.fix_missing_locations`
# bad line numbers cause an assert to fail in debug builds
for node in ast.walk(module):
if "lineno" in node._attributes:
node.lineno = 1
if "col_offset" in node._attributes:
node.col_offset = 0
code = compile(module, "<werkzeug routing>", "exec")
return self._get_func_code(code, func_ast.name)
def build(self, values, append_unknown=True):
"""Assembles the relative url for that rule and the subdomain.
If building doesn't work for some reasons `None` is returned.
:internal:
"""
try:
if append_unknown:
return self._build_unknown(**values)
else:
return self._build(**values)
except ValidationError:
return None
def provides_defaults_for(self, rule):
"""Check if this rule has defaults for a given rule.
:internal:
"""
return (
not self.build_only
and self.defaults
and self.endpoint == rule.endpoint
and self != rule
and self.arguments == rule.arguments
)
def suitable_for(self, values, method=None):
"""Check if the dict of values has enough data for url generation.
:internal:
"""
# if a method was given explicitly and that method is not supported
# by this rule, this rule is not suitable.
if (
method is not None
and self.methods is not None
and method not in self.methods
):
return False
defaults = self.defaults or ()
# all arguments required must be either in the defaults dict or
# the value dictionary otherwise it's not suitable
for key in self.arguments:
if key not in defaults and key not in values:
return False
# in case defaults are given we ensure that either the value was
# skipped or the value is the same as the default value.
if defaults:
for key, value in iteritems(defaults):
if key in values and value != values[key]:
return False
return True
def match_compare_key(self):
"""The match compare key for sorting.
Current implementation:
1. rules without any arguments come first for performance
reasons only as we expect them to match faster and some
common ones usually don't have any arguments (index pages etc.)
2. rules with more static parts come first so the second argument
is the negative length of the number of the static weights.
3. we order by static weights, which is a combination of index
and length
4. The more complex rules come first so the next argument is the
negative length of the number of argument weights.
5. lastly we order by the actual argument weights.
:internal:
"""
return (
bool(self.arguments),
-len(self._static_weights),
self._static_weights,
-len(self._argument_weights),
self._argument_weights,
)
def build_compare_key(self):
"""The build compare key for sorting.
:internal:
"""
return 1 if self.alias else 0, -len(self.arguments), -len(self.defaults or ())
def __eq__(self, other):
return self.__class__ is other.__class__ and self._trace == other._trace
__hash__ = None
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return self.rule
@native_string_result
def __repr__(self):
if self.map is None:
return u"<%s (unbound)>" % self.__class__.__name__
tmp = []
for is_dynamic, data in self._trace:
if is_dynamic:
tmp.append(u"<%s>" % data)
else:
tmp.append(data)
return u"<%s %s%s -> %s>" % (
self.__class__.__name__,
repr((u"".join(tmp)).lstrip(u"|")).lstrip(u"u"),
self.methods is not None and u" (%s)" % u", ".join(self.methods) or u"",
self.endpoint,
)
class BaseConverter(object):
"""Base class for all converters."""
regex = "[^/]+"
weight = 100
def __init__(self, map):
self.map = map
def to_python(self, value):
return value
def to_url(self, value):
if isinstance(value, (bytes, bytearray)):
return _fast_url_quote(value)
return _fast_url_quote(text_type(value).encode(self.map.charset))
class UnicodeConverter(BaseConverter):
"""This converter is the default converter and accepts any string but
only one path segment. Thus the string can not include a slash.
This is the default validator.
Example::
Rule('/pages/<page>'),
Rule('/<string(length=2):lang_code>')
:param map: the :class:`Map`.
:param minlength: the minimum length of the string. Must be greater
than or equal to 1.
:param maxlength: the maximum length of the string.
:param length: the exact length of the string.
"""
def __init__(self, map, minlength=1, maxlength=None, length=None):
BaseConverter.__init__(self, map)
if length is not None:
length = "{%d}" % int(length)
else:
if maxlength is None:
maxlength = ""
else:
maxlength = int(maxlength)
length = "{%s,%s}" % (int(minlength), maxlength)
self.regex = "[^/]" + length
class AnyConverter(BaseConverter):
"""Matches one of the items provided. Items can either be Python
identifiers or strings::
Rule('/<any(about, help, imprint, class, "foo,bar"):page_name>')
:param map: the :class:`Map`.
:param items: this function accepts the possible items as positional
arguments.
"""
def __init__(self, map, *items):
BaseConverter.__init__(self, map)
self.regex = "(?:%s)" % "|".join([re.escape(x) for x in items])
class PathConverter(BaseConverter):
"""Like the default :class:`UnicodeConverter`, but it also matches
slashes. This is useful for wikis and similar applications::
Rule('/<path:wikipage>')
Rule('/<path:wikipage>/edit')
:param map: the :class:`Map`.
"""
regex = "[^/].*?"
weight = 200
class NumberConverter(BaseConverter):
"""Baseclass for `IntegerConverter` and `FloatConverter`.
:internal:
"""
weight = 50
def __init__(self, map, fixed_digits=0, min=None, max=None, signed=False):
if signed:
self.regex = self.signed_regex
BaseConverter.__init__(self, map)
self.fixed_digits = fixed_digits
self.min = min
self.max = max
self.signed = signed
def to_python(self, value):
if self.fixed_digits and len(value) != self.fixed_digits:
raise ValidationError()
value = self.num_convert(value)
if (self.min is not None and value < self.min) or (
self.max is not None and value > self.max
):
raise ValidationError()
return value
def to_url(self, value):
value = self.num_convert(value)
if self.fixed_digits:
value = ("%%0%sd" % self.fixed_digits) % value
return str(value)
@property
def signed_regex(self):
return r"-?" + self.regex
class IntegerConverter(NumberConverter):
"""This converter only accepts integer values::
Rule("/page/<int:page>")
By default it only accepts unsigned, positive values. The ``signed``
parameter will enable signed, negative values. ::
Rule("/page/<int(signed=True):page>")
:param map: The :class:`Map`.
:param fixed_digits: The number of fixed digits in the URL. If you
set this to ``4`` for example, the rule will only match if the
URL looks like ``/0001/``. The default is variable length.
:param min: The minimal value.
:param max: The maximal value.
:param signed: Allow signed (negative) values.
.. versionadded:: 0.15
The ``signed`` parameter.
"""
regex = r"\d+"
num_convert = int
class FloatConverter(NumberConverter):
"""This converter only accepts floating point values::
Rule("/probability/<float:probability>")
By default it only accepts unsigned, positive values. The ``signed``
parameter will enable signed, negative values. ::
Rule("/offset/<float(signed=True):offset>")
:param map: The :class:`Map`.
:param min: The minimal value.
:param max: The maximal value.
:param signed: Allow signed (negative) values.
.. versionadded:: 0.15
The ``signed`` parameter.
"""
regex = r"\d+\.\d+"
num_convert = float
def __init__(self, map, min=None, max=None, signed=False):
NumberConverter.__init__(self, map, min=min, max=max, signed=signed)
class UUIDConverter(BaseConverter):
"""This converter only accepts UUID strings::
Rule('/object/<uuid:identifier>')
.. versionadded:: 0.10
:param map: the :class:`Map`.
"""
regex = (
r"[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-"
r"[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}"
)
def to_python(self, value):
return uuid.UUID(value)
def to_url(self, value):
return str(value)
#: the default converter mapping for the map.
DEFAULT_CONVERTERS = {
"default": UnicodeConverter,
"string": UnicodeConverter,
"any": AnyConverter,
"path": PathConverter,
"int": IntegerConverter,
"float": FloatConverter,
"uuid": UUIDConverter,
}
class Map(object):
"""The map class stores all the URL rules and some configuration
parameters. Some of the configuration values are only stored on the
`Map` instance since those affect all rules, others are just defaults
and can be overridden for each rule. Note that you have to specify all
arguments besides the `rules` as keyword arguments!
:param rules: sequence of url rules for this map.
:param default_subdomain: The default subdomain for rules without a
subdomain defined.
:param charset: charset of the url. defaults to ``"utf-8"``
:param strict_slashes: Take care of trailing slashes.
:param redirect_defaults: This will redirect to the default rule if it
wasn't visited that way. This helps create
unique URLs.
:param converters: A dict of converters that adds additional converters
to the list of converters. If you redefine one
converter this will override the original one.
:param sort_parameters: If set to `True` the url parameters are sorted.
See `url_encode` for more details.
:param sort_key: The sort key function for `url_encode`.
:param encoding_errors: the error method to use for decoding
:param host_matching: if set to `True` it enables the host matching
feature and disables the subdomain one. If
enabled the `host` parameter to rules is used
instead of the `subdomain` one.
.. versionadded:: 0.5
`sort_parameters` and `sort_key` were added.
.. versionadded:: 0.7
`encoding_errors` and `host_matching` were added.
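A small sketch of registering a custom converter through the
`converters` parameter (the ``bool`` name and its regex are
illustrative, not part of the default converters)::
    class BooleanConverter(BaseConverter):
        regex = '(?:yes|no)'
        def to_python(self, value):
            return value == 'yes'
        def to_url(self, value):
            return 'yes' if value else 'no'
    url_map = Map([
        Rule('/feature/<bool:active>', endpoint='feature')
    ], converters={'bool': BooleanConverter})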
"""
#: A dict of default converters to be used.
default_converters = ImmutableDict(DEFAULT_CONVERTERS)
def __init__(
self,
rules=None,
default_subdomain="",
charset="utf-8",
strict_slashes=True,
redirect_defaults=True,
converters=None,
sort_parameters=False,
sort_key=None,
encoding_errors="replace",
host_matching=False,
):
self._rules = []
self._rules_by_endpoint = {}
self._remap = True
self._remap_lock = Lock()
self.default_subdomain = default_subdomain
self.charset = charset
self.encoding_errors = encoding_errors
self.strict_slashes = strict_slashes
self.redirect_defaults = redirect_defaults
self.host_matching = host_matching
self.converters = self.default_converters.copy()
if converters:
self.converters.update(converters)
self.sort_parameters = sort_parameters
self.sort_key = sort_key
for rulefactory in rules or ():
self.add(rulefactory)
def is_endpoint_expecting(self, endpoint, *arguments):
"""Iterate over all rules and check if the endpoint expects
the arguments provided. This is for example useful if you have
some URLs that expect a language code and others that do not and
you want to wrap the builder a bit so that the current language
code is automatically added if not provided but endpoints expect
it.
:param endpoint: the endpoint to check.
:param arguments: this function accepts one or more arguments
as positional arguments. Each one of them is
checked.
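A rough sketch of such a wrapper; ``url_map``, ``adapter`` and
``get_current_language`` are assumed to be defined elsewhere::
    def url_for(endpoint, **values):
        if url_map.is_endpoint_expecting(endpoint, 'lang_code'):
            values.setdefault('lang_code', get_current_language())
        return adapter.build(endpoint, values)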
"""
self.update()
arguments = set(arguments)
for rule in self._rules_by_endpoint[endpoint]:
if arguments.issubset(rule.arguments):
return True
return False
def iter_rules(self, endpoint=None):
"""Iterate over all rules or the rules of an endpoint.
:param endpoint: if provided only the rules for that endpoint
are returned.
:return: an iterator
"""
self.update()
if endpoint is not None:
return iter(self._rules_by_endpoint[endpoint])
return iter(self._rules)
def add(self, rulefactory):
"""Add a new rule or factory to the map and bind it. Requires that the
rule is not bound to another map.
:param rulefactory: a :class:`Rule` or :class:`RuleFactory`
"""
for rule in rulefactory.get_rules(self):
rule.bind(self)
self._rules.append(rule)
self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule)
self._remap = True
def bind(
self,
server_name,
script_name=None,
subdomain=None,
url_scheme="http",
default_method="GET",
path_info=None,
query_args=None,
):
"""Return a new :class:`MapAdapter` with the details specified to the
call. Note that `script_name` will default to ``'/'`` if not further
specified or `None`. The `server_name` at least is a requirement
because the HTTP RFC requires absolute URLs for redirects and so all
redirect exceptions raised by Werkzeug will contain the full canonical
URL.
If no path_info is passed to :meth:`match` it will use the default path
info passed to bind. While this doesn't really make sense for
manual bind calls, it's useful if you bind a map to a WSGI
environment which already contains the path info.
`subdomain` will default to the `default_subdomain` for this map if
not defined. If there is no `default_subdomain` you cannot use the
subdomain feature.
.. versionadded:: 0.7
`query_args` added
.. versionadded:: 0.8
`query_args` can now also be a string.
.. versionchanged:: 0.15
``path_info`` defaults to ``'/'`` if ``None``.
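A small usage sketch (server name and subdomain are placeholders)::
    url_map = Map([Rule('/', endpoint='index', subdomain='www')])
    adapter = url_map.bind('example.com', subdomain='www',
                           url_scheme='https')
    adapter.match('/')  # expected to return ('index', {})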
"""
server_name = server_name.lower()
if self.host_matching:
if subdomain is not None:
raise RuntimeError("host matching enabled and a subdomain was provided")
elif subdomain is None:
subdomain = self.default_subdomain
if script_name is None:
script_name = "/"
if path_info is None:
path_info = "/"
try:
server_name = _encode_idna(server_name)
except UnicodeError:
raise BadHost()
return MapAdapter(
self,
server_name,
script_name,
subdomain,
url_scheme,
path_info,
default_method,
query_args,
)
def bind_to_environ(self, environ, server_name=None, subdomain=None):
"""Like :meth:`bind` but you can pass it an WSGI environment and it
will fetch the information from that dictionary. Note that because of
limitations in the protocol there is no way to get the current
subdomain and real `server_name` from the environment. If you don't
provide it, Werkzeug will use `SERVER_NAME` and `SERVER_PORT` (or
`HTTP_HOST` if provided) as the `server_name`, with the subdomain
feature disabled.
If `subdomain` is `None` but an environment and a server name is
provided it will calculate the current subdomain automatically.
Example: if `server_name` is ``'example.com'`` and the `SERVER_NAME`
in the WSGI `environ` is ``'staging.dev.example.com'``, the calculated
subdomain will be ``'staging.dev'``.
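A rough sketch of that situation inside a WSGI application, assuming
``url_map`` is a `Map` defined elsewhere::
    def application(environ, start_response):
        adapter = url_map.bind_to_environ(environ, server_name='example.com')
        # if the incoming host is 'staging.dev.example.com', the
        # adapter's subdomain is calculated as 'staging.dev'
        endpoint, values = adapter.match()
        # ...dispatch to a view here...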
If the object passed as environ has an environ attribute, the value of
this attribute is used instead. This allows you to pass request
objects. Additionally, `PATH_INFO` is added as a default of the
:class:`MapAdapter` so that you don't have to pass the path info to
the match method.
.. versionchanged:: 0.5
previously this method accepted a bogus `calculate_subdomain`
parameter that did not have any effect. It was removed because
of that.
.. versionchanged:: 0.8
This will no longer raise a ValueError when an unexpected server
name was passed.
:param environ: a WSGI environment.
:param server_name: an optional server name hint (see above).
:param subdomain: optionally the current subdomain (see above).
"""
environ = _get_environ(environ)
wsgi_server_name = get_host(environ).lower()
if server_name is None:
server_name = wsgi_server_name
else:
server_name = server_name.lower()
if subdomain is None and not self.host_matching:
cur_server_name = wsgi_server_name.split(".")
real_server_name = server_name.split(".")
offset = -len(real_server_name)
if cur_server_name[offset:] != real_server_name:
# This can happen even with valid configs if the server was
# accessed directly by IP address in some situations.
# Instead of raising an exception like in Werkzeug 0.7 or
# earlier we go by an invalid subdomain which will result
# in a 404 error on matching.
subdomain = "<invalid>"
else:
subdomain = ".".join(filter(None, cur_server_name[:offset]))
def _get_wsgi_string(name):
val = environ.get(name)
if val is not None:
return wsgi_decoding_dance(val, self.charset)
script_name = _get_wsgi_string("SCRIPT_NAME")
path_info = _get_wsgi_string("PATH_INFO")
query_args = _get_wsgi_string("QUERY_STRING")
return Map.bind(
self,
server_name,
script_name,
subdomain,
environ["wsgi.url_scheme"],
environ["REQUEST_METHOD"],
path_info,
query_args=query_args,
)
def update(self):
"""Called before matching and building to keep the compiled rules
in the correct order after things changed.
"""
if not self._remap:
return
with self._remap_lock:
if not self._remap:
return
self._rules.sort(key=lambda x: x.match_compare_key())
for rules in itervalues(self._rules_by_endpoint):
rules.sort(key=lambda x: x.build_compare_key())
self._remap = False
def __repr__(self):
rules = self.iter_rules()
return "%s(%s)" % (self.__class__.__name__, pformat(list(rules)))
class MapAdapter(object):
"""Returned by :meth:`Map.bind` or :meth:`Map.bind_to_environ` and does
the URL matching and building based on runtime information.
"""
def __init__(
self,
map,
server_name,
script_name,
subdomain,
url_scheme,
path_info,
default_method,
query_args=None,
):
self.map = map
self.server_name = to_unicode(server_name)
script_name = to_unicode(script_name)
if not script_name.endswith(u"/"):
script_name += u"/"
self.script_name = script_name
self.subdomain = to_unicode(subdomain)
self.url_scheme = to_unicode(url_scheme)
self.path_info = to_unicode(path_info)
self.default_method = to_unicode(default_method)
self.query_args = query_args
def dispatch(
self, view_func, path_info=None, method=None, catch_http_exceptions=False
):
"""Does the complete dispatching process. `view_func` is called with
the endpoint and a dict with the values for the view. It should
look up the view function, call it, and return a response object
or WSGI application. http exceptions are not caught by default
so that applications can display nicer error messages by just
catching them by hand. If you want to stick with the default
error messages you can pass it ``catch_http_exceptions=True`` and
it will catch the http exceptions.
Here is a small example of dispatch usage::
from werkzeug.wrappers import Request, Response
from werkzeug.wsgi import responder
from werkzeug.routing import Map, Rule
def on_index(request):
return Response('Hello from the index')
url_map = Map([Rule('/', endpoint='index')])
views = {'index': on_index}
@responder
def application(environ, start_response):
request = Request(environ)
urls = url_map.bind_to_environ(environ)
return urls.dispatch(lambda e, v: views[e](request, **v),
catch_http_exceptions=True)
Keep in mind that this method might return exception objects, too, so
use :class:`Response.force_type` to get a response object.
:param view_func: a function that is called with the endpoint as
first argument and the value dict as second. Has
to dispatch to the actual view function with this
information. (see above)
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
:param catch_http_exceptions: set to `True` to catch any of the
werkzeug :class:`HTTPException`\\s.
"""
try:
try:
endpoint, args = self.match(path_info, method)
except RequestRedirect as e:
return e
return view_func(endpoint, args)
except HTTPException as e:
if catch_http_exceptions:
return e
raise
def match(self, path_info=None, method=None, return_rule=False, query_args=None):
"""The usage is simple: you just pass the match method the current
path info as well as the method (which defaults to `GET`). The
following things can then happen:
- you receive a `NotFound` exception that indicates that no URL is
matching. A `NotFound` exception is also a WSGI application you
can call to get a default page not found page (happens to be the
same object as `werkzeug.exceptions.NotFound`)
- you receive a `MethodNotAllowed` exception that indicates that there
is a match for this URL but not for the current request method.
This is useful for RESTful applications.
- you receive a `RequestRedirect` exception with a `new_url`
attribute. This exception is used to notify you about a redirect
that Werkzeug requests from your WSGI application. This is for example
the case if you request ``/foo`` although the correct URL is ``/foo/``.
You can use the `RequestRedirect` instance as response-like object
similar to all other subclasses of `HTTPException`.
- you get a tuple in the form ``(endpoint, arguments)`` if there is
a match (unless `return_rule` is True, in which case you get a tuple
in the form ``(rule, arguments)``)
If the path info is not passed to the match method the default path
info of the map is used (defaults to the root URL if not defined
explicitly).
All of the exceptions raised are subclasses of `HTTPException` so they
can be used as WSGI responses. They will all render generic error or
redirect pages.
Here is a small example for matching:
>>> m = Map([
... Rule('/', endpoint='index'),
... Rule('/downloads/', endpoint='downloads/index'),
... Rule('/downloads/<int:id>', endpoint='downloads/show')
... ])
>>> urls = m.bind("example.com", "/")
>>> urls.match("/", "GET")
('index', {})
>>> urls.match("/downloads/42")
('downloads/show', {'id': 42})
And here is what happens on redirect and missing URLs:
>>> urls.match("/downloads")
Traceback (most recent call last):
...
RequestRedirect: http://example.com/downloads/
>>> urls.match("/missing")
Traceback (most recent call last):
...
NotFound: 404 Not Found
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
:param return_rule: return the rule that matched instead of just the
endpoint (defaults to `False`).
:param query_args: optional query arguments that are used for
automatic redirects as string or dictionary. It's
currently not possible to use the query arguments
for URL matching.
.. versionadded:: 0.6
`return_rule` was added.
.. versionadded:: 0.7
`query_args` was added.
.. versionchanged:: 0.8
`query_args` can now also be a string.
"""
self.map.update()
if path_info is None:
path_info = self.path_info
else:
path_info = to_unicode(path_info, self.map.charset)
if query_args is None:
query_args = self.query_args
method = (method or self.default_method).upper()
path = u"%s|%s" % (
self.map.host_matching and self.server_name or self.subdomain,
path_info and "/%s" % path_info.lstrip("/"),
)
have_match_for = set()
for rule in self.map._rules:
try:
rv = rule.match(path, method)
except RequestSlash:
raise RequestRedirect(
self.make_redirect_url(
url_quote(path_info, self.map.charset, safe="/:|+") + "/",
query_args,
)
)
except RequestAliasRedirect as e:
raise RequestRedirect(
self.make_alias_redirect_url(
path, rule.endpoint, e.matched_values, method, query_args
)
)
if rv is None:
continue
if rule.methods is not None and method not in rule.methods:
have_match_for.update(rule.methods)
continue
if self.map.redirect_defaults:
redirect_url = self.get_default_redirect(rule, method, rv, query_args)
if redirect_url is not None:
raise RequestRedirect(redirect_url)
if rule.redirect_to is not None:
if isinstance(rule.redirect_to, string_types):
def _handle_match(match):
value = rv[match.group(1)]
return rule._converters[match.group(1)].to_url(value)
redirect_url = _simple_rule_re.sub(_handle_match, rule.redirect_to)
else:
redirect_url = rule.redirect_to(self, **rv)
raise RequestRedirect(
str(
url_join(
"%s://%s%s%s"
% (
self.url_scheme or "http",
self.subdomain + "." if self.subdomain else "",
self.server_name,
self.script_name,
),
redirect_url,
)
)
)
if return_rule:
return rule, rv
else:
return rule.endpoint, rv
if have_match_for:
raise MethodNotAllowed(valid_methods=list(have_match_for))
raise NotFound()
def test(self, path_info=None, method=None):
"""Test if a rule would match. Works like `match` but returns `True`
if the URL matches and `False` if it does not.
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
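For example, reusing the ``urls`` adapter from the :meth:`match`
example above (illustrative only):
>>> urls.test("/downloads/42")
True
>>> urls.test("/missing")
False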
"""
try:
self.match(path_info, method)
except RequestRedirect:
pass
except HTTPException:
return False
return True
def allowed_methods(self, path_info=None):
"""Returns the valid methods that match for a given path.
.. versionadded:: 0.7
"""
try:
self.match(path_info, method="--")
except MethodNotAllowed as e:
return e.valid_methods
except HTTPException:
pass
return []
def get_host(self, domain_part):
"""Figures out the full host name for the given domain part. The
domain part is a subdomain when host matching is disabled, or
a full host name when host matching is enabled.
"""
if self.map.host_matching:
if domain_part is None:
return self.server_name
return to_unicode(domain_part, "ascii")
subdomain = domain_part
if subdomain is None:
subdomain = self.subdomain
else:
subdomain = to_unicode(subdomain, "ascii")
return (subdomain + u"." if subdomain else u"") + self.server_name
def get_default_redirect(self, rule, method, values, query_args):
"""A helper that returns the URL to redirect to if it finds one.
This is used for default redirecting only.
:internal:
"""
assert self.map.redirect_defaults
for r in self.map._rules_by_endpoint[rule.endpoint]:
# every rule that comes after this one, including ourself
# has a lower priority for the defaults. We order the ones
# with the highest priority up for building.
if r is rule:
break
if r.provides_defaults_for(rule) and r.suitable_for(values, method):
values.update(r.defaults)
domain_part, path = r.build(values)
return self.make_redirect_url(path, query_args, domain_part=domain_part)
def encode_query_args(self, query_args):
if not isinstance(query_args, string_types):
query_args = url_encode(query_args, self.map.charset)
return query_args
def make_redirect_url(self, path_info, query_args=None, domain_part=None):
"""Creates a redirect URL.
:internal:
"""
suffix = ""
if query_args:
suffix = "?" + self.encode_query_args(query_args)
return str(
"%s://%s/%s%s"
% (
self.url_scheme or "http",
self.get_host(domain_part),
posixpath.join(
self.script_name[:-1].lstrip("/"), path_info.lstrip("/")
),
suffix,
)
)
def make_alias_redirect_url(self, path, endpoint, values, method, query_args):
"""Internally called to make an alias redirect URL."""
url = self.build(
endpoint, values, method, append_unknown=False, force_external=True
)
if query_args:
url += "?" + self.encode_query_args(query_args)
assert url != path, "detected invalid alias setting. No canonical URL found"
return url
def _partial_build(self, endpoint, values, method, append_unknown):
"""Helper for :meth:`build`. Returns subdomain and path for the
rule that accepts this endpoint, values and method.
:internal:
"""
# in case the method is none, try with the default method first
if method is None:
rv = self._partial_build(
endpoint, values, self.default_method, append_unknown
)
if rv is not None:
return rv
# default method did not match or a specific method is passed,
# check all and go with first result.
for rule in self.map._rules_by_endpoint.get(endpoint, ()):
if rule.suitable_for(values, method):
rv = rule.build(values, append_unknown)
if rv is not None:
return rv
def build(
self,
endpoint,
values=None,
method=None,
force_external=False,
append_unknown=True,
):
"""Building URLs works pretty much the other way round. Instead of
`match` you call `build` and pass it the endpoint and a dict of
arguments for the placeholders.
The `build` function also accepts an argument called `force_external`
which, if set to `True`, will force external URLs. By default,
external URLs (including the server name) will only be used if the
target URL is on a different subdomain.
>>> m = Map([
... Rule('/', endpoint='index'),
... Rule('/downloads/', endpoint='downloads/index'),
... Rule('/downloads/<int:id>', endpoint='downloads/show')
... ])
>>> urls = m.bind("example.com", "/")
>>> urls.build("index", {})
'/'
>>> urls.build("downloads/show", {'id': 42})
'/downloads/42'
>>> urls.build("downloads/show", {'id': 42}, force_external=True)
'http://example.com/downloads/42'
Because URLs cannot contain non-ASCII data you will always get
bytestrings back. Non-ASCII characters are urlencoded with the
charset defined on the map instance.
Additional values are converted to unicode and appended to the URL as
URL querystring parameters:
>>> urls.build("index", {'q': 'My Searchstring'})
'/?q=My+Searchstring'
When processing those additional values, lists are furthermore
interpreted as multiple values (as per
:py:class:`werkzeug.datastructures.MultiDict`):
>>> urls.build("index", {'q': ['a', 'b', 'c']})
'/?q=a&q=b&q=c'
Passing a ``MultiDict`` will also add multiple values:
>>> urls.build("index", MultiDict((('p', 'z'), ('q', 'a'), ('q', 'b'))))
'/?p=z&q=a&q=b'
If a rule does not exist when building a `BuildError` exception is
raised.
The build method accepts an argument called `method` which allows you
to specify the method you want to have a URL built for if you have
different methods for the same endpoint specified.
.. versionadded:: 0.6
the `append_unknown` parameter was added.
:param endpoint: the endpoint of the URL to build.
:param values: the values for the URL to build. Unhandled values are
appended to the URL as query parameters.
:param method: the HTTP method for the rule if there are different
URLs for different methods on the same endpoint.
:param force_external: enforce full canonical external URLs. If the URL
scheme is not provided, this will generate
a protocol-relative URL.
:param append_unknown: unknown parameters are appended to the generated
URL as query string argument. Disable this
if you want the builder to ignore those.
"""
self.map.update()
if values:
if isinstance(values, MultiDict):
temp_values = {}
# iteritems(dict, values) is like `values.lists()`
# without the call or `list()` coercion overhead.
for key, value in iteritems(dict, values):
if not value:
continue
if len(value) == 1: # flatten single item lists
value = value[0]
if value is None: # drop None
continue
temp_values[key] = value
values = temp_values
else:
# drop None
values = dict(i for i in iteritems(values) if i[1] is not None)
else:
values = {}
rv = self._partial_build(endpoint, values, method, append_unknown)
if rv is None:
raise BuildError(endpoint, values, method, self)
domain_part, path = rv
host = self.get_host(domain_part)
# shortcut this.
if not force_external and (
(self.map.host_matching and host == self.server_name)
or (not self.map.host_matching and domain_part == self.subdomain)
):
return "%s/%s" % (self.script_name.rstrip("/"), path.lstrip("/"))
return str(
"%s//%s%s/%s"
% (
self.url_scheme + ":" if self.url_scheme else "",
host,
self.script_name[:-1],
path.lstrip("/"),
)
)
| apache-2.0 |
Eric-Zhong/odoo | addons/l10n_gt/__init__.py | 411 | 1113 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2009-2010 Soluciones Tecnologócias Prisma S.A. All Rights Reserved.
# José Rodrigo Fernández Menegazzo, Soluciones Tecnologócias Prisma S.A.
# (http://www.solucionesprisma.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
aonotas/chainer | tests/chainer_tests/links_tests/connection_tests/test_highway.py | 9 | 2427 | import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
class TestHighway(unittest.TestCase):
in_out_size = 3
def setUp(self):
self.x = numpy.random.uniform(
-1, 1, (5, self.in_out_size)).astype(numpy.float32)
self.gy = numpy.random.uniform(
-1, 1, (5, self.in_out_size)).astype(numpy.float32)
self.link = links.Highway(
self.in_out_size, activate=functions.tanh)
Wh = self.link.plain.W.data
Wh[...] = numpy.random.uniform(-1, 1, Wh.shape)
bh = self.link.plain.b.data
bh[...] = numpy.random.uniform(-1, 1, bh.shape)
Wt = self.link.transform.W.data
Wt[...] = numpy.random.uniform(-1, 1, Wt.shape)
bt = self.link.transform.b.data
bt[...] = numpy.random.uniform(-1, 1, bt.shape)
self.link.cleargrads()
self.Wh = Wh.copy() # fixed on CPU
self.bh = bh.copy() # fixed on CPU
self.Wt = Wt.copy() # fixed on CPU
self.bt = bt.copy() # fixed on CPU
a = numpy.tanh(self.x.dot(Wh.T) + bh)
b = self.sigmoid(self.x.dot(Wt.T) + bt)
self.y = (a * b +
self.x * (numpy.ones_like(self.x) - b))
def sigmoid(self, x):
half = x.dtype.type(0.5)
return numpy.tanh(x * half) * half + half
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = self.link(x)
self.assertEqual(y.data.dtype, numpy.float32)
testing.assert_allclose(self.y, y.data)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.link.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
self.link, x_data, y_grad,
(self.link.plain.W, self.link.plain.b,
self.link.transform.W, self.link.transform.b),
eps=1e-2, atol=3.2e-3, rtol=1e-2)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
testing.run_module(__name__, __file__)
| mit |
SofiaReis/django-cms | cms/test_utils/project/sampleapp/south_migrations/0001_initial.py | 46 | 3361 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Category'
db.create_table(u'sampleapp_category', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('path', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
('depth', self.gf('django.db.models.fields.PositiveIntegerField')()),
('numchild', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('parent', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sampleapp.Category'], null=True, blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=20)),
('description', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cms.Placeholder'], null=True)),
))
db.send_create_signal(u'sampleapp', ['Category'])
# Adding model 'Picture'
db.create_table(u'sampleapp_picture', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),
('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sampleapp.Category'])),
))
db.send_create_signal(u'sampleapp', ['Picture'])
def backwards(self, orm):
# Deleting model 'Category'
db.delete_table(u'sampleapp_category')
# Deleting model 'Picture'
db.delete_table(u'sampleapp_picture')
models = {
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
u'sampleapp.category': {
'Meta': {'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sampleapp.Category']", 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'sampleapp.picture': {
'Meta': {'object_name': 'Picture'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sampleapp.Category']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'})
}
}
complete_apps = ['sampleapp'] | bsd-3-clause |
ojengwa/talk | venv/lib/python2.7/site-packages/setuptools/site-patch.py | 720 | 2389 | def __boot():
import sys
import os
PYTHONPATH = os.environ.get('PYTHONPATH')
if PYTHONPATH is None or (sys.platform=='win32' and not PYTHONPATH):
PYTHONPATH = []
else:
PYTHONPATH = PYTHONPATH.split(os.pathsep)
pic = getattr(sys,'path_importer_cache',{})
stdpath = sys.path[len(PYTHONPATH):]
mydir = os.path.dirname(__file__)
#print "searching",stdpath,sys.path
for item in stdpath:
if item==mydir or not item:
continue # skip if current dir. on Windows, or my own directory
importer = pic.get(item)
if importer is not None:
loader = importer.find_module('site')
if loader is not None:
# This should actually reload the current module
loader.load_module('site')
break
else:
try:
import imp # Avoid import loop in Python >= 3.3
stream, path, descr = imp.find_module('site',[item])
except ImportError:
continue
if stream is None:
continue
try:
# This should actually reload the current module
imp.load_module('site',stream,path,descr)
finally:
stream.close()
break
else:
raise ImportError("Couldn't find the real 'site' module")
#print "loaded", __file__
known_paths = dict([(makepath(item)[1],1) for item in sys.path]) # 2.2 comp
oldpos = getattr(sys,'__egginsert',0) # save old insertion position
sys.__egginsert = 0 # and reset the current one
for item in PYTHONPATH:
addsitedir(item)
sys.__egginsert += oldpos # restore effective old position
d, nd = makepath(stdpath[0])
insert_at = None
new_path = []
for item in sys.path:
p, np = makepath(item)
if np==nd and insert_at is None:
# We've hit the first 'system' path entry, so added entries go here
insert_at = len(new_path)
if np in known_paths or insert_at is None:
new_path.append(item)
else:
# new path after the insert point, back-insert it
new_path.insert(insert_at, item)
insert_at += 1
sys.path[:] = new_path
if __name__=='site':
__boot()
del __boot
| mit |
fiete201/qutebrowser | tests/unit/utils/test_urlmatch.py | 2 | 23551 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2018-2021 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <https://www.gnu.org/licenses/>.
"""Tests for qutebrowser.utils.urlmatch.
The tests are mostly inspired by Chromium's:
https://cs.chromium.org/chromium/src/extensions/common/url_pattern_unittest.cc
Currently not tested:
- Nested filesystem:// URLs as we don't have those.
- Unicode matching because QUrl doesn't like those URLs.
- Any other features we don't need, such as .GetAsString() or set operations.
"""
import string
import pytest
import hypothesis
import hypothesis.strategies as hst
from PyQt5.QtCore import QUrl
from qutebrowser.utils import urlmatch
@pytest.mark.parametrize('pattern, error', [
### Chromium: kMissingSchemeSeparator
## TEST(ExtensionURLPatternTest, ParseInvalid)
# ("http", "No scheme given"),
("http:", "Invalid port: Port is empty"),
("http:/", "Invalid port: Port is empty"),
("about://", "Pattern without path"),
("http:/bar", "Invalid port: Port is empty"),
### Chromium: kEmptyHost
## TEST(ExtensionURLPatternTest, ParseInvalid)
("http://", "Pattern without host"),
("http:///", "Pattern without host"),
("http://:1234/", "Pattern without host"),
("http://*./", "Pattern without host"),
## TEST(ExtensionURLPatternTest, IPv6Patterns)
("http://[]:8888/*", "Pattern without host"),
### Chromium: kEmptyPath
## TEST(ExtensionURLPatternTest, ParseInvalid)
# We deviate from Chromium and allow this for ease of use
# ("http://bar", "..."),
### Chromium: kInvalidHost
## TEST(ExtensionURLPatternTest, ParseInvalid)
("http://\0www/", "May not contain NUL byte"),
## TEST(ExtensionURLPatternTest, IPv6Patterns)
# No closing bracket (`]`).
("http://[2607:f8b0:4005:805::200e/*", "Invalid IPv6 URL"),
# Two closing brackets (`]]`).
pytest.param("http://[2607:f8b0:4005:805::200e]]/*", "Invalid IPv6 URL", marks=pytest.mark.xfail(reason="https://bugs.python.org/issue34360")),
# Two open brackets (`[[`).
("http://[[2607:f8b0:4005:805::200e]/*", r"""Expected '\]' to match '\[' in hostname; source was "\[2607:f8b0:4005:805::200e"; host = """""),
# Too few colons in the last chunk.
("http://[2607:f8b0:4005:805:200e]/*", 'Invalid IPv6 address; source was "2607:f8b0:4005:805:200e"; host = ""'),
# Non-hex piece.
("http://[2607:f8b0:4005:805:200e:12:bogus]/*", 'Invalid IPv6 address; source was "2607:f8b0:4005:805:200e:12:bogus"; host = ""'),
### Chromium: kInvalidHostWildcard
## TEST(ExtensionURLPatternTest, ParseInvalid)
("http://*foo/bar", "Invalid host wildcard"),
("http://foo.*.bar/baz", "Invalid host wildcard"),
("http://fo.*.ba:123/baz", "Invalid host wildcard"),
("http://foo.*/bar", "Invalid host wildcard"),
### Chromium: kInvalidPort
## TEST(ExtensionURLPatternTest, Ports)
("http://foo:/", "Invalid port: Port is empty"),
("http://*.foo:/", "Invalid port: Port is empty"),
("http://foo:com/", "Invalid port: .* 'com'"),
("http://foo:123456/", "Invalid port: Port out of range 0-65535"),
("http://foo:80:80/monkey", "Invalid port: .* '80:80'"),
("chrome://foo:1234/bar", "Ports are unsupported with chrome scheme"),
# No port specified, but port separator.
("http://[2607:f8b0:4005:805::200e]:/*", "Invalid port: Port is empty"),
### Additional tests
("http://[", "Invalid IPv6 URL"),
("http://[fc2e::bb88::edac]", 'Invalid IPv6 address; source was "fc2e::bb88::edac"; host = ""'),
("http://[fc2e:0e35:bb88::edac:fc2e:0e35:bb88:edac]", 'Invalid IPv6 address; source was "fc2e:0e35:bb88::edac:fc2e:0e35:bb88:edac"; host = ""'),
("http://[fc2e:0e35:bb88:af:edac:fc2e:0e35:bb88:edac]", 'Invalid IPv6 address; source was "fc2e:0e35:bb88:af:edac:fc2e:0e35:bb88:edac"; host = ""'),
("http://[127.0.0.1:fc2e::bb88:edac]", r'Invalid IPv6 address; source was "127\.0\.0\.1:fc2e::bb88:edac'),
("http://[fc2e::bb88", "Invalid IPv6 URL"),
("http://[fc2e:bb88:edac]", 'Invalid IPv6 address; source was "fc2e:bb88:edac"; host = ""'),
("http://[fc2e:bb88:edac::z]", 'Invalid IPv6 address; source was "fc2e:bb88:edac::z"; host = ""'),
("http://[fc2e:bb88:edac::2]:2a2", "Invalid port: .* '2a2'"),
("://", "Missing scheme"),
])
def test_invalid_patterns(pattern, error):
with pytest.raises(urlmatch.ParseError, match=error):
urlmatch.UrlPattern(pattern)
@pytest.mark.parametrize('host', ['.', ' ', ' .', '. ', '. .', '. . .', ' . '])
def test_whitespace_hosts(host):
"""Test that whitespace dot hosts are invalid.
This is a deviation from Chromium.
"""
template = 'https://{}/*'
url = QUrl(template.format(host))
assert not url.isValid()
with pytest.raises(urlmatch.ParseError,
match='Invalid host|Pattern without host'):
urlmatch.UrlPattern(template.format(host))
@pytest.mark.parametrize('pattern, port', [
## TEST(ExtensionURLPatternTest, Ports)
("http://foo:1234/", 1234),
("http://foo:1234/bar", 1234),
("http://*.foo:1234/", 1234),
("http://*.foo:1234/bar", 1234),
("http://*:1234/", 1234),
("http://*:*/", None),
("http://foo:*/", None),
("file://foo:1234/bar", None),
# Port-like strings in the path should not trigger a warning.
("http://*/:1234", None),
("http://*.foo/bar:1234", None),
("http://foo/bar:1234/path", None),
])
def test_port(pattern, port):
up = urlmatch.UrlPattern(pattern)
assert up._port == port
@pytest.mark.parametrize('pattern, path', [
("http://foo/", '/'),
("http://foo/*", None),
])
def test_parse_path(pattern, path):
up = urlmatch.UrlPattern(pattern)
assert up._path == path
@pytest.mark.parametrize('pattern, scheme, host, path', [
("http://example.com", 'http', 'example.com', None), # no path
("example.com/path", None, 'example.com', '/path'), # no scheme
("example.com", None, 'example.com', None), # no scheme and no path
("example.com:1234", None, 'example.com', None), # no scheme/path but port
("data:monkey", 'data', None, 'monkey'), # existing scheme
])
def test_lightweight_patterns(pattern, scheme, host, path):
"""Make sure we can leave off parts of a URL.
This is a deviation from Chromium to make patterns more user-friendly.
"""
up = urlmatch.UrlPattern(pattern)
assert up._scheme == scheme
assert up.host == host
assert up._path == path
class TestMatchAllPagesForGivenScheme:
"""Based on TEST(ExtensionURLPatternTest, Match1)."""
@pytest.fixture
def up(self):
return urlmatch.UrlPattern("http://*/*")
def test_attrs(self, up):
assert up._scheme == 'http'
assert up.host is None
assert up._match_subdomains
assert not up._match_all
assert up._path is None
@pytest.mark.parametrize('url, expected', [
("http://google.com", True),
("http://yahoo.com", True),
("http://google.com/foo", True),
("https://google.com", False),
("http://74.125.127.100/search", True),
# Additional tests
("http://google.com:80", True),
("http://google.com.", True),
("http://[fc2e:0e35:bb88::edac]", True),
("http://[fc2e:e35:bb88::edac]", True),
("http://[fc2e:e35:bb88::127.0.0.1]", True),
("http://[::1]/bar", True),
])
def test_urls(self, up, url, expected):
assert up.matches(QUrl(url)) == expected
class TestMatchAllDomains:
"""Based on TEST(ExtensionURLPatternTest, Match2)."""
@pytest.fixture
def up(self):
return urlmatch.UrlPattern("https://*/foo*")
def test_attrs(self, up):
assert up._scheme == 'https'
assert up.host is None
assert up._match_subdomains
assert not up._match_all
assert up._path == '/foo*'
@pytest.mark.parametrize('url, expected', [
("https://google.com/foo", True),
("https://google.com/foobar", True),
("http://google.com/foo", False),
("https://google.com/", False),
])
def test_urls(self, up, url, expected):
assert up.matches(QUrl(url)) == expected
class TestMatchSubdomains:
"""Based on TEST(ExtensionURLPatternTest, Match3)."""
@pytest.fixture
def up(self):
return urlmatch.UrlPattern("http://*.google.com/foo*bar")
def test_attrs(self, up):
assert up._scheme == 'http'
assert up.host == 'google.com'
assert up._match_subdomains
assert not up._match_all
assert up._path == '/foo*bar'
@pytest.mark.parametrize('url, expected', [
("http://google.com/foobar", True),
# FIXME The ?bar seems to be treated as path by GURL but as query by
# QUrl.
# ("http://www.google.com/foo?bar", True),
("http://monkey.images.google.com/foooobar", True),
("http://yahoo.com/foobar", False),
])
def test_urls(self, up, url, expected):
assert up.matches(QUrl(url)) == expected
class TestMatchGlobEscaping:
"""Based on TEST(ExtensionURLPatternTest, Match5)."""
@pytest.fixture
def up(self):
return urlmatch.UrlPattern(r"file:///foo-bar\*baz")
def test_attrs(self, up):
assert up._scheme == 'file'
assert up.host is None
assert not up._match_subdomains
assert not up._match_all
assert up._path == r'/foo-bar\*baz'
@pytest.mark.parametrize('url, expected', [
## TEST(ExtensionURLPatternTest, Match5)
# We use - instead of ? so it doesn't get treated as query
(r"file:///foo-bar\hellobaz", True),
(r"file:///fooXbar\hellobaz", False),
])
def test_urls(self, up, url, expected):
assert up.matches(QUrl(url)) == expected
class TestMatchIpAddresses:
"""Based on TEST(ExtensionURLPatternTest, Match6/7)."""
@pytest.mark.parametrize('pattern, host, match_subdomains', [
("http://127.0.0.1/*", "127.0.0.1", False),
("http://*.0.0.1/*", "0.0.1", True),
## Others
("http://[::1]/*", "::1", False),
("http://[0::1]/*", "::1", False),
("http://[::01]/*", "::1", False),
("http://[0:0:0:0:20::1]/*", "::20:0:0:1", False),
])
def test_attrs(self, pattern, host, match_subdomains):
up = urlmatch.UrlPattern(pattern)
assert up._scheme == 'http'
assert up.host == host
assert up._match_subdomains == match_subdomains
assert not up._match_all
assert up._path is None
@pytest.mark.parametrize('pattern, expected', [
("http://127.0.0.1/*", True),
# No subdomain matching is done with IPs
("http://*.0.0.1/*", False),
])
def test_urls(self, pattern, expected):
up = urlmatch.UrlPattern(pattern)
assert up.matches(QUrl("http://127.0.0.1")) == expected
## FIXME Missing TEST(ExtensionURLPatternTest, Match8) (unicode)?
class TestMatchChromeUrls:
"""Based on TEST(ExtensionURLPatternTest, Match9/10)."""
@pytest.fixture
def up(self):
return urlmatch.UrlPattern("chrome://favicon/*")
def test_attrs(self, up):
assert up._scheme == 'chrome'
assert up.host == 'favicon'
assert not up._match_subdomains
assert not up._match_all
assert up._path is None
@pytest.mark.parametrize('url, expected', [
("chrome://favicon/http://google.com", True),
("chrome://favicon/https://google.com", True),
("chrome://history", False),
])
def test_urls(self, up, url, expected):
assert up.matches(QUrl(url)) == expected
class TestMatchAnything:
"""Based on TEST(ExtensionURLPatternTest, Match10/11)."""
@pytest.fixture(params=['*://*/*', '*://*:*/*', '<all_urls>', '*://*'])
def up(self, request):
return urlmatch.UrlPattern(request.param)
def test_attrs_common(self, up):
assert up._scheme is None
assert up.host is None
assert up._path is None
def test_attrs_wildcard(self):
up = urlmatch.UrlPattern('*://*/*')
assert up._match_subdomains
assert not up._match_all
def test_attrs_all(self):
up = urlmatch.UrlPattern('<all_urls>')
assert not up._match_subdomains
assert up._match_all
@pytest.mark.parametrize('url', [
"http://127.0.0.1",
# We deviate from Chromium as we allow other schemes as well
"chrome://favicon/http://google.com",
"file:///foo/bar",
"file://localhost/foo/bar",
"qute://version",
"about:blank",
"data:text/html;charset=utf-8,<html>asdf</html>",
"javascript:",
])
def test_urls(self, up, url):
assert up.matches(QUrl(url))
@pytest.mark.parametrize('pattern, url, expected', [
("about:*", "about:blank", True),
("about:blank", "about:blank", True),
("about:*", "about:version", True),
("data:*", "data:monkey", True),
("javascript:*", "javascript:atemyhomework", True),
("data:*", "about:blank", False),
])
def test_special_schemes(pattern, url, expected):
"""Based on TEST(ExtensionURLPatternTest, Match13)."""
assert urlmatch.UrlPattern(pattern).matches(QUrl(url)) == expected
class TestFileScheme:
"""Based on TEST(ExtensionURLPatternTest, Match14/15/16)."""
@pytest.fixture(params=[
'file:///foo*',
'file://foo*',
# FIXME This doesn't pass all tests
pytest.param('file://localhost/foo*', marks=pytest.mark.skip(
reason="We're not handling this correctly in all cases"))
])
def up(self, request):
return urlmatch.UrlPattern(request.param)
def test_attrs(self, up):
assert up._scheme == 'file'
assert up.host is None
assert not up._match_subdomains
assert not up._match_all
assert up._path == '/foo*'
@pytest.mark.parametrize('url, expected', [
("file://foo", False),
("file://foobar", False),
("file:///foo", True),
("file:///foobar", True),
("file://localhost/foo", True),
])
def test_urls(self, up, url, expected):
assert up.matches(QUrl(url)) == expected
class TestMatchSpecificPort:
"""Based on TEST(ExtensionURLPatternTest, Match17)."""
@pytest.fixture
def up(self):
return urlmatch.UrlPattern("http://www.example.com:80/foo")
def test_attrs(self, up):
assert up._scheme == 'http'
assert up.host == 'www.example.com'
assert not up._match_subdomains
assert not up._match_all
assert up._path == '/foo'
assert up._port == 80
@pytest.mark.parametrize('url, expected', [
("http://www.example.com:80/foo", True),
("http://www.example.com/foo", True),
("http://www.example.com:8080/foo", False),
])
def test_urls(self, up, url, expected):
assert up.matches(QUrl(url)) == expected
class TestExplicitPortWildcard:
"""Based on TEST(ExtensionURLPatternTest, Match18)."""
@pytest.fixture
def up(self):
return urlmatch.UrlPattern("http://www.example.com:*/foo")
def test_attrs(self, up):
assert up._scheme == 'http'
assert up.host == 'www.example.com'
assert not up._match_subdomains
assert not up._match_all
assert up._path == '/foo'
assert up._port is None
@pytest.mark.parametrize('url, expected', [
("http://www.example.com:80/foo", True),
("http://www.example.com/foo", True),
("http://www.example.com:8080/foo", True),
])
def test_urls(self, up, url, expected):
assert up.matches(QUrl(url)) == expected
def test_ignore_missing_slashes():
"""Based on TEST(ExtensionURLPatternTest, IgnoreMissingBackslashes)."""
pattern1 = urlmatch.UrlPattern("http://www.example.com/example")
pattern2 = urlmatch.UrlPattern("http://www.example.com/example/*")
url1 = QUrl('http://www.example.com/example')
url2 = QUrl('http://www.example.com/example/')
# Same patterns should match same URLs.
assert pattern1.matches(url1)
assert pattern2.matches(url1)
# The not terminated path should match the terminated pattern.
assert pattern2.matches(url1)
# The terminated path however should not match the unterminated pattern.
assert not pattern1.matches(url2)
def test_trailing_slash():
"""Contrary to Chromium, we allow to leave off a trailing slash."""
url = QUrl('http://www.example.com/')
pattern = urlmatch.UrlPattern('http://www.example.com')
assert pattern.matches(url)
@pytest.mark.parametrize('pattern', ['*://example.com/*',
'*://example.com./*'])
@pytest.mark.parametrize('url', ['http://example.com/',
'http://example.com./'])
def test_trailing_dot_domain(pattern, url):
"""Both patterns should match trailing dot and non trailing dot domains.
More information about this not obvious behavior can be found in [1].
RFC 1738 [2] specifies clearly that the <host> part of a URL is supposed to
contain a fully qualified domain name:
3.1. Common Internet Scheme Syntax
//<user>:<password>@<host>:<port>/<url-path>
host
The fully qualified domain name of a network host
[1] http://www.dns-sd.org./TrailingDotsInDomainNames.html
[2] http://www.ietf.org/rfc/rfc1738.txt
"""
assert urlmatch.UrlPattern(pattern).matches(QUrl(url))
class TestUncanonicalizedUrl:
"""Test that URLPattern properly canonicalizes uncanonicalized hosts.
Equivalent to Chromium's TEST(ExtensionURLPatternTest, UncanonicalizedUrl).
"""
@pytest.mark.parametrize('url', [
'https://google.com',
'https://maps.google.com',
])
def test_lowercase(self, url):
"""Simple case: canonicalization should lowercase the host.
This is important, since gOoGle.com would never be matched in
practice.
"""
pattern = urlmatch.UrlPattern('*://*.gOoGle.com/*')
assert pattern.matches(QUrl(url))
@pytest.mark.parametrize('url', [
'https://ɡoogle.com',
'https://xn--oogle-qmc.com/',
])
def test_punycode(self, url):
"""Trickier case: internationalization with UTF8 characters.
The first 'g' isn't actually a 'g'.
"""
pattern = urlmatch.UrlPattern('https://*.ɡoogle.com/*')
assert pattern.matches(QUrl(url))
@pytest.mark.xfail(reason="Gets accepted by urllib.parse")
def test_failing_canonicalization(self):
"""Sometimes, canonicalization can fail.
Such as here, where we have invalid unicode characters. In that case,
URLPattern parsing should also fail.
This fails in Chromium, but Python's urllib.parse.urlparse happily
tries to parse it...
"""
with pytest.raises(urlmatch.ParseError):
urlmatch.UrlPattern('https://\xef\xb7\x90zyx.com/*')
@pytest.mark.xfail(reason="We return the original string")
@pytest.mark.parametrize('pattern_str, string, host', [
('*://*.gOoGle.com/*',
'*://*.google.com/*',
'google.com'),
('https://*.ɡoogle.com/*',
'https://*.xn--oogle-qmc.com/*',
'xn--oogle-qmc.com'),
])
def test_str(self, pattern_str, string, host):
"""Test that str() and .host get the canonicalized string.
Contrary to Chromium, we return the original values here.
"""
pattern = urlmatch.UrlPattern(pattern_str)
assert str(pattern) == string
assert pattern.host == host
def test_urlpattern_benchmark(benchmark):
url = QUrl('https://www.example.com/barfoobar')
def run():
up = urlmatch.UrlPattern('https://*.example.com/*foo*')
up.matches(url)
benchmark(run)
URL_TEXT = hst.text(alphabet=string.ascii_letters)
@hypothesis.given(pattern=hst.builds(
lambda *a: ''.join(a),
# Scheme
hst.sampled_from(['*', 'http', 'file']),
# Separator
hst.sampled_from([':', '://']),
# Host
hst.one_of(hst.just('*'),
hst.builds(lambda *a: ''.join(a), hst.just('*.'), URL_TEXT),
URL_TEXT),
# Port
hst.one_of(hst.just(''),
hst.builds(lambda *a: ''.join(a), hst.just(':'),
hst.integers(min_value=0,
max_value=65535).map(str))),
# Path
hst.one_of(hst.just(''),
hst.builds(lambda *a: ''.join(a), hst.just('/'), URL_TEXT))
))
def test_urlpattern_hypothesis(pattern):
try:
up = urlmatch.UrlPattern(pattern)
except urlmatch.ParseError:
return
up.matches(QUrl('https://www.example.com/'))
@pytest.mark.parametrize('text1, text2, equal', [
# schemes
("http://en.google.com/blah/*/foo",
"https://en.google.com/blah/*/foo",
False),
("https://en.google.com/blah/*/foo",
"https://en.google.com/blah/*/foo",
True),
("https://en.google.com/blah/*/foo",
"ftp://en.google.com/blah/*/foo",
False),
# subdomains
("https://en.google.com/blah/*/foo",
"https://fr.google.com/blah/*/foo",
False),
("https://www.google.com/blah/*/foo",
"https://*.google.com/blah/*/foo",
False),
("https://*.google.com/blah/*/foo",
"https://*.google.com/blah/*/foo",
True),
# domains
("http://en.example.com/blah/*/foo",
"http://en.google.com/blah/*/foo",
False),
# ports
("http://en.google.com:8000/blah/*/foo",
"http://en.google.com/blah/*/foo",
False),
("http://fr.google.com:8000/blah/*/foo",
"http://fr.google.com:8000/blah/*/foo",
True),
("http://en.google.com:8000/blah/*/foo",
"http://en.google.com:8080/blah/*/foo",
False),
# paths
("http://en.google.com/blah/*/foo",
"http://en.google.com/blah/*",
False),
("http://en.google.com/*",
"http://en.google.com/",
False),
("http://en.google.com/*",
"http://en.google.com/*",
True),
# all_urls
("<all_urls>",
"<all_urls>",
True),
("<all_urls>",
"http://*/*",
False)
])
def test_equal(text1, text2, equal):
pat1 = urlmatch.UrlPattern(text1)
pat2 = urlmatch.UrlPattern(text2)
assert (pat1 == pat2) == equal
assert (hash(pat1) == hash(pat2)) == equal
def test_equal_string():
assert urlmatch.UrlPattern("<all_urls>") != '<all_urls>'
def test_repr():
pat = urlmatch.UrlPattern('https://www.example.com/')
expected = ("qutebrowser.utils.urlmatch.UrlPattern("
"pattern='https://www.example.com/')")
assert repr(pat) == expected
def test_str():
text = 'https://www.example.com/'
pat = urlmatch.UrlPattern(text)
assert str(pat) == text
| gpl-3.0 |
abhiQmar/servo | tests/wpt/harness/wptrunner/wptmanifest/tests/test_serializer.py | 59 | 4791 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import unittest
from cStringIO import StringIO
import pytest
from .. import parser, serializer
class TokenizerTest(unittest.TestCase):
def setUp(self):
self.serializer = serializer.ManifestSerializer()
self.parser = parser.Parser()
def serialize(self, input_str):
return self.serializer.serialize(self.parser.parse(input_str))
def compare(self, input_str, expected=None):
if expected is None:
expected = input_str
expected = expected.encode("utf8")
actual = self.serialize(input_str)
self.assertEquals(actual, expected)
def test_0(self):
self.compare("""key: value
[Heading 1]
other_key: other_value
""")
def test_1(self):
self.compare("""key: value
[Heading 1]
other_key:
if a or b: other_value
""")
def test_2(self):
self.compare("""key: value
[Heading 1]
other_key:
if a or b: other_value
fallback_value
""")
def test_3(self):
self.compare("""key: value
[Heading 1]
other_key:
if a == 1: other_value
fallback_value
""")
def test_4(self):
self.compare("""key: value
[Heading 1]
other_key:
if a == "1": other_value
fallback_value
""")
def test_5(self):
self.compare("""key: value
[Heading 1]
other_key:
if a == "abc"[1]: other_value
fallback_value
""")
def test_6(self):
self.compare("""key: value
[Heading 1]
other_key:
if a == "abc"[c]: other_value
fallback_value
""")
def test_7(self):
self.compare("""key: value
[Heading 1]
other_key:
if (a or b) and c: other_value
fallback_value
""",
"""key: value
[Heading 1]
other_key:
if a or b and c: other_value
fallback_value
""")
def test_8(self):
self.compare("""key: value
[Heading 1]
other_key:
if a or (b and c): other_value
fallback_value
""")
def test_9(self):
self.compare("""key: value
[Heading 1]
other_key:
if not (a and b): other_value
fallback_value
""")
def test_10(self):
self.compare("""key: value
[Heading 1]
some_key: some_value
[Heading 2]
other_key: other_value
""")
def test_11(self):
self.compare("""key:
if not a and b and c and d: true
""")
def test_12(self):
self.compare("""[Heading 1]
key: [a:1, b:2]
""")
def test_13(self):
self.compare("""key: [a:1, "b:#"]
""")
def test_14(self):
self.compare("""key: [","]
""")
def test_15(self):
self.compare("""key: ,
""")
def test_16(self):
self.compare("""key: ["]", b]
""")
def test_17(self):
self.compare("""key: ]
""")
def test_18(self):
self.compare("""key: \]
""", """key: ]
""")
def test_escape_0(self):
self.compare(r"""k\t\:y: \a\b\f\n\r\t\v""",
r"""k\t\:y: \x07\x08\x0c\n\r\t\x0b
""")
def test_escape_1(self):
self.compare(r"""k\x00: \x12A\x45""",
r"""k\x00: \x12AE
""")
def test_escape_2(self):
self.compare(r"""k\u0045y: \u1234A\uABc6""",
u"""kEy: \u1234A\uabc6
""")
def test_escape_3(self):
self.compare(r"""k\u0045y: \u1234A\uABc6""",
u"""kEy: \u1234A\uabc6
""")
def test_escape_4(self):
self.compare(r"""key: '\u1234A\uABc6'""",
u"""key: \u1234A\uabc6
""")
def test_escape_5(self):
self.compare(r"""key: [\u1234A\uABc6]""",
u"""key: [\u1234A\uabc6]
""")
def test_escape_6(self):
self.compare(r"""key: [\u1234A\uABc6\,]""",
u"""key: ["\u1234A\uabc6,"]
""")
def test_escape_7(self):
self.compare(r"""key: [\,\]\#]""",
r"""key: [",]#"]
""")
def test_escape_8(self):
self.compare(r"""key: \#""",
r"""key: "#"
""")
@pytest.mark.xfail(sys.maxunicode == 0xFFFF, reason="narrow unicode")
def test_escape_9(self):
self.compare(r"""key: \U10FFFFabc""",
u"""key: \U0010FFFFabc
""")
def test_escape_10(self):
self.compare(r"""key: \u10FFab""",
u"""key: \u10FFab
""")
def test_escape_11(self):
self.compare(r"""key: \\ab
""")
def test_atom_1(self):
self.compare(r"""key: @True
""")
def test_atom_2(self):
self.compare(r"""key: @False
""")
def test_atom_3(self):
self.compare(r"""key: @Reset
""")
def test_atom_4(self):
self.compare(r"""key: [a, @Reset, b]
""")
| mpl-2.0 |
incaser/odoo-odoo | addons/portal/__init__.py | 382 | 1140 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2011 OpenERP S.A (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import portal
import mail_thread
import mail_mail
import mail_message
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
zetaops/ulakbus | tests/test_ders_ve_sinav_programi_hazirla.py | 1 | 9633 | # -*- coding: utf-8 -*-
# Copyright (C) 2015 ZetaOps Inc.
#
# This file is licensed under the GNU General Public License v3
# (GPLv3). See LICENSE.txt for details.
from pyoko.db.adapter.db_riak import BlockSave
from ulakbus.models import User, DersEtkinligi, SinavEtkinligi, Donem, Room
from zengine.lib.test_utils import BaseTestCase
import time
class TestCase(BaseTestCase):
def test_ders_programi_yap(self):
usr = User.objects.get(username='ders_programi_koordinatoru_1')
unit = usr.role_set[0].role.unit()
ders_etkinligi = DersEtkinligi.objects.filter(bolum=unit, donem=Donem.guncel_donem())
published_true = ders_etkinligi.filter(published=True)
with BlockSave(DersEtkinligi, query_dict={'published': False}):
for pt in published_true:
pt.published = False
pt.save()
published_false_count = DersEtkinligi.objects.filter(bolum=unit, donem=Donem.guncel_donem(),
published=False).count()
self.prepare_client("/ders_programi_hazirla", user=usr)
resp = self.client.post()
assert resp.json['msgbox']['title'] == "Yayınlanmamış Program Var!"
self.client.post(form={'devam': 1})
self.client.post(cmd='incele')
for i in range(2):
if i == 0:
                # Classroom search - no record found
ara_form = {'arama_button': 1,
'arama_sec': 1,
'arama_text': "C4034"}
title = "Kayıt Bulunamadı"
else:
                # Instructor search - no record found
ara_form = {'arama_button': 1,
'arama_sec': 2,
'arama_text': "Baba Zula"}
title = "Kayıt Bulunamadı"
resp = self.client.post(form=ara_form)
assert resp.json['msgbox']['title'] == title
self.client.post(form={'devam': 1})
if i == 0:
                # Classroom search - record found
ara_form = {'arama_button': 1,
'arama_sec': 1,
'arama_text': "C50610"}
title = "C50610 C608 - CAD Laboratuarları 38 - Detaylı Zaman Tablosu"
else:
                # Instructor search - record found
ara_form = {'arama_button': 1,
'arama_sec': 2,
'arama_text': "İsmet Tarhan"}
title = "İsmet Tarhan - Detaylı Zaman Tablosu"
resp = self.client.post(form=ara_form)
assert resp.json['objects'][1]['title'] == title
self.client.post(form={'tamamla': 1})
resp = self.client.post(cmd='vazgec')
assert 'incele' and 'yayinla' in resp.json['forms']['model'].keys()
resp = self.client.post(cmd='bitir')
assert resp.json['msgbox']['title'] == "Program Yayınlandı!"
time.sleep(1)
resp = self.client.post()
assert resp.json['msgbox']['title'] == "Yayınlanmış Program Var!"
published_true = DersEtkinligi.objects.filter(bolum=unit, published=True, donem=Donem.guncel_donem())
assert published_false_count == len(published_true)
self.client.set_path("/derslik_ders_programlari")
resp = self.client.post()
derslikler = [etkinlik.room for etkinlik in published_true]
assert len(resp.json['forms']['form'][2]['titleMap']) == len(derslikler)
resp = self.client.post(form={"ileri": 1, "derslik": "3rPQ4bB2lDtxdCE41RBoNqZM19f"})
num_of_ders_etkinlikleri = DersEtkinligi.objects.filter(room_id="3rPQ4bB2lDtxdCE41RBoNqZM19f", published=True,
donem=Donem.guncel_donem())
count_of_ders_etkinlikleri = 0
for i in range(1, len(resp.json['objects'])):
for day in resp.json['objects'][i]['fields']:
if resp.json['objects'][i]['fields'][day]:
count_of_ders_etkinlikleri += 1
assert len(num_of_ders_etkinlikleri) == count_of_ders_etkinlikleri
with BlockSave(DersEtkinligi, query_dict={'published': False}):
for de in published_true:
de.published = False
de.save()
assert published_false_count == DersEtkinligi.objects.filter(bolum=unit, published=False, donem=Donem.guncel_donem()).count()
def test_sinav_programi_yap(self):
"""
        The classroom exam schedule workflow consists of the following steps.
        First, the exam activities are checked.
        If there are unpublished exam activities, the workflow moves to the
        "inform" step, where a message stating that unpublished exams exist
        is shown on screen.
        In the first step a classroom is selected.
        The number of classrooms fetched from the database is compared with
        the number of classrooms returned by the server, and the result is
        verified.
        In the second step the exam schedule of the selected classroom is
        fetched. The number of exam activities fetched from the database is
        compared with the number of exam activities returned by the server,
        and the result is verified.
"""
usr = User.objects.get(username='ders_programi_koordinatoru_1')
unit = usr.role_set[0].role.unit()
sinav_etkinligi = SinavEtkinligi.objects.filter(bolum=unit, donem=Donem.guncel_donem())
published_true = sinav_etkinligi.filter(published=True)
with BlockSave(SinavEtkinligi, query_dict={'published': False}):
for pt in published_true:
pt.published = False
pt.save()
published_false_count = SinavEtkinligi.objects.filter(bolum=unit, donem=Donem.guncel_donem(),
published=False).count()
self.prepare_client('/sinav_programi_hazirla', user=usr)
resp = self.client.post()
assert resp.json['msgbox']['title'] == "Yayınlanmamış Program Var!"
self.client.set_path("/derslik_sinav_programlari")
resp = self.client.post()
assert "msgbox" in resp.json
self.client.set_path('/sinav_programi_hazirla')
self.client.post()
self.client.post(form={'devam': 1})
self.client.post(cmd='incele')
for i in range(2):
if i == 0:
                # Classroom search - no record found
ara_form = {'arama_button': 1,
'arama_sec': 1,
'arama_text': "C4034"}
title = "Kayıt Bulunamadı"
else:
                # Instructor search - no record found
ara_form = {'arama_button': 1,
'arama_sec': 2,
'arama_text': "Baba Zula"}
title = "Kayıt Bulunamadı"
resp = self.client.post(form=ara_form)
assert resp.json['msgbox']['title'] == title
self.client.post(form={'devam': 1})
if i == 0:
                # Classroom search - record found
ara_form = {'arama_button': 1,
'arama_sec': 1,
'arama_text': "M50616"}
title = "M50616 C402 - Theatre 44 - Detaylı Zaman Tablosu"
else:
                # Instructor search - record found
ara_form = {'arama_button': 1,
'arama_sec': 2,
'arama_text': "İsmet Tarhan"}
title = "İsmet Tarhan - Detaylı Zaman Tablosu"
resp = self.client.post(form=ara_form)
assert resp.json['objects'][1]['title'] == title
self.client.post(form={'tamamla': 1})
resp = self.client.post(cmd='vazgec')
assert 'incele' and 'yayinla' in resp.json['forms']['model'].keys()
resp = self.client.post(cmd='bitir')
assert resp.json['msgbox']['title'] == "Program Yayınlandı!"
time.sleep(1)
resp = self.client.post()
assert resp.json['msgbox']['title'] == "Yayınlanmış Program Var!"
published_true = SinavEtkinligi.objects.filter(bolum=unit, published=True, donem=Donem.guncel_donem())
assert published_false_count == len(published_true)
self.client.set_path("derslik_sinav_programlari")
resp = self.client.post()
derslikler = [s_yerleri.room for s_etkinlik in published_true
for s_yerleri in s_etkinlik.SinavYerleri if s_etkinlik.SinavYerleri]
assert len(derslikler) == len(resp.json['forms']['form'][2]['titleMap'])
resp = self.client.post(form={"ileri": 1, "derslik": 'Jju1xbrWBsMoFb9fPyNpLnwPuW9'})
room = Room.objects.get("Jju1xbrWBsMoFb9fPyNpLnwPuW9")
num_of_sinav_etkinlikleri = [s for s in SinavEtkinligi.objects if room in s.SinavYerleri]
count_of_sinav_etkinlikleri = 0
for i in range(1, len(resp.json['objects'])):
for day in resp.json['objects'][i]['fields']:
if resp.json['objects'][i]['fields'][day]:
count_of_sinav_etkinlikleri += 1
assert len(num_of_sinav_etkinlikleri) == count_of_sinav_etkinlikleri
with BlockSave(SinavEtkinligi, query_dict={'published': False}):
for se in published_true:
se.published = False
se.save()
assert published_false_count == SinavEtkinligi.objects.filter(bolum=unit, published=False, donem=Donem.guncel_donem()).count()
| gpl-3.0 |
plone/plone.server | src/plone.server/plone/server/api/content.py | 1 | 14057 | # -*- coding: utf-8 -*-
from aiohttp.web_exceptions import HTTPMethodNotAllowed
from aiohttp.web_exceptions import HTTPNotFound
from aiohttp.web_exceptions import HTTPUnauthorized
from dateutil.tz import tzlocal
from plone.server import app_settings
from plone.server import configure
from plone.server import _
from plone.server.api.service import Service
from plone.server.browser import ErrorResponse
from plone.server.browser import Response
from plone.server.content import create_content_in_container
from plone.server.events import notify
from plone.server.events import ObjectFinallyCreatedEvent
from plone.server.events import ObjectFinallyDeletedEvent
from plone.server.events import ObjectFinallyModifiedEvent
from plone.server.events import ObjectFinallyVisitedEvent
from plone.server.events import ObjectPermissionsModifiedEvent
from plone.server.events import ObjectPermissionsViewEvent
from plone.server.exceptions import ConflictIdOnContainer
from plone.server.exceptions import PreconditionFailed
from plone.server.interfaces import IAbsoluteURL
from plone.server.interfaces import IResource
from plone.server.json.exceptions import DeserializationError
from plone.server.interfaces import IResourceDeserializeFromJson
from plone.server.interfaces import IResourceSerializeToJson
from plone.server.utils import get_authenticated_user_id
from plone.server.utils import iter_parents
from plone.server.auth import settings_for_object
from zope.component import getMultiAdapter
from zope.component import queryMultiAdapter
from plone.server.auth.role import local_roles
from plone.server.interfaces import IPrincipalPermissionMap
from plone.server.interfaces import IPrincipalRoleManager
from plone.server.interfaces import IRolePermissionManager
from plone.server.interfaces import IPrincipalPermissionManager
from plone.server.interfaces import IPrincipalRoleMap
from plone.server.interfaces import IRolePermissionMap
from zope.security.interfaces import IInteraction
_zone = tzlocal()
@configure.service(context=IResource, method='GET', permission='plone.ViewContent')
class DefaultGET(Service):
async def __call__(self):
serializer = getMultiAdapter(
(self.context, self.request),
IResourceSerializeToJson)
result = serializer()
await notify(ObjectFinallyVisitedEvent(self.context))
return result
@configure.service(context=IResource, method='POST', permission='plone.AddContent')
class DefaultPOST(Service):
async def __call__(self):
"""To create a content."""
data = await self.get_data()
type_ = data.get('@type', None)
id_ = data.get('id', None)
behaviors = data.get('@behaviors', None)
if '__acl__' in data:
            # changing permissions is not allowed through this endpoint
del data['__acl__']
if not type_:
return ErrorResponse(
'RequiredParam',
_("Property '@type' is required"))
# Generate a temporary id if the id is not given
if not id_:
new_id = None
else:
new_id = id_
user = get_authenticated_user_id(self.request)
# Create object
try:
obj = create_content_in_container(
self.context, type_, new_id, id=new_id, creators=(user,),
contributors=(user,))
except PreconditionFailed as e:
return ErrorResponse(
'PreconditionFailed',
str(e),
status=412)
except ConflictIdOnContainer as e:
return ErrorResponse(
'ConflictId',
str(e),
status=409)
except ValueError as e:
return ErrorResponse(
'CreatingObject',
str(e),
status=400)
for behavior in behaviors or ():
obj.add_behavior(behavior)
# Update fields
deserializer = queryMultiAdapter((obj, self.request),
IResourceDeserializeFromJson)
if deserializer is None:
return ErrorResponse(
'DeserializationError',
'Cannot deserialize type {}'.format(obj.portal_type),
status=501)
try:
await deserializer(data, validate_all=True)
except DeserializationError as e:
return ErrorResponse(
'DeserializationError',
str(e),
exc=e,
status=400)
# Local Roles assign owner as the creator user
roleperm = IPrincipalRoleManager(obj)
roleperm.assign_role_to_principal(
'plone.Owner',
user)
await notify(ObjectFinallyCreatedEvent(obj, data))
absolute_url = queryMultiAdapter((obj, self.request), IAbsoluteURL)
headers = {
'Access-Control-Expose-Headers': 'Location',
'Location': absolute_url()
}
serializer = queryMultiAdapter(
(obj, self.request),
IResourceSerializeToJson
)
return Response(response=serializer(), headers=headers, status=201)
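# Illustrative request body for the POST service above (sketch only; the type
# name, id and behavior dotted name are placeholders, not values defined by
# this module):
#
#   {
#       "@type": "Document",
#       "id": "my-document",
#       "@behaviors": ["my.addon.interfaces.IMyBehavior"]
#   }
#
# '@type' is required, 'id' is optional (an id is generated when missing) and
# '@behaviors' is an optional list applied through add_behavior().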
@configure.service(context=IResource, method='PUT', permission='plone.ModifyContent')
class DefaultPUT(Service):
pass
@configure.service(context=IResource, method='PATCH', permission='plone.ModifyContent')
class DefaultPATCH(Service):
async def __call__(self):
data = await self.get_data()
behaviors = data.get('@behaviors', None)
for behavior in behaviors or ():
self.context.add_behavior(behavior)
deserializer = queryMultiAdapter((self.context, self.request),
IResourceDeserializeFromJson)
if deserializer is None:
return ErrorResponse(
'DeserializationError',
'Cannot deserialize type {}'.format(self.context.portal_type),
status=501)
try:
await deserializer(data)
except DeserializationError as e:
return ErrorResponse(
'DeserializationError',
str(e),
status=400)
await notify(ObjectFinallyModifiedEvent(self.context, data))
return Response(response={}, status=204)
@configure.service(context=IResource, method='GET', permission='plone.SeePermissions',
name='@sharing')
async def sharing_get(context, request):
roleperm = IRolePermissionMap(context)
prinperm = IPrincipalPermissionMap(context)
prinrole = IPrincipalRoleMap(context)
result = {
'local': {},
'inherit': []
}
result['local']['roleperm'] = roleperm._bycol
result['local']['prinperm'] = prinperm._bycol
result['local']['prinrole'] = prinrole._bycol
for obj in iter_parents(context):
roleperm = IRolePermissionMap(obj)
prinperm = IPrincipalPermissionMap(obj)
prinrole = IPrincipalRoleMap(obj)
result['inherit'].append({
'@id': IAbsoluteURL(obj, request)(),
'roleperm': roleperm._bycol,
'prinperm': prinperm._bycol,
'prinrole': prinrole._bycol,
})
await notify(ObjectPermissionsViewEvent(context))
return result
@configure.service(context=IResource, method='GET', permission='plone.SeePermissions',
name='@all_permissions')
async def all_permissions(context, request):
result = settings_for_object(context)
await notify(ObjectPermissionsViewEvent(context))
return result
PermissionMap = {
'prinrole': {
'Allow': 'assign_role_to_principal',
'Deny': 'remove_role_from_principal',
'AllowSingle': 'assign_role_to_principal_no_inherit',
'Unset': 'unset_role_for_principal'
},
'roleperm': {
'Allow': 'grant_permission_to_role',
'Deny': 'deny_permission_to_role',
'AllowSingle': 'grant_permission_to_role_no_inherit',
'Unset': 'unset_permission_from_role'
},
'prinperm': {
'Allow': 'grant_permission_to_principal',
'Deny': 'deny_permission_to_principal',
'AllowSingle': 'grant_permission_to_principal_no_inherit',
'Unset': 'unset_permission_for_principal'
}
}
@configure.service(context=IResource, method='POST', permission='plone.ChangePermissions',
name='@sharing')
async def sharing_post(context, request):
"""Change permissions"""
lroles = local_roles()
data = await request.json()
if 'prinrole' not in data and \
'roleperm' not in data and \
'prinperm' not in data:
raise AttributeError('prinrole or roleperm or prinperm missing')
if 'type' not in data:
raise AttributeError('type missing')
setting = data['type']
# we need to check if we are changing any info
changed = False
if 'prinrole' in data:
if setting not in PermissionMap['prinrole']:
raise AttributeError('Invalid Type')
manager = IPrincipalRoleManager(context)
operation = PermissionMap['prinrole'][setting]
func = getattr(manager, operation)
for user, roles in data['prinrole'].items():
for role in roles:
if role in lroles:
changed = True
func(role, user)
else:
raise KeyError('No valid local role')
if 'prinperm' in data:
if setting not in PermissionMap['prinperm']:
raise AttributeError('Invalid Type')
manager = IPrincipalPermissionManager(context)
operation = PermissionMap['prinperm'][setting]
func = getattr(manager, operation)
for user, permissions in data['prinperm'].items():
for permision in permissions:
changed = True
func(permision, user)
if 'roleperm' in data:
if setting not in PermissionMap['roleperm']:
raise AttributeError('Invalid Type')
manager = IRolePermissionManager(context)
operation = PermissionMap['roleperm'][setting]
func = getattr(manager, operation)
for role, permissions in data['roleperm'].items():
for permission in permissions:
changed = True
func(permission, role)
context._p_changed = 1
if changed:
await notify(ObjectPermissionsModifiedEvent(context, data))
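# Illustrative @sharing POST payload (sketch only; the principal id is a
# placeholder, while the role and permission names are ones used elsewhere in
# this module):
#
#   {
#       "type": "Allow",
#       "prinrole": {"some.user.id": ["plone.Owner"]},
#       "roleperm": {"plone.Owner": ["plone.ViewContent"]}
#   }
#
# 'type' must be one of the setting names defined in PermissionMap (Allow,
# Deny, AllowSingle, Unset) and at least one of prinrole, prinperm or roleperm
# must be given.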
@configure.service(
context=IResource, method='GET', permission='plone.AccessContent',
name='@canido')
async def can_i_do(context, request):
if 'permission' not in request.GET:
raise TypeError('No permission param')
permission = request.GET['permission']
return IInteraction(request).check_permission(permission, context)
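# Illustrative request (the URL shape is an assumption):
#   GET <object-url>/@canido?permission=plone.ViewContent
# returns a boolean telling whether the current interaction grants that
# permission on the context.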
@configure.service(context=IResource, method='DELETE', permission='plone.DeleteContent')
class DefaultDELETE(Service):
async def __call__(self):
content_id = self.context.id
del self.context.__parent__[content_id]
await notify(ObjectFinallyDeletedEvent(self.context))
@configure.service(context=IResource, method='OPTIONS', permission='plone.AccessPreflight')
class DefaultOPTIONS(Service):
"""Preflight view for Cors support on DX content."""
def getRequestMethod(self): # noqa
"""Get the requested method."""
return self.request.headers.get(
'Access-Control-Request-Method', None)
async def preflight(self):
"""We need to check if there is cors enabled and is valid."""
headers = {}
if not app_settings['cors']:
return {}
origin = self.request.headers.get('Origin', None)
if not origin:
raise HTTPNotFound(text='Origin this header is mandatory')
requested_method = self.getRequestMethod()
if not requested_method:
raise HTTPNotFound(
text='Access-Control-Request-Method this header is mandatory')
requested_headers = (
self.request.headers.get('Access-Control-Request-Headers', ()))
if requested_headers:
requested_headers = map(str.strip, requested_headers.split(', '))
requested_method = requested_method.upper()
allowed_methods = app_settings['cors']['allow_methods']
if requested_method not in allowed_methods:
raise HTTPMethodNotAllowed(
requested_method, allowed_methods,
text='Access-Control-Request-Method Method not allowed')
supported_headers = app_settings['cors']['allow_headers']
if '*' not in supported_headers and requested_headers:
supported_headers = [s.lower() for s in supported_headers]
for h in requested_headers:
if not h.lower() in supported_headers:
raise HTTPUnauthorized(
text='Access-Control-Request-Headers Header %s not allowed' % h)
supported_headers = [] if supported_headers is None else supported_headers
requested_headers = [] if requested_headers is None else requested_headers
supported_headers = set(supported_headers) | set(requested_headers)
headers['Access-Control-Allow-Headers'] = ','.join(
supported_headers)
headers['Access-Control-Allow-Methods'] = ','.join(
app_settings['cors']['allow_methods'])
headers['Access-Control-Max-Age'] = str(app_settings['cors']['max_age'])
return headers
async def render(self):
"""Need to be overwritten in case you implement OPTIONS."""
return {}
async def __call__(self):
"""Apply CORS on the OPTIONS view."""
headers = await self.preflight()
resp = await self.render()
if isinstance(resp, Response):
headers.update(resp.headers)
resp.headers = headers
return resp
return Response(response=resp, headers=headers, status=200)
| bsd-2-clause |
Zhaoyanzhang/-myflasky | venv/lib/python2.7/site-packages/flask_pagedown/widgets.py | 8 | 1872 | from wtforms.widgets import HTMLString, TextArea
pagedown_pre_html = '<div class="flask-pagedown">'
pagedown_post_html = '</div>'
preview_html = '''
<div class="flask-pagedown-preview" id="flask-pagedown-%(field)s-preview"></div>
<script type="text/javascript">
f = function() {
if (typeof flask_pagedown_converter === "undefined")
flask_pagedown_converter = Markdown.getSanitizingConverter().makeHtml;
var textarea = document.getElementById("flask-pagedown-%(field)s");
var preview = document.getElementById("flask-pagedown-%(field)s-preview");
textarea.onkeyup = function() { preview.innerHTML = flask_pagedown_converter(textarea.value); }
textarea.onkeyup.call(textarea);
}
if (document.readyState === 'complete')
f();
else if (window.addEventListener)
window.addEventListener("load", f, false);
else if (window.attachEvent)
window.attachEvent("onload", f);
else
f();
</script>
'''
class PageDown(TextArea):
def __call__(self, field, **kwargs):
show_input = True
show_preview = True
if 'only_input' in kwargs or 'only_preview' in kwargs:
show_input = kwargs.pop('only_input', False)
show_preview = kwargs.pop('only_preview', False)
if not show_input and not show_preview:
raise ValueError('One of show_input and show_preview must be true')
html = ''
if show_input:
class_ = kwargs.pop('class', '').split() + \
kwargs.pop('class_', '').split()
class_ += ['flask-pagedown-input']
html += pagedown_pre_html + super(PageDown, self).__call__(
field, id='flask-pagedown-' + field.name,
class_=' '.join(class_), **kwargs) + pagedown_post_html
if show_preview:
html += preview_html % {'field': field.name}
return HTMLString(html)
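# Minimal usage sketch (assumes WTForms/Flask-WTF are installed; the form and
# field names are illustrative, not part of this module):
#
#   from flask_wtf import FlaskForm
#   from wtforms.fields import TextAreaField
#
#   class PostForm(FlaskForm):
#       body = TextAreaField('Body', widget=PageDown())
#
# Rendering the field with only_input=True or only_preview=True limits the
# output to just the editor textarea or just the live preview div.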
| mit |
LaoZhongGu/kbengine | kbe/res/scripts/common/Lib/test/test_pow.py | 177 | 4593 | import test.support, unittest
class PowTest(unittest.TestCase):
def powtest(self, type):
if type != float:
for i in range(-1000, 1000):
self.assertEqual(pow(type(i), 0), 1)
self.assertEqual(pow(type(i), 1), type(i))
self.assertEqual(pow(type(0), 1), type(0))
self.assertEqual(pow(type(1), 1), type(1))
for i in range(-100, 100):
self.assertEqual(pow(type(i), 3), i*i*i)
pow2 = 1
for i in range(0, 31):
self.assertEqual(pow(2, i), pow2)
if i != 30 : pow2 = pow2*2
for othertype in (int,):
for i in list(range(-10, 0)) + list(range(1, 10)):
ii = type(i)
for j in range(1, 11):
jj = -othertype(j)
pow(ii, jj)
for othertype in int, float:
for i in range(1, 100):
zero = type(0)
exp = -othertype(i/10.0)
if exp == 0:
continue
self.assertRaises(ZeroDivisionError, pow, zero, exp)
il, ih = -20, 20
jl, jh = -5, 5
kl, kh = -10, 10
asseq = self.assertEqual
if type == float:
il = 1
asseq = self.assertAlmostEqual
elif type == int:
jl = 0
elif type == int:
jl, jh = 0, 15
for i in range(il, ih+1):
for j in range(jl, jh+1):
for k in range(kl, kh+1):
if k != 0:
if type == float or j < 0:
self.assertRaises(TypeError, pow, type(i), j, k)
continue
asseq(
pow(type(i),j,k),
pow(type(i),j)% type(k)
)
def test_powint(self):
self.powtest(int)
def test_powlong(self):
self.powtest(int)
def test_powfloat(self):
self.powtest(float)
def test_other(self):
# Other tests-- not very systematic
self.assertEqual(pow(3,3) % 8, pow(3,3,8))
self.assertEqual(pow(3,3) % -8, pow(3,3,-8))
self.assertEqual(pow(3,2) % -2, pow(3,2,-2))
self.assertEqual(pow(-3,3) % 8, pow(-3,3,8))
self.assertEqual(pow(-3,3) % -8, pow(-3,3,-8))
self.assertEqual(pow(5,2) % -8, pow(5,2,-8))
self.assertEqual(pow(3,3) % 8, pow(3,3,8))
self.assertEqual(pow(3,3) % -8, pow(3,3,-8))
self.assertEqual(pow(3,2) % -2, pow(3,2,-2))
self.assertEqual(pow(-3,3) % 8, pow(-3,3,8))
self.assertEqual(pow(-3,3) % -8, pow(-3,3,-8))
self.assertEqual(pow(5,2) % -8, pow(5,2,-8))
for i in range(-10, 11):
for j in range(0, 6):
for k in range(-7, 11):
if j >= 0 and k != 0:
self.assertEqual(
pow(i,j) % k,
pow(i,j,k)
)
if j >= 0 and k != 0:
self.assertEqual(
pow(int(i),j) % k,
pow(int(i),j,k)
)
def test_bug643260(self):
class TestRpow:
def __rpow__(self, other):
return None
None ** TestRpow() # Won't fail when __rpow__ invoked. SF bug #643260.
def test_bug705231(self):
# -1.0 raised to an integer should never blow up. It did if the
# platform pow() was buggy, and Python didn't worm around it.
eq = self.assertEqual
a = -1.0
# The next two tests can still fail if the platform floor()
# function doesn't treat all large inputs as integers
# test_math should also fail if that is happening
eq(pow(a, 1.23e167), 1.0)
eq(pow(a, -1.23e167), 1.0)
for b in range(-10, 11):
eq(pow(a, float(b)), b & 1 and -1.0 or 1.0)
for n in range(0, 100):
fiveto = float(5 ** n)
# For small n, fiveto will be odd. Eventually we run out of
# mantissa bits, though, and thereafer fiveto will be even.
expected = fiveto % 2.0 and -1.0 or 1.0
eq(pow(a, fiveto), expected)
eq(pow(a, -fiveto), expected)
eq(expected, 1.0) # else we didn't push fiveto to evenness
def test_main():
test.support.run_unittest(PowTest)
if __name__ == "__main__":
test_main()
| lgpl-3.0 |
raghavrv/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 12 | 20126 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
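# Usage pattern exercised throughout these tests (sketch only; hyperparameter
# values are arbitrary). LSHForest is deprecated since 0.19 and emits a
# DeprecationWarning on construction:
#
#   lshf = LSHForest(n_estimators=10, n_candidates=50, random_state=42)
#   lshf.fit(X_train)
#   distances, indices = lshf.kneighbors(X_query, n_neighbors=5)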
def test_lsh_forest_deprecation():
assert_warns_message(DeprecationWarning,
"LSHForest has poor performance and has been "
"deprecated in 0.19. It will be removed "
"in version 0.21.", LSHForest)
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)(
n_candidates=n_candidates)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
print('accuracies:', accuracies)
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)(
n_candidates=500, n_estimators=t)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)(
min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features).reshape(1, -1)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
# Checks whether Returned distances are less than `radius`
# At least one point should be returned when the `radius` is set
# to mean distance from the considering point to other points in
# the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)].reshape(1, -1)
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)].reshape(1, -1)
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
@ignore_warnings
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = ignore_warnings(LSHForest, category=DeprecationWarning)(
min_hash_match=0, n_candidates=n_points, random_state=42).fit(X)
# define a query aligned with the first axis
query = [[1., 0.]]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point form an angle of 45 degrees to the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
# The third point is orthogonal from the query vector hence at a distance
# exactly one:
assert_almost_equal(dists[2], 1)
# The last point is almost colinear but with opposite sign to the query
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
# If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)()
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)(
n_estimators=n_estimators)
ignore_warnings(lshf.fit)(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
# Checks whether inserting array is consistent with fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)()
# Test unfitted estimator
ignore_warnings(lshf.partial_fit)(X)
assert_array_equal(X, lshf._fit_X)
ignore_warnings(lshf.fit)(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
ignore_warnings(lshf.partial_fit)(X_partial_fit)
# size of _input_array = samples + 1 after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
# size of original_indices_[1] = samples + 1
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
# size of trees_[1] = samples + 1
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)(
n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
ignore_warnings(lshf.fit)(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1)
# For zero candidates
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)(
min_hash_match=32)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)(
min_hash_match=31)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = ignore_warnings(LSHForest, category=DeprecationWarning)(
min_hash_match=0)
ignore_warnings(lshf.fit)(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = ignore_warnings(LSHForest, category=DeprecationWarning)(
radius=1, random_state=0).fit(X1)
forest_dense = ignore_warnings(LSHForest, category=DeprecationWarning)(
radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| bsd-3-clause |
wongkwunkit/jinja2 | tests/test_utils.py | 22 | 1969 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.utils
~~~~~~~~~~~~~~~~~~~~~~
Tests utilities jinja uses.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import gc
import pytest
import pickle
from jinja2.utils import LRUCache, escape, object_type_repr
@pytest.mark.utils
@pytest.mark.lrucache
class TestLRUCache():
def test_simple(self):
d = LRUCache(3)
d["a"] = 1
d["b"] = 2
d["c"] = 3
d["a"]
d["d"] = 4
assert len(d) == 3
assert 'a' in d and 'c' in d and 'd' in d and 'b' not in d
def test_pickleable(self):
cache = LRUCache(2)
cache["foo"] = 42
cache["bar"] = 23
cache["foo"]
for protocol in range(3):
copy = pickle.loads(pickle.dumps(cache, protocol))
assert copy.capacity == cache.capacity
assert copy._mapping == cache._mapping
assert copy._queue == cache._queue
@pytest.mark.utils
@pytest.mark.helpers
class TestHelpers():
def test_object_type_repr(self):
class X(object):
pass
assert object_type_repr(42) == 'int object'
assert object_type_repr([]) == 'list object'
assert object_type_repr(X()) == 'test_utils.X object'
assert object_type_repr(None) == 'None'
assert object_type_repr(Ellipsis) == 'Ellipsis'
@pytest.mark.utils
@pytest.mark.markupleak
@pytest.mark.skipif(hasattr(escape, 'func_code'),
reason='this test only tests the c extension')
class TestMarkupLeak():
def test_markup_leaks(self):
counts = set()
for count in range(20):
for item in range(1000):
escape("foo")
escape("<foo>")
escape(u"foo")
escape(u"<foo>")
counts.add(len(gc.get_objects()))
assert len(counts) == 1, 'ouch, c extension seems to leak objects'
| bsd-3-clause |
stopstalk/stopstalk-deployment | private/scripts/populate-institute-to-country.py | 1 | 2063 | """
Copyright (c) 2015-2020 Raj Patel([email protected]), StopStalk
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# At least 3 users of an institute must have entered the same country
sql_query = """
SELECT institute, country
FROM auth_user
WHERE institute != "Other" and country != ""
GROUP BY institute, country
HAVING count(*) > 2;
"""
institute_to_country = dict(db.executesql(sql_query))
for institute in institute_to_country:
print institute, "->", institute_to_country[institute]
atable = db.auth_user
cftable = db.custom_friend
updated_count = 0
for record in db(atable.institute.belongs(institute_to_country.keys())).select():
if not record.country:
record.update_record(country=institute_to_country[record.institute])
updated_count += 1
for record in db(cftable.institute.belongs(institute_to_country.keys())).select():
if not record.country:
record.update_record(country=institute_to_country[record.institute])
updated_count += 1
print "Total updated:", updated_count
| mit |
ambikeshwar1991/gnuradio | gnuradio-core/src/python/gnuradio/gr/qa_wavefile.py | 18 | 1854 | #!/usr/bin/env python
#
# Copyright 2008,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import os
from os.path import getsize
g_in_file = os.path.join (os.getenv ("srcdir"), "test_16bit_1chunk.wav")
class test_wavefile(gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001_checkwavread (self):
wf = gr.wavfile_source(g_in_file)
self.assertEqual(wf.sample_rate(), 8000)
def test_002_checkwavcopy (self):
infile = g_in_file
outfile = "test_out.wav"
wf_in = gr.wavfile_source(infile)
wf_out = gr.wavfile_sink(outfile,
wf_in.channels(),
wf_in.sample_rate(),
wf_in.bits_per_sample())
self.tb.connect(wf_in, wf_out)
self.tb.run()
wf_out.close()
self.assertEqual(getsize(infile), getsize(outfile))
in_f = file(infile, 'rb')
out_f = file(outfile, 'rb')
in_data = in_f.read()
out_data = out_f.read()
out_f.close()
os.remove(outfile)
self.assertEqual(in_data, out_data)
if __name__ == '__main__':
gr_unittest.run(test_wavefile, "test_wavefile.xml")
| gpl-3.0 |
laiqiqi886/kbengine | kbe/res/scripts/common/Lib/tkinter/_fix.py | 76 | 2897 | import sys, os
# Delay import _tkinter until we have set TCL_LIBRARY,
# so that Tcl_FindExecutable has a chance to locate its
# encoding directory.
# Unfortunately, we cannot know the TCL_LIBRARY directory
# if we don't know the tcl version, which we cannot find out
# without import Tcl. Fortunately, Tcl will itself look in
# <TCL_LIBRARY>\..\tcl<TCL_VERSION>, so anything close to
# the real Tcl library will do.
# Expand symbolic links on Vista
try:
import ctypes
ctypes.windll.kernel32.GetFinalPathNameByHandleW
except (ImportError, AttributeError):
def convert_path(s):
return s
else:
def convert_path(s):
if isinstance(s, bytes):
s = s.decode("mbcs")
hdir = ctypes.windll.kernel32.\
CreateFileW(s, 0x80, # FILE_READ_ATTRIBUTES
1, # FILE_SHARE_READ
None, 3, # OPEN_EXISTING
0x02000000, # FILE_FLAG_BACKUP_SEMANTICS
None)
if hdir == -1:
# Cannot open directory, give up
return s
buf = ctypes.create_unicode_buffer("", 32768)
res = ctypes.windll.kernel32.\
GetFinalPathNameByHandleW(hdir, buf, len(buf),
0) # VOLUME_NAME_DOS
ctypes.windll.kernel32.CloseHandle(hdir)
if res == 0:
# Conversion failed (e.g. network location)
return s
s = buf[:res]
# Ignore leading \\?\
if s.startswith("\\\\?\\"):
s = s[4:]
if s.startswith("UNC"):
s = "\\" + s[3:]
return s
prefix = os.path.join(sys.base_prefix,"tcl")
if not os.path.exists(prefix):
# devdir/../tcltk/lib
prefix = os.path.join(sys.base_prefix, os.path.pardir, "tcltk", "lib")
prefix = os.path.abspath(prefix)
# if this does not exist, no further search is needed
if os.path.exists(prefix):
prefix = convert_path(prefix)
if "TCL_LIBRARY" not in os.environ:
for name in os.listdir(prefix):
if name.startswith("tcl"):
tcldir = os.path.join(prefix,name)
if os.path.isdir(tcldir):
os.environ["TCL_LIBRARY"] = tcldir
# Compute TK_LIBRARY, knowing that it has the same version
# as Tcl
import _tkinter
ver = str(_tkinter.TCL_VERSION)
if "TK_LIBRARY" not in os.environ:
v = os.path.join(prefix, 'tk'+ver)
if os.path.exists(os.path.join(v, "tclIndex")):
os.environ['TK_LIBRARY'] = v
# We don't know the Tix version, so we must search the entire
# directory
if "TIX_LIBRARY" not in os.environ:
for name in os.listdir(prefix):
if name.startswith("tix"):
tixdir = os.path.join(prefix,name)
if os.path.isdir(tixdir):
os.environ["TIX_LIBRARY"] = tixdir
| lgpl-3.0 |
frederick-masterton/django | django/views/i18n.py | 68 | 11000 | import importlib
import json
import os
import gettext as gettext_module
from django import http
from django.apps import apps
from django.conf import settings
from django.template import Context, Template
from django.utils.translation import check_for_language, to_locale, get_language, LANGUAGE_SESSION_KEY
from django.utils.encoding import smart_text
from django.utils.formats import get_format_modules, get_format
from django.utils._os import upath
from django.utils.http import is_safe_url
from django.utils import six
def set_language(request):
"""
Redirect to a given url while setting the chosen language in the
session or cookie. The url and the language code need to be
specified in the request parameters.
Since this view changes how the user will see the rest of the site, it must
only be accessed as a POST request. If called as a GET request, it will
redirect to the page in the request (the 'next' parameter) without changing
any state.
"""
next = request.POST.get('next', request.GET.get('next'))
if not is_safe_url(url=next, host=request.get_host()):
next = request.META.get('HTTP_REFERER')
if not is_safe_url(url=next, host=request.get_host()):
next = '/'
response = http.HttpResponseRedirect(next)
if request.method == 'POST':
lang_code = request.POST.get('language', None)
if lang_code and check_for_language(lang_code):
if hasattr(request, 'session'):
request.session[LANGUAGE_SESSION_KEY] = lang_code
else:
response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code,
max_age=settings.LANGUAGE_COOKIE_AGE,
path=settings.LANGUAGE_COOKIE_PATH,
domain=settings.LANGUAGE_COOKIE_DOMAIN)
return response
def get_formats():
"""
Returns all formats strings required for i18n to work
"""
FORMAT_SETTINGS = (
'DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT',
'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT',
'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR',
'THOUSAND_SEPARATOR', 'NUMBER_GROUPING',
'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS'
)
result = {}
for module in [settings] + get_format_modules(reverse=True):
for attr in FORMAT_SETTINGS:
result[attr] = get_format(attr)
formats = {}
for k, v in result.items():
if isinstance(v, (six.string_types, int)):
formats[k] = smart_text(v)
elif isinstance(v, (tuple, list)):
formats[k] = [smart_text(value) for value in v]
return formats
js_catalog_template = r"""
{% autoescape off %}
(function (globals) {
var django = globals.django || (globals.django = {});
{% if plural %}
django.pluralidx = function (n) {
var v={{ plural }};
if (typeof(v) == 'boolean') {
return v ? 1 : 0;
} else {
return v;
}
};
{% else %}
django.pluralidx = function (count) { return (count == 1) ? 0 : 1; };
{% endif %}
{% if catalog_str %}
/* gettext library */
django.catalog = {{ catalog_str }};
django.gettext = function (msgid) {
var value = django.catalog[msgid];
if (typeof(value) == 'undefined') {
return msgid;
} else {
return (typeof(value) == 'string') ? value : value[0];
}
};
django.ngettext = function (singular, plural, count) {
var value = django.catalog[singular];
if (typeof(value) == 'undefined') {
return (count == 1) ? singular : plural;
} else {
return value[django.pluralidx(count)];
}
};
django.gettext_noop = function (msgid) { return msgid; };
django.pgettext = function (context, msgid) {
var value = django.gettext(context + '\x04' + msgid);
if (value.indexOf('\x04') != -1) {
value = msgid;
}
return value;
};
django.npgettext = function (context, singular, plural, count) {
var value = django.ngettext(context + '\x04' + singular, context + '\x04' + plural, count);
if (value.indexOf('\x04') != -1) {
value = django.ngettext(singular, plural, count);
}
return value;
};
{% else %}
/* gettext identity library */
django.gettext = function (msgid) { return msgid; };
django.ngettext = function (singular, plural, count) { return (count == 1) ? singular : plural; };
django.gettext_noop = function (msgid) { return msgid; };
django.pgettext = function (context, msgid) { return msgid; };
django.npgettext = function (context, singular, plural, count) { return (count == 1) ? singular : plural; };
{% endif %}
django.interpolate = function (fmt, obj, named) {
if (named) {
return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])});
} else {
return fmt.replace(/%s/g, function(match){return String(obj.shift())});
}
};
/* formatting library */
django.formats = {{ formats_str }};
django.get_format = function (format_type) {
var value = django.formats[format_type];
if (typeof(value) == 'undefined') {
return format_type;
} else {
return value;
}
};
/* add to global namespace */
globals.pluralidx = django.pluralidx;
globals.gettext = django.gettext;
globals.ngettext = django.ngettext;
globals.gettext_noop = django.gettext_noop;
globals.pgettext = django.pgettext;
globals.npgettext = django.npgettext;
globals.interpolate = django.interpolate;
globals.get_format = django.get_format;
}(this));
{% endautoescape %}
"""
def render_javascript_catalog(catalog=None, plural=None):
template = Template(js_catalog_template)
indent = lambda s: s.replace('\n', '\n ')
context = Context({
'catalog_str': indent(json.dumps(
catalog, sort_keys=True, indent=2)) if catalog else None,
'formats_str': indent(json.dumps(
get_formats(), sort_keys=True, indent=2)),
'plural': plural,
})
return http.HttpResponse(template.render(context), 'text/javascript')
def get_javascript_catalog(locale, domain, packages):
default_locale = to_locale(settings.LANGUAGE_CODE)
app_configs = apps.get_app_configs()
allowable_packages = set(app_config.name for app_config in app_configs)
allowable_packages.add('django.conf')
packages = [p for p in packages if p in allowable_packages]
t = {}
paths = []
en_selected = locale.startswith('en')
en_catalog_missing = True
# paths of requested packages
for package in packages:
p = importlib.import_module(package)
path = os.path.join(os.path.dirname(upath(p.__file__)), 'locale')
paths.append(path)
# add the filesystem paths listed in the LOCALE_PATHS setting
paths.extend(list(reversed(settings.LOCALE_PATHS)))
# first load all english languages files for defaults
for path in paths:
try:
catalog = gettext_module.translation(domain, path, ['en'])
t.update(catalog._catalog)
except IOError:
pass
else:
# 'en' is the selected language and at least one of the packages
# listed in `packages` has an 'en' catalog
if en_selected:
en_catalog_missing = False
# next load the settings.LANGUAGE_CODE translations if it isn't english
if default_locale != 'en':
for path in paths:
try:
catalog = gettext_module.translation(domain, path, [default_locale])
except IOError:
catalog = None
if catalog is not None:
t.update(catalog._catalog)
# last load the currently selected language, if it isn't identical to the default.
if locale != default_locale:
# If the currently selected language is English but it doesn't have a
# translation catalog (presumably due to being the language translated
# from) then a wrong language catalog might have been loaded in the
# previous step. It needs to be discarded.
if en_selected and en_catalog_missing:
t = {}
else:
locale_t = {}
for path in paths:
try:
catalog = gettext_module.translation(domain, path, [locale])
except IOError:
catalog = None
if catalog is not None:
locale_t.update(catalog._catalog)
if locale_t:
t = locale_t
plural = None
if '' in t:
for l in t[''].split('\n'):
if l.startswith('Plural-Forms:'):
plural = l.split(':', 1)[1].strip()
if plural is not None:
# this should actually be a compiled function of a typical plural-form:
# Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;
plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=', 1)[1]
pdict = {}
maxcnts = {}
catalog = {}
for k, v in t.items():
if k == '':
continue
if isinstance(k, six.string_types):
catalog[k] = v
elif isinstance(k, tuple):
msgid = k[0]
cnt = k[1]
maxcnts[msgid] = max(cnt, maxcnts.get(msgid, 0))
pdict.setdefault(msgid, {})[cnt] = v
else:
raise TypeError(k)
for k, v in pdict.items():
        catalog[k] = [v.get(i, '') for i in range(maxcnts[k] + 1)]
return catalog, plural
def null_javascript_catalog(request, domain=None, packages=None):
"""
Returns "identity" versions of the JavaScript i18n functions -- i.e.,
versions that don't actually do anything.
"""
return render_javascript_catalog()
def javascript_catalog(request, domain='djangojs', packages=None):
"""
Returns the selected language catalog as a javascript library.
Receives the list of packages to check for translations in the
packages parameter either from an infodict or as a +-delimited
string from the request. Default is 'django.conf'.
Additionally you can override the gettext domain for this view,
but usually you don't want to do that, as JavaScript messages
go to the djangojs domain. But this might be needed if you
deliver your JavaScript source from Django templates.
"""
locale = to_locale(get_language())
if request.GET and 'language' in request.GET:
if check_for_language(request.GET['language']):
locale = to_locale(request.GET['language'])
if packages is None:
packages = ['django.conf']
if isinstance(packages, six.string_types):
packages = packages.split('+')
catalog, plural = get_javascript_catalog(locale, domain, packages)
return render_javascript_catalog(catalog, plural)
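# Illustrative wiring sketch (hypothetical project urlconf, not part of this
# module; the app label and URL pattern below are placeholders):
#
#   from django.conf.urls import url
#   from django.views.i18n import javascript_catalog
#
#   js_info_dict = {'packages': ('your.app',)}
#   urlpatterns = [
#       url(r'^jsi18n/$', javascript_catalog, js_info_dict),
#   ]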
| bsd-3-clause |
talishte/ctigre | env/lib/python2.7/site-packages/django/core/files/uploadedfile.py | 223 | 4156 | """
Classes representing uploaded files.
"""
import os
from io import BytesIO
from django.conf import settings
from django.core.files.base import File
from django.core.files import temp as tempfile
from django.utils.encoding import force_str
__all__ = ('UploadedFile', 'TemporaryUploadedFile', 'InMemoryUploadedFile',
'SimpleUploadedFile')
class UploadedFile(File):
"""
    An abstract uploaded file (``TemporaryUploadedFile`` and
``InMemoryUploadedFile`` are the built-in concrete subclasses).
An ``UploadedFile`` object behaves somewhat like a file object and
represents some file data that the user submitted with a form.
"""
DEFAULT_CHUNK_SIZE = 64 * 2**10
def __init__(self, file=None, name=None, content_type=None, size=None, charset=None):
super(UploadedFile, self).__init__(file, name)
self.size = size
self.content_type = content_type
self.charset = charset
def __repr__(self):
return force_str("<%s: %s (%s)>" % (
self.__class__.__name__, self.name, self.content_type))
def _get_name(self):
return self._name
def _set_name(self, name):
# Sanitize the file name so that it can't be dangerous.
if name is not None:
# Just use the basename of the file -- anything else is dangerous.
name = os.path.basename(name)
# File names longer than 255 characters can cause problems on older OSes.
if len(name) > 255:
name, ext = os.path.splitext(name)
name = name[:255 - len(ext)] + ext
self._name = name
name = property(_get_name, _set_name)
class TemporaryUploadedFile(UploadedFile):
"""
A file uploaded to a temporary location (i.e. stream-to-disk).
"""
def __init__(self, name, content_type, size, charset):
if settings.FILE_UPLOAD_TEMP_DIR:
file = tempfile.NamedTemporaryFile(suffix='.upload',
dir=settings.FILE_UPLOAD_TEMP_DIR)
else:
file = tempfile.NamedTemporaryFile(suffix='.upload')
super(TemporaryUploadedFile, self).__init__(file, name, content_type, size, charset)
def temporary_file_path(self):
"""
Returns the full path of this file.
"""
return self.file.name
def close(self):
try:
return self.file.close()
except OSError as e:
if e.errno != 2:
# Means the file was moved or deleted before the tempfile
# could unlink it. Still sets self.file.close_called and
# calls self.file.file.close() before the exception
raise
class InMemoryUploadedFile(UploadedFile):
"""
A file uploaded into memory (i.e. stream-to-memory).
"""
def __init__(self, file, field_name, name, content_type, size, charset):
super(InMemoryUploadedFile, self).__init__(file, name, content_type, size, charset)
self.field_name = field_name
def open(self, mode=None):
self.file.seek(0)
def close(self):
pass
def chunks(self, chunk_size=None):
self.file.seek(0)
yield self.read()
def multiple_chunks(self, chunk_size=None):
# Since it's in memory, we'll never have multiple chunks.
return False
class SimpleUploadedFile(InMemoryUploadedFile):
"""
A simple representation of a file, which just has content, size, and a name.
"""
def __init__(self, name, content, content_type='text/plain'):
content = content or b''
super(SimpleUploadedFile, self).__init__(BytesIO(content), None, name,
content_type, len(content), None)
def from_dict(cls, file_dict):
"""
Creates a SimpleUploadedFile object from
a dictionary object with the following keys:
- filename
- content-type
- content
"""
return cls(file_dict['filename'],
file_dict['content'],
file_dict.get('content-type', 'text/plain'))
from_dict = classmethod(from_dict)
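# Illustrative usage sketch (hypothetical values, e.g. inside a test suite);
# the expected results follow from the class definitions above:
#
#   f = SimpleUploadedFile("hello.txt", b"file content", content_type="text/plain")
#   f.name    # -> 'hello.txt'
#   f.size    # -> 12
#   f.read()  # -> b'file content'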
| bsd-2-clause |
futurulus/scipy | scipy/io/harwell_boeing/_fortran_format_parser.py | 127 | 9092 | """
Preliminary module to handle fortran formats for IO. Do not use this outside
scipy.sparse io for now, until the API is deemed reasonable.
The *Format classes handle conversion between fortran and python format, and
FortranFormatParser can create *Format instances from raw fortran format
strings (e.g. '(3I4)', '(10I3)', etc...)
"""
from __future__ import division, print_function, absolute_import
import re
import warnings
import numpy as np
__all__ = ["BadFortranFormat", "FortranFormatParser", "IntFormat", "ExpFormat"]
TOKENS = {
"LPAR": r"\(",
"RPAR": r"\)",
"INT_ID": r"I",
"EXP_ID": r"E",
"INT": r"\d+",
"DOT": r"\.",
}
class BadFortranFormat(SyntaxError):
pass
def number_digits(n):
return int(np.floor(np.log10(np.abs(n))) + 1)
class IntFormat(object):
@classmethod
def from_number(cls, n, min=None):
"""Given an integer, returns a "reasonable" IntFormat instance to represent
any number between 0 and n if n > 0, -n and n if n < 0
Parameters
----------
n : int
max number one wants to be able to represent
min : int
minimum number of characters to use for the format
Returns
-------
res : IntFormat
IntFormat instance with reasonable (see Notes) computed width
Notes
-----
Reasonable should be understood as the minimal string length necessary
without losing precision. For example, IntFormat.from_number(1) will
return an IntFormat instance of width 2, so that any 0 and 1 may be
represented as 1-character strings without loss of information.
"""
width = number_digits(n) + 1
if n < 0:
width += 1
repeat = 80 // width
return cls(width, min, repeat=repeat)
def __init__(self, width, min=None, repeat=None):
self.width = width
self.repeat = repeat
self.min = min
def __repr__(self):
r = "IntFormat("
if self.repeat:
r += "%d" % self.repeat
r += "I%d" % self.width
if self.min:
r += ".%d" % self.min
return r + ")"
@property
def fortran_format(self):
r = "("
if self.repeat:
r += "%d" % self.repeat
r += "I%d" % self.width
if self.min:
r += ".%d" % self.min
return r + ")"
@property
def python_format(self):
return "%" + str(self.width) + "d"
class ExpFormat(object):
@classmethod
def from_number(cls, n, min=None):
"""Given a float number, returns a "reasonable" ExpFormat instance to
represent any number between -n and n.
Parameters
----------
n : float
max number one wants to be able to represent
min : int
minimum number of characters to use for the format
Returns
-------
res : ExpFormat
ExpFormat instance with reasonable (see Notes) computed width
Notes
-----
Reasonable should be understood as the minimal string length necessary
to avoid losing precision.
"""
# len of one number in exp format: sign + 1|0 + "." +
# number of digit for fractional part + 'E' + sign of exponent +
# len of exponent
finfo = np.finfo(n.dtype)
# Number of digits for fractional part
n_prec = finfo.precision + 1
# Number of digits for exponential part
n_exp = number_digits(np.max(np.abs([finfo.maxexp, finfo.minexp])))
width = 1 + 1 + n_prec + 1 + n_exp + 1
if n < 0:
width += 1
repeat = int(np.floor(80 / width))
return cls(width, n_prec, min, repeat=repeat)
def __init__(self, width, significand, min=None, repeat=None):
"""\
Parameters
----------
width : int
number of characters taken by the string (includes space).
"""
self.width = width
self.significand = significand
self.repeat = repeat
self.min = min
def __repr__(self):
r = "ExpFormat("
if self.repeat:
r += "%d" % self.repeat
r += "E%d.%d" % (self.width, self.significand)
if self.min:
r += "E%d" % self.min
return r + ")"
@property
def fortran_format(self):
r = "("
if self.repeat:
r += "%d" % self.repeat
r += "E%d.%d" % (self.width, self.significand)
if self.min:
r += "E%d" % self.min
return r + ")"
@property
def python_format(self):
return "%" + str(self.width-1) + "." + str(self.significand) + "E"
class Token(object):
def __init__(self, type, value, pos):
self.type = type
self.value = value
self.pos = pos
def __str__(self):
return """Token('%s', "%s")""" % (self.type, self.value)
def __repr__(self):
return self.__str__()
class Tokenizer(object):
def __init__(self):
self.tokens = list(TOKENS.keys())
self.res = [re.compile(TOKENS[i]) for i in self.tokens]
def input(self, s):
self.data = s
self.curpos = 0
self.len = len(s)
def next_token(self):
curpos = self.curpos
tokens = self.tokens
while curpos < self.len:
for i, r in enumerate(self.res):
m = r.match(self.data, curpos)
if m is None:
continue
else:
self.curpos = m.end()
return Token(self.tokens[i], m.group(), self.curpos)
else:
raise SyntaxError("Unknown character at position %d (%s)"
% (self.curpos, self.data[curpos]))
# Grammar for fortran format:
# format : LPAR format_string RPAR
# format_string : repeated | simple
# repeated : repeat simple
# simple : int_fmt | exp_fmt
# int_fmt : INT_ID width
# exp_fmt : simple_exp_fmt
# simple_exp_fmt : EXP_ID width DOT significand
# extended_exp_fmt : EXP_ID width DOT significand EXP_ID ndigits
# repeat : INT
# width : INT
# significand : INT
# ndigits : INT
# Naive fortran formatter - parser is hand-made
class FortranFormatParser(object):
"""Parser for fortran format strings. The parse method returns a *Format
instance.
Notes
-----
Only ExpFormat (exponential format for floating values) and IntFormat
(integer format) for now.
"""
def __init__(self):
self.tokenizer = Tokenizer()
def parse(self, s):
self.tokenizer.input(s)
tokens = []
try:
while True:
t = self.tokenizer.next_token()
if t is None:
break
else:
tokens.append(t)
return self._parse_format(tokens)
except SyntaxError as e:
raise BadFortranFormat(str(e))
def _get_min(self, tokens):
next = tokens.pop(0)
if not next.type == "DOT":
raise SyntaxError()
next = tokens.pop(0)
return next.value
def _expect(self, token, tp):
if not token.type == tp:
raise SyntaxError()
def _parse_format(self, tokens):
if not tokens[0].type == "LPAR":
raise SyntaxError("Expected left parenthesis at position "
"%d (got '%s')" % (0, tokens[0].value))
elif not tokens[-1].type == "RPAR":
raise SyntaxError("Expected right parenthesis at position "
"%d (got '%s')" % (len(tokens), tokens[-1].value))
tokens = tokens[1:-1]
types = [t.type for t in tokens]
if types[0] == "INT":
repeat = int(tokens.pop(0).value)
else:
repeat = None
next = tokens.pop(0)
if next.type == "INT_ID":
next = self._next(tokens, "INT")
width = int(next.value)
if tokens:
min = int(self._get_min(tokens))
else:
min = None
return IntFormat(width, min, repeat)
elif next.type == "EXP_ID":
next = self._next(tokens, "INT")
width = int(next.value)
next = self._next(tokens, "DOT")
next = self._next(tokens, "INT")
significand = int(next.value)
if tokens:
next = self._next(tokens, "EXP_ID")
next = self._next(tokens, "INT")
min = int(next.value)
else:
min = None
return ExpFormat(width, significand, min, repeat)
else:
raise SyntaxError("Invalid formater type %s" % next.value)
def _next(self, tokens, tp):
if not len(tokens) > 0:
raise SyntaxError()
next = tokens.pop(0)
self._expect(next, tp)
return next
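# Illustrative usage sketch (the format strings are arbitrary examples; the
# expected values follow from the classes above rather than a captured run):
#
#   parser = FortranFormatParser()
#   parser.parse("(10I8)").python_format       # -> '%8d'
#   parser.parse("(3E24.16)").python_format    # -> '%23.16E'
#   IntFormat.from_number(123).fortran_format  # -> '(20I4)'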
| bsd-3-clause |
neilLasrado/erpnext | erpnext/assets/doctype/asset_category/asset_category.py | 1 | 4214 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import cint, get_link_to_form
from frappe.model.document import Document
class AssetCategory(Document):
def validate(self):
self.validate_finance_books()
self.validate_account_types()
self.validate_account_currency()
self.valide_cwip_account()
def validate_finance_books(self):
for d in self.finance_books:
for field in ("Total Number of Depreciations", "Frequency of Depreciation"):
if cint(d.get(frappe.scrub(field)))<1:
frappe.throw(_("Row {0}: {1} must be greater than 0").format(d.idx, field), frappe.MandatoryError)
def validate_account_currency(self):
account_types = [
'fixed_asset_account', 'accumulated_depreciation_account', 'depreciation_expense_account', 'capital_work_in_progress_account'
]
invalid_accounts = []
for d in self.accounts:
company_currency = frappe.get_value('Company', d.get('company_name'), 'default_currency')
for type_of_account in account_types:
if d.get(type_of_account):
account_currency = frappe.get_value("Account", d.get(type_of_account), "account_currency")
if account_currency != company_currency:
invalid_accounts.append(frappe._dict({ 'type': type_of_account, 'idx': d.idx, 'account': d.get(type_of_account) }))
for d in invalid_accounts:
frappe.throw(_("Row #{}: Currency of {} - {} doesn't matches company currency.")
.format(d.idx, frappe.bold(frappe.unscrub(d.type)), frappe.bold(d.account)),
title=_("Invalid Account"))
def validate_account_types(self):
account_type_map = {
'fixed_asset_account': { 'account_type': 'Fixed Asset' },
'accumulated_depreciation_account': { 'account_type': 'Accumulated Depreciation' },
'depreciation_expense_account': { 'root_type': 'Expense' },
'capital_work_in_progress_account': { 'account_type': 'Capital Work in Progress' }
}
for d in self.accounts:
for fieldname in account_type_map.keys():
if d.get(fieldname):
selected_account = d.get(fieldname)
key_to_match = next(iter(account_type_map.get(fieldname))) # acount_type or root_type
selected_key_type = frappe.db.get_value('Account', selected_account, key_to_match)
expected_key_type = account_type_map[fieldname][key_to_match]
if selected_key_type != expected_key_type:
frappe.throw(_("Row #{}: {} of {} should be {}. Please modify the account or select a different account.")
.format(d.idx, frappe.unscrub(key_to_match), frappe.bold(selected_account), frappe.bold(expected_key_type)),
title=_("Invalid Account"))
def valide_cwip_account(self):
if self.enable_cwip_accounting:
missing_cwip_accounts_for_company = []
for d in self.accounts:
if (not d.capital_work_in_progress_account and
not frappe.db.get_value("Company", d.company_name, "capital_work_in_progress_account")):
missing_cwip_accounts_for_company.append(get_link_to_form("Company", d.company_name))
if missing_cwip_accounts_for_company:
frappe.throw(_("""
To enable Capital Work in Progress Accounting, you must select Capital Work in Progress Account in accounts table.<br><br>
You can also set default CWIP account in Company {0}
""").format(", ".join(missing_cwip_accounts_for_company)), title=_("Missing Account"))
@frappe.whitelist()
def get_asset_category_account(fieldname, item=None, asset=None, account=None, asset_category = None, company = None):
if item and frappe.db.get_value("Item", item, "is_fixed_asset"):
asset_category = frappe.db.get_value("Item", item, ["asset_category"])
elif not asset_category or not company:
if account:
if frappe.db.get_value("Account", account, "account_type") != "Fixed Asset":
account=None
if not account:
asset_details = frappe.db.get_value("Asset", asset, ["asset_category", "company"])
asset_category, company = asset_details or [None, None]
account = frappe.db.get_value("Asset Category Account",
filters={"parent": asset_category, "company_name": company}, fieldname=fieldname)
return account | gpl-3.0 |
sunil07t/e-mission-server | emission/core/wrapper/client.py | 1 | 2709 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
#
# In the current iteration, there is a client object that can be loaded from
# the filesystem into the database and its settings loaded from the database.
# There are no special settings (e.g. active/inactive).
#
# I have no idea how this will be used, but it is nice^H^H^H^H, unit tested code,
# so let us keep it around a bit longer
#
# Ah but this assumes that the settings file is in `emission/clients/` and we
# just deleted that entire directory. Changing this to conf for now...
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import *
from builtins import object
import json
import logging
import dateutil.parser
from datetime import datetime
# Our imports
from emission.core.get_database import get_profile_db, get_client_db
class Client(object):
def __init__(self, clientName):
# TODO: write background process to ensure that there is only one client with each name
# Maybe clean up unused clients?
self.clientName = clientName
self.settings_filename = "conf/clients/%s.settings.json" % self.clientName
self.__reload()
# Smart settings call, which returns the override settings if the client is
# active, and
def getSettings(self):
logging.debug("For client %s, returning settings %s" % (self.clientName, self.clientJSON['client_settings']))
return self.clientJSON['client_settings']
def __reload(self):
self.clientJSON = None
if self.clientName is not None:
self.clientJSON = get_client_db().find_one({'name': self.clientName})
# Figure out if the JSON object here should always be passed in
# Having it be passed in is a lot more flexible
# Let's compromise for now by passing it in and seeing how much of a hassle it is
# That will also ensure that the update_client script is not a complete NOP
def __update(self, newEntry):
get_client_db().update({'name': self.clientName}, newEntry, upsert = True)
self.__reload()
def update(self, createKey = True):
import uuid
newEntry = json.load(open(self.settings_filename))
if createKey:
newEntry['key'] = str(uuid.uuid4())
# logging.info("Updating with new entry %s" % newEntry)
self.__update(newEntry)
return newEntry['key']
def getClientKey(self):
if self.clientJSON is None:
return None
logging.debug("About to return %s from JSON %s" % (self.clientJSON['key'], self.clientJSON))
return self.clientJSON['key']
def clientSpecificSetters(self, uuid, sectionId, predictedModeMap):
return None
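# Illustrative usage sketch (hypothetical client name; assumes a settings file
# at conf/clients/<name>.settings.json and a reachable client/profile DB):
#
#   c = Client("example_client")
#   key = c.update(createKey=True)  # loads the settings file, stores a fresh key
#   c.getSettings()                 # returns the stored client_settings dict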
| bsd-3-clause |
ktnyt/chainer | chainerx/math/misc.py | 2 | 1549 | import chainerx
# TODO(sonots): Implement in C++
def square(x):
"""Returns the element-wise square of the input.
Args:
x (~chainerx.ndarray or scalar): Input data
Returns:
~chainerx.ndarray: Returned array: :math:`y = x * x`.
A scalar is returned if ``x`` is a scalar.
Note:
During backpropagation, this function propagates the gradient
of the output array to the input array ``x``.
.. seealso:: :data:`numpy.square`
"""
return x * x
# TODO(sonots): Implement in C++
def clip(a, a_min, a_max):
"""Clips the values of an array to a given interval.
Given an interval, values outside the interval are clipped to the
interval edges. For example, if an interval of ``[0, 1]`` is specified,
values smaller than 0 become 0, and values larger than 1 become 1.
Args:
a (~chainerx.ndarray): Array containing elements to clip.
a_min (scalar): Maximum value.
a_max (scalar): Minimum value.
Returns:
~chainerx.ndarray: An array with the elements of ``a``, but where
values < ``a_min`` are replaced with ``a_min``,
and those > ``a_max`` with ``a_max``.
Note:
The :class:`~chainerx.ndarray` typed ``a_min`` and ``a_max`` are
not supported yet.
Note:
During backpropagation, this function propagates the gradient
of the output array to the input array ``a``.
.. seealso:: :func:`numpy.clip`
"""
return -chainerx.maximum(-chainerx.maximum(a, a_min), -a_max)
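# Illustrative sketch (assumes a working chainerx installation; the expected
# values follow from the identities above, not from a captured run):
#
#   a = chainerx.array([-2.0, 0.5, 3.0])
#   square(a)      # -> [4.0, 0.25, 9.0]
#   clip(a, 0, 1)  # -> [0.0, 0.5, 1.0]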
| mit |
femmerling/DirMaker | box/lib/python2.7/site-packages/migrate/tests/versioning/test_version.py | 32 | 5951 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from migrate.exceptions import *
from migrate.versioning.version import *
from migrate.tests import fixture
class TestVerNum(fixture.Base):
def test_invalid(self):
"""Disallow invalid version numbers"""
versions = ('-1', -1, 'Thirteen', '')
for version in versions:
self.assertRaises(ValueError, VerNum, version)
def test_str(self):
"""Test str and repr version numbers"""
self.assertEqual(str(VerNum(2)), '2')
self.assertEqual(repr(VerNum(2)), '<VerNum(2)>')
def test_is(self):
"""Two version with the same number should be equal"""
a = VerNum(1)
b = VerNum(1)
self.assert_(a is b)
self.assertEqual(VerNum(VerNum(2)), VerNum(2))
def test_add(self):
self.assertEqual(VerNum(1) + VerNum(1), VerNum(2))
self.assertEqual(VerNum(1) + 1, 2)
self.assertEqual(VerNum(1) + 1, '2')
self.assert_(isinstance(VerNum(1) + 1, VerNum))
def test_sub(self):
self.assertEqual(VerNum(1) - 1, 0)
self.assert_(isinstance(VerNum(1) - 1, VerNum))
self.assertRaises(ValueError, lambda: VerNum(0) - 1)
def test_eq(self):
"""Two versions are equal"""
self.assertEqual(VerNum(1), VerNum('1'))
self.assertEqual(VerNum(1), 1)
self.assertEqual(VerNum(1), '1')
self.assertNotEqual(VerNum(1), 2)
def test_ne(self):
self.assert_(VerNum(1) != 2)
self.assertFalse(VerNum(1) != 1)
def test_lt(self):
self.assertFalse(VerNum(1) < 1)
self.assert_(VerNum(1) < 2)
self.assertFalse(VerNum(2) < 1)
def test_le(self):
self.assert_(VerNum(1) <= 1)
self.assert_(VerNum(1) <= 2)
self.assertFalse(VerNum(2) <= 1)
def test_gt(self):
self.assertFalse(VerNum(1) > 1)
self.assertFalse(VerNum(1) > 2)
self.assert_(VerNum(2) > 1)
def test_ge(self):
self.assert_(VerNum(1) >= 1)
self.assert_(VerNum(2) >= 1)
self.assertFalse(VerNum(1) >= 2)
class TestVersion(fixture.Pathed):
def setUp(self):
super(TestVersion, self).setUp()
def test_str_to_filename(self):
self.assertEquals(str_to_filename(''), '')
self.assertEquals(str_to_filename('__'), '_')
self.assertEquals(str_to_filename('a'), 'a')
self.assertEquals(str_to_filename('Abc Def'), 'Abc_Def')
self.assertEquals(str_to_filename('Abc "D" Ef'), 'Abc_D_Ef')
self.assertEquals(str_to_filename("Abc's Stuff"), 'Abc_s_Stuff')
self.assertEquals(str_to_filename("a b"), 'a_b')
self.assertEquals(str_to_filename("a.b to c"), 'a_b_to_c')
def test_collection(self):
"""Let's see how we handle versions collection"""
coll = Collection(self.temp_usable_dir)
coll.create_new_python_version("foo bar")
coll.create_new_sql_version("postgres", "foo bar")
coll.create_new_sql_version("sqlite", "foo bar")
coll.create_new_python_version("")
self.assertEqual(coll.latest, 4)
self.assertEqual(len(coll.versions), 4)
self.assertEqual(coll.version(4), coll.version(coll.latest))
coll2 = Collection(self.temp_usable_dir)
self.assertEqual(coll.versions, coll2.versions)
Collection.clear()
def test_old_repository(self):
open(os.path.join(self.temp_usable_dir, '1'), 'w')
self.assertRaises(Exception, Collection, self.temp_usable_dir)
#TODO: def test_collection_unicode(self):
# pass
def test_create_new_python_version(self):
coll = Collection(self.temp_usable_dir)
coll.create_new_python_version("'")
ver = coll.version()
self.assert_(ver.script().source())
def test_create_new_sql_version(self):
coll = Collection(self.temp_usable_dir)
coll.create_new_sql_version("sqlite", "foo bar")
ver = coll.version()
ver_up = ver.script('sqlite', 'upgrade')
ver_down = ver.script('sqlite', 'downgrade')
ver_up.source()
ver_down.source()
def test_selection(self):
"""Verify right sql script is selected"""
# Create empty directory.
path = self.tmp_repos()
os.mkdir(path)
# Create files -- files must be present or you'll get an exception later.
python_file = '001_initial_.py'
sqlite_upgrade_file = '001_sqlite_upgrade.sql'
default_upgrade_file = '001_default_upgrade.sql'
for file_ in [sqlite_upgrade_file, default_upgrade_file, python_file]:
filepath = '%s/%s' % (path, file_)
open(filepath, 'w').close()
ver = Version(1, path, [sqlite_upgrade_file])
self.assertEquals(os.path.basename(ver.script('sqlite', 'upgrade').path), sqlite_upgrade_file)
ver = Version(1, path, [default_upgrade_file])
self.assertEquals(os.path.basename(ver.script('default', 'upgrade').path), default_upgrade_file)
ver = Version(1, path, [sqlite_upgrade_file, default_upgrade_file])
self.assertEquals(os.path.basename(ver.script('sqlite', 'upgrade').path), sqlite_upgrade_file)
ver = Version(1, path, [sqlite_upgrade_file, default_upgrade_file, python_file])
self.assertEquals(os.path.basename(ver.script('postgres', 'upgrade').path), default_upgrade_file)
ver = Version(1, path, [sqlite_upgrade_file, python_file])
self.assertEquals(os.path.basename(ver.script('postgres', 'upgrade').path), python_file)
def test_bad_version(self):
ver = Version(1, self.temp_usable_dir, [])
self.assertRaises(ScriptError, ver.add_script, '123.sql')
pyscript = os.path.join(self.temp_usable_dir, 'bla.py')
open(pyscript, 'w')
ver.add_script(pyscript)
self.assertRaises(ScriptError, ver.add_script, 'bla.py')
| mit |
yodalee/servo | tests/wpt/web-platform-tests/tools/wptserve/docs/conf.py | 467 | 7855 | # -*- coding: utf-8 -*-
#
# wptserve documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 14 17:23:24 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
sys.path.insert(0, os.path.abspath(".."))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'wptserve'
copyright = u'2013, Mozilla Foundation and other wptserve contributers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'wptservedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'wptserve.tex', u'wptserve Documentation',
u'James Graham', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'wptserve', u'wptserve Documentation',
[u'James Graham'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'wptserve', u'wptserve Documentation',
u'James Graham', 'wptserve', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| mpl-2.0 |
0-wiz-0/audacity | lib-src/libsndfile/src/create_symbols_file.py | 39 | 5490 | #!/usr/bin/python
# Copyright (C) 2003-2011 Erik de Castro Lopo <[email protected]>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the author nor the names of any contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re, sys
#----------------------------------------------------------------
# These are all of the public functions exported from libsndfile.
#
# Its important not to change the order they are listed in or
# the ordinal values in the second column.
ALL_SYMBOLS = (
( "sf_command", 1 ),
( "sf_open", 2 ),
( "sf_close", 3 ),
( "sf_seek", 4 ),
( "sf_error", 7 ),
( "sf_perror", 8 ),
( "sf_error_str", 9 ),
( "sf_error_number", 10 ),
( "sf_format_check", 11 ),
( "sf_read_raw", 16 ),
( "sf_readf_short", 17 ),
( "sf_readf_int", 18 ),
( "sf_readf_float", 19 ),
( "sf_readf_double", 20 ),
( "sf_read_short", 21 ),
( "sf_read_int", 22 ),
( "sf_read_float", 23 ),
( "sf_read_double", 24 ),
( "sf_write_raw", 32 ),
( "sf_writef_short", 33 ),
( "sf_writef_int", 34 ),
( "sf_writef_float", 35 ),
( "sf_writef_double", 36 ),
( "sf_write_short", 37 ),
( "sf_write_int", 38 ),
( "sf_write_float", 39 ),
( "sf_write_double", 40 ),
( "sf_strerror", 50 ),
( "sf_get_string", 60 ),
( "sf_set_string", 61 ),
( "sf_version_string",68 ),
( "sf_open_fd", 70 ),
( "sf_wchar_open", 71 ),
( "sf_open_virtual", 80 ),
( "sf_write_sync", 90 )
)
#-------------------------------------------------------------------------------
def linux_symbols (progname, version):
print "# Auto-generated by %s\n" %progname
print "libsndfile.so.%s" % version
print "{"
print " global:"
for name, ordinal in ALL_SYMBOLS:
if name == "sf_wchar_open":
continue
print " %s ;" % name
print " local:"
print " * ;"
print "} ;"
print
return
def darwin_symbols (progname, version):
print "# Auto-generated by %s\n" %progname
for name, ordinal in ALL_SYMBOLS:
if name == "sf_wchar_open":
continue
print "_%s" % name
print
return
def win32_symbols (progname, version, name):
print "; Auto-generated by %s\n" %progname
print "LIBRARY %s-%s.dll" % (name, re.sub ("\..*", "", version))
print "EXPORTS\n"
for name, ordinal in ALL_SYMBOLS:
print "%-20s @%s" % (name, ordinal)
print
return
def os2_symbols (progname, version, name):
print "; Auto-generated by %s\n" %progname
print "LIBRARY %s%s" % (name, re.sub ("\..*", "", version))
print "INITINSTANCE TERMINSTANCE"
print "CODE PRELOAD MOVEABLE DISCARDABLE"
print "DATA PRELOAD MOVEABLE MULTIPLE NONSHARED"
print "EXPORTS\n"
for name, ordinal in ALL_SYMBOLS:
if name == "sf_wchar_open":
continue
print "_%-20s @%s" % (name, ordinal)
print
return
def plain_symbols (progname, version, name):
for name, ordinal in ALL_SYMBOLS:
print name
def no_symbols (os_name):
print
print "No known way of restricting exported symbols on '%s'." % os_name
print "If you know a way, please contact the author."
print
return
#-------------------------------------------------------------------------------
progname = re.sub (".*[\\/]", "", sys.argv [0])
if len (sys.argv) != 3:
print
print "Usage : %s <target OS name> <libsndfile version>." % progname
print
print " Currently supported values for target OS are:"
print " linux"
print " darwin (ie MacOSX)"
print " win32 (ie wintendo)"
print " cygwin (Cygwin on wintendo)"
print " os2 (OS/2)"
print " plain (plain list of symbols)"
print
sys.exit (1)
os_name = sys.argv [1]
version = re.sub ("\.[a-z0-9]+$", "", sys.argv [2])
if os_name == "linux" or os_name == "gnu" or os_name == "binutils":
linux_symbols (progname, version)
elif os_name == "darwin":
darwin_symbols (progname, version)
elif os_name == "win32":
win32_symbols (progname, version, "libsndfile")
elif os_name == "cygwin":
win32_symbols (progname, version, "cygsndfile")
elif os_name == "os2":
os2_symbols (progname, version, "sndfile")
elif os_name == "static":
plain_symbols (progname, version, "")
else:
no_symbols (os_name)
sys.exit (0)
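# Example invocations (version number and redirect targets are hypothetical;
# the script writes the symbol file to stdout):
#
#   python create_symbols_file.py linux 1.0.25 > Symbols.linux
#   python create_symbols_file.py win32 1.0.25 > libsndfile-1.def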
| gpl-2.0 |
calfonso/ansible | lib/ansible/modules/storage/netapp/sf_volume_access_group_manager.py | 23 | 8816 | #!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: sf_volume_access_group_manager
short_description: Manage SolidFire Volume Access Groups
extends_documentation_fragment:
- netapp.solidfire
version_added: '2.3'
author: Sumit Kumar ([email protected])
description:
- Create, destroy, or update volume access groups on SolidFire
options:
state:
description:
- Whether the specified volume access group should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- Name of the volume access group. It is not required to be unique, but recommended.
required: true
initiators:
description:
- List of initiators to include in the volume access group. If unspecified, the access group will start out without configured initiators.
volumes:
description:
- List of volumes to initially include in the volume access group. If unspecified, the access group will start without any volumes.
virtual_network_id:
description:
- The ID of the SolidFire Virtual Network ID to associate the volume access group with.
virtual_network_tags:
description:
- The ID of the VLAN Virtual Network Tag to associate the volume access group with.
attributes:
description: List of Name/Value pairs in JSON object format.
volume_access_group_id:
description:
- The ID of the volume access group to modify or delete.
'''
EXAMPLES = """
- name: Create Volume Access Group
sf_volume_access_group_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
name: AnsibleVolumeAccessGroup
volumes: [7,8]
- name: Modify Volume Access Group
sf_volume_access_group_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
volume_access_group_id: 1
name: AnsibleVolumeAccessGroup-Renamed
attributes: {"volumes": [1,2,3], "virtual_network_id": 12345}
- name: Delete Volume Access Group
sf_volume_access_group_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: absent
volume_access_group_id: 1
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()
class SolidFireVolumeAccessGroup(object):
def __init__(self):
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
volume_access_group_id=dict(required=False, type='int', default=None),
initiators=dict(required=False, type='list', default=None),
volumes=dict(required=False, type='list', default=None),
virtual_network_id=dict(required=False, type='list', default=None),
virtual_network_tags=dict(required=False, type='list', default=None),
attributes=dict(required=False, type='dict', default=None),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
self.volume_access_group_id = p['volume_access_group_id']
self.initiators = p['initiators']
self.volumes = p['volumes']
self.virtual_network_id = p['virtual_network_id']
self.virtual_network_tags = p['virtual_network_tags']
self.attributes = p['attributes']
if HAS_SF_SDK is False:
self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
else:
self.sfe = netapp_utils.create_sf_connection(module=self.module)
def get_volume_access_group(self):
access_groups_list = self.sfe.list_volume_access_groups()
for group in access_groups_list.volume_access_groups:
if group.name == self.name:
# Update self.volume_access_group_id:
if self.volume_access_group_id is not None:
if group.volume_access_group_id == self.volume_access_group_id:
return group
else:
self.volume_access_group_id = group.volume_access_group_id
return group
return None
def create_volume_access_group(self):
try:
self.sfe.create_volume_access_group(name=self.name,
initiators=self.initiators,
volumes=self.volumes,
virtual_network_id=self.virtual_network_id,
virtual_network_tags=self.virtual_network_tags,
attributes=self.attributes)
except Exception as e:
self.module.fail_json(msg="Error creating volume access group %s: %s" %
(self.name, to_native(e)), exception=traceback.format_exc())
def delete_volume_access_group(self):
try:
self.sfe.delete_volume_access_group(volume_access_group_id=self.volume_access_group_id)
except Exception as e:
self.module.fail_json(msg="Error deleting volume access group %s: %s" %
(self.volume_access_group_id, to_native(e)),
exception=traceback.format_exc())
def update_volume_access_group(self):
try:
self.sfe.modify_volume_access_group(volume_access_group_id=self.volume_access_group_id,
virtual_network_id=self.virtual_network_id,
virtual_network_tags=self.virtual_network_tags,
name=self.name,
initiators=self.initiators,
volumes=self.volumes,
attributes=self.attributes)
except Exception as e:
self.module.fail_json(msg="Error updating volume access group %s: %s" %
(self.volume_access_group_id, to_native(e)), exception=traceback.format_exc())
def apply(self):
changed = False
group_exists = False
update_group = False
group_detail = self.get_volume_access_group()
if group_detail:
group_exists = True
if self.state == 'absent':
changed = True
elif self.state == 'present':
# Check if we need to update the group
if self.volumes is not None and group_detail.volumes != self.volumes:
update_group = True
changed = True
elif self.initiators is not None and group_detail.initiators != self.initiators:
update_group = True
changed = True
elif self.virtual_network_id is not None or self.virtual_network_tags is not None or \
self.attributes is not None:
update_group = True
changed = True
else:
if self.state == 'present':
changed = True
if changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if not group_exists:
self.create_volume_access_group()
elif update_group:
self.update_volume_access_group()
elif self.state == 'absent':
self.delete_volume_access_group()
self.module.exit_json(changed=changed)
def main():
v = SolidFireVolumeAccessGroup()
v.apply()
if __name__ == '__main__':
main()
| gpl-3.0 |
parrt/lolviz | prince_dtree.py | 1 | 12296 | import IPython, graphviz, re
from io import StringIO
from IPython.display import Image
import numpy as np
import pandas as pd
import math
from sklearn import tree
from sklearn.datasets import load_boston, load_iris
from collections import defaultdict
import string
import re
YELLOW = "#fefecd" # "#fbfbd0" # "#FBFEB0"
BLUE = "#D9E6F5"
GREEN = "#cfe2d4"
color_blind_friendly_colors = {
'redorange': '#f46d43',
'orange': '#fdae61', 'yellow': '#fee090', 'sky': '#e0f3f8',
'babyblue': '#abd9e9', 'lightblue': '#74add1', 'blue': '#4575b4'
}
color_blind_friendly_colors = [
None, # 0 classes
None, # 1 class
[YELLOW,BLUE], # 2 classes
[YELLOW,BLUE,GREEN], # 3 classes
[YELLOW,BLUE,GREEN,'#a1dab4'], # 4
[YELLOW,BLUE,GREEN,'#a1dab4','#41b6c4'], # 5
[YELLOW,'#c7e9b4','#7fcdbb','#41b6c4','#2c7fb8','#253494'], # 6
[YELLOW,'#c7e9b4','#7fcdbb','#41b6c4','#1d91c0','#225ea8','#0c2c84'], # 7
[YELLOW,'#edf8b1','#c7e9b4','#7fcdbb','#41b6c4','#1d91c0','#225ea8','#0c2c84'], # 8
[YELLOW,'#ece7f2','#d0d1e6','#a6bddb','#74a9cf','#3690c0','#0570b0','#045a8d','#023858'], # 9
[YELLOW,'#e0f3f8','#313695','#fee090','#4575b4','#fdae61','#abd9e9','#74add1','#d73027','#f46d43'] # 10
]
for x in color_blind_friendly_colors[2:]:
print(x)
max_class_colors = len(color_blind_friendly_colors)-1
def tree_traverse(n_nodes, children_left, children_right):
"""
    Derived from http://scikit-learn.org/stable/auto_examples/tree/plot_unveil_tree_structure.html
    to walk the tree structure.
    Traverses the tree to compute various properties such
    as the depth of each node and whether or not it is a leaf.
Input -
n_nodes: number of nodes in the tree
children_left: array of length n_nodes. left children node indexes
children_right: array of length n_nodes. right children node indexes
:return:
is_leaf: array of length n_nodes with boolean whether node i is leaf or not,
node_depth: depth of each node from root to node. root is depth 0
"""
node_depth = np.zeros(shape=n_nodes, dtype=np.int64)
is_leaf = np.zeros(shape=n_nodes, dtype=bool)
stack = [(0, -1)] # seed is the root node id and its parent depth
while len(stack) > 0:
node_id, parent_depth = stack.pop() # (0,-1)
node_depth[node_id] = parent_depth + 1
# If we have a non-leaf node
if children_left[node_id] != children_right[node_id]:
stack.append((children_left[node_id], parent_depth + 1))
stack.append((children_right[node_id], parent_depth + 1))
else:
is_leaf[node_id] = True
return is_leaf, node_depth
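# Hedged usage sketch (not part of the original file): tree_traverse expects the
# arrays exposed by a fitted scikit-learn tree_ object, e.g.
#     clf = tree.DecisionTreeClassifier(max_depth=3).fit(data, target)
#     t = clf.tree_
#     is_leaf, node_depth = tree_traverse(t.node_count, t.children_left, t.children_right)
# is_leaf[i] is True for terminal nodes, and node_depth[0] is 0 for the root.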
# def dectree_max_depth(tree):
# n_nodes = tree.node_count
# children_left = tree.children_left
# children_right = tree.children_right
#
# def walk(node_id):
# if (children_left[node_id] != children_right[node_id]):
# left_max = 1 + walk(children_left[node_id])
# right_max = 1 + walk(children_right[node_id])
# # if node_id<100: print(f"node {node_id}: {left_max}, {right_max}")
# return max(left_max, right_max)
# else: # leaf
# return 1
#
# root_node_id = 0
# return walk(root_node_id)
def dtreeviz(tree, X, y, precision=1, classnames=None, orientation="LR"):
def get_feature(i):
name = X.columns[feature[i]]
node_name = ''.join(c for c in name if c not in string.punctuation)+str(i)
node_name = re.sub("["+string.punctuation+string.whitespace+"]", '_', node_name)
return name, node_name
def round(v,ndigits=precision):
return format(v, '.' + str(ndigits) + 'f')
def dec_node_box(name, node_name, split):
html = """<table BORDER="0" CELLPADDING="0" CELLBORDER="0" CELLSPACING="0">
<tr>
<td colspan="3" align="center" cellspacing="0" cellpadding="0" bgcolor="#fefecd" border="1" sides="b"><font face="Helvetica" color="#444443" point-size="12">{name}</font></td>
</tr>
<tr>
<td colspan="3" cellpadding="1" border="0" bgcolor="#fefecd"></td>
</tr>
<tr>
<td cellspacing="0" cellpadding="0" bgcolor="#fefecd" border="1" sides="r" align="right"><font face="Helvetica" color="#444443" point-size="11">split</font></td>
<td cellspacing="0" cellpadding="0" border="0"></td>
<td cellspacing="0" cellpadding="0" bgcolor="#fefecd" align="left"><font face="Helvetica" color="#444443" point-size="11">{split}</font></td>
</tr>
</table>""".format(name=name, split=split)
return '{node_name} [shape=box label=<{label}>]\n'.format(label=html, node_name=node_name)
def dec_node(name, node_name, split):
html = """<font face="Helvetica" color="#444443" point-size="12">{name}<br/>@{split}</font>""".format(name=name, split=split)
return '{node_name} [shape=none label=<{label}>]\n'.format(label=html, node_name=node_name)
def prop_size(n):
# map to 0.03 to .35
margin_range = (0.03, 0.35)
if sample_count_range>0:
zero_to_one = (n - min_samples) / sample_count_range
return zero_to_one * (margin_range[1] - margin_range[0]) + margin_range[0]
else:
return margin_range[0]
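    # Worked example (illustrative, not from the original file): with
    # min_samples = 5, max_samples = 105 and a leaf holding n = 55 samples,
    # zero_to_one = (55 - 5) / 100 = 0.5, so the returned margin is
    # 0.5 * (0.35 - 0.03) + 0.03 = 0.19.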
# parsing the tree structure
n_nodes = tree.node_count # total nodes in the tree
children_left = tree.children_left # left children node index
children_right = tree.children_right # right children node index
feature = tree.feature # feature index at splits (-2 means leaf)
threshold = tree.threshold # split threshold values at given feature
is_leaf, node_depth = tree_traverse(n_nodes, children_left, children_right)
ranksep = ".22"
if orientation=="TD":
ranksep = ".35"
st = '\ndigraph G {splines=line;\n \
nodesep=0.1;\n \
ranksep=%s;\n \
rankdir=%s;\n \
node [margin="0.03" penwidth="0.5" width=.1, height=.1];\n \
edge [arrowsize=.4 penwidth="0.5"]\n' % (ranksep,orientation)
# Define decision nodes (non leaf nodes) as feature names
for i in range(n_nodes):
if not is_leaf[i]: # non leaf nodes
name, node_name = get_feature(i)
# st += dec_node_box(name, node_name, split=round(threshold[i]))
st += dec_node(name, node_name, split=round(threshold[i]))
# non leaf edges with > and <=
for i in range(n_nodes):
if not is_leaf[i]:
name, node_name = get_feature(i)
left, left_node_name = get_feature(children_left[i])
if is_leaf[children_left[i]]:
left = left_node_name ='leaf%d' % children_left[i]
right_name, right_node_name = get_feature(children_right[i])
if is_leaf[children_right[i]]:
right = right_node_name ='leaf%d' % children_right[i]
split = round(threshold[i])
left_html = '<font face="Helvetica" color="#444443" point-size="11"><</font>'
right_html = '<font face="Helvetica" color="#444443" point-size="11">≥</font>'
if orientation=="TD":
ldistance = ".9"
rdistance = ".9"
langle = "-28"
rangle = "28"
else:
ldistance = "1.3" # not used in LR mode; just label not taillable.
rdistance = "1.3"
langle = "-90"
rangle = "90"
blankedge = 'label=<<font face="Helvetica" color="#444443" point-size="1"> </font>>'
st += '{name} -> {left} [{blankedge} labelangle="{angle}" labeldistance="{ldistance}" {tail}label=<{label}>]\n'\
.format(label="",#left_html,
angle=langle,
ldistance=ldistance,
name=node_name,
blankedge = "",#blankedge,
tail="tail",#""tail" if orientation=="TD" else "",
left=left_node_name)
st += '{name} -> {right} [{blankedge} labelangle="{angle}" labeldistance="{rdistance}" {tail}label=<{label}>]\n' \
.format(label="",#right_html,
angle=rangle,
rdistance=rdistance,
name=node_name,
blankedge="",#blankedge,
tail="tail",# "tail" if orientation == "TD" else "",
right=right_node_name)
# find range of leaf sample count
leaf_sample_counts = [tree.n_node_samples[i] for i in range(n_nodes) if is_leaf[i]]
min_samples = min(leaf_sample_counts)
max_samples = max(leaf_sample_counts)
sample_count_range = max_samples - min_samples
print(leaf_sample_counts)
print("range is ", sample_count_range)
# is_classifier = hasattr(tree, 'n_classes')
is_classifier = tree.n_classes > 1
color_values = list(reversed(color_blind_friendly_colors))
n_classes = tree.n_classes[0]
color_values = color_blind_friendly_colors[n_classes]
# color_values = [c+"EF" for c in color_values] # add alpha
# Define leaf nodes (after edges so >= edges shown properly)
for i in range(n_nodes):
if is_leaf[i]:
node_samples = tree.n_node_samples[i]
impurity = tree.impurity
if is_classifier:
counts = np.array(tree.value[i][0])
predicted_class = np.argmax(counts)
predicted = predicted_class
if classnames:
predicted = classnames[predicted_class]
ratios = counts / node_samples # convert counts to ratios totalling 1.0
ratios = [round(r,3) for r in ratios]
color_spec = ["{c};{r}".format(c=color_values[i],r=r) for i,r in enumerate(ratios)]
color_spec = ':'.join(color_spec)
if n_classes > max_class_colors:
color_spec = YELLOW
html = """<font face="Helvetica" color="black" point-size="12">{predicted}<br/> </font>""".format(predicted=predicted)
margin = prop_size(node_samples)
st += 'leaf{i} [height=0 width="0.4" margin="{margin}" style={style} fillcolor="{colors}" shape=circle label=<{label}>]\n' \
.format(i=i, label=html, name=node_name, colors=color_spec, margin=margin,
style='wedged' if n_classes<=max_class_colors else 'filled')
else:
value = tree.value[i][0]
html = """<font face="Helvetica" color="#444443" point-size="11">"""+round(value[0])+"""</font>"""
margin = prop_size(node_samples)
st += 'leaf{i} [height=0 width="0.4" margin="{margin}" style=filled fillcolor="{color}" shape=circle label=<{label}>]\n'\
.format(i=i, label=html, name=node_name, color=YELLOW, margin=margin)
# end of string
st = st+'}'
return st
def boston():
regr = tree.DecisionTreeRegressor(max_depth=4, random_state=666)
boston = load_boston()
print(boston.data.shape, boston.target.shape)
data = pd.DataFrame(boston.data)
data.columns =boston.feature_names
regr = regr.fit(data, boston.target)
# st = dectreeviz(regr.tree_, data, boston.target)
st = dtreeviz(regr.tree_, data, boston.target, orientation="TD")
with open("/tmp/t3.dot", "w") as f:
f.write(st)
return st
def iris():
clf = tree.DecisionTreeClassifier(max_depth=4, random_state=666)
iris = load_iris()
print(iris.data.shape, iris.target.shape)
data = pd.DataFrame(iris.data)
data.columns = iris.feature_names
clf = clf.fit(data, iris.target)
# st = dectreeviz(clf.tree_, data, boston.target)
st = dtreeviz(clf.tree_, data, iris.target, orientation="TD"
, classnames=["setosa", "versicolor", "virginica"]
)
with open("/tmp/t3.dot", "w") as f:
f.write(st)
print(clf.tree_.value)
return st
# st = iris()
st = boston()
print(st)
graphviz.Source(st).view()
| bsd-3-clause |
gilhooley/python_koans | python3/libs/colorama/win32.py | 451 | 4833 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
# from winbase.h
STDOUT = -11
STDERR = -12
try:
from ctypes import windll
from ctypes import wintypes
except ImportError:
windll = None
SetConsoleTextAttribute = lambda *_: None
else:
from ctypes import (
byref, Structure, c_char, c_short, c_uint32, c_ushort, POINTER
)
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
"""struct in wincon.h."""
_fields_ = [
("dwSize", wintypes._COORD),
("dwCursorPosition", wintypes._COORD),
("wAttributes", wintypes.WORD),
("srWindow", wintypes.SMALL_RECT),
("dwMaximumWindowSize", wintypes._COORD),
]
def __str__(self):
return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
self.dwSize.Y, self.dwSize.X
, self.dwCursorPosition.Y, self.dwCursorPosition.X
, self.wAttributes
, self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
, self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
)
_GetStdHandle = windll.kernel32.GetStdHandle
_GetStdHandle.argtypes = [
wintypes.DWORD,
]
_GetStdHandle.restype = wintypes.HANDLE
_GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
_GetConsoleScreenBufferInfo.argtypes = [
wintypes.HANDLE,
POINTER(CONSOLE_SCREEN_BUFFER_INFO),
]
_GetConsoleScreenBufferInfo.restype = wintypes.BOOL
_SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
_SetConsoleTextAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
]
_SetConsoleTextAttribute.restype = wintypes.BOOL
_SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
_SetConsoleCursorPosition.argtypes = [
wintypes.HANDLE,
wintypes._COORD,
]
_SetConsoleCursorPosition.restype = wintypes.BOOL
_FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
_FillConsoleOutputCharacterA.argtypes = [
wintypes.HANDLE,
c_char,
wintypes.DWORD,
wintypes._COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputCharacterA.restype = wintypes.BOOL
_FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
_FillConsoleOutputAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
wintypes.DWORD,
wintypes._COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputAttribute.restype = wintypes.BOOL
handles = {
STDOUT: _GetStdHandle(STDOUT),
STDERR: _GetStdHandle(STDERR),
}
def GetConsoleScreenBufferInfo(stream_id=STDOUT):
handle = handles[stream_id]
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = _GetConsoleScreenBufferInfo(
handle, byref(csbi))
return csbi
def SetConsoleTextAttribute(stream_id, attrs):
handle = handles[stream_id]
return _SetConsoleTextAttribute(handle, attrs)
def SetConsoleCursorPosition(stream_id, position):
position = wintypes._COORD(*position)
# If the position is out of range, do nothing.
if position.Y <= 0 or position.X <= 0:
return
# Adjust for Windows' SetConsoleCursorPosition:
# 1. being 0-based, while ANSI is 1-based.
# 2. expecting (x,y), while ANSI uses (y,x).
adjusted_position = wintypes._COORD(position.Y - 1, position.X - 1)
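        # Illustrative walk-through (assuming the caller passes the ANSI (row, column)
        # pair, as the comments above describe): for ANSI row 5, column 10 this line
        # builds _COORD(X=9, Y=4), before the viewport offsets below are added.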
# Adjust for viewport's scroll position
sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
adjusted_position.Y += sr.Top
adjusted_position.X += sr.Left
# Resume normal processing
handle = handles[stream_id]
return _SetConsoleCursorPosition(handle, adjusted_position)
def FillConsoleOutputCharacter(stream_id, char, length, start):
handle = handles[stream_id]
char = c_char(char)
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
success = _FillConsoleOutputCharacterA(
handle, char, length, start, byref(num_written))
return num_written.value
def FillConsoleOutputAttribute(stream_id, attr, length, start):
''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
handle = handles[stream_id]
attribute = wintypes.WORD(attr)
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
return _FillConsoleOutputAttribute(
handle, attribute, length, start, byref(num_written))
| mit |
SurfasJones/djcmsrc3 | venv/lib/python2.7/site-packages/menus/base.py | 13 | 1838 | # -*- coding: utf-8 -*-
from django.utils.encoding import smart_str
class Menu(object):
namespace = None
def __init__(self):
if not self.namespace:
self.namespace = self.__class__.__name__
def get_nodes(self, request):
"""
should return a list of NavigationNode instances
"""
raise NotImplementedError
class Modifier(object):
def modify(self, request, nodes, namespace, root_id, post_cut, breadcrumb):
pass
class NavigationNode(object):
def __init__(self, title, url, id, parent_id=None, parent_namespace=None, attr=None, visible=True):
self.children = [] # do not touch
self.parent = None # do not touch, code depends on this
self.namespace = None # TODO: Assert why we need this and above
self.title = title
self.url = url
self.id = id
self.parent_id = parent_id
self.parent_namespace = parent_namespace
self.visible = visible
if attr:
self.attr = attr
else:
self.attr = {} # To avoid declaring a dict in defaults...
def __repr__(self):
return "<Navigation Node: %s>" % smart_str(self.title)
def get_menu_title(self):
return self.title
def get_absolute_url(self):
return self.url
def get_attribute(self, name):
return self.attr.get(name, None)
def get_descendants(self):
nodes = []
for node in self.children:
nodes.append(node)
nodes += node.get_descendants()
return nodes
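    # Example (illustrative, not from the original file): for a node with
    # children [a, b] where a has one child [c], get_descendants() returns the
    # depth-first order [a, c, b].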
def get_ancestors(self):
nodes = []
if getattr(self, 'parent', None):
nodes.append(self.parent)
nodes += self.parent.get_ancestors()
return nodes
| mit |
RahulMahalingam/raft | tabs/CrawlerTab.py | 11 | 10084 | #
# crawler tab implementation
#
#
# Author: Gregory Fleischer ([email protected])
#
# Copyright (c) 2011 RAFT Team
#
# This file is part of RAFT.
#
# RAFT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RAFT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RAFT. If not, see <http://www.gnu.org/licenses/>.
#
import PyQt4
from PyQt4.QtCore import Qt, QObject, SIGNAL, QUrl, QTimer
from PyQt4.QtGui import *
from core.crawler.SpiderPageController import SpiderPageController
from core.network.InMemoryCookieJar import InMemoryCookieJar
class CrawlerTab(QObject):
def __init__(self, framework, mainWindow):
QObject.__init__(self, mainWindow)
self.framework = framework
self.mainWindow = mainWindow
self.mainWindow.crawlerSpiderSequenceCheckBox.stateChanged.connect(self.handle_crawlerSpiderSequenceCheckBox_stateChanged)
self.mainWindow.crawlerSpiderStartButton.clicked.connect(self.handle_spiderStart_clicked)
self.mainWindow.crawlerSpiderStopButton.clicked.connect(self.handle_spiderStop_clicked)
self.mainWindow.crawlerSpiderClearQueueButton.clicked.connect(self.handle_spiderClearQueue_clicked)
self.mainWindow.crawlerSpiderPendingResponsesClearButton.clicked.connect(self.handle_spiderClearPendingResponses_clicked)
self.mainWindow.crawlerSpiderPendingResponsesResetButton.clicked.connect(self.handle_spiderResetPendingResponses_clicked)
self.mainWindow.crawlerSpiderStartButton.setEnabled(True)
self.mainWindow.crawlerSpiderStopButton.setEnabled(False)
self.setup_spider_window()
self.Data = None
self.cursor = None
self.framework.subscribe_database_events(self.db_attach, self.db_detach)
self.framework.subscribe_sequences_changed(self.fill_sequences)
def db_attach(self):
self.Data = self.framework.getDB()
self.cursor = self.Data.allocate_thread_cursor()
self.fill_sequences()
def db_detach(self):
self.close_cursor()
self.Data = None
def close_cursor(self):
if self.cursor and self.Data:
self.cursor.close()
self.Data.release_thread_cursor(self.cursor)
self.cursor = None
def fill_sequences(self):
self.fill_sequences_combo_box(self.mainWindow.crawlerSpiderSequenceComboBox)
def fill_sequences_combo_box(self, comboBox):
selectedText = comboBox.currentText()
comboBox.clear()
for row in self.Data.get_all_sequences(self.cursor):
sequenceItem = [m or '' for m in row]
name = str(sequenceItem[1])
Id = str(sequenceItem[0])
item = comboBox.addItem(name, Id)
if selectedText:
index = comboBox.findText(selectedText)
if index != -1:
comboBox.setCurrentIndex(index)
def handle_crawlerSpiderSequenceCheckBox_stateChanged(self, state):
self.mainWindow.crawlerSpiderSequenceComboBox.setEnabled(self.mainWindow.crawlerSpiderSequenceCheckBox.isChecked())
def handle_spiderStart_clicked(self):
self.mainWindow.crawlerSpiderStartButton.setEnabled(False)
self.mainWindow.crawlerSpiderStopButton.setEnabled(True)
sequenceId = None
if self.mainWindow.crawlerSpiderSequenceCheckBox.isChecked():
sequenceId = int(self.mainWindow.crawlerSpiderSequenceComboBox.itemData(self.mainWindow.crawlerSpiderSequenceComboBox.currentIndex()))
self.spiderThread.startSpidering(self, sequenceId, self.cookieJar)
def handle_spiderStop_clicked(self):
self.mainWindow.crawlerSpiderStartButton.setEnabled(True)
self.mainWindow.crawlerSpiderStopButton.setEnabled(False)
self.spiderThread.stopSpidering()
def handle_spiderClearQueue_clicked(self):
self.spiderThread.clearSpiderQueue()
def handle_spiderClearPendingResponses_clicked(self):
self.spiderThread.clearSpiderPendingResponses()
def handle_spiderResetPendingResponses_clicked(self):
self.spiderThread.resetSpiderPendingResponses()
def set_spider_thread(self, spiderThread):
self.spiderThread = spiderThread
QObject.connect(self, SIGNAL('spiderRunFinished()'), self.handle_spiderRunFinished)
QObject.connect(self, SIGNAL('spiderItemAvailable(int, QString, QUrl, int)'), self.handle_spiderItemAvailable)
self.spider_qtimer = QTimer()
self.spider_qtimer2 = QTimer()
QObject.connect(self.spider_qtimer, SIGNAL('timeout()'), self.handle_spider_load_timeout)
QObject.connect(self.spider_qtimer2, SIGNAL('timeout()'), self.handle_spider_render_timeout)
def setup_spider_window(self):
self.cookieJar = InMemoryCookieJar(self.framework, self)
self.spiderPageController = SpiderPageController(self.framework, self.cookieJar, self.mainWindow, self)
self.spiderConfig = self.framework.getSpiderConfig()
self.crawlerSpiderWebView = self.spiderPageController.add_web_view()
self.crawlerSpiderPlaceholderLayout = self.mainWindow.crawlerSpiderWindowPlaceholder.layout()
if not self.crawlerSpiderPlaceholderLayout:
self.crawlerSpiderPlaceholderLayout = QVBoxLayout(self.mainWindow.crawlerSpiderWindowPlaceholder)
self.crawlerSpiderPlaceholderLayout.addWidget(self.crawlerSpiderWebView)
self.currentSpiderId = None
self.currentHtmlContent = None
self.currentQUrl = None
QObject.connect(self.crawlerSpiderWebView, SIGNAL('loadStarted()'), self.handle_spiderWebView_loadStarted)
QObject.connect(self.crawlerSpiderWebView, SIGNAL('loadFinished(bool)'), self.handle_spiderWebView_loadFinished)
def handle_spiderRunFinished(self):
self.mainWindow.crawlerSpiderStartButton.setEnabled(True)
self.mainWindow.crawlerSpiderStopButton.setEnabled(False)
def handle_spiderItemAvailable(self, spiderId, htmlContent, qurl, depth):
self.currentSpiderId = spiderId
self.currentHtmlContent = htmlContent
self.currentQUrl = qurl
self.currentDepth = depth
self.currentSpiderUrl = qurl.toEncoded().data().decode('utf-8')
self.spiderPageController.reset_state(qurl)
self.load_html_content()
def load_html_content(self):
self.spider_qtimer.start(3000) # 3 seconds to finish
self.crawlerSpiderWebView.setHtml(self.currentHtmlContent, self.currentQUrl)
def handle_spiderWebView_loadStarted(self):
print(('spider web loading started: %s' % (self.spiderPageController.get_phase())))
def handle_spiderWebView_loadFinished(self, ok):
print(('spider web loading finished [%s]: %s' % (ok, self.spiderPageController.get_phase())))
if self.spider_qtimer.isActive():
self.spider_qtimer.stop()
if self.spider_qtimer2.isActive():
self.spider_qtimer2.stop()
if ok:
self.spider_qtimer2.start(1000) # 1 seconds to finish
else:
self.spiderItemCompleted(ok)
def handle_spider_load_timeout(self):
if self.spider_qtimer.isActive():
self.spider_qtimer.stop()
        print('forcibly stopping page')
self.crawlerSpiderWebView.stop()
self.spiderItemCompleted(False)
def handle_spider_render_timeout(self):
if self.spider_qtimer2.isActive():
self.spider_qtimer2.stop()
# TODO: should check progress
self.crawlerSpiderWebView.stop()
self.spiderItemCompleted(True)
def spiderItemCompleted(self, ok):
webPage = self.crawlerSpiderWebView.page()
mainFrame = webPage.mainFrame()
if not self.spiderPageController.is_extraction_finished():
self.process_page_events(webPage, mainFrame)
self.process_frame_content(mainFrame)
if self.spiderConfig.evaluate_javascript:
if not self.spiderPageController.is_finished():
self.spiderPageController.advance_phase()
# TODO: should this be signal emitted via one-shot timer ?
self.load_html_content()
else:
self.finish_spider_item()
else:
self.finish_spider_item()
def finish_spider_item(self):
for link, base_url in self.spiderPageController.get_url_links():
self.spiderThread.process_page_url_link(link, base_url, self.currentDepth)
for response_id in self.spiderPageController.get_response_ids():
self.spiderThread.process_page_response_id(response_id, self.currentDepth)
self.crawlerSpiderWebView.cleanup()
self.spiderThread.spiderItemFinished(self.currentSpiderId)
def process_page_events(self, webPage, frame):
try:
webPage.process_page_events(frame)
for child in frame.childFrames():
self.process_page_events(webPage, child)
except Exception as error:
self.framework.report_exception(error)
def process_frame_content(self, frame):
self.extract_frame_content(frame)
for child in frame.childFrames():
self.process_frame_content(child)
def extract_frame_content(self, frame):
parentFrame = frame.parentFrame()
if parentFrame:
referer = parentFrame.url().toEncoded().data().decode('utf-8')
else:
referer = self.currentSpiderUrl
dom = frame.documentElement()
html = dom.toOuterXml()
url = frame.url().toEncoded().data().decode('utf-8')
self.spiderThread.process_page_html_content(html, url, self.currentDepth)
| gpl-3.0 |
cboyce93/epitome-xl | src/library_manager.py | 1 | 74166 | #!/usr/bin/env python
import pygtk
import gtk
import dill
import pango
import os
import subprocess
import time
pygtk.require('2.0')
from os.path import expanduser
from lib.Project import Project
from pipeline_editor import PipelineEditor
from lib.Module import Module
from dialog import Dialog
from util.library import Library
from util.open import Open
from cmd_editor import CmdEditor
import pdb
class LibraryManager():
def exit(self, widget, data=None):
self.window.destroy()
def on_NEW_LIBRARY_activated(self, widget, data=None):
self.new_library_window = gtk.Window()
self.new_library_window.connect("delete_event", self.on_delete_event)
self.new_library_window.set_title('New Library')
self.new_library_window.set_position(gtk.WIN_POS_CENTER)
self.new_library_window.set_size_request(280, 100)
self.new_library_window.set_border_width(10)
self.new_library_window.set_resizable(False)
self.new_library_window_vbox = gtk.VBox(False, 0)
self.nl_hbox = gtk.HBox(False, 0)
self.label = gtk.Label("Library Name:")
self.label.set_alignment(0, 0.5)
self.nl_hbox.pack_start(self.label, False, False, 10)
self.name = gtk.Entry()
self.nl_hbox.pack_start(self.name, True, True, 0)
self.new_library_window_vbox.pack_start(self.nl_hbox, False, False, 10)
# Create HBox wrapper for lower content
self.new_library_window_hbox = gtk.HBox(True, 3)
self.cancel = gtk.Button("Cancel")
self.cancel.connect(
"clicked", self.on_cancel, self.new_library_window)
#close_without_saving.connect("clicked", self.save_and_close_unsaved_changes_window)
self.create = gtk.Button("Create")
self.create.connect("clicked", self.on_create_library_clicked)
# pack buttons
self.new_library_window_hbox.add(self.cancel)
self.new_library_window_hbox.add(self.create)
self.new_library_window_vbox.pack_start(
self.new_library_window_hbox, False, False, 2)
self.new_library_window.add(self.new_library_window_vbox)
self.new_library_window.show_all()
def on_delete_event(self, widget, data=None):
widget.destroy()
def on_cancel(self, widget, window):
window.destroy()
def on_new_library_window_cancel_clicked(self, widget, data=None):
self.save_library()
self.new_library_window.destroy()
def on_create_library_clicked(self, widget, data=None):
library = self.name.get_text()
if self.library_liststore.iter_n_children(None) == 0:
next_index = 0
else:
next_index = int(self.library_liststore.get_value(
self.library_liststore.iter_nth_child(None,
self.library_liststore.iter_n_children(None) - 1), 0)) + 1
self.library_liststore.append([next_index, library])
ls = Library("library")
ls.library_entry(next_index, library)
ls.close()
self.new_library_window.destroy()
def on_EDIT_LIBRARY_activated(self, widget, data=None):
model, iterr = self.library_treeview.get_selection().get_selected()
iterr = self.sorted_library_liststore.convert_iter_to_child_iter(None, iterr)
name = self.library_liststore.get_value(iterr, 1)
self.new_library_window = gtk.Window()
self.new_library_window.connect("delete_event", self.on_delete_event)
self.new_library_window.set_title('Edit ' + name)
self.new_library_window.set_position(gtk.WIN_POS_CENTER)
self.new_library_window.set_size_request(280, 100)
self.new_library_window.set_border_width(10)
self.new_library_window.set_resizable(False)
self.new_library_window_vbox = gtk.VBox(False, 0)
self.nl_hbox = gtk.HBox(False, 0)
self.label = gtk.Label("Library Name:")
self.label.set_alignment(0, 0.5)
self.nl_hbox.pack_start(self.label, False, False, 10)
self.name = gtk.Entry()
self.name.set_text(name)
self.nl_hbox.pack_start(self.name, True, True, 0)
self.new_library_window_vbox.pack_start(self.nl_hbox, False, False, 10)
# Create HBox wrapper for lower content
self.new_library_window_hbox = gtk.HBox(True, 3)
self.cancel = gtk.Button("Cancel")
self.cancel.connect(
"clicked", self.on_cancel, self.new_library_window)
#close_without_saving.connect("clicked", self.save_and_close_unsaved_changes_window)
self.save = gtk.Button("Save")
self.save.connect("clicked", self.on_save_library_clicked)
# pack buttons
self.new_library_window_hbox.add(self.cancel)
self.new_library_window_hbox.add(self.save)
self.new_library_window_vbox.pack_start(
self.new_library_window_hbox, False, False, 2)
self.new_library_window.add(self.new_library_window_vbox)
self.new_library_window.show_all()
def on_save_library_clicked(self, widget, data=None):
m, i = self.library_selection.get_selected()
i = self.sorted_library_liststore.convert_iter_to_child_iter(None, i)
ls = Library("library")
name = self.name.get_text()
ls.update_library_entry(self.library_liststore.get_value(i, 0), name)
ls.close()
self.library_liststore.set(i, 1, name)
self.new_library_window.destroy()
md = Dialog()
md.on_info(name + " updated successfully.")
def on_DELETE_LIBRARY_activated(self, widget, data=None):
m, i = self.library_selection.get_selected()
i = self.sorted_library_liststore.convert_iter_to_child_iter(None, i)
md = Dialog()
name = self.library_liststore.get_value(i, 1)
response = md.on_confirmation("Delete " + name +
" and all associated modules?", "Delete " + name)
if response == gtk.RESPONSE_ACCEPT:
response = md.on_confirmation("Are you sure? Deleted modules cannot be recovered.", "Delete " + name)
if response == gtk.RESPONSE_ACCEPT:
self.category_iters = []
self.function_iters = []
self.module_iters= []
self.cat = Library("category")
self.sorted_cat_liststore.foreach(self.remove_category, None)
self.cat.close()
self.lib = Library("library")
self.lib.delete_library_entry(self.library_liststore.get_value(i, 0))
self.lib.close()
self.library_liststore.remove(i)
for iterr in self.category_iters:
self.category_liststore.remove(iterr)
for iterr in self.function_iters:
self.function_liststore.remove(iterr)
for iterr in self.module_iters:
self.module_liststore.remove(iterr)
self.update_module_textview(None, None)
md = Dialog()
md.on_info(name + " deleted successfully.")
def remove_category(self, model, path, iterr, data):
self.category_selection.select_iter(iterr)
iterr = model.convert_iter_to_child_iter(None, iterr)
model1 = model.get_model()
iterr = model1.convert_iter_to_child_iter(iterr)
model2 = model1.get_model()
self.func = Library("function")
self.sorted_function_liststore.foreach(self.remove_function, None)
self.func.close()
self.category_iters.append(iterr)
self.cat.delete_category_entry(model2.get_value(iterr, 0))
def remove_function(self, model, path, iterr, data):
self.function_selection.select_iter(iterr)
iterr = model.convert_iter_to_child_iter(None, iterr)
model1 = model.get_model()
iterr = model1.convert_iter_to_child_iter(iterr)
model2 = model1.get_model()
self.sorted_module_liststore.foreach(self.remove_module, None)
self.function_iters.append(iterr)
self.func.delete_function_entry(model2.get_value(iterr, 0))
def remove_module(self, model, path, iterr, data):
iterr = model.convert_iter_to_child_iter(None, iterr)
model1 = model.get_model()
iterr = model1.convert_iter_to_child_iter(iterr)
model2 = model1.get_model()
filepath = model2.get_value(iterr, 4)
self.module_iters.append(iterr)
subprocess.call(['rm', filepath])
def on_NEW_CATEGORY_activated(self, widget, data=None):
if self.library_liststore.iter_n_children(None) > 0:
self.add_category_window = gtk.Window()
self.add_category_window.connect(
"delete_event", self.on_delete_event)
self.add_category_window.set_title('New Category')
self.add_category_window.set_size_request(320, 132)
self.add_category_window.set_border_width(10)
self.add_category_window.set_resizable(False)
self.add_category_window_vbox = gtk.VBox(False, 0)
self.ac_hbox = gtk.HBox(False, 0)
self.label = gtk.Label(" Library Name: ")
self.label.set_alignment(0, 0.5)
self.ac_hbox.pack_start(self.label, False, False, 10)
self.library_combobox = gtk.ComboBox(self.library_liststore)
self.library_combobox.connect(
"changed", self.on_library_combobox_changed)
m, i = self.library_selection.get_selected()
if i is not None:
i = self.sorted_library_liststore.convert_iter_to_child_iter(None, i)
self.library_combobox.set_active_iter(i)
else:
md = Dialog()
md.on_warn("Please select a library.")
return None
cell = gtk.CellRendererText()
self.library_combobox.pack_start(cell, True)
self.library_combobox.add_attribute(cell, 'text', 1)
self.ac_hbox.pack_start(self.library_combobox, True, True, 0)
self.add_category_window_vbox.pack_start(
self.ac_hbox, False, False, 4)
self.ac_hbox2 = gtk.HBox(False, 0)
self.label = gtk.Label("Category Name: ")
self.label.set_alignment(0, 0.5)
self.ac_hbox2.pack_start(self.label, False, False, 10)
self.name = gtk.Entry()
self.ac_hbox2.pack_start(self.name, True, True, 0)
self.add_category_window_vbox.pack_start(
self.ac_hbox2, False, False, 4)
# Create HBox wrapper for lower content
self.add_category_window_hbox = gtk.HBox(True, 3)
self.cancel = gtk.Button("Close")
self.cancel.connect(
"clicked", self.on_cancel, self.add_category_window)
#close_without_saving.connect("clicked", self.save_and_close_unsaved_changes_window)
self.add = gtk.Button("Add Category")
self.add.connect("clicked", self.on_add_category_clicked)
# pack buttons
self.add_category_window_hbox.add(self.cancel)
self.add_category_window_hbox.add(self.add)
self.add_category_window_vbox.pack_start(
self.add_category_window_hbox, False, False, 6)
self.add_category_window.add(self.add_category_window_vbox)
self.add_category_window.show_all()
else:
md = Dialog()
md.on_warn(
"Please create at least one library prior to adding categories.")
def on_add_category_clicked(self, widget, data=None):
library = self.library_liststore.get_value(
self.library_combobox.get_active_iter(), 1)
category = self.name.get_text()
if self.category_liststore.iter_n_children(None) == 0:
next_index = 0
else:
next_index = int(self.category_liststore.get_value(
self.category_liststore.iter_nth_child(None,
self.category_liststore.iter_n_children(None) - 1), 0)) + 1
self.category_liststore.append([next_index, library, category])
cs = Library("category")
cs.category_entry(next_index, library, category)
cs.close()
self.add_category_window.destroy()
def on_add_category_window_cancel_clicked(self, widget, data=None):
self.add_category_window.destroy()
def on_library_combobox_changed(self, combobox, data=None):
i = self.sorted_library_liststore.convert_child_iter_to_iter(None, combobox.get_active_iter())
if i is not None:
self.library_selection.select_iter(i)
def on_EDIT_CATEGORY_activated(self, widget, data=None):
if self.library_liststore.iter_n_children(None) > 0:
model, iterr = self.category_treeview.get_selection().get_selected()
self.category_selection.select_iter(iterr)
iterr = self.sorted_cat_liststore.convert_iter_to_child_iter(None, iterr)
iterr = self.catfilter.convert_iter_to_child_iter(iterr)
name = self.category_liststore.get_value(iterr, 2)
self.add_category_window = gtk.Window()
self.add_category_window.connect(
"delete_event", self.on_delete_event)
self.add_category_window.set_title('Edit ' + name)
self.add_category_window.set_size_request(320, 132)
self.add_category_window.set_border_width(10)
self.add_category_window.set_resizable(False)
self.add_category_window_vbox = gtk.VBox(False, 0)
self.ac_hbox = gtk.HBox(False, 0)
self.label = gtk.Label(" Library Name: ")
self.label.set_alignment(0, 0.5)
self.ac_hbox.pack_start(self.label, False, False, 10)
self.library_combobox = gtk.ComboBox(self.library_liststore)
self.library_combobox.connect(
"changed", self.on_library_combobox_changed)
m, i = self.library_selection.get_selected()
if i is not None:
i = self.sorted_library_liststore.convert_iter_to_child_iter(None, i)
self.library_combobox.set_active_iter(i)
else:
md = Dialog()
md.on_warn("Please select a library.")
return None
cell = gtk.CellRendererText()
self.library_combobox.pack_start(cell, True)
self.library_combobox.add_attribute(cell, 'text', 1)
self.ac_hbox.pack_start(self.library_combobox, True, True, 0)
self.add_category_window_vbox.pack_start(
self.ac_hbox, False, False, 4)
self.ac_hbox2 = gtk.HBox(False, 0)
self.label = gtk.Label("Category Name: ")
self.label.set_alignment(0, 0.5)
self.ac_hbox2.pack_start(self.label, False, False, 10)
self.name = gtk.Entry()
self.name.set_text(name)
self.ac_hbox2.pack_start(self.name, True, True, 0)
self.add_category_window_vbox.pack_start(
self.ac_hbox2, False, False, 4)
# Create HBox wrapper for lower content
self.add_category_window_hbox = gtk.HBox(True, 3)
self.cancel = gtk.Button("Close")
self.cancel.connect(
"clicked", self.on_cancel, self.add_category_window)
#close_without_saving.connect("clicked", self.save_and_close_unsaved_changes_window)
self.save = gtk.Button("Save")
self.save.connect("clicked", self.on_save_category_clicked)
# pack buttons
self.add_category_window_hbox.add(self.cancel)
self.add_category_window_hbox.add(self.save)
self.add_category_window_vbox.pack_start(
self.add_category_window_hbox, False, False, 6)
self.add_category_window.add(self.add_category_window_vbox)
self.add_category_window.show_all()
else:
md = Dialog()
md.on_warn(
"Please create at least one library prior to adding categories.")
def on_save_category_clicked(self, widget, date=None):
m, i = self.category_selection.get_selected()
i = self.sorted_cat_liststore.convert_iter_to_child_iter(None, i)
i = self.catfilter.convert_iter_to_child_iter(i)
cs = Library("category")
name = self.name.get_text()
cs.update_category_entry(self.category_liststore.get_value(i, 0),
self.category_liststore.get_value(i, 1),
name)
cs.close()
self.category_liststore.set(i, 2, name)
self.add_category_window.destroy()
md = Dialog()
md.on_info(name + " updated successfully.")
def on_DELETE_CATEGORY_activated(self, widget, data=None):
m, i = self.category_selection.get_selected()
i = self.sorted_cat_liststore.convert_iter_to_child_iter(None, i)
i = self.catfilter.convert_iter_to_child_iter(i)
md = Dialog()
name = self.category_liststore.get_value(i, 2)
response = md.on_confirmation("Delete " + name +
" and all associated modules?", "Delete " + name)
if response == gtk.RESPONSE_ACCEPT:
response = md.on_confirmation("Are you sure? Deleted modules cannot be recovered.", "Delete " + name)
if response == gtk.RESPONSE_ACCEPT:
self.function_iters = []
self.module_iters= []
self.func = Library("function")
self.sorted_function_liststore.foreach(self.remove_function, None)
self.func.close()
self.cat = Library("category")
self.cat.delete_category_entry(self.category_liststore.get_value(i, 0))
self.cat.close()
self.category_liststore.remove(i)
for iterr in self.function_iters:
self.function_liststore.remove(iterr)
for iterr in self.module_iters:
self.module_liststore.remove(iterr)
self.update_module_textview(None, None)
md = Dialog()
md.on_info(name + " deleted successfully.")
def on_NEW_FUNCTION_activated(self, widget, data=None):
if self.library_liststore.iter_n_children(None) > 0 and self.category_liststore.iter_n_children(None) > 0:
self.nf_window = gtk.Window()
self.nf_window.connect("delete_event", self.on_delete_event)
self.nf_window.set_title('New Function')
self.nf_window.set_size_request(320, 170)
self.nf_window.set_position(gtk.WIN_POS_CENTER)
self.nf_window.set_border_width(10)
self.nf_window.set_resizable(False)
self.nf_window_vbox = gtk.VBox(False, 0)
self.nf_hbox = gtk.HBox(False, 0)
self.nf_label = gtk.Label(" Select Library: ")
self.nf_label.set_alignment(0, 0.5)
self.nf_hbox.pack_start(self.nf_label, False, False, 10)
self.nf_library_combobox = gtk.ComboBox(self.library_liststore)
self.nf_library_combobox.connect(
"changed", self.on_library_combobox_changed)
m, i = self.library_selection.get_selected()
if i is not None:
i = self.sorted_library_liststore.convert_iter_to_child_iter(None, i)
self.nf_library_combobox.set_active_iter(i)
else:
md = Dialog()
md.on_warn("Please select a library.")
return None
self.nf_cell = gtk.CellRendererText()
self.nf_library_combobox.pack_start(self.nf_cell, True)
self.nf_library_combobox.add_attribute(self.nf_cell, 'text', 1)
self.nf_hbox.pack_start(self.nf_library_combobox, True, True, 0)
self.nf_window_vbox.pack_start(self.nf_hbox, False, False, 4)
self.nf_hbox2 = gtk.HBox(False, 0)
self.nf_label = gtk.Label("Select Category: ")
self.nf_label.set_alignment(0, 0.5)
self.nf_hbox2.pack_start(self.nf_label, False, False, 10)
self.nf_category_combobox = gtk.ComboBox(self.catfilter)
self.nf_category_combobox.connect(
"changed", self.on_category_combobox_changed)
m, i = self.category_selection.get_selected()
if i is not None:
i = self.sorted_cat_liststore.convert_iter_to_child_iter(None, i)
self.nf_category_combobox.set_active_iter(i)
else:
md = Dialog()
md.on_warn("Please select a category.")
return None
cell2 = gtk.CellRendererText()
self.nf_category_combobox.pack_start(cell2, True)
self.nf_category_combobox.add_attribute(cell2, 'text', 2)
self.nf_hbox2.pack_start(self.nf_category_combobox, True, True, 0)
self.nf_window_vbox.pack_start(self.nf_hbox2, False, False, 4)
self.nf_hbox3 = gtk.HBox(False, 0)
self.label3 = gtk.Label(" Function Name: ")
self.label3.set_alignment(0, 0.5)
self.nf_hbox3.pack_start(self.label3, False, False, 10)
self.function_name = gtk.Entry()
self.nf_hbox3.pack_start(self.function_name, True, True, 0)
self.nf_window_vbox.pack_start(self.nf_hbox3, False, False, 4)
# Create HBox wrapper for lower content
self.nf_lower_hbox = gtk.HBox(True, 3)
self.nf_cancel = gtk.Button("Close")
self.nf_cancel.connect(
"clicked", self.on_cancel, self.nf_window)
#close_without_saving.connect("clicked", self.save_and_close_unsaved_changes_window)
self.add = gtk.Button("Add Function")
self.add.connect("clicked", self.on_new_function_clicked)
# pack buttons
self.nf_lower_hbox.add(self.nf_cancel)
self.nf_lower_hbox.add(self.add)
self.nf_window_vbox.pack_start(self.nf_lower_hbox, False, False, 6)
self.nf_window.add(self.nf_window_vbox)
self.nf_window.show_all()
else:
md = Dialog()
md.on_warn(
"Please create at least one library category prior to adding function subcategories.")
def on_category_combobox_changed(self, combobox, data=None):
i = self.sorted_cat_liststore.convert_child_iter_to_iter(None, combobox.get_active_iter())
self.category_selection.select_iter(i)
def on_new_function_clicked(self, widget, data=None):
library = self.library_liststore.get_value(self.nf_library_combobox.get_active_iter(), 1)
category = self.catfilter.get_value(
self.nf_category_combobox.get_active_iter(), 2)
function = self.function_name.get_text()
if self.function_liststore.iter_n_children(None) == 0:
next_index = 0
else:
next_index = int(self.function_liststore.get_value(
self.function_liststore.iter_nth_child(None,
self.function_liststore.iter_n_children(None) - 1), 0)) + 1
self.function_liststore.append([next_index, library, category, function])
fs = Library("function")
fs.function_entry(next_index, library, category, function)
fs.close()
self.nf_window.destroy()
def on_new_function_window_cancel_clicked(self, widget, data=None):
self.nf_window.destroy()
def refilter_library(self, model, data=None):
self.catfilter.refilter()
if self.catfilter.iter_n_children(None) > 0:
root_iter = self.sorted_cat_liststore.convert_child_iter_to_iter(None, self.catfilter.get_iter_first())
if root_iter is not None:
self.category_selection.select_iter(root_iter)
def refilter_category(self, model, data=None):
self.functionfilter.refilter()
if self.functionfilter.iter_n_children(None) > 0:
root_iter = self.sorted_function_liststore.convert_child_iter_to_iter(None, self.functionfilter.get_iter_first())
if root_iter is not None:
self.function_selection.select_iter(root_iter)
def refilter_function(self, model, data=None):
self.modulefilter.refilter()
if self.modulefilter.iter_n_children(None) > 0:
root_iter = self.sorted_module_liststore.convert_child_iter_to_iter(None, self.modulefilter.get_iter_first())
if root_iter is not None:
self.module_selection.select_iter(root_iter)
def match_func(self, model, iterr, data=None):
return True
def on_EDIT_FUNCTION_activated(self, widget, data=None):
if self.library_liststore.iter_n_children(None) > 0 and self.category_liststore.iter_n_children(None) > 0:
model, iterr = self.function_treeview.get_selection().get_selected()
self.function_selection.select_iter(iterr)
iterr = self.sorted_function_liststore.convert_iter_to_child_iter(None, iterr)
iterr = self.functionfilter.convert_iter_to_child_iter(iterr)
name = self.function_liststore.get_value(iterr, 3)
self.nf_window = gtk.Window()
self.nf_window.connect("delete_event", self.on_delete_event)
self.nf_window.set_title('Edit ' + name)
self.nf_window.set_size_request(320, 170)
self.nf_window.set_position(gtk.WIN_POS_CENTER)
self.nf_window.set_border_width(10)
self.nf_window.set_resizable(False)
self.nf_window_vbox = gtk.VBox(False, 0)
self.nf_hbox = gtk.HBox(False, 0)
self.nf_label = gtk.Label(" Select Library: ")
self.nf_label.set_alignment(0, 0.5)
self.nf_hbox.pack_start(self.nf_label, False, False, 10)
self.nf_library_combobox = gtk.ComboBox(self.library_liststore)
self.nf_library_combobox.connect(
"changed", self.on_library_combobox_changed)
m, i = self.library_selection.get_selected()
if i is not None:
i = self.sorted_library_liststore.convert_iter_to_child_iter(None, i)
self.nf_library_combobox.set_active_iter(i)
else:
md = Dialog()
md.on_warn("Please select a library.")
return None
self.nf_cell = gtk.CellRendererText()
self.nf_library_combobox.pack_start(self.nf_cell, True)
self.nf_library_combobox.add_attribute(self.nf_cell, 'text', 1)
self.nf_hbox.pack_start(self.nf_library_combobox, True, True, 0)
self.nf_window_vbox.pack_start(self.nf_hbox, False, False, 4)
self.nf_hbox2 = gtk.HBox(False, 0)
self.nf_label = gtk.Label("Select Category: ")
self.nf_label.set_alignment(0, 0.5)
self.nf_hbox2.pack_start(self.nf_label, False, False, 10)
self.nf_category_combobox = gtk.ComboBox(self.catfilter)
self.nf_category_combobox.connect(
"changed", self.on_category_combobox_changed)
m, i = self.category_selection.get_selected()
if i is not None:
i = self.sorted_cat_liststore.convert_iter_to_child_iter(None, i)
self.nf_category_combobox.set_active_iter(i)
else:
md = Dialog()
md.on_warn("Please select a category.")
return None
cell2 = gtk.CellRendererText()
self.nf_category_combobox.pack_start(cell2, True)
self.nf_category_combobox.add_attribute(cell2, 'text', 2)
self.nf_hbox2.pack_start(self.nf_category_combobox, True, True, 0)
self.nf_window_vbox.pack_start(self.nf_hbox2, False, False, 4)
self.nf_hbox3 = gtk.HBox(False, 0)
self.label3 = gtk.Label(" Function Name: ")
self.label3.set_alignment(0, 0.5)
self.nf_hbox3.pack_start(self.label3, False, False, 10)
self.function_name = gtk.Entry()
self.function_name.set_text(name)
self.nf_hbox3.pack_start(self.function_name, True, True, 0)
self.nf_window_vbox.pack_start(self.nf_hbox3, False, False, 4)
# Create HBox wrapper for lower content
self.nf_lower_hbox = gtk.HBox(True, 3)
self.nf_cancel = gtk.Button("Close")
self.nf_cancel.connect(
"clicked", self.on_cancel, self.nf_window)
#close_without_saving.connect("clicked", self.save_and_close_unsaved_changes_window)
self.save = gtk.Button("Save")
self.save.connect("clicked", self.on_save_function_clicked)
# pack buttons
self.nf_lower_hbox.add(self.nf_cancel)
self.nf_lower_hbox.add(self.save)
self.nf_window_vbox.pack_start(self.nf_lower_hbox, False, False, 6)
self.nf_window.add(self.nf_window_vbox)
self.nf_window.show_all()
else:
md = Dialog()
md.on_warn(
"Please create at least one library category prior to adding function subcategories.")
def on_save_function_clicked(self, widget, date=None):
m, i = self.function_selection.get_selected()
i = self.sorted_function_liststore.convert_iter_to_child_iter(None, i)
i = self.functionfilter.convert_iter_to_child_iter(i)
fs = Library("function")
name = self.function_name.get_text()
fs.update_function_entry(self.function_liststore.get_value(i, 0),
self.function_liststore.get_value(i, 1),
self.function_liststore.get_value(i, 2),
name)
fs.close()
self.function_liststore.set(i, 3, name)
self.nf_window.destroy()
md = Dialog()
md.on_info(name + " updated successfully.")
def on_DELETE_FUNCTION_activated(self, widget, data=None):
m, i = self.function_selection.get_selected()
i = self.sorted_function_liststore.convert_iter_to_child_iter(None, i)
i = self.functionfilter.convert_iter_to_child_iter(i)
md = Dialog()
name = self.function_liststore.get_value(i, 3)
response = md.on_confirmation("Delete " + name +
" and all associated modules?", "Delete " + name)
if response == gtk.RESPONSE_ACCEPT:
response = md.on_confirmation("Are you sure? Deleted modules cannot be recovered.", "Delete " + name)
if response == gtk.RESPONSE_ACCEPT:
self.module_iters= []
self.sorted_module_liststore.foreach(self.remove_module, None)
fs = Library("function")
fs.delete_function_entry(self.function_liststore.get_value(i, 0))
fs.close()
self.function_liststore.remove(i)
for iterr in self.module_iters:
self.module_liststore.remove(iterr)
self.update_module_textview(None, None)
md = Dialog()
md.on_info(name + " deleted successfully.")
def on_NEW_MODULE_activated(self, widget, data=None):
self.create_module_window = gtk.Window()
self.create_module_window.connect("delete_event", self.on_delete_event)
self.create_module_window.set_title('New Module')
#self.create_module_window.set_size_request(440, 594)
self.create_module_window.set_border_width(16)
self.create_module_window.set_position(gtk.WIN_POS_CENTER)
self.create_module_window.set_resizable(False)
self.create_module_window.set_icon_from_file("img/epitome_icon.png")
hbox_wrapper = gtk.HBox(False, 30)
self.create_module_window_vbox = gtk.VBox(False, 0)
vbox_right = gtk.VBox(False, 0)
hbox = gtk.HBox(False, 0)
label = gtk.Label("Select Library: ")
label.set_alignment(1, 0.5)
label.set_size_request(200, 30)
hbox.pack_start(label, False, False, 10)
library_combobox = gtk.ComboBox(self.library_liststore)
library_combobox.connect("changed", self.on_library_combobox_changed)
cell = gtk.CellRendererText()
library_combobox.pack_start(cell, True)
library_combobox.add_attribute(cell, 'text', 1)
m, i = self.library_selection.get_selected()
if i is not None:
i = self.sorted_library_liststore.convert_iter_to_child_iter(None, i)
library_combobox.set_active_iter(i)
else:
md = Dialog()
md.on_warn("Please select a library.")
return None
hbox.pack_start(library_combobox, True, True, 0)
self.create_module_window_vbox.pack_start(hbox, False, False, 4)
hbox = gtk.HBox(False, 0)
label = gtk.Label("Select Category: ")
label.set_alignment(1, 0.5)
label.set_size_request(200, 30)
hbox.pack_start(label, False, False, 10)
category_combobox = gtk.ComboBox(self.catfilter)
cell = gtk.CellRendererText()
category_combobox.pack_start(cell, True)
category_combobox.add_attribute(cell, 'text', 2)
m, i = self.category_selection.get_selected()
if i is not None:
i = self.sorted_cat_liststore.convert_iter_to_child_iter(None, i)
category_combobox.set_active_iter(i)
else:
md = Dialog()
md.on_warn("Please select a category.")
return None
hbox.pack_start(category_combobox, True, True, 0)
self.create_module_window_vbox.pack_start(hbox, False, False, 4)
hbox = gtk.HBox(False, 0)
label = gtk.Label("Select Function: ")
label.set_alignment(1, 0.5)
label.set_size_request(200, 30)
hbox.pack_start(label, False, False, 10)
function_combobox = gtk.ComboBox(self.functionfilter)
cell = gtk.CellRendererText()
function_combobox.pack_start(cell, True)
function_combobox.add_attribute(cell, 'text', 3)
m, i = self.function_selection.get_selected()
if i is not None:
i = self.sorted_function_liststore.convert_iter_to_child_iter(None, i)
function_combobox.set_active_iter(i)
else:
md = Dialog()
md.on_warn("Please select a function.")
return None
hbox.pack_start(function_combobox, True, True, 0)
self.create_module_window_vbox.pack_start(hbox, False, False, 4)
self.cm_hbox3 = gtk.HBox(False, 0)
label = gtk.Label("Module Name: ")
label.set_alignment(1, 0.5)
label.set_size_request(130, 30)
self.cm_hbox3.pack_start(label, False, False, 10)
self.module_name_entry = gtk.Entry()
self.module_name_entry.set_max_length(30)
self.cm_hbox3.pack_start(self.module_name_entry, True, True, 0)
self.create_module_window_vbox.pack_start(
self.cm_hbox3, False, False, 4)
self.cm_hbox4 = gtk.HBox(False, 0)
label = gtk.Label("Suffix: ")
label.set_alignment(1, 0.5)
label.set_size_request(130, 30)
self.cm_hbox4.pack_start(label, False, False, 10)
self.suffix_entry = gtk.Entry()
self.suffix_entry.set_max_length(1)
self.cm_hbox4.pack_start(self.suffix_entry, True, True, 0)
self.create_module_window_vbox.pack_start(
self.cm_hbox4, False, False, 4)
self.cm_hbox5 = gtk.HBox(False, 0)
label = gtk.Label("Author: ")
label.set_alignment(1, 0.5)
label.set_size_request(130, 30)
self.cm_hbox5.pack_start(label, False, False, 10)
self.author_entry = gtk.Entry()
self.cm_hbox5.pack_start(self.author_entry, True, True, 0)
self.create_module_window_vbox.pack_start(
self.cm_hbox5, False, False, 4)
self.cm_hbox7 = gtk.HBox(False, 0)
label = gtk.Label("Documentation URL: ")
label.set_alignment(1, 0.5)
label.set_size_request(130, 30)
self.cm_hbox7.pack_start(label, False, False, 10)
self.doc_url_entry = gtk.Entry()
self.cm_hbox7.pack_start(self.doc_url_entry, True, True, 0)
self.create_module_window_vbox.pack_start(
self.cm_hbox7, False, False, 4)
hbox = gtk.HBox(False, 10)
label = gtk.Label("Description: ")
label.set_alignment(0, 0.5)
label.set_size_request(200, 20)
hbox.pack_start(label, False, False, 0)
vbox_right.pack_start(hbox, False, False, 4)
self.cm_hbox6 = gtk.HBox(False, 0)
self.swH = gtk.ScrolledWindow()
self.swH.set_size_request(400, 200)
self.swH.set_shadow_type(gtk.SHADOW_ETCHED_IN)
self.swH.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
self.desc_entry = gtk.TextView()
self.desc_entry.set_editable(True)
self.desc_entry.set_wrap_mode(gtk.WRAP_WORD_CHAR)
self.swH.add(self.desc_entry)
self.cm_hbox6.pack_start(self.swH, True, True, 0)
vbox_right.pack_start(self.cm_hbox6, True, True, 4)
self.space = gtk.HBox(False, 4)
self.create_module_window_vbox.pack_start(self.space, False, False, 8)
self.cm_hbox9 = gtk.HBox(False, 10)
label = gtk.Label("Command: ")
label.set_alignment(0, 0.5)
label.set_size_request(200, 20)
self.cm_hbox9.pack_start(label, False, False, 0)
self.create_module_window_vbox.pack_start(self.cm_hbox9, False, False, 4)
self.cm_hbox10 = gtk.HBox(False, 0)
self.swH2 = gtk.ScrolledWindow()
self.swH2.set_size_request(400, 100)
self.swH2.set_shadow_type(gtk.SHADOW_ETCHED_IN)
self.swH2.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
self.tBuffer = gtk.TextBuffer(None)
self.cmd_entry = gtk.TextView(self.tBuffer)
self.cmd_entry.set_editable(False)
self.cmd_entry.set_cursor_visible(False)
self.cmd_entry.set_wrap_mode(gtk.WRAP_WORD_CHAR)
self.cmd_entry.modify_font(pango.FontDescription("Monospace 10"))
self.cmd_entry.modify_bg(
self.cmd_entry.get_state(), gtk.gdk.color_parse("#444"))
self.swH2.add(self.cmd_entry)
self.cm_hbox10.pack_start(self.swH2, True, True, 0)
self.create_module_window_vbox.pack_start(self.cm_hbox10, False, False, 4)
hbox = gtk.HBox(False, 3)
button = gtk.Button("Edit Command ...")
button.connect("clicked", self.on_edit_command_clicked)
button.set_alignment(0.5, 0.5)
button.set_size_request(200, 30)
hbox.pack_end(button, False, False, 0)
self.create_module_window_vbox.pack_start(hbox, False, False, 4)
# Create HBox wrapper for lower content
self.create_module_window_hbox = gtk.HBox(True, 3)
self.create_module_window_hbox.set_size_request(400, 30)
self.cancel = gtk.Button("Cancel")
self.cancel.set_size_request(200, 30)
self.cancel.connect(
"clicked", self.on_create_module_window_cancel_clicked)
#close_without_saving.connect("clicked", self.save_and_close_unsaved_changes_window)
self.add = gtk.Button("Create Module")
self.add.set_size_request(200, 30)
self.add.connect("clicked", self.on_create_module_clicked,
(library_combobox, category_combobox, function_combobox))
# pack buttons
self.create_module_window_hbox.add(self.cancel)
self.create_module_window_hbox.add(self.add)
vbox_right.pack_start(self.create_module_window_hbox, False, False, 4)
hbox_wrapper.pack_start(
self.create_module_window_vbox, False, False, 0)
hbox_wrapper.pack_start(vbox_right, False, False, 0)
self.create_module_window.add(hbox_wrapper)
self.create_module_window.show_all()
def on_edit_command_clicked(self, widget):
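        """Open the current command text in a CmdEditor window; the edited
        command is copied back into the buffer via set_cmd on refocus."""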
start, end = self.tBuffer.get_bounds()
ce = CmdEditor(self.tBuffer.get_text(start, end, False))
self.create_module_window.connect("set-focus", self.set_cmd, ce)
def set_cmd(self, window, widget, ce):
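        """Copy the edited command from the CmdEditor back into the command buffer."""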
self.tBuffer.set_text(ce.cmd)
def on_create_module_window_cancel_clicked(self, widget, data=None):
self.create_module_window.destroy()
def on_create_module_clicked(self, widget, combobox):
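        """Validate the create-module form, then build, save and select the new Module."""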
error = 0
msg = "Module creation failed. Please enter the following:"
if combobox[0].get_active_iter() is None:
error = 1
msg += "\n- Library"
if combobox[1].get_active_iter() is None:
error = 1
msg += "\n- Category"
if combobox[2].get_active_iter() is None:
error = 1
msg += "\n- Function"
if self.module_name_entry.get_text_length() == 0:
error = 1
msg += "\n- Module Name"
if self.suffix_entry.get_text_length() == 0:
error = 1
msg += "\n- Suffix"
if self.author_entry.get_text_length() == 0:
error = 1
msg += "\n- Author"
if self.desc_entry.get_buffer().get_char_count() == 0:
error = 1
msg += "\n- Description"
if self.cmd_entry.get_buffer().get_char_count() == 0:
error = 1
msg += "\n- Command"
if error == 1:
md = Dialog()
md.on_warn(msg)
else:
descbuff = self.desc_entry.get_buffer()
desc_start, desc_end = descbuff.get_bounds()
cmdbuff = self.cmd_entry.get_buffer()
cmd_start, cmd_end = cmdbuff.get_bounds()
name = self.module_name_entry.get_text()
lib = self.library_liststore.get_value(combobox[0].get_active_iter(), 1)
cat = self.catfilter.get_value(combobox[1].get_active_iter(), 2)
func = self.functionfilter.get_value(combobox[2].get_active_iter(), 3)
url = self.doc_url_entry.get_text()
module = Module(lib,
cat,
func,
name,
self.suffix_entry.get_text(),
self.author_entry.get_text(),
url,
descbuff.get_text(desc_start, desc_end),
cmdbuff.get_text(cmd_start, cmd_end))
iterr = self.module_liststore.append([lib, cat, func, name, module.filename])
iterr = self.modulefilter.convert_child_iter_to_iter(iterr)
iterr = self.sorted_module_liststore.convert_child_iter_to_iter(None, iterr)
self.module_selection.select_iter(iterr)
md = Dialog()
self.create_module_window.destroy()
md.on_info("Module created successfully.")
module.save()
self.update_module_textview(None, None)
def on_EDIT_MODULE_activated(self, widget, data=None):
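        """Load the selected module from disk and open the module window pre-filled for editing."""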
op = Open()
#pdb.set_trace()
model, iterr = self.module_treeview.get_selection().get_selected()
iterr = self.sorted_module_liststore.convert_iter_to_child_iter(None, iterr)
module_edit = op.open_module(self.modulefilter.get_value(iterr, 4))
self.create_module_window = gtk.Window()
self.create_module_window.connect("delete_event", self.on_delete_event)
self.create_module_window.set_title('Edit ' + module_edit.name)
#self.create_module_window.set_size_request(440, 594)
self.create_module_window.set_border_width(16)
self.create_module_window.set_position(gtk.WIN_POS_CENTER)
self.create_module_window.set_resizable(False)
self.create_module_window.set_icon_from_file("img/epitome_icon.png")
hbox_wrapper = gtk.HBox(False, 30)
self.create_module_window_vbox = gtk.VBox(False, 0)
vbox_right = gtk.VBox(False, 0)
hbox = gtk.HBox(False, 0)
label = gtk.Label("Select Library: ")
label.set_alignment(1, 0.5)
label.set_size_request(200, 30)
hbox.pack_start(label, False, False, 10)
library_combobox = gtk.ComboBox(self.library_liststore)
library_combobox.connect("changed", self.on_library_combobox_changed)
cell = gtk.CellRendererText()
library_combobox.pack_start(cell, True)
library_combobox.add_attribute(cell, 'text', 1)
m, i = self.library_selection.get_selected()
if i is not None:
i = self.sorted_library_liststore.convert_iter_to_child_iter(None, i)
library_combobox.set_active_iter(i)
else:
md = Dialog()
md.on_warn("Please select a library.")
return None
hbox.pack_start(library_combobox, True, True, 0)
self.create_module_window_vbox.pack_start(hbox, False, False, 4)
hbox = gtk.HBox(False, 0)
label = gtk.Label("Select Category: ")
label.set_alignment(1, 0.5)
label.set_size_request(200, 30)
hbox.pack_start(label, False, False, 10)
category_combobox = gtk.ComboBox(self.catfilter)
cell = gtk.CellRendererText()
category_combobox.pack_start(cell, True)
category_combobox.add_attribute(cell, 'text', 2)
m, i = self.category_selection.get_selected()
if i is not None:
i = self.sorted_cat_liststore.convert_iter_to_child_iter(None, i)
category_combobox.set_active_iter(i)
else:
md = Dialog()
md.on_warn("Please select a category.")
return None
hbox.pack_start(category_combobox, True, True, 0)
self.create_module_window_vbox.pack_start(hbox, False, False, 4)
hbox = gtk.HBox(False, 0)
label = gtk.Label("Select Function: ")
label.set_alignment(1, 0.5)
label.set_size_request(200, 30)
hbox.pack_start(label, False, False, 10)
function_combobox = gtk.ComboBox(self.functionfilter)
cell = gtk.CellRendererText()
function_combobox.pack_start(cell, True)
function_combobox.add_attribute(cell, 'text', 3)
m, i = self.function_selection.get_selected()
if i is not None:
i = self.sorted_function_liststore.convert_iter_to_child_iter(None, i)
function_combobox.set_active_iter(i)
else:
md = Dialog()
md.on_warn("Please select a function.")
return None
hbox.pack_start(function_combobox, True, True, 0)
self.create_module_window_vbox.pack_start(hbox, False, False, 4)
self.cm_hbox3 = gtk.HBox(False, 0)
label = gtk.Label("Module Name: ")
label.set_alignment(1, 0.5)
label.set_size_request(130, 30)
self.cm_hbox3.pack_start(label, False, False, 10)
self.module_name_entry = gtk.Entry()
self.module_name_entry.set_text(module_edit.name)
self.module_name_entry.set_max_length(30)
self.cm_hbox3.pack_start(self.module_name_entry, True, True, 0)
self.create_module_window_vbox.pack_start(
self.cm_hbox3, False, False, 4)
self.cm_hbox4 = gtk.HBox(False, 0)
label = gtk.Label("Suffix: ")
label.set_alignment(1, 0.5)
label.set_size_request(130, 30)
self.cm_hbox4.pack_start(label, False, False, 10)
self.suffix_entry = gtk.Entry()
self.suffix_entry.set_text(module_edit.suffix)
self.suffix_entry.set_max_length(1)
self.cm_hbox4.pack_start(self.suffix_entry, True, True, 0)
self.create_module_window_vbox.pack_start(
self.cm_hbox4, False, False, 4)
self.cm_hbox5 = gtk.HBox(False, 0)
label = gtk.Label("Author: ")
label.set_alignment(1, 0.5)
label.set_size_request(130, 30)
self.cm_hbox5.pack_start(label, False, False, 10)
self.author_entry = gtk.Entry()
self.author_entry.set_text(module_edit.author)
self.cm_hbox5.pack_start(self.author_entry, True, True, 0)
self.create_module_window_vbox.pack_start(
self.cm_hbox5, False, False, 4)
self.cm_hbox7 = gtk.HBox(False, 0)
label = gtk.Label("Documentation URL: ")
label.set_alignment(1, 0.5)
label.set_size_request(130, 30)
self.cm_hbox7.pack_start(label, False, False, 10)
self.doc_url_entry = gtk.Entry()
self.doc_url_entry.set_text(module_edit.url)
self.cm_hbox7.pack_start(self.doc_url_entry, True, True, 0)
self.create_module_window_vbox.pack_start(
self.cm_hbox7, False, False, 4)
hbox = gtk.HBox(False, 10)
label = gtk.Label("Description: ")
label.set_alignment(0, 0.5)
label.set_size_request(200, 20)
hbox.pack_start(label, False, False, 0)
vbox_right.pack_start(hbox, False, False, 4)
self.cm_hbox6 = gtk.HBox(False, 0)
self.swH = gtk.ScrolledWindow()
self.swH.set_size_request(400, 200)
self.swH.set_shadow_type(gtk.SHADOW_ETCHED_IN)
self.swH.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
self.desc_entry = gtk.TextView()
bufferr = gtk.TextBuffer(None)
bufferr.set_text(module_edit.desc)
self.desc_entry.set_buffer(bufferr)
self.desc_entry.set_editable(True)
self.desc_entry.set_wrap_mode(gtk.WRAP_WORD_CHAR)
self.swH.add(self.desc_entry)
self.cm_hbox6.pack_start(self.swH, True, True, 0)
vbox_right.pack_start(self.cm_hbox6, True, True, 4)
self.space = gtk.HBox(False, 4)
self.create_module_window_vbox.pack_start(self.space, False, False, 8)
self.cm_hbox9 = gtk.HBox(False, 10)
label = gtk.Label("Command: ")
label.set_alignment(0, 0.5)
label.set_size_request(200, 20)
self.cm_hbox9.pack_start(label, False, False, 0)
self.create_module_window_vbox.pack_start(self.cm_hbox9, False, False, 4)
self.cm_hbox10 = gtk.HBox(False, 0)
self.swH2 = gtk.ScrolledWindow()
self.swH2.set_size_request(400, 100)
self.swH2.set_shadow_type(gtk.SHADOW_ETCHED_IN)
self.swH2.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
self.tBuffer = gtk.TextBuffer(None)
self.tBuffer.set_text(module_edit.cmd)
self.cmd_entry = gtk.TextView(self.tBuffer)
self.cmd_entry.set_editable(False)
self.cmd_entry.set_cursor_visible(False)
self.cmd_entry.set_wrap_mode(gtk.WRAP_WORD_CHAR)
self.cmd_entry.modify_font(pango.FontDescription("Monospace 10"))
self.cmd_entry.modify_bg(
self.cmd_entry.get_state(), gtk.gdk.color_parse("#444"))
self.swH2.add(self.cmd_entry)
self.cm_hbox10.pack_start(self.swH2, True, True, 0)
self.create_module_window_vbox.pack_start(self.cm_hbox10, False, False, 4)
hbox = gtk.HBox(False, 3)
button = gtk.Button("Edit Command ...")
button.connect("clicked", self.on_edit_command_clicked)
button.set_alignment(0.5, 0.5)
button.set_size_request(200, 30)
hbox.pack_end(button, False, False, 0)
self.create_module_window_vbox.pack_start(hbox, False, False, 4)
# Create HBox wrapper for lower content
self.create_module_window_hbox = gtk.HBox(True, 3)
self.create_module_window_hbox.set_size_request(400, 30)
self.cancel = gtk.Button("Cancel")
self.cancel.set_size_request(200, 30)
self.cancel.connect(
"clicked", self.on_create_module_window_cancel_clicked)
#close_without_saving.connect("clicked", self.save_and_close_unsaved_changes_window)
self.save = gtk.Button("Save Module")
self.save.set_size_request(200, 30)
self.save.connect("clicked", self.on_save_module_clicked,
(library_combobox, category_combobox, function_combobox, module_edit, iterr))
# pack buttons
self.create_module_window_hbox.add(self.cancel)
self.create_module_window_hbox.add(self.save)
vbox_right.pack_start(self.create_module_window_hbox, False, False, 4)
hbox_wrapper.pack_start(
self.create_module_window_vbox, False, False, 0)
hbox_wrapper.pack_start(vbox_right, False, False, 0)
self.create_module_window.add(hbox_wrapper)
self.create_module_window.show_all()
def on_save_module_clicked(self, widget, data):
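        """Validate the edit form and write the changes back to the Module (data[3]);
        the liststore row is updated, the .mod file re-saved and the old file
        removed if the module was renamed."""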
module = data[3]
iterr = self.sorted_module_liststore.convert_iter_to_child_iter(None, data[4])
iterr = self.modulefilter.convert_iter_to_child_iter(iterr)
error = 0
msg = "Module creation failed. Please enter the following:"
if data[0].get_active_iter() is None:
error = 1
msg += "\n- Library"
if data[1].get_active_iter() is None:
error = 1
msg += "\n- Category"
if data[2].get_active_iter() is None:
error = 1
msg += "\n- Function"
if self.module_name_entry.get_text_length() == 0:
error = 1
msg += "\n- Module Name"
if self.suffix_entry.get_text_length() == 0:
error = 1
msg += "\n- Suffix"
if self.author_entry.get_text_length() == 0:
error = 1
msg += "\n- Author"
if self.desc_entry.get_buffer().get_char_count() == 0:
error = 1
msg += "\n- Description"
if self.cmd_entry.get_buffer().get_char_count() == 0:
error = 1
msg += "\n- Command"
if error == 1:
md = Dialog()
md.on_warn(msg)
else:
descbuff = self.desc_entry.get_buffer()
desc_start, desc_end = descbuff.get_bounds()
cmdbuff = self.cmd_entry.get_buffer()
cmd_start, cmd_end = cmdbuff.get_bounds()
name = self.module_name_entry.get_text()
lib = self.library_liststore.get_value(data[0].get_active_iter(), 1)
cat = self.catfilter.get_value(data[1].get_active_iter(), 2)
func = self.functionfilter.get_value(data[2].get_active_iter(), 3)
url = self.doc_url_entry.get_text()
module.library = lib
module.category = cat
module.function = func
module.name = name
module.suffix = self.suffix_entry.get_text()
module.author = self.author_entry.get_text()
module.url = url
module.desc = descbuff.get_text(desc_start, desc_end)
module.cmd = cmdbuff.get_text(cmd_start, cmd_end)
last_filename = module.filename
module.filename = expanduser("~") + '/epitome/modules/' + name + '.mod'
self.module_liststore.set(iterr, 0, lib, 1, cat, 2, func, 3, name, 4, module.filename)
md = Dialog()
self.create_module_window.destroy()
md.on_info("Module " + name + ".mod updated successfully.")
module.save()
if last_filename != module.filename:
subprocess.call(['rm', last_filename])
self.update_module_textview(None, None)
def on_DELETE_MODULE_activated(self, widget, data=None):
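        """Ask for confirmation, then delete the selected module's .mod file and its liststore row."""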
model, iterr = self.module_treeview.get_selection().get_selected()
iterr = self.sorted_module_liststore.convert_iter_to_child_iter(None, iterr)
iterr = self.modulefilter.convert_iter_to_child_iter(iterr)
name = self.module_liststore.get_value(iterr, 3)
filename = self.module_liststore.get_value(iterr, 4)
md = Dialog()
response = md.on_confirmation("Are you sure you wish to delete " + name + ".mod?", "Delete " + name + ".mod")
if response == gtk.RESPONSE_ACCEPT:
subprocess.call(['rm', filename])
self.module_liststore.remove(iterr)
md.on_info("Successfully removed " + name + ".mod from library.")
def __init__(self):
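        """Build the Library Manager window: per-column popup menus, the four
        filtered tree views (library, category, function, module) and the
        module preview pane."""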
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.connect("delete_event", self.exit)
self.window.set_title('Library Manager')
self.window.set_size_request(620, 560)
self.window.set_border_width(0)
self.window.set_icon_from_file("img/epitome_icon.png")
main_vbox = gtk.VBox(False, 1)
'''
Library popup menu
'''
library_popup_menu = gtk.Menu()
new = gtk.MenuItem("New Library")
new.connect("activate", self.on_NEW_LIBRARY_activated)
new.show()
edit = gtk.MenuItem("Edit Library...")
edit.connect("activate", self.on_EDIT_LIBRARY_activated)
edit.show()
delete = gtk.MenuItem("Delete Library")
delete.connect("activate", self.on_DELETE_LIBRARY_activated)
delete.show()
library_popup_menu.append(new)
library_popup_menu.append(edit)
library_popup_menu.append(delete)
category_popup_menu = gtk.Menu()
new = gtk.MenuItem("New Category")
new.connect("activate", self.on_NEW_CATEGORY_activated)
new.show()
edit = gtk.MenuItem("Edit Category...")
edit.connect("activate", self.on_EDIT_CATEGORY_activated)
edit.show()
delete = gtk.MenuItem("Delete Category")
delete.connect("activate", self.on_DELETE_CATEGORY_activated)
delete.show()
category_popup_menu.append(new)
category_popup_menu.append(edit)
category_popup_menu.append(delete)
function_popup_menu = gtk.Menu()
new = gtk.MenuItem("New Function")
new.connect("activate", self.on_NEW_FUNCTION_activated)
new.show()
edit = gtk.MenuItem("Edit Function...")
edit.connect("activate", self.on_EDIT_FUNCTION_activated)
edit.show()
delete = gtk.MenuItem("Delete Function")
delete.connect("activate", self.on_DELETE_FUNCTION_activated)
delete.show()
function_popup_menu.append(new)
function_popup_menu.append(edit)
function_popup_menu.append(delete)
module_popup_menu = gtk.Menu()
new = gtk.MenuItem("New Module")
new.connect("activate", self.on_NEW_MODULE_activated)
new.show()
edit = gtk.MenuItem("Edit Module...")
edit.connect("activate", self.on_EDIT_MODULE_activated)
edit.show()
delete = gtk.MenuItem("Delete Module")
delete.connect("activate", self.on_DELETE_MODULE_activated)
delete.show()
module_popup_menu.append(new)
module_popup_menu.append(edit)
module_popup_menu.append(delete)
self.vpane = gtk.VPaned()
self.treeview_hbox = gtk.HBox(True, 6)
'''
Library Column
'''
self.swH0 = gtk.ScrolledWindow()
self.swH0.set_shadow_type(gtk.SHADOW_ETCHED_IN)
self.swH0.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.library_liststore = gtk.ListStore(int, str)
ls = Library("library")
data = ls.read()
for library in data:
Id = library[0]
library = library[1].encode('ascii', 'ignore')
self.library_liststore.append([Id, library])
self.sorted_library_liststore = gtk.TreeModelSort(self.library_liststore)
self.sorted_library_liststore.set_sort_column_id(0, gtk.SORT_ASCENDING)
self.library_treeview = gtk.TreeView(self.sorted_library_liststore)
# self.library_treeview.set_show_expanders(False)
self.library_selection = self.library_treeview.get_selection()
self.library_selection.connect("changed", self.refilter_library)
# create col 0 to display Pipeline Object name
self.library_column = gtk.TreeViewColumn('Library')
# append columns to TreeView
self.library_treeview.append_column(self.library_column)
# create a CellRendererText to render the data
self.cell0 = gtk.CellRendererText()
self.library_column.pack_start(self.cell0, True)
self.library_column.add_attribute(self.cell0, 'text', 1)
# allow search on columns
# self.project_treeview.set_search_column(0)
# self.treeview.set_search_column(1)
# self.treeview.set_search_column(2)
# Allow sorting on both columns
# self.project_column.set_sort_column_id(0)
self.library_column.set_sort_column_id(1)
self.library_treeview.connect(
"button-press-event", self.menu_activated, library_popup_menu)
self.swH0.add(self.library_treeview)
'''
Category Column
'''
self.swH1 = gtk.ScrolledWindow()
self.swH1.set_shadow_type(gtk.SHADOW_ETCHED_IN)
self.swH1.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.category_liststore = gtk.ListStore(int, str, str)
cls = Library("category")
data = cls.read()
for category in data:
Id = category[0]
library = category[1].encode('ascii', 'ignore')
cat = category[2].encode('ascii', 'ignore')
self.category_liststore.append([Id, library, cat])
self.catfilter = self.category_liststore.filter_new()
self.catfilter.set_visible_func(self.visible_cat)
self.sorted_cat_liststore = gtk.TreeModelSort(self.catfilter)
self.sorted_cat_liststore.set_sort_column_id(0, gtk.SORT_ASCENDING)
self.category_treeview = gtk.TreeView(self.sorted_cat_liststore)
self.category_selection = self.category_treeview.get_selection()
self.category_selection.connect("changed", self.refilter_category)
# create col 0 to display Pipeline Object name
self.category_column = gtk.TreeViewColumn('Category')
# append columns to TreeView
self.category_treeview.append_column(self.category_column)
# create a CellRendererText to render the data
self.cell1 = gtk.CellRendererText()
self.category_column.pack_start(self.cell1, True)
self.category_column.add_attribute(self.cell1, 'text', 2)
# allow search on columns
# self.treeview.set_search_column(1)
# self.treeview.set_search_column(2)
# Allow sorting on both columns
self.category_column.set_sort_column_id(2)
self.category_treeview.connect(
"button-press-event", self.menu_activated, category_popup_menu)
self.swH1.add(self.category_treeview)
'''
Function Column
'''
self.swH2 = gtk.ScrolledWindow()
self.swH2.set_shadow_type(gtk.SHADOW_ETCHED_IN)
self.swH2.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.function_liststore = gtk.ListStore(int, str, str, str)
fls = Library("function")
data = fls.read()
for tup in data:
Id = tup[0]
library = tup[1].encode('ascii', 'ignore')
category = tup[2].encode('ascii', 'ignore')
function = tup[3].encode('ascii', 'ignore')
self.function_liststore.append([Id, library, category, function])
self.functionfilter = self.function_liststore.filter_new()
self.functionfilter.set_visible_func(self.visible_function)
self.sorted_function_liststore = gtk.TreeModelSort(self.functionfilter)
self.sorted_function_liststore.set_sort_column_id(0, gtk.SORT_ASCENDING)
self.function_treeview = gtk.TreeView(self.sorted_function_liststore)
self.function_selection = self.function_treeview.get_selection()
self.function_selection.connect("changed", self.refilter_function)
# create col 0 to display Pipeline Object name
self.function_column = gtk.TreeViewColumn('Function')
# append columns to TreeView
self.function_treeview.append_column(self.function_column)
# create a CellRendererText to render the data
self.cell2 = gtk.CellRendererText()
self.function_column.pack_start(self.cell2, True)
self.function_column.add_attribute(self.cell2, 'text', 3)
# allow search on columns
self.function_treeview.set_search_column(3)
# Allow sorting on both columns
self.function_column.set_sort_column_id(3)
self.function_treeview.connect(
"button-press-event", self.menu_activated, function_popup_menu)
self.swH2.add(self.function_treeview)
'''
Module Column
'''
self.swH3 = gtk.ScrolledWindow()
self.swH3.set_shadow_type(gtk.SHADOW_ETCHED_IN)
self.swH3.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.module_liststore = gtk.ListStore(str, str, str, str, str)
        # Scan the user's epitome module directory for saved .mod files.
        modules_dir = expanduser("~") + '/epitome/modules/'
        for root, dirs, files in os.walk(modules_dir):
            for filepath in files:
                op = Open()
                module = op.open_module(modules_dir + filepath)
self.module_liststore.append(
[module.library, module.category, module.function, module.name, module.filename])
self.modulefilter = self.module_liststore.filter_new()
self.modulefilter.set_visible_func(self.visible_mod)
self.sorted_module_liststore = gtk.TreeModelSort(self.modulefilter)
self.sorted_module_liststore.set_sort_column_id(0, gtk.SORT_ASCENDING)
self.module_treeview = gtk.TreeView(self.sorted_module_liststore)
self.module_selection = self.module_treeview.get_selection()
self.module_selection.connect("changed", self.update_module_textview)
# create col 0 to display Pipeline Object name
self.module_column = gtk.TreeViewColumn('Module')
# append columns to TreeView
self.module_treeview.append_column(self.module_column)
# create a CellRendererText to render the data
self.cell3 = gtk.CellRendererText()
self.module_column.pack_start(self.cell3, True)
self.module_column.add_attribute(self.cell3, 'text', 3)
# allow search on columns
self.module_treeview.set_search_column(3)
# self.treeview.set_search_column(1)
# self.treeview.set_search_column(2)
# Allow sorting on both columns
self.module_column.set_sort_column_id(3)
self.module_treeview.connect(
"button-press-event", self.menu_activated, module_popup_menu)
self.swH3.add(self.module_treeview)
self.treeview_hbox.pack_start(self.swH0, True, True, 0)
self.treeview_hbox.pack_start(self.swH1, True, True, 0)
self.treeview_hbox.pack_start(self.swH2, True, True, 0)
self.treeview_hbox.pack_start(self.swH3, True, True, 0)
'''
Label
'''
#self.label = gtk.Label("Module Description")
'''
TextView
'''
self.textview_hbox = gtk.HBox(False, 0)
self.textview_hbox.set_size_request(500, 200)
self.swH3 = gtk.ScrolledWindow()
self.swH3.set_shadow_type(gtk.SHADOW_ETCHED_IN)
self.swH3.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
self.buffer = gtk.TextBuffer(None)
self.buffer.set_text("(no module selected)")
self.textview = gtk.TextView(self.buffer)
self.textview.set_border_width(10)
self.textview.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse("white"))
self.textview.set_wrap_mode(gtk.WRAP_WORD_CHAR)
self.textview.set_justification(gtk.JUSTIFY_FILL)
self.swH3.add(self.textview)
self.textview_hbox.pack_start(self.swH3, True, True, 0)
self.vpane.add1(self.treeview_hbox)
#main_vbox.pack_start(self.label, False, False, 5)
self.vpane.add2(self.textview_hbox)
self.vpane.set_position(160)
self.vpane.set_border_width(6)
main_vbox.pack_start(self.vpane, True, True, 0)
self.window.add(main_vbox)
self.window.show_all()
def menu_activated(self, widget, event, menu):
# right click
if event.button == 3:
menu.popup(None, None, None, event.button,
event.get_time(), data=None)
pass
def visible_cat(self, model, iterr, data=None):
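        """Filter callback: show a category only if it belongs to the selected library."""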
treeselection = self.library_treeview.get_selection()
if treeselection.count_selected_rows() > 0:
m, i = treeselection.get_selected()
i = self.sorted_library_liststore.convert_iter_to_child_iter(None, i)
if(model.get_value(iterr, 1) == self.library_liststore.get_value(i, 1)):
return True
return False
def visible_function(self, model, iterr, data=None):
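        """Filter callback: show a function only if it matches the selected library and category."""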
lib_treeselection = self.library_treeview.get_selection()
cat_treeselection = self.category_treeview.get_selection()
if lib_treeselection.count_selected_rows() > 0 and cat_treeselection.count_selected_rows() > 0:
m, i = lib_treeselection.get_selected()
i = self.sorted_library_liststore.convert_iter_to_child_iter(None, i)
n, j = cat_treeselection.get_selected()
j = self.sorted_cat_liststore.convert_iter_to_child_iter(None, j)
if model.get_value(iterr, 1) == self.library_liststore.get_value(i, 1) and model.get_value(iterr, 2) == self.catfilter.get_value(j, 2):
return True
return False
def visible_mod(self, model, iterr, data=None):
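        """Filter callback: show a module only if it matches the selected library, category and function."""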
lib_treeselection = self.library_treeview.get_selection()
cat_treeselection = self.category_treeview.get_selection()
function_treeselection = self.function_treeview.get_selection()
if lib_treeselection.count_selected_rows() > 0 and cat_treeselection.count_selected_rows() > 0 and function_treeselection.count_selected_rows() > 0:
m, i = lib_treeselection.get_selected()
i = self.sorted_library_liststore.convert_iter_to_child_iter(None, i)
n, j = cat_treeselection.get_selected()
j = self.sorted_cat_liststore.convert_iter_to_child_iter(None, j)
o, k = function_treeselection.get_selected()
k = self.sorted_function_liststore.convert_iter_to_child_iter(None, k)
if model.get_value(iterr, 0) == self.library_liststore.get_value(i, 1) and model.get_value(iterr, 1) == self.catfilter.get_value(j, 2) and model.get_value(iterr, 2) == self.functionfilter.get_value(k, 3):
return True
return False
def update_module_textview(self, widget, data=None):
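        """Reload the selected module from ~/epitome/modules/ and render its
        breadcrumb, metadata, description, documentation link and command into
        the preview buffer; show a placeholder when nothing is selected."""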
selection = self.module_treeview.get_selection()
m, i = selection.get_selected()
if i is not None:
i = self.sorted_module_liststore.convert_iter_to_child_iter(None, i)
name = self.modulefilter.get_value(i, 3)
try:
filehandler = open(
expanduser("~") + '/epitome/modules/' + name + '.mod', 'rb')
module = dill.load(filehandler)
self.buffer.set_text('')
# pango tags
bold = self.buffer.create_tag(weight=pango.WEIGHT_BOLD)
title = self.buffer.create_tag(size=18000)
code = self.buffer.create_tag(font="Monospace 10")
crumbs = self.buffer.create_tag(font="Italic 8")
self.buffer.insert_with_tags(self.buffer.get_start_iter(
), module.library + ' > ' + module.category + ' > ' + module.function + '\n\n', crumbs)
start, end = self.buffer.get_bounds()
self.buffer.insert_with_tags(end, name + '\n', bold, title)
start, end = self.buffer.get_bounds()
self.buffer.insert_with_tags(end, 'Author: ', bold)
start, end = self.buffer.get_bounds()
self.buffer.insert(end, module.author + '\n')
start, end = self.buffer.get_bounds()
self.buffer.insert_with_tags(end, 'Date Created: ', bold)
start, end = self.buffer.get_bounds()
self.buffer.insert(end, module.date_created + '\t')
start, end = self.buffer.get_bounds()
self.buffer.insert_with_tags(end, 'Last Edited: ', bold)
start, end = self.buffer.get_bounds()
self.buffer.insert(end, module.date_updated + '\n')
start, end = self.buffer.get_bounds()
self.buffer.insert_with_tags(end, '\nSuffix: ', bold)
start, end = self.buffer.get_bounds()
self.buffer.insert(end, module.suffix + '\n\n')
start, end = self.buffer.get_bounds()
self.buffer.insert_with_tags(end, 'Description:\n', bold)
start, end = self.buffer.get_bounds()
self.buffer.insert(end, module.desc + '\n\n')
start, end = self.buffer.get_bounds()
anchor = self.buffer.create_child_anchor(end)
button = gtk.LinkButton(
module.url, "Click here to access online resource...")
button.show()
self.textview.add_child_at_anchor(button, anchor)
start, end = self.buffer.get_bounds()
self.buffer.insert_with_tags(end, '\n\nCommand:\n', bold)
start, end = self.buffer.get_bounds()
self.buffer.insert_with_tags(end, module.cmd + '\n\n', code)
            except Exception:
                # The module file could not be loaded; leave the preview unchanged.
                pass
else:
self.buffer.set_text("(no module selected)")
def main():
gtk.main()
if __name__ == "__main__":
win = LibraryManager()
main()
| gpl-3.0 |
stackforge/tricircle | tricircle/api/app.py | 1 | 2566 | # Copyright (c) 2015 Huawei, Tech. Co,. Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from oslo_config import cfg
from tricircle.common.i18n import _
from tricircle.common import restapp
common_opts = [
cfg.IPOpt('bind_host', default='0.0.0.0',
help=_("The host IP to bind to")),
cfg.PortOpt('bind_port', default=19999,
help=_("The port to bind to")),
cfg.IntOpt('api_workers', default=1,
help=_("number of api workers")),
cfg.StrOpt('api_extensions_path', default="",
help=_("The path for API extensions")),
cfg.StrOpt('auth_strategy', default='keystone',
help=_("The type of authentication to use")),
cfg.BoolOpt('allow_bulk', default=True,
help=_("Allow the usage of the bulk API")),
cfg.BoolOpt('allow_pagination', default=False,
help=_("Allow the usage of the pagination")),
cfg.BoolOpt('allow_sorting', default=False,
help=_("Allow the usage of the sorting")),
cfg.IntOpt('pagination_max_limit', min=1, default=2000,
help=_("The maximum number of items returned in a single "
"response, value must be greater or equal to 1")),
]
def setup_app(*args, **kwargs):
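    """Build the pecan WSGI application for the Tricircle API, wrapped by restapp.auth_app."""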
config = {
'server': {
'port': cfg.CONF.bind_port,
'host': cfg.CONF.bind_host
},
'app': {
'root': 'tricircle.api.controllers.root.RootController',
'modules': ['tricircle.api'],
'errors': {
400: '/error',
'__force_dict__': True
}
}
}
pecan_config = pecan.configuration.conf_from_dict(config)
# app_hooks = [], hook collection will be put here later
app = pecan.make_app(
pecan_config.app.root,
debug=False,
wrap_app=restapp.auth_app,
force_canonical=False,
hooks=[],
guess_content_type_from_ext=True
)
return app
| apache-2.0 |
ravindrapanda/tensorflow | tensorflow/contrib/boosted_trees/lib/learner/batch/ordinal_split_handler_test.py | 15 | 47753 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for checking stats accumulator related ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.boosted_trees.lib.learner.batch import ordinal_split_handler
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.proto import split_info_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import googletest
def get_empty_tensors(gradient_shape, hessian_shape):
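  """Build placeholder 'empty' gradient/hessian tensors for the given shapes,
  passed as the unused empty_gradients/empty_hessians test inputs."""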
empty_hess_shape = [1] + hessian_shape.as_list()
empty_grad_shape = [1] + gradient_shape.as_list()
empty_gradients = constant_op.constant(
[], dtype=dtypes.float32, shape=empty_grad_shape)
empty_hessians = constant_op.constant(
[], dtype=dtypes.float32, shape=empty_hess_shape)
return empty_gradients, empty_hessians
class DenseSplitHandlerTest(test_util.TensorFlowTestCase):
def testGenerateFeatureSplitCandidates(self):
with self.test_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Dense Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | 1 |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 0.13) | 1 | 1 |
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
class_id = -1
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
epsilon=0.001,
num_quantiles=10,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(1.2 - 0.1) / (0.2 + 1)
expected_left_weight = -0.91666
# expected_left_weight * -(1.2 - 0.1)
expected_left_gain = 1.0083333333333331
      # -(-0.5 + 0.2 + 0.1) / (0.19 + 1)
      expected_right_weight = 0.1680672
      # expected_right_weight * -(-0.5 + 0.2 + 0.1)
expected_right_gain = 0.033613445378151252
# (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
expected_bias_gain = 0.46043165467625885
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0],
0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.3, split_node.threshold, 0.00001)
# Check the split on partition 1.
# (-4 + 0.1) / (0.13 + 1)
expected_left_weight = -3.4513274336283186
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_left_gain = 13.460176991150442
expected_right_weight = 0
expected_right_gain = 0
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_bias_gain = 13.460176991150442
# Verify candidate for partition 1, there's only one active bucket here
# so zero gain is expected.
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
self.assertAllClose(0.0, gains[1], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.52, split_node.threshold, 0.00001)
def testGenerateFeatureSplitCandidatesMulticlassFullHessian(self):
with self.test_session() as sess:
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
# Batch size is 4, 2 gradients per each instance.
gradients = array_ops.constant(
[[0.2, 0.1], [-0.5, 0.2], [1.2, 3.4], [4.0, -3.5]], shape=[4, 2])
# 2x2 matrix for each instance
hessian_0 = [[0.12, 0.02], [0.3, 0.11]]
hessian_1 = [[0.07, -0.2], [-0.5, 0.2]]
hessian_2 = [[0.2, -0.23], [-0.8, 0.9]]
hessian_3 = [[0.13, -0.3], [-1.5, 2.2]]
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
class_id = -1
gradient_shape = tensor_shape.TensorShape([2])
hessian_shape = tensor_shape.TensorShape([2, 2])
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
epsilon=0.001,
num_quantiles=3,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
# Each leaf has 2 element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.3, split_node.threshold, 1e-6)
def testGenerateFeatureSplitCandidatesMulticlassDiagonalHessian(self):
with self.test_session() as sess:
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
# Batch size is 4, 2 gradients per each instance.
gradients = array_ops.constant(
[[0.2, 0.1], [-0.5, 0.2], [1.2, 3.4], [4.0, -3.5]], shape=[4, 2])
# Each hessian is a diagonal of a full hessian matrix.
hessian_0 = [0.12, 0.11]
hessian_1 = [0.07, 0.2]
hessian_2 = [0.2, 0.9]
hessian_3 = [0.13, 2.2]
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
class_id = -1
gradient_shape = tensor_shape.TensorShape([2])
hessian_shape = tensor_shape.TensorShape([2])
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
epsilon=0.001,
num_quantiles=3,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
# Each leaf has 2 element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.3, split_node.threshold, 1e-6)
def testGenerateFeatureSplitCandidatesInactive(self):
with self.test_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Dense Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | 1 |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 0.13) | 1 | 1 |
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
epsilon=0.001,
num_quantiles=10,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, False]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([False, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
# The handler was inactive, so it shouldn't return any splits.
self.assertEqual(len(partitions), 0)
self.assertEqual(len(gains), 0)
self.assertEqual(len(splits), 0)
def testGenerateFeatureSplitCandidatesWithTreeComplexity(self):
with self.test_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Dense Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | 1 |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 0.13) | 1 | 1 |
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0.5,
min_node_weight=0,
epsilon=0.001,
num_quantiles=10,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(1.2 - 0.1) / (0.2 + 1)
expected_left_weight = -0.91666
# expected_left_weight * -(1.2 - 0.1)
expected_left_gain = 1.0083333333333331
      # -(-0.5 + 0.2 + 0.1) / (0.19 + 1)
      expected_right_weight = 0.1680672
      # expected_right_weight * -(-0.5 + 0.2 + 0.1)
expected_right_gain = 0.033613445378151252
# (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
expected_bias_gain = 0.46043165467625885
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
# Make sure the gain is subtracted by the tree complexity regularization.
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain - 0.5,
gains[0], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.3, split_node.threshold, 0.00001)
# Check the split on partition 1.
# (-4 + 0.1) / (0.13 + 1)
expected_left_weight = -3.4513274336283186
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_left_gain = 13.460176991150442
expected_right_weight = 0
expected_right_gain = 0
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_bias_gain = 13.460176991150442
# Verify candidate for partition 1, there's only one active bucket here
      # so -0.5 gain is expected (because of tree complexity).
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
self.assertAllClose(-0.5, gains[1], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.52, split_node.threshold, 0.00001)
def testGenerateFeatureSplitCandidatesWithMinNodeWeight(self):
with self.test_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Dense Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | 1 |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 2.0) | 1 | 1 |
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 2])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0.5,
min_node_weight=1.5,
epsilon=0.001,
num_quantiles=10,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertAllEqual([0, 1], partitions)
# Check the gain on partition 0 to be -0.5.
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
# Make sure the gain is subtracted by the tree complexity regularization.
self.assertAllClose(-0.5, gains[0], 0.00001)
self.assertEqual(0, split_node.feature_column)
# Check the split on partition 1.
# (-4 + 0.1) / (2 + 1)
expected_left_weight = -1.3
expected_right_weight = 0
# Verify candidate for partition 1, there's only one active bucket here
      # so -0.5 gain is expected (because of tree complexity).
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
self.assertAllClose(-0.5, gains[1], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.52, split_node.threshold, 0.00001)
class SparseSplitHandlerTest(test_util.TensorFlowTestCase):
def testGenerateFeatureSplitCandidates(self):
with self.test_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Sparse Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | N/A |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 0.13) | 1 | 1 |
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
example_partitions = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
indices = array_ops.constant([[0, 0], [2, 0], [3, 0]], dtype=dtypes.int64)
values = array_ops.constant([0.52, 0.3, 0.52])
sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1])
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0,
l2_regularization=2,
tree_complexity_regularization=0,
min_node_weight=0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
sparse_float_column=sparse_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(0.2 + 1.2) / (0.12 + 0.2 + 2)
expected_left_weight = -0.603448275862069
# (0.2 + 1.2) ** 2 / (0.12 + 0.2 + 2)
expected_left_gain = 0.8448275862068965
# 0.5 / (0.07 + 2)
expected_right_weight = 0.24154589371980678
# 0.5 ** 2 / (0.07 + 2)
expected_right_gain = 0.12077294685990339
# (0.2 + 1.2 - 0.5) ** 2 / (0.12 + 0.2 + 0.07 + 2)
expected_bias_gain = 0.3389121338912133
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_right
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0])
self.assertAllClose([expected_left_weight], left_child.value)
self.assertAllClose([expected_right_weight], right_child.value)
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
# Check the split on partition 1.
expected_left_weight = -1.8779342723004695
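# (-1.8779... appears to be -4.0 / (0.13 + 2), i.e. the same regularized
# leaf-weight formula applied to the single example in partition 1.)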
expected_right_weight = 0
# Verify the candidate for partition 1; there's only one active bucket here,
# so zero gain is expected.
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_left
self.assertAllClose(0.0, gains[1])
self.assertAllClose([expected_left_weight], left_child.value)
self.assertAllClose([expected_right_weight], right_child.value)
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
def testGenerateFeatureSplitCandidatesMulticlassFullHessian(self):
with self.test_session() as sess:
# Batch is 4, 2 classes
gradients = array_ops.constant(
[[0.2, 1.4], [-0.5, 0.1], [1.2, 3], [4.0, -3]])
# 2x2 matrix for each instance
hessian_0 = [[0.12, 0.02], [0.3, 0.11]]
hessian_1 = [[0.07, -0.2], [-0.5, 0.2]]
hessian_2 = [[0.2, -0.23], [-0.8, 0.9]]
hessian_3 = [[0.13, -0.3], [-1.5, 2.2]]
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3])
example_partitions = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
indices = array_ops.constant([[0, 0], [2, 0], [3, 0]], dtype=dtypes.int64)
values = array_ops.constant([0.52, 0.3, 0.52])
sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1])
gradient_shape = tensor_shape.TensorShape([2])
hessian_shape = tensor_shape.TensorShape([2, 2])
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0,
l2_regularization=2,
tree_complexity_regularization=0,
min_node_weight=0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
sparse_float_column=sparse_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_right
# Each leaf has 2 element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_left
self.assertEqual(2, len(left_child.value))
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
def testGenerateFeatureSplitCandidatesMulticlassDiagonalHessian(self):
with self.test_session() as sess:
# Batch is 4, 2 classes
gradients = array_ops.constant(
[[0.2, 1.4], [-0.5, 0.1], [1.2, 3], [4.0, -3]])
# Each hessian is a diagonal from a full hessian matrix.
hessian_0 = [0.12, 0.11]
hessian_1 = [0.07, 0.2]
hessian_2 = [0.2, 0.9]
hessian_3 = [0.13, 2.2]
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3])
example_partitions = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
indices = array_ops.constant([[0, 0], [2, 0], [3, 0]], dtype=dtypes.int64)
values = array_ops.constant([0.52, 0.3, 0.52])
sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1])
gradient_shape = tensor_shape.TensorShape([2])
hessian_shape = tensor_shape.TensorShape([2])
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0,
l2_regularization=2,
tree_complexity_regularization=0,
min_node_weight=0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
sparse_float_column=sparse_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_right
# Each leaf has 2 element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_left
self.assertEqual(2, len(left_child.value))
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
def testGenerateFeatureSplitCandidatesInactive(self):
with self.test_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Sparse Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | N/A |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 0.13) | 1 | 1 |
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
example_partitions = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
indices = array_ops.constant([[0, 0], [2, 0], [3, 0]], dtype=dtypes.int64)
values = array_ops.constant([0.52, 0.3, 0.52])
sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1])
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0,
l2_regularization=2,
tree_complexity_regularization=0,
min_node_weight=0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
sparse_float_column=sparse_column,
init_stamp_token=0,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, False]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([False, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
# The handler was inactive, so it shouldn't produce any splits.
self.assertEqual(len(partitions), 0)
self.assertEqual(len(gains), 0)
self.assertEqual(len(splits), 0)
def testEmpty(self):
with self.test_session() as sess:
indices = array_ops.constant([], dtype=dtypes.int64, shape=[0, 2])
# No values in this feature column in this mini-batch.
values = array_ops.constant([], dtype=dtypes.float32)
sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1])
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0,
l2_regularization=2,
tree_complexity_regularization=0,
min_node_weight=0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
sparse_float_column=sparse_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertEqual(len(partitions), 0)
self.assertEqual(len(gains), 0)
self.assertEqual(len(splits), 0)
def testDegenerativeCase(self):
with self.test_session() as sess:
# Only one data example, so one leaf and thus one quantile bucket. The same
# situation occurs when all examples have the same values. This case
# previously caused a failure.
gradients = array_ops.constant([0.2])
hessians = array_ops.constant([0.12])
example_partitions = array_ops.constant([1], dtype=dtypes.int32)
indices = array_ops.constant([[0, 0]], dtype=dtypes.int64)
values = array_ops.constant([0.58])
sparse_column = sparse_tensor.SparseTensor(indices, values, [1, 1])
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0,
l2_regularization=2,
tree_complexity_regularization=0,
min_node_weight=0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
sparse_float_column=sparse_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([1, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(0, 1, class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(1, 2, class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertAllEqual([1], partitions)
self.assertAllEqual([0.0], gains)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
split_node = split_info.split_node.sparse_float_binary_split_default_left
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.58, split_node.split.threshold)
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
TsubameDono/codecombat | scripts/analytics/parseStripePayments.py | 97 | 10435 | # Parse Stripe payment info via exported payments.csv files
import sys
from datetime import tzinfo, timedelta, datetime
# TODO: use stripe_customers.csv to match payments to our db data
# Stripe file format
# id,Description,Created (UTC),Amount,Amount Refunded,Currency,Converted Amount,Converted Amount Refunded,Fee,Tax,Converted Currency,Mode,Status,Statement Description,Customer ID,Customer Description,Customer Email,Captured,Card Last4,Card Brand,Card Funding,Card Exp Month,Card Exp Year,Card Name,Card Address Line1,Card Address Line2,Card Address City,Card Address State,Card Address Country,Card Address Zip,Card Issue Country,Card Fingerprint,Card CVC Status,Card AVS Zip Status,Card AVS Line1 Status,Disputed Amount,Dispute Status,Dispute Reason,Dispute Date (UTC),Dispute Evidence Due (UTC),Invoice ID,productID (metadata),userID (metadata),gems (metadata),timestamp (metadata)
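# Column indexes used below (0-based, per the header above):
# data[0] = id, data[2] = Created (UTC), data[3] = Amount,
# data[4] = Amount Refunded, data[11] = Mode, data[12] = Status,
# data[13] = Statement Description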
def getGemCounts(paymentsFile):
gems = {}
with open(paymentsFile) as f:
first = True
for line in f:
if first:
first = False
else:
data = line.split(',')
amount = int(float(data[3]) * 100)
status = data[12]
statementDescription = data[13]
if status == 'Paid' and not statementDescription == 'Sub':
if not amount in gems:
gems[amount] = 1
else:
gems[amount] += 1
return gems
def getSubCounts(paymentsFile):
subs = {}
with open(paymentsFile) as f:
first = True
for line in f:
if first:
first = False
else:
data = line.split(',')
# created = data[2]
amount = int(float(data[3]) * 100)
# amountRefunded = int(float(data[4]) * 100)
# mode = data[11]
status = data[12]
statementDescription = data[13]
# Look for status = 'Paid', and statementDescription = 'Sub'
# print "{0}\t{1}\t{2}\t{3}\t{4}\t{5}".format(created, amount, amountRefunded, mode, status, statementDescription)
if status == 'Paid' and statementDescription == 'Sub':
if not amount in subs:
subs[amount] = 1
else:
subs[amount] += 1
return subs
def getHoCPriceConversionRates(paymentsFile):
# Show counts from Mixpanel
prices = {
'399': {
# 'start': datetime(2014, 12, 12, 3, 21),
# 'end': datetime(2014, 12, 13, 17, 30),
'Show subscription modal': 31157,
'Finished subscription purchase': 0
},
'599': {
# 'start': datetime(2014, 12, 9, 14, 23),
# 'end': datetime(2014, 12, 11, 0, 34),
'Show subscription modal': 31044,
'Finished subscription purchase': 0
},
'999': {
# 'start': datetime(2014, 9, 1),
# 'end': datetime(2014, 12, 9, 14, 23),
# 'start2': datetime(2014, 12, 11, 0, 34),
# 'end2': datetime(2014, 12, 12, 3, 21),
# 'start3': datetime(2014, 12, 13, 17, 30),
'Show subscription modal': 86883,
'Finished subscription purchase': 0
},
'1499': {
# 'start': datetime(2014, 12, 11, 1),
# 'end': datetime(2014, 12, 12, 3, 21),
'Show subscription modal': 19519,
'Finished subscription purchase': 0
}
}
# TODO: may be one 1499 sale
priceTest = {
'ch_158LyeKaReE7xLUdnt0m9pjb': True,
'ch_158OPLKaReE7xLUdcqYQ5qst': True,
'ch_158jkBKaReE7xLUd305I3WBy': True
}
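# Charge IDs that the loop below treats as 1499-cent purchases regardless of
# the amount recorded in the CSV (presumably the $14.99 price-test charges
# referenced in the TODO above).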
# Find 'Finished subscription purchase' event from Stripe data
startDate = datetime(2014, 12, 8)
endDate = datetime(2014, 12, 20)
print startDate, 'to', endDate
with open(paymentsFile) as f:
first = True
for line in f:
if first:
first = False
else:
data = line.split(',')
paymentID = data[0]
created = data[2] # 2014-12-14 06:01
createdDate = datetime(int(created[0:4]), int(created[5:7]), int(created[8:10]), int(created[11:13]), int(created[14:16]))
if createdDate < startDate or createdDate >= endDate:
continue
if paymentID in priceTest:
amount = 1499
else:
amount = int(float(data[3]) * 100)
amountStr = str(amount)
# amountRefunded = int(float(data[4]) * 100)
# mode = data[11]
status = data[12]
statementDescription = data[13]
# Look for status = 'Paid', and statementDescription = 'Sub'
# print "{0}\t{1}\t{2}\t{3}\t{4}\t{5}".format(created, amount, amountRefunded, mode, status, statementDescription)
if status == 'Paid' and statementDescription == 'Sub':
prices[amountStr]['Finished subscription purchase'] += 1
# Calculate conversion rates
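# Price keys are charge amounts in cents, so 'Value Per User' is expressed in
# cents per subscription-modal view (matching the "cents" formatting in the
# summary printed at the bottom of this script).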
for key, item in prices.iteritems():
item['Conversion Rate'] = float(item['Finished subscription purchase']) / item['Show subscription modal']
item['Value Per User'] = float(item['Finished subscription purchase']) / item['Show subscription modal'] * int(key)
return prices
def getPreHoCPriceConversionRates(paymentsFile):
# Pre-HoC but after full stop paywall in forest
# Show count from Mixpanel
prices = {
'999': {
'Show subscription modal': 3447,
'Finished subscription purchase': 0
}
}
# Find 'Finished subscription purchase' event from Stripe data
startDate = datetime(2014, 12, 6)
endDate = datetime(2014, 12, 8)
print startDate, 'to', endDate
with open(paymentsFile) as f:
first = True
for line in f:
if first:
first = False
else:
data = line.split(',')
paymentID = data[0]
created = data[2] # 2014-12-14 06:01
createdDate = datetime(int(created[0:4]), int(created[5:7]), int(created[8:10]), int(created[11:13]), int(created[14:16]))
if createdDate < startDate or createdDate >= endDate:
continue
amount = int(float(data[3]) * 100)
amountStr = str(amount)
status = data[12]
statementDescription = data[13]
if status == 'Paid' and statementDescription == 'Sub':
prices[amountStr]['Finished subscription purchase'] += 1
# Calculate conversion rates
for key, item in prices.iteritems():
item['Conversion Rate'] = float(item['Finished subscription purchase']) / item['Show subscription modal']
item['Value Per User'] = float(item['Finished subscription purchase']) / item['Show subscription modal'] * int(key)
return prices
def getPostHoCPriceConversionRates(paymentsFile):
# Post-HoC (after the Hour of Code window)
# Show count from Mixpanel
prices = {
'999': {
'Show subscription modal': 13339,
'Finished subscription purchase': 0
}
}
# Find 'Finished subscription purchase' event from Stripe data
startDate = datetime(2014, 12, 20)
endDate = datetime(2015, 1, 4)
print startDate, 'to', endDate
with open(paymentsFile) as f:
first = True
for line in f:
if first:
first = False
else:
data = line.split(',')
paymentID = data[0]
created = data[2] # 2014-12-14 06:01
createdDate = datetime(int(created[0:4]), int(created[5:7]), int(created[8:10]), int(created[11:13]), int(created[14:16]))
if createdDate < startDate or createdDate >= endDate:
continue
amount = int(float(data[3]) * 100)
amountStr = str(amount)
status = data[12]
statementDescription = data[13]
if status == 'Paid' and statementDescription == 'Sub':
prices[amountStr]['Finished subscription purchase'] += 1
# Calculate conversion rates
for key, item in prices.iteritems():
item['Conversion Rate'] = float(item['Finished subscription purchase']) / item['Show subscription modal']
item['Value Per User'] = float(item['Finished subscription purchase']) / item['Show subscription modal'] * int(key)
return prices
if __name__ == '__main__':
paymentsFile = 'stripe_payments.csv'
if len(sys.argv) == 2:
paymentsFile = sys.argv[1]
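# Example invocation (assumes a payments export from the Stripe dashboard is
# available locally): python parseStripePayments.py stripe_payments.csv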
print 'Processing', paymentsFile
print 'Subs'
print getSubCounts(paymentsFile)
print 'Gems'
print getGemCounts(paymentsFile)
print 'Pre-HoC Conversion Rates'
priceConversionRates = getPreHoCPriceConversionRates(paymentsFile)
print 'Price, converted, shown, conversion rate, value per user'
for key, item in priceConversionRates.iteritems():
print key, item['Finished subscription purchase'], item['Show subscription modal'], "%.4f%%" % (item['Conversion Rate'] * 100), "%.4f cents" % (item['Conversion Rate'] * int(key))
print 'HoC Conversion Rates'
priceConversionRates = getHoCPriceConversionRates(paymentsFile)
print 'Price, converted, shown, conversion rate, value per user'
for key, item in priceConversionRates.iteritems():
print key, item['Finished subscription purchase'], item['Show subscription modal'], "%.4f%%" % (item['Conversion Rate'] * 100), "%.4f cents" % (item['Conversion Rate'] * int(key))
print 'Post-HoC Conversion Rates'
priceConversionRates = getPostHoCPriceConversionRates(paymentsFile)
print 'Price, converted, shown, conversion rate, value per user'
for key, item in priceConversionRates.iteritems():
print key, item['Finished subscription purchase'], item['Show subscription modal'], "%.4f%%" % (item['Conversion Rate'] * 100), "%.4f cents" % (item['Conversion Rate'] * int(key))
| mit |