code | repo_name | path | language | license | size |
---|---|---|---|---|---|
import numpy as np
class lemketableau:
def __init__(self,M,q,maxIter = 100):
n = len(q)
self.T = np.hstack((np.eye(n),-M,-np.ones((n,1)),q.reshape((n,1))))
self.n = n
self.wPos = np.arange(n)
self.zPos = np.arange(n,2*n)
self.W = 0
self.Z = 1
self.Y = 2
self.Q = 3
TbInd = np.vstack((self.W*np.ones(n,dtype=int),
np.arange(n,dtype=int)))
TnbInd = np.vstack((self.Z*np.ones(n,dtype=int),
np.arange(n,dtype=int)))
DriveInd = np.array([[self.Y],[0]])
QInd = np.array([[self.Q],[0]])
self.Tind = np.hstack((TbInd,TnbInd,DriveInd,QInd))
self.maxIter = maxIter
def lemkeAlgorithm(self):
initVal = self.initialize()
if not initVal:
return np.zeros(self.n),0,'Solution Found'
for k in range(self.maxIter):
stepVal = self.step()
if self.Tind[0,-2] == self.Y:
# Solution Found
z = self.extractSolution()
return z,0,'Solution Found'
elif not stepVal:
return None,1,'Secondary ray found'
return None,2,'Max Iterations Exceeded'
def initialize(self):
q = self.T[:,-1]
minQ = np.min(q)
if minQ < 0:
ind = np.argmin(q)
self.clearDriverColumn(ind)
self.pivot(ind)
return True
else:
return False
def step(self):
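# One pivot step of Lemke's algorithm: minimum-ratio test over rows whose driving-column entry is positive.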
q = self.T[:,-1]
a = self.T[:,-2]
ind = np.nan
minRatio = np.inf
for i in range(self.n):
if a[i] > 0:
newRatio = q[i] / a[i]
if newRatio < minRatio:
ind = i
minRatio = newRatio
if minRatio < np.inf:
self.clearDriverColumn(ind)
self.pivot(ind)
return True
else:
return False
def extractSolution(self):
z = np.zeros(self.n)
q = self.T[:,-1]
for i in range(self.n):
if self.Tind[0,i] == self.Z:
z[self.Tind[1,i]] = q[i]
return z
def partnerPos(self,pos):
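# Return the tableau column holding the complementary variable (w_i <-> z_i); None for the driving or q column.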
v,ind = self.Tind[:,pos]
if v == self.W:
ppos = self.zPos[ind]
elif v == self.Z:
ppos = self.wPos[ind]
else:
ppos = None
return ppos
def pivot(self,pos):
ppos = self.partnerPos(pos)
if ppos is not None:
self.swapColumns(pos,ppos)
self.swapColumns(pos,-2)
return True
else:
self.swapColumns(pos,-2)
return False
def swapMatColumns(self,M,i,j):
Mi = np.array(M[:,i],copy=True)
Mj = np.array(M[:,j],copy=True)
M[:,i] = Mj
M[:,j] = Mi
return M
def swapPos(self,v,ind,newPos):
if v == self.W:
self.wPos[ind] = newPos % (2*self.n+2)
elif v == self.Z:
self.zPos[ind] = newPos % (2*self.n+2)
def swapColumns(self,i,j):
iInd = self.Tind[:,i]
jInd = self.Tind[:,j]
v,ind = iInd
self.swapPos(v,ind,j)
v,ind = jInd
self.swapPos(v,ind,i)
self.Tind = self.swapMatColumns(self.Tind,i,j)
self.T = self.swapMatColumns(self.T,i,j)
def clearDriverColumn(self,ind):
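# Gauss-Jordan elimination on the driving column: scale row ind so its entry is 1, then zero it out of every other row.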
a = self.T[ind,-2]
self.T[ind] /= a
for i in range(self.n):
if i != ind:
b = self.T[i,-2]
self.T[i] -= b * self.T[ind]
def ind2str(self,indvec):
v,pos = indvec
if v == self.W:
s = 'w%d' % pos
elif v == self.Z:
s = 'z%d' % pos
elif v == self.Y:
s = 'y'
else:
s = 'q'
return s
def indexStringArray(self):
indstr = np.array([self.ind2str(indvec) for indvec in self.Tind.T],dtype=object)
return indstr
def indexedTableau(self):
indstr = self.indexStringArray()
return np.vstack((indstr,self.T))
def __repr__(self):
IT = self.indexedTableau()
return IT.__repr__()
def __str__(self):
IT = self.indexedTableau()
return IT.__str__()
def lemkelcp(M,q,maxIter=100):
"""
sol = lemkelcp(M,q,maxIter)
Uses Lemke's algorithm to compute a solution to the
linear complementarity problem:
Mz + q >= 0
z >= 0
z'(Mz+q) = 0
The inputs are given by:
M - an nxn numpy array
q - a length n numpy array
maxIter - an optional number of pivot iterations. Set to 100 by default
The solution is a tuple of the form:
z,exit_code,exit_string = sol
The entries are summarized in the table below:
|z | exit_code | exit_string |
-----------------------------------------------------------
| solution to LCP | 0 | 'Solution Found' |
| None | 1 | 'Secondary ray found' |
| None | 2 | 'Max Iterations Exceeded' |
"""
tableau = lemketableau(M,q,maxIter)
return tableau.lemkeAlgorithm()
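# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# The 2x2 problem below is made-up example data: M is positive definite, so Lemke's
# algorithm should terminate with 'Solution Found' and z satisfying Mz + q = 0, z >= 0.
if __name__ == '__main__':
    M_example = np.array([[2.0, 1.0],
                          [1.0, 2.0]])
    q_example = np.array([-5.0, -6.0])
    z, exit_code, exit_string = lemkelcp(M_example, q_example)
    print(z, exit_code, exit_string)  # expected roughly: [1.333 2.333] 0 'Solution Found'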
| AndyLamperski/lemkelcp | lemkelcp/lemkelcp.py | Python | mit | 5,431 |
import xml.etree.ElementTree as ET
import datetime
import sys
import openpyxl
import re
import dateutil.parser
def main():
print 'Number of arguments:', len(sys.argv), 'arguments.' #DEBUG
print 'Argument List:', str(sys.argv) #DEBUG
payRate = raw_input("Enter your pay rate: ") #DEBUG
sNumber = raw_input("Enter 900#: ") #DEBUG
xml = ET.parse("xml.xml") #DEBUG
root = xml.getroot()
root = root[3][0] #Go directly to worksheet/table
global sheet #make the worksheet visible to the helper functions below
sheet = openpyxl.load_workbook(sys.argv[1], data_only=True).active
writeName(root)
writeEmployeeNum(root)
writeStudentNum(sNumber)
writePayRate(payRate)
#At this point all that is left are the times
dates = []
for x in root.findall(".//*"):
if x.text is not None:
dates.append(x.text)
for x in char_range('G','Z'):
writeTimes(x + '17' , dates)
def writeTimes (position, dateList):
match = next(x[0] for x in enumerate(dateList) if x[1] == sheet[position].value)
jobCode = dateList[match+4]
if jobCode == 900:
raise error("Cannot start day with 900 break")
else:
sheet[position] = roundTime(dateList[match]) #NOTE: this call was left unfinished in the original; target cell and argument are inferred
def roundTime(time):
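# Round a parsed clock time to the nearest quarter hour; minutes 53-59 roll over to the top of the next hour.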
date = dateutil.parser.parse(time)
if date.minute <= 7:
return date.replace(minute=0)
elif date.minute >= 8 and date.minute <= 22:
return date.replace(minute=15)
elif date.minute >= 23 and date.minute <= 37:
return date.replace(minute=30)
elif date.minute >= 38 and date.minute <= 52:
return date.replace(minute=45)
elif date.minute >= 53:
if date.hour == 23:
raise ValueError("Worked overnight or did not clock out")
else:
date += datetime.timedelta(minutes= (60-date.minute))
#Rounds time to next hour by adding minutes until 60
return date
else:
raise error("Something went wrong in roundTime")
def writeName(tree):
name = tree[-1][4][0].text
sheet['I8'] = name
def writeEmployeeNum(tree):
num = tree[2][0][0].text
sheet['D4'] = re.match('.*?([0-9]+)$', num).group(1)
def writeStudentNum(num):
sheet['S8'] = num
def writePayRate(num):
sheet['K6'] = num
def char_range(c1, c2):
"""Generates the characters from `c1` to `c2`, inclusive."""
"""Courtesy http://stackoverflow.com/questions/7001144/range-over-character-in-python"""
for c in xrange(ord(c1), ord(c2)+1):
yield chr(c)
main()
| JamesPavek/payroll | timesheet.py | Python | mit | 2,615 |
from PySide import QtCore, QtGui
class MakinFrame(QtGui.QFrame):
mousegeser = QtCore.Signal(int,int)
def __init__(self,parent=None):
super(MakinFrame,self).__init__(parent)
self.setMouseTracking(True)
def setMouseTracking(self, flag):
def recursive_set(parent):
for child in parent.findChildren(QtCore.QObject):
try:
child.setMouseTracking(flag)
except:
pass
recursive_set(child)
QtGui.QWidget.setMouseTracking(self,flag)
recursive_set(self)
def mouseMoveEvent(self, me):
a = QtGui.QFrame.mouseMoveEvent(self,me)
self.mousegeser.emit(me.x(), me.y())
return a
| imakin/PersonalAssistant | GameBot/src_py/makinreusable/makinframe.py | Python | mit | 610 |
from graphics_module.objects import *
import numpy as np
def make_pixels_array_basic(amount):
return np.full(amount, Pixel(), dtype=object) #note: every slot references the same Pixel instance
def make_pixels_array_config_based(config):
if config.colorscheme == "b&w":
c = Color()
elif config.colorscheme == "light":
c = Color(r=245,g=235,b=234,a=0.85) #"light" or whatever to be slightly colorized dots
if config.alpha == True:
lol = 4 #random influenced alpha
#and so on
def get_color(config):
if not config:#has attribute "lower_limit": I don't know
lower_limit = 230
| sindresf/The-Playground | Python/Machine Learning/LSTM Music Visualizer/LSTM Music Visualizer/graphics_module/initialization.py | Python | mit | 576 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Nate Coraor <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: zfs_permissions
short_description: Manage zfs administrative permissions
description:
- Manages ZFS file system administrative permissions on Solaris and FreeBSD. See zfs(1M) for more information about the properties.
version_added: "1.10"
options:
name:
description:
- File system or volume name e.g. C(rpool/myfs)
required: true
state:
description:
- Whether to allow (C(present)), or unallow (C(absent)) a permission.
required: true
choices: [present, absent]
users:
description:
- Users to whom permission(s) should be granted, separated by commas.
required: false
groups:
description:
- Groups to whom permission(s) should be granted, separated by commas.
required: false
everyone:
description:
- Apply permissions to everyone.
required: false
default: false
choices: ['on','off']
permissions:
description:
- The permission(s) to delegate, separated by commas (required if C(state) is C(present))
required: false
choices: ['allow','clone','create','destroy',...]
local:
description:
- Apply permissions to C(name) "locally" (C(zfs allow -l))
required: false
default: null
choices: ['on','off']
descendents:
description:
- Apply permissions to C(name)'s descendents (C(zfs allow -d))
required: false
default: null
choices: ['on','off']
recursive:
description:
- Unallow permissions recursively (ignored when C(state) is C(present))
required: false
default: false
choices: ['on','off']
author: "Nate Coraor (@natefoo)"
'''
EXAMPLES = '''
# Grant `zfs allow` and `unallow` permission to the `adm` user with local+descendents scope
- zfs_permissions: name=rpool/myfs users=adm permissions=allow,unallow
# Grant `zfs send` to everyone, plus the group `backup`
- zfs_permissions: name=rpool/myvol groups=backup everyone=yes permissions=send
# Grant `zfs send,receive` to users `foo` and `bar` with local scope only
- zfs_permissions: name=rpool/myfs users=foo,bar permissions=send,receive local=yes
# Revoke all permissions from everyone (permissions specifically assigned to users and groups remain)
- zfs_permissions: name=rpool/myfs state=absent everyone=yes
'''
import sys
class ZfsPermissions(object):
def __init__(self, module):
self.module = module
self.name = module.params.get('name')
self.state = module.params.get('state')
self.users = module.params.get('users') or []
self.groups = module.params.get('groups') or []
self.everyone = module.boolean(module.params.get('everyone'))
self.perms = module.params.get('permissions') or []
self.recursive = module.boolean(module.params.get('recursive'))
self.scope = None
self.changed = False
self.__current_perms = None
if self.state == 'present' and not self.perms:
self.module.fail_json(msg='The `permissions` option is required for state=present')
if self.state == 'present' and not (self.users or self.groups or self.everyone):
self.module.fail_json(msg='One of `users`, `groups`, or `everyone` must be set')
for splittable in ('users', 'groups', 'perms'):
if getattr(self, splittable):
setattr(self, splittable, getattr(self, splittable).split(','))
local = module.boolean(module.params.get('local'))
descendents = module.boolean(module.params.get('descendents'))
if (local and descendents) or (not local and not descendents):
self.scope = 'ld'
elif local:
self.scope = 'l'
elif descendents:
self.scope = 'd'
else:
self.module.fail_json(msg='Impossible value for local and descendents')
self.subcommand = 'allow'
self.recursive_opt = []
if self.state == 'absent':
self.subcommand = 'unallow'
if self.recursive:
self.recursive_opt = ['-r']
else:
self.recursive_opt = []
self.run()
@property
def current_perms(self):
if self.__current_perms is None:
rc, out, err = self.run_command(['zfs', 'allow', self.name])
if rc:
self.module.fail_json(msg='Getting permissions for %s failed: %s' % (self.name, err))
perms = dict(l = dict(u=dict(), g=dict(), e=[]),
d = dict(u=dict(), g=dict(), e=[]),
ld = dict(u=dict(), g=dict(), e=[]))
reading = None
for line in out.splitlines():
if line == 'Local permissions:':
reading = 'l'
elif line == 'Descendent permissions:':
reading = 'd'
elif line == 'Local+Descendent permissions:':
reading = 'ld'
elif line.startswith('\tuser '):
user, cur_perms = line.split()[1:3]
perms[reading]['u'][user] = cur_perms.split(',')
elif line.startswith('\tgroup '):
group, cur_perms = line.split()[1:3]
perms[reading]['g'][group] = cur_perms.split(',')
elif line.startswith('\teveryone '):
perms[reading]['e'] = line.split()[1].split(',')
self.__current_perms = perms
return self.__current_perms
def run_command(self, cmd):
progname = cmd[0]
cmd[0] = self.module.get_bin_path(progname, True)
return self.module.run_command(cmd)
def change_required(self, ent_type):
# zfs allow/unallow are idempotent, so we only need to do this for Ansible's changed flag
rval = []
if ent_type == 'u':
entities = self.users
elif ent_type == 'g':
entities = self.groups
for ent in entities:
ent_perms = self.current_perms[self.scope][ent_type].get(ent, None)
if self.state == 'present' and ent_perms is None:
rval.append(ent)
elif self.state == 'absent' and ent_perms is not None:
rval.append(ent)
elif ent_perms is not None:
for perm in self.perms:
if ((self.state == 'present' and perm not in ent_perms) or
(self.state == 'absent' and perm in ent_perms)):
# at least one desired permission is absent, or
# at least one undesired permission is present
rval.append(ent)
break
return rval
def run(self):
def run_cmd(args):
cmd = ['zfs', self.subcommand] + self.recursive_opt + ['-%s' % self.scope] + args
if self.perms:
cmd = cmd + [','.join(self.perms)]
cmd = cmd + [self.name]
if self.module.check_mode:
return 'Check mode skipped execution of: %s' % ' '.join(cmd)
rc, out, err = self.run_command(cmd)
if rc:
msg = 'Changing permissions with `%s` failed: %s' % (' '.join(cmd), err)
self.module.fail_json(msg=msg)
return out
stdout = ''
for ent_type in ('u', 'g'):
change = self.change_required(ent_type)
if change:
args = ['-%s' % ent_type, ','.join(change)]
stdout += run_cmd(args)
self.changed = True
if self.everyone:
everyone_perms = self.current_perms[self.scope]['e']
if self.state == 'absent' and not self.perms and everyone_perms:
args = ['-e']
stdout += run_cmd(args)
self.changed = True
for perm in self.perms:
if ((self.state == 'present' and perm not in everyone_perms) or
(self.state == 'absent' and perm in everyone_perms)):
#
args = ['-e']
stdout += run_cmd(args)
self.changed = True
break
exit_args = dict(changed=self.changed, state=self.state)
if self.changed:
exit_args.update(msg='ZFS permissions updated', stdout=stdout)
self.module.exit_json(**exit_args)
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
state = dict(default="present", choices=["absent", "present"]),
users = dict(default=None),
groups = dict(default=None),
everyone = dict(default=False, choices=BOOLEANS),
permissions = dict(default=None),
local = dict(default=None, choices=BOOLEANS),
descendents = dict(default=None, choices=BOOLEANS),
recursive = dict(default=False, choices=BOOLEANS)
),
supports_check_mode = True
)
zfs_permissions = ZfsPermissions(module)
sys.exit(0)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| martenson/ansible-common-roles | paths/library/zfs_permissions.py | Python | mit | 9,934 |
__author__ = 'olga'
import numpy as np
from prettyplotlib.colors import blue_red, blues_r, reds
from prettyplotlib.utils import remove_chartjunk, maybe_get_fig_ax
def pcolormesh(*args, **kwargs):
"""
Use for large datasets
Non-traditional `pcolormesh` kwargs are:
- xticklabels, which will put x tick labels exactly in the center of the
heatmap block
- yticklables, which will put y tick labels exactly aligned in the center
of the heatmap block
- xticklabels_rotation, which can be either 'horizontal' or 'vertical'
depending on how you want the xticklabels rotated. The default is
'horizontal', but if you have xticklabels that are longer, you may want
to do 'vertical' so they don't overlap.
- yticklabels_rotation, which can also be either 'horizontal' or
'vertical'. The default is 'horizontal' and in most cases,
that's what you'll want to stick with. But the option is there if you
want.
- center_value, which will be the centered value for a divergent
colormap, for example if you have data above and below zero, but you want
the white part of the colormap to be equal to 10 rather than 0,
then specify 'center_value=10'.
"""
# Deal with arguments in kwargs that should be there, or need to be taken
# out
fig, ax, args, kwargs = maybe_get_fig_ax(*args, **kwargs)
# If x and y axis are passed in arguments, gets correct data
# Ticks will work with x and y data, although it would be pointless to use
# both x/y and custom ticks
if len(args) == 3:
x = args[0]
y = args[1]
data = args[2]
elif len(args) == 1:
data = args[0]
kwargs.setdefault('vmax', data.max())
kwargs.setdefault('vmin', data.min())
center_value = kwargs.pop('center_value', 0)
# If the data spans both negative and positive values, treat it as divergent
divergent_data = False
if kwargs['vmax'] > 0 and kwargs['vmin'] < 0:
divergent_data = True
kwargs['vmax'] += center_value
kwargs['vmin'] += center_value
# If we have both negative and positive values, use a divergent colormap
if 'cmap' not in kwargs:
# Check if this is divergent
if divergent_data:
kwargs['cmap'] = blue_red
elif kwargs['vmax'] <= 0:
kwargs['cmap'] = blues_r
elif kwargs['vmax'] > 0:
kwargs['cmap'] = reds
if 'xticklabels' in kwargs:
xticklabels = kwargs['xticklabels']
kwargs.pop('xticklabels')
else:
xticklabels = None
if 'yticklabels' in kwargs:
yticklabels = kwargs['yticklabels']
kwargs.pop('yticklabels')
else:
yticklabels = None
if 'xticklabels_rotation' in kwargs:
xticklabels_rotation = kwargs['xticklabels_rotation']
kwargs.pop('xticklabels_rotation')
else:
xticklabels_rotation = 'horizontal'
if 'yticklabels_rotation' in kwargs:
yticklabels_rotation = kwargs['yticklabels_rotation']
kwargs.pop('yticklabels_rotation')
else:
yticklabels_rotation = 'horizontal'
ax_colorbar = kwargs.pop('ax_colorbar', None)
orientation_colorbar = kwargs.pop('orientation_colorbar', 'vertical')
p = ax.pcolormesh(*args, **kwargs)
# ax.set_ylim(0, x.shape[0])
# ax.set_xlim(0, x.shape[1])
# Get rid of ALL axes
remove_chartjunk(ax, ['top', 'right', 'left', 'bottom'])
if xticklabels is not None and any(xticklabels):
if len(args) == 1:
xticks = np.arange(0.5, data.shape[1] + 0.5)
else:
xticks = []
for i in np.arange(len(x) - 1):
half = float(x[i + 1] - x[i]) / 2. + x[i]
xticks.append(half)
xticks = np.array(xticks)
ax.set_xticks(xticks)
ax.set_xticklabels(xticklabels, rotation=xticklabels_rotation)
if yticklabels is not None and any(yticklabels):
if len(args) == 1:
yticks = np.arange(0.5, data.shape[0] + 0.5)  # rows, not columns, for the y axis
else:
yticks = []
for i in np.arange(len(y) - 1):
half = float(y[i + 1] - y[i]) / 2. + y[i]
yticks.append(half)
yticks = np.array(yticks)
ax.set_yticks(yticks)
ax.set_yticklabels(yticklabels, rotation=yticklabels_rotation)
# Show the scale of the colorbar
cbar = fig.colorbar(p, cax=ax_colorbar, use_gridspec=True,
orientation=orientation_colorbar)
return p, cbar
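# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# It assumes the usual prettyplotlib calling convention of passing the figure and
# axes as the first two positional arguments; the data and tick labels are made up.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    data = np.random.randn(5, 7)  # mixed signs, so the divergent blue/red colormap is chosen
    p, cbar = pcolormesh(fig, ax, data,
                         xticklabels=list('ABCDEFG'),
                         yticklabels=list('vwxyz'),
                         xticklabels_rotation='vertical')
    fig.savefig('pcolormesh_example.png')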
| olgabot/prettyplotlib | prettyplotlib/_pcolormesh.py | Python | mit | 4,451 |
"""
To use this, create a settings.py file and make these variables:
TOKEN=<oath token for github>
ORG=<your org in github>
DEST=<Path to download to>
"""
from github import Github
from subprocess import call
import os
from settings import TOKEN, ORG, DEST
def download():
"""Quick and Dirty Download all repos function"""
os.chdir(DEST)
print "Downloading to destination: ", os.getcwd()
g = Github(TOKEN)
repos = []
for repo in g.get_organization(ORG).get_repos():
print "Fetching Repo Name: %s" % repo.name
repos.append("[email protected]:%s/%s.git" % (ORG, repo.name))
total = len(repos)
print "Found %s repos" % total
count = 0
for repo in repos:
count +=1
print "Cloning Repo [%s]/[%s]: %s" % (count, total, repo)
call([u'git', u'clone', repo])
download() | sqor/3rdeye | fetch_repos.py | Python | mit | 789 |
from distutils.core import setup, Extension
setup(
name = 'iMX233_GPIO',
version = '0.1.0',
author = 'Stefan Mavrodiev',
author_email = '[email protected]',
url = 'https://www.olimex.com/',
license = 'MIT',
description = 'Control GPIOs on iMX233-OLinuXino.',
long_description = open('README.txt').read() + open('CHANGES.txt').read(),
classifiers = [ 'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Home Automation',
'Topic :: Software Development :: Embedded Systems'
],
ext_modules = [Extension('iMX233_GPIO', ['source/imx233.c'])],
package_dir={'': 'source'},
packages=[''],
)
| mikevoyt/iMX233_GPIO | setup.py | Python | mit | 1,271 |
from flask import Blueprint
from flask import current_app
from flask import request
from flask import jsonify
from flask import abort
from flask import render_template
from flask import redirect
from flask import url_for
from flask import flash
from werkzeug.exceptions import NotFound
from printus.web.models import Report
from printus.web.models import User
from printus.web.forms import UserForm
from printus.web.forms import ReportForm
from printus.web.forms import LoginForm
from printus.web.forms import SignupForm
from printus.web.forms import ContactForm
from printus.web.extensions import login_manager
from flask.ext.login import login_required, current_user, login_user, logout_user
bp = Blueprint('general', __name__, template_folder='templates')
@bp.route('/')
@login_required
def index():
try:
page = long(request.args.get('page', 1))
except Exception:
page = 1
try:
pagination = current_user.reports.order_by('created_at desc').paginate(page, 10)
except NotFound:
page = 1
pagination = current_user.reports.order_by('created_at desc').paginate(page, 10)
return render_template('reports.index.html', pagination=pagination)
@bp.route('/reports/new', methods=['GET', 'POST'])
@login_required
def reports_new():
form = ReportForm()
if form.validate_on_submit():
flash('Report created')
return redirect(url_for('general.index'))
return render_template('reports.new.html', form=form)
@bp.route('/profile', methods=['GET', 'POST'])
@login_required
def profile():
form = UserForm(obj=current_user)
if form.validate_on_submit():
form.populate_obj(current_user)
db.session.add(current_user)
db.session.commit()
return render_template('profile.html', form=form)
@bp.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
signupForm = SignupForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data, password=form.password.data).first()
if not user:
return render_template("login.html", form=form, signupForm=signupForm)
else:
login_user(user)
return redirect(request.args.get("next") or url_for("general.index"))
return render_template("login.html", form=form, signupForm=signupForm)
@bp.route('/signup', methods=['GET', 'POST'])
def signup():
form = SignupForm()
if form.validate_on_submit():
return redirect(request.args.get('next') or url_for('general.index'))
return render_template("signup.html", form=form)
@bp.route('/logout')
@login_required
def logout():
logout_user()
flash('Logged out.')
return redirect(url_for('general.index'))
@bp.route('/contact_us')
@login_required
def contact_us():
form = ContactForm()
if form.validate_on_submit():
return redirect(url_for('general.index'))
return render_template('contact_us.html', form=form)
| matrixise/printus | old_code/printus/web/views/general/__init__.py | Python | mit | 2,782 |
"""
commswave
=========
Takes device communications up and down according to a timefunction.
Comms will be working whenever the timefunction returns non-zero.
Configurable parameters::
{
"timefunction" : A timefunction definition
"threshold" : (optional) Comms will only work when the timefunction is returning >= threshold. If missing then any non-zero value will make comms work.
"gate_properties" : (optional) ["list", "of", "properties"] If this is defined, then instead of taking whole comms up and down, only these specific properties are gated
}
Device properties created::
{
}
"""
from .device import Device
from common import importer
import logging
class Commswave(Device):
def __init__(self, instance_name, time, engine, update_callback, context, params):
"""Take Comms up and down according to some time function"""
tf = params["commswave"]["timefunction"]
self.comms_timefunction = importer.get_class("timefunction", list(tf.keys())[0])(engine, self, tf[list(tf.keys())[0]])
self.comms_tf_threshold = params["commswave"].get("threshold", None)
self.comms_gate_properties = params["commswave"].get("gate_properties", None)
self.messages_sent = 0
self.messages_attempted = 0
super(Commswave,self).__init__(instance_name, time, engine, update_callback, context, params)
def timefunction_says_communicate(self):
thresh = 0.0
if self.comms_tf_threshold is not None:
thresh = self.comms_tf_threshold
return self.comms_timefunction.state() > thresh
def comms_ok(self):
if self.comms_gate_properties is not None: # If we're gating individual properties, then don't gate overall comms
return super(Commswave, self).comms_ok()
else:
self.messages_attempted += 1
is_ok = super(Commswave, self).comms_ok()
is_ok = is_ok and self.timefunction_says_communicate()
if is_ok:
self.messages_sent += 1
return is_ok
def transmit(self, the_id, ts, properties, force_comms):
if self.comms_gate_properties is not None: # We're gating properties
if not self.timefunction_says_communicate():
for p in self.comms_gate_properties:
properties.pop(p, None) # Remove the property, if it's there
super(Commswave, self).transmit(the_id, ts, properties, force_comms)
def external_event(self, event_name, arg):
super(Commswave, self).external_event(event_name, arg)
def close(self):
super(Commswave,self).close()
logging.info("Comms report for " + str(self.properties["$id"]) + " " +
str(self.messages_sent) + " sent ("+str(100 * self.messages_sent/self.messages_attempted) + "%) from " +
str(self.messages_attempted) + " total")
# Private methods
## (we don't actually need to tick, as we can instantaneously look up timefunction state whenever we need to)
## def tick_commswave(self, _):
## self.ok_commswave = self.comms_timefunction.state()
## self.engine.register_event_at(self.comms_timefunction.next_change(), self.tick_commswave, self, self)
| DevicePilot/synth | synth/devices/commswave.py | Python | mit | 3,252 |
from setuptools import setup, find_packages
with open('requirements.txt') as reqs:
inst_reqs = reqs.read().split('\n')
setup(
name='autobot',
version='0.1.0',
packages=find_packages(),
author='Mikael Knutsson',
author_email='[email protected]',
description='A bot framework made according to actual software principles',
long_description=open('README.md').read(),
classifiers=['License :: OSI Approved :: BSD License'],
install_requires=inst_reqs,
entry_points={
'console_scripts': ['autobot = autobot.main:main',
'autobot_init = autobot.init:main']
}
)
| mikn/autobot | setup.py | Python | mit | 648 |
"""File related wrapper functions to streamline common use cases"""
import manage as manage
from manage import find_file, find_file_re, list_dir
import operation as operation
from operation import hash_file, read_file, slice_file, write_file
| qevo/py_file_helper | file_helper/__init__.py | Python | mit | 243 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""test_03b_subcmd_primer3.py
Test primer3 subcommand for pdp script. These tests require primer3 v2+.
This test suite is intended to be run from the repository root using:
pytest -v
(c) The James Hutton Institute 2017-2019
Author: Leighton Pritchard
Contact:
[email protected]
Leighton Pritchard,
Information and Computing Sciences,
James Hutton Institute,
Errol Road,
Invergowrie,
Dundee,
DD2 5DA,
Scotland,
UK
The MIT License
Copyright (c) 2017-2019 The James Hutton Institute
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
import os
import shutil
from argparse import Namespace
import pytest
from diagnostic_primers.scripts import subcommands
from tools import PDPTestCase, get_primer3_version, modify_namespace
# Defined as global so it can be seen by the TestPrimer3Subcommand() class
# and setUpClass() classmethod.
OUTDIR = os.path.join("tests", "test_output", "pdp_primer3")
# Available primer3 version as global so that pytest.skipif() can see it
PRIMER3_VERSION = get_primer3_version()
class TestPrimer3Subcommand(PDPTestCase):
"""Class defining tests of the pdp primer3 subcommand."""
@classmethod
def setUpClass(TestPrimer3Subcommand):
# Clean up old output directory
if os.path.isdir(OUTDIR):
shutil.rmtree(OUTDIR)
def setUp(self):
"""Set parameters for tests."""
self.confdir = os.path.join("tests", "test_input", "pdp_primer3")
self.outdir = OUTDIR
self.targetdir = os.path.join("tests", "test_targets", "pdp_primer3")
self.p3_exe = "primer3_core"
self.scheduler = "multiprocessing"
self.workers = None
# null logger instance that does nothing
self.logger = logging.getLogger("TestPrimer3Subcommand logger")
self.logger.addHandler(logging.NullHandler())
# path to thermodynamic parameters (needed for Travis/testing)
self.therm_param_path = os.path.join(
"tests", "test_input", "primer3", "primer3_config"
)
# base Namespace
self.base_namespace = Namespace(
primer3_dir=self.outdir,
primer3_exe=self.p3_exe,
primer3_force=True,
scheduler=self.scheduler,
workers=4,
verbose=True,
p3_hybridprobe=False,
p3_filter=False,
disable_tqdm=True,
p3_param_path=self.therm_param_path,
p3_numreturn=10,
p3_osize=20,
p3_minsize=18,
p3_maxsize=22,
p3_wt_lt=2,
p3_wt_gt=2,
p3_opttm=59,
p3_mintm=58,
p3_maxtm=60,
p3_ogcpercent=55,
p3_mingc=30,
p3_maxgc=80,
p3_psizeopt=100,
p3_psizemin=50,
p3_psizemax=150,
p3_maxpolyx=3,
p3_osizeopt=20,
p3_ominsize=13,
p3_omaxsize=30,
p3_otmopt=69,
p3_otmmin=68,
p3_otmmax=70,
p3_ogcopt=55,
p3_ogcmin=30,
p3_ogcmax=80,
recovery=False,
)
@pytest.mark.skipif(PRIMER3_VERSION[0] < 2, reason="requires primer3 v2+")
def test_primer3_01_run(self):
"""primer3 subcommand recapitulates primer design for small input set.
pdp primer3 -v \
--outdir=tests/test_output/pdp_primer3/subset \
tests/test_input/pdp_primer3/subsetconf.json \
tests/test_output/pdp_primer3/subsetep3conf.json
"""
subcommands.subcmd_primer3(
modify_namespace(
self.base_namespace,
{
"infilename": os.path.join(self.confdir, "subsetconf.json"),
"outfilename": os.path.join(self.outdir, "subsetep3conf.json"),
"primer3_dir": os.path.join(self.outdir, "subset"),
},
),
self.logger,
)
# Check file contents
self.assertDirsEqual(
os.path.join(self.outdir, "subset"), os.path.join(self.targetdir, "subset")
)
@pytest.mark.skipif(PRIMER3_VERSION[0] < 2, reason="requires primer3 v2+")
def test_primer3_02_force(self):
"""primer3 subcommand executes and overwrites existing output.
This is the same test as test_primer3_01_run:
pdp primer3 -v -f \
--outdir=tests/test_output/pdp_primer3/subset \
tests/test_input/pdp_primer3/subsetconf.json \
tests/test_output/pdp_primer3/subsetep3conf.json
"""
self.test_primer3_01_run()
@pytest.mark.skipif(PRIMER3_VERSION[0] < 2, reason="requires primer3 v2+")
def test_primer3_03_noforce(self):
"""Script exits when not forcing primer3 output overwrite of existing output.
pdp primer3 -v \
--outdir=tests/test_output/pdp_primer3/subset \
tests/test_input/pdp_primer3/subsetconf.json \
tests/test_output/pdp_primer3/subsetep3conf.json
"""
with pytest.raises(SystemExit):
subcommands.subcmd_primer3(
modify_namespace(
self.base_namespace,
{
"infilename": os.path.join(self.confdir, "subsetconf.json"),
"outfilename": os.path.join(self.outdir, "subsetep3conf.json"),
"primer3_dir": os.path.join(self.outdir, "subset"),
"primer3_force": False,
},
),
self.logger,
)
@pytest.mark.skipif(PRIMER3_VERSION[0] < 2, reason="requires primer3 v2+")
def test_invalid_conf_file(self):
"""Script exits when primer3 config file has wrong suffix.
pdp primer3 -v \
--outdir=tests/test_output/pdp_primer3/subset \
tests/test_input/pdp_primer3/testprodigalconf.nojson \
tests/test_output/pdp_primer3/ep3conf.json
"""
with pytest.raises(SystemExit):
subcommands.subcmd_primer3(
modify_namespace(
self.base_namespace,
{
"infilename": os.path.join(
self.confdir, "testprodigalconf.nojson"
),
"outfilename": os.path.join(self.outdir, "ep3conf.json"),
},
),
self.logger,
)
@pytest.mark.skipif(PRIMER3_VERSION[0] < 2, reason="requires primer3 v2+")
def test_tsv_conf_file(self):
"""Error raised when .conf file provided for primer3.
pdp primer3 -v \
--outdir=tests/test_output/pdp_primer3/subset \
tests/test_input/pdp_primer3/testin.conf \
tests/test_output/pdp_primer3/ep3conf.json
"""
with pytest.raises(ValueError):
subcommands.subcmd_primer3(
modify_namespace(
self.base_namespace,
{
"infilename": os.path.join(self.confdir, "testin.conf"),
"outfilename": os.path.join(self.outdir, "ep3conf.json"),
},
),
self.logger,
)
| widdowquinn/find_differential_primers | tests/test_03b_subcmd_primer3.py | Python | mit | 8,352 |
import os
def run(name='test1.py'):
filename = os.path.join(os.getcwd(), name)
exec(compile(open(filename).read(), filename, 'exec'))
| karljakoblarsson/Rattan-Geometry | Utils.py | Python | mit | 130 |
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import base64
import copy
import sys
import time
from webkitpy.layout_tests.port import DeviceFailure, Driver, DriverOutput, Port
from webkitpy.layout_tests.port.base import VirtualTestSuite
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
from webkitpy.layout_tests.models import test_run_results
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.crashlogs import CrashLogs
# This sets basic expectations for a test. Each individual expectation
# can be overridden by a keyword argument in TestList.add().
class TestInstance(object):
def __init__(self, name):
self.name = name
self.base = name[(name.rfind("/") + 1):name.rfind(".")]
self.crash = False
self.web_process_crash = False
self.exception = False
self.keyboard = False
self.error = ''
self.timeout = False
self.is_reftest = False
self.device_failure = False
# The values of each field are treated as raw byte strings. They
# will be converted to unicode strings where appropriate using
# FileSystem.read_text_file().
self.actual_text = self.base + '-txt'
self.actual_checksum = self.base + '-checksum'
# We add the '\x8a' for the image file to prevent the value from
# being treated as UTF-8 (the character is invalid)
self.actual_image = self.base + '\x8a' + '-png' + 'tEXtchecksum\x00' + self.actual_checksum
self.expected_text = self.actual_text
self.expected_image = self.actual_image
self.actual_audio = None
self.expected_audio = None
# This is an in-memory list of tests, what we want them to produce, and
# what we want to claim are the expected results.
class TestList(object):
def __init__(self):
self.tests = {}
def add(self, name, **kwargs):
test = TestInstance(name)
for key, value in kwargs.items():
test.__dict__[key] = value
self.tests[name] = test
def add_reftest(self, name, reference_name, same_image, crash=False):
self.add(name, actual_checksum='xxx', actual_image='XXX', is_reftest=True, crash=crash)
if same_image:
self.add(reference_name, actual_checksum='xxx', actual_image='XXX', is_reftest=True)
else:
self.add(reference_name, actual_checksum='yyy', actual_image='YYY', is_reftest=True)
def keys(self):
return self.tests.keys()
def __contains__(self, item):
return item in self.tests
def __getitem__(self, item):
return self.tests[item]
#
# These numbers may need to be updated whenever we add or delete tests. This includes virtual tests.
#
TOTAL_TESTS = 114
TOTAL_SKIPS = 29
UNEXPECTED_PASSES = 1
UNEXPECTED_FAILURES = 25
def unit_test_list():
tests = TestList()
tests.add('failures/expected/crash.html', crash=True)
tests.add('failures/expected/exception.html', exception=True)
tests.add('failures/expected/device_failure.html', device_failure=True)
tests.add('failures/expected/timeout.html', timeout=True)
tests.add('failures/expected/missing_text.html', expected_text=None)
tests.add('failures/expected/needsrebaseline.html', actual_text='needsrebaseline text')
tests.add('failures/expected/needsmanualrebaseline.html', actual_text='needsmanualrebaseline text')
tests.add('failures/expected/image.html',
actual_image='image_fail-pngtEXtchecksum\x00checksum_fail',
expected_image='image-pngtEXtchecksum\x00checksum-png')
tests.add('failures/expected/image_checksum.html',
actual_checksum='image_checksum_fail-checksum',
actual_image='image_checksum_fail-png')
tests.add('failures/expected/audio.html',
actual_audio=base64.b64encode('audio_fail-wav'), expected_audio='audio-wav',
actual_text=None, expected_text=None,
actual_image=None, expected_image=None,
actual_checksum=None)
tests.add('failures/expected/keyboard.html', keyboard=True)
tests.add('failures/expected/missing_check.html',
expected_image='missing_check-png')
tests.add('failures/expected/missing_image.html', expected_image=None)
tests.add('failures/expected/missing_audio.html', expected_audio=None,
actual_text=None, expected_text=None,
actual_image=None, expected_image=None,
actual_checksum=None)
tests.add('failures/expected/missing_text.html', expected_text=None)
tests.add('failures/expected/newlines_leading.html',
expected_text="\nfoo\n", actual_text="foo\n")
tests.add('failures/expected/newlines_trailing.html',
expected_text="foo\n\n", actual_text="foo\n")
tests.add('failures/expected/newlines_with_excess_CR.html',
expected_text="foo\r\r\r\n", actual_text="foo\n")
tests.add('failures/expected/testharness.html',
actual_text='This is a testharness.js-based test.\nFAIL: assert fired\n.Harness: the test ran to completion.\n\n', expected_text=None,
actual_image=None, expected_image=None,
actual_checksum=None)
tests.add('failures/expected/text.html', actual_text='text_fail-png')
tests.add('failures/expected/crash_then_text.html')
tests.add('failures/expected/skip_text.html', actual_text='text diff')
tests.add('failures/flaky/text.html')
tests.add('failures/unexpected/missing_text.html', expected_text=None)
tests.add('failures/unexpected/missing_check.html', expected_image='missing-check-png')
tests.add('failures/unexpected/missing_image.html', expected_image=None)
tests.add('failures/unexpected/missing_render_tree_dump.html', actual_text="""layer at (0,0) size 800x600
RenderView at (0,0) size 800x600
layer at (0,0) size 800x34
RenderBlock {HTML} at (0,0) size 800x34
RenderBody {BODY} at (8,8) size 784x18
RenderText {#text} at (0,0) size 133x18
text run at (0,0) width 133: "This is an image test!"
""", expected_text=None)
tests.add('failures/unexpected/crash.html', crash=True)
tests.add('failures/unexpected/crash-with-stderr.html', crash=True,
error="mock-std-error-output")
tests.add('failures/unexpected/web-process-crash-with-stderr.html', web_process_crash=True,
error="mock-std-error-output")
tests.add('failures/unexpected/pass.html')
tests.add('failures/unexpected/text-checksum.html',
actual_text='text-checksum_fail-txt',
actual_checksum='text-checksum_fail-checksum')
tests.add('failures/unexpected/text-image-checksum.html',
actual_text='text-image-checksum_fail-txt',
actual_image='text-image-checksum_fail-pngtEXtchecksum\x00checksum_fail',
actual_checksum='text-image-checksum_fail-checksum')
tests.add('failures/unexpected/checksum-with-matching-image.html',
actual_checksum='text-image-checksum_fail-checksum')
tests.add('failures/unexpected/skip_pass.html')
tests.add('failures/unexpected/text.html', actual_text='text_fail-txt')
tests.add('failures/unexpected/text_then_crash.html')
tests.add('failures/unexpected/timeout.html', timeout=True)
tests.add('http/tests/passes/text.html')
tests.add('http/tests/passes/image.html')
tests.add('http/tests/ssl/text.html')
tests.add('passes/args.html')
tests.add('passes/error.html', error='stuff going to stderr')
tests.add('passes/image.html')
tests.add('passes/audio.html',
actual_audio=base64.b64encode('audio-wav'), expected_audio='audio-wav',
actual_text=None, expected_text=None,
actual_image=None, expected_image=None,
actual_checksum=None)
tests.add('passes/platform_image.html')
tests.add('passes/checksum_in_image.html',
expected_image='tEXtchecksum\x00checksum_in_image-checksum')
tests.add('passes/skipped/skip.html')
tests.add('passes/testharness.html',
actual_text='This is a testharness.js-based test.\nPASS: assert is fine\nHarness: the test ran to completion.\n\n', expected_text=None,
actual_image=None, expected_image=None,
actual_checksum=None)
# Note that here the checksums don't match but the images do, so this test passes "unexpectedly".
# See https://bugs.webkit.org/show_bug.cgi?id=69444 .
tests.add('failures/unexpected/checksum.html', actual_checksum='checksum_fail-checksum')
# Text output files contain "\r\n" on Windows. This may be
# helpfully filtered to "\r\r\n" by our Python/Cygwin tooling.
tests.add('passes/text.html',
expected_text='\nfoo\n\n', actual_text='\nfoo\r\n\r\r\n')
# For reftests.
tests.add_reftest('passes/reftest.html', 'passes/reftest-expected.html', same_image=True)
# This adds a different virtual reference to ensure that that also works.
tests.add('virtual/passes/reftest-expected.html', actual_checksum='xxx', actual_image='XXX', is_reftest=True)
tests.add_reftest('passes/mismatch.html', 'passes/mismatch-expected-mismatch.html', same_image=False)
tests.add_reftest('passes/svgreftest.svg', 'passes/svgreftest-expected.svg', same_image=True)
tests.add_reftest('passes/xhtreftest.xht', 'passes/xhtreftest-expected.html', same_image=True)
tests.add_reftest('passes/phpreftest.php', 'passes/phpreftest-expected-mismatch.svg', same_image=False)
tests.add_reftest('failures/expected/reftest.html', 'failures/expected/reftest-expected.html', same_image=False)
tests.add_reftest('failures/expected/mismatch.html', 'failures/expected/mismatch-expected-mismatch.html', same_image=True)
tests.add_reftest('failures/unexpected/crash-reftest.html', 'failures/unexpected/crash-reftest-expected.html', same_image=True, crash=True)
tests.add_reftest('failures/unexpected/reftest.html', 'failures/unexpected/reftest-expected.html', same_image=False)
tests.add_reftest('failures/unexpected/mismatch.html', 'failures/unexpected/mismatch-expected-mismatch.html', same_image=True)
tests.add('failures/unexpected/reftest-nopixel.html', actual_checksum=None, actual_image=None, is_reftest=True)
tests.add('failures/unexpected/reftest-nopixel-expected.html', actual_checksum=None, actual_image=None, is_reftest=True)
tests.add('reftests/foo/test.html')
tests.add('reftests/foo/test-ref.html')
tests.add('reftests/foo/multiple-match-success.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-match-failure.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-mismatch-success.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-mismatch-failure.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-both-success.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-both-failure.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/matching-ref.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/mismatching-ref.html', actual_checksum='def', actual_image='def')
tests.add('reftests/foo/second-mismatching-ref.html', actual_checksum='ghi', actual_image='ghi')
# The following files shouldn't be treated as reftests
tests.add_reftest('reftests/foo/unlistedtest.html', 'reftests/foo/unlistedtest-expected.html', same_image=True)
tests.add('reftests/foo/reference/bar/common.html')
tests.add('reftests/foo/reftest/bar/shared.html')
tests.add('websocket/tests/passes/text.html')
# For testing that we don't run tests under platform/. Note that these don't contribute to TOTAL_TESTS.
tests.add('platform/test-mac-leopard/http/test.html')
tests.add('platform/test-win-win7/http/test.html')
# For testing if perf tests are running in a locked shard.
tests.add('perf/foo/test.html')
tests.add('perf/foo/test-ref.html')
# For testing --pixel-test-directories.
tests.add('failures/unexpected/pixeldir/image_in_pixeldir.html',
actual_image='image_in_pixeldir-pngtEXtchecksum\x00checksum_fail',
expected_image='image_in_pixeldir-pngtEXtchecksum\x00checksum-png')
tests.add('failures/unexpected/image_not_in_pixeldir.html',
actual_image='image_not_in_pixeldir-pngtEXtchecksum\x00checksum_fail',
expected_image='image_not_in_pixeldir-pngtEXtchecksum\x00checksum-png')
# For testing that virtual test suites don't expand names containing themselves
# See webkit.org/b/97925 and base_unittest.PortTest.test_tests().
tests.add('passes/test-virtual-passes.html')
tests.add('passes/passes/test-virtual-passes.html')
return tests
# Here we use a non-standard location for the layout tests, to ensure that
# this works. The path contains a '.' in the name because we've seen bugs
# related to this before.
LAYOUT_TEST_DIR = '/test.checkout/LayoutTests'
PERF_TEST_DIR = '/test.checkout/PerformanceTests'
# Here we synthesize an in-memory filesystem from the test list
# in order to fully control the test output and to demonstrate that
# we don't need a real filesystem to run the tests.
def add_unit_tests_to_mock_filesystem(filesystem):
# Add the test_expectations file.
filesystem.maybe_make_directory('/mock-checkout/LayoutTests')
if not filesystem.exists('/mock-checkout/LayoutTests/TestExpectations'):
filesystem.write_text_file('/mock-checkout/LayoutTests/TestExpectations', """
Bug(test) failures/expected/crash.html [ Crash ]
Bug(test) failures/expected/crash_then_text.html [ Failure ]
Bug(test) failures/expected/image.html [ ImageOnlyFailure ]
Bug(test) failures/expected/needsrebaseline.html [ NeedsRebaseline ]
Bug(test) failures/expected/needsmanualrebaseline.html [ NeedsManualRebaseline ]
Bug(test) failures/expected/audio.html [ Failure ]
Bug(test) failures/expected/image_checksum.html [ ImageOnlyFailure ]
Bug(test) failures/expected/mismatch.html [ ImageOnlyFailure ]
Bug(test) failures/expected/missing_check.html [ Missing Pass ]
Bug(test) failures/expected/missing_image.html [ Missing Pass ]
Bug(test) failures/expected/missing_audio.html [ Missing Pass ]
Bug(test) failures/expected/missing_text.html [ Missing Pass ]
Bug(test) failures/expected/newlines_leading.html [ Failure ]
Bug(test) failures/expected/newlines_trailing.html [ Failure ]
Bug(test) failures/expected/newlines_with_excess_CR.html [ Failure ]
Bug(test) failures/expected/reftest.html [ ImageOnlyFailure ]
Bug(test) failures/expected/text.html [ Failure ]
Bug(test) failures/expected/testharness.html [ Failure ]
Bug(test) failures/expected/timeout.html [ Timeout ]
Bug(test) failures/expected/keyboard.html [ WontFix ]
Bug(test) failures/expected/exception.html [ WontFix ]
Bug(test) failures/expected/device_failure.html [ WontFix ]
Bug(test) failures/unexpected/pass.html [ Failure ]
Bug(test) passes/skipped/skip.html [ Skip ]
Bug(test) passes/text.html [ Pass ]
""")
filesystem.maybe_make_directory(LAYOUT_TEST_DIR + '/reftests/foo')
filesystem.write_text_file(LAYOUT_TEST_DIR + '/reftests/foo/reftest.list', """
== test.html test-ref.html
== multiple-match-success.html mismatching-ref.html
== multiple-match-success.html matching-ref.html
== multiple-match-failure.html mismatching-ref.html
== multiple-match-failure.html second-mismatching-ref.html
!= multiple-mismatch-success.html mismatching-ref.html
!= multiple-mismatch-success.html second-mismatching-ref.html
!= multiple-mismatch-failure.html mismatching-ref.html
!= multiple-mismatch-failure.html matching-ref.html
== multiple-both-success.html matching-ref.html
== multiple-both-success.html mismatching-ref.html
!= multiple-both-success.html second-mismatching-ref.html
== multiple-both-failure.html matching-ref.html
!= multiple-both-failure.html second-mismatching-ref.html
!= multiple-both-failure.html matching-ref.html
""")
# FIXME: This test was only being ignored because of missing a leading '/'.
# Fixing the typo causes several tests to assert, so disabling the test entirely.
# Add in a file should be ignored by port.find_test_files().
#files[LAYOUT_TEST_DIR + '/userscripts/resources/iframe.html'] = 'iframe'
def add_file(test, suffix, contents):
dirname = filesystem.join(LAYOUT_TEST_DIR, test.name[0:test.name.rfind('/')])
base = test.base
filesystem.maybe_make_directory(dirname)
filesystem.write_binary_file(filesystem.join(dirname, base + suffix), contents)
# Add each test and the expected output, if any.
test_list = unit_test_list()
for test in test_list.tests.values():
add_file(test, test.name[test.name.rfind('.'):], '')
if test.is_reftest:
continue
if test.actual_audio:
add_file(test, '-expected.wav', test.expected_audio)
continue
add_file(test, '-expected.txt', test.expected_text)
add_file(test, '-expected.png', test.expected_image)
filesystem.write_text_file(filesystem.join(LAYOUT_TEST_DIR, 'virtual', 'passes', 'args-expected.txt'), 'args-txt --virtual-arg')
# Clear the list of written files so that we can watch what happens during testing.
filesystem.clear_written_files()
class TestPort(Port):
port_name = 'test'
default_port_name = 'test-mac-leopard'
"""Test implementation of the Port interface."""
ALL_BASELINE_VARIANTS = (
'test-linux-x86_64',
'test-mac-snowleopard', 'test-mac-leopard',
'test-win-win7', 'test-win-xp',
)
FALLBACK_PATHS = {
'xp': ['test-win-win7', 'test-win-xp'],
'win7': ['test-win-win7'],
'leopard': ['test-mac-leopard', 'test-mac-snowleopard'],
'snowleopard': ['test-mac-snowleopard'],
'lucid': ['test-linux-x86_64', 'test-win-win7'],
}
@classmethod
def determine_full_port_name(cls, host, options, port_name):
if port_name == 'test':
return TestPort.default_port_name
return port_name
def __init__(self, host, port_name=None, **kwargs):
Port.__init__(self, host, port_name or TestPort.default_port_name, **kwargs)
self._tests = unit_test_list()
self._flakes = set()
# FIXME: crbug.com/279494. This needs to be in the "real layout tests
# dir" in a mock filesystem, rather than outside of the checkout, so
# that tests that want to write to a TestExpectations file can share
# this between "test" ports and "real" ports. This is the result of
# rebaseline_unittest.py having tests that refer to "real" port names
# and real builders instead of fake builders that point back to the
# test ports. rebaseline_unittest.py needs to not mix both "real" ports
# and "test" ports
self._generic_expectations_path = '/mock-checkout/LayoutTests/TestExpectations'
self._results_directory = None
self._operating_system = 'mac'
if self._name.startswith('test-win'):
self._operating_system = 'win'
elif self._name.startswith('test-linux'):
self._operating_system = 'linux'
version_map = {
'test-win-xp': 'xp',
'test-win-win7': 'win7',
'test-mac-leopard': 'leopard',
'test-mac-snowleopard': 'snowleopard',
'test-linux-x86_64': 'lucid',
}
self._version = version_map[self._name]
def repository_paths(self):
"""Returns a list of (repository_name, repository_path) tuples of its depending code base."""
# FIXME: We override this just to keep the perf tests happy.
return [('blink', self.layout_tests_dir())]
def buildbot_archives_baselines(self):
return self._name != 'test-win-xp'
def default_pixel_tests(self):
return True
def _path_to_driver(self):
# This routine shouldn't normally be called, but it is called by
# the mock_drt Driver. We return something, but make sure it's useless.
return 'MOCK _path_to_driver'
def default_child_processes(self):
return 1
def check_build(self, needs_http, printer):
return test_run_results.OK_EXIT_STATUS
def check_sys_deps(self, needs_http):
return test_run_results.OK_EXIT_STATUS
def default_configuration(self):
return 'Release'
def diff_image(self, expected_contents, actual_contents):
diffed = actual_contents != expected_contents
if not actual_contents and not expected_contents:
return (None, None)
if not actual_contents or not expected_contents:
return (True, None)
if diffed:
return ("< %s\n---\n> %s\n" % (expected_contents, actual_contents), None)
return (None, None)
def layout_tests_dir(self):
return LAYOUT_TEST_DIR
def perf_tests_dir(self):
return PERF_TEST_DIR
def webkit_base(self):
return '/test.checkout'
def _skipped_tests_for_unsupported_features(self, test_list):
return set(['failures/expected/skip_text.html',
'failures/unexpected/skip_pass.html',
'virtual/skipped'])
def name(self):
return self._name
def operating_system(self):
return self._operating_system
def _path_to_wdiff(self):
return None
def default_results_directory(self):
return '/tmp/layout-test-results'
def setup_test_run(self):
pass
def _driver_class(self):
return TestDriver
def start_http_server(self, additional_dirs=None, number_of_servers=None):
pass
def start_websocket_server(self):
pass
def acquire_http_lock(self):
pass
def stop_http_server(self):
pass
def stop_websocket_server(self):
pass
def release_http_lock(self):
pass
def _path_to_lighttpd(self):
return "/usr/sbin/lighttpd"
def _path_to_lighttpd_modules(self):
return "/usr/lib/lighttpd"
def _path_to_lighttpd_php(self):
return "/usr/bin/php-cgi"
def _path_to_apache(self):
return "/usr/sbin/httpd"
def _path_to_apache_config_file(self):
return self._filesystem.join(self.layout_tests_dir(), 'http', 'conf', 'httpd.conf')
def path_to_generic_test_expectations_file(self):
return self._generic_expectations_path
def _port_specific_expectations_files(self):
return [self._filesystem.join(self._webkit_baseline_path(d), 'TestExpectations') for d in ['test', 'test-win-xp']]
def all_test_configurations(self):
"""Returns a sequence of the TestConfigurations the port supports."""
# By default, we assume we want to test every graphics type in
# every configuration on every system.
test_configurations = []
for version, architecture in self._all_systems():
for build_type in self._all_build_types():
test_configurations.append(TestConfiguration(
version=version,
architecture=architecture,
build_type=build_type))
return test_configurations
def _all_systems(self):
return (('leopard', 'x86'),
('snowleopard', 'x86'),
('xp', 'x86'),
('win7', 'x86'),
('lucid', 'x86'),
('lucid', 'x86_64'))
def _all_build_types(self):
return ('debug', 'release')
def configuration_specifier_macros(self):
"""To avoid surprises when introducing new macros, these are intentionally fixed in time."""
return {'mac': ['leopard', 'snowleopard'], 'win': ['xp', 'win7'], 'linux': ['lucid']}
def all_baseline_variants(self):
return self.ALL_BASELINE_VARIANTS
def virtual_test_suites(self):
return [
VirtualTestSuite('passes', 'passes', ['--virtual-arg'], use_legacy_naming=True),
VirtualTestSuite('skipped', 'failures/expected', ['--virtual-arg2'], use_legacy_naming=True),
]
class TestDriver(Driver):
"""Test/Dummy implementation of the driver interface."""
next_pid = 1
def __init__(self, *args, **kwargs):
super(TestDriver, self).__init__(*args, **kwargs)
self.started = False
self.pid = 0
def cmd_line(self, pixel_tests, per_test_args):
pixel_tests_flag = '-p' if pixel_tests else ''
return [self._port._path_to_driver()] + [pixel_tests_flag] + self._port.get_option('additional_drt_flag', []) + per_test_args
def run_test(self, driver_input, stop_when_done):
if not self.started:
self.started = True
self.pid = TestDriver.next_pid
TestDriver.next_pid += 1
start_time = time.time()
test_name = driver_input.test_name
test_args = driver_input.args or []
test = self._port._tests[test_name]
if test.keyboard:
raise KeyboardInterrupt
if test.exception:
raise ValueError('exception from ' + test_name)
if test.device_failure:
raise DeviceFailure('device failure in ' + test_name)
audio = None
actual_text = test.actual_text
crash = test.crash
web_process_crash = test.web_process_crash
        if 'flaky/text.html' in test_name and test_name not in self._port._flakes:
self._port._flakes.add(test_name)
actual_text = 'flaky text failure'
if 'crash_then_text.html' in test_name:
if test_name in self._port._flakes:
actual_text = 'text failure'
else:
self._port._flakes.add(test_name)
crashed_process_name = self._port.driver_name()
crashed_pid = 1
crash = True
if 'text_then_crash.html' in test_name:
if test_name in self._port._flakes:
crashed_process_name = self._port.driver_name()
crashed_pid = 1
crash = True
else:
self._port._flakes.add(test_name)
actual_text = 'text failure'
if actual_text and test_args and test_name == 'passes/args.html':
actual_text = actual_text + ' ' + ' '.join(test_args)
if test.actual_audio:
audio = base64.b64decode(test.actual_audio)
crashed_process_name = None
crashed_pid = None
if crash:
crashed_process_name = self._port.driver_name()
crashed_pid = 1
elif web_process_crash:
crashed_process_name = 'WebProcess'
crashed_pid = 2
crash_log = ''
if crashed_process_name:
crash_logs = CrashLogs(self._port.host)
crash_log = crash_logs.find_newest_log(crashed_process_name, None) or ''
if stop_when_done:
self.stop()
if test.actual_checksum == driver_input.image_hash:
image = None
else:
image = test.actual_image
return DriverOutput(actual_text, image, test.actual_checksum, audio,
crash=(crash or web_process_crash), crashed_process_name=crashed_process_name,
crashed_pid=crashed_pid, crash_log=crash_log,
test_time=time.time() - start_time, timeout=test.timeout, error=test.error, pid=self.pid)
def stop(self):
self.started = False
| lordmos/blink | Tools/Scripts/webkitpy/layout_tests/port/test.py | Python | mit | 29,134 |
from jupyter_workflow.data import get_fremont_data
import pandas as pd
def test_fremont_data():
data = get_fremont_data()
assert all(data.columns == ['West','East','Total'])
assert isinstance(data.index,pd.DatetimeIndex)
| irenalanc/JupyterPythonPals | jupyter_workflow/tests/test_data.py | Python | mit | 234 |
#Unused
def fail():
for t in [TypeA, TypeB]:
x = TypeA()
run_test(x)
#OK by name
def OK1(seq):
for _ in seq:
do_something()
print("Hi")
#OK counting
def OK2(seq):
i = 3
for x in seq:
i += 1
return i
#OK check emptiness
def OK3(seq):
for thing in seq:
return "Not empty"
return "empty"
#OK iteration over range
def OK4(n):
r = range(n)
for i in r:
print("x")
#OK named as unused
def OK5(seq):
for unused_x in seq:
print("x")
#ODASA-3794
def OK6(seq):
for thing in seq:
if sum(1 for s in STATUSES
if thing <= s < thing + 100) >= quorum:
return True
#OK -- Implicitly using count
def OK7(seq):
for x in seq:
queue.add(None)
def OK8(seq):
for x in seq:
output.append("</item>")
#Likewise with parameters
def OK7(seq, queue):
for x in seq:
queue.add(None)
def OK8(seq, output):
for x in seq:
output.append("</item>")
#Not OK -- Use a constant, but also a variable
def fail2(sequence):
for x in sequence:
for y in sequence:
do_something(x+1)
def fail3(sequence):
for x in sequence:
do_something(x+1)
for y in sequence:
do_something(x+1)
def fail4(coll, sequence):
while coll:
x = coll.pop()
for s in sequence:
do_something(x+1)
#OK See ODASA-4153 and ODASA-4533
def fail5(t):
x, y = t
return x
class OK9(object):
cls_attr = 0
def __init__(self):
self.attr = self.cls_attr
__all__ = [ 'hello' ]
__all__.extend(foo())
maybe_defined_in_all = 17
#ODASA-5895
def rand_list():
return [ random.random() for i in range(100) ]
def kwargs_is_a_use(seq):
for arg in seq:
func(**arg)
#A deletion is a use, but this is almost certainly an error
def cleanup(sessions):
for sess in sessions:
# Original code had some comment about deleting sessions
del sess
# For SuspiciousUnusedLoopIterationVariable.ql
# ok
for x in list(range(100)):
print('hi')
# ok
for y in list(list(range(100))):
print('hi')
| github/codeql | python/ql/test/query-tests/Variables/unused/test.py | Python | mit | 2,148 |
from __future__ import division
import random
import matrix
from tile import Tile
class Island(object):
def __init__(self, width=300, height=300):
self.radius = None
self.shore_noise = None
self.rect_shore = None
self.shore_lines = None
self.peak = None
self.spokes = None
self.tiles = [[None] * width for _ in range(height)]
def cells_to_tiles(self, *cells):
"""
Apply a Cell(x, y, z) into an Island tile height.
"""
for x, y, z in cells:
self.tiles[x][y] = Tile(x, y, z)
def flood_fill(self, start=None):
"""
Sets all None tiles to Tile(x, y, -1) within the island shore.
"""
if self.peak:
center_x, center_y = self.peak.x, self.peak.y
elif start:
center_x, center_y = start.x, start.y
else:
raise ValueError('Must define peak or start cell for flood fill.')
print('Flood filling')
seen = set()
start = (center_x, center_y)
stack = [start]
while True:
adjacent = False # Has no adjacent unvisited pixels
for dx, dy in [(1, 0), (-1, 0), (0, 1), (0, -1)]: # Check 4 neighbours
x, y = start[0] + dx, start[1] + dy
if (x, y) in seen:
continue
else:
if self.tiles[x][y] is None:
adjacent = True
stack.append((x, y))
self.tiles[x][y] = Tile(x, y, -1) # Set height -1
seen.add((x, y))
if not adjacent:
stack.pop()
if not stack:
break
else:
start = stack[-1]
def normalize(self):
max_height = 1
for row in self.tiles:
for tile in row:
if tile is not None:
if tile.height > max_height:
max_height = tile.height
for row in self.tiles:
for tile in row:
if tile is not None:
if tile.height > 0: # Ignore negative tiles
tile.height = float(tile.height) / max_height
elif tile.height < 0:
tile.height = -1
def height_fill(self):
attempt = 0
last_empty_count = 0
while self.has_empty:
empties = self.empties()
empty_count = len(empties)
print('Island has {} empty tiles'.format(empty_count))
if empty_count == last_empty_count:
attempt += 1
last_empty_count = empty_count
            if attempt > 10:
                break
random.shuffle(empties)
while empties:
i, j = empties.pop()
tile = self.tiles[i][j]
if tile and tile.height == -1:
averages = []
for span in range(1, 5):
ring_total = 0
neighbour_count = 0
ring_avg = 0
for x, y in matrix.find_neighbours_2D(self.tiles, (i, j), span):
try:
value = self.tiles[x][y].height
# print('value: {}'.format(value))
except (IndexError, AttributeError):
continue
if value in [-1,]:
continue
ring_total += value
neighbour_count += 1
if ring_total:
ring_avg = ring_total/neighbour_count
# averages.append(ring_avg * 9 / span ** 0.9) # Further away == less impact
averages.append(ring_avg) # Further away == less impact
if averages:
# print(averages)
overall = sum(averages)/len(averages)
# print('overall: {}'.format(overall))
tile.height = overall
@property
def has_empty(self):
        return any(tile.height == -1
                   for row in self.tiles for tile in row if tile is not None)
def empties(self):
empty_cells = []
for i in range(len(self.tiles)):
for j in range(len(self.tiles[0])):
if self.tiles[i][j] is not None and self.tiles[i][j].height == -1:
empty_cells.append((i, j))
return empty_cells
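# --- Hypothetical usage sketch; not part of the original module. It assumes that
# Tile(x, y, z) stores z as its `height` attribute, which is how the methods above
# read it back.
if __name__ == '__main__':
    isle = Island(width=20, height=20)
    # Seed two tiles: a peak of height 10 and a neighbour of height 4.
    isle.cells_to_tiles((5, 5, 10), (5, 6, 4))
    isle.normalize()
    print(isle.tiles[5][5].height)  # 1.0 after scaling against the max height of 10
    print(isle.tiles[5][6].height)  # 0.4
    print(isle.has_empty)           # False: nothing was flood-filled to -1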
| supermitch/Island-Gen | island.py | Python | mit | 4,697 |
from __future__ import absolute_import, unicode_literals
from builtins import str
import os
import pytest
import io
from glob import glob
from psd_tools import PSDImage
from psd2svg import psd2svg
FIXTURES = [
p for p in glob(
os.path.join(os.path.dirname(__file__), 'fixtures', '*.psd'))
]
@pytest.mark.parametrize('psd_file', FIXTURES)
def test_convert(tmpdir, psd_file):
psd2svg(psd_file, tmpdir.dirname)
@pytest.mark.parametrize('psd_file', FIXTURES[0:1])
def test_input_io(tmpdir, psd_file):
with open(psd_file, "rb") as f:
assert isinstance(psd2svg(f), str)
@pytest.mark.parametrize('psd_file', FIXTURES[0:1])
def test_input_psd(tmpdir, psd_file):
psd = PSDImage.open(psd_file)
psd2svg(psd)
@pytest.mark.parametrize('psd_file', FIXTURES[2:3])
def test_input_layer(tmpdir, psd_file):
psd = PSDImage.open(psd_file)
assert psd2svg(psd[0]).startswith("<")
@pytest.mark.parametrize('psd_file', FIXTURES[0:1])
def test_output_io(tmpdir, psd_file):
with io.StringIO() as f:
assert f == psd2svg(psd_file, f)
| kyamagu/psd2svg | tests/test_convert.py | Python | mit | 1,077 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteConnectionsOperations(object):
"""ExpressRouteConnectionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name, # type: str
express_route_gateway_name, # type: str
connection_name, # type: str
put_express_route_connection_parameters, # type: "_models.ExpressRouteConnection"
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteConnection"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(put_express_route_connection_parameters, 'ExpressRouteConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
express_route_gateway_name, # type: str
connection_name, # type: str
put_express_route_connection_parameters, # type: "_models.ExpressRouteConnection"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ExpressRouteConnection"]
"""Creates a connection between an ExpressRoute gateway and an ExpressRoute circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:param connection_name: The name of the connection subresource.
:type connection_name: str
:param put_express_route_connection_parameters: Parameters required in an
ExpressRouteConnection PUT operation.
:type put_express_route_connection_parameters: ~azure.mgmt.network.v2021_02_01.models.ExpressRouteConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ExpressRouteConnection or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2021_02_01.models.ExpressRouteConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
express_route_gateway_name=express_route_gateway_name,
connection_name=connection_name,
put_express_route_connection_parameters=put_express_route_connection_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
express_route_gateway_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteConnection"
"""Gets the specified ExpressRouteConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:param connection_name: The name of the ExpressRoute connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_02_01.models.ExpressRouteConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
express_route_gateway_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
express_route_gateway_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes a connection to a ExpressRoute circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:param connection_name: The name of the connection subresource.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
express_route_gateway_name=express_route_gateway_name,
connection_name=connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
express_route_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteConnectionList"
"""Lists ExpressRouteConnections.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteConnectionList, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_02_01.models.ExpressRouteConnectionList
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteConnectionList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteConnectionList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections'} # type: ignore
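# --- Hypothetical usage sketch; not part of the generated client code. It assumes the
# azure-identity package is installed and that this operation group is reached through
# NetworkManagementClient as `express_route_connections`.
if __name__ == '__main__':
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient
    client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    # get() is synchronous; begin_create_or_update()/begin_delete() return an LROPoller
    # whose .result() blocks until the long-running operation completes.
    connection = client.express_route_connections.get(
        resource_group_name="<resource-group>",
        express_route_gateway_name="<er-gateway>",
        connection_name="<connection>",
    )
    print(connection.name)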
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_02_01/operations/_express_route_connections_operations.py | Python | mit | 22,003 |
# -*- coding: utf-8 -*-
import unittest
from pyparsing import ParseException
from tests.utils.grammar import get_record_grammar
"""
CWR Non-Roman Alphabet Agreement Party Name grammar tests.
The following cases are tested:
- a fully populated record
- a record containing only the required fields
- a record containing extended (non-ASCII) characters
- an empty string, which should raise a ParseException
- an arbitrary invalid string, which should raise a ParseException
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
class TestNPAGrammar(unittest.TestCase):
"""
Tests that the NPA grammar decodes correctly formatted strings
"""
def setUp(self):
self.grammar = get_record_grammar('nra_agreement_party')
def test_valid_full(self):
"""
        Tests that the NPA grammar decodes a correctly formatted record.
This test contains all the optional fields.
"""
record = 'NPA0000123400000023012345678PARTY NAME PARTY WRITER NAME ES'
result = self.grammar.parseString(record)[0]
self.assertEqual('NPA', result.record_type)
self.assertEqual(1234, result.transaction_sequence_n)
self.assertEqual(23, result.record_sequence_n)
self.assertEqual('012345678', result.ip_n)
self.assertEqual('PARTY NAME', result.ip_name)
self.assertEqual('PARTY WRITER NAME', result.ip_writer_name)
self.assertEqual('ES', result.language_code)
def test_valid_min(self):
"""
        Tests that the NPA grammar decodes a correctly formatted record.
This test contains none of the optional fields.
"""
record = 'NPA0000123400000023000000000PARTY NAME PARTY WRITER NAME '
result = self.grammar.parseString(record)[0]
self.assertEqual('NPA', result.record_type)
self.assertEqual(1234, result.transaction_sequence_n)
self.assertEqual(23, result.record_sequence_n)
self.assertEqual('000000000', result.ip_n)
self.assertEqual('PARTY NAME', result.ip_name)
self.assertEqual('PARTY WRITER NAME', result.ip_writer_name)
self.assertEqual(None, result.language_code)
def test_extended_character(self):
"""
        Tests that the NPA grammar decodes a correctly formatted record.
        This test includes extended (non-ASCII) characters in the name fields.
"""
record = 'NPA0000123400000023000000000PARTY NAME \xc6\x8f PARTY WRITER NAME \xc6\x8f '
result = self.grammar.parseString(record)[0]
self.assertEqual('NPA', result.record_type)
self.assertEqual(1234, result.transaction_sequence_n)
self.assertEqual(23, result.record_sequence_n)
self.assertEqual('000000000', result.ip_n)
self.assertEqual('PARTY NAME \xc6\x8f', result.ip_name)
self.assertEqual('PARTY WRITER NAME \xc6\x8f', result.ip_writer_name)
self.assertEqual(None, result.language_code)
class TestNPAGrammarException(unittest.TestCase):
def setUp(self):
self.grammar = get_record_grammar('nra_agreement_party')
def test_empty(self):
"""
        Tests that an exception is thrown when the record is empty.
"""
record = ''
self.assertRaises(ParseException, self.grammar.parseString, record)
def test_invalid(self):
record = 'This is an invalid string'
self.assertRaises(ParseException, self.grammar.parseString, record)
| weso/CWR-DataApi | tests/grammar/factory/record/test_npa.py | Python | mit | 4,113 |
# -*- coding:utf-8 -*-
from collections import defaultdict
import numpy
class ThompsonAgent:
def __init__(self, seed=None):
self._succeeds = defaultdict(int)
self._fails = defaultdict(int)
self._np_random = numpy.random.RandomState(seed)
def choose(self, arms, features=None):
return max(arms, key=lambda arm: self._score(arm))
def _score(self, arm):
return self._np_random.beta(
self._succeeds[arm] + 0.5,
self._fails[arm] + 0.5)
def update(self, arm, reward, arms=None, features=None):
if reward > 0:
self._succeeds[arm] += 1
else:
            self._fails[arm] += 1
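# --- Hypothetical usage sketch; not part of the original module. Simulates a two-armed
# Bernoulli bandit with made-up payout probabilities; the agent should learn to prefer
# the better arm 'b'.
if __name__ == '__main__':
    import random
    random.seed(0)
    agent = ThompsonAgent(seed=0)
    payout = {'a': 0.3, 'b': 0.6}
    picks = {'a': 0, 'b': 0}
    for _ in range(1000):
        arm = agent.choose(['a', 'b'])
        reward = 1 if random.random() < payout[arm] else 0
        agent.update(arm, reward)
        picks[arm] += 1
    print(picks)  # 'b' should receive the large majority of pulls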
| ohtaman/pynm | pynm/reinforce/bandit/thompson.py | Python | mit | 683 |
"""Sensitive variant calling using VarDict.
Defaults to using the faster, equally sensitive Java port:
https://github.com/AstraZeneca-NGS/VarDictJava
if 'vardict' or 'vardict-java' is specified in the configuration. To use the
VarDict perl version:
https://github.com/AstraZeneca-NGS/VarDict
specify 'vardict-perl'.
"""
import os
import itertools
import sys
import toolz as tz
import pybedtools
from bcbio import broad, utils
from bcbio.bam import highdepth
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils, shared
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.variation import annotation, bamprep, vcfutils
def _is_bed_file(target):
return target and isinstance(target, basestring) and os.path.isfile(target)
def _vardict_options_from_config(items, config, out_file, target=None):
opts = ["-c 1", "-S 2", "-E 3", "-g 4"]
# ["-z", "-F", "-c", "1", "-S", "2", "-E", "3", "-g", "4", "-x", "0",
# "-k", "3", "-r", "4", "-m", "8"]
resources = config_utils.get_resources("vardict", config)
if resources.get("options"):
opts += resources["options"]
assert _is_bed_file(target)
if any(tz.get_in(["config", "algorithm", "coverage_interval"], x, "").lower() == "genome"
for x in items):
target = shared.remove_highdepth_regions(target, items)
target = shared.remove_lcr_regions(target, items)
target = _enforce_max_region_size(target, items[0])
opts += [target] # this must be the last option
return opts
def _enforce_max_region_size(in_file, data):
"""Ensure we don't have any chunks in the region greater than 1Mb.
Larger sections have high memory usage on VarDictJava and failures
on VarDict. This creates minimum windows from the input BED file
to avoid these issues. Downstream VarDict merging sorts out any
variants across windows.
"""
max_size = 1e6
overlap_size = 250
def _has_larger_regions(f):
return any(r.stop - r.start > max_size for r in pybedtools.BedTool(f))
out_file = "%s-regionlimit%s" % utils.splitext_plus(in_file)
if not utils.file_exists(out_file):
if _has_larger_regions(in_file):
with file_transaction(data, out_file) as tx_out_file:
pybedtools.BedTool().window_maker(w=max_size,
s=max_size - overlap_size,
b=pybedtools.BedTool(in_file)).saveas(tx_out_file)
else:
utils.symlink_plus(in_file, out_file)
return out_file
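# Illustration with hypothetical coordinates: a single 2.5 Mb BED interval would be cut
# into overlapping windows of at most 1 Mb, roughly 0-1,000,000, 999,750-1,999,750 and
# 1,999,500-2,500,000, each sharing 250 bp with its neighbour so no variant is lost at
# a window boundary.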
def run_vardict(align_bams, items, ref_file, assoc_files, region=None,
out_file=None):
"""Run VarDict variant calling.
"""
if vcfutils.is_paired_analysis(align_bams, items):
call_file = _run_vardict_paired(align_bams, items, ref_file,
assoc_files, region, out_file)
else:
vcfutils.check_paired_problems(items)
call_file = _run_vardict_caller(align_bams, items, ref_file,
assoc_files, region, out_file)
return call_file
def _get_jvm_opts(data, out_file):
"""Retrieve JVM options when running the Java version of VarDict.
"""
if get_vardict_command(data) == "vardict-java":
resources = config_utils.get_resources("vardict", data["config"])
jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx4g"])
jvm_opts += broad.get_default_jvm_opts(os.path.dirname(out_file))
return "export VAR_DICT_OPTS='%s' && " % " ".join(jvm_opts)
else:
return ""
def _run_vardict_caller(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
"""Detect SNPs and indels with VarDict.
"""
config = items[0]["config"]
if out_file is None:
out_file = "%s-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
if not utils.file_exists(out_file):
with file_transaction(items[0], out_file) as tx_out_file:
target = shared.subset_variant_regions(dd.get_variant_regions(items[0]), region,
out_file, do_merge=False)
num_bams = len(align_bams)
sample_vcf_names = [] # for individual sample names, given batch calling may be required
for bamfile, item in itertools.izip(align_bams, items):
# prepare commands
sample = dd.get_sample_name(item)
vardict = get_vardict_command(items[0])
strandbias = "teststrandbias.R"
var2vcf = "var2vcf_valid.pl"
opts = (" ".join(_vardict_options_from_config(items, config, out_file, target))
if _is_bed_file(target) else "")
vcfstreamsort = config_utils.get_program("vcfstreamsort", config)
compress_cmd = "| bgzip -c" if out_file.endswith("gz") else ""
freq = float(utils.get_in(config, ("algorithm", "min_allele_fraction"), 10)) / 100.0
coverage_interval = utils.get_in(config, ("algorithm", "coverage_interval"), "exome")
# for deep targeted panels, require 50 worth of coverage
var2vcf_opts = " -v 50 " if highdepth.get_median_coverage(items[0]) > 5000 else ""
fix_ambig = vcfutils.fix_ambiguous_cl()
remove_dup = vcfutils.remove_dup_cl()
jvm_opts = _get_jvm_opts(items[0], tx_out_file)
r_setup = "unset R_HOME && export PATH=%s:$PATH && " % os.path.dirname(utils.Rscript_cmd())
cmd = ("{r_setup}{jvm_opts}{vardict} -G {ref_file} -f {freq} "
"-N {sample} -b {bamfile} {opts} "
"| {strandbias}"
"| {var2vcf} -N {sample} -E -f {freq} {var2vcf_opts} "
"| {fix_ambig} | {remove_dup} | {vcfstreamsort} {compress_cmd}")
if num_bams > 1:
temp_file_prefix = out_file.replace(".gz", "").replace(".vcf", "") + item["name"][1]
tmp_out = temp_file_prefix + ".temp.vcf"
tmp_out += ".gz" if out_file.endswith("gz") else ""
sample_vcf_names.append(tmp_out)
with file_transaction(item, tmp_out) as tx_tmp_file:
if not _is_bed_file(target):
vcfutils.write_empty_vcf(tx_tmp_file, config, samples=[sample])
else:
cmd += " > {tx_tmp_file}"
do.run(cmd.format(**locals()), "Genotyping with VarDict: Inference", {})
else:
if not _is_bed_file(target):
vcfutils.write_empty_vcf(tx_out_file, config, samples=[sample])
else:
cmd += " > {tx_out_file}"
do.run(cmd.format(**locals()), "Genotyping with VarDict: Inference", {})
if num_bams > 1:
# N.B. merge_variant_files wants region in 1-based end-inclusive
# coordinates. Thus use bamprep.region_to_gatk
vcfutils.merge_variant_files(orig_files=sample_vcf_names,
out_file=tx_out_file, ref_file=ref_file,
config=config, region=bamprep.region_to_gatk(region))
out_file = (annotation.add_dbsnp(out_file, assoc_files["dbsnp"], config)
if assoc_files.get("dbsnp") else out_file)
return out_file
def _safe_to_float(x):
if x is None:
return None
else:
try:
return float(x)
except ValueError:
return None
def depth_freq_filter(line, tumor_index, aligner):
"""Command line to filter VarDict calls based on depth, frequency and quality.
Looks at regions with low depth for allele frequency (AF * DP < 6, the equivalent
of < 13bp for heterogygote calls, but generalized. Within these calls filters if a
calls has:
- Low mapping quality and multiple mismatches in a read (NM)
For bwa only: MQ < 55.0 and NM > 1.0 or MQ < 60.0 and NM > 2.0
- Low depth (DP < 10)
- Low QUAL (QUAL < 45)
Also filters in low allele frequency regions with poor quality, if all of these are
true:
- Allele frequency < 0.2
- Quality < 55
- P-value (SSF) > 0.06
"""
if line.startswith("#CHROM"):
headers = [('##FILTER=<ID=LowAlleleDepth,Description="Low depth per allele frequency '
'along with poor depth, quality, mapping quality and read mismatches.">'),
('##FILTER=<ID=LowFreqQuality,Description="Low frequency read with '
'poor quality and p-value (SSF).">')]
return "\n".join(headers) + "\n" + line
elif line.startswith("#"):
return line
else:
parts = line.split("\t")
sample_ft = {a: v for (a, v) in zip(parts[8].split(":"), parts[9 + tumor_index].split(":"))}
qual = _safe_to_float(parts[5])
dp = _safe_to_float(sample_ft.get("DP"))
af = _safe_to_float(sample_ft.get("AF"))
nm = _safe_to_float(sample_ft.get("NM"))
mq = _safe_to_float(sample_ft.get("MQ"))
ssfs = [x for x in parts[7].split(";") if x.startswith("SSF=")]
pval = _safe_to_float(ssfs[0].split("=")[-1] if ssfs else None)
fname = None
if dp is not None and af is not None:
if dp * af < 6:
if aligner == "bwa" and nm is not None and mq is not None:
if (mq < 55.0 and nm > 1.0) or (mq < 60.0 and nm > 2.0):
fname = "LowAlleleDepth"
if dp < 10:
fname = "LowAlleleDepth"
if qual is not None and qual < 45:
fname = "LowAlleleDepth"
if af is not None and qual is not None and pval is not None:
if af < 0.2 and qual < 55 and pval > 0.06:
fname = "LowFreqQuality"
if fname:
if parts[6] in set([".", "PASS"]):
parts[6] = fname
else:
parts[6] += ";%s" % fname
line = "\t".join(parts)
return line
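# --- Hypothetical worked example; not part of the original module. ---
def _example_depth_freq_filter():
    """Illustrative sketch with made-up data showing how a low-depth call gets tagged.

    For a bwa-aligned tumor sample at index 0, DP=8 and AF=0.5 give DP * AF = 4 < 6,
    and DP < 10, so the FILTER column of the returned line becomes LowAlleleDepth.
    """
    line = "chr1\t100\t.\tA\tT\t40\tPASS\tSSF=0.01\tDP:AF:NM:MQ\t8:0.5:1:60"
    return depth_freq_filter(line, tumor_index=0, aligner="bwa")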
def _run_vardict_paired(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
"""Detect variants with Vardict.
This is used for paired tumor / normal samples.
"""
config = items[0]["config"]
if out_file is None:
out_file = "%s-paired-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
if not utils.file_exists(out_file):
with file_transaction(items[0], out_file) as tx_out_file:
target = shared.subset_variant_regions(dd.get_variant_regions(items[0]), region,
out_file, do_merge=True)
paired = vcfutils.get_paired_bams(align_bams, items)
if not _is_bed_file(target):
vcfutils.write_empty_vcf(tx_out_file, config,
samples=[x for x in [paired.tumor_name, paired.normal_name] if x])
else:
if not paired.normal_bam:
ann_file = _run_vardict_caller(align_bams, items, ref_file,
assoc_files, region, out_file)
return ann_file
vcffilter = config_utils.get_program("vcffilter", config)
vardict = get_vardict_command(items[0])
vcfstreamsort = config_utils.get_program("vcfstreamsort", config)
strandbias = "testsomatic.R"
var2vcf = "var2vcf_paired.pl"
compress_cmd = "| bgzip -c" if out_file.endswith("gz") else ""
freq = float(utils.get_in(config, ("algorithm", "min_allele_fraction"), 10)) / 100.0
# merge bed file regions as amplicon VarDict is only supported in single sample mode
opts = " ".join(_vardict_options_from_config(items, config, out_file, target))
coverage_interval = utils.get_in(config, ("algorithm", "coverage_interval"), "exome")
# for deep targeted panels, require 50 worth of coverage
var2vcf_opts = " -v 50 " if highdepth.get_median_coverage(items[0]) > 5000 else ""
fix_ambig = vcfutils.fix_ambiguous_cl()
remove_dup = vcfutils.remove_dup_cl()
if any("vardict_somatic_filter" in tz.get_in(("config", "algorithm", "tools_off"), data, [])
for data in items):
somatic_filter = ""
freq_filter = ""
else:
var2vcf_opts += " -M " # this makes VarDict soft filter non-differential variants
somatic_filter = ("| sed 's/\\\\.*Somatic\\\\/Somatic/' "
"| sed 's/REJECT,Description=\".*\">/REJECT,Description=\"Not Somatic via VarDict\">/' "
"| %s -x 'bcbio.variation.freebayes.call_somatic(x)'" %
os.path.join(os.path.dirname(sys.executable), "py"))
freq_filter = ("| bcftools filter -m '+' -s 'REJECT' -e 'STATUS !~ \".*Somatic\"' 2> /dev/null "
"| %s -x 'bcbio.variation.vardict.depth_freq_filter(x, %s, \"%s\")'" %
(os.path.join(os.path.dirname(sys.executable), "py"),
0, dd.get_aligner(paired.tumor_data)))
jvm_opts = _get_jvm_opts(items[0], tx_out_file)
r_setup = "unset R_HOME && export PATH=%s:$PATH && " % os.path.dirname(utils.Rscript_cmd())
cmd = ("{r_setup}{jvm_opts}{vardict} -G {ref_file} -f {freq} "
"-N {paired.tumor_name} -b \"{paired.tumor_bam}|{paired.normal_bam}\" {opts} "
"| {strandbias} "
"| {var2vcf} -P 0.9 -m 4.25 -f {freq} {var2vcf_opts} "
"-N \"{paired.tumor_name}|{paired.normal_name}\" "
"{freq_filter} "
"{somatic_filter} | {fix_ambig} | {remove_dup} | {vcfstreamsort} "
"{compress_cmd} > {tx_out_file}")
do.run(cmd.format(**locals()), "Genotyping with VarDict: Inference", {})
out_file = (annotation.add_dbsnp(out_file, assoc_files["dbsnp"], config)
if assoc_files.get("dbsnp") else out_file)
return out_file
def get_vardict_command(data):
"""
    Convert a variantcaller specification into the proper VarDict command,
    handling both string and list specifications.
"""
vcaller = dd.get_variantcaller(data)
if isinstance(vcaller, list):
vardict = [x for x in vcaller if "vardict" in x]
if not vardict:
return None
vardict = vardict[0]
elif not vcaller:
return None
else:
vardict = vcaller
vardict = "vardict-java" if not vardict.endswith("-perl") else "vardict"
return vardict
| lpantano/bcbio-nextgen | bcbio/variation/vardict.py | Python | mit | 15,244 |
"""add account id
Revision ID: 3734300868bc
Revises: 3772e5bcb34d
Create Date: 2013-09-30 18:07:21.729288
"""
# revision identifiers, used by Alembic.
revision = '3734300868bc'
down_revision = '3772e5bcb34d'
from alembic import op
import sqlalchemy as sa
def upgrade():
    op.add_column('account_profile', sa.Column('account_id', sa.Integer()))
def downgrade():
    op.drop_column('account_profile', 'account_id')
| vsilent/smarty-bot | alembic/versions/3734300868bc_add_account_id.py | Python | mit | 391 |
pkg_dnf = {
'collectd': {},
'collectd-chrony': {},
'collectd-curl': {},
'collectd-curl_json': {},
'collectd-curl_xml': {},
'collectd-netlink': {},
'rrdtool': {},
}
if node.os == 'fedora' and node.os_version >= (26):
pkg_dnf['collectd-disk'] = {}
svc_systemd = {
'collectd': {
'needs': ['pkg_dnf:collectd'],
},
}
files = {
'/etc/collectd.conf': {
'source': 'collectd.conf',
'mode': '0600',
'content_type': 'mako',
'context': {
'collectd': node.metadata.get('collectd', {}),
},
'needs': ['pkg_dnf:collectd', 'pkg_dnf:rrdtool'],
'triggers': ['svc_systemd:collectd:restart'],
},
'/etc/collectd.d/nut.conf': {
'delete': True,
'needs': ['pkg_dnf:collectd'],
},
}
actions = {}
directories = {
'/etc/collectd.d/plugins': {
'mode': '0755',
'needs': ['pkg_dnf:collectd'],
},
'/etc/collectd.d/types': {
'mode': '0755',
'needs': ['pkg_dnf:collectd'],
},
}
git_deploy = {}
if node.metadata.get('collectd', {}).get('write_rrd', True):
pkg_dnf['collectd-rrdtool'] = {
'triggers': ['svc_systemd:collectd:restart'],
}
if node.metadata.get('collectd', {}).get('client'):
files['/etc/collectd.d/client.conf'] = {
'source': 'client.conf',
'mode': '0600',
'content_type': 'mako',
'context': {
'client': node.metadata.get('collectd', {}).get('client', {}),
},
'needs': ['pkg_dnf:collectd'],
'triggers': ['svc_systemd:collectd:restart'],
}
if node.metadata.get('collectd', {}).get('server'):
files['/etc/collectd.d/server.conf'] = {
'source': 'server.conf',
'mode': '0600',
'content_type': 'mako',
'context': {
'server': node.metadata.get('collectd', {}).get('server', {}),
},
'needs': ['pkg_dnf:collectd'],
'triggers': ['svc_systemd:collectd:restart'],
}
files['/etc/collectd.d/collectd.auth'] = {
'source': 'server_auth/{}.auth'.format(node.name),
'mode': '0600',
'needs': ['pkg_dnf:collectd'],
'triggers': ['svc_systemd:collectd:restart'],
}
if node.has_bundle('firewalld'):
port = node.metadata.get('collectd', {}).get('server', {}).get('port', '25826')
if node.metadata.get('collectd', {}).get('server', {}).get('firewalld_permitted_zone'):
zone = node.metadata.get('collectd', {}).get('server', {}).get('firewalld_permitted_zone')
actions['firewalld_add_collectd_zone_{}'.format(zone)] = {
'command': 'firewall-cmd --permanent --zone={} --add-port={}/udp'.format(zone, port),
'unless': 'firewall-cmd --zone={} --list-ports | grep {}/udp'.format(zone, port),
'cascade_skip': False,
'needs': ['pkg_dnf:firewalld'],
'triggers': ['action:firewalld_reload'],
}
elif node.metadata.get('firewalld', {}).get('default_zone'):
default_zone = node.metadata.get('firewalld', {}).get('default_zone')
actions['firewalld_add_collectd_zone_{}'.format(default_zone)] = {
'command': 'firewall-cmd --permanent --zone={} --add-port={}/udp'.format(default_zone, port),
'unless': 'firewall-cmd --zone={} --list-ports | grep {}/udp'.format(default_zone, port),
'cascade_skip': False,
'needs': ['pkg_dnf:firewalld'],
'triggers': ['action:firewalld_reload'],
}
elif node.metadata.get('firewalld', {}).get('custom_zones', False):
for interface in node.metadata['interfaces']:
custom_zone = node.metadata.get('interfaces', {}).get(interface).get('firewalld_zone')
actions['firewalld_add_collectd_zone_{}'.format(custom_zone)] = {
'command': 'firewall-cmd --permanent --zone={} --add-port={}/udp'.format(custom_zone, port),
'unless': 'firewall-cmd --zone={} --list-ports | grep {}/udp'.format(custom_zone, port),
'cascade_skip': False,
'needs': ['pkg_dnf:firewalld'],
'triggers': ['action:firewalld_reload'],
}
else:
actions['firewalld_add_https'] = {
'command': 'firewall-cmd --permanent --add-port={}/udp'.format(port),
'unless': 'firewall-cmd --list-ports | grep {}/udp'.format(port),
'cascade_skip': False,
'needs': ['pkg_dnf:firewalld'],
'triggers': ['action:firewalld_reload'],
}
if node.metadata.get('collectd', {}).get('cgp', {}):
cgp_install_path = node.metadata.get('collectd', {}).get('cgp', {}).get('install_path')
directories['{}'.format(cgp_install_path)] = {
'mode': '0755',
}
git_deploy['{}'.format(cgp_install_path)] = {
'needs': [
'directory:{}'.format(cgp_install_path)
],
'repo': 'https://github.com/pommi/CGP.git',
'rev': 'master',
}
files['{}/conf/config.local.php'.format(cgp_install_path)] = {
'source': 'cgp_config',
'mode': '0644',
'needs': ['git_deploy:{}'.format(cgp_install_path)],
}
if node.has_bundle('monit'):
files['/etc/monit.d/collectd'] = {
'source': 'monit',
'mode': '0600',
'content_type': 'mako',
'context': {
'server': node.metadata.get('collectd', {}).get('server', {}),
},
'triggers': ['svc_systemd:monit:restart'],
}
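# --- Hypothetical node metadata sketch; not part of this bundle. It illustrates the
# keys read above; the actual values live in your BundleWrap node/group files:
#
#     'collectd': {
#         'write_rrd': True,
#         'client': {...},   # rendered into /etc/collectd.d/client.conf
#         'server': {'port': '25826', 'firewalld_permitted_zone': 'internal'},
#         'cgp': {'install_path': '/var/www/cgp'},
#     },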
| rullmann/bundlewrap-collectd | items.py | Python | mit | 5,646 |
import tkinter as tk
from time import sleep
from playsound import playsound
import config
import fasttick
from helpmessage import fasttick_help_message
import misc
from tickerwindow import TickerWindow
class GUIfasttick(TickerWindow):
def __init__(self, app):
super().__init__(app)
misc.delete_ancient_pickles('fasttick_history')
self.draw_labels()
self.draw_buttons()
self.draw_lists()
self.draw_timer()
self.timer_update()
def draw_labels(self):
self.labelName.grid(row=3, column=0, sticky='NSWE')
self.labelChange.config(text='Rate')
self.labelChange.grid(row=3, column=1, sticky='NSWE')
self.labelVol.grid(row=3, column=2, sticky='NSWE')
self.labelBuf.grid(row=3, rowspan=2, column=3, columnspan=2, sticky='NSWE')
def draw_buttons(self):
self.sortByName.grid(row=4, column=0, sticky='NSWE')
self.sortByChange.grid(row=4, column=1, sticky='NSWE')
self.sortByVol.grid(row=4, column=2, sticky='NSWE')
self.notifyBell.grid(row=4, column=3, sticky='NSWE')
self.help.grid(row=3, column=4, sticky='E')
def on_click_help(self):
helpWindow = tk.Toplevel()
helpWindow.title('Help')
frameBuf = tk.Frame(helpWindow, width=192, bg=config.MAIN_BG)
frameBuf.grid(row=0, rowspan=4, column=0, columnspan=3)
message = tk.Message(frameBuf, bg=config.MAIN_BG, fg=config.TEXT_COLOR,
width=192, text=fasttick_help_message)
message.grid(row=0, columnspan=3)
dismissButton = tk.Button(frameBuf, text='Dismiss', command=helpWindow.destroy)
dismissButton.grid(row=1, column=1)
def draw_lists(self):
self.yScroll.grid(row=5, column=3, sticky='NSWE')
self.listName.grid(row=5, column=0, sticky='NSWE')
self.listChange.grid(row=5, column=1, sticky='NSWE')
self.listVol.grid(row=5, column=2, sticky='NSWE')
def draw_timer(self):
self.timerLabel.grid(row=5, column=4, ipadx=8)
self.timerFrame.grid(row=5, column=4, columnspan=3)
self.timerDisp.grid(row=5, column=4)
self.timerValue = config.FASTTICK_RATE
def timer_update(self):
        if self.timerValue == 3:
            self.async_result = self.pool.apply_async(fasttick.heartbeat)
        if self.timerValue == 0:
            while True:
                if self.async_result.ready():
                    break
                for i in range(1, 4):
                    if self.async_result.ready():
                        break
                    self.timerDisp.config(text=f'{"." * i}', font=('', 20))
                    self.app.update()
                    sleep(1)
            self.ticker_data = self.async_result.get()
self.sort_ticker()
if self.notifyIsActive and self.ticker_data:
playsound('media/notification_sound.mp3')
self.timerValue = config.FASTTICK_RATE
        minutes, seconds = divmod(self.timerValue, 60)
self.timerDisp.config(text=f'{minutes}:{seconds:0>2}', font=('', 20))
self.timerValue -= 1
self.app.after(1000, self.timer_update) | JevinJ/Bittrex-Notify | src/GUIfasttick.py | Python | mit | 3,213 |
import os
import sys
root_path = os.path.abspath("../../../")
if root_path not in sys.path:
sys.path.append(root_path)
import numpy as np
import tensorflow as tf
from _Dist.NeuralNetworks.Base import Generator4d
from _Dist.NeuralNetworks.h_RNN.RNN import Basic3d
from _Dist.NeuralNetworks.NNUtil import Activations
class Basic4d(Basic3d):
def _calculate(self, x, y=None, weights=None, tensor=None, n_elem=1e7, is_training=False):
return super(Basic4d, self)._calculate(x, y, weights, tensor, n_elem / 10, is_training)
class CNN(Basic4d):
def __init__(self, *args, **kwargs):
self.height, self.width = kwargs.pop("height", None), kwargs.pop("width", None)
super(CNN, self).__init__(*args, **kwargs)
self._name_appendix = "CNN"
self._generator_base = Generator4d
self.conv_activations = None
self.n_filters = self.filter_sizes = self.poolings = None
def init_model_param_settings(self):
super(CNN, self).init_model_param_settings()
self.conv_activations = self.model_param_settings.get("conv_activations", "relu")
def init_model_structure_settings(self):
super(CNN, self).init_model_structure_settings()
self.n_filters = self.model_structure_settings.get("n_filters", [32, 32])
self.filter_sizes = self.model_structure_settings.get("filter_sizes", [(3, 3), (3, 3)])
self.poolings = self.model_structure_settings.get("poolings", [None, "max_pool"])
if not len(self.filter_sizes) == len(self.poolings) == len(self.n_filters):
raise ValueError("Length of filter_sizes, n_filters & pooling should be the same")
if isinstance(self.conv_activations, str):
self.conv_activations = [self.conv_activations] * len(self.filter_sizes)
def init_from_data(self, x, y, x_test, y_test, sample_weights, names):
if self.height is None or self.width is None:
assert len(x.shape) == 4, "height and width are not provided, hence len(x.shape) should be 4"
self.height, self.width = x.shape[1:3]
if len(x.shape) == 2:
x = x.reshape(len(x), self.height, self.width, -1)
else:
assert self.height == x.shape[1], "height is set to be {}, but {} found".format(self.height, x.shape[1])
assert self.width == x.shape[2], "width is set to be {}, but {} found".format(self.height, x.shape[2])
if x_test is not None and len(x_test.shape) == 2:
x_test = x_test.reshape(len(x_test), self.height, self.width, -1)
super(CNN, self).init_from_data(x, y, x_test, y_test, sample_weights, names)
def _define_input_and_placeholder(self):
self._is_training = tf.placeholder(tf.bool, name="is_training")
self._tfx = tf.placeholder(tf.float32, [None, self.height, self.width, self.n_dim], name="X")
self._tfy = tf.placeholder(tf.float32, [None, self.n_class], name="Y")
def _build_model(self, net=None):
self._model_built = True
if net is None:
net = self._tfx
for i, (filter_size, n_filter, pooling) in enumerate(zip(
self.filter_sizes, self.n_filters, self.poolings
)):
net = tf.layers.conv2d(net, n_filter, filter_size, padding="same")
net = tf.layers.batch_normalization(net, training=self._is_training)
activation = self.conv_activations[i]
if activation is not None:
net = getattr(Activations, activation)(net, activation)
net = tf.layers.dropout(net, training=self._is_training)
if pooling is not None:
net = tf.layers.max_pooling2d(net, 2, 2, name="pool")
fc_shape = np.prod([net.shape[i].value for i in range(1, 4)])
net = tf.reshape(net, [-1, fc_shape])
super(CNN, self)._build_model(net)
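# --- Hypothetical configuration sketch; not part of the original module. The structure
# settings above expect parallel lists, one entry per convolutional block, e.g.:
#
#     model_structure_settings = {
#         "n_filters": [32, 64],
#         "filter_sizes": [(3, 3), (3, 3)],
#         "poolings": [None, "max_pool"],
#     }
#
# Each block is conv -> batch norm -> activation -> dropout, followed by 2x2 max
# pooling whenever the corresponding poolings entry is not None.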
| carefree0910/MachineLearning | _Dist/NeuralNetworks/i_CNN/CNN.py | Python | mit | 3,860 |
import re
import functools
from slackbot.bot import respond_to
from app.modules.shogi_input import ShogiInput, UserDifferentException, KomaCannotMoveException
from app.modules.shogi_output import ShogiOutput
from app.slack_utils.user import User
from app.helper import channel_info, should_exist_shogi
@respond_to(r'start with <?@?([\d\w_-]+)>?')
@channel_info
def start_shogi(channel, message, opponent_name):
slacker = message._client.webapi
user = User(slacker)
opponent_id = user.username_to_id(opponent_name)
if opponent_id is None:
# In case of mention. In mention, slack transform username to userid
# like @username to <@UOIFJ83F>
opponent_id = opponent_name
if not user.user_in_channel(opponent_id, channel.channel_id):
message.reply("Error, sorry. Opponent is not found in this channel")
return
shogi = ShogiInput.init(channel_id=channel.channel_id, users=[{
"id": channel.own_id,
"name": user.id_to_username(channel.own_id),
}, {
"id": opponent_id,
"name": user.id_to_username(opponent_id),
}])
if shogi is None:
message.reply("Shogi started already by a user. Sorry.\nIf you want to quit shogi which already exists, please say this command `resign`")
else:
message.reply("Shogi started: " + shogi.id)
board = ShogiInput.get_shogi_board(channel.channel_id)
board_str = ShogiOutput.make_board_emoji(board)
message.send(board_str)
koma_names = [
"歩兵?",
"と金?",
"成?香車?",
"成?桂馬?",
"成?銀将?",
"金将?",
"角行?",
"馬",
"飛車?",
"龍",
"王将?",
"玉将?",
]
koma_names_string_regex = "|".join(koma_names)
@respond_to("^([一二三四五六七八九123456789123456789]{2})?(同)?(" + koma_names_string_regex + ")([上右下左引寄直打]{1,2})?つ?(成)?")
@channel_info
@should_exist_shogi
def koma_move(channel, message, position, dou, koma, sub_position=None, promote=None):
movement_str = "".join(
[x for x in [position, dou, koma, sub_position, promote] if x is not None])
try:
ShogiInput.move(movement_str, channel.channel_id, channel.own_id)
except UserDifferentException:
message.reply("You cannot move this because *it's not your turn*")
except KomaCannotMoveException:
message.reply("You cannot move this with your message *{}*".format(movement_str))
finally:
board = ShogiInput.get_shogi_board(channel.channel_id)
board_str = ShogiOutput.make_board_emoji(board)
message.send(board_str)
@respond_to("set (all) mode")
@channel_info
@should_exist_shogi
def set_mode(channel, message, arg):
if arg == "all":
ShogiInput.setAllMode(channel.channel_id)
message.reply("Done! All member can move now!")
@respond_to("今?.*の?.*状態.*を?教.*え?て?")
@respond_to("now")
@respond_to("局面.*")
@respond_to("board")
@channel_info
@should_exist_shogi
def board_info(channel, message):
board = ShogiInput.get_shogi_board(channel.channel_id)
board_str = ShogiOutput.make_board_emoji(board)
message.send(board_str)
@respond_to(".*降参.*")
@respond_to(".*resign.*")
@respond_to(".*負けました.*")
@respond_to(".*まけました.*")
@respond_to(".*まいりました.*")
@respond_to(".*参りました.*")
@respond_to(".*ありません.*")
@channel_info
@should_exist_shogi
def resign(channel, message):
message.send("最終局面")
board = ShogiInput.get_shogi_board(channel.channel_id)
board_str = ShogiOutput.make_board_emoji(board)
message.send(board_str)
ShogiInput.clear(channel.channel_id)
@respond_to("待った")
@channel_info
@should_exist_shogi
def matta(channel, message):
try:
ShogiInput.matta(channel.channel_id, channel.own_id)
message.send("mattaed")
except UserDifferentException:
message.reply("You cannot matta because *it's not your turn*")
except KomaCannotMoveException:
message.reply("You cannot matta because koma not moved")
finally:
board = ShogiInput.get_shogi_board(channel.channel_id)
board_str = ShogiOutput.make_board_emoji(board)
message.send(board_str)
@respond_to(".*ひふみん[eye, アイ, あい]?")
@respond_to(".*反転.*")
@channel_info
@should_exist_shogi
def hifumin(channel, message):
board = ShogiInput.get_shogi_board(channel.channel_id)
board_str = ShogiOutput.make_board_emoji_reverse(board)
message.send(board_str)
| setokinto/slack-shogi | app/shogi.py | Python | mit | 4,557 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import collections
import os
import re
import torch
from fairseq.file_io import PathManager
def average_checkpoints(inputs):
"""Loads checkpoints from inputs and returns a model with averaged weights.
Args:
inputs: An iterable of string paths of checkpoints to load from.
Returns:
A dict of string keys mapping to various values. The 'model' key
from the returned dict should correspond to an OrderedDict mapping
string parameter names to torch Tensors.
"""
params_dict = collections.OrderedDict()
params_keys = None
new_state = None
num_models = len(inputs)
for fpath in inputs:
with PathManager.open(fpath, "rb") as f:
state = torch.load(
f,
map_location=(
lambda s, _: torch.serialization.default_restore_location(s, "cpu")
),
)
# Copies over the settings from the first checkpoint
if new_state is None:
new_state = state
model_params = state["model"]
model_params_keys = list(model_params.keys())
if params_keys is None:
params_keys = model_params_keys
elif params_keys != model_params_keys:
raise KeyError(
"For checkpoint {}, expected list of params: {}, "
"but found: {}".format(f, params_keys, model_params_keys)
)
for k in params_keys:
p = model_params[k]
if isinstance(p, torch.HalfTensor):
p = p.float()
if k not in params_dict:
params_dict[k] = p.clone()
# NOTE: clone() is needed in case of p is a shared parameter
else:
params_dict[k] += p
averaged_params = collections.OrderedDict()
for k, v in params_dict.items():
averaged_params[k] = v
if averaged_params[k].is_floating_point():
averaged_params[k].div_(num_models)
else:
averaged_params[k] //= num_models
new_state["model"] = averaged_params
return new_state
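# A minimal usage sketch, not invoked anywhere in this script: average two
# hypothetical checkpoint files and write the result. The file names below are
# assumptions for illustration only; any readable fairseq checkpoints would do.
def _example_average_two_checkpoints():
    inputs = ["checkpoint1.pt", "checkpoint2.pt"]  # hypothetical paths
    averaged = average_checkpoints(inputs)
    with PathManager.open("checkpoint_avg.pt", "wb") as f:
        torch.save(averaged, f)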
def last_n_checkpoints(paths, n, update_based, upper_bound=None):
assert len(paths) == 1
path = paths[0]
if update_based:
pt_regexp = re.compile(r"checkpoint_\d+_(\d+)\.pt")
else:
pt_regexp = re.compile(r"checkpoint(\d+)\.pt")
files = PathManager.ls(path)
entries = []
for f in files:
m = pt_regexp.fullmatch(f)
if m is not None:
sort_key = int(m.group(1))
if upper_bound is None or sort_key <= upper_bound:
entries.append((sort_key, m.group(0)))
if len(entries) < n:
        raise Exception(
            "Found {} checkpoint files but need at least {}".format(len(entries), n)
        )
return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)[:n]]
def main():
parser = argparse.ArgumentParser(
description="Tool to average the params of input checkpoints to "
"produce a new checkpoint",
)
# fmt: off
parser.add_argument('--inputs', required=True, nargs='+',
help='Input checkpoint file paths.')
parser.add_argument('--output', required=True, metavar='FILE',
help='Write the new checkpoint containing the averaged weights to this path.')
num_group = parser.add_mutually_exclusive_group()
num_group.add_argument('--num-epoch-checkpoints', type=int,
help='if set, will try to find checkpoints with names checkpoint_xx.pt in the '
'path specified by input, and average last this many of them.')
num_group.add_argument('--num-update-checkpoints', type=int,
help='if set, will try to find checkpoints with names checkpoint_ee_xx.pt in the path specified by'
' input, and average last this many of them.')
parser.add_argument('--checkpoint-upper-bound', type=int,
help='when using --num-epoch-checkpoints, this will set an upper bound on which epoch to use, '
'when using --num-update-checkpoints, this will set an upper bound on which update to use'
'e.g., with --num-epoch-checkpoints=10 --checkpoint-upper-bound=50, checkpoints 41-50 would be'
' averaged.'
                            'e.g., with --num-update-checkpoints=10 --checkpoint-upper-bound=50000, checkpoints 45500-50000 would'
' be averaged assuming --save-interval-updates 500'
)
# fmt: on
args = parser.parse_args()
print(args)
num = None
is_update_based = False
if args.num_update_checkpoints is not None:
num = args.num_update_checkpoints
is_update_based = True
elif args.num_epoch_checkpoints is not None:
num = args.num_epoch_checkpoints
assert args.checkpoint_upper_bound is None or (
args.num_epoch_checkpoints is not None
or args.num_update_checkpoints is not None
), "--checkpoint-upper-bound requires --num-epoch-checkpoints or --num-update-checkpoints"
assert (
args.num_epoch_checkpoints is None or args.num_update_checkpoints is None
), "Cannot combine --num-epoch-checkpoints and --num-update-checkpoints"
if num is not None:
args.inputs = last_n_checkpoints(
args.inputs,
num,
is_update_based,
upper_bound=args.checkpoint_upper_bound,
)
print("averaging checkpoints: ", args.inputs)
new_state = average_checkpoints(args.inputs)
with PathManager.open(args.output, "wb") as f:
torch.save(new_state, f)
print("Finished writing averaged checkpoint to {}".format(args.output))
if __name__ == "__main__":
main()
| pytorch/fairseq | scripts/average_checkpoints.py | Python | mit | 6,075 |
# input lib
from pygame.locals import *
import pygame, string
class ConfigError(KeyError): pass
class Config:
""" A utility for configuration """
def __init__(self, options, *look_for):
assertions = []
for key in look_for:
if key[0] in options.keys(): exec('self.'+key[0]+' = options[\''+key[0]+'\']')
else: exec('self.'+key[0]+' = '+key[1])
assertions.append(key[0])
for key in options.keys():
if key not in assertions: raise ConfigError(key+' not expected as option')
class Input:
""" A text input for pygame apps """
def __init__(self, **options):
""" Options: x, y, font, color, restricted, maxlength, prompt """
self.options = Config(options, ['x', '0'], ['y', '0'], ['font', 'pygame.font.Font(None, 32)'],
['color', '(0,0,0)'], ['restricted', '\'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!"#$%&\\\'()*+,-./:;<=>?@[\]^_`{|}~\''],
['maxlength', '-1'], ['prompt', '\'\''])
self.x = self.options.x; self.y = self.options.y
self.font = self.options.font
self.color = self.options.color
self.restricted = self.options.restricted
self.maxlength = self.options.maxlength
self.prompt = self.options.prompt; self.value = ''
self.shifted = False
def set_pos(self, x, y):
""" Set the position to x, y """
self.x = x
self.y = y
def set_font(self, font):
""" Set the font for the input """
self.font = font
def draw(self, surface):
""" Draw the text input to a surface """
text = self.font.render(self.prompt+self.value, 1, self.color)
surface.blit(text, (self.x, self.y))
def getText(self):
return self.value
def hasTyped(self):
if self.value =="":
return False
else:
return True
def update(self, events):
""" Update the input based on passed events """
for event in events:
if event.type == KEYUP:
if event.key == K_LSHIFT or event.key == K_RSHIFT: self.shifted = False
if event.type == KEYDOWN:
if event.key == K_BACKSPACE: self.value = self.value[:-1]
elif event.key == K_LSHIFT or event.key == K_RSHIFT: self.shifted = True
elif event.key == K_SPACE: self.value += ' '
if not self.shifted:
if event.key == K_a and 'a' in self.restricted: self.value += 'a'
elif event.key == K_b and 'b' in self.restricted: self.value += 'b'
elif event.key == K_c and 'c' in self.restricted: self.value += 'c'
elif event.key == K_d and 'd' in self.restricted: self.value += 'd'
elif event.key == K_e and 'e' in self.restricted: self.value += 'e'
elif event.key == K_f and 'f' in self.restricted: self.value += 'f'
elif event.key == K_g and 'g' in self.restricted: self.value += 'g'
elif event.key == K_h and 'h' in self.restricted: self.value += 'h'
elif event.key == K_i and 'i' in self.restricted: self.value += 'i'
elif event.key == K_j and 'j' in self.restricted: self.value += 'j'
elif event.key == K_k and 'k' in self.restricted: self.value += 'k'
elif event.key == K_l and 'l' in self.restricted: self.value += 'l'
elif event.key == K_m and 'm' in self.restricted: self.value += 'm'
elif event.key == K_n and 'n' in self.restricted: self.value += 'n'
elif event.key == K_o and 'o' in self.restricted: self.value += 'o'
elif event.key == K_p and 'p' in self.restricted: self.value += 'p'
elif event.key == K_q and 'q' in self.restricted: self.value += 'q'
elif event.key == K_r and 'r' in self.restricted: self.value += 'r'
elif event.key == K_s and 's' in self.restricted: self.value += 's'
elif event.key == K_t and 't' in self.restricted: self.value += 't'
elif event.key == K_u and 'u' in self.restricted: self.value += 'u'
elif event.key == K_v and 'v' in self.restricted: self.value += 'v'
elif event.key == K_w and 'w' in self.restricted: self.value += 'w'
elif event.key == K_x and 'x' in self.restricted: self.value += 'x'
elif event.key == K_y and 'y' in self.restricted: self.value += 'y'
elif event.key == K_z and 'z' in self.restricted: self.value += 'z'
elif event.key == K_0 and '0' in self.restricted: self.value += '0'
elif event.key == K_1 and '1' in self.restricted: self.value += '1'
elif event.key == K_2 and '2' in self.restricted: self.value += '2'
elif event.key == K_3 and '3' in self.restricted: self.value += '3'
elif event.key == K_4 and '4' in self.restricted: self.value += '4'
elif event.key == K_5 and '5' in self.restricted: self.value += '5'
elif event.key == K_6 and '6' in self.restricted: self.value += '6'
elif event.key == K_7 and '7' in self.restricted: self.value += '7'
elif event.key == K_8 and '8' in self.restricted: self.value += '8'
elif event.key == K_9 and '9' in self.restricted: self.value += '9'
elif event.key == K_BACKQUOTE and '`' in self.restricted: self.value += '`'
elif event.key == K_MINUS and '-' in self.restricted: self.value += '-'
elif event.key == K_EQUALS and '=' in self.restricted: self.value += '='
elif event.key == K_LEFTBRACKET and '[' in self.restricted: self.value += '['
elif event.key == K_RIGHTBRACKET and ']' in self.restricted: self.value += ']'
elif event.key == K_BACKSLASH and '\\' in self.restricted: self.value += '\\'
elif event.key == K_SEMICOLON and ';' in self.restricted: self.value += ';'
elif event.key == K_QUOTE and '\'' in self.restricted: self.value += '\''
elif event.key == K_COMMA and ',' in self.restricted: self.value += ','
elif event.key == K_PERIOD and '.' in self.restricted: self.value += '.'
elif event.key == K_SLASH and '/' in self.restricted: self.value += '/'
elif self.shifted:
if event.key == K_a and 'A' in self.restricted: self.value += 'A'
elif event.key == K_b and 'B' in self.restricted: self.value += 'B'
elif event.key == K_c and 'C' in self.restricted: self.value += 'C'
elif event.key == K_d and 'D' in self.restricted: self.value += 'D'
elif event.key == K_e and 'E' in self.restricted: self.value += 'E'
elif event.key == K_f and 'F' in self.restricted: self.value += 'F'
elif event.key == K_g and 'G' in self.restricted: self.value += 'G'
elif event.key == K_h and 'H' in self.restricted: self.value += 'H'
elif event.key == K_i and 'I' in self.restricted: self.value += 'I'
elif event.key == K_j and 'J' in self.restricted: self.value += 'J'
elif event.key == K_k and 'K' in self.restricted: self.value += 'K'
elif event.key == K_l and 'L' in self.restricted: self.value += 'L'
elif event.key == K_m and 'M' in self.restricted: self.value += 'M'
elif event.key == K_n and 'N' in self.restricted: self.value += 'N'
elif event.key == K_o and 'O' in self.restricted: self.value += 'O'
elif event.key == K_p and 'P' in self.restricted: self.value += 'P'
elif event.key == K_q and 'Q' in self.restricted: self.value += 'Q'
elif event.key == K_r and 'R' in self.restricted: self.value += 'R'
elif event.key == K_s and 'S' in self.restricted: self.value += 'S'
elif event.key == K_t and 'T' in self.restricted: self.value += 'T'
elif event.key == K_u and 'U' in self.restricted: self.value += 'U'
elif event.key == K_v and 'V' in self.restricted: self.value += 'V'
elif event.key == K_w and 'W' in self.restricted: self.value += 'W'
elif event.key == K_x and 'X' in self.restricted: self.value += 'X'
elif event.key == K_y and 'Y' in self.restricted: self.value += 'Y'
elif event.key == K_z and 'Z' in self.restricted: self.value += 'Z'
elif event.key == K_0 and ')' in self.restricted: self.value += ')'
elif event.key == K_1 and '!' in self.restricted: self.value += '!'
elif event.key == K_2 and '@' in self.restricted: self.value += '@'
elif event.key == K_3 and '#' in self.restricted: self.value += '#'
elif event.key == K_4 and '$' in self.restricted: self.value += '$'
elif event.key == K_5 and '%' in self.restricted: self.value += '%'
elif event.key == K_6 and '^' in self.restricted: self.value += '^'
elif event.key == K_7 and '&' in self.restricted: self.value += '&'
elif event.key == K_8 and '*' in self.restricted: self.value += '*'
elif event.key == K_9 and '(' in self.restricted: self.value += '('
elif event.key == K_BACKQUOTE and '~' in self.restricted: self.value += '~'
elif event.key == K_MINUS and '_' in self.restricted: self.value += '_'
elif event.key == K_EQUALS and '+' in self.restricted: self.value += '+'
elif event.key == K_LEFTBRACKET and '{' in self.restricted: self.value += '{'
elif event.key == K_RIGHTBRACKET and '}' in self.restricted: self.value += '}'
elif event.key == K_BACKSLASH and '|' in self.restricted: self.value += '|'
elif event.key == K_SEMICOLON and ':' in self.restricted: self.value += ':'
elif event.key == K_QUOTE and '"' in self.restricted: self.value += '"'
elif event.key == K_COMMA and '<' in self.restricted: self.value += '<'
elif event.key == K_PERIOD and '>' in self.restricted: self.value += '>'
elif event.key == K_SLASH and '?' in self.restricted: self.value += '?'
if len(self.value) > self.maxlength and self.maxlength >= 0: self.value = self.value[:-1]
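# A minimal usage sketch, not executed on import: wire the Input widget into a
# pygame event loop. The window size, position and prompt below are assumptions
# for illustration; any surface works, and the default font is used.
def _example_text_input_loop():
    pygame.init()
    screen = pygame.display.set_mode((640, 480))
    box = Input(x=20, y=20, maxlength=40, prompt='name: ')
    running = True
    while running:
        events = pygame.event.get()
        for event in events:
            if event.type == QUIT:
                running = False
        box.update(events)            # feed key events to the widget
        screen.fill((255, 255, 255))
        box.draw(screen)              # render prompt + typed value
        pygame.display.flip()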
| antismap/MICshooter | sources/lib/eztext.py | Python | mit | 11,212 |
# -*- coding:utf-8 -*-
from setuptools import setup
setup(
name = "mobileclick",
description = "mobileclick provides baseline methods and utility scripts for the NTCIR-12 MobileClick-2 task",
author = "Makoto P. Kato",
author_email = "[email protected]",
license = "MIT License",
url = "https://github.com/mpkato/mobileclick",
version='0.2.0',
packages=[
'mobileclick',
'mobileclick.nlp',
'mobileclick.methods',
'mobileclick.scripts'
],
install_requires = [
'BeautifulSoup',
'nltk>=3.1',
'numpy'],
entry_points = {
'console_scripts': [
'mobileclick_download_training_data=mobileclick.scripts.mobileclick_download_training_data:main',
'mobileclick_download_test_data=mobileclick.scripts.mobileclick_download_test_data:main',
'mobileclick_random_ranking_method=mobileclick.scripts.mobileclick_random_ranking_method:main',
'mobileclick_lang_model_ranking_method=mobileclick.scripts.mobileclick_lang_model_ranking_method:main',
'mobileclick_random_summarization_method=mobileclick.scripts.mobileclick_random_summarization_method:main',
'mobileclick_lang_model_summarization_method=mobileclick.scripts.mobileclick_lang_model_summarization_method:main',
'mobileclick_lang_model_two_layer_summarization_method=mobileclick.scripts.mobileclick_lang_model_two_layer_summarization_method:main',
],
},
tests_require=['nose']
)
| mpkato/mobileclick | setup.py | Python | mit | 1,565 |
'''
charlie.py
---class for controlling charlieplexed SparkFun 8x7 LED Array with the Raspberry Pi
Relies upon RPi.GPIO written by Ben Croston
The MIT License (MIT)
Copyright (c) 2016 Amanda Cole
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import RPi.GPIO as GPIO, time, random
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
class Charlie:
'''
Class for control of the charlieplexed SparkFun 8x7 LED Array.
'''
def __init__(self, pins):
'''
pins: type 'list', list of ints for array pins a-h, in order [a,b,c,d,e,f,g,h]
'''
if len(pins) != 8:
print("You must specify eight, and only eight, pins.")
raise ValueError
for pin in pins:
if type(pin) != int:
print("Pins must be of type int.")
raise TypeError
GPIO.setup(pin, GPIO.OUT, initial = False)
a = pins[0]
b = pins[1]
c = pins[2]
d = pins[3]
e = pins[4]
f = pins[5]
g = pins[6]
h = pins[7]
self.array = [[[h,g],[g,h],[f,h],[e,h],[d,h],[c,h],[b,h],[a,h]], \
[[h,f],[g,f],[f,g],[e,g],[d,g],[c,g],[b,g],[a,g]], \
[[h,e],[g,e],[f,e],[e,f],[d,f],[c,f],[b,f],[a,f]], \
[[h,d],[g,d],[f,d],[e,d],[d,e],[c,e],[b,e],[a,e]], \
[[h,c],[g,c],[f,c],[e,c],[d,c],[c,d],[b,d],[a,d]], \
[[h,b],[g,b],[f,b],[e,b],[d,b],[c,b],[b,c],[a,c]], \
[[h,a],[g,a],[f,a],[e,a],[d,a],[c,a],[b,a],[a,b]]]
self.ALL_PINS = [a,b,c,d,e,f,g,h]
def switchOrigin(self):
'''
        Places the origin [0,0] in the corner diagonally opposite its current position.
'''
switched_array = self.array
switched_array.reverse()
for i in switched_array:
i.reverse()
self.array = switched_array
def clearDisplay(self):
'''
Clears display.
'''
GPIO.setup(self.ALL_PINS, GPIO.IN)
def displayPoint(self, coord):
'''
coord: type 'list', coordinates of single pixel to be lit
Lights a single pixel.
'''
self.clearDisplay()
GPIO.setup(self.array[coord[0]][coord[1]][0], GPIO.OUT, initial = 1)
GPIO.setup(self.array[coord[0]][coord[1]][1], GPIO.OUT, initial = 0)
def test(self):
'''
Displays all pixels in array, one at a time, starting with [0,0] and ending with [6,7].
'''
x = 0
y = 0
while y < 8:
self.displayPoint([x,y])
time.sleep(0.1)
x += 1
if x >= 7:
x = 0
y += 1
self.clearDisplay()
def display(self, pixels, duration):
'''
pixels: type 'list', list of pixels to be lit each in coordinate form [x,y]
duration: type 'int', duration to display coordinates
Lights specified pixels in array
'''
positives = []
for coord in pixels:
            pos_pin = self.array[coord[0]][coord[1]][0]
            if pos_pin not in [p[0] for p in positives]:  # compare against pins, not [pin, []] entries
                positives.append([pos_pin, []])
for i in positives: #[[a,[]],[b,[]],[h,[]]]
for coord in pixels:
if self.array[coord[0]][coord[1]][0] == i[0]:
if self.array[coord[0]][coord[1]][1] not in i[1]:
i[1].append(self.array[coord[0]][coord[1]][1])
t = 0
pause = 0.02/len(positives)
while t < duration:
for i in range(0, len(positives)):
self.clearDisplay()
GPIO.setup(positives[i][0], GPIO.OUT, initial = True)
GPIO.setup(positives[i][1], GPIO.OUT, initial = False)
time.sleep(pause)
t += pause
self.clearDisplay()
def screensaver(self, duration, fill = .5):
'''
duration: type 'int', duration to keep screensaver on
fill: type 'float', proportion of array to fill with pixels at any given time
Randomly displays pixels on array.
'''
if fill > 1 or fill < 0:
print("fill must be of type 'float' between 0 and 1...using default value instead.")
fill = 0.5
t = 0
while t < duration:
coords = []
while len(coords) < fill*56:
coord = [random.randint(0,6), random.randint(0,7)]
if coord not in coords:
coords.append(coord)
self.display(coords, 0.15)
t += 0.1
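# A minimal usage sketch, not executed on import: drive the 8x7 array. The BCM
# pin numbers below are assumptions for illustration; use whichever eight GPIO
# pins the array is actually wired to, in the order a-h.
def _example_charlie_demo():
    pins = [4, 17, 27, 22, 5, 6, 13, 19]        # hypothetical BCM pin numbers a-h
    array = Charlie(pins)
    array.test()                                # sweep every pixel once
    array.display([[0, 0], [3, 4], [6, 7]], 2)  # light three pixels for ~2 seconds
    array.screensaver(5, fill=0.3)              # random pixels for ~5 seconds
    array.clearDisplay()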
| mandyRae/pythonic-charlieplex | charlie.py | Python | mit | 5,714 |
# vi: ts=8 sts=4 sw=4 et
#
# robot.py: web robot detection
#
# This file is part of Draco2. Draco2 is free software and is made available
# under the MIT license. Consult the file "LICENSE" that is distributed
# together with this file for the exact licensing terms.
#
# Draco2 is copyright (c) 1999-2007 by the Draco2 authors. See the file
# "AUTHORS" for a complete overview.
#
# $Revision: 1187 $
import os
import bisect
import logging
class RobotSignatures(object):
"""A repository of robot signatures.
The repository is used for detecting web robots by matching their
'User-Agent' HTTP header.
"""
def __init__(self):
"""Constructor."""
self.m_robots = []
self.m_files = []
self.m_change_context = None
@classmethod
def _create(cls, api):
"""Factory method."""
robots = cls()
robots._set_change_manager(api.changes)
section = api.config.ns('draco2')
datadir = section['datadirectory']
path = os.path.join(datadir, 'robots.ini')
robots.add_file(path)
docroot = section['documentroot']
path = os.path.join(docroot, 'robots.ini')
robots.add_file(path)
return robots
def _set_change_manager(self, changes):
"""Use change manager `changes'."""
self.m_change_context = changes.get_context('draco2.draco.robot')
self.m_change_context.add_callback(self._change_callback)
def _change_callback(self, api):
"""Change manager callback (when files in the ctx change)."""
self.m_robots = []
for fname in self.m_files:
self._parse_file(fname)
logger = logging.getLogger('draco2.draco.robot')
logger.debug('Reloaded robot signatures (change detected).')
def add_file(self, fname):
"""Load robot signatures from file `fname'."""
self.m_files.append(fname)
self._parse_file(fname)
if self.m_change_context:
self.m_change_context.add_file(fname)
def _parse_file(self, fname):
"""Parse a robot signatures file."""
try:
fin = file(fname)
except IOError:
return
for line in fin:
line = line.strip()
if not line or line.startswith('#'):
continue
self.m_robots.append(line.lower())
fin.close()
self.m_robots.sort()
def match(self, agent):
"""Match user agent string `agent' against the signatures.
        The match operation is a prefix match, i.e. we have a match
        if a registered signature is a prefix of `agent'.
"""
agent = agent.lower()
i = bisect.bisect_right(self.m_robots, agent)
return i > 0 and agent.startswith(self.m_robots[i-1])
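# A minimal usage sketch, not executed on import: load one signature file and
# test a User-Agent header against it. The path below is an assumption for
# illustration; any file with one lower-cased signature prefix per line works.
def _example_match_robot():
    robots = RobotSignatures()
    robots.add_file('/tmp/robots.ini')
    return robots.match('Googlebot/2.1 (+http://www.google.com/bot.html)')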
| geertj/draco2 | draco2/draco/robot.py | Python | mit | 2,805 |
from django.conf.urls import patterns, url
from rai00base.raccordement import getModeleRaccordement, createRaccordement, deleteRaccordement, listRaccordement
urlpatterns = patterns('',
url('getModeleRaccordement/$', getModeleRaccordement),
url('createRaccordement/$', createRaccordement),
url('deleteRaccordement/$', deleteRaccordement),
url('listRaccordement/$', listRaccordement),
) | DarioGT/docker-carra | src/rai00base/urls.py | Python | mit | 401 |
value_None = object()
class FactoryException(Exception):
pass
class Factory:
class Item:
def __init__(self, factory, i):
self.factory = factory
self.i = i
@property
def value(self):
return self.factory.value(self.i)
@value.setter
def value(self, value):
self.i = self.factory.i(value)
def copy(self):
return self.factory.item(self.i)
def __eq__(self, other):
try:
return self.factory is other.factory and self.i == other.i
except AttributeError:
return self.value == other
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.factory) ^ hash(self.i)
def __int__(self):
return self.i
def __str__(self):
return self.factory.istr(self)
def __repr__(self):
return f'Item({self.factory.istr(self)})'
@staticmethod
def istr(item):
return str(item.value)
def i(self, value):
raise NotImplementedError
def item(self, i=None, value=value_None):
if not self.check_ivalue(i, value):
raise FactoryException('factory.item(): index and values do not match')
if i is None:
i = 0 if value is value_None else self.i(value)
return self.Item(self, i) # this might be annoying for union...
def check_ivalue(self, i, value):
return i is None or value is value_None or self.value(i) == value
def isitem(self, item):
try:
return item.factory is self and 0 <= item.i < self.nitems
except AttributeError:
return False
@property
def items(self):
return map(self.item, range(self.nitems))
def __iter__(self):
return self.items
def __len__(self):
return self.nitems
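# A minimal concrete subclass sketch (an illustration, not part of the library):
# a factory over a fixed list of values, mapping index <-> value both ways.
class _ListFactory(Factory):
    def __init__(self, values):
        self.values = list(values)
    @property
    def nitems(self):
        return len(self.values)
    def value(self, i):
        return self.values[i]
    def i(self, value):
        return self.values.index(value)
# Usage sketch:
#   colors = _ListFactory(['red', 'green', 'blue'])
#   item = colors.item(value='green')    # Item with i == 1
#   assert item.value == 'green' and int(item) == 1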
| bigblindbais/pytk | src/pytk/factory/factory.py | Python | mit | 1,952 |
import pandas as pd
from pandas import DataFrame
df = pd.read_csv('sp500_ohlc.csv', index_col = 'Date', parse_dates=True)
#notice what i did, since it is an object
df['H-L'] = df.High - df.Low
print df.head()
df['100MA'] = pd.rolling_mean(df['Close'], 100)
# must do a slice, since there will be no value for 100ma until 100 points
print df[200:210]
df['Difference'] = df['Close'].diff()
print df.head()
| PythonProgramming/Pandas-Basics-with-2.7 | pandas 5 - Column Operations (Basic mathematics, moving averages).py | Python | mit | 416 |
import numpy as np
from nlpaug.augmenter.spectrogram import SpectrogramAugmenter
from nlpaug.util import Action
import nlpaug.model.spectrogram as nms
class LoudnessAug(SpectrogramAugmenter):
"""
Augmenter that change loudness on mel spectrogram by random values.
    :param tuple zone: Default value is (0.2, 0.8). Assign a zone for augmentation. By default, no augmentation
        will be applied to the first 20% and the last 20% of the whole audio.
    :param float coverage: Default value is 1 and value should be between 0 and 1. Portion of augmentation.
        If `1` is assigned, the augment operation will be applied to the whole target segment. For example, if the audio
        duration is 60 seconds while zone and coverage are (0.2, 0.8) and 0.7 respectively, 25.2
        seconds ((0.8-0.2)*0.7*60) of audio will be augmented.
:param tuple factor: Default value is (0.5, 2). Volume change value will be picked within the range of this
tuple value. Volume will be reduced if value is between 0 and 1. Otherwise, volume will be increased.
:param str name: Name of this augmenter
"""
def __init__(self, name='Loudness_Aug', zone=(0.2, 0.8), coverage=1., factor=(0.5, 2), verbose=0,
silence=False, stateless=True):
super().__init__(action=Action.SUBSTITUTE, zone=zone, coverage=coverage, factor=factor,
verbose=verbose, name=name, silence=silence, stateless=stateless)
self.model = nms.Loudness()
def substitute(self, data):
# https://arxiv.org/pdf/2001.01401.pdf
loudness_level = self.get_random_factor()
time_start, time_end = self.get_augment_range_by_coverage(data)
if not self.stateless:
self.time_start, self.time_end, self.loudness_level = time_start, time_end, loudness_level
return self.model.manipulate(data, loudness_level=loudness_level, time_start=time_start, time_end=time_end)
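# A minimal usage sketch, not executed on import: apply the augmenter directly to
# a mel-spectrogram-like array. The random 128x400 array below is an assumption
# standing in for a real mel spectrogram; zone/coverage handling follows the
# SpectrogramAugmenter base class.
def _example_loudness_aug():
    mel = np.random.rand(128, 400)             # (n_mels, n_frames) dummy input
    aug = LoudnessAug(zone=(0.2, 0.8), coverage=0.8, factor=(0.5, 2))
    return aug.substitute(mel)                 # loudness-scaled copy within the zone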
| makcedward/nlpaug | nlpaug/augmenter/spectrogram/loudness.py | Python | mit | 1,919 |
"""
HLS and Color Threshold
-----------------------
You've now seen that various color thresholds can be applied to find the lane lines in images. Here we'll explore
this a bit further and look at a couple examples to see why a color space like HLS can be more robust.
"""
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
def run():
"""
Run different HLS and its thresholds.
"""
image = mpimg.imread('test6.jpg')
# Converting original to gray
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# Threshold for original image
thresh = (180, 255)
binary = np.zeros_like(gray)
binary[(gray > thresh[0]) & (gray <= thresh[1])] = 1
red = image[:, :, 0]
green = image[:, :, 1]
blue = image[:, :, 2]
thresh_2 = (200, 255)
binary_2 = np.zeros_like(red)
binary_2[(red > thresh_2[0]) & (red <= thresh_2[1])] = 1
# Converting image to HLS
hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
# Splitting HSL
hue = hls[:, :, 0]
lightness = hls[:, :, 1]
saturation = hls[:, :, 2]
# Threshold for saturation
thresh_3 = (90, 255)
binary_3 = np.zeros_like(saturation)
binary_3[(saturation > thresh_3[0]) & (saturation <= thresh_3[1])] = 1
# Threshold for Hue
thresh_4 = (15, 100)
binary_4 = np.zeros_like(hue)
binary_4[(hue > thresh_4[0]) & (hue <= thresh_4[1])] = 1
# -------------------- Figure -----------------------
f = plt.figure()
size_x, size_y = (4, 4)
f.add_subplot(size_x, size_y, 1)
plt.imshow(image)
plt.title("Original")
f.add_subplot(size_x, size_y, 2)
plt.imshow(gray, cmap='gray')
plt.title("Gray")
f.add_subplot(size_x, size_y, 3)
plt.imshow(binary, cmap='gray')
plt.title("Threshold of ({}, {})".format(thresh[0], thresh[1]))
f.add_subplot(size_x, size_y, 4)
plt.imshow(red, cmap='gray')
plt.title("Red")
f.add_subplot(size_x, size_y, 5)
plt.imshow(green, cmap='gray')
plt.title("Green")
f.add_subplot(size_x, size_y, 6)
plt.imshow(blue, cmap='gray')
plt.title("Blue")
f.add_subplot(size_x, size_y, 7)
plt.imshow(binary_2, cmap='gray')
plt.title("Threshold of Red color")
f.add_subplot(size_x, size_y, 8)
plt.imshow(hue, cmap='gray')
plt.title("Hue")
f.add_subplot(size_x, size_y, 9)
plt.imshow(lightness, cmap='gray')
plt.title("Lightness")
f.add_subplot(size_x, size_y, 10)
plt.imshow(saturation, cmap='gray')
plt.title("Saturation")
f.add_subplot(size_x, size_y, 11)
plt.imshow(binary_3, cmap='gray')
plt.title("Threshold of saturation")
f.add_subplot(size_x, size_y, 12)
plt.imshow(binary_4, cmap='gray')
plt.title("Threshold of hue")
plt.show()
if __name__ == '__main__':
run()
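# A minimal follow-on sketch (an illustration, not called from run()): combine the
# red-channel and saturation-channel thresholds above into one binary mask, which
# is the usual next step once the individual thresholds look reasonable.
def combined_threshold(image, r_thresh=(200, 255), s_thresh=(90, 255)):
    """Return a binary mask where either the R channel or the S channel passes."""
    red = image[:, :, 0]
    saturation = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)[:, :, 2]
    combined = np.zeros_like(red)
    combined[((red > r_thresh[0]) & (red <= r_thresh[1])) |
             ((saturation > s_thresh[0]) & (saturation <= s_thresh[1]))] = 1
    return combined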
| akshaybabloo/Car-ND | Term_1/advanced_lane_finding_10/color_space_10_8.py | Python | mit | 2,835 |
#!/usr/bin/python
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import time
import argparse
from datetime import datetime, timedelta, tzinfo
from textwrap import dedent
import json
from random import choice
import webbrowser
import itertools
import logging
import threading
from threading import Thread
from os.path import expanduser, expandvars, dirname, exists, join
log = logging.getLogger()
logging.basicConfig()
import pendulum
import six
from six.moves import input
from tzlocal import get_localzone
from kazoo.client import KazooClient
from kazoo.client import KazooState
from kazoo.protocol.states import EventType
from kazoo.handlers.threading import KazooTimeoutError
import colorama
from colorama import Fore, Back, Style
from solrzkutil.util import netcat, text_type, parse_zk_hosts, get_leader, get_server_by_id
from solrzkutil.parser import parse_admin_dump, parse_admin_cons
from solrzkutil.healthy import (check_zookeeper_connectivity,
check_ephemeral_sessions_fast,
check_ephemeral_znode_consistency,
check_ephemeral_dump_consistency,
check_watch_sessions_clients,
check_watch_sessions_duplicate,
check_queue_sizes,
check_watch_sessions_valid,
check_overseer_election,
get_solr_session_ids,
                                multi_admin_command)
from solrzkutil import healthy  # module import so later healthy.* references resolve
__application__ = 'solr-zkutil'
COMMANDS = {
# key: cli-value
# do not change the keys, but you may freely change the values of the tuple, to modify
# the command or description.
'solr': ('live-nodes', 'List Solr Live Nodes from ZooKeeper'),
'clusterstate': ('clusterstate', 'List Solr Collections and Nodes'),
'watch': ('watch', 'Watch a ZooKeeper Node for Changes'),
'test': ('test', 'Test Each Zookeeper Ensemble node for replication and client connectivity'), # TODO
'status': ('stat', 'Check ZooKeeper ensemble status'),
'config': ('config', 'Show connection strings, or set environment configuration'),
'admin': ('admin', 'Execute a ZooKeeper administrative command'),
'ls': ('ls', 'List a ZooKeeper Node'),
'sessions': ('session-reset', 'Reset ZooKeeper sessions, each client will receive a SESSION EXPIRED notification, and will automatically reconnect. Solr ephemeral nodes should re-register themselves.'),
'health': ('health', 'Test/Check the health of Zookeeper and Solr, any errors or problems will be printed to the console.')
}
CONFIG_DIRNAME = __application__
HEADER_STYLE = Back.CYAN + Fore.WHITE + Style.BRIGHT
HEADER_JUST = 10
TITLE_STYLE = Fore.CYAN + Style.BRIGHT
INFO_STYLE = Fore.YELLOW + Style.BRIGHT
ERROR_STYLE = Back.WHITE + Fore.RED + Style.BRIGHT
INPUT_STYLE = Fore.WHITE + Style.BRIGHT
BLUE_STYLE = Fore.BLUE + Style.BRIGHT
DIFF_STYLE = Fore.MAGENTA + Style.BRIGHT
STATS_STYLE = Fore.MAGENTA + Style.BRIGHT
GREEN_STYLE = Fore.GREEN + Style.BRIGHT
ZK_LIVE_NODES = '/live_nodes'
ZK_CLUSTERSTATE = '/clusterstate.json'
MODE_LEADER = 'leader'
# the first event will always be triggered immediately to show the existing state of the node
# instead of saying 'watch event' tell the user we are just displaying initial state.
WATCH_COUNTER = 0
ZK_ADMIN_CMDS = {
'conf': {
'help': 'Print details about serving configuration.',
'example': '',
'version': '3.3.0',
},
'cons': {
'help': ('List full connection/session details for all clients connected to this server. '
'Includes information on numbers of packets received/sent, session id, operation '
'latencies, last operation performed, etc...'),
'example': '',
'version': '3.3.0',
},
'crst':{
'help': 'Reset connection/session statistics for all connections.',
'example': '',
'version': '3.3.0',
},
'dump':{
'help': 'Lists the outstanding sessions and ephemeral nodes. This only works on the leader.',
'example': '',
'version': '',
},
'envi':{
'help': 'Print details about serving environment',
'example': '',
'version': '',
},
'ruok':{
'help': 'Tests if server is running in a non-error state. The server will respond with imok if it is running. Otherwise it will not respond at all.',
'example': '',
'version': '',
},
'srst':{
'help': 'Reset server statistics.',
'example': '',
'version': '',
},
'srvr':{
'help': 'Lists full details for the server.',
'example': '',
'version': '3.3.0',
},
'stat':{
'help': 'Lists brief details for the server and connected clients.',
'example': '',
'version': '',
},
'wchs':{
'help': 'Lists brief information on watches for the server.',
'example': '',
'version': '3.3.0',
},
'wchc':{
'help': 'Lists detailed information on watches for the server, by session. (may be expensive)',
'example': '',
'version': '3.3.0',
},
'dirs':{
'help': 'Shows the total size of snapshot and log files in bytes',
'example': '',
'version': '3.5.1',
},
'wchp':{
'help': 'Lists detailed information on watches for the server, by path. This outputs a list of paths (znodes) with associated sessions.',
'example': '',
'version': '3.3.0',
},
'mntr': {
'help': 'Outputs a list of variables that could be used for monitoring the health of the cluster.',
        'example': '',
        'version': '3.4.0',
},
'isro':{
'help': 'Tests if server is running in read-only mode. The server will respond with "ro" if in read-only mode or "rw" if not in read-only mode.',
'example': '',
'version': '3.4.0',
},
'gtmk':{
'help': 'Gets the current trace mask as a 64-bit signed long value in decimal format. See stmk for an explanation of the possible values.',
'example': '',
'version': '',
},
'stmk':{
'help': 'Sets the current trace mask. The trace mask is 64 bits, where each bit enables or disables a specific category of trace logging on the server.',
'example': '',
'version': '',
},
}
ZNODE_DEBUG_ATTRS = [
'aversion',
'cversion',
'version',
'numChildren',
'ctime',
'mtime',
'czxid',
'mzxid',
'pzxid',
'dataLength',
'ephemeralOwner',
]
NEW_TAB = 2
def config_path():
conf = None
if os.name == 'nt':
conf = os.path.expandvars("%%appdata%%/.%s/environments.json" % CONFIG_DIRNAME)
else:
conf = os.path.expanduser("~/.%s/environments.json" % CONFIG_DIRNAME)
return conf
def config():
conf = config_path()
if not exists(conf):
if not exists(dirname(conf)):
os.makedirs(dirname(conf))
open(conf, mode='w').write(dedent('''
{
"DEV": "localhost:2181",
"QA": "localhost:2181",
"PILOT": "localhost:2181",
"PROD": "localhost:2181"
}
'''))
return json.loads(open(conf, mode='r').read().strip())
def style_header(text, width = 0):
if not text:
return ''
width = max(len(text) + HEADER_JUST * 2, width)
pad = ' ' * width
output = '\n%s%s\n%s\n%s%s\n' % (HEADER_STYLE, pad, text.center(width), pad, Style.RESET_ALL)
return output
def style_text(text, styles, ljust=0, rjust=0, cen=0, lpad=0, rpad=0, pad=0, char=' ', restore=''):
if not text:
return ''
# Ensure we have unicode in both python 2/3
text = text_type(text)
styles = text_type(styles)
char = text_type(char)
restore = text_type(restore)
reset_all = text_type(Style.RESET_ALL)
style = ''.join(styles)
text = text.ljust(ljust, char)
text = text.rjust(rjust, char)
text = text.center(cen, char)
text = char*(lpad+pad) + text + char*(rpad+pad)
return '%s%s%s%s' % (style, text, reset_all, restore)
#return style + text + Style.RESET_ALL + restore
def style_multiline(text, styles, ljust=0, rjust=0, cen=0, lpad=0, rpad=0, pad=0, char=' '):
if not text:
return ''
lines = text.split('\n')
fmt_text = ''
for text in lines:
text = style_text(text, styles, ljust, rjust, cen, lpad, rpad, pad, char)
fmt_text += text + '\n'
return fmt_text
def update_config(configuration=None, add=None):
"""
Update the environments configuration on-disk.
"""
existing_config = config()
conf = config_path()
print(style_header('Zookeeper Environments'))
print("")
print(style_text('config:', TITLE_STYLE, pad=2), end='')
print(style_text(conf, INPUT_STYLE))
print(style_multiline(json.dumps(existing_config, indent=4, sort_keys=True), INFO_STYLE, lpad=4))
if not configuration and not add:
return
new_config = existing_config
if configuration:
new_config = configuration
if add:
new_config.update(add)
new_config = json.dumps(new_config, indent=4, sort_keys=True)
print("")
print(style_text('new config:', TITLE_STYLE, pad=2))
print(style_multiline(new_config, INFO_STYLE, lpad=4))
print("")
# Get permission to replace the existing configuration.
if input(style_text("Replace configuration? (y/n): ", INPUT_STYLE)).lower() not in ('y', 'yes'):
print(" ...Cancel")
return
open(conf, mode='w').write(new_config)
print(style_text(' ...Saved', INPUT_STYLE, pad=2))
def clusterstate(zookeepers, all_hosts, node='/clusterstate.json'):
"""
Print clusterstatus.json contents
"""
zk_hosts = parse_zk_hosts(zookeepers, all_hosts=all_hosts)
print('')
# we'll keep track of differences for this node between zookeepers.
# because zookeeper keeps all nodes in-sync, there shouldn't be differences between the
# nodes... but there might be if you are having replication problems.
first_state = None
for host in zk_hosts:
# connect to zookeeper
zk = KazooClient(hosts=host, read_only=True)
try:
zk.start()
except KazooTimeoutError as e:
print('ZK Timeout host: [%s], %s' % (host, e))
continue
# If the node doesn't exist... just let the user know.
if not zk.exists(node):
node_str = style_text(node, BLUE_STYLE, restore=ERROR_STYLE)
zk_str = style_text(host, BLUE_STYLE, restore=ERROR_STYLE)
print(style_text('No node [%s] on %s' % (node_str, zk_str), ERROR_STYLE))
continue
print(style_header('Response From: %s [%s]' % (host, node)))
state = bytes.decode(zk.get(node)[0])
if not first_state:
first_state = state
lines_1 = first_state.split('\n')
lines_2 = state.split('\n')
# Print the content of the file, highlighting lines that do not match between hosts.
for idx, line in enumerate(lines_2):
if len(lines_1)-1 < idx or line != lines_1[idx]:
style = DIFF_STYLE
else:
style = INFO_STYLE
print(style_text(line, style, lpad=4))
zk.stop()
def show_node(zookeepers, node, all_hosts=False, leader=False, debug=False, interactive=False):
"""
Show a zookeeper node on one or more servers.
If the node has children, the children are displayed,
If the node doesn't have children, the contents of the node are displayed.
If leader is specified, only the leader is queried for the node
If all_hosts is specified, each zk host provided is queried individually... if the results
are different between nodes, the child nodes that are different will be highlighted.
returns children of the requested node.
"""
zk_hosts = parse_zk_hosts(zookeepers, all_hosts=all_hosts, leader=leader)
# we'll keep track of differences for this node between zookeepers.
# because zookeeper keeps all nodes in-sync, there shouldn't be differences between the
# nodes... but there might be if you are having replication problems.
all_children = set()
for host in zk_hosts:
# connect to zookeeper
zk = KazooClient(hosts=host, read_only=True)
try:
zk.start()
except KazooTimeoutError as e:
print('ZK Timeout host: [%s], %s' % (host, e))
continue
print('')
# If the node doesn't exist... just let the user know.
if not zk.exists(node):
node_str = style_text(node, BLUE_STYLE, restore=ERROR_STYLE)
zk_str = style_text(host, BLUE_STYLE, restore=ERROR_STYLE)
print(style_text('No node [%s] on %s' % (node_str, zk_str), ERROR_STYLE, pad=2))
continue
if len(zk_hosts) == 1:
print(style_header('Response From: %s [%s]' % (host, node)))
else:
print(style_text('Response From: %s [%s]' % (host, node), HEADER_STYLE, pad=2))
# Query ZooKeeper for the node.
content, zstats = zk.get(node)
# print(dir(zstats))
# print(getattr(zstats, 'czxid'))
# --- Print Node Stats -------------------------
znode_unix_time = zstats.mtime / 1000
#
# local_timezone = time.tzname[time.localtime().tm_isdst] DO NOT USE THIS
is_dst = time.daylight and time.localtime().tm_isdst
offset_hour = time.altzone / 3600 if is_dst else time.timezone / 3600
timezone = 'Etc/GMT%+d' % offset_hour
mod_time = pendulum.fromtimestamp(znode_unix_time, timezone)
mod_time = mod_time.in_timezone(timezone)
local_time_str = mod_time.to_day_datetime_string()
version = str(zstats.version) or str(zstats.cversion)
if debug:
dbg_rjust = max(map(len, ZNODE_DEBUG_ATTRS))
print(style_text("Node Stats:", TITLE_STYLE, lpad=2))
for attr_name in ZNODE_DEBUG_ATTRS:
attr_val = getattr(zstats, attr_name)
if 'time' in attr_name and attr_val > 1:
attr_val = pendulum.fromtimestamp(int(attr_val) / 1000, timezone).in_timezone(timezone).to_day_datetime_string()
print(style_text(attr_name, STATS_STYLE, lpad=4, rjust=dbg_rjust), style_text(attr_val, INPUT_STYLE))
else:
print(style_text('Modified:', STATS_STYLE, lpad=2, rjust=9), style_text(local_time_str, INPUT_STYLE))
print(style_text('Version:', STATS_STYLE, lpad=2, rjust=9), style_text(version, INPUT_STYLE))
print('')
# --- Print Child Nodes, or Node Content -------
if not zstats.numChildren:
zcontent = bytes.decode(content or b'')
if zcontent:
print(style_text("Contents:", TITLE_STYLE, lpad=2))
print(style_multiline(zcontent, INFO_STYLE, lpad=4))
else:
print(style_text("... No child nodes", INFO_STYLE, lpad=2))
else:
children = zk.get_children(node)
children.sort()
cwidth = max([len(c) for c in children])
print(style_text("Child Nodes:", TITLE_STYLE, lpad=2))
for ch in children:
child_path = node+ch if node.endswith('/') else node+'/'+ch
_, czstats = zk.get(child_path)
if all_children and ch not in all_children:
# if this child is unique / different to this zk host, color it differently.
print(style_text(ch, INPUT_STYLE, lpad=4, ljust=cwidth), end='')
else:
print(style_text(ch, INFO_STYLE, lpad=4, ljust=cwidth), end='')
mod_ver = czstats.version or czstats.cversion
print(style_text('v:', STATS_STYLE, lpad=3), style_text(str(mod_ver), INPUT_STYLE, ljust=3), end='')
print(style_text('eph:', STATS_STYLE, lpad=3), style_text('yes' if czstats.ephemeralOwner else 'no', INPUT_STYLE), end='')
mod_datetime = datetime.utcfromtimestamp(czstats.mtime / 1000)
mod_elapsed = datetime.utcnow() - mod_datetime
if mod_elapsed >= timedelta(hours=48):
mod_style = ''
elif mod_elapsed >= timedelta(hours=2):
mod_style = INPUT_STYLE
elif mod_elapsed >= timedelta(minutes=10):
mod_style = GREEN_STYLE
elif mod_elapsed >= timedelta(minutes=1):
mod_style = INFO_STYLE
else:
mod_style = STATS_STYLE
if mod_datetime.year != 1970:
mod_desc = pendulum.fromtimestamp(czstats.mtime / 1000, 'UTC').diff_for_humans()
else:
mod_desc = 'none'
print(style_text('mod:', STATS_STYLE, lpad=3), style_text(mod_desc, mod_style))
zk.stop()
all_children = all_children | set(children)
return list(all_children)
def watch(zookeepers, node, leader=False):
"""
Watch a particular zookeeper node for changes.
"""
zk_hosts = parse_zk_hosts(zookeepers, leader=leader)[0]
def my_listener(state):
if state == KazooState.LOST:
# Register somewhere that the session was lost
print(style_text('Connection Lost', ERROR_STYLE, pad=2))
elif state == KazooState.SUSPENDED:
# Handle being disconnected from Zookeeper
print(style_text('Connection Suspended', ERROR_STYLE, pad=2))
else:
# Handle being connected/reconnected to Zookeeper
# what are we supposed to do here?
print(style_text('Connected/Reconnected', INFO_STYLE, pad=2))
zk = KazooClient(hosts=zk_hosts, read_only=True)
try:
zk.start()
    except KazooTimeoutError as e:
        print('ZK Timeout host: [%s], %s' % (zk_hosts, e))
        return
zk_ver = '.'.join(map(str, zk.server_version()))
zk_host = zk.hosts[zk.last_zxid]
zk_host = ':'.join(map(str, zk_host))
zk.add_listener(my_listener)
# check if the node exists ...
if not zk.exists(node):
node_str = style_text(node, BLUE_STYLE, restore=ERROR_STYLE)
zk_str = style_text(zk_host, BLUE_STYLE, restore=ERROR_STYLE)
print('')
print(style_text('No node [%s] on %s' % (node_str, zk_str), ERROR_STYLE, pad=2))
return
print(style_header('Watching [%s] on %s v%s' % (node, zk_host, zk_ver)))
# put a watch on my znode
children = zk.get_children(node)
# If there are children, watch them.
if children or node.endswith('/'):
@zk.ChildrenWatch(node)
def watch_children(children):
global WATCH_COUNTER
WATCH_COUNTER += 1
if WATCH_COUNTER <= 1:
child_watch_str = 'Child Nodes:'
else:
child_watch_str = 'Node Watch Event: '
children.sort()
print('')
print(style_text(child_watch_str, TITLE_STYLE))
for ch in children:
print(style_text(ch, INFO_STYLE, lpad=2))
print('')
else:
# otherwise watch the node itself.
@zk.DataWatch(node)
def watch_data(data, stat, event):
global WATCH_COUNTER
WATCH_COUNTER += 1
data = data.decode('utf-8')
if WATCH_COUNTER <= 1:
data_watch_str = 'Content: (%s)'
else:
data_watch_str = 'Data Watch Event: (v%s)'
print('')
print(style_text(data_watch_str % stat.version, TITLE_STYLE))
print(style_multiline(data, INFO_STYLE, lpad=2))
print('')
CHAR_WIDTH = 60
counter = 0
while True:
# draw a .... animation while we wait, so the user knows its working.
counter += 1
if not counter % CHAR_WIDTH:
print('\r', ' '*CHAR_WIDTH, '\r', end='')
print(style_text('.', INFO_STYLE), end='')
time.sleep(0.05)
zk.stop()
def admin_command(zookeepers, command, all_hosts=False, leader=False):
"""
Execute an administrative command
"""
command = text_type(command) # ensure we have unicode py2/py3
zk_hosts = parse_zk_hosts(zookeepers, all_hosts=all_hosts, leader=leader)
for host in zk_hosts:
print('')
# use netcat, so we don't modify any transaction values by executing an admin command.
strcmd = command.encode('utf-8')
hostaddr, port = host.split(':')
status = netcat(hostaddr, port, strcmd)
if len(zk_hosts) == 1:
print(style_header('ZK Command [%s] on %s' % (command, host)))
else:
print(style_text('ZK Command [%s] on %s' % (command, host), HEADER_STYLE, pad=2))
print(style_multiline(status, INFO_STYLE, lpad=2))
def sessions_reset(zookeepers, server_id=None, ephemeral=False, solr=False):
"""
Reset connections/sessions to Zookeeper.
"""
# TODO support --clients / --solrj option ?
if server_id:
zk_host = parse_zk_hosts(zookeepers, server_id=server_id)[0]
else:
zk_host = parse_zk_hosts(zookeepers, leader=True)[0]
def get_all_sessions(zk_client):
# Get connection/session information
conn_results = multi_admin_command(zk_client, b'cons')
conn_data = map(parse_admin_cons, conn_results)
conn_data = list(itertools.chain.from_iterable(conn_data))
# Get a dict of all valid zookeeper sessions as integers
return {con['sid']: con['client'][0] for con in conn_data if con.get('sid')}
search = []
if ephemeral:
search += ["ephemeral sessions"]
else:
search += ["sessions"]
if solr:
if search:
search += ['that are']
search += ['solr servers']
if server_id:
search += ['on serverId: %d (%s)' % (server_id, zk_host)]
else:
search += ['on all ensemble members']
print(style_header('RESETTING %s' % ' '.join(search)))
hostaddr, port = zk_host.split(':')
dump = netcat(hostaddr, port, b'dump')
dump_data = parse_admin_dump(dump)
all_sessions = get_all_sessions(KazooClient(zookeepers))
sessions_before = set(all_sessions.keys())
sessions = []
if server_id is not None:
sessions = sorted(all_sessions.keys())
else:
sessions = sorted(all_sessions.keys())
if ephemeral:
sessions = [s for s in sessions if s in dump_data['ephemerals']]
if solr:
sessions = healthy.get_solr_session_ids(KazooClient(zookeepers))
if not sessions:
print(style_text("No sessions matching criteria", STATS_STYLE, lpad=2))
return
##################################
### THREADING IMPLEMENTATION #####
SESSION_TIMEOUT = 30
print(style_text("Sessions will now be reset. This will take %d secs\n" % SESSION_TIMEOUT, TITLE_STYLE, lpad=2))
tlock = threading.Lock()
def break_session(zookeepers, session):
with tlock:
            s_style = style_text("%s" % str(session), STATS_STYLE)
            print(style_text("Resetting session: %s(%s)" % (s_style, all_sessions[session]), INFO_STYLE, lpad=2))
        zk = None
        try:
            # use the session argument (not the enclosing loop variable) so each
            # thread keeps the id it was started with
            zk = KazooClient(hosts=zk_host, client_id=(session, b''), max_retries=3, retry_delay=0.5)
zk.start(SESSION_TIMEOUT)
zk.get('/live_nodes')
time.sleep(SESSION_TIMEOUT)
except KazooTimeoutError as e:
with tlock:
print('ZK Timeout host: [%s], %s' % (zk_host, e))
except Exception as e:
with tlock:
print(style_text("Error Resetting session: %s" % e, ERROR_STYLE, lpad=2))
finally:
if zk:
zk.stop()
with tlock:
print(style_text("Disconnect session: %s" % s_style, INFO_STYLE, lpad=2))
wait = []
for session_id in sessions:
t = Thread(target=break_session, args=(zookeepers, session_id))
t.start()
wait.append(t)
# wait for all threads to finish
for wait_thread in wait:
wait_thread.join()
#########################################
# wait some time for sessions to come back.
time.sleep(5)
sessions_after = get_all_sessions(KazooClient(zookeepers))
sessions_kept = sessions_before & set(sessions_after.keys())
print(style_text("\nSESSIONS BEFORE RESET (%d)" % len(sessions_before), TITLE_STYLE, lpad=2))
for sid in sorted(sessions_before):
if sid in sessions_kept:
print(style_text(str(sid), INFO_STYLE, lpad=4))
else:
print(style_text(str(sid), STATS_STYLE, lpad=4))
print(style_text("\nSESSIONS AFTER RESET (%d)" % len(sessions_after), TITLE_STYLE, lpad=2))
for sid in sorted(sessions_after):
if sid in sessions_kept:
print(style_text(str(sid), INFO_STYLE, lpad=4))
else:
print(style_text(str(sid)+'(new)', STATS_STYLE, lpad=4))
def health_check(zookeepers):
zk_client = KazooClient(zookeepers)
for check in (check_zookeeper_connectivity,
check_ephemeral_sessions_fast,
check_ephemeral_znode_consistency,
check_ephemeral_dump_consistency,
check_watch_sessions_clients,
check_watch_sessions_duplicate,
check_queue_sizes,
check_watch_sessions_valid,
check_overseer_election):
print(style_text("RUNNING: %s" % check.__name__, TITLE_STYLE, lpad=2))
try:
errors = check(zk_client)
        except Exception as e:
            print(healthy.get_exception_traceback())
            print(style_text("ERROR RUNNING %s" % check.__name__, ERROR_STYLE, lpad=4))
            continue
if not errors:
print(style_text("NO ERRORS in %s" % check.__name__, STATS_STYLE, lpad=4))
else:
print(style_text("ERRORS from %s" % check.__name__, INFO_STYLE, lpad=4))
for err in errors:
print(style_text(err, ERROR_STYLE, lpad=8))
def cli():
"""
Build the CLI menu
"""
def verify_json(arg):
try:
data = json.loads(arg)
except ValueError as e:
raise argparse.ArgumentTypeError("invalid json: %s" % e)
return data
def verify_env(arg):
try:
env_config = config()
except ValueError as e:
raise argparse.ArgumentTypeError('Cannot read configuration %s' % e)
if arg not in env_config:
raise argparse.ArgumentTypeError('Invalid Environment %s ... Valid: [%s]' % (arg, ', '.join(list(env_config))))
return env_config[arg]
def verify_zk(arg):
hosts = arg.split('/')[0]
hosts = hosts.split(',')
if ' ' in arg:
raise argparse.ArgumentTypeError("There should be no spaces between zookeeper hosts: %s" % arg)
for zk in hosts:
hostport = zk.split(':')
if len(hostport) == 1:
raise argparse.ArgumentTypeError("Port is required for: %s... default: 2181" % zk)
else:
_, port = hostport
if not port.isdigit():
raise argparse.ArgumentTypeError("Port must be numeric for: %s" % zk)
return arg
def verify_add(arg):
if '=' not in arg:
raise argparse.ArgumentTypeError("You must use the syntax ENVIRONMENT=127.0.0.1:2181")
env, zk = arg.split('=')
verify_zk(zk)
return {env.strip(): zk.strip()}
def verify_node(arg):
if not arg.startswith('/'):
raise argparse.ArgumentTypeError("Zookeeper nodes start with /")
return arg
def verify_cmd(arg):
if arg.lower() not in ZK_ADMIN_CMDS:
raise argparse.ArgumentTypeError("Invalid command '%s'... \nValid Commands: %s" % (arg, '\n '.join(ZK_ADMIN_CMDS)))
return arg.lower()
# Top level parser
parser = argparse.ArgumentParser(prog=__application__)
subparsers = parser.add_subparsers(help='--- available sub-commands ---', dest='command')
try:
env_config = config()
except ValueError:
env_config = {}
zk_argument = {
'args': ['-z', '--zookeepers'],
'kwargs': {
'required': False,
'default': None,
'type': verify_zk,
'help': ('Zookeeper connection string, with optional root... \n'
'eg. 127.0.0.1:2181 or 10.10.1.5:2181/root \n'
'NOTE: --zookeepers or --env must be specified!')
}
}
env_argument = {
'args': ['-e', '--env'],
'kwargs': {
'required': False,
'default': None,
'type': verify_env,
'help': ('Connect to zookeeper using one of the configured environments. \n'
'Note: to view or modify config use the "%s" sub-command. \n'
'Configured Environments: [%s]' % (COMMANDS['config'][0], ', '.join(list(env_config))))
}
}
all_argument = {
'args': ['-a', '--all-hosts'],
'kwargs': {
'default': False,
'required': False,
'action': 'store_true',
'help': 'Show response from all zookeeper hosts'
}
}
leader_argument = {
'args': ['-l', '--leader'],
'kwargs': {
'default': False,
'required': False,
'action': 'store_true',
'help': 'Query ensemble leader only'
}
}
debug_argument = {
'args': ['--debug'],
'kwargs': {
'default': False,
'required': False,
'action': 'store_true',
'help': 'Show debug stats'
}
}
# -- SOLR - LIVE NODES -----------
cmd, about = COMMANDS['solr']
solr = subparsers.add_parser(cmd, help=about)
solr.add_argument(*zk_argument['args'], **zk_argument['kwargs'])
solr.add_argument(*env_argument['args'], **env_argument['kwargs'])
solr.add_argument(*all_argument['args'], **all_argument['kwargs'])
solr.add_argument('-b', '--browser', default=False, required=False,
action='store_true', help='Open solr-admin in web-browser for resolved host')
solr.add_argument(*leader_argument['args'], **leader_argument['kwargs'])
# -- SOLR - CLUSTERSTATE -------
cmd, about = COMMANDS['clusterstate']
cluster = subparsers.add_parser(cmd, help=about)
cluster.add_argument(*zk_argument['args'], **zk_argument['kwargs'])
cluster.add_argument(*env_argument['args'], **env_argument['kwargs'])
cluster.add_argument(*all_argument['args'], **all_argument['kwargs'])
# -- WATCH ----------------------
cmd, about = COMMANDS['watch']
watches = subparsers.add_parser(cmd, help=about)
watches.add_argument('node', help='Zookeeper node', type=verify_node)
watches.add_argument(*zk_argument['args'], **zk_argument['kwargs'])
watches.add_argument(*env_argument['args'], **env_argument['kwargs'])
watches.add_argument(*leader_argument['args'], **leader_argument['kwargs'])
# -- LS -------------------------
cmd, about = COMMANDS['ls']
ls = subparsers.add_parser(cmd, help=about)
ls.add_argument('node', help='Zookeeper node', type=verify_node) # positional argument
ls.add_argument(*zk_argument['args'], **zk_argument['kwargs'])
ls.add_argument(*env_argument['args'], **env_argument['kwargs'])
ls.add_argument(*all_argument['args'], **all_argument['kwargs'])
ls.add_argument(*leader_argument['args'], **leader_argument['kwargs'])
ls.add_argument(*debug_argument['args'], **debug_argument['kwargs'])
# -- STATUS ---------------------
cmd, about = COMMANDS['status']
status = subparsers.add_parser(cmd, help=about)
status.add_argument(*zk_argument['args'], **zk_argument['kwargs'])
status.add_argument(*env_argument['args'], **env_argument['kwargs'])
status.add_argument(*leader_argument['args'], **leader_argument['kwargs'])
# -- ADMIN ---------------------
cmd, about = COMMANDS['admin']
admin = subparsers.add_parser(cmd, help=about)
admin.add_argument('cmd', help='ZooKeeper Administrative Command', type=verify_cmd)
admin.add_argument(*zk_argument['args'], **zk_argument['kwargs'])
admin.add_argument(*env_argument['args'], **env_argument['kwargs'])
admin.add_argument(*all_argument['args'], **all_argument['kwargs'])
admin.add_argument(*leader_argument['args'], **leader_argument['kwargs'])
# -- CONFIG ---------------------
cmd, about = COMMANDS['config']
envs = subparsers.add_parser(cmd, help=about)
envs.add_argument('-c', '--configuration', default=None, required=False, type=verify_json,
help='Set the environments configuration located at %s, string passed must be valid json ' % config_path())
envs.add_argument('-a', '--add', default=None, required=False, type=verify_add,
help=('add/update an environment variable using the syntax KEY=VALUE,\n'
'eg. DEV=zk01.dev.com:2181,zk02.dev.com:2181'))
# -- SESSIONS RESET -------------
cmd, about = COMMANDS['sessions']
session = subparsers.add_parser(cmd, help=about)
session.add_argument(*zk_argument['args'], **zk_argument['kwargs'])
session.add_argument(*env_argument['args'], **env_argument['kwargs'])
session.add_argument('-id', '--server-id', type=int, default=None, required=False,
help='reset connections only for the matched server id, if not specified ALL sessions are reset')
session.add_argument('--ephemeral', action='store_true', required=False,
help='reset sessions with ephemeral nodes only')
session.add_argument('--solr', action='store_true', required=False,
help='reset sessions from solr nodes only')
# -- HEALTH -------------------
cmd, about = COMMANDS['health']
session = subparsers.add_parser(cmd, help=about)
session.add_argument(*zk_argument['args'], **zk_argument['kwargs'])
session.add_argument(*env_argument['args'], **env_argument['kwargs'])
return parser
def main(argv=None):
colorama.init(autoreset=True) # initialize color handling for windows terminals.
parser = cli()
args = vars(parser.parse_args(argv[1:]))
cmd = args['command']
if (('zookeepers' in args and 'env' in args)
and not any((args['env'], args['zookeepers']))):
print(" 'zookeepers', or 'env' argument is required", end='')
print(' ', style_text("Add --help to your command for help", ERROR_STYLE, pad=1))
return
if args.get('env') and not args.get('zookeepers'):
# when env is specified and valid, but zookeepers is not
# env should have been resolved to a zookeeper host string.
args['zookeepers'] = args['env']
# -- COMMAND HANDLERS --------------------------------------------------------------------------
if cmd == COMMANDS['solr'][0]:
hosts = show_node(zookeepers=args['zookeepers'], node=ZK_LIVE_NODES, all_hosts=args['all_hosts'], leader=args['leader'])
if args.get('browser') and hosts:
solr_admin = choice(hosts).replace('_solr', '/solr')
# C:\Users\Scott\AppData\Local\Google\Chrome\Application\chrome.exe
# webbrowser._tryorder
webbrowser.get().open('http://'+solr_admin, new=NEW_TAB, autoraise=True)
elif cmd == COMMANDS['clusterstate'][0]:
clusterstate(zookeepers=args['zookeepers'], all_hosts=args['all_hosts'])
elif cmd == COMMANDS['ls'][0]:
show_node(zookeepers=args['zookeepers'], node=args['node'], all_hosts=args['all_hosts'], leader=args['leader'], debug=args['debug'])
elif cmd == COMMANDS['watch'][0]:
watch(zookeepers=args['zookeepers'], node=args['node'], leader=args['leader'])
elif cmd == COMMANDS['config'][0]:
update_config(configuration=args['configuration'], add=args['add'])
elif cmd == COMMANDS['status'][0]:
# TODO improve this command so it is a combination of mntr, stat, cons, and ruok
admin_command(zookeepers=args['zookeepers'], command='stat', leader=args['leader'])
elif cmd == COMMANDS['admin'][0]:
admin_command(zookeepers=args['zookeepers'], command=args['cmd'], all_hosts=args['all_hosts'], leader=args['leader'])
elif cmd == COMMANDS['sessions'][0]:
sessions_reset(zookeepers=args['zookeepers'], server_id=args['server_id'], ephemeral=args['ephemeral'], solr=args['solr'])
elif cmd == COMMANDS['health'][0]:
health_check(zookeepers=args['zookeepers'])
else:
parser.print_help()
print("")
if __name__ == '__main__':
try:
sys.exit(main(sys.argv))
except KeyboardInterrupt:
sys.exit('\n') | bendemott/solr-zkutil | solrzkutil/__init__.py | Python | mit | 38,530 |
import cv2
import numpy as np
import sys
def reshapeImg(img, l, w, p):
# reshape image to (l, w) and add/remove pixels as needed
while len(img) % p != 0:
img = np.append(img, 255)
    olds = img.size // p  # integer division so the pixel counts below stay ints (Python 3 safe)
news = l*w
if news < olds:
img = img[:p*news]
elif news > olds:
img = np.concatenate( (img, np.zeros(p*(news-olds))) )
return img.reshape(l, w, p)
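# Illustrative call of reshapeImg (hypothetical numbers, not from the original script):
# a flat array of 7 values with p=3 is first padded with 255 up to length 9; reshapeImg(arr, 2, 2, 3)
# then pads with zeros to 2*2*3 = 12 values and returns a (2, 2, 3) array that cv2.imwrite can save.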
if __name__ == "__main__":
# Usage: python create-image2.py <height> <width> <array>
# for random image do <array> = random
# array format string = xyz...
l = int(sys.argv[1])
w = int(sys.argv[2])
s = sys.argv[3]
# create random image if no array given
if s == "random":
arr = np.random.randint(256, size=l*w*3)
else:
arr = np.asarray([int(s[i:i+3]) for i in range(0, len(s)-3, 3)])
# write image to new file
cv2.imwrite("img.png", reshapeImg(arr, l, w, 3))
| andykais/painting-base2 | examples/create-image2.py | Python | mit | 839 |
import numpy
import pandas
import statsmodels.api as sm
'''
In this exercise, we will perform some rudimentary practices similar to those of
an actual data scientist.
Part of a data scientist's job is to use her or his intuition and insight to
write algorithms and heuristics. A data scientist also creates mathematical models
to make predictions based on some attributes from the data that they are examining.
We would like for you to take your knowledge and intuition about the Titanic
and its passengers' attributes to predict whether or not the passengers survived
or perished. You can read more about the Titanic and specifics about this dataset at:
http://en.wikipedia.org/wiki/RMS_Titanic
http://www.kaggle.com/c/titanic-gettingStarted
In this exercise and the following ones, you are given a list of Titanic passengers
and their associated information. More information about the data can be seen at the
link below:
http://www.kaggle.com/c/titanic-gettingStarted/data.
For this exercise, you need to write a simple heuristic that will use
the passengers' gender to predict if that person survived the Titanic disaster.
Your prediction should be 78% accurate or higher.
Here's a simple heuristic to start off:
1) If the passenger is female, your heuristic should assume that the
passenger survived.
2) If the passenger is male, your heuristic should
assume that the passenger did not survive.
You can access the gender of a passenger via passenger['Sex'].
If the passenger is male, passenger['Sex'] will return a string "male".
If the passenger is female, passenger['Sex'] will return a string "female".
Write your prediction back into the "predictions" dictionary. The
key of the dictionary should be the passenger's id (which can be accessed
via passenger["PassengerId"]) and the associated value should be 1 if the
passenger survied or 0 otherwise.
For example, if a passenger is predicted to have survived:
passenger_id = passenger['PassengerId']
predictions[passenger_id] = 1
And if a passenger is predicted to have perished in the disaster:
passenger_id = passenger['PassengerId']
predictions[passenger_id] = 0
You can also look at the Titanic data that you will be working with
at the link below:
https://www.dropbox.com/s/r5f9aos8p9ri9sa/titanic_data.csv
'''
def simple_heuristic(file_path):
predictions = {}
df = pandas.read_csv(file_path)
for passenger_index, passenger in df.iterrows():
passenger_id = passenger['PassengerId']
if passenger['Sex'] == 'female':
predictions[passenger_id] = 1
else:
predictions[passenger_id] = 0
#print predictions
return predictions
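# Hypothetical usage sketch (added for illustration, not part of the original exercise file).
# Assumes a local copy of the Kaggle data saved as 'titanic_data.csv' with a 'Survived' column.
if __name__ == '__main__':
    predictions = simple_heuristic('titanic_data.csv')
    df = pandas.read_csv('titanic_data.csv')
    correct = sum(predictions[p['PassengerId']] == p['Survived'] for _, p in df.iterrows())
    print('Gender heuristic accuracy: %.3f' % (correct / float(len(df))))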
| rmhyman/DataScience | Lesson1/titanic_data_heuristic1.py | Python | mit | 2,937 |
# -*- coding: utf-8 -*-
"""
Translator module that uses the Google Translate API.
Adapted from Terry Yin's google-translate-python.
Language detection added by Steven Loria.
"""
from __future__ import absolute_import
import json
import re
import codecs
from textblob.compat import PY2, request, urlencode
from textblob.exceptions import TranslatorError
class Translator(object):
"""A language translator and detector.
Usage:
::
>>> from textblob.translate import Translator
>>> t = Translator()
>>> t.translate('hello', from_lang='en', to_lang='fr')
u'bonjour'
>>> t.detect("hola")
u'es'
"""
url = "http://translate.google.com/translate_a/t"
headers = {'User-Agent': ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) '
'AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.168 Safari/535.19')}
def translate(self, source, from_lang=None, to_lang='en', host=None, type_=None):
"""Translate the source text from one language to another."""
if PY2:
source = source.encode('utf-8')
data = {"client": "p", "ie": "UTF-8", "oe": "UTF-8",
"sl": from_lang, "tl": to_lang, "text": source}
json5 = self._get_json5(self.url, host=host, type_=type_, data=data)
return self._get_translation_from_json5(json5)
def detect(self, source, host=None, type_=None):
"""Detect the source text's language."""
if PY2:
source = source.encode('utf-8')
if len(source) < 3:
raise TranslatorError('Must provide a string with at least 3 characters.')
data = {"client": "p", "ie": "UTF-8", "oe": "UTF-8", "text": source}
json5 = self._get_json5(self.url, host=host, type_=type_, data=data)
lang = self._get_language_from_json5(json5)
return lang
def _get_language_from_json5(self, content):
json_data = json.loads(content)
if 'src' in json_data:
return json_data['src']
return None
def _get_translation_from_json5(self, content):
result = u""
json_data = json.loads(content)
if 'sentences' in json_data:
result = ''.join([s['trans'] for s in json_data['sentences']])
return _unescape(result)
def _get_json5(self, url, host=None, type_=None, data=None):
encoded_data = urlencode(data).encode('utf-8')
req = request.Request(url=url, headers=self.headers, data=encoded_data)
if host or type_:
req.set_proxy(host=host, type=type_)
resp = request.urlopen(req)
content = resp.read()
return content.decode('utf-8')
def _unescape(text):
"""Unescape unicode character codes within a string.
"""
pattern = r'\\{1,2}u[0-9a-fA-F]{4}'
decode = lambda x: codecs.getdecoder('unicode_escape')(x.group())[0]
return re.sub(pattern, decode, text)
| nvoron23/TextBlob | textblob/translate.py | Python | mit | 2,924 |
'''
These classes specify the attributes
that a view object can have when editing views
'''
__author__ = 'William Emfinger'
__copyright__ = 'Copyright 2016, ROSMOD'
__credits__ = ['William Emfinger', 'Pranav Srinivas Kumar']
__license__ = 'GPL'
__version__ = '0.4'
__maintainer__ = 'William Emfinger'
__email__ = '[email protected]'
__status__ = 'Production'
from meta import Attribute
objects = ['Container', 'Association']
# model related
class Object(Attribute):
tooltip = 'What kind of object is being viewed.'
options = objects
def __init__(self, value):
super(Object, self).__init__('list', value)
# drawing related
class Layout_Style(Attribute):
tooltip = 'How are the children arranged in this object.'
options = ['horizontal', 'vertical', 'grid', 'anchor']
def __init__(self, value):
super(Layout_Style, self).__init__('list', value)
class Width(Attribute):
tooltip = 'Width of the object.'
def __init__(self, value):
super(Width, self).__init__('float', value)
class Height(Attribute):
tooltip = 'Height of the object.'
def __init__(self, value):
super(Height, self).__init__('float', value)
class Draw_Style(Attribute):
tooltip = 'How the object is drawn.'
options = ['icon', 'ellipse', 'rect', 'round rect', 'hidden']
def __init__(self, value):
super(Draw_Style, self).__init__('list', value)
class Icon(Attribute):
tooltip = 'Icon displayed as background of the object.'
def __init__(self, value):
super(Icon, self).__init__('file_png', value)
class Color(Attribute):
tooltip = 'What color will the object be drawn with.'
def __init__(self, value):
super(Color, self).__init__('string', value)
class Text_Location(Attribute):
tooltip = 'Where will text be located?'
options = ['top', 'bottom', 'left', 'right', 'center']
def __init__(self, value):
super(Text_Location, self).__init__('list', value)
class Text_Horizontal_Alignment(Attribute):
tooltip = 'Horizontal Alignment of text'
options = ['left', 'right', 'horizontal center', 'justify']
def __init__(self, value):
super(Text_Horizontal_Alignment, self).__init__('list', value)
class Text_Vertical_Alignment(Attribute):
tooltip = 'Vertical Alignment of text'
options = ['top', 'bottom', 'vertical center']
def __init__(self, value):
super(Text_Vertical_Alignment, self).__init__('list', value)
# Layout configuration related
class Layout_Config(Attribute):
options = ['horizontal', 'vertical', 'grid', 'anchor']
editable = False
def __init__(self, value):
super(Layout_Config, self).__init__('dictionary_list', value)
class Root(Attribute):
tooltip = 'What acts as the local anchor for this object?'
options = ['top left', 'top right',
'bottom left', 'bottom right',
'center left', 'center right',
'top center', 'bottom center']
def __init__(self, value):
super(Root, self).__init__('list', value)
class Anchor(Attribute):
tooltip = 'What other object:point acts as the anchor for this object?'
options = ['top left', 'top right',
'bottom left', 'bottom right',
'center left', 'center right',
'top center', 'bottom center']
editable = False
def __init__(self, value):
super(Anchor, self).__init__('dictionary_reference', value)
# Association
class Source(Attribute):
    tooltip = 'What is the external object source reference for this object'
def __init__(self, value):
super(Source, self).__init__('string', value)
class Destination(Attribute):
tooltip = 'What is the object destination reference for this object'
def __init__(self, value):
super(Destination, self).__init__('string', value)
| finger563/editor | src/view_attributes.py | Python | mit | 3,890 |
#! /usr/bin/env python3
import vk
import sys
import json
# get access token
#app_id = 4360605
#url = "http://api.vkontakte.ru/oauth/authorize?client_id=" + str(app_id) + "&scope=4&redirect_uri=http://api.vk.com/blank.html&display=page&response_type=token"
#webbrowser.open_new_tab(url)
#exit()
word = sys.argv[1]
txt_file = open(word + '.txt', "w")
html_file = open(word + '.html', "w")
vkapi = vk.API(access_token='copy_token_here')
result = vkapi.groups.search(q = word, offset = 0, count = 100)
#print(result['count'])
#exit()
json_tree = result['items']
for item in json_tree:
link = 'http://vk.com/club' + str(item['id'])
name = item['name']
tag_link = '<a href="' + link + '">' + link + '</a>' + '\t' + name + '<br>'
txt_file.write(link + '\n')
html_file.write(tag_link + '\n')
txt_file.close()
html_file.close()
| dm-urievich/learn_vk_api | test_api.py | Python | mit | 849 |
#HV Control &
#Read and Plot from the PMT
#This code is to record the data that is received into the Teensy's ADC.
#Includes the HV control and replotting the results at the end.
#See CSV Dataplot notebook to plot old experiment data.
from __future__ import division
from __future__ import print_function
from pyqtgraph import QtGui, QtCore #Provides usage of PyQt4's libraries which aids in UI design
import pyqtgraph as pg #Initiation of plotting code
import serial #Communication with the serial port is done using the pySerial 2.7 package
from datetime import datetime #Allows us to look at current date and time
#import dataprocessing #code for plotting the data from the CSV
## Always start by initializing Qt (only once per application)
app = QtGui.QApplication([])
## Define a top-level widget to hold everything (a window)
w = QtGui.QWidget()
w.resize(1000,600)
w.setWindowTitle('Voltage Plots')
startBtnClicked = False
quitBtnClicked = False
firstupdate = 0
## This function contains the behavior we want to see when the start button is clicked
def startButtonClicked():
global startBtnClicked
global startBtn
if (startBtnClicked == False):
teensySerialData.flushInput() #empty serial buffer for input from the teensy
startBtnClicked = True
startBtn.setText('Stop')
elif (startBtnClicked == True):
startBtnClicked = False
startBtn.setText('Start')
## Below at the end of the update function we check the value of quitBtnClicked
def quitButtonClicked():
global quitBtnClicked
quitBtnClicked = True
## Buttons to control the High Voltage
def HVoffButtonClicked():
teensySerialData.write('0')
print("HV Off")
def HVonButtonClicked():
teensySerialData.write('1')
print("HV On")
def insertionButtonClicked():
teensySerialData.write('3')
print("Insertion")
def separationButtonClicked():
teensySerialData.write('2')
print("Separation")
#Start Recording in Widget
## Create widgets to be placed inside
startBtn = QtGui.QPushButton('Start')
startBtn.setToolTip('Click to begin graphing') #This message appears while hovering mouse over button
quitBtn = QtGui.QPushButton('Quit')
quitBtn.setToolTip('Click to quit program')
HVonBtn = QtGui.QPushButton("HV on")
HVonBtn.setToolTip('Click to turn the high voltage on')
HVoffBtn = QtGui.QPushButton("HV off")
HVoffBtn.setToolTip('Click to turn the high voltage off')
insBtn = QtGui.QPushButton("Insertion")
insBtn.setToolTip('Click to start insertion (#3)')
sepBtn = QtGui.QPushButton("Separation")
sepBtn.setToolTip('Click to start separation (#2)')
## Functions in parantheses are to be called when buttons are clicked
startBtn.clicked.connect(startButtonClicked)
quitBtn.clicked.connect(quitButtonClicked)
HVonBtn.clicked.connect(HVonButtonClicked)
HVoffBtn.clicked.connect(HVoffButtonClicked)
insBtn.clicked.connect(insertionButtonClicked)
sepBtn.clicked.connect(separationButtonClicked)
## xSamples is the maximum amount of samples we want graphed at a time
xSamples = 300
## Create plot widget for peak detector plot
pmtPlotWidget = pg.PlotWidget()
pmtPlotWidget.setYRange(0, 4096)
pmtPlotWidget.setXRange(0, xSamples)
pmtPlotWidget.setLabel('top', text = "PMT") #Title to appear at top of widget
## Create a grid layout to manage the widgets size and position
## The grid layout allows us to place a widget in a given column and row
layout = QtGui.QGridLayout()
w.setLayout(layout)
## Add widgets to the layout in their proper positions
## The first number in parantheses is the row, the second is the column
layout.addWidget(quitBtn, 0, 0)
layout.addWidget(startBtn, 2, 0)
layout.addWidget(HVonBtn, 0, 2)
layout.addWidget(insBtn, 2, 2)
layout.addWidget(sepBtn, 3, 2)
layout.addWidget(HVoffBtn, 4, 2)
layout.addWidget(pmtPlotWidget, 1, 1)
## Display the widget as a new window
w.show()
## Initialize all global variables
## Whenever we plot a range of samples, xLeftIndex is the x value on the
## PlotWidget where we start plotting the samples, xRightIndex is where we stop
## These values will reset when they reach the value of xSamples
xRightIndex = 0
xLeftIndex = 0
## These arrays will hold the unplotted voltage values from the pmt
## and the peak detector until we are able to update the plot
pmtData = []
## Used to determine how often we plot a range of values
graphCount = 0
## Time values in microseconds read from the teensy are stored in these variables
## Before timeElapsed is updated, we store its old value in timeElapsedPrev
timeElapsed = 0
timeElapsedPrev = 0
## Determines if we are running through the update loop for the first time
firstRun = True
## Create new file, with the name being today's date and current time and write headings to file in CSV format
i = datetime.now()
fileName = str(i.year) + str(i.month) + str(i.day) + "_" + str(i.hour) + str(i.minute) + str(i.second) + ".csv"
## File is saved to Documents/IPython Notebooks/RecordedData
#f = open('RecordedData\\' + fileName, 'a')
#f.write("#Data from " + str(i.year) + "-" + str(i.month) + "-" + str(i.day) + " at " + str(i.hour) + ":" + str(i.minute) + ":" + str(i.second) + '\n')
#f.write("Timestamp,PMT\n")
## Initialize the container for our voltage values read in from the teensy
## IMPORTANT NOTE: The com port value needs to be updated if the com value
## changes. It's the same number that appears on the bottom right corner of the
## window containing the TeensyDataWrite.ino code
teensySerialData = serial.Serial("/dev/tty.usbmodem1452", 115200)
def update():
## Set global precedence to previously defined values
global xSamples
global xRightIndex
global xLeftIndex
global pmtData
global graphCount
global timeElapsed
global timeElapsedPrev
global firstRun
global firstupdate
if firstupdate == 0:
teensySerialData.flushInput()
firstupdate += 1
## The number of bytes currently waiting to be read in.
## We want to read these values as soon as possible, because
## we will lose them if the buffer fills up
bufferSize = teensySerialData.inWaiting()
runCount = bufferSize//8 # since we write 8 bytes at a time, we similarly want to read them 8 at a time
#print(bufferSize, runCount)
    while (runCount > 0):
        if (startBtnClicked != True):
            # Not recording: discard the buffered bytes and leave this drain loop,
            # otherwise runCount is never decremented and the loop would never end.
            teensySerialData.flushInput()
            break
        if (startBtnClicked == True):
#Read in time (int) and PMT output (float with up to 5 decimal places)
temp = []
temp.append(teensySerialData.readline().strip().split(',') )
print(bufferSize, runCount, temp[-1][0], temp[-1][1])
timeElapsedPrev = timeElapsed
timeElapsed = int (temp[0][0])
if (firstRun == True):
## Only run once to ensure buffer is completely flushed
firstRun = False
teensySerialData.flushInput()
break
# We'll add all our values to this string until we're ready to exit the loop, at which point it will be written to a file
stringToWrite = str(timeElapsed) + ","
## This difference calucalted in the if statement is the amount of time in microseconds since the last value
## we read in and wrote to a file. If this value is significantly greater than 100, we know we have missed some
## values, probably as a result of the buffer filling up and scrapping old values to make room for new values.
## The number we print out will be the approximate number of values we failed to read in.
## This is useful to determine if your code is running too slow
#if (timeElapsed - timeElapsedPrev > 8000):
#print(str((timeElapsed-timeElapsedPrev)/7400))
numData = float (temp[0][1])
pmtData.append(numData)
stringToWrite = stringToWrite + str(numData) + '\n'
#f.write(stringToWrite)
graphCount = graphCount + 1
xRightIndex = xRightIndex + 1
runCount = runCount - 1
## We will start plotting when the start button is clicked
if startBtnClicked == True:
if (graphCount >= 1): #We will plot new values once we have this many values to plot
if (xLeftIndex == 0):
# Remove all PlotDataItems from the PlotWidgets.
# This will effectively reset the graphs (approximately every 30000 samples)
#pmtPlotWidget.clear()
pmtPlotWidget.clear()
## pmtCurve are of the PlotDataItem type and are added to the PlotWidget.
## Documentation for these types can be found on pyqtgraph's website
pmtCurve = pmtPlotWidget.plot()
xRange = range(xLeftIndex,xRightIndex)
pmtCurve.setData(xRange, pmtData)
## Now that we've plotting the values, we no longer need these arrays to store them
pmtData = []
xLeftIndex = xRightIndex
graphCount = 0
if(xRightIndex >= xSamples):
xRightIndex = 0
xLeftIndex = 0
pmtData = []
if(quitBtnClicked == True):
## Close the file and close the window. Performing this action here ensures values we want to write to the file won't be cut off
#f.close()
w.close()
teensySerialData.close()
#dataprocessing.CSVDataPlot(fileName)
## Run update function in response to a timer
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(0)
## Start the Qt event loop
app.exec_() | gregnordin/micropython_pyboard | 150729_pyboard_to_pyqtgraph/serial_pyboard_to_python.py | Python | mit | 9,779 |
import re
list_re = re.compile(r'\((.*)\) \"(.*)\" \"(.*)\"')
class Response(object):
# There are three possible server completion responses
OK = "OK" # indicates success
NO = "NO" # indicates failure
BAD = "BAD" # indicates a protocol error
class ListResponse(object):
def __init__(self, list_response):
match = list_re.match(list_response)
self.attributes = match.group(1).split()
self.hierarchy_delimiter = match.group(2)
self.name = match.group(3)
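# Illustrative parse (assumed example, not from the library's tests): a typical IMAP LIST reply
#   (\HasNoChildren) "/" "INBOX"
# yields attributes == ['\\HasNoChildren'], hierarchy_delimiter == '/', name == 'INBOX'.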
| clara-labs/imaplib3 | imaplib3/response.py | Python | mit | 513 |
#!/usr/bin/env python
import argparse
import bz2
import gzip
import os.path
import sys
from csvkit import CSVKitReader
from csvkit.exceptions import ColumnIdentifierError, RequiredHeaderError
def lazy_opener(fn):
def wrapped(self, *args, **kwargs):
self._lazy_open()
        return fn(self, *args, **kwargs)
return wrapped
class LazyFile(object):
"""
A proxy for a File object that delays opening it until
a read method is called.
Currently this implements only the minimum methods to be useful,
but it could easily be expanded.
"""
def __init__(self, init, *args, **kwargs):
self.init = init
self.f = None
self._is_lazy_opened = False
self._lazy_args = args
self._lazy_kwargs = kwargs
def __getattr__(self, name):
if not self._is_lazy_opened:
self.f = self.init(*self._lazy_args, **self._lazy_kwargs)
self._is_lazy_opened = True
return getattr(self.f, name)
def __iter__(self):
return self
def close(self):
self.f.close()
self.f = None
self._is_lazy_opened = False
def next(self):
if not self._is_lazy_opened:
self.f = self.init(*self._lazy_args, **self._lazy_kwargs)
self._is_lazy_opened = True
return self.f.next()
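# Illustrative use (an assumption, not from csvkit's tests): nothing is opened until first use, e.g.
#   f = LazyFile(open, 'data.csv', 'rb')   # no file handle created yet
#   line = f.next()                        # open() happens here, then one line is read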
class CSVFileType(object):
"""
An argument factory like argparse.FileType with compression support.
"""
def __init__(self, mode='rb'):
"""
Initialize the factory.
"""
self._mode = mode
def __call__(self, path):
"""
Build a file-like object from the specified path.
"""
if path == '-':
if 'r' in self._mode:
return sys.stdin
elif 'w' in self._mode:
return sys.stdout
else:
raise ValueError('Invalid path "-" with mode {0}'.format(self._mode))
else:
(_, extension) = os.path.splitext(path)
if extension == '.gz':
return LazyFile(gzip.open, path, self._mode)
if extension == '.bz2':
return LazyFile(bz2.BZ2File, path, self._mode)
else:
return LazyFile(open, path, self._mode)
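# Illustrative use (an assumption, not code from csvkit itself): CSVFileType can be handed to
# argparse so .gz/.bz2 CSVs are opened transparently and '-' still maps to stdin/stdout, e.g.
#   parser.add_argument('file', type=CSVFileType('rb'))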
class CSVKitUtility(object):
description = ''
epilog = ''
override_flags = ''
def __init__(self, args=None, output_file=None):
"""
Perform argument processing and other setup for a CSVKitUtility.
"""
self._init_common_parser()
self.add_arguments()
self.args = self.argparser.parse_args(args)
self.reader_kwargs = self._extract_csv_reader_kwargs()
self.writer_kwargs = self._extract_csv_writer_kwargs()
self._install_exception_handler()
if output_file is None:
self.output_file = sys.stdout
else:
self.output_file = output_file
# Ensure SIGPIPE doesn't throw an exception
# Prevents [Errno 32] Broken pipe errors, e.g. when piping to 'head'
# To test from the shell:
# python -c "for i in range(5000): print 'a,b,c'" | csvlook | head
# Without this fix you will see at the end:
# [Errno 32] Broken pipe
# With this fix, there should be no error
# For details on Python and SIGPIPE, see http://bugs.python.org/issue1652
try:
import signal
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except (ImportError, AttributeError):
#Do nothing on platforms that don't have signals or don't have SIGPIPE
pass
def add_arguments(self):
"""
Called upon initialization once the parser for common arguments has been constructed.
Should be overriden by individual utilities.
"""
raise NotImplementedError('add_arguments must be provided by each subclass of CSVKitUtility.')
def main(self):
"""
Main loop of the utility.
Should be overriden by individual utilities and explicitly called by the executing script.
"""
        raise NotImplementedError('main must be provided by each subclass of CSVKitUtility.')
def _init_common_parser(self):
"""
Prepare a base argparse argument parser so that flags are consistent across different shell command tools.
        If you want to constrain which common args are present, set the 'override_flags' class attribute. Any argument
        whose single-letter form is contained in 'override_flags' will be left out of the configured parser. Use 'f' for
        file.
"""
self.argparser = argparse.ArgumentParser(description=self.description, epilog=self.epilog)
# Input
if 'f' not in self.override_flags:
self.argparser.add_argument('file', metavar="FILE", nargs='?', type=CSVFileType(), default=sys.stdin,
help='The CSV file to operate on. If omitted, will accept input on STDIN.')
if 'd' not in self.override_flags:
self.argparser.add_argument('-d', '--delimiter', dest='delimiter',
help='Delimiting character of the input CSV file.')
if 't' not in self.override_flags:
self.argparser.add_argument('-t', '--tabs', dest='tabs', action='store_true',
help='Specifies that the input CSV file is delimited with tabs. Overrides "-d".')
if 'q' not in self.override_flags:
self.argparser.add_argument('-q', '--quotechar', dest='quotechar',
help='Character used to quote strings in the input CSV file.')
if 'u' not in self.override_flags:
self.argparser.add_argument('-u', '--quoting', dest='quoting', type=int, choices=[0,1,2,3],
help='Quoting style used in the input CSV file. 0 = Quote Minimal, 1 = Quote All, 2 = Quote Non-numeric, 3 = Quote None.')
if 'b' not in self.override_flags:
self.argparser.add_argument('-b', '--doublequote', dest='doublequote', action='store_true',
help='Whether or not double quotes are doubled in the input CSV file.')
if 'p' not in self.override_flags:
self.argparser.add_argument('-p', '--escapechar', dest='escapechar',
help='Character used to escape the delimiter if --quoting 3 ("Quote None") is specified and to escape the QUOTECHAR if --doublequote is not specified.')
if 'z' not in self.override_flags:
self.argparser.add_argument('-z', '--maxfieldsize', dest='maxfieldsize', type=int,
help='Maximum length of a single field in the input CSV file.')
if 'e' not in self.override_flags:
self.argparser.add_argument('-e', '--encoding', dest='encoding', default='utf-8',
help='Specify the encoding the input CSV file.')
if 'S' not in self.override_flags:
self.argparser.add_argument('-S', '--skipinitialspace', dest='skipinitialspace', default=False, action='store_true',
help='Ignore whitespace immediately following the delimiter.')
if 'H' not in self.override_flags:
self.argparser.add_argument('-H', '--no-header-row', dest='no_header_row', action='store_true',
help='Specifies that the input CSV file has no header row. Will create default headers.')
if 'v' not in self.override_flags:
self.argparser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
help='Print detailed tracebacks when errors occur.')
# Output
if 'l' not in self.override_flags:
self.argparser.add_argument('-l', '--linenumbers', dest='line_numbers', action='store_true',
help='Insert a column of line numbers at the front of the output. Useful when piping to grep or as a simple primary key.')
# Input/Output
if 'zero' not in self.override_flags:
self.argparser.add_argument('--zero', dest='zero_based', action='store_true',
help='When interpreting or displaying column numbers, use zero-based numbering instead of the default 1-based numbering.')
def _extract_csv_reader_kwargs(self):
"""
        Extracts those command-line arguments that should be passed through to the input CSV reader(s).
"""
kwargs = {}
if self.args.encoding:
kwargs['encoding'] = self.args.encoding
if self.args.tabs:
kwargs['delimiter'] = '\t'
elif self.args.delimiter:
kwargs['delimiter'] = self.args.delimiter
if self.args.quotechar:
kwargs['quotechar'] = self.args.quotechar
if self.args.quoting:
kwargs['quoting'] = self.args.quoting
if self.args.doublequote:
kwargs['doublequote'] = self.args.doublequote
if self.args.escapechar:
kwargs['escapechar'] = self.args.escapechar
if self.args.maxfieldsize:
kwargs['maxfieldsize'] = self.args.maxfieldsize
if self.args.skipinitialspace:
kwargs['skipinitialspace'] = self.args.skipinitialspace
return kwargs
def _extract_csv_writer_kwargs(self):
"""
        Extracts those command-line arguments that should be passed through to the output CSV writer.
"""
kwargs = {}
if 'l' not in self.override_flags and self.args.line_numbers:
kwargs['line_numbers'] = True
return kwargs
def _install_exception_handler(self):
"""
Installs a replacement for sys.excepthook, which handles pretty-printing uncaught exceptions.
"""
def handler(t, value, traceback):
if self.args.verbose:
sys.__excepthook__(t, value, traceback)
else:
# Special case handling for Unicode errors, which behave very strangely
# when cast with unicode()
if t == UnicodeDecodeError:
sys.stderr.write('Your file is not "%s" encoded. Please specify the correct encoding with the -e flag. Use the -v flag to see the complete error.\n' % self.args.encoding)
else:
sys.stderr.write('%s\n' % unicode(value).encode('utf-8'))
sys.excepthook = handler
def print_column_names(self):
"""
Pretty-prints the names and indices of all columns to a file-like object (usually sys.stdout).
"""
if self.args.no_header_row:
raise RequiredHeaderError, 'You cannot use --no-header-row with the -n or --names options.'
f = self.args.file
output = self.output_file
try:
zero_based=self.args.zero_based
except:
zero_based=False
rows = CSVKitReader(f, **self.reader_kwargs)
column_names = rows.next()
for i, c in enumerate(column_names):
if not zero_based:
i += 1
output.write('%3i: %s\n' % (i, c))
def match_column_identifier(column_names, c, zero_based=False):
"""
Determine what column a single column id (name or index) matches in a series of column names.
Note that integer values are *always* treated as positional identifiers. If you happen to have
column names which are also integers, you must specify them using a positional index.
"""
if isinstance(c, basestring) and not c.isdigit() and c in column_names:
return column_names.index(c)
else:
try:
c = int(c)
if not zero_based:
c -= 1
# Fail out if neither a column name nor an integer
except:
raise ColumnIdentifierError('Column identifier "%s" is neither an integer, nor a existing column\'s name.' % c)
# Fail out if index is 0-based
if c < 0:
raise ColumnIdentifierError('Column 0 is not valid; columns are 1-based.')
# Fail out if index is out of range
if c >= len(column_names):
raise ColumnIdentifierError('Index %i is beyond the last named column, "%s" at index %i.' % (c, column_names[-1], len(column_names) - 1))
return c
def parse_column_identifiers(ids, column_names, zero_based=False, excluded_columns=None):
"""
Parse a comma-separated list of column indices AND/OR names into a list of integer indices.
Ranges of integers can be specified with two integers separated by a '-' or ':' character. Ranges of
non-integers (e.g. column names) are not supported.
Note: Column indices are 1-based.
"""
columns = []
# If not specified, start with all columns
if not ids:
columns = range(len(column_names))
if columns and not excluded_columns:
return columns
if not columns:
for c in ids.split(','):
c = c.strip()
try:
columns.append(match_column_identifier(column_names, c, zero_based))
except ColumnIdentifierError:
if ':' in c:
a,b = c.split(':',1)
elif '-' in c:
a,b = c.split('-',1)
else:
raise
try:
if a:
a = int(a)
else:
a = 1
if b:
b = int(b) + 1
else:
b = len(column_names)
except ValueError:
raise ColumnIdentifierError("Invalid range %s. Ranges must be two integers separated by a - or : character.")
for x in range(a,b):
columns.append(match_column_identifier(column_names, x, zero_based))
excludes = []
if excluded_columns:
for c in excluded_columns.split(','):
c = c.strip()
try:
excludes.append(match_column_identifier(column_names, c, zero_based))
except ColumnIdentifierError:
if ':' in c:
a,b = c.split(':',1)
elif '-' in c:
a,b = c.split('-',1)
else:
raise
try:
if a:
a = int(a)
else:
a = 1
if b:
b = int(b) + 1
else:
b = len(column_names)
except ValueError:
raise ColumnIdentifierError("Invalid range %s. Ranges must be two integers separated by a - or : character.")
for x in range(a,b):
excludes.append(match_column_identifier(column_names, x, zero_based))
return [c for c in columns if c not in excludes]
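# Illustrative example (assumed inputs, not part of csvkit): with column_names = ['id', 'name', 'age', 'city'],
#   parse_column_identifiers('1,age,3-4', column_names)
# resolves the 1-based index, the column name, and the range to the 0-based indices [0, 2, 2, 3].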
| cypreess/csvkit | csvkit/cli.py | Python | mit | 15,243 |
#
#
#March 2014
#Adam Breznicky - TxDOT TPP - Mapping Group
#
#This is an independent script which requires a single parameter designating a directory.
#The script will walk through each subfolder and file within the designated directory, identifying the MXD files
#and re-sourcing the Comanche database connections to utilize the new 'Admin' prefix
#
#
#
#
#import modules
import arcpy, os
#variables
directory = ""
def re_source_admin():
#issue list
issues = []
#walk through each directory
for root, dirs, files in os.walk(directory):
#ignore file and personal geodatabases
specDir = root.split("\\")[-1]
dbsuffix = specDir.split(".")[-1]
if dbsuffix == "gdb" or dbsuffix == "mdb" or dbsuffix == "tbx":
pass
else:
for n in files:
#identify the mxds
if str(n).split(".")[-1] == "mxd":
print "working on: " + str(os.path.join(root, n))
map = arcpy.mapping.MapDocument(os.path.join(root, n))
dataframes = arcpy.mapping.ListDataFrames(map)
for df in dataframes:
layers = arcpy.mapping.ListLayers(map, "", df)
for lyr in layers:
try:
if "TPP_GIS.MCHAMB1." in lyr.dataSource:
print "lyr source: " + lyr.dataSource
newsource = lyr.dataSource.replace("TPP_GIS.MCHAMB1.", "TPP_GIS.APP_TPP_GIS_ADMIN.")
location = newsource.split("\\")[:-2]
locationFixed = "\\".join(location)
print locationFixed
newname = newsource.split("\\")[-1]
print newname
lyr.replaceDataSource(locationFixed, "SDE_WORKSPACE", newname)
print "lyr replaced: " + newsource
except:
if os.path.join(root, n) not in issues:
issues.append(os.path.join(root, n))
print lyr.name + " is not a feature layer"
tables = arcpy.mapping.ListTableViews(map, "", df)
for tbl in tables:
try:
if "TPP_GIS.MCHAMB1." in tbl.dataSource:
print "tbl source: " + tbl.dataSource
newsource = tbl.dataSource.replace("TPP_GIS.MCHAMB1.", "TPP_GIS.APP_TPP_GIS_ADMIN.")
location = newsource.split("\\")[:-2]
locationFixed = "\\".join(location)
print locationFixed
newname = newsource.split("\\")[-1]
print newname
tbl.replaceDataSource(locationFixed, "SDE_WORKSPACE", newname)
print "tbl replaced: " + newsource
except:
if os.path.join(root, n) not in issues:
issues.append(os.path.join(root, n))
print tbl.name + " is not a feature layer"
map.save()
re_source_admin()
print "success!"
print "the following MXDs contained issues with a layer having not a dataSource (e.g. a non-feature layer):"
for i in issues:
print str(i) | TxDOT/python | standalone/AdminPrefix_Resourcer_v1.py | Python | mit | 3,702 |
# -*- coding: utf-8 -*-
import sys
import time
from subprocess import call
#add the project folder to pythpath
sys.path.append('../../')
from library.components.SensorModule import SensorModule as Sensor
from library.components.MetaData import MetaData as MetaData
class Raspistill(Sensor):
def __init__(self):
super(Raspistill, self).__init__()
# ISO100
iso100MetaData = MetaData('ISO100')
iso100MetaData.setValueCallback(self.getIso100)
iso100MetaData.setUnitCallback(self.getUnit)
self.addMetaData(iso100MetaData)
# ISO200
iso200MetaData = MetaData('ISO200')
iso200MetaData.setValueCallback(self.getIso200)
iso200MetaData.setUnitCallback(self.getUnit)
self.addMetaData(iso200MetaData)
        # ISO400
iso400MetaData = MetaData('ISO400')
iso400MetaData.setValueCallback(self.getIso400)
iso400MetaData.setUnitCallback(self.getUnit)
self.addMetaData(iso400MetaData)
        # ISO800
iso800MetaData = MetaData('ISO800')
iso800MetaData.setValueCallback(self.getIso800)
iso800MetaData.setUnitCallback(self.getUnit)
self.addMetaData(iso800MetaData)
def getIso100(self):
filename = "photos/" + str(time.time()) + "-iso100.jpg"
call(["raspistill", "--ISO", "100", "-o", filename])
return str(filename)
def getIso200(self):
filename = "photos/" + str(time.time()) + "-iso200.jpg"
call(["raspistill", "--ISO", "200", "-o", filename])
return str(filename)
def getIso400(self):
filename = "photos/" + str(time.time()) + "-iso400.jpg"
call(["raspistill", "--ISO", "400", "-o", filename])
return str(filename)
def getIso800(self):
filename = "photos/" + str(time.time()) + "-iso800.jpg"
call(["raspistill", "--ISO", "800", "-o", filename])
return str(filename)
def getUnit(self):
return " Photo"
def getMetaData(self):
return super(Raspistill, self).getMetaData() | OpenSpaceProgram/pyOSP | library/sensors/Raspistill.py | Python | mit | 2,060 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ConnectionMonitorParameters(Model):
"""Parameters that define the operation to create a connection monitor.
All required parameters must be populated in order to send to Azure.
:param source: Required.
:type source:
~azure.mgmt.network.v2018_01_01.models.ConnectionMonitorSource
:param destination: Required.
:type destination:
~azure.mgmt.network.v2018_01_01.models.ConnectionMonitorDestination
:param auto_start: Determines if the connection monitor will start
automatically once created. Default value: True .
:type auto_start: bool
:param monitoring_interval_in_seconds: Monitoring interval in seconds.
Default value: 60 .
:type monitoring_interval_in_seconds: int
"""
_validation = {
'source': {'required': True},
'destination': {'required': True},
}
_attribute_map = {
'source': {'key': 'source', 'type': 'ConnectionMonitorSource'},
'destination': {'key': 'destination', 'type': 'ConnectionMonitorDestination'},
'auto_start': {'key': 'autoStart', 'type': 'bool'},
'monitoring_interval_in_seconds': {'key': 'monitoringIntervalInSeconds', 'type': 'int'},
}
def __init__(self, **kwargs):
super(ConnectionMonitorParameters, self).__init__(**kwargs)
self.source = kwargs.get('source', None)
self.destination = kwargs.get('destination', None)
self.auto_start = kwargs.get('auto_start', True)
self.monitoring_interval_in_seconds = kwargs.get('monitoring_interval_in_seconds', 60)
| lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/connection_monitor_parameters.py | Python | mit | 2,077 |
# https://projecteuler.net/problem=81
from projecteuler.FileReader import file_to_2D_array_of_ints
# this problem uses a similar solution to problem 18, "Maximum Path Sum 1."
# this problem uses a rectangular grid (matrix) instead of a pyramid
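# worked mini-example of the same recurrence (hypothetical 2x2 grid, not the problem's 80x80 file):
#   [[1, 2],
#    [5, 1]]
# sweeping from the bottom-right: 1 stays, 5 -> 5+1 = 6, 2 -> 2+1 = 3, and finally
# 1 -> 1 + min(3, 6) = 4, so the minimal right/down path sum is 4.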
matrix = file_to_2D_array_of_ints("p081.txt", ",")
y_max = len(matrix) - 1
x_max = len(matrix[0]) - 1
for y in range(y_max, -1, -1):
for x in range(x_max, -1, -1):
if y == y_max and x == x_max:
continue
elif y == y_max:
matrix[y][x] += matrix[y][x + 1]
elif x == x_max:
matrix[y][x] += matrix[y + 1][x]
else:
matrix[y][x] += min(matrix[y][x + 1], matrix[y + 1][x])
print(matrix[0][0])
| Peter-Lavigne/Project-Euler | p081.py | Python | mit | 699 |
import unittest
from hamlpy.parser.core import (
ParseException,
Stream,
peek_indentation,
read_line,
read_number,
read_quoted_string,
read_symbol,
read_whitespace,
read_word,
)
from hamlpy.parser.utils import html_escape
class ParserTest(unittest.TestCase):
def test_read_whitespace(self):
stream = Stream(" \t foo \n bar ")
assert read_whitespace(stream) == " \t "
assert stream.text[stream.ptr :] == "foo \n bar "
stream.ptr += 3 # skip over foo
assert read_whitespace(stream) == " "
assert stream.text[stream.ptr :] == "\n bar "
assert read_whitespace(stream, include_newlines=True) == "\n "
assert stream.text[stream.ptr :] == "bar "
stream.ptr += 3 # skip over bar
assert read_whitespace(stream) == " "
assert stream.text[stream.ptr :] == ""
def test_peek_indentation(self):
assert peek_indentation(Stream("content")) == 0
assert peek_indentation(Stream(" content")) == 2
assert peek_indentation(Stream("\n")) is None
assert peek_indentation(Stream(" \n")) is None
def test_quoted_string(self):
stream = Stream("'hello'---")
assert read_quoted_string(stream) == "hello"
assert stream.text[stream.ptr :] == "---"
stream = Stream('"this don\'t \\"x\\" hmm" not in string')
assert read_quoted_string(stream) == 'this don\'t "x" hmm'
assert stream.text[stream.ptr :] == " not in string"
self.assertRaises(ParseException, read_quoted_string, Stream('"no end quote...'))
def test_read_line(self):
stream = Stream("line1\n line2\n\nline4\n\n")
assert read_line(stream) == "line1"
assert read_line(stream) == " line2"
assert read_line(stream) == ""
assert read_line(stream) == "line4"
assert read_line(stream) == ""
assert read_line(stream) is None
assert read_line(Stream("last line ")) == "last line "
def test_read_number(self):
stream = Stream('123"')
assert read_number(stream) == "123"
assert stream.text[stream.ptr :] == '"'
stream = Stream("123.4xx")
assert read_number(stream) == "123.4"
assert stream.text[stream.ptr :] == "xx"
stream = Stream("0.0001 ")
assert read_number(stream) == "0.0001"
assert stream.text[stream.ptr :] == " "
def test_read_symbol(self):
stream = Stream("=> bar")
assert read_symbol(stream, ["=>", ":"]) == "=>"
assert stream.text[stream.ptr :] == " bar"
self.assertRaises(ParseException, read_symbol, Stream("foo"), ["=>"])
def test_read_word(self):
stream = Stream("foo_bar")
assert read_word(stream) == "foo_bar"
assert stream.text[stream.ptr :] == ""
stream = Stream("foo_bar ")
assert read_word(stream) == "foo_bar"
assert stream.text[stream.ptr :] == " "
stream = Stream("ng-repeat(")
assert read_word(stream) == "ng"
assert stream.text[stream.ptr :] == "-repeat("
stream = Stream("ng-repeat(")
assert read_word(stream, ("-",)) == "ng-repeat"
assert stream.text[stream.ptr :] == "("
stream = Stream("これはテストです...")
assert read_word(stream) == "これはテストです"
assert stream.text[stream.ptr :] == "..."
class UtilsTest(unittest.TestCase):
def test_html_escape(self):
assert html_escape("") == ""
assert html_escape("&<>\"'") == "&<>"'"
assert html_escape('{% trans "hello" %}') == '{% trans "hello" %}'
assert html_escape('{{ foo|default:"hello" }}') == '{{ foo|default:"hello" }}'
assert html_escape("{% }} & %}") == "{% }} & %}"
result = html_escape('<>{% trans "hello" %}<>{{ foo|default:"hello" }}<>')
        assert result == '&lt;&gt;{% trans "hello" %}&lt;&gt;{{ foo|default:"hello" }}&lt;&gt;'
| nyaruka/django-hamlpy | hamlpy/test/test_parser.py | Python | mit | 4,020 |
#! /usr/bin/env python
# Copyright (c) 2019 Red Hat, Inc.
# Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""Molecule distribution package setuptools installer."""
import setuptools
HAS_DIST_INFO_CMD = False
try:
import setuptools.command.dist_info
HAS_DIST_INFO_CMD = True
except ImportError:
"""Setuptools version is too old."""
ALL_STRING_TYPES = tuple(map(type, ('', b'', u'')))
MIN_NATIVE_SETUPTOOLS_VERSION = 34, 4, 0
"""Minimal setuptools having good read_configuration implementation."""
RUNTIME_SETUPTOOLS_VERSION = tuple(map(int, setuptools.__version__.split('.')))
"""Setuptools imported now."""
READ_CONFIG_SHIM_NEEDED = (
RUNTIME_SETUPTOOLS_VERSION < MIN_NATIVE_SETUPTOOLS_VERSION
)
def str_if_nested_or_str(s):
"""Turn input into a native string if possible."""
if isinstance(s, ALL_STRING_TYPES):
return str(s)
if isinstance(s, (list, tuple)):
return type(s)(map(str_if_nested_or_str, s))
if isinstance(s, (dict, )):
return stringify_dict_contents(s)
return s
def stringify_dict_contents(dct):
"""Turn dict keys and values into native strings."""
return {
str_if_nested_or_str(k): str_if_nested_or_str(v)
for k, v in dct.items()
}
if not READ_CONFIG_SHIM_NEEDED:
from setuptools.config import read_configuration, ConfigOptionsHandler
import setuptools.config
import setuptools.dist
# Set default value for 'use_scm_version'
setattr(setuptools.dist.Distribution, 'use_scm_version', False)
# Attach bool parser to 'use_scm_version' option
class ShimConfigOptionsHandler(ConfigOptionsHandler):
"""Extension class for ConfigOptionsHandler."""
@property
def parsers(self):
"""Return an option mapping with default data type parsers."""
_orig_parsers = super(ShimConfigOptionsHandler, self).parsers
return dict(use_scm_version=self._parse_bool, **_orig_parsers)
def parse_section_packages__find(self, section_options):
find_kwargs = super(
ShimConfigOptionsHandler, self
).parse_section_packages__find(section_options)
return stringify_dict_contents(find_kwargs)
setuptools.config.ConfigOptionsHandler = ShimConfigOptionsHandler
else:
"""This is a shim for setuptools<required."""
import functools
import io
import json
import sys
import warnings
try:
import setuptools.config
def filter_out_unknown_section(i):
def chi(self, *args, **kwargs):
i(self, *args, **kwargs)
self.sections = {
s: v for s, v in self.sections.items()
if s != 'packages.find'
}
return chi
setuptools.config.ConfigHandler.__init__ = filter_out_unknown_section(
setuptools.config.ConfigHandler.__init__,
)
except ImportError:
pass
def ignore_unknown_options(s):
@functools.wraps(s)
def sw(**attrs):
try:
ignore_warning_regex = (
r"Unknown distribution option: "
r"'(license_file|project_urls|python_requires)'"
)
warnings.filterwarnings(
'ignore',
message=ignore_warning_regex,
category=UserWarning,
module='distutils.dist',
)
return s(**attrs)
finally:
warnings.resetwarnings()
return sw
def parse_predicates(python_requires):
import itertools
import operator
sorted_operators_map = tuple(sorted(
{
'>': operator.gt,
'<': operator.lt,
'>=': operator.ge,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'': operator.eq,
}.items(),
key=lambda i: len(i[0]),
reverse=True,
))
def is_decimal(s):
return type(u'')(s).isdecimal()
conditions = map(str.strip, python_requires.split(','))
for c in conditions:
for op_sign, op_func in sorted_operators_map:
if not c.startswith(op_sign):
continue
raw_ver = itertools.takewhile(
is_decimal,
c[len(op_sign):].strip().split('.'),
)
ver = tuple(map(int, raw_ver))
yield op_func, ver
break
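    # Example (illustrative values): parse_predicates('>=2.7,!=3.0') yields the pairs
    # (operator.ge, (2, 7)) and (operator.ne, (3, 0)), which the function below
    # compares against sys.version_info.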
def validate_required_python_or_fail(python_requires=None):
if python_requires is None:
return
python_version = sys.version_info
preds = parse_predicates(python_requires)
for op, v in preds:
py_ver_slug = python_version[:max(len(v), 3)]
condition_matches = op(py_ver_slug, v)
if not condition_matches:
raise RuntimeError(
"requires Python '{}' but the running Python is {}".
format(
python_requires,
'.'.join(map(str, python_version[:3])),
)
)
def verify_required_python_runtime(s):
@functools.wraps(s)
def sw(**attrs):
try:
validate_required_python_or_fail(attrs.get('python_requires'))
except RuntimeError as re:
sys.exit('{} {!s}'.format(attrs['name'], re))
return s(**attrs)
return sw
setuptools.setup = ignore_unknown_options(setuptools.setup)
setuptools.setup = verify_required_python_runtime(setuptools.setup)
try:
from configparser import ConfigParser, NoSectionError
except ImportError:
from ConfigParser import ConfigParser, NoSectionError
ConfigParser.read_file = ConfigParser.readfp
def maybe_read_files(d):
"""Read files if the string starts with `file:` marker."""
FILE_FUNC_MARKER = 'file:'
d = d.strip()
if not d.startswith(FILE_FUNC_MARKER):
return d
descs = []
for fname in map(str.strip, str(d[len(FILE_FUNC_MARKER):]).split(',')):
with io.open(fname, encoding='utf-8') as f:
descs.append(f.read())
return ''.join(descs)
def cfg_val_to_list(v):
"""Turn config val to list and filter out empty lines."""
return list(filter(bool, map(str.strip, str(v).strip().splitlines())))
def cfg_val_to_dict(v):
"""Turn config val to dict and filter out empty lines."""
return dict(
map(lambda l: list(map(str.strip, l.split('=', 1))),
filter(bool, map(str.strip, str(v).strip().splitlines())))
)
def cfg_val_to_primitive(v):
"""Parse primitive config val to appropriate data type."""
return json.loads(v.strip().lower())
def read_configuration(filepath):
"""Read metadata and options from setup.cfg located at filepath."""
cfg = ConfigParser()
with io.open(filepath, encoding='utf-8') as f:
cfg.read_file(f)
md = dict(cfg.items('metadata'))
for list_key in 'classifiers', 'keywords', 'project_urls':
try:
md[list_key] = cfg_val_to_list(md[list_key])
except KeyError:
pass
try:
md['long_description'] = maybe_read_files(md['long_description'])
except KeyError:
pass
opt = dict(cfg.items('options'))
for list_key in 'include_package_data', 'use_scm_version', 'zip_safe':
try:
opt[list_key] = cfg_val_to_primitive(opt[list_key])
except KeyError:
pass
for list_key in 'scripts', 'install_requires', 'setup_requires':
try:
opt[list_key] = cfg_val_to_list(opt[list_key])
except KeyError:
pass
try:
opt['package_dir'] = cfg_val_to_dict(opt['package_dir'])
except KeyError:
pass
try:
opt_package_data = dict(cfg.items('options.package_data'))
if not opt_package_data.get('', '').strip():
opt_package_data[''] = opt_package_data['*']
del opt_package_data['*']
except (KeyError, NoSectionError):
opt_package_data = {}
try:
opt_extras_require = dict(cfg.items('options.extras_require'))
opt['extras_require'] = {}
for k, v in opt_extras_require.items():
opt['extras_require'][k] = cfg_val_to_list(v)
except NoSectionError:
pass
opt['package_data'] = {}
for k, v in opt_package_data.items():
opt['package_data'][k] = cfg_val_to_list(v)
try:
opt_exclude_package_data = dict(
cfg.items('options.exclude_package_data'),
)
if (
not opt_exclude_package_data.get('', '').strip()
and '*' in opt_exclude_package_data
):
opt_exclude_package_data[''] = opt_exclude_package_data['*']
del opt_exclude_package_data['*']
except NoSectionError:
pass
else:
opt['exclude_package_data'] = {}
for k, v in opt_exclude_package_data.items():
opt['exclude_package_data'][k] = cfg_val_to_list(v)
cur_pkgs = opt.get('packages', '').strip()
if '\n' in cur_pkgs:
opt['packages'] = cfg_val_to_list(opt['packages'])
elif cur_pkgs.startswith('find:'):
opt_packages_find = stringify_dict_contents(
dict(cfg.items('options.packages.find'))
)
opt['packages'] = setuptools.find_packages(**opt_packages_find)
return {'metadata': md, 'options': opt}
def cut_local_version_on_upload(version):
"""Generate a PEP440 local version if uploading to PyPI."""
import os
import setuptools_scm.version # only present during setup time
IS_PYPI_UPLOAD = os.getenv('PYPI_UPLOAD') == 'true' # set in tox.ini
return (
'' if IS_PYPI_UPLOAD
else setuptools_scm.version.get_local_node_and_date(version)
)
if HAS_DIST_INFO_CMD:
class patched_dist_info(setuptools.command.dist_info.dist_info):
def run(self):
self.egg_base = str_if_nested_or_str(self.egg_base)
return setuptools.command.dist_info.dist_info.run(self)
declarative_setup_params = read_configuration('setup.cfg')
"""Declarative metadata and options as read by setuptools."""
setup_params = {}
"""Explicit metadata for passing into setuptools.setup() call."""
setup_params = dict(setup_params, **declarative_setup_params['metadata'])
setup_params = dict(setup_params, **declarative_setup_params['options'])
if HAS_DIST_INFO_CMD:
setup_params['cmdclass'] = {
'dist_info': patched_dist_info,
}
setup_params['use_scm_version'] = {
'local_scheme': cut_local_version_on_upload,
}
# Patch incorrectly decoded package_dir option
# ``egg_info`` demands native strings failing with unicode under Python 2
# Ref https://github.com/pypa/setuptools/issues/1136
setup_params = stringify_dict_contents(setup_params)
__name__ == '__main__' and setuptools.setup(**setup_params)
| metacloud/molecule | setup.py | Python | mit | 12,584 |
from django.contrib import admin
from xbee_module.models import xbee_module
# Register your models here.
admin.site.register(xbee_module) | EricJones89/SmartHome | SmartHome/xbee_module/admin.py | Python | mit | 139 |
def represents_int(value):
try:
int(value)
return True
except ValueError:
return False
def bytes_to_gib(byte_value, round_digits=2):
return round(byte_value / 1024 / 1024 / float(1024), round_digits)
def count_to_millions(count_value, round_digits=3):
return round(count_value / float(1000000), round_digits)
| skomendera/PyMyTools | providers/value.py | Python | mit | 359 |
import mmap
import os.path
import re
from collections import OrderedDict
from .base_handler import BaseHandler
from .iso9660 import ISO9660Handler
from utils import MmappedFile, ConcatenatedFile
class GDIParseError(ValueError):
pass
class GDIHandler(BaseHandler):
def test(self):
if not re.match('^.*\.gdi', self.file_name, re.IGNORECASE):
return False
try:
self.parse()
except GDIParseError:
return False
return True
def parse(self):
text = self.read(0, 8*1024)
lines = text.decode('ascii').splitlines()
if len(lines) == 1:
raise GDIParseError
try:
n_tracks = int(lines.pop(0))
except ValueError:
raise GDIParseError
if len(lines) != n_tracks:
print(len(lines), n_tracks)
raise GDIParseError
# TODO figure out complete format
tracks = []
for track_i, line in enumerate(lines):
try:
match = re.match('(?P<index>\d+) (?P<sector>\d+) (?P<type>\d+) (?P<sector_size>\d+)'
' (?P<file_name>\S+) (\d+)', line)
if not match:
raise GDIParseError
track = match.groupdict()
for key in ('index', 'sector', 'type', 'sector_size'):
track[key] = int(track[key])
if track['index'] != track_i + 1:
raise GDIParseError
tracks.append(track)
except ValueError:
raise GDIParseError
return tracks
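    # Illustrative sketch of the .gdi layout parse() expects (an assumption, not
    # taken from the original repo): a track count on the first line, then one
    # line per track of the form
    # "<index> <sector> <type> <sector_size> <file_name> <0>", e.g.
    #
    #   3
    #   1 0 4 2352 track01.bin 0
    #   2 600 0 2352 track02.raw 0
    #   3 45000 4 2352 track03.bin 0
    #
    # Type 4 marks a data track and type 0 an audio track, as used by get_info().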
def get_info(self):
tracks = self.parse()
for track in tracks:
track['path'] = os.path.join(os.path.dirname(self.file_name), track['file_name'])
if len(tracks) > 3 and tracks[2]['type'] == 4 and tracks[-1]['type'] == 4:
# Dreamcast discs often contain two data tracks (track 3 and the last track) in addition to track 1.
mixed_mode = True
else:
mixed_mode = False
track_info = OrderedDict()
for track in tracks:
if mixed_mode and track == tracks[-1]:
continue
if mixed_mode and track['index'] == 3:
last_track = tracks[-1]
offset_gap = (last_track['sector'] - track['sector']) * 2352
track_name = 'Track {}+{}'.format(track['index'], last_track['index'])
file = ConcatenatedFile(file_names=[track['path'], last_track['path']],
offsets=[0, offset_gap]) # TODO handle different sector sizes
else:
track_name = 'Track {}'.format(track['index'])
file = MmappedFile(track['path'])
with file:
if track['type'] == 4:
handler = DCDataTrackHandler(file=file, file_name=track['file_name'], sector_offset=track['sector'], track_name=track_name)
if handler.test():
handler.get_info()
track_info[track_name] = handler.info
else:
track_info[track_name] = 'Data track in unknown format'
elif track['type'] == 0:
track_info[track_name] = 'Audio track'
else:
track_info[track_name] = 'Unknown'
self.info['Tracks'] = track_info
class DCDataTrackHandler(ISO9660Handler):
def test(self):
if not super().test():
return False
if self.read(0, 16) == b'SEGA SEGAKATANA ':
return True
else:
return False
def get_info(self):
header_info = OrderedDict()
header_info['Hardware ID'] = self.unpack('string', 0x00, 16, 0)
header_info['Maker ID'] = self.unpack('string', 0x10, 16, 0)
header_info['CRC'] = self.unpack('string', 0x20, 4, 0)
header_info['Device'] = self.unpack('string', 0x25, 6, 0)
header_info['Disc'] = self.unpack('string', 0x2b, 3, 0)
header_info['Region'] = self.unpack('string', 0x30, 8, 0).strip()
header_info['Peripherals'] = self.unpack('string', 0x38, 8, 0)
header_info['Product number'] = self.unpack('string', 0x40, 10, 0)
header_info['Product version'] = self.unpack('string', 0x4a, 6, 0)
header_info['Release date'] = self.unpack('string', 0x50, 16, 0)
header_info['Boot file'] = self.unpack('string', 0x60, 16, 0)
header_info['Company name'] = self.unpack('string', 0x70, 16, 0)
header_info['Software name'] = self.unpack('string', 0x80, 16, 0)
self.info['Header'] = header_info
super().get_info()
| drx/rom-info | handlers/dreamcast.py | Python | mit | 4,779 |
frame_len = .1
keys = {
'DOWN': 0x42,
'LEFT': 0x44,
'RIGHT': 0x43,
'UP': 0x41,
'Q': 0x71,
'ENTER': 0x0a,
}
apple_domain = 1000
food_values = {
'apple': 3,
}
game_sizes = {
's': (25, 20),
'm': (50, 40),
'l': (80, 40),
}
initial_size = 4
| tancredi/python-console-snake | snake/config.py | Python | mit | 282 |
from distutils.core import setup
import sslserver
setup(name="django-sslserver",
version=sslserver.__version__,
author="Ted Dziuba",
author_email="[email protected]",
description="An SSL-enabled development server for Django",
url="https://github.com/teddziuba/django-sslserver",
packages=["sslserver",
"sslserver.management",
"sslserver.management.commands"],
package_dir={"sslserver": "sslserver"},
package_data={"sslserver": ["certs/development.crt",
"certs/development.key",
"certs/server.csr"]},
install_requires=["setuptools",
"Django >= 1.4"],
license="MIT"
)
| mapennell/django-sslserver | setup.py | Python | mit | 759 |
"""
Dump Mapper
This script acts as a map/function over the pages in a set of MediaWiki
database dump files. This script allows the algorithm for processing a set of
pages to be spread across the available processor cores of a system for faster
analysis.
This script can also be imported as a module to expose the `map()` function
that returns an iterator over output rather than printing to stdout.
Examples:
dump_map revision_meta /dumps/enwiki-20110115-pages-meta-history* > ~/data/revision_meta.tsv
"""
import sys, logging, re, types, argparse, os, subprocess, importlib
from multiprocessing import Process, Queue, Lock, cpu_count, Value
from Queue import Empty
from .iterator import Iterator
class FileTypeError(Exception):pass
class Processor(Process):
"""
A processor for managing the reading of dump files from a queue and
    the application of a function to each 'page'.
"""
def __init__(self, input, processPage, output, callback, logger, noop):
"""
Constructor
:Parameters:
input : `multiprocessing.Queue`
                a queue of paths to dump files to process
processPage : function
a function to apply to each page of a dump file
output : `multiprocessing.Queue`
a queue to send processing output to
callback : function
a function to run upon completion
logger : `logging.Logger`
a logger object to send logging events to
"""
self.input = input
self.processPage = processPage
self.output = output
self.callback = callback
self.logger = logger
self.noop = noop
Process.__init__(self)
def run(self):
try:
while True:
fn = self.input.get(block=False)
self.logger.info("Processing dump file %s." % fn)
dump = Iterator(openDumpFile(fn))
for page in dump.readPages():
self.logger.debug("Processing page %s:%s." % (page.getId(), page.getTitle()))
try:
if self.noop: self.processPage(dump, page)
else:
for out in self.processPage(dump, page):
self.output.put(out)
except Exception as e:
self.logger.error(
"Failed to process page %s:%s - %s" % (
page.getId(),
page.getTitle(),
e
)
)
except Empty:
self.logger.info("Nothing left to do. Shutting down thread.")
finally:
self.callback()
def map(dumps, processPage, threads=cpu_count()-1, outputBuffer=100):
"""
Maps a function across all of the pages in a set of dump files and returns
an (order not guaranteed) iterator over the output. Increasing the
    `outputBuffer` size will allow more map applications to happen before the
    output is read, but will consume memory to do so. Big output buffers
    are beneficial when the resulting iterator from this map will be read in
bursts.
The `processPage` function must return an iterable object (such as a
generator). If your processPage function does not need to produce
output, make it return an empty iterable upon completion (like an empty
list).
:Parameters:
dumps : list
a list of paths to dump files to process
processPage : function
a function to run on every page of a set of dump files
threads : int
the number of individual processing threads to spool up
outputBuffer : int
the maximum number of output values to buffer.
"""
input = dumpFiles(dumps)
output = Queue(maxsize=outputBuffer)
running = Value('i', 0)
threads = max(1, min(int(threads), input.qsize()))
def dec(): running.value -= 1
for i in range(0, threads):
running.value += 1
Processor(
input,
processPage,
output,
dec,
logging.getLogger("Process %s" % i),
False
).start()
#output while processes are running
while running.value > 0:
try: yield output.get(timeout=.25)
except Empty: pass
#finish yielding output buffer
try:
while True: yield output.get(block=False)
except Empty:
pass
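# A minimal usage sketch (illustrative, not part of the original module): a
# processPage function passed to map() receives the dump and a page and must
# return an iterable of output rows. Only getId() and getTitle(), which are
# already used elsewhere in this module, are relied on here; the surrounding
# names are assumptions.
def example_process_page(dump, page):
    # Yield a single (page_id, title) row per page; map() flattens the rows
    # produced by all worker processes into one iterator.
    yield (page.getId(), page.getTitle())
# Example call:
#   for row in map(dump_paths, example_process_page, threads=4):
#       print(row)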
EXTENSIONS = {
'xml': "cat",
'bz2': "bzcat",
'7z': "7zr e -so",
'lzma':"lzcat"
}
"""
A map from file extension to the command to run to extract the data to standard out.
"""
EXT_RE = re.compile(r'\.([^\.]+)$')
"""
A regular expression for extracting the final extension of a file.
"""
def dumpFile(path):
"""
Verifies that a file exists at a given path and that the file has a
known extension type.
:Parameters:
path : `str`
the path to a dump file
"""
path = os.path.expanduser(path)
if not os.path.isfile(path):
raise FileTypeError("Can't find file %s" % path)
match = EXT_RE.search(path)
if match == None:
raise FileTypeError("No extension found for %s." % path)
elif match.groups()[0] not in EXTENSIONS:
raise FileTypeError("File type %r is not supported." % path)
else:
return path
def dumpFiles(paths):
"""
    Produces a `multiprocessing.Queue` containing a path for each value in
`paths` to be used by the `Processor`s.
:Parameters:
paths : iterable
the paths to add to the processing queue
"""
q = Queue()
for path in paths: q.put(dumpFile(path))
return q
def openDumpFile(path):
"""
Turns a path to a dump file into a file-like object of (decompressed)
XML data.
:Parameters:
path : `str`
the path to the dump file to read
"""
match = EXT_RE.search(path)
ext = match.groups()[0]
p = subprocess.Popen(
"%s %s" % (EXTENSIONS[ext], path),
shell=True,
stdout=subprocess.PIPE,
stderr=open(os.devnull, "w")
)
#sys.stderr.write("\n%s %s\n" % (EXTENSIONS[ext], path))
#sys.stderr.write(p.stdout.read(1000))
#return False
return p.stdout
def encode(v):
"""
Encodes an output value as a string intended to be read by eval()
"""
if type(v) == types.FloatType:
return str(int(v))
elif v == None:
return "\\N"
else:
return repr(v)
def main():
parser = argparse.ArgumentParser(
description='Maps a function across pages of MediaWiki dump files'
)
parser.add_argument(
'-o', '--out',
metavar="<path>",
type=lambda path:open(path, "w"),
        help='the path to an output file to write output to (defaults to stdout)',
default=sys.stdout
)
parser.add_argument(
'-t', '--threads',
metavar="",
type=int,
help='the number of threads to start (defaults to # of cores -1)',
default=cpu_count()-1
)
parser.add_argument(
'processor',
type=lambda path: importlib.import_module(path, path),
        help='the class path to the module that contains the process() function to be passed each page'
)
parser.add_argument(
'dump',
type=dumpFile,
help='the XML dump file(s) to process',
nargs="+"
)
parser.add_argument(
'--debug',
action="store_true",
default=False
)
args = parser.parse_args()
LOGGING_STREAM = sys.stderr
if args.debug: level = logging.DEBUG
else: level = logging.INFO
logging.basicConfig(
level=level,
stream=LOGGING_STREAM,
format='%(name)s: %(asctime)s %(levelname)-8s %(message)s',
datefmt='%b-%d %H:%M:%S'
)
logging.info("Starting dump processor with %s threads." % min(args.threads, len(args.dump)))
for row in map(args.dump, args.processor.process, threads=args.threads):
print('\t'.join(encode(v) for v in row))
if __name__ == "__main__":
main()
| maribelacosta/wikiwho | wmf/dump/map.py | Python | mit | 7,155 |
from __future__ import absolute_import
from __future__ import unicode_literals
import collections
import jsonschema
DEFAULT_GENERATE_CONFIG_FILENAME = 'generate_config.yaml'
GENERATE_OPTIONS_SCHEMA = {
'type': 'object',
'required': ['repo', 'database'],
'properties': {
'skip_default_metrics': {'type': 'boolean'},
'tempdir_location': {'type': ['string', 'null']},
'metric_package_names': {'type': 'array', 'items': {'type': 'string'}},
'repo': {'type': 'string'},
'database': {'type': 'string'},
},
}
class GenerateOptions(collections.namedtuple(
'GenerateOptions',
[
'skip_default_metrics',
'tempdir_location',
'metric_package_names',
'repo',
'database',
],
)):
@classmethod
def from_yaml(cls, yaml_dict):
jsonschema.validate(yaml_dict, GENERATE_OPTIONS_SCHEMA)
return cls(
skip_default_metrics=yaml_dict.get('skip_default_metrics', False),
tempdir_location=yaml_dict.get('tempdir_location', None),
metric_package_names=yaml_dict.get('metric_package_names', []),
repo=yaml_dict['repo'],
database=yaml_dict['database'],
)
def to_yaml(self):
ret = {'repo': self.repo, 'database': self.database}
if self.skip_default_metrics is True:
ret['skip_default_metrics'] = True
if self.metric_package_names:
ret['metric_package_names'] = list(self.metric_package_names)
if self.tempdir_location:
ret['tempdir_location'] = self.tempdir_location
return ret
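# Illustrative sketch (not taken from the original repo): a mapping that satisfies
# GENERATE_OPTIONS_SCHEMA. Only 'repo' and 'database' are required; the values
# below are made-up placeholders.
#
#   options = GenerateOptions.from_yaml({
#       'repo': 'git@github.com:example/project.git',
#       'database': 'database.db',
#       'metric_package_names': ['my_metrics'],
#   })
#   options.to_yaml()  # round-trips the non-default fields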
| ucarion/git-code-debt | git_code_debt/generate_config.py | Python | mit | 1,667 |
"""
@file
@brief Buffer as a logging function.
"""
from io import StringIO
class BufferedPrint:
"""
Buffered display. Relies on :epkg:`*py:io:StringIO`.
Use it as follows:
.. runpython::
:showcode:
def do_something(fLOG=None):
if fLOG:
fLOG("Did something.")
return 3
from pyquickhelper.loghelper import BufferedPrint
buf = BufferedPrint()
do_something(fLOG=buf.fprint)
print(buf)
"""
def __init__(self):
"constructor"
self.buffer = StringIO()
def fprint(self, *args, **kwargs):
"print function"
mes = " ".join(str(_) for _ in args)
self.buffer.write(mes)
self.buffer.write("\n")
def __str__(self):
"Returns the content."
return self.buffer.getvalue()
| sdpython/pyquickhelper | src/pyquickhelper/loghelper/buffered_flog.py | Python | mit | 844 |
from django.contrib import admin
from bananas.apps.appointment.forms import AppointmentForm
from bananas.apps.appointment.models import Appointment
from bananas.apps.appointment.models import AppointmentType
@admin.register(Appointment)
class AppointmentAdmin(admin.ModelAdmin):
list_display = (
'time',
'client_name',
'client_phone',
'client_email',
'appointment_type_name',
'counselor_name'
)
search_fields = (
'time',
'client_first_name',
'client_last_name',
'client_email',
'client_phone',
'appointment_type__name',
'counselor__first_name',
'counselor__last_name',
'counselor__email',
'counselor__phone',
)
list_filter = ('time', )
form = AppointmentForm
def get_queryset(self, request):
queryset = super(AppointmentAdmin, self).get_queryset(request)
return queryset.filter(deleted=False)
def client_name(self, obj):
return "{} {}".format(
obj.client_first_name, obj.client_last_name)
def counselor_name(self, obj):
return "{} {}".format(
obj.counselor.first_name, obj.counselor.last_name)
def appointment_type_name(self, obj):
return obj.appointment_type.name
client_name.short_description = 'Client'
counselor_name.short_description = 'Counselor'
appointment_type_name.short_description = 'Appointment type'
@admin.register(AppointmentType)
class AppointmentTypeAdmin(admin.ModelAdmin):
list_display = (
'name',
'appointment_count',
'message_template_count'
)
search_fields = (
'name',
)
def appointment_count(self, obj):
return obj.appointments.all().count()
def message_template_count(self, obj):
return obj.message_templates.all().count()
| tmcdonnell87/bananas | bananas/apps/appointment/admin.py | Python | mit | 1,876 |
# -*- coding: utf-8 -*-
import datetime
import os
# Something like a combination of the Composite and Command patterns
# Context acts as the handler while Command carries out the processing
class JobCommand(object):
def execute(self, context):
if context.getCurrentCommand() != 'begin':
raise Exception('illegal command ' + str(context.getCurrentCommand()))
command_list = CommandListCommand()
command_list.execute(context.next())
class CommandListCommand(object):
def execute(self, context):
while (True):
current_command = context.getCurrentCommand()
if current_command is None:
raise Exception('"end" not found ')
elif current_command == 'end':
break
else:
command = CommandCommand()
command.execute(context)
context.next()
class CommandCommand(object):
def execute(self, context):
current_command = context.getCurrentCommand()
if current_command == 'diskspace':
free_size = 100000000.0
max_size = 210000000.0
ratio = free_size / max_size * 100
print( 'Disk Free : %dMB (%.2f%%)' % (free_size / 1024 / 1024, ratio))
elif current_command == 'date':
print datetime.datetime.today().strftime("%Y/%m/%d")
elif current_command == 'line':
print '--------------------'
else:
raise Exception('invalid command [' + str(current_command) + ']')
class Context(object):
def __init__(self, command):
self.commands = []
self.current_index = 0
self.max_index = 0
self.commands = command.strip().split()
print self.commands
self.max_index = len(self.commands)
def next(self):
self.current_index += 1
print self.current_index
return self
def getCurrentCommand(self):
        if self.current_index >= len(self.commands):
return None
return self.commands[self.current_index].strip()
def execute(command):
job = JobCommand()
try:
job.execute(Context(command))
except Exception, e:
print e.args
if __name__ == '__main__':
command = 'begin date line diskspace end'
if command != '':
execute(command)
| t10471/python | practice/src/design_pattern/Interpreter.py | Python | mit | 2,299 |
from copy import copy
import silk.utils.six as six
from silk.singleton import Singleton
def default_permissions(user):
if user:
return user.is_staff
return False
class SilkyConfig(six.with_metaclass(Singleton, object)):
defaults = {
'SILKY_DYNAMIC_PROFILING': [],
'SILKY_IGNORE_PATHS': [],
'SILKY_HIDE_COOKIES': True,
'SILKY_IGNORE_QUERIES': [],
'SILKY_META': False,
'SILKY_AUTHENTICATION': False,
'SILKY_AUTHORISATION': False,
'SILKY_PERMISSIONS': default_permissions,
'SILKY_MAX_REQUEST_BODY_SIZE': -1,
'SILKY_MAX_RESPONSE_BODY_SIZE': -1,
'SILKY_INTERCEPT_PERCENT': 100,
'SILKY_INTERCEPT_FUNC': None,
'SILKY_PYTHON_PROFILER': False,
}
def _setup(self):
from django.conf import settings
options = {option: getattr(settings, option) for option in dir(settings) if option.startswith('SILKY')}
self.attrs = copy(self.defaults)
self.attrs.update(options)
def __init__(self):
super(SilkyConfig, self).__init__()
self._setup()
def __getattr__(self, item):
return self.attrs.get(item, None)
def __setattribute__(self, key, value):
self.attrs[key] = value
| Alkalit/silk | silk/config.py | Python | mit | 1,268 |
# -*- coding: utf-8 -*-
import django_dynamic_fixture as fixture
from unittest import mock
from django import urls
from django.contrib.admin.helpers import ACTION_CHECKBOX_NAME
from django.contrib.auth.models import User
from django.test import TestCase
from readthedocs.core.models import UserProfile
from readthedocs.projects.models import Project
class ProjectAdminActionsTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.owner = fixture.get(User)
cls.profile = fixture.get(UserProfile, user=cls.owner, banned=False)
cls.admin = fixture.get(User, is_staff=True, is_superuser=True)
cls.project = fixture.get(
Project,
main_language_project=None,
users=[cls.owner],
)
def setUp(self):
self.client.force_login(self.admin)
def test_project_ban_owner(self):
self.assertFalse(self.owner.profile.banned)
action_data = {
ACTION_CHECKBOX_NAME: [self.project.pk],
'action': 'ban_owner',
'index': 0,
}
resp = self.client.post(
urls.reverse('admin:projects_project_changelist'),
action_data,
)
self.assertTrue(self.project.users.filter(profile__banned=True).exists())
self.assertFalse(self.project.users.filter(profile__banned=False).exists())
def test_project_ban_multiple_owners(self):
owner_b = fixture.get(User)
profile_b = fixture.get(UserProfile, user=owner_b, banned=False)
self.project.users.add(owner_b)
self.assertFalse(self.owner.profile.banned)
self.assertFalse(owner_b.profile.banned)
action_data = {
ACTION_CHECKBOX_NAME: [self.project.pk],
'action': 'ban_owner',
'index': 0,
}
resp = self.client.post(
urls.reverse('admin:projects_project_changelist'),
action_data,
)
self.assertFalse(self.project.users.filter(profile__banned=True).exists())
self.assertEqual(self.project.users.filter(profile__banned=False).count(), 2)
@mock.patch('readthedocs.projects.admin.clean_project_resources')
def test_project_delete(self, clean_project_resources):
"""Test project and artifacts are removed."""
action_data = {
ACTION_CHECKBOX_NAME: [self.project.pk],
'action': 'delete_selected',
'index': 0,
'post': 'yes',
}
resp = self.client.post(
urls.reverse('admin:projects_project_changelist'),
action_data,
)
self.assertFalse(Project.objects.filter(pk=self.project.pk).exists())
clean_project_resources.assert_has_calls([
mock.call(
self.project,
),
])
| rtfd/readthedocs.org | readthedocs/rtd_tests/tests/projects/test_admin_actions.py | Python | mit | 2,812 |
class Base(object):
def meth(self):
pass
class Derived1(Base):
def meth(self):
return super().meth()
class Derived2(Derived1):
def meth(self):
return super().meth()
class Derived3(Derived1):
pass
class Derived4(Derived3, Derived2):
def meth(self):
return super().meth()
class Derived5(Derived1):
def meth(self):
return super().meth()
class Derived6(Derived5, Derived2):
def meth(self):
return super().meth()
| github/codeql | python/ql/test/3/library-tests/PointsTo/inheritance/test.py | Python | mit | 496 |
# -*- coding: utf-8 -*-
import sys
def print_progress (iteration, total, prefix = '', suffix = '', decimals = 1, barLength = 100):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
barLength - Optional : character length of bar (Int)
copied from: http://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
    Slightly adjusted so that it also works when total == 0 (a single call, shown as complete)
"""
formatStr = "{0:." + str(decimals) + "f}"
percent = formatStr.format(100 * (iteration / float(total))) if not total == 0 else formatStr.format(100)
filledLength = int(round(barLength * iteration / float(total))) if not total == 0 else int(round(barLength))
bar = '█' * filledLength + '-' * (barLength - filledLength)
    sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percent, '%', suffix))
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush() | DawudH/scrapy_real-estate | plot/print_progressbar.py | Python | mit | 1,262 |
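# A minimal usage sketch for print_progress above (illustrative only):
#
#   items = list(range(57))
#   for i, _ in enumerate(items):
#       print_progress(i + 1, len(items), prefix='Progress:', suffix='Complete', barLength=50)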
import os
from conan.tools.files.files import save_toolchain_args
from conan.tools.gnu import Autotools
from conans.test.utils.mocks import ConanFileMock
from conans.test.utils.test_files import temp_folder
def test_source_folder_works():
folder = temp_folder()
os.chdir(folder)
save_toolchain_args({
"configure_args": "-foo bar",
"make_args": ""}
)
conanfile = ConanFileMock()
conanfile.folders.set_base_install(folder)
sources = "/path/to/sources"
conanfile.folders.set_base_source(sources)
autotools = Autotools(conanfile)
autotools.configure(build_script_folder="subfolder")
assert conanfile.command.replace("\\", "/") == '"/path/to/sources/subfolder/configure" -foo bar'
autotools.configure()
assert conanfile.command.replace("\\", "/") == '"/path/to/sources/configure" -foo bar'
| conan-io/conan | conans/test/unittests/tools/gnu/autotools_test.py | Python | mit | 857 |
# -*- coding: utf-8 -*-
import sys
def diff(a,b):
return compareTree(a, b)
def getType(a):
    if isinstance(a, dict):
        return 'object'
    elif isinstance(a, list):
        return 'array'
    elif isinstance(a, str):
        return 'string'
    # bool must be tested before int: bool is a subclass of int in Python,
    # so True/False would otherwise be classified as 'number'
    elif isinstance(a, bool):
        return 'boolean'
    elif isinstance(a, int):
        return 'number'
    return 'null'
def compareTree(a, b):
diff = {'_sys_diff':{}}
a_type = getType(a)
b_type = getType(b)
if a_type == b_type:
if a_type == 'object':
return compareTree_dict(a, b)
elif a_type == 'array':
return compareTree_list(a, b)
else:
if a == b:
return a
else:
return {'+':a,'-':b}
else:
return {'+':a,'-':b}
def compareTree_dict(a, b):
diff = {'_sys_diff':{}}
for key in a:
diff[key] = None
for key in b:
diff[key] = None
for key in diff:
if key in a and key in b:
diff[key] = compareTree(a[key], b[key])
elif key in a:
diff['_sys_diff'][key] = '+'
diff[key] = a[key]
elif key in b:
diff['_sys_diff'][key] = '-'
diff[key] = b[key]
#else:
#print 'error ' + key
return diff
def compareTree_list(a, b):
diff = []
for i in a:
diff.append(i)
for i in b:
if i in diff:
pass
else:
diff.append(i)
return diff
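# Illustrative sketch (not part of the original module): diff() keeps values on
# which both sides agree, wraps changed leaves as {'+': new, '-': old}, and
# records keys present on only one side in '_sys_diff'. For example:
#
#   diff({'a': 1, 'b': 3}, {'a': 1, 'b': 2})
#   -> {'_sys_diff': {}, 'a': 1, 'b': {'+': 3, '-': 2}}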
def merge_part(a, b):
if isinstance(a, str):
return a
if isinstance(a, int):
return a
if isinstance(a, bool):
return a
if isinstance(a, dict):
pass
if isinstance(a, list):
return a
result = {}
keys = {}
for key in a:
keys[key] = None
for key in b:
keys[key] = None
for key in keys:
if key == '_sys_diff':
continue
#if key[0:4] == '_sys':
# continue
state = 0
if a.has_key('_sys_diff') and key in a['_sys_diff']:
if a['_sys_diff'][key] == '+':
state = 0
elif a['_sys_diff'][key] == '-':
state = 1
else:
state = 2
if b.has_key('_sys_diff') and key in b['_sys_diff']:
if b['_sys_diff'][key] == '+':
state += 3 * 0
elif b['_sys_diff'][key] == '-':
state += 3 * 1
else:
state += 3 * 2
if state == 0:
#
#a=+ b=+
result[key] = merge_part(a[key], b[key])
elif state == 1:
#- +
pass
elif state == 2:
#none +
result[key] = b[key]
elif state == 3:
#+ -
pass
elif state == 4:
#- -
pass
elif state == 5:
#none -
pass
elif state == 6:
#+ none
result[key] = a[key]
elif state == 7:
#- none
pass
elif state == 8:
#none none
if a.has_key(key) and b.has_key(key):
result[key] = merge_part(a[key], b[key])
elif a.has_key(key):
result[key] = a[key]
elif b.has_key(key):
result[key] = b[key]
elif state == 9:
pass
return result
def clean_sys_diff(a):
if isinstance(a, dict):
if a.has_key('_sys_diff'):
del a['_sys_diff']
for key in a:
clean_sys_diff(a[str(key)])
def merge(src, dest, base):
src_diff = diff(src, base)
dest_diff = diff(dest, base)
merged_model = merge_part(src_diff, dest_diff)
clean_sys_diff(merged_model)
return merged_model
| syuhei176/json-mergepy | jsonutil.py | Python | mit | 3,813 |
#!/usr/bin/env python3
# Copyright (c) 2016 The PlanBcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test segwit transactions and blocks on P2P network."""
from test_framework.mininode import *
from test_framework.test_framework import PlanbcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER
from test_framework.key import CECKey, CPubKey
import time
import random
from binascii import hexlify
# The versionbit bit used to signal activation of SegWit
VB_WITNESS_BIT = 1
VB_PERIOD = 144
VB_ACTIVATION_THRESHOLD = 108
VB_TOP_BITS = 0x20000000
MAX_SIGOP_COST = 80000
# Calculate the virtual size of a witness block:
# (base + witness/4)
def get_virtual_size(witness_block):
base_size = len(witness_block.serialize())
total_size = len(witness_block.serialize(with_witness=True))
# the "+3" is so we round up
vsize = int((3*base_size + total_size + 3)/4)
return vsize
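# Worked example (illustrative): for a block whose stripped serialization is
# 900,000 bytes and whose serialization with witnesses is 1,300,000 bytes,
# vsize = int((3*900000 + 1300000 + 3) / 4) = 1,000,000.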
class TestNode(NodeConnCB):
def __init__(self):
super().__init__()
self.getdataset = set()
def on_getdata(self, conn, message):
for inv in message.inv:
self.getdataset.add(inv.hash)
def announce_tx_and_wait_for_getdata(self, tx, timeout=60):
with mininode_lock:
self.last_message.pop("getdata", None)
self.send_message(msg_inv(inv=[CInv(1, tx.sha256)]))
self.wait_for_getdata(timeout)
def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
with mininode_lock:
self.last_message.pop("getdata", None)
self.last_message.pop("getheaders", None)
msg = msg_headers()
msg.headers = [ CBlockHeader(block) ]
if use_header:
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
self.wait_for_getheaders()
self.send_message(msg)
self.wait_for_getdata()
def request_block(self, blockhash, inv_type, timeout=60):
with mininode_lock:
self.last_message.pop("block", None)
self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
self.wait_for_block(blockhash, timeout)
return self.last_message["block"].block
def test_transaction_acceptance(self, tx, with_witness, accepted, reason=None):
tx_message = msg_tx(tx)
if with_witness:
tx_message = msg_witness_tx(tx)
self.send_message(tx_message)
self.sync_with_ping()
assert_equal(tx.hash in self.connection.rpc.getrawmempool(), accepted)
if (reason != None and not accepted):
# Check the rejection reason as well.
with mininode_lock:
assert_equal(self.last_message["reject"].reason, reason)
# Test whether a witness block had the correct effect on the tip
def test_witness_block(self, block, accepted, with_witness=True):
if with_witness:
self.send_message(msg_witness_block(block))
else:
self.send_message(msg_block(block))
self.sync_with_ping()
assert_equal(self.connection.rpc.getbestblockhash() == block.hash, accepted)
# Used to keep track of anyone-can-spend outputs that we can use in the tests
class UTXO(object):
def __init__(self, sha256, n, nValue):
self.sha256 = sha256
self.n = n
self.nValue = nValue
# Helper for getting the script associated with a P2PKH
def GetP2PKHScript(pubkeyhash):
return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)])
# Add signature for a P2PK witness program.
def sign_P2PK_witness_input(script, txTo, inIdx, hashtype, value, key):
tx_hash = SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, value)
signature = key.sign(tx_hash) + chr(hashtype).encode('latin-1')
txTo.wit.vtxinwit[inIdx].scriptWitness.stack = [signature, script]
txTo.rehash()
class SegWitTest(PlanbcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [["-whitelist=127.0.0.1"], ["-whitelist=127.0.0.1", "-acceptnonstdtxn=0"], ["-whitelist=127.0.0.1", "-vbparams=segwit:0:0"]]
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
self.sync_all()
''' Helpers '''
# Build a block on top of node0's tip.
def build_next_block(self, nVersion=4):
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount() + 1
block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.nVersion = nVersion
block.rehash()
return block
# Adds list of transactions to block, adds witness commitment, then solves.
def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
block.vtx.extend(tx_list)
add_witness_commitment(block, nonce)
block.solve()
return
''' Individual tests '''
def test_witness_services(self):
self.log.info("Verifying NODE_WITNESS service bit")
assert((self.test_node.connection.nServices & NODE_WITNESS) != 0)
# See if sending a regular transaction works, and create a utxo
# to use in later tests.
def test_non_witness_transaction(self):
# Mine a block with an anyone-can-spend coinbase,
# let it mature, then try to spend it.
self.log.info("Testing non-witness transaction")
block = self.build_next_block(nVersion=1)
block.solve()
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping() # make sure the block was processed
txid = block.vtx[0].sha256
self.nodes[0].generate(99) # let the block mature
# Create a transaction that spends the coinbase
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
tx.vout.append(CTxOut(49*100000000, CScript([OP_TRUE])))
tx.calc_sha256()
# Check that serializing it with or without witness is the same
# This is a sanity check of our testing framework.
assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize())
self.test_node.send_message(msg_witness_tx(tx))
self.test_node.sync_with_ping() # make sure the tx was processed
assert(tx.hash in self.nodes[0].getrawmempool())
# Save this transaction for later
self.utxo.append(UTXO(tx.sha256, 0, 49*100000000))
self.nodes[0].generate(1)
# Verify that blocks with witnesses are rejected before activation.
def test_unnecessary_witness_before_segwit_activation(self):
self.log.info("Testing behavior of unnecessary witnesses")
# For now, rely on earlier tests to have created at least one utxo for
# us to use
assert(len(self.utxo) > 0)
assert(get_bip9_status(self.nodes[0], 'segwit')['status'] != 'active')
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]
# Verify the hash with witness differs from the txid
# (otherwise our testing framework must be broken!)
tx.rehash()
assert(tx.sha256 != tx.calc_sha256(with_witness=True))
# Construct a segwit-signaling block that includes the transaction.
block = self.build_next_block(nVersion=(VB_TOP_BITS|(1 << VB_WITNESS_BIT)))
self.update_witness_block_with_transactions(block, [tx])
# Sending witness data before activation is not allowed (anti-spam
# rule).
self.test_node.test_witness_block(block, accepted=False)
# TODO: fix synchronization so we can test reject reason
# Right now, planbcoind delays sending reject messages for blocks
# until the future, making synchronization here difficult.
#assert_equal(self.test_node.last_message["reject"].reason, "unexpected-witness")
# But it should not be permanently marked bad...
# Resend without witness information.
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping()
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
sync_blocks(self.nodes)
# Create a p2sh output -- this is so we can pass the standardness
# rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
# in P2SH).
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# Now check that unnecessary witnesses can't be used to blind a node
# to a transaction, eg by violating standardness checks.
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptPubKey))
tx2.rehash()
self.test_node.test_transaction_acceptance(tx2, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# We'll add an unnecessary witness to this transaction that would cause
# it to be non-standard, to test that violating policy with a witness before
# segwit activation doesn't blind a node to a transaction. Transactions
# rejected for having a witness before segwit activation shouldn't be added
# to the rejection cache.
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, scriptPubKey))
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [b'a'*400000]
tx3.rehash()
# Note that this should be rejected for the premature witness reason,
# rather than a policy check, since segwit hasn't activated yet.
self.std_node.test_transaction_acceptance(tx3, True, False, b'no-witness-yet')
# If we send without witness, it should be accepted.
self.std_node.test_transaction_acceptance(tx3, False, True)
# Now create a new anyone-can-spend utxo for the next test.
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), CScript([p2sh_program])))
tx4.vout.append(CTxOut(tx3.vout[0].nValue-1000, CScript([OP_TRUE])))
tx4.rehash()
self.test_node.test_transaction_acceptance(tx3, False, True)
self.test_node.test_transaction_acceptance(tx4, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx4.sha256, 0, tx4.vout[0].nValue))
# Mine enough blocks for segwit's vb state to be 'started'.
def advance_to_segwit_started(self):
height = self.nodes[0].getblockcount()
# Will need to rewrite the tests here if we are past the first period
assert(height < VB_PERIOD - 1)
# Genesis block is 'defined'.
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'defined')
# Advance to end of period, status should now be 'started'
self.nodes[0].generate(VB_PERIOD-height-1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
# Mine enough blocks to lock in segwit, but don't activate.
# TODO: we could verify that lockin only happens at the right threshold of
# signalling blocks, rather than just at the right period boundary.
def advance_to_segwit_lockin(self):
height = self.nodes[0].getblockcount()
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
# Advance to end of period, and verify lock-in happens at the end
self.nodes[0].generate(VB_PERIOD-1)
height = self.nodes[0].getblockcount()
assert((height % VB_PERIOD) == VB_PERIOD - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
# Mine enough blocks to activate segwit.
# TODO: we could verify that activation only happens at the right threshold
# of signalling blocks, rather than just at the right period boundary.
def advance_to_segwit_active(self):
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
height = self.nodes[0].getblockcount()
self.nodes[0].generate(VB_PERIOD - (height%VB_PERIOD) - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'active')
# This test can only be run after segwit has activated
def test_witness_commitments(self):
self.log.info("Testing witness commitments")
# First try a correct witness commitment.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Test the test -- witness serialization should be different
assert(msg_witness_block(block).serialize() != msg_block(block).serialize())
# This empty block should be valid.
self.test_node.test_witness_block(block, accepted=True)
# Try to tweak the nonce
block_2 = self.build_next_block()
add_witness_commitment(block_2, nonce=28)
block_2.solve()
# The commitment should have changed!
assert(block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1])
# This should also be valid.
self.test_node.test_witness_block(block_2, accepted=True)
# Now test commitments with actual transactions
assert (len(self.utxo) > 0)
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# Let's construct a witness program
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
# tx2 will spend tx1, and send back to a regular anyone-can-spend address
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
# Add an extra OP_RETURN output that matches the witness commitment template,
# even though it has extra data after the incorrect commitment.
# This block should fail.
block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
block_3.solve()
self.test_node.test_witness_block(block_3, accepted=False)
# Add a different commitment with different nonce, but in the
# right location, and with some funds burned(!).
# This should succeed (nValue shouldn't affect finding the
# witness commitment).
add_witness_commitment(block_3, nonce=0)
block_3.vtx[0].vout[0].nValue -= 1
block_3.vtx[0].vout[-1].nValue += 1
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
assert(len(block_3.vtx[0].vout) == 4) # 3 OP_returns
block_3.solve()
self.test_node.test_witness_block(block_3, accepted=True)
# Finally test that a block with no witness transactions can
# omit the commitment.
block_4 = self.build_next_block()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program))
tx3.rehash()
block_4.vtx.append(tx3)
block_4.hashMerkleRoot = block_4.calc_merkle_root()
block_4.solve()
self.test_node.test_witness_block(block_4, with_witness=False, accepted=True)
# Update available utxo's for use in later test.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
def test_block_malleability(self):
self.log.info("Testing witness block malleability")
# Make sure that a block that has too big a virtual size
# because of a too-large coinbase witness is not permanently
# marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a'*5000000)
assert(get_virtual_size(block) > MAX_BLOCK_BASE_SIZE)
# We can't send over the p2p network, because this is too big to relay
# TODO: repeat this test with a block that can be relayed
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
assert(get_virtual_size(block) < MAX_BLOCK_BASE_SIZE)
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() == block.hash)
# Now make sure that malleating the witness nonce doesn't
# result in a block permanently marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Change the nonce -- should not cause the block to be permanently
# failed
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(1) ]
self.test_node.test_witness_block(block, accepted=False)
# Changing the witness nonce doesn't change the block hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(0) ]
self.test_node.test_witness_block(block, accepted=True)
def test_witness_block_size(self):
self.log.info("Testing witness block size limit")
# TODO: Test that non-witness carrying blocks can't exceed 1MB
# Skipping this test for now; this is covered in p2p-fullblocktest.py
# Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
block = self.build_next_block()
assert(len(self.utxo) > 0)
# Create a P2WSH transaction.
# The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
# This should give us plenty of room to tweak the spending tx's
# virtual size.
NUM_DROPS = 200 # 201 max ops per script!
NUM_OUTPUTS = 50
witness_program = CScript([OP_2DROP]*NUM_DROPS + [OP_TRUE])
witness_hash = uint256_from_str(sha256(witness_program))
scriptPubKey = CScript([OP_0, ser_uint256(witness_hash)])
prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
value = self.utxo[0].nValue
parent_tx = CTransaction()
parent_tx.vin.append(CTxIn(prevout, b""))
child_value = int(value/NUM_OUTPUTS)
for i in range(NUM_OUTPUTS):
parent_tx.vout.append(CTxOut(child_value, scriptPubKey))
parent_tx.vout[0].nValue -= 50000
assert(parent_tx.vout[0].nValue > 0)
parent_tx.rehash()
child_tx = CTransaction()
for i in range(NUM_OUTPUTS):
child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
for i in range(NUM_OUTPUTS):
child_tx.wit.vtxinwit.append(CTxInWitness())
child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a'*195]*(2*NUM_DROPS) + [witness_program]
child_tx.rehash()
self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
vsize = get_virtual_size(block)
additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize)*4
i = 0
while additional_bytes > 0:
# Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
extra_bytes = min(additional_bytes+1, 55)
block.vtx[-1].wit.vtxinwit[int(i/(2*NUM_DROPS))].scriptWitness.stack[i%(2*NUM_DROPS)] = b'a'*(195+extra_bytes)
additional_bytes -= extra_bytes
i += 1
block.vtx[0].vout.pop() # Remove old commitment
add_witness_commitment(block)
block.solve()
vsize = get_virtual_size(block)
assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
# Make sure that our test case would exceed the old max-network-message
# limit
assert(len(block.serialize(True)) > 2*1024*1024)
self.test_node.test_witness_block(block, accepted=False)
# Now resize the second transaction to make the block fit.
cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(cur_length-1)
block.vtx[0].vout.pop()
add_witness_commitment(block)
block.solve()
assert(get_virtual_size(block) == MAX_BLOCK_BASE_SIZE)
self.test_node.test_witness_block(block, accepted=True)
# Update available utxo's
self.utxo.pop(0)
self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
# submitblock will try to add the nonce automatically, so that mining
# software doesn't need to worry about doing so itself.
def test_submit_block(self):
block = self.build_next_block()
# Try using a custom nonce and then don't supply it.
# This shouldn't possibly work.
add_witness_commitment(block, nonce=1)
block.vtx[0].wit = CTxWitness() # drop the nonce
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
# Now redo commitment with the standard nonce, but let planbcoind fill it in.
add_witness_commitment(block, nonce=0)
block.vtx[0].wit = CTxWitness()
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# This time, add a tx with non-empty witness, but don't supply
# the commitment.
block_2 = self.build_next_block()
add_witness_commitment(block_2)
block_2.solve()
# Drop commitment and nonce -- submitblock should not fill in.
block_2.vtx[0].vout.pop()
block_2.vtx[0].wit = CTxWitness()
self.nodes[0].submitblock(bytes_to_hex_str(block_2.serialize(True)))
# Tip should not advance!
assert(self.nodes[0].getbestblockhash() != block_2.hash)
# Consensus tests of extra witness data in a transaction.
def test_extra_witness_data(self):
self.log.info("Testing extra witness data in tx")
assert(len(self.utxo) > 0)
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# First try extra witness data on a tx that doesn't require a witness
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-2000, scriptPubKey))
tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
tx.rehash()
self.update_witness_block_with_transactions(block, [tx])
# Extra witness data should not be allowed.
self.test_node.test_witness_block(block, accepted=False)
# Try extra signature data. Ok if we're not spending a witness output.
block.vtx[1].wit.vtxinwit = []
block.vtx[1].vin[0].scriptSig = CScript([OP_0])
block.vtx[1].rehash()
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Now try extra witness/signature data on an input that DOES require a
# witness
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
tx2.wit.vtxinwit[0].scriptWitness.stack = [ CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program ]
tx2.wit.vtxinwit[1].scriptWitness.stack = [ CScript([OP_TRUE]) ]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
# This has extra witness data, so it should fail.
self.test_node.test_witness_block(block, accepted=False)
# Now get rid of the extra witness, but add extra scriptSig data
tx2.vin[0].scriptSig = CScript([OP_TRUE])
tx2.vin[1].scriptSig = CScript([OP_TRUE])
tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
tx2.wit.vtxinwit[1].scriptWitness.stack = []
tx2.rehash()
add_witness_commitment(block)
block.solve()
# This has extra signature data for a witness input, so it should fail.
self.test_node.test_witness_block(block, accepted=False)
# Now get rid of the extra scriptsig on the witness input, and verify
# success (even with extra scriptsig data in the non-witness input)
tx2.vin[0].scriptSig = b""
tx2.rehash()
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Update utxo for later tests
self.utxo.pop(0)
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_max_witness_push_length(self):
''' Should only allow up to 520 byte pushes in witness stack '''
self.log.info("Testing maximum witness push size")
MAX_SCRIPT_ELEMENT_SIZE = 520
assert(len(self.utxo))
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
# First try a 521-byte stack element
tx2.wit.vtxinwit[0].scriptWitness.stack = [ b'a'*(MAX_SCRIPT_ELEMENT_SIZE+1), witness_program ]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now reduce the length of the stack element
tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(MAX_SCRIPT_ELEMENT_SIZE)
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Update the utxo for later tests
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_max_witness_program_length(self):
# Can create witness outputs that are long, but can't be greater than
# 10k bytes to successfully spend
self.log.info("Testing maximum witness program length")
assert(len(self.utxo))
MAX_PROGRAM_LENGTH = 10000
# This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
long_witness_program = CScript([b'a'*520]*19 + [OP_DROP]*63 + [OP_TRUE])
assert(len(long_witness_program) == MAX_PROGRAM_LENGTH+1)
long_witness_hash = sha256(long_witness_program)
long_scriptPubKey = CScript([OP_0, long_witness_hash])
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, long_scriptPubKey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*44 + [long_witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Try again with one less byte in the witness program
witness_program = CScript([b'a'*520]*19 + [OP_DROP]*62 + [OP_TRUE])
assert(len(witness_program) == MAX_PROGRAM_LENGTH)
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx.vout[0] = CTxOut(tx.vout[0].nValue, scriptPubKey)
tx.rehash()
tx2.vin[0].prevout.hash = tx.sha256
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*43 + [witness_program]
tx2.rehash()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_witness_input_length(self):
''' Ensure that vin length must match vtxinwit length '''
self.log.info("Testing witness input length")
assert(len(self.utxo))
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# Create a transaction that splits our utxo into many outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
nValue = self.utxo[0].nValue
for i in range(10):
tx.vout.append(CTxOut(int(nValue/10), scriptPubKey))
tx.vout[0].nValue -= 1000
assert(tx.vout[0].nValue >= 0)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
# Try various ways to spend tx that should all break.
# This "broken" transaction serializer will not normalize
# the length of vtxinwit.
class BrokenCTransaction(CTransaction):
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
tx2 = BrokenCTransaction()
for i in range(10):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.vout.append(CTxOut(nValue-3000, CScript([OP_TRUE])))
# First try using a too long vtxinwit
for i in range(11):
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now try using a too short vtxinwit
tx2.wit.vtxinwit.pop()
tx2.wit.vtxinwit.pop()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now make one of the intermediate witnesses be incorrect
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
tx2.wit.vtxinwit[5].scriptWitness.stack = [ witness_program ]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Fix the broken witness and the block should be accepted.
tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_witness_tx_relay_before_segwit_activation(self):
self.log.info("Testing relay of witness transactions")
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected for premature-witness, but should
# not be added to recently rejected list.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ]
tx.rehash()
tx_hash = tx.sha256
tx_value = tx.vout[0].nValue
# Verify that if a peer doesn't set nServices to include NODE_WITNESS,
# the getdata is just for the non-witness portion.
self.old_node.announce_tx_and_wait_for_getdata(tx)
assert(self.old_node.last_message["getdata"].inv[0].type == 1)
# Since we haven't delivered the tx yet, inv'ing the same tx from
# a witness transaction ought not result in a getdata.
try:
self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2)
self.log.error("Error: duplicate tx getdata!")
assert(False)
except AssertionError as e:
pass
# Delivering this transaction with witness should fail (no matter who
# its from)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.old_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
# But eliminating the witness should fix it
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
# Cleanup: mine the first transaction and update utxo
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx_hash, 0, tx_value))
# After segwit activates, verify that mempool:
# - rejects transactions with unnecessary/extra witnesses
# - accepts transactions with valid witnesses
# and that witness transactions are relayed to non-upgraded peers.
def test_tx_relay_after_segwit_activation(self):
self.log.info("Testing relay of witness transactions")
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected because we can't use a witness
# when spending a non-witness output.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ]
tx.rehash()
tx_hash = tx.sha256
# Verify that unnecessary witnesses are rejected.
self.test_node.announce_tx_and_wait_for_getdata(tx)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
# Verify that removing the witness succeeds.
self.test_node.announce_tx_and_wait_for_getdata(tx)
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
# Now try to add extra witness data to a valid witness tx.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptPubKey))
tx2.rehash()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
# Add a witness that is too large for IsStandard and check that it does not enter the reject filter
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
witness_program2 = CScript([b'a'*400000])
tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
tx3.rehash()
# Node will not be blinded to the transaction
self.std_node.announce_tx_and_wait_for_getdata(tx3)
self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
self.std_node.announce_tx_and_wait_for_getdata(tx3)
self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
# Remove witness stuffing, instead add extra witness push on stack
tx3.vout[0] = CTxOut(tx2.vout[0].nValue-1000, CScript([OP_TRUE]))
tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program ]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True)
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False)
# Get rid of the extra witness, and verify acceptance.
tx3.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
# Also check that old_node gets a tx announcement, even though this is
# a witness transaction.
self.old_node.wait_for_inv([CInv(1, tx2.sha256)]) # wait until tx2 was inv'ed
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
self.old_node.wait_for_inv([CInv(1, tx3.sha256)])
# Test that getrawtransaction returns correct witness information
# hash, size, vsize
raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
vsize = (len(tx3.serialize_with_witness()) + 3*len(tx3.serialize_without_witness()) + 3) / 4
assert_equal(raw_tx["vsize"], vsize)
assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
assert_equal(raw_tx["vin"][0]["txinwitness"][0], hexlify(witness_program).decode('ascii'))
assert(vsize != raw_tx["size"])
# Cleanup: mine the transactions and update utxo for next test
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
# Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG
# This is true regardless of segwit activation.
# Also test that we don't ask for blocks from unupgraded peers
def test_block_relay(self, segwit_activated):
self.log.info("Testing block relay")
blocktype = 2|MSG_WITNESS_FLAG
# test_node has set NODE_WITNESS, so all getdata requests should be for
# witness blocks.
# Test announcing a block via inv results in a getdata, and that
# announcing a version 4 or random VB block with a header results in a getdata
block1 = self.build_next_block()
block1.solve()
self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
self.test_node.test_witness_block(block1, True)
block2 = self.build_next_block(nVersion=4)
block2.solve()
self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
self.test_node.test_witness_block(block2, True)
block3 = self.build_next_block(nVersion=(VB_TOP_BITS | (1<<15)))
block3.solve()
self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
self.test_node.test_witness_block(block3, True)
# Check that we can getdata for witness blocks or regular blocks,
# and the right thing happens.
if segwit_activated == False:
# Before activation, we should be able to request old blocks with
# or without witness, and they should be the same.
chain_height = self.nodes[0].getblockcount()
# Pick 10 random blocks on main chain, and verify that getdata's
# for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal.
all_heights = list(range(chain_height+1))
random.shuffle(all_heights)
all_heights = all_heights[0:10]
for height in all_heights:
block_hash = self.nodes[0].getblockhash(height)
rpc_block = self.nodes[0].getblock(block_hash, False)
block_hash = int(block_hash, 16)
block = self.test_node.request_block(block_hash, 2)
wit_block = self.test_node.request_block(block_hash, 2|MSG_WITNESS_FLAG)
assert_equal(block.serialize(True), wit_block.serialize(True))
assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
else:
# After activation, witness blocks and non-witness blocks should
# be different. Verify rpc getblock() returns witness blocks, while
# getdata respects the requested type.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [])
# This gives us a witness commitment.
assert(len(block.vtx[0].wit.vtxinwit) == 1)
assert(len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1)
self.test_node.test_witness_block(block, accepted=True)
# Now try to retrieve it...
rpc_block = self.nodes[0].getblock(block.hash, False)
non_wit_block = self.test_node.request_block(block.sha256, 2)
wit_block = self.test_node.request_block(block.sha256, 2|MSG_WITNESS_FLAG)
assert_equal(wit_block.serialize(True), hex_str_to_bytes(rpc_block))
assert_equal(wit_block.serialize(False), non_wit_block.serialize())
assert_equal(wit_block.serialize(True), block.serialize(True))
# Test size, vsize, weight
rpc_details = self.nodes[0].getblock(block.hash, True)
assert_equal(rpc_details["size"], len(block.serialize(True)))
assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
weight = 3*len(block.serialize(False)) + len(block.serialize(True))
assert_equal(rpc_details["weight"], weight)
# Upgraded node should not ask for blocks from unupgraded
block4 = self.build_next_block(nVersion=4)
block4.solve()
self.old_node.getdataset = set()
# Blocks can be requested via direct-fetch (immediately upon processing the announcement)
# or via parallel download (with an indeterminate delay from processing the announcement)
# so to test that a block is NOT requested, we could guess a time period to sleep for,
# and then check. We can avoid the sleep() by taking advantage of transaction getdata's
# being processed after block getdata's, and announce a transaction as well,
# and then check to see if that particular getdata has been received.
# Since 0.14, inv's will only be responded to with a getheaders, so send a header
# to announce this block.
msg = msg_headers()
msg.headers = [ CBlockHeader(block4) ]
self.old_node.send_message(msg)
self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
assert(block4.sha256 not in self.old_node.getdataset)
# V0 segwit outputs should be standard after activation, but not before.
def test_standardness_v0(self, segwit_activated):
self.log.info("Testing standardness of v0 outputs (%s activation)" % ("after" if segwit_activated else "before"))
assert(len(self.utxo))
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(witness_program)
p2sh_scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# First prepare a p2sh output (so that spending it will pass standardness)
p2sh_tx = CTransaction()
p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
p2sh_tx.vout = [CTxOut(self.utxo[0].nValue-1000, p2sh_scriptPubKey)]
p2sh_tx.rehash()
# Mine it on test_node to create the confirmed output.
self.test_node.test_transaction_acceptance(p2sh_tx, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Now test standardness of v0 P2WSH outputs.
# Start by creating a transaction with two outputs.
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx.vout = [CTxOut(p2sh_tx.vout[0].nValue-10000, scriptPubKey)]
tx.vout.append(CTxOut(8000, scriptPubKey)) # Might burn this later
tx.rehash()
self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=segwit_activated)
# Now create something that looks like a P2PKH output. This won't be spendable.
scriptPubKey = CScript([OP_0, hash160(witness_hash)])
tx2 = CTransaction()
if segwit_activated:
# if tx was accepted, then we spend the second output.
tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
tx2.vout = [CTxOut(7000, scriptPubKey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
else:
# if tx wasn't accepted, we just re-spend the p2sh output we started with.
tx2.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx2.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000, scriptPubKey)]
tx2.rehash()
self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=segwit_activated)
# Now update self.utxo for later tests.
tx3 = CTransaction()
if segwit_activated:
# tx and tx2 were both accepted. Don't bother trying to reclaim the
# P2PKH output; just send tx's first output back to an anyone-can-spend.
sync_mempools([self.nodes[0], self.nodes[1]])
tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx3.vout = [CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE]))]
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
else:
# tx and tx2 didn't go anywhere; just clean up the p2sh_tx output.
tx3.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx3.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000, witness_program)]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
assert_equal(len(self.nodes[1].getrawmempool()), 0)
# Verify that future segwit upgraded transactions are non-standard,
# but valid in blocks. Can run this before and after segwit activation.
def test_segwit_versions(self):
self.log.info("Testing standardness/consensus for segwit versions (0-16)")
assert(len(self.utxo))
NUM_TESTS = 17 # will test OP_0, OP_1, ..., OP_16
if (len(self.utxo) < NUM_TESTS):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
split_value = (self.utxo[0].nValue - 4000) // NUM_TESTS
for i in range(NUM_TESTS):
tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
tx.rehash()
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop(0)
for i in range(NUM_TESTS):
self.utxo.append(UTXO(tx.sha256, i, split_value))
sync_blocks(self.nodes)
temp_utxo = []
tx = CTransaction()
count = 0
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
for version in list(range(OP_1, OP_16+1)) + [OP_0]:
count += 1
# First try to spend to a future version segwit scriptPubKey.
scriptPubKey = CScript([CScriptOp(version), witness_hash])
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
tx.vout = [CTxOut(self.utxo[0].nValue-1000, scriptPubKey)]
tx.rehash()
self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True)
self.utxo.pop(0)
temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
self.nodes[0].generate(1) # Mine all the transactions
sync_blocks(self.nodes)
assert(len(self.nodes[0].getrawmempool()) == 0)
# Finally, verify that version 0 -> version 1 transactions
# are non-standard
scriptPubKey = CScript([CScriptOp(OP_1), witness_hash])
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx2.vout = [CTxOut(tx.vout[0].nValue-1000, scriptPubKey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
tx2.rehash()
# Gets accepted to test_node, because standardness of outputs isn't
# checked with fRequireStandard
self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True)
self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=False)
temp_utxo.pop() # last entry in temp_utxo was the output we just spent
temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
# Spend everything in temp_utxo back to an OP_TRUE output.
tx3 = CTransaction()
total_value = 0
for i in temp_utxo:
tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
total_value += i.nValue
tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
tx3.vout.append(CTxOut(total_value - 1000, CScript([OP_TRUE])))
tx3.rehash()
# Spending a higher version witness output is not allowed by policy,
# even with fRequireStandard=false.
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False)
self.test_node.sync_with_ping()
with mininode_lock:
assert(b"reserved for soft-fork upgrades" in self.test_node.last_message["reject"].reason)
# Building a block with the transaction must be valid, however.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2, tx3])
self.test_node.test_witness_block(block, accepted=True)
sync_blocks(self.nodes)
# Add utxo to our list
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
def test_premature_coinbase_witness_spend(self):
self.log.info("Testing premature coinbase witness spend")
block = self.build_next_block()
# Change the output of the block to be a witness output.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
block.vtx[0].vout[0].scriptPubKey = scriptPubKey
# This next line will rehash the coinbase and update the merkle
# root, and solve.
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
spend_tx = CTransaction()
spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
spend_tx.rehash()
# Now test a premature spend.
self.nodes[0].generate(98)
sync_blocks(self.nodes)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
self.test_node.test_witness_block(block2, accepted=False)
# Advancing one more block should allow the spend.
self.nodes[0].generate(1)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
self.test_node.test_witness_block(block2, accepted=True)
sync_blocks(self.nodes)
def test_signature_version_1(self):
self.log.info("Testing segwit signature hash version 1")
key = CECKey()
key.set_secretbytes(b"9")
pubkey = CPubKey(key.get_pubkey())
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# First create a witness output for use in the tests.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True)
# Mine this transaction in preparation for following tests.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
sync_blocks(self.nodes)
self.utxo.pop(0)
# Test each hashtype
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
for sigflag in [ 0, SIGHASH_ANYONECANPAY ]:
for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
hashtype |= sigflag
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
tx.vout.append(CTxOut(prev_utxo.nValue - 1000, scriptPubKey))
tx.wit.vtxinwit.append(CTxInWitness())
# Too-large input value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue+1, key)
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=False)
# Too-small input value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue-1, key)
block.vtx.pop() # remove last tx
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=False)
# Now try correct value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
block.vtx.pop()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
# Test combinations of signature hashes.
# Split the utxo into a lot of outputs.
# Randomly choose up to 10 to spend, sign with different hashtypes, and
# output to a random number of outputs. Repeat NUM_TESTS times.
# Ensure that we've tested a situation where we use SIGHASH_SINGLE with
# an input index > number of outputs.
NUM_TESTS = 500
temp_utxos = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
split_value = prev_utxo.nValue // NUM_TESTS
for i in range(NUM_TESTS):
tx.vout.append(CTxOut(split_value, scriptPubKey))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
for i in range(NUM_TESTS):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
block = self.build_next_block()
used_sighash_single_out_of_bounds = False
for i in range(NUM_TESTS):
# Ping regularly to keep the connection alive
if (not i % 100):
self.test_node.sync_with_ping()
# Choose random number of inputs to use.
num_inputs = random.randint(1, 10)
# Create a slight bias for producing more utxos
num_outputs = random.randint(1, 11)
random.shuffle(temp_utxos)
assert(len(temp_utxos) > num_inputs)
tx = CTransaction()
total_value = 0
for i in range(num_inputs):
tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
total_value += temp_utxos[i].nValue
split_value = total_value // num_outputs
for i in range(num_outputs):
tx.vout.append(CTxOut(split_value, scriptPubKey))
for i in range(num_inputs):
# Now try to sign each input, using a random hashtype.
anyonecanpay = 0
if random.randint(0, 1):
anyonecanpay = SIGHASH_ANYONECANPAY
hashtype = random.randint(1, 3) | anyonecanpay
sign_P2PK_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
used_sighash_single_out_of_bounds = True
tx.rehash()
for i in range(num_outputs):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
temp_utxos = temp_utxos[num_inputs:]
block.vtx.append(tx)
# Test the block periodically, if we're close to maxblocksize
if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
block = self.build_next_block()
if (not used_sighash_single_out_of_bounds):
self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
# Test the transactions we've added to the block
if (len(block.vtx) > 1):
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
# Now test witness version 0 P2PKH transactions
pubkeyhash = hash160(pubkey)
scriptPKH = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
tx.vout.append(CTxOut(temp_utxos[0].nValue, scriptPKH))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
script = GetP2PKHScript(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
# Check that we can't have a scriptSig
tx2.vin[0].scriptSig = CScript([signature, pubkey])
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Move the signature to the witness.
block.vtx.pop()
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.vin[0].scriptSig = b""
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
temp_utxos.pop(0)
# Update self.utxo for later tests. Just spend everything in
# temp_utxos to a corresponding entry in self.utxo
tx = CTransaction()
index = 0
for i in temp_utxos:
# Just spend to our usual anyone-can-spend output
# Use SIGHASH_SINGLE|SIGHASH_ANYONECANPAY so we can build up
# the signatures as we go.
tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx.vout.append(CTxOut(i.nValue, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, index, SIGHASH_SINGLE|SIGHASH_ANYONECANPAY, i.nValue, key)
index += 1
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
for i in range(len(tx.vout)):
self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
# Test P2SH wrapped witness programs.
def test_p2sh_witness(self, segwit_activated):
self.log.info("Testing P2SH witness transactions")
assert(len(self.utxo))
# Prepare the p2sh-wrapped witness output
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
p2wsh_pubkey = CScript([OP_0, witness_hash])
p2sh_witness_hash = hash160(p2wsh_pubkey)
scriptPubKey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
scriptSig = CScript([p2wsh_pubkey]) # a push of the redeem script
# Fund the P2SH output
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
# Verify mempool acceptance and block validity
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True, with_witness=segwit_activated)
sync_blocks(self.nodes)
# Now test attempts to spend the output.
spend_tx = CTransaction()
spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), scriptSig))
spend_tx.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE])))
spend_tx.rehash()
# This transaction should not be accepted into the mempool pre- or
# post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
# will require a witness to spend a witness program regardless of
# segwit activation. Note that older versions of planbcoind that are not
# segwit-aware would also reject this for failing CLEANSTACK.
self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False)
# Try to put the witness script in the scriptSig, should also fail.
spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
spend_tx.rehash()
self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False)
# Now put the witness script in the witness, should succeed after
# segwit activates.
spend_tx.vin[0].scriptSig = scriptSig
spend_tx.rehash()
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a', witness_program ]
# Verify mempool acceptance
self.test_node.test_transaction_acceptance(spend_tx, with_witness=True, accepted=segwit_activated)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [spend_tx])
# If we're before activation, then sending this without witnesses
# should be valid. If we're after activation, then sending this with
# witnesses should be valid.
if segwit_activated:
self.test_node.test_witness_block(block, accepted=True)
else:
self.test_node.test_witness_block(block, accepted=True, with_witness=False)
# Update self.utxo
self.utxo.pop(0)
self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
# Test the behavior of starting up a segwit-aware node after the softfork
# has activated. As segwit requires different block data than pre-segwit
# nodes would have stored, this requires special handling.
# To enable this test, pass --oldbinary=<path-to-pre-segwit-planbcoind> to
# the test.
def test_upgrade_after_activation(self, node_id):
self.log.info("Testing software upgrade after softfork activation")
assert(node_id != 0) # node0 is assumed to be a segwit-active planbcoind
# Make sure the nodes are all up
sync_blocks(self.nodes)
# Restart with the new binary
self.stop_node(node_id)
self.nodes[node_id] = self.start_node(node_id, self.options.tmpdir)
connect_nodes(self.nodes[0], node_id)
sync_blocks(self.nodes)
# Make sure that this peer thinks segwit has activated.
assert(get_bip9_status(self.nodes[node_id], 'segwit')['status'] == "active")
# Make sure this peer's blocks match those of node0.
height = self.nodes[node_id].getblockcount()
while height >= 0:
block_hash = self.nodes[node_id].getblockhash(height)
assert_equal(block_hash, self.nodes[0].getblockhash(height))
assert_equal(self.nodes[0].getblock(block_hash), self.nodes[node_id].getblock(block_hash))
height -= 1
def test_witness_sigops(self):
'''Ensure sigop counting is correct inside witnesses.'''
self.log.info("Testing sigops limit")
assert(len(self.utxo))
# Keep this under MAX_OPS_PER_SCRIPT (201)
witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG]*5 + [OP_CHECKSIG]*193 + [OP_ENDIF])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
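# Witness sigop counting: each OP_CHECKSIG costs 1 sigop and each bare
# OP_CHECKMULTISIG (no preceding key-count push) costs 20, hence 20*5 + 193*1.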
sigops_per_script = 20*5 + 193*1
# We'll produce 2 extra outputs, one with a program that would take us
# over max sig ops, and one with a program that would exactly reach max
# sig ops
outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
# We chose the number of checkmultisigs/checksigs to make this work:
assert(extra_sigops_available < 100) # steer clear of MAX_OPS_PER_SCRIPT
# This script, when spent with the first
# N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
# would push us just over the block sigop limit.
witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available + 1) + [OP_ENDIF])
witness_hash_toomany = sha256(witness_program_toomany)
scriptPubKey_toomany = CScript([OP_0, witness_hash_toomany])
# If we spend this script instead, we would exactly reach our sigop
# limit (for witness sigops).
witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available) + [OP_ENDIF])
witness_hash_justright = sha256(witness_program_justright)
scriptPubKey_justright = CScript([OP_0, witness_hash_justright])
# First split our available utxo into a bunch of outputs
split_value = self.utxo[0].nValue // outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
for i in range(outputs):
tx.vout.append(CTxOut(split_value, scriptPubKey))
tx.vout[-2].scriptPubKey = scriptPubKey_toomany
tx.vout[-1].scriptPubKey = scriptPubKey_justright
tx.rehash()
block_1 = self.build_next_block()
self.update_witness_block_with_transactions(block_1, [tx])
self.test_node.test_witness_block(block_1, accepted=True)
tx2 = CTransaction()
# If we try to spend the first n-1 outputs from tx, that should be
# too many sigops.
total_value = 0
for i in range(outputs-1):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program ]
total_value += tx.vout[i].nValue
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_toomany ]
tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
tx2.rehash()
block_2 = self.build_next_block()
self.update_witness_block_with_transactions(block_2, [tx2])
self.test_node.test_witness_block(block_2, accepted=False)
# Try dropping the last input in tx2, and add an output that has
# too many sigops (contributing to legacy sigop count).
checksig_count = (extra_sigops_available // 4) + 1
scriptPubKey_checksigs = CScript([OP_CHECKSIG]*checksig_count)
tx2.vout.append(CTxOut(0, scriptPubKey_checksigs))
tx2.vin.pop()
tx2.wit.vtxinwit.pop()
tx2.vout[0].nValue -= tx.vout[-2].nValue
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx2])
self.test_node.test_witness_block(block_3, accepted=False)
# If we drop the last checksig in this output, the tx should succeed.
block_4 = self.build_next_block()
tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG]*(checksig_count-1))
tx2.rehash()
self.update_witness_block_with_transactions(block_4, [tx2])
self.test_node.test_witness_block(block_4, accepted=True)
# Reset the tip back down for the next test
sync_blocks(self.nodes)
for x in self.nodes:
x.invalidateblock(block_4.hash)
# Try replacing the last input of tx2 to be spending the last
# output of tx
block_5 = self.build_next_block()
tx2.vout.pop()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs-1), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_justright ]
tx2.rehash()
self.update_witness_block_with_transactions(block_5, [tx2])
self.test_node.test_witness_block(block_5, accepted=True)
# TODO: test p2sh sigop counting
def test_getblocktemplate_before_lockin(self):
self.log.info("Testing getblocktemplate setting of segwit versionbit (before lockin)")
# Node0 is segwit aware, node2 is not.
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate()
block_version = gbt_results['version']
# Even though this getblocktemplate call doesn't indicate segwit support,
# the segwit-aware node (node0) will still be signalling for segwit activation.
assert_equal((block_version & (1 << VB_WITNESS_BIT) != 0), node == self.nodes[0])
# If we don't specify the segwit rule, then we won't get a default
# commitment.
assert('default_witness_commitment' not in gbt_results)
# Workaround:
# Can either change the tip, or change the mempool and wait 5 seconds
# to trigger a recomputation of getblocktemplate.
txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
# Using mocktime lets us avoid sleep()
sync_mempools(self.nodes)
self.nodes[0].setmocktime(int(time.time())+10)
self.nodes[2].setmocktime(int(time.time())+10)
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate({"rules" : ["segwit"]})
block_version = gbt_results['version']
if node == self.nodes[2]:
# If this is a non-segwit node, we should still not get a witness
# commitment, nor a version bit signalling segwit.
assert_equal(block_version & (1 << VB_WITNESS_BIT), 0)
assert('default_witness_commitment' not in gbt_results)
else:
# For segwit-aware nodes, check the version bit and the witness
# commitment are correct.
assert(block_version & (1 << VB_WITNESS_BIT) != 0)
assert('default_witness_commitment' in gbt_results)
witness_commitment = gbt_results['default_witness_commitment']
# Check that the default_witness_commitment is computed correctly.
witness_root = CBlock.get_merkle_root([ser_uint256(0),
ser_uint256(txid)])
script = get_witness_script(witness_root, 0)
assert_equal(witness_commitment, bytes_to_hex_str(script))
# undo mocktime
self.nodes[0].setmocktime(0)
self.nodes[2].setmocktime(0)
# Uncompressed pubkeys are no longer supported in default relay policy,
# but (for now) are still valid in blocks.
def test_uncompressed_pubkey(self):
self.log.info("Testing uncompressed pubkeys")
# Segwit transactions using uncompressed pubkeys are not accepted
# under default policy, but should still pass consensus.
key = CECKey()
key.set_secretbytes(b"9")
key.set_compressed(False)
pubkey = CPubKey(key.get_pubkey())
assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey
assert(len(self.utxo) > 0)
utxo = self.utxo.pop(0)
# Test 1: P2WPKH
# First create a P2WPKH output that uses an uncompressed pubkey
pubkeyhash = hash160(pubkey)
scriptPKH = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
tx.vout.append(CTxOut(utxo.nValue-1000, scriptPKH))
tx.rehash()
# Confirm it in a block.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
# Now try to spend it. Send it to a P2WSH output, which we'll
# use in the next test.
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
scriptWSH = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptWSH))
script = GetP2PKHScript(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [ signature, pubkey ]
tx2.rehash()
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx2, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
# Test 2: P2WSH
# Try to spend the P2WSH output created in last test.
# Send it to a P2SH(P2WSH) output, which we'll use in the next test.
p2sh_witness_hash = hash160(scriptWSH)
scriptP2SH = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
scriptSig = CScript([scriptWSH])
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, scriptP2SH))
tx3.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx3, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx3])
self.test_node.test_witness_block(block, accepted=True)
# Test 3: P2SH(P2WSH)
# Try to spend the P2SH output created in the last test.
# Send it to a P2PKH output, which we'll use in the next test.
scriptPubKey = GetP2PKHScript(pubkeyhash)
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), scriptSig))
tx4.vout.append(CTxOut(tx3.vout[0].nValue-1000, scriptPubKey))
tx4.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx4, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx4])
self.test_node.test_witness_block(block, accepted=True)
# Test 4: Uncompressed pubkeys should still be valid in non-segwit
# transactions.
tx5 = CTransaction()
tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
tx5.vout.append(CTxOut(tx4.vout[0].nValue-1000, CScript([OP_TRUE])))
(sig_hash, err) = SignatureHash(scriptPubKey, tx5, 0, SIGHASH_ALL)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx5.vin[0].scriptSig = CScript([signature, pubkey])
tx5.rehash()
# Should pass policy and consensus.
self.test_node.test_transaction_acceptance(tx5, True, True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx5])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
def test_non_standard_witness(self):
self.log.info("Testing detection of non-standard P2WSH witness")
pad = chr(1).encode('latin-1')
# Create scripts for tests
scripts = []
scripts.append(CScript([OP_DROP] * 100))
scripts.append(CScript([OP_DROP] * 99))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))
p2wsh_scripts = []
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# For each script, generate a pair of P2WSH and P2SH-P2WSH output.
outputvalue = (self.utxo[0].nValue - 1000) // (len(scripts) * 2)
for i in scripts:
p2wsh = CScript([OP_0, sha256(i)])
p2sh = hash160(p2wsh)
p2wsh_scripts.append(p2wsh)
tx.vout.append(CTxOut(outputvalue, p2wsh))
tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
tx.rehash()
txid = tx.sha256
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Creating transactions for tests
p2wsh_txs = []
p2sh_txs = []
for i in range(len(scripts)):
p2wsh_tx = CTransaction()
p2wsh_tx.vin.append(CTxIn(COutPoint(txid,i*2)))
p2wsh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.rehash()
p2wsh_txs.append(p2wsh_tx)
p2sh_tx = CTransaction()
p2sh_tx.vin.append(CTxIn(COutPoint(txid,i*2+1), CScript([p2wsh_scripts[i]])))
p2sh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2sh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_tx.rehash()
p2sh_txs.append(p2sh_tx)
# Testing native P2WSH
# Witness stack size, excluding witnessScript, over 100 is non-standard
p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
self.std_node.test_transaction_acceptance(p2wsh_txs[0], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[0], True, True)
# Stack element size over 80 bytes is non-standard
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
# Standard nodes should accept if element size is not over 80 bytes
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
# witnessScript size at 3600 bytes is standard
p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
self.test_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
self.std_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
# witnessScript size at 3601 bytes is non-standard
p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
self.std_node.test_transaction_acceptance(p2wsh_txs[3], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[3], True, True)
# Repeating the same tests with P2SH-P2WSH
p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
self.std_node.test_transaction_acceptance(p2sh_txs[0], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[0], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2sh_txs[1], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[1], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2sh_txs[1], True, True)
p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
self.test_node.test_transaction_acceptance(p2sh_txs[2], True, True)
self.std_node.test_transaction_acceptance(p2sh_txs[2], True, True)
p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
self.std_node.test_transaction_acceptance(p2sh_txs[3], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[3], True, True)
self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node
# Valid but non-standard transactions in a block should be accepted by standard node
sync_blocks(self.nodes)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.utxo.pop(0)
def run_test(self):
# Setup the p2p connections and start up the network thread.
self.test_node = TestNode() # sets NODE_WITNESS|NODE_NETWORK
self.old_node = TestNode() # only NODE_NETWORK
self.std_node = TestNode() # for testing node1 (fRequireStandard=true)
self.p2p_connections = [self.test_node, self.old_node]
self.connections = []
self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node, services=NODE_NETWORK|NODE_WITNESS))
self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.old_node, services=NODE_NETWORK))
self.connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], self.std_node, services=NODE_NETWORK|NODE_WITNESS))
self.test_node.add_connection(self.connections[0])
self.old_node.add_connection(self.connections[1])
self.std_node.add_connection(self.connections[2])
NetworkThread().start() # Start up network handling in another thread
# Keep a place to store utxo's that can be used in later tests
self.utxo = []
# Test logic begins here
self.test_node.wait_for_verack()
self.log.info("Starting tests before segwit lock in:")
self.test_witness_services() # Verifies NODE_WITNESS
self.test_non_witness_transaction() # non-witness tx's are accepted
self.test_unnecessary_witness_before_segwit_activation()
self.test_block_relay(segwit_activated=False)
# Advance to segwit being 'started'
self.advance_to_segwit_started()
sync_blocks(self.nodes)
self.test_getblocktemplate_before_lockin()
sync_blocks(self.nodes)
# At lockin, nothing should change.
self.log.info("Testing behavior post lockin, pre-activation")
self.advance_to_segwit_lockin()
# Retest unnecessary witnesses
self.test_unnecessary_witness_before_segwit_activation()
self.test_witness_tx_relay_before_segwit_activation()
self.test_block_relay(segwit_activated=False)
self.test_p2sh_witness(segwit_activated=False)
self.test_standardness_v0(segwit_activated=False)
sync_blocks(self.nodes)
# Now activate segwit
self.log.info("Testing behavior after segwit activation")
self.advance_to_segwit_active()
sync_blocks(self.nodes)
# Test P2SH witness handling again
self.test_p2sh_witness(segwit_activated=True)
self.test_witness_commitments()
self.test_block_malleability()
self.test_witness_block_size()
self.test_submit_block()
self.test_extra_witness_data()
self.test_max_witness_push_length()
self.test_max_witness_program_length()
self.test_witness_input_length()
self.test_block_relay(segwit_activated=True)
self.test_tx_relay_after_segwit_activation()
self.test_standardness_v0(segwit_activated=True)
self.test_segwit_versions()
self.test_premature_coinbase_witness_spend()
self.test_uncompressed_pubkey()
self.test_signature_version_1()
self.test_non_standard_witness()
sync_blocks(self.nodes)
self.test_upgrade_after_activation(node_id=2)
self.test_witness_sigops()
if __name__ == '__main__':
SegWitTest().main()
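# Illustrative sketch (not part of the test above): how the weight and vsize
# values checked throughout this test are derived from the witness-included and
# stripped serializations. The formulas mirror the test's own arithmetic
# (weight = 3*stripped + total, vsize = weight/4 rounded up); the helper names
# and dummy byte strings below are assumptions for illustration only.
import math

def block_weight(serialized_with_witness, serialized_without_witness):
    """Weight = 3 * base (stripped) size + total (witness-included) size."""
    return 3 * len(serialized_without_witness) + len(serialized_with_witness)

def virtual_size(serialized_with_witness, serialized_without_witness):
    """vsize = weight / 4, rounded up."""
    return math.ceil(block_weight(serialized_with_witness, serialized_without_witness) / 4)

stripped, full = b"\x00" * 200, b"\x00" * 261  # dummy serializations
print(block_weight(full, stripped), virtual_size(full, stripped))  # 861 216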
| planbcoin/planbcoin | test/functional/p2p-segwit.py | Python | mit | 90,174 |
# -*- coding: utf-8 -*-
"""
Decorators module
"""
import logging
from docktors.core import decorated
from docktors.wdocker import DockerContainer
logger = logging.getLogger(__name__)
def docker(func=None, **kwargs):
"""
Decorator to startup and shutdown a docker container.
:param image: The name of the image to use.
:param command: The docker command to run.
:param ports: The port bindings to make.
:param volumes: The volumes to mount.
:param environment: The environment variables to set.
:param wait_for_log: A string to wait for in the logs before going into the function.
:param wait_for_port: A port to wait for before going into the function.
:param kill_signal: If you want to kill the container, the signal to use. Otherwise, only a stop will be made.
:param func: The function to be decorated.
:return: the decorated function
"""
docker_container = DockerContainer(**kwargs)
# Decorator used with keyword arguments: func is None here, so return a decorator
if func is None:
def decorator(func): # pylint: disable=locally-disabled, missing-docstring
return decorated(docker_container, func)
return decorator
return decorated(docker_container, func)
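# Hypothetical usage sketch for the @docker decorator above. The keyword names
# come from its docstring (image, ports, wait_for_log); the image name, the
# port-binding format and the log string are illustrative assumptions, and how
# the wrapped function interacts with the started container is determined by
# docktors.core.decorated, which is not shown here.
from docktors.decorators import docker

@docker(image='redis:alpine',                         # assumed image
        ports=[(6379, 6379)],                         # assumed binding format
        wait_for_log='Ready to accept connections')   # assumed log line
def integration_test():
    # Runs only once the container is considered up; it is stopped (or killed,
    # if kill_signal was given) again when the function returns.
    print('container is running')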
| Patouche/pydocktors | docktors/decorators.py | Python | mit | 1,226 |
import json
class JSONRenderer(object):
def render(self, data):
return json.dumps(data)
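# Minimal usage sketch for JSONRenderer above: render() simply delegates to
# json.dumps, so any JSON-serialisable structure works.
renderer = JSONRenderer()
assert renderer.render({'status': 'ok'}) == '{"status": "ok"}'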
| Quantify-world/apification | src/apification/renderers.py | Python | mit | 101 |
from django.apps import AppConfig
class MemosConfig(AppConfig):
name = 'memos'
| a-kirin/Dockerfiles | sample01/web/sample01/memos/apps.py | Python | mit | 85 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
def function():
return "pineapple"
def function2():
return "tractor"
class Class(object):
def method(self):
return "parrot"
class AboutMethodBindings(Koan):
def test_methods_are_bound_to_an_object(self):
obj = Class()
self.assertEqual(True, obj.method.im_self == obj)
def test_methods_are_also_bound_to_a_function(self):
obj = Class()
self.assertEqual('parrot', obj.method())
self.assertEqual('parrot', obj.method.im_func(obj))
def test_functions_have_attributes(self):
self.assertEqual(31, len(dir(function)))
self.assertEqual(True, dir(function) == dir(Class.method.im_func))
def test_bound_methods_have_different_attributes(self):
obj = Class()
self.assertEqual(23, len(dir(obj.method)))
def test_setting_attributes_on_an_unbound_function(self):
function.cherries = 3
self.assertEqual(3, function.cherries)
def test_setting_attributes_on_a_bound_method_directly(self):
obj = Class()
try:
obj.method.cherries = 3
except AttributeError as ex:
self.assertMatch('object has no attribute', ex[0])
def test_setting_attributes_on_methods_by_accessing_the_inner_function(self):
obj = Class()
obj.method.im_func.cherries = 3
self.assertEqual(3, obj.method.cherries)
def test_functions_can_have_inner_functions(self):
function2.get_fruit = function
self.assertEqual('pineapple', function2.get_fruit())
def test_inner_functions_are_unbound(self):
function2.get_fruit = function
try:
cls = function2.get_fruit.im_self
except AttributeError as ex:
self.assertMatch('object has no attribute', ex[0])
# ------------------------------------------------------------------
class BoundClass(object):
def __get__(self, obj, cls):
return (self, obj, cls)
binding = BoundClass()
def test_get_descriptor_resolves_attribute_binding(self):
bound_obj, binding_owner, owner_type = self.binding
# Look at BoundClass.__get__():
# bound_obj = self
# binding_owner = obj
# owner_type = cls
self.assertEqual('BoundClass', bound_obj.__class__.__name__)
self.assertEqual('AboutMethodBindings', binding_owner.__class__.__name__)
self.assertEqual(AboutMethodBindings, owner_type)
# ------------------------------------------------------------------
class SuperColor(object):
def __init__(self):
self.choice = None
def __set__(self, obj, val):
self.choice = val
color = SuperColor()
def test_set_descriptor_changes_behavior_of_attribute_assignment(self):
self.assertEqual(None, self.color.choice)
self.color = 'purple'
self.assertEqual('purple', self.color.choice)
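# Companion sketch (not part of the koan above) of the descriptor protocol that
# BoundClass and SuperColor rely on: __get__ intercepts attribute reads and
# __set__ intercepts attribute assignment on instances of the owning class.
class Logged(object):
    def __init__(self):
        self.value = None
    def __get__(self, obj, cls):
        return self.value
    def __set__(self, obj, val):
        self.value = val

class Config(object):
    colour = Logged()

c = Config()
c.colour = 'purple'   # routed through Logged.__set__
print(c.colour)       # routed through Logged.__get__ -> 'purple'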
| jpvantuyl/python_koans | python2/koans/about_method_bindings.py | Python | mit | 2,996 |
#
# (c) Simon Marlow 2002
#
import sys
import os
import string
import getopt
import platform
import time
import re
from testutil import *
from testglobals import *
# Readline sometimes spews out ANSI escapes for some values of TERM,
# which result in test failures. Thus set TERM to a nice, simple, safe
# value.
os.environ['TERM'] = 'vt100'
if sys.platform == "cygwin":
cygwin = True
else:
cygwin = False
global config
config = getConfig() # get it from testglobals
# -----------------------------------------------------------------------------
# cmd-line options
long_options = [
"config=", # config file
"rootdir=", # root of tree containing tests (default: .)
"output-summary=", # file in which to save the (human-readable) summary
"only=", # just this test (can be give multiple --only= flags)
"way=", # just this way
"skipway=", # skip this way
"threads=", # threads to run simultaneously
]
opts, args = getopt.getopt(sys.argv[1:], "e:", long_options)
for opt,arg in opts:
if opt == '--config':
execfile(arg)
# -e is a string to execute from the command line. For example:
# testframe -e 'config.compiler=ghc-5.04'
if opt == '-e':
exec arg
if opt == '--rootdir':
config.rootdirs.append(arg)
if opt == '--output-summary':
config.output_summary = arg
if opt == '--only':
config.only.append(arg)
if opt == '--way':
if (arg not in config.run_ways and arg not in config.compile_ways and arg not in config.other_ways):
sys.stderr.write("ERROR: requested way \'" +
arg + "\' does not exist\n")
sys.exit(1)
config.cmdline_ways = [arg] + config.cmdline_ways
if (arg in config.other_ways):
config.run_ways = [arg] + config.run_ways
config.compile_ways = [arg] + config.compile_ways
if opt == '--skipway':
if (arg not in config.run_ways and arg not in config.compile_ways and arg not in config.other_ways):
sys.stderr.write("ERROR: requested way \'" +
arg + "\' does not exist\n")
sys.exit(1)
config.other_ways = filter(neq(arg), config.other_ways)
config.run_ways = filter(neq(arg), config.run_ways)
config.compile_ways = filter(neq(arg), config.compile_ways)
if opt == '--threads':
config.threads = int(arg)
config.use_threads = 1
if config.use_threads == 1:
# Trac #1558 says threads don't work in python 2.4.4, but do
# in 2.5.2. Probably >= 2.5 is sufficient, but let's be
# conservative here.
# Some versions of python have things like '1c1' for some of
# these components (see trac #3091), but int() chokes on the
# 'c1', so we drop it.
(maj, min, pat) = platform.python_version_tuple()
# We wrap maj, min, and pat in str() to work around a bug in python
# 2.6.1
maj = int(re.sub('[^0-9].*', '', str(maj)))
min = int(re.sub('[^0-9].*', '', str(min)))
pat = int(re.sub('[^0-9].*', '', str(pat)))
if (maj, min, pat) < (2, 5, 2):
print "Warning: Ignoring request to use threads as python version < 2.5.2"
config.use_threads = 0
if windows:
print "Warning: Ignoring request to use threads as running on Windows"
config.use_threads = 0
# Try to use UTF8
if windows:
import ctypes
if cygwin:
# Is this actually right? Which calling convention does it use?
# As of the time of writing, ctypes.windll doesn't exist in the
# cygwin python, anyway.
mydll = ctypes.cdll
else:
mydll = ctypes.windll
# This actually leaves the terminal in codepage 65001 (UTF8) even
# after python terminates. We really ought to remember the old codepage
# and set it back.
if mydll.kernel32.SetConsoleCP(65001) == 0:
raise Exception("Failure calling SetConsoleCP(65001)")
if mydll.kernel32.SetConsoleOutputCP(65001) == 0:
raise Exception("Failure calling SetConsoleOutputCP(65001)")
else:
# Try and find a utf8 locale to use
# First see if we already have a UTF8 locale
h = os.popen('locale | grep LC_CTYPE | grep -i utf', 'r')
v = h.read()
h.close()
if v == '':
# We don't, so now see if 'locale -a' works
h = os.popen('locale -a', 'r')
v = h.read()
h.close()
if v != '':
# If it does then use the first utf8 locale that is available
h = os.popen('locale -a | grep -i "utf8\|utf-8" 2>/dev/null', 'r')
v = h.readline().strip()
h.close()
if v != '':
os.environ['LC_ALL'] = v
print "setting LC_ALL to", v
else:
print 'WARNING: No UTF8 locale found.'
print 'You may get some spurious test failures.'
# This has to come after arg parsing as the args can change the compiler
get_compiler_info()
# Can't import this earlier as we need to know if threading will be
# enabled or not
from testlib import *
# On Windows we need to set $PATH to include the paths to all the DLLs
# in order for the dynamic library tests to work.
if windows or darwin:
pkginfo = getStdout([config.ghc_pkg, 'dump'])
topdir = re.sub('\\\\','/',getStdout([config.compiler, '--print-libdir'])).rstrip()
for line in pkginfo.split('\n'):
if line.startswith('library-dirs:'):
path = line.rstrip()
path = re.sub('^library-dirs: ', '', path)
path = re.sub('\\$topdir', topdir, path)
if path.startswith('"'):
path = re.sub('^"(.*)"$', '\\1', path)
path = re.sub('\\\\(.)', '\\1', path)
if windows:
if cygwin:
# On cygwin we can't put "c:\foo" in $PATH, as : is a
# field separator. So convert to /cygdrive/c/foo instead.
# Other pythons use ; as the separator, so no problem.
path = re.sub('([a-zA-Z]):', '/cygdrive/\\1', path)
path = re.sub('\\\\', '/', path)
os.environ['PATH'] = os.pathsep.join([path, os.environ.get("PATH", "")])
else:
# darwin
os.environ['DYLD_LIBRARY_PATH'] = os.pathsep.join([path, os.environ.get("DYLD_LIBRARY_PATH", "")])
global testopts_local
testopts_local.x = TestOptions()
global thisdir_testopts
thisdir_testopts = getThisDirTestOpts()
if config.use_threads:
t.lock = threading.Lock()
t.thread_pool = threading.Condition(t.lock)
t.running_threads = 0
# if timeout == -1 then we try to calculate a sensible value
if config.timeout == -1:
config.timeout = int(read_no_crs(config.top + '/timeout/calibrate.out'))
print 'Timeout is ' + str(config.timeout)
# -----------------------------------------------------------------------------
# The main dude
if config.rootdirs == []:
config.rootdirs = ['.']
t_files = findTFiles(config.rootdirs)
print 'Found', len(t_files), '.T files...'
t = getTestRun()
# Avoid cmd.exe built-in 'date' command on Windows
if not windows:
t.start_time = chop(os.popen('date').read())
else:
t.start_time = 'now'
print 'Beginning test run at', t.start_time
# set stdout to unbuffered (is this the best way to do it?)
sys.stdout.flush()
sys.stdout = os.fdopen(sys.__stdout__.fileno(), "w", 0)
# First collect all the tests to be run
for file in t_files:
print '====> Scanning', file
newTestDir(os.path.dirname(file))
try:
execfile(file)
except:
print '*** framework failure: found an error while executing ', file, ':'
t.n_framework_failures = t.n_framework_failures + 1
traceback.print_exc()
# Now run all the tests
if config.use_threads:
t.running_threads=0
for oneTest in allTests:
oneTest()
if config.use_threads:
t.thread_pool.acquire()
while t.running_threads>0:
t.thread_pool.wait()
t.thread_pool.release()
summary(t, sys.stdout)
if config.output_summary != '':
summary(t, open(config.output_summary, 'w'))
sys.exit(0)
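# Companion sketch (not part of the driver above): a minimal, self-contained
# illustration of the option-parsing pattern used above -- getopt long options
# plus the "-e" escape hatch that exec()s arbitrary config assignments. Option
# names are taken from the code above; the tiny Config class stands in for
# testglobals.getConfig().
import getopt

class Config(object):
    def __init__(self):
        self.compiler = None
        self.threads = 1
        self.only = []

config = Config()
argv = ["-e", "config.compiler='ghc-5.04'", "--threads=4", "--only=T1234"]
opts, args = getopt.getopt(argv, "e:", ["threads=", "only="])
for opt, arg in opts:
    if opt == "-e":
        exec(arg)                      # same trick as the driver above
    elif opt == "--threads":
        config.threads = int(arg)
    elif opt == "--only":
        config.only.append(arg)

print("%s %d %s" % (config.compiler, config.threads, config.only))  # ghc-5.04 4 ['T1234']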
| iliastsi/gac | testsuite/driver/runtests.py | Python | mit | 8,165 |
"""
These settings are used by the ``manage.py`` command.
With normal tests we want to use the fastest possible way, which is an
in-memory sqlite database, but if you want to create South migrations you
need a persistent database.
Unfortunately there seems to be an issue with either South or syncdb so that
defining two routers ("default" and "south") does not work.
"""
from cmsplugin_redirect.tests.test_settings import * # NOQA
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.sqlite',
}
}
INSTALLED_APPS.append('south', )
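# Example invocation (added for illustration; it assumes South is installed and
# uses this module's dotted path as the settings module):
#   python manage.py schemamigration cmsplugin_redirect --auto \
#       --settings=cmsplugin_redirect.tests.south_settings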
| bitmazk/cmsplugin-redirect | cmsplugin_redirect/tests/south_settings.py | Python | mit | 586 |
import re
from .base import EventBuilder
from .._misc import utils
from .. import _tl
from ..types import _custom
class NewMessage(EventBuilder, _custom.Message):
"""
    Represents the event of a new message. This event can be treated,
    for all practical purposes, as a `Message <telethon.tl.custom.message.Message>`,
so please **refer to its documentation** to know what you can do
with this event.
Members:
message (`Message <telethon.tl.custom.message.Message>`):
This is the only difference with the received
`Message <telethon.tl.custom.message.Message>`, and will
return the `telethon.tl.custom.message.Message` itself,
not the text.
See `Message <telethon.tl.custom.message.Message>` for
the rest of available members and methods.
pattern_match (`obj`):
The resulting object from calling the passed ``pattern`` function.
Here's an example using a string (defaults to regex match):
>>> from telethon import TelegramClient, events
>>> client = TelegramClient(...)
>>>
>>> @client.on(events.NewMessage(pattern=r'hi (\\w+)!'))
... async def handler(event):
... # In this case, the result is a ``Match`` object
... # since the `str` pattern was converted into
... # the ``re.compile(pattern).match`` function.
... print('Welcomed', event.pattern_match.group(1))
...
>>>
Example
.. code-block:: python
import asyncio
from telethon import events
@client.on(events.NewMessage(pattern='(?i)hello.+'))
async def handler(event):
# Respond whenever someone says "Hello" and something else
await event.reply('Hey!')
@client.on(events.NewMessage(outgoing=True, pattern='!ping'))
async def handler(event):
# Say "!pong" whenever you send "!ping", then delete both messages
m = await event.respond('!pong')
await asyncio.sleep(5)
await client.delete_messages(event.chat_id, [event.id, m.id])
"""
@classmethod
def _build(cls, client, update, entities):
if isinstance(update,
(_tl.UpdateNewMessage, _tl.UpdateNewChannelMessage)):
if not isinstance(update.message, _tl.Message):
return # We don't care about MessageService's here
msg = update.message
elif isinstance(update, _tl.UpdateShortMessage):
msg = _tl.Message(
out=update.out,
mentioned=update.mentioned,
media_unread=update.media_unread,
silent=update.silent,
id=update.id,
peer_id=_tl.PeerUser(update.user_id),
from_id=_tl.PeerUser(self_id if update.out else update.user_id),
message=update.message,
date=update.date,
fwd_from=update.fwd_from,
via_bot_id=update.via_bot_id,
reply_to=update.reply_to,
entities=update.entities,
ttl_period=update.ttl_period
)
elif isinstance(update, _tl.UpdateShortChatMessage):
msg = _tl.Message(
out=update.out,
mentioned=update.mentioned,
media_unread=update.media_unread,
silent=update.silent,
id=update.id,
from_id=_tl.PeerUser(self_id if update.out else update.from_id),
peer_id=_tl.PeerChat(update.chat_id),
message=update.message,
date=update.date,
fwd_from=update.fwd_from,
via_bot_id=update.via_bot_id,
reply_to=update.reply_to,
entities=update.entities,
ttl_period=update.ttl_period
)
else:
return
return cls._new(client, msg, entities, None)
| LonamiWebs/Telethon | telethon/_events/newmessage.py | Python | mit | 4,114 |
from __future__ import unicode_literals
import django
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase, Client
from django.test.client import RequestFactory
from django.test.utils import override_settings
from custard.conf import (CUSTOM_TYPE_TEXT, CUSTOM_TYPE_INTEGER,
CUSTOM_TYPE_BOOLEAN, CUSTOM_TYPE_FLOAT,
CUSTOM_TYPE_DATE, CUSTOM_TYPE_DATETIME,
CUSTOM_TYPE_TIME, settings)
from custard.builder import CustomFieldsBuilder
from custard.utils import import_class
from .models import (SimpleModelWithManager, SimpleModelWithoutManager,
CustomFieldsModel, CustomValuesModel, builder)
#==============================================================================
class SimpleModelWithManagerForm(builder.create_modelform()):
class Meta:
model = SimpleModelWithManager
fields = '__all__'
#class ExampleAdmin(admin.ModelAdmin):
# form = ExampleForm
# search_fields = ('name',)
#
# def get_search_results(self, request, queryset, search_term):
# queryset, use_distinct = super(ExampleAdmin, self).get_search_results(request, queryset, search_term)
# queryset |= self.model.objects.search(search_term)
# return queryset, use_distinct
#
# admin.site.register(Example, ExampleAdmin)
#==============================================================================
class CustomModelsTestCase(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.simple_with_manager_ct = ContentType.objects.get_for_model(SimpleModelWithManager)
self.simple_without_manager_ct = ContentType.objects.get_for_model(SimpleModelWithoutManager)
self.cf = CustomFieldsModel.objects.create(content_type=self.simple_with_manager_ct,
name='text_field',
label="Text field",
data_type=CUSTOM_TYPE_TEXT)
self.cf.save()
self.cf2 = CustomFieldsModel.objects.create(content_type=self.simple_with_manager_ct,
name='another_text_field',
label="Text field 2",
data_type=CUSTOM_TYPE_TEXT,
required=True,
searchable=False)
self.cf2.clean()
self.cf2.save()
self.cf3 = CustomFieldsModel.objects.create(content_type=self.simple_with_manager_ct,
name='int_field', label="Integer field",
data_type=CUSTOM_TYPE_INTEGER)
self.cf3.save()
self.cf4 = CustomFieldsModel.objects.create(content_type=self.simple_with_manager_ct,
name='boolean_field', label="Boolean field",
data_type=CUSTOM_TYPE_BOOLEAN)
self.cf4.save()
self.cf5 = CustomFieldsModel.objects.create(content_type=self.simple_with_manager_ct,
name='float_field', label="Float field",
data_type=CUSTOM_TYPE_FLOAT)
self.cf5.save()
self.cf6 = CustomFieldsModel.objects.create(content_type=self.simple_with_manager_ct,
name='date_field', label="Date field",
data_type=CUSTOM_TYPE_DATE)
self.cf6.save()
self.cf7 = CustomFieldsModel.objects.create(content_type=self.simple_with_manager_ct,
name='datetime_field', label="Datetime field",
data_type=CUSTOM_TYPE_DATETIME)
self.cf7.save()
self.cf8 = CustomFieldsModel.objects.create(content_type=self.simple_with_manager_ct,
name='time_field', label="Time field",
data_type=CUSTOM_TYPE_TIME)
self.cf8.save()
self.obj = SimpleModelWithManager.objects.create(name='old test')
self.obj.save()
def tearDown(self):
CustomFieldsModel.objects.all().delete()
def test_import_class(self):
self.assertEqual(import_class('custard.builder.CustomFieldsBuilder'), CustomFieldsBuilder)
def test_model_repr(self):
self.assertEqual(repr(self.cf), "<CustomFieldsModel: text_field>")
val = CustomValuesModel.objects.create(custom_field=self.cf,
object_id=self.obj.pk,
value="abcdefg")
val.save()
self.assertEqual(repr(val), "<CustomValuesModel: text_field: abcdefg>")
@override_settings(CUSTOM_CONTENT_TYPES=['simplemodelwithmanager'])
def test_field_creation(self):
builder2 = CustomFieldsBuilder('tests.CustomFieldsModel',
'tests.CustomValuesModel',
settings.CUSTOM_CONTENT_TYPES)
class TestCustomFieldsModel(builder2.create_fields()):
class Meta:
app_label = 'tests'
self.assertQuerysetEqual(ContentType.objects.filter(builder2.content_types_query),
ContentType.objects.filter(Q(name__in=['simplemodelwithmanager'])))
def test_mixin(self):
self.assertIn(self.cf, self.obj.get_custom_fields())
self.assertIn(self.cf, SimpleModelWithManager.get_model_custom_fields())
self.assertEqual(self.cf, self.obj.get_custom_field('text_field'))
val = CustomValuesModel.objects.create(custom_field=self.cf,
object_id=self.obj.pk,
value="123456")
val.save()
self.assertEqual("123456", self.obj.get_custom_value('text_field'))
self.obj.set_custom_value('text_field', "abcdefg")
self.assertEqual("abcdefg", self.obj.get_custom_value('text_field'))
val.delete()
def test_field_model_clean(self):
cf = CustomFieldsModel.objects.create(content_type=self.simple_with_manager_ct,
name='another_text_field',
label="Text field already present",
data_type=CUSTOM_TYPE_INTEGER)
with self.assertRaises(ValidationError):
cf.full_clean()
cf = CustomFieldsModel.objects.create(content_type=self.simple_with_manager_ct,
name='name',
label="Text field already in model",
data_type=CUSTOM_TYPE_TEXT)
with self.assertRaises(ValidationError):
cf.full_clean()
def test_value_model_clean(self):
val = CustomValuesModel.objects.create(custom_field=self.cf2,
object_id=self.obj.pk)
val.value = "qwertyuiop"
val.save()
val = CustomValuesModel.objects.create(custom_field=self.cf2,
object_id=self.obj.pk)
val.value = "qwertyuiop"
with self.assertRaises(ValidationError):
val.full_clean()
def test_value_creation(self):
val = CustomValuesModel.objects.create(custom_field=self.cf,
object_id=self.obj.pk,
value="qwertyuiop")
val.save()
self.assertEqual(val.content_type, self.simple_with_manager_ct)
self.assertEqual(val.content_type, val.custom_field.content_type)
self.assertEqual(val.value_text, "qwertyuiop")
self.assertEqual(val.value, "qwertyuiop")
def test_value_search(self):
newobj = SimpleModelWithManager.objects.create(name='new simple')
newobj.save()
v1 = CustomValuesModel.objects.create(custom_field=self.cf,
object_id=self.obj.pk,
value="qwertyuiop")
v1.save()
v2 = CustomValuesModel.objects.create(custom_field=self.cf,
object_id=newobj.pk,
value="qwertyuiop")
v2.save()
v3 = CustomValuesModel.objects.create(custom_field=self.cf,
object_id=newobj.pk,
value="000asdf123")
v3.save()
qs1 = SimpleModelWithManager.objects.search("asdf")
self.assertQuerysetEqual(qs1, [repr(newobj)])
qs2 = SimpleModelWithManager.objects.search("qwerty")
self.assertQuerysetEqual(qs2, [repr(self.obj), repr(newobj)], ordered=False)
def test_value_search_not_searchable_field(self):
v1 = CustomValuesModel.objects.create(custom_field=self.cf,
object_id=self.obj.pk,
value="12345")
v1.save()
v2 = CustomValuesModel.objects.create(custom_field=self.cf2,
object_id=self.obj.pk,
value="67890")
v2.save()
qs1 = SimpleModelWithManager.objects.search("12345")
self.assertQuerysetEqual(qs1, [repr(self.obj)])
qs2 = SimpleModelWithManager.objects.search("67890")
self.assertQuerysetEqual(qs2, [])
def test_get_formfield_for_field(self):
with self.settings(CUSTOM_FIELD_TYPES={CUSTOM_TYPE_TEXT: 'django.forms.fields.EmailField'}):
builder2 = CustomFieldsBuilder('tests.CustomFieldsModel', 'tests.CustomValuesModel')
class SimpleModelWithManagerForm2(builder2.create_modelform(field_types=settings.CUSTOM_FIELD_TYPES)):
class Meta:
model = SimpleModelWithManager
fields = '__all__'
form = SimpleModelWithManagerForm2(data={}, instance=self.obj)
self.assertIsNotNone(form.get_formfield_for_field(self.cf))
self.assertEqual(django.forms.fields.EmailField, form.get_formfield_for_field(self.cf).__class__)
def test_get_widget_for_field(self):
with self.settings(CUSTOM_WIDGET_TYPES={CUSTOM_TYPE_TEXT: 'django.forms.widgets.CheckboxInput'}):
builder2 = CustomFieldsBuilder('tests.CustomFieldsModel', 'tests.CustomValuesModel')
class SimpleModelWithManagerForm2(builder2.create_modelform(widget_types=settings.CUSTOM_WIDGET_TYPES)):
class Meta:
fields = '__all__'
model = SimpleModelWithManager
form = SimpleModelWithManagerForm2(data={}, instance=self.obj)
self.assertIsNotNone(form.get_widget_for_field(self.cf))
self.assertEqual(django.forms.widgets.CheckboxInput, form.get_widget_for_field(self.cf).__class__)
def test_form(self):
class TestForm(builder.create_modelform()):
custom_name = 'My Custom Fields'
custom_description = 'Edit the Example custom fields here'
custom_classes = 'zzzap-class'
class Meta:
fields = '__all__'
model = SimpleModelWithManager
request = self.factory.post('/', { 'text_field': '123' })
form = TestForm(request.POST, instance=self.obj)
self.assertFalse(form.is_valid())
self.assertIn('another_text_field', form.errors)
self.assertRaises(ValueError, lambda: form.save())
request = self.factory.post('/', { 'id': self.obj.pk,
'name': 'xxx',
'another_text_field': 'wwwzzzyyyxxx' })
form = TestForm(request.POST, instance=self.obj)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(self.obj.get_custom_value('another_text_field'), 'wwwzzzyyyxxx')
self.assertEqual(self.obj.name, 'xxx')
#self.assertInHTML(TestForm.custom_name, form.as_p())
#self.assertInHTML(TestForm.custom_description, form.as_p())
#self.assertInHTML(TestForm.custom_classes, form.as_p())
def test_admin(self):
modeladmin_class = builder.create_modeladmin()
#c = Client()
#if c.login(username='fred', password='secret'):
# response = c.get('/admin/', follow=True)
# print(response) | quamilek/django-custard | custard/tests/test.py | Python | mit | 13,067 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""zigzi, Platform independent binary instrumentation module.
Copyright (c) 2016-2017 hanbum park <[email protected]>
All rights reserved.
For detailed copyright information see the file COPYING in the root of the
distribution archive.
"""
import argparse
from PEInstrument import *
from PEAnalyzeTool import *
from PEManager import *
from keystone import *
from DataSegment import *
from SampleReturnVerifier import *
from WindowAPIHelper import *
code_rva = 0
def simple_return_address_save_function():
global code_rva
allocation = pe_instrument.falloc(0x1000)
code = ("push eax;push ebx;" # save register
"mov eax, [{0}];" # get shadow stack counter
"inc eax;" # increase shadow stack counter
"" # get return address from stack
"mov [{0}], eax;" # save return address
"pop ebx;pop eax;" # restore register
"ret;" # return
).format(allocation.get_va() + 4)
code_rva = pe_instrument.append_code(code)
code_abs_va = pe_manager.get_abs_va_from_rva(code_rva)
allocation[0:4] = code_abs_va
    # TODO : need a way to avoid calculating the relocation address directly.
pe_manager.register_rva_to_relocation(code_rva + 1 + 1)
pe_manager.register_rva_to_relocation(code_rva + 7 + 1)
def simple_indirect_branch_counting_function_call_instrument(instruction):
global code_rva
code_zero_rva = code_rva - 0x1000
instruction_zero_rva = instruction.address
    # 5 is the length in bytes of the instrumented CALL instruction.
code = "CALL {:d}".format(code_zero_rva - instruction_zero_rva + 5)
hex_code = binascii.hexlify(code).decode('hex')
try:
# Initialize engine in X86-32bit mode
ks = Ks(KS_ARCH_X86, KS_MODE_32)
encoding, count = ks.asm(hex_code)
return encoding, count
except KsError as ex:
print("ERROR: %s" % ex)
return None, 0
def simple_indirect_branch_counting_function_instrument():
global code_rva
allocation = pe_instrument.falloc(0x1000)
code = ("push eax;"
"mov eax, [{0}];"
"inc eax;"
"mov [{0}], eax;"
"pop eax;"
"ret;").format(allocation.get_va() + 4)
code_rva = pe_instrument.append_code(code)
code_abs_va = pe_manager.get_abs_va_from_rva(code_rva)
allocation[0:4] = code_abs_va
    # TODO : need a way to avoid calculating the relocation address directly.
pe_manager.register_rva_to_relocation(code_rva + 1 + 1)
pe_manager.register_rva_to_relocation(code_rva + 7 + 1)
def do_indirect_branch_counting():
simple_indirect_branch_counting_function_instrument()
pe_instrument.register_pre_indirect_branch(
simple_indirect_branch_counting_function_call_instrument
)
def do_return_address_verifier(pe_instrument, pe_manager, fn_rva):
simple_instrument_error_handler(pe_instrument, pe_manager, fn_rva)
pe_instrument.register_after_relative_branch(
simple_instrument_return_address_at_after_branch
)
pe_instrument.register_after_indirect_branch(
simple_instrument_return_address_at_after_branch
)
pe_instrument.register_pre_return(
simple_instrument_return_address_verifier_at_pre_return
)
pe_instrument.do_instrument()
if __name__ == '__main__':
parser = argparse.ArgumentParser("zigzi")
parser.add_argument("file",
help="filename include its absolute path.",
type=str)
args = parser.parse_args()
filename = args.file
if not os.path.isfile(filename):
parser.print_help()
exit()
pe_manager = PEManager(filename)
# add api
window_api_helper = WindowAPIHelper(pe_manager)
message_box_fn_rva = window_api_helper.add_message_box()
# set new instrumentation
pe_instrument = PEInstrument(pe_manager)
do_return_address_verifier(pe_instrument, pe_manager, message_box_fn_rva)
# do_indirect_branch_counting()
# TODO : change to avoid duplicate processing.
    # do not adjust the file twice, as that breaks the file layout.
# pe_manager.adjust_file_layout()
output_filename = filename[:-4] + "_after_test.exe"
pe_manager.writefile(output_filename)
pe_instrument._save_instruction_log()
# C:\work\python\zigzi\tests\simple_echo_server.exe
| ParkHanbum/zigzi | __init__.py | Python | mit | 4,427 |
# -*- coding: utf-8 -*-
"""
Clement Michard (c) 2015
"""
import os
import sys
import nltk
from emotion import Emotion
from nltk.corpus import WordNetCorpusReader
import xml.etree.ElementTree as ET
class WNAffect:
"""WordNet-Affect ressource."""
def __init__(self, wordnet16_dir, wn_domains_dir):
"""Initializes the WordNet-Affect object."""
try:
cwd = os.getcwd()
nltk.data.path.append(cwd)
wn16_path = "{0}/dict".format(wordnet16_dir)
self.wn16 = WordNetCorpusReader(os.path.abspath("{0}/{1}".format(cwd, wn16_path)), nltk.data.find(wn16_path))
self.flat_pos = {'NN':'NN', 'NNS':'NN', 'JJ':'JJ', 'JJR':'JJ', 'JJS':'JJ', 'RB':'RB', 'RBR':'RB', 'RBS':'RB', 'VB':'VB', 'VBD':'VB', 'VGB':'VB', 'VBN':'VB', 'VBP':'VB', 'VBZ':'VB'}
self.wn_pos = {'NN':self.wn16.NOUN, 'JJ':self.wn16.ADJ, 'VB':self.wn16.VERB, 'RB':self.wn16.ADV}
self._load_emotions(wn_domains_dir)
self.synsets = self._load_synsets(wn_domains_dir)
except:
print "Please download the dependencies and re-run the script after installing them successfully. Exiting !"
exit()
def _load_synsets(self, wn_domains_dir):
"""Returns a dictionary POS tag -> synset offset -> emotion (str -> int -> str)."""
tree = ET.parse("{0}/wn-affect-1.1/a-synsets.xml".format(wn_domains_dir))
root = tree.getroot()
pos_map = { "noun": "NN", "adj": "JJ", "verb": "VB", "adv": "RB" }
synsets = {}
for pos in ["noun", "adj", "verb", "adv"]:
tag = pos_map[pos]
synsets[tag] = {}
for elem in root.findall(".//{0}-syn-list//{0}-syn".format(pos, pos)):
offset = int(elem.get("id")[2:])
if not offset: continue
if elem.get("categ"):
synsets[tag][offset] = Emotion.emotions[elem.get("categ")] if elem.get("categ") in Emotion.emotions else None
elif elem.get("noun-id"):
synsets[tag][offset] = synsets[pos_map["noun"]][int(elem.get("noun-id")[2:])]
return synsets
def _load_emotions(self, wn_domains_dir):
"""Loads the hierarchy of emotions from the WordNet-Affect xml."""
tree = ET.parse("{0}/wn-affect-1.1/a-hierarchy.xml".format(wn_domains_dir))
root = tree.getroot()
for elem in root.findall("categ"):
name = elem.get("name")
if name == "root":
Emotion.emotions["root"] = Emotion("root")
else:
Emotion.emotions[name] = Emotion(name, elem.get("isa"))
def get_emotion(self, word, pos):
"""Returns the emotion of the word.
word -- the word (str)
pos -- part-of-speech (str)
"""
if pos in self.flat_pos:
pos = self.flat_pos[pos]
synsets = self.wn16.synsets(word, self.wn_pos[pos])
if synsets:
offset = synsets[0].offset
if offset in self.synsets[pos]:
return self.synsets[pos][offset], offset
return None
def get_emotion_synset(self, offset):
"""Returns the emotion of the synset.
offset -- synset offset (int)
"""
for pos in self.flat_pos.values():
if offset in self.synsets[pos]:
return self.synsets[pos][offset]
return None
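# Example usage (added for illustration; the directory names are assumptions
# and must point at local copies of WordNet 1.6 and the WordNet-Domains/Affect
# distributions):
if __name__ == "__main__":
    wna = WNAffect('wordnet-1.6', 'wn-domains-3.2')
    print wna.get_emotion('angry', 'JJ')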
| Arnukk/TDS | wnaffect.py | Python | mit | 3,540 |
#!/usr/bin/env python
import cv2
import cv2.cv as cv
class Display:
def setup(self, fullscreen):
cv2.namedWindow('proj_0', cv2.WINDOW_OPENGL)
if fullscreen:
cv2.setWindowProperty('proj_0', cv2.WND_PROP_FULLSCREEN, cv.CV_WINDOW_FULLSCREEN)
def draw(self, image):
cv2.imshow('proj_0', image)
cv2.waitKey(1)
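# Example usage (added for illustration, not part of the original module):
# shows a black 640x480 frame in the 'proj_0' window; the frame size is an
# arbitrary assumption.
if __name__ == '__main__':
    import numpy as np
    display = Display()
    display.setup(fullscreen=False)
    display.draw(np.zeros((480, 640, 3), dtype=np.uint8))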
| light-swarm/lightswarm_render | scripts/display.py | Python | mit | 385 |
from typing import Iterable, Callable, Optional, Any, List, Iterator
from dupescan.fs._fileentry import FileEntry
from dupescan.fs._root import Root
from dupescan.types import AnyPath
FSPredicate = Callable[[FileEntry], bool]
ErrorHandler = Callable[[EnvironmentError], Any]
def catch_filter(inner_filter: FSPredicate, error_handler_func: ErrorHandler) -> FSPredicate:
# If no filter function provided, return one that includes everything. In
# this case it will never raise an error, so error_handler_func doesn't get
# a look-in here
if inner_filter is None:
def always_true(*args, **kwargs):
return True
return always_true
# Otherwise if the filter function throws an EnvironmentError, pass it to
# the error_handler_func (if provided) and return false
def wrapped_func(*args, **kwargs):
try:
return inner_filter(*args, **kwargs)
except EnvironmentError as env_error:
if error_handler_func is not None:
error_handler_func(env_error)
return False
return wrapped_func
def noerror(_):
pass
class Walker(object):
def __init__(
self,
recursive: bool,
dir_object_filter: Optional[FSPredicate]=None,
file_object_filter: Optional[FSPredicate]=None,
onerror: Optional[ErrorHandler]=None
):
self._recursive = bool(recursive)
self._onerror = noerror if onerror is None else onerror
self._dir_filter = catch_filter(dir_object_filter, self._onerror)
self._file_filter = catch_filter(file_object_filter, self._onerror)
def __call__(self, paths: Iterable[AnyPath]) -> Iterator[FileEntry]:
for root_index, root_path in enumerate(paths):
root_spec = Root(root_path, root_index)
try:
root_obj = FileEntry.from_path(root_path, root_spec)
except EnvironmentError as env_error:
self._onerror(env_error)
continue
if root_obj.is_dir and self._dir_filter(root_obj):
if self._recursive:
yield from self._recurse_dir(root_obj)
else:
yield root_obj
elif root_obj.is_file and self._file_filter(root_obj):
yield root_obj
def _recurse_dir(self, root_obj: FileEntry):
dir_obj_q: List[FileEntry] = [ root_obj ]
next_dirs: List[FileEntry] = [ ]
while len(dir_obj_q) > 0:
dir_obj = dir_obj_q.pop()
next_dirs.clear()
try:
for child_obj in dir_obj.dir_content():
try:
if (
child_obj.is_dir and
not child_obj.is_symlink and
self._dir_filter(child_obj)
):
next_dirs.append(child_obj)
elif (
child_obj.is_file and
self._file_filter(child_obj)
):
yield child_obj
except EnvironmentError as query_error:
self._onerror(query_error)
except EnvironmentError as env_error:
self._onerror(env_error)
dir_obj_q.extend(reversed(next_dirs))
def flat_iterator(
paths: Iterable[AnyPath],
dir_object_filter: Optional[FSPredicate]=None,
file_object_filter: Optional[FSPredicate]=None,
onerror: Optional[ErrorHandler]=None
) -> Iterator[FileEntry]:
return Walker(False, dir_object_filter, file_object_filter, onerror)(paths)
def recurse_iterator(
paths: Iterable[AnyPath],
dir_object_filter: Optional[FSPredicate]=None,
file_object_filter: Optional[FSPredicate]=None,
onerror: Optional[ErrorHandler]=None
) -> Iterator[FileEntry]:
return Walker(True, dir_object_filter, file_object_filter, onerror)(paths)
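# Example usage (added for illustration, not part of the original module):
# walk the current directory recursively, printing each yielded FileEntry and
# reporting filesystem errors instead of raising them.
if __name__ == "__main__":
    for entry in recurse_iterator(["."], onerror=print):
        print(entry)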
| yellcorp/dupescan | dupescan/fs/_walker.py | Python | mit | 4,089 |
#!/usr/bin/env python
import sys
import os
import subprocess
import shutil
import fix_rocks_network
import json
pxelinux_kernels_dir='/tftpboot/pxelinux/';
centos7_templates_dir='./centos7_ks'
centos7_dir='/export/rocks/install/centos7/';
centos7_ks_scripts_dir=centos7_dir+'/scripts/';
centos7_pxeboot_dir=centos7_dir+'/images/pxeboot';
#Fix PXE boot bug
def fix_pxe_bug():
shutil.copy('/usr/share/syslinux/chain.c32', pxelinux_kernels_dir);
subprocess.call('rocks add bootaction action=os kernel="com32 chain.c32" args="hd0"', shell=True);
def fix_install_action():
shutil.copy(centos7_pxeboot_dir+'/vmlinuz', pxelinux_kernels_dir+'/vmlinuz-centos7');
shutil.copy(centos7_pxeboot_dir+'/initrd.img', pxelinux_kernels_dir+'/initrd.img-centos7');
ks_host = fix_rocks_network.get_rocks_attr('Kickstart_PrivateKickstartHost');
ks_base_dir = fix_rocks_network.get_rocks_attr('Kickstart_PrivateKickstartBasedir');
subprocess.call('rocks add bootaction action=install kernel=vmlinuz-centos7 ramdisk=initrd.img-centos7 args="ksdevice=bootif ramdisk_size=16000 ks=http://'+ks_host+'/'+ks_base_dir+'/centos7/ks.cfg rhgb quiet console=tty0 console=ttyS0,115200n8"', shell=True);
def setup_for_centos7(params):
if(not os.path.isdir(centos7_dir)):
sys.stderr.write('ERROR: the contents of a CentOS-7 iso must be unpacked in the directory: '+centos7_dir+'\n');
raise Exception('Missing directory containing CentOS-7 iso contents');
try:
os.mkdir(centos7_ks_scripts_dir, 0755);
except OSError:
pass
#PXE boot changes
fix_pxe_bug();
fix_install_action();
#ssh public key
shutil.rmtree(centos7_ks_scripts_dir+'/ssh_public_keys', ignore_errors=True);
try:
os.remove(centos7_ks_scripts_dir+'/ssh_public_keys');
except Exception:
pass
if('ssh_public_keys_file' in params):
shutil.copy(params['ssh_public_keys_file'], centos7_ks_scripts_dir+'/ssh_public_keys');
#Get root password
root_passwd='$6$CdGXnN6zABQ0Pc/7$lsUtU27wSxwpGNrLQq00Mzpwb27ujgkV5Trq8wlZrqOmrmFuX6q5X0hebNKKs5DSk8.fU3o.b6Z0ISOfNnpTl.';
sys.stderr.write('Enter the root password to be set for your cluster by kickstart\n');
pid = subprocess.Popen('grub-crypt --sha-512', shell=True, stdout=subprocess.PIPE);
stdout_str = pid.communicate()[0];
if(pid.returncode == 0):
root_passwd = stdout_str.strip();
else:
        sys.stderr.write('ERROR: could not obtain root password, falling back to the built-in default hash. Re-run the program to set your root password\n');
#Copy disk.py file for partitioning
shutil.copy(centos7_templates_dir+'/scripts/disk.py', centos7_ks_scripts_dir+'/disk.py');
#Create files from templates
shutil.copy(centos7_templates_dir+'/ks_template.cfg', centos7_dir+'/ks.cfg');
shutil.copy(centos7_templates_dir+'/scripts/pre_install_template.sh', centos7_ks_scripts_dir+'/pre_install.sh');
shutil.copy(centos7_templates_dir+'/scripts/post_install_template.sh', centos7_ks_scripts_dir+'/post_install.sh');
ks_host = fix_rocks_network.get_rocks_attr('Kickstart_PrivateKickstartHost');
ks_base_dir = fix_rocks_network.get_rocks_attr('Kickstart_PrivateKickstartBasedir');
cmd = 'sed -i -e \'s/Kickstart_PrivateKickstartHost/'+ks_host+'/g\' -e \'s/Kickstart_PrivateKickstartBasedir/'+ks_base_dir+'/g\' '+centos7_ks_scripts_dir+'/post_install.sh '+centos7_ks_scripts_dir+'/pre_install.sh '+centos7_dir+'/ks.cfg';
status = subprocess.call(cmd, shell=True);
if(status != 0):
sys.stderr.write('ERROR: could not setup pre/post install scripts and kickstart file\n');
raise Exception('Could not setup pre/post install scripts and kickstart file');
if('timezone' in params):
cmd = 'sed -i -e \'/^timezone/c\\\ntimezone '+params['timezone']+'\' '+centos7_dir+'/ks.cfg'
status = subprocess.call(cmd, shell=True);
if(status != 0):
sys.stderr.write('ERROR: could not setup timezone in kickstart file\n');
raise Exception('Could not setup timezone in kickstart file');
with open(centos7_dir+'/ks.cfg', 'ab') as fptr:
fptr.write('rootpw --iscrypted '+root_passwd+' \n');
fptr.close();
if __name__ == "__main__":
params = {};
if(len(sys.argv) >= 2):
with open(sys.argv[1], 'rb') as data_file:
params = json.load(data_file);
directory = os.path.dirname(sys.argv[0]);
if(directory and directory != ''):
os.chdir(directory);
setup_for_centos7(params);
| kgururaj/rocks-centos7 | setup_for_centos7.py | Python | mit | 4,371 |
# -*- coding: utf-8 -*-
import logging
from . import constants
logger = logging.getLogger(constants.NAME)
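# Example usage elsewhere in the package (added for illustration):
#   from yunbk.log import logger
#   logger.info("backup started")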
| dantezhu/yunbk | yunbk/log.py | Python | mit | 109 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Prints a summary of the contents of the IPHAS source catalogue.
"""
import os
from astropy.io import fits
from astropy import log
import numpy as np
import sys
from dr2 import constants
n_sources = 0
n_r20 = 0
n_reliable = 0
n_deblend = 0
n_reliable_deblend = 0
n_pair = 0
n_saturated = 0
n_brightNeighb = 0
path = os.path.join(constants.DESTINATION, 'concatenated', 'full')
for filename in os.listdir(path):
if filename.endswith('fits.gz'):
print filename
myfile = os.path.join(path, filename)
log.info(myfile)
f = fits.open(myfile)
#n_sources += f[1].header['NAXIS2']
n_sources += f[1].data['ra'].size
n_r20 += (f[1].data['r'] < 21).sum()
n_reliable += f[1].data['reliable'].sum()
n_reliable_deblend += (f[1].data['reliable'] & f[1].data['deblend']).sum()
n_deblend += f[1].data['deblend'].sum()
n_pair += (f[1].data['sourceID2'] != ' ').sum()
n_saturated += f[1].data['saturated'].sum()
n_brightNeighb += f[1].data['brightNeighb'].sum()
print "{0} sources so far".format(n_sources)
with open('summary.txt', 'w') as out:
out.write("#Unique sources: {0}\n".format(n_sources))
out.write("#Sources r < 21: {0}\n".format(n_r20))
out.write("#Reliable sources: {0}\n".format(n_reliable))
out.write("#Deblend sources: {0}\n".format(n_deblend))
out.write("#Reliable+deblend: {0}\n".format(n_reliable_deblend))
out.write("#Paired sources: {0}\n".format(n_pair))
out.write("#Saturated sources: {0}\n".format(n_saturated))
out.write("#Bright neighb sources: {0}\n".format(n_brightNeighb))
| barentsen/iphas-dr2 | scripts/summary.py | Python | mit | 1,682 |
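# Illustrative sketch (added, not part of the original file) of the smoothing
# idea described in the TODO below: accept a new lane fit only when it stays
# within `max_distance` of the previous frame's fit, otherwise keep the old
# fit. The function name and the coefficient-array representation of a fit are
# assumptions.
import numpy as np
def accept_frame(previous_fit, new_fit, max_distance=100.0):
    """Return new_fit if it is close enough to previous_fit, else previous_fit."""
    if previous_fit is None:
        return new_fit
    distance = np.linalg.norm(np.asarray(new_fit) - np.asarray(previous_fit))
    return new_fit if distance <= max_distance else previous_fit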
# TODO implement a smoothing function perhaps
# (Ie only accept frames within x distance of previous frame) | swirlingsand/self-driving-car-nanodegree-nd013 | p4-CarND-Advanced-Lane-Lines/methods/laneDetection/qualityControl.py | Python | mit | 109 |
"""
WSGI config for asteria project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "asteria.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
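# Illustrative sketch (added): a minimal pass-through middleware, following the
# commented hint above. The class name is a placeholder.
#
#   class LoggingMiddleware(object):
#       def __init__(self, app):
#           self.app = app
#       def __call__(self, environ, start_response):
#           print(environ.get('PATH_INFO'))
#           return self.app(environ, start_response)
#
#   application = LoggingMiddleware(application)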
| tunegoon/asteria | asteria/wsgi.py | Python | mit | 1,136 |
from copy import copy
import sys
from textwrap import dedent
import warnings
import logging
import numpy
from six.moves import xrange
import theano
from theano.compat import izip
from six import integer_types
from theano.gradient import DisconnectedType
from theano import gof
from theano.gof import Apply, Constant, hashtype, Op, Type, MethodNotDefined
from theano.printing import pprint
from theano import scalar as scal
from theano.tensor.basic import alloc
from theano.tensor.basic import (addbroadcast, clip, get_scalar_constant_value,
ARange, TensorType, NotScalarConstantError)
from theano.tensor.elemwise import DimShuffle
from theano.tensor.type_other import NoneConst, SliceType, make_slice
from theano import config
inplace_increment = None
if config.cxx:
import theano.gof.cutils # needed to import cutils_ext
try:
from cutils_ext.cutils_ext import inplace_increment
except ImportError:
pass
_logger = logging.getLogger("theano.tensor.subtensor")
# Do a lazy import of the sparse module
sparse_module_ref = None
class AdvancedIndexingError(TypeError):
"""
Raised when Subtensor is asked to perform advanced indexing.
"""
def __init__(self, *args):
TypeError.__init__(self, *args)
##########
# Helpful functions to deal with Subtensor and IncSubtensor
##########
def make_constant(args):
"""
Convert python litterals to theano constants in subtensor arguments.
"""
def conv(a):
if a is None:
return a
elif isinstance(a, slice):
return slice(conv(a.start),
conv(a.stop),
conv(a.step))
elif isinstance(a, (integer_types, numpy.integer)):
return scal.ScalarConstant(scal.int64, a)
else:
return a
return tuple(map(conv, args))
def get_idx_list(inputs, idx_list, get_count=False):
'''
Given a list of inputs to the subtensor and its idx_list reorders
the inputs according to the idx list to get the right values.
If get_counts=True, instead returns the number of inputs consumed
during this process.
'''
# The number of indices
n = len(inputs) - 1
# The subtensor (or idx_list) does not depend on the inputs.
if n == 0:
return tuple(idx_list)
indices = list(reversed(list(inputs[1:])))
# General case
def convert(entry):
if isinstance(entry, gof.Type):
return indices.pop()
elif isinstance(entry, slice):
return slice(convert(entry.start),
convert(entry.stop),
convert(entry.step))
else:
return entry
cdata = tuple(map(convert, idx_list))
if get_count:
return n - len(indices)
else:
return cdata
def get_canonical_form_slice(theslice, length):
'''
Given a slice [start:stop:step] transform it into a canonical form
that respects the conventions imposed by python and numpy.
In a canonical form a slice is represented by a canonical form slice,
in which 0 <= start <= stop <= length and step > 0, and a flag which says
if the resulting set of numbers needs to be reversed or not.
'''
from theano.tensor import switch, lt, ge, sgn
if isinstance(theslice, slice):
def analyze(x):
try:
x_constant = get_scalar_constant_value(x)
is_constant = True
except theano.tensor.NotScalarConstantError:
x_constant = theano.tensor.extract_constant(x)
is_constant = False
return x_constant, is_constant
start, is_start_constant = analyze(theslice.start)
stop, is_stop_constant = analyze(theslice.stop)
step, is_step_constant = analyze(theslice.step)
length, is_length_constant = analyze(length)
if step is None:
step = 1
is_step_constant = True
# First handle the easier and common case where `step` is 1 and
# either `start` or `stop` is a range boundary. More specializations
# could be added later. This makes the resulting graph smaller than
# in the generic case below.
if step == 1:
is_start_0 = (
start is None or start == 0 or
(is_start_constant and is_length_constant and
start < 0 and start + length <= 0))
is_stop_length = (
stop is None or stop in [length, sys.maxsize] or
(is_stop_constant and is_length_constant and
stop >= length))
if is_start_0:
# 0:stop:1
if is_stop_length:
# Full slice.
return slice(0, length, 1), 1
if is_stop_constant and stop >= 0:
return (slice(0, switch(lt(stop, length), stop, length),
1), 1)
stop_plus_len = stop + length
stop = switch(
lt(stop, 0),
# stop < 0
switch(
lt(stop_plus_len, 0),
# stop + len < 0
0,
# stop + len >= 0
stop_plus_len),
# stop >= 0: use min(stop, length)
switch(lt(stop, length), stop, length))
return slice(0, stop, 1), 1
elif is_stop_length:
# start:length:1
if is_start_constant and start >= 0:
return slice(switch(lt(start, length), start, length),
length, 1), 1
start_plus_len = start + length
start = switch(
lt(start, 0),
# start < 0
switch(
lt(start_plus_len, 0),
# start + len < 0
0,
# start + len >= 0
start_plus_len),
# start >= 0: use min(start, length)
switch(lt(start, length), start, length))
return slice(start, length, 1), 1
# This is the generic case.
if is_step_constant:
# When we know the sign of `step`, the graph can be made simpler.
assert step != 0
if step > 0:
def switch_neg_step(a, b):
return b
abs_step = step
sgn_step = 1
else:
def switch_neg_step(a, b):
return a
abs_step = -step
sgn_step = -1
else:
is_step_neg = lt(step, 0)
def switch_neg_step(a, b):
return switch(is_step_neg, a, b)
abs_step = abs(step)
sgn_step = sgn(step)
defstart = switch_neg_step(length - 1, 0)
defstop = switch_neg_step(-1, length)
if start is None:
start = defstart
else:
start = switch(lt(start, 0), start + length, start)
start = switch(lt(start, 0), switch_neg_step(-1, 0), start)
start = switch(ge(start, length),
switch_neg_step(length - 1, length),
start)
if stop is None or stop == sys.maxsize:
# The special "maxsize" case is probably not needed here,
# as slices containing maxsize are not generated by
# __getslice__ anymore.
stop = defstop
else:
stop = switch(lt(stop, 0), stop + length, stop)
stop = switch(lt(stop, 0), -1, stop)
stop = switch(ge(stop, length), length, stop)
nw_stop = switch_neg_step(start + 1, stop)
slice_len = (start - stop - 1) // abs_step + 1
slice_len = switch(lt(slice_len, 0), 0, slice_len)
neg_start = nw_stop - (slice_len - 1) * abs_step - 1
neg_start = switch(lt(neg_start, 0), (nw_stop - 1), neg_start)
nw_start = switch_neg_step(neg_start, start)
nw_start = switch(lt(nw_start, 0), 0, nw_start)
nw_stop = switch(lt(nw_stop, 0), 0, nw_stop)
# Ensure start <= stop.
nw_start = switch(lt(nw_start, nw_stop), nw_start, nw_stop)
nw_step = abs_step
if step != 1:
reverse = sgn_step
return slice(nw_start, nw_stop, nw_step), reverse
else:
return slice(nw_start, nw_stop, nw_step), 1
else:
value = theano.tensor.extract_constant(theslice)
value = switch(lt(value, 0), (value + length), value)
return value, 1
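# Worked example (added for illustration): with length = 10, the slice
# x[8:1:-2] selects indices 8, 6, 4, 2. get_canonical_form_slice returns
# (slice(2, 9, 2), -1): the forward slice picks indices 2, 4, 6, 8 and the
# -1 flag indicates the result must then be reversed.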
class Subtensor(Op):
"""Return a subtensor view
The inputs array is the tensor x, followed by scalar integer types.
TODO: WRITEME: how are the scalar integer variables formatted?
This class uses a relatively complex internal representation of the inputs
to remember how the input tensor x should be sliced.
idx_list: instance variable TODO: WRITEME: is this a list or a tuple?
(old docstring gives two conflicting
descriptions)
elements are either integers, theano scalar types, or slices.
one element per "explicitly named dimension"
TODO: WRITEME: what is an "explicitly named dimension" ?
if integer:
indexes into the inputs array
if slice:
start/stop/step members of each slice are integer indices
into the inputs array or None
            integer indices may be actual integers or theano scalar types
Note that the idx_list defines the Op, so two Subtensor instances are
considered to be different Ops if they have different idx_list fields.
This means that the entries in it are theano Types, not theano Variables.
@todo: add support for advanced tensor indexing (in Subtensor_dx too).
"""
e_invalid = ('The index list is longer (size %d) than the number of '
                 'dimensions of the tensor (namely %d). You are asking for '
                 'a dimension of the tensor that does not exist! You might '
                 'need to use dimshuffle to add an extra dimension to your '
'tensor.')
e_subslice = 'nested slicing is not supported'
e_indextype = "Invalid index type or slice for Subtensor"
debug = 0
check_input = False
view_map = {0: [0]}
_f16_ok = True
__props__ = ("idx_list",)
@staticmethod
def collapse(idxs, cond):
"""
idxs: a list of indices or slices.
cond: a callable that returns a bool
returns: idxs, with the slices flattened out into a list.
if cond is true for an entry, does not flatten it.
"""
ret = []
def helper(entry):
if cond(entry):
ret.append(entry)
elif isinstance(entry, slice):
helper(entry.start)
helper(entry.stop)
helper(entry.step)
for idx in idxs:
helper(idx)
return ret
@staticmethod
def convert(entry, slice_ok=True):
"""
The "idx_list" field is unique to each Subtensor instance.
It is not unique to each Apply node, so it should not refer to
specific Variables. This method changes references to Variables
into references to Types.
TODO: WRITEME: This method also accepts "entry" already being a Type;
when would that happen?
"""
invalid_scal_types = [scal.float64, scal.float32, scal.float16]
scal_types = [scal.int64, scal.int32, scal.int16, scal.int8]
tensor_types = [theano.tensor.lscalar, theano.tensor.iscalar,
theano.tensor.wscalar, theano.tensor.bscalar]
invalid_tensor_types = [theano.tensor.fscalar, theano.tensor.dscalar,
theano.tensor.cscalar, theano.tensor.zscalar]
if (isinstance(entry, gof.Variable) and
(entry.type in invalid_scal_types or
entry.type in invalid_tensor_types)):
raise TypeError("Expected an integer")
if isinstance(entry, gof.Variable) and entry.type in scal_types:
return entry.type
elif isinstance(entry, gof.Type) and entry in scal_types:
return entry
if (isinstance(entry, gof.Variable) and
entry.type in tensor_types and
numpy.all(entry.type.broadcastable)):
return scal.get_scalar_type(entry.type.dtype)
elif (isinstance(entry, gof.Type) and
entry in tensor_types and
numpy.all(entry.broadcastable)):
return scal.get_scalar_type(entry.dtype)
elif slice_ok and isinstance(entry, slice):
a = entry.start
b = entry.stop
c = entry.step
if a is not None:
slice_a = Subtensor.convert(a, False)
else:
slice_a = None
if b is not None and b != sys.maxsize:
# The special "maxsize" case is probably not needed here,
# as slices containing maxsize are not generated by
# __getslice__ anymore.
slice_b = Subtensor.convert(b, False)
else:
slice_b = None
if c is not None:
slice_c = Subtensor.convert(c, False)
else:
slice_c = None
return slice(slice_a, slice_b, slice_c)
elif isinstance(entry, (integer_types, numpy.integer)):
# Disallow the use of python scalars in idx_list
raise TypeError("Python scalar in idx_list."
"Please report this error to theano-dev.")
else:
raise AdvancedIndexingError(Subtensor.e_indextype, entry)
def get_constant_idx(self, inputs, allow_partial=False,
only_process_constants=False):
"""
Return the idx_list with constant inputs replaced by their
python scalar equivalent. May raise
`theano.tensor.NotScalarConstantError` if the idx contains
non-constant entries.
If allow_partial is True, then entries that are not constant
will stay as their input variable rather than raising an
exception.
None entries are always left as-is.
Example usage (where v, a are appropriately typed theano variables):
>>> b = a[v, 1:3]
>>> b.owner.op.idx_list
(Scalar(int64), slice(Scalar(int64), Scalar(int64), None))
>>> b.owner.op.get_constant_idx(b.owner.inputs, allow_partial=True)
[v, slice(1, 3, None)]
>>> b.owner.op.get_constant_idx(b.owner.inputs)
NotScalarConstantError: v
:param only_process_constants: If True, we only attempt to obtain
the value of an index/slice if it's directly constant and don't
try to dig through dimshuffles, fills, allocs, and other to figure
out its value.
"""
real_idx = get_idx_list(inputs, self.idx_list)
def conv(val):
if val is None:
return None
elif isinstance(val, slice):
return slice(conv(val.start),
conv(val.stop),
conv(val.step))
else:
try:
return get_scalar_constant_value(
val,
only_process_constants=only_process_constants)
except theano.tensor.NotScalarConstantError:
if allow_partial:
return val
else:
raise
return list(map(conv, real_idx))
def __init__(self, idx_list):
self.idx_list = tuple(map(self.convert, idx_list))
@staticmethod
def my_as_scalar(a):
# Since scal.as_scalar does not know about tensor types (it would
# create a circular import) , this method converts either a
# TensorVariable or a ScalarVariable to a scalar.
if isinstance(a, gof.Variable) and isinstance(a.type, TensorType):
return theano.tensor.scalar_from_tensor(a)
else:
return scal.as_scalar(a)
def make_node(self, x, *inputs):
"""
x: the tensor to take a subtensor of
inputs: a list of theano Scalars
"""
x = theano.tensor.as_tensor_variable(x)
inputs = tuple(self.my_as_scalar(a) for a in inputs)
idx_list = list(self.idx_list)
if len(idx_list) > x.type.ndim:
exception = ValueError(Subtensor.e_invalid % (
len(idx_list), x.type.ndim))
exception.subtensor_invalid = True
raise exception
input_types = Subtensor.collapse(idx_list,
lambda entry: isinstance(entry,
gof.Type))
if len(inputs) != len(input_types):
raise IndexError(
"Not enough inputs to fill in the Subtensor template.",
inputs, idx_list)
for input, expected_type in izip(inputs, input_types):
if input.type != expected_type:
raise TypeError(
"Wrong type for Subtensor template. Expected %s, got %s."
% (input.type, expected_type))
# infer the broadcasting pattern
padded = (self.get_constant_idx((None,) + inputs, allow_partial=True) +
[slice(None, None, None)] * (x.type.ndim - len(idx_list)))
broadcastable = []
for i, (p, bc) in enumerate(izip(padded, x.type.broadcastable)):
if isinstance(p, slice):
if bc:
start = p.start
try:
start = get_scalar_constant_value(start)
except NotScalarConstantError:
pass
if start is None or start == 0:
start = p.start
if start is None:
start = 0
if (p.stop is None or
(isinstance(p.stop, (int, numpy.integer,
numpy.ndarray)) and
p.stop > start)):
broadcastable.append(True)
continue
broadcastable.append(False)
return gof.Apply(self,
(x, ) + inputs,
[theano.tensor.tensor(dtype=x.type.dtype,
broadcastable=broadcastable)])
def perform(self, node, inputs, out_):
out, = out_
x = inputs[0]
cdata = get_idx_list(inputs, self.idx_list)
if len(cdata) == 1:
cdata = cdata[0]
out[0] = numpy.asarray(x.__getitem__(cdata))
def infer_shape(self, node, shapes):
xshp = shapes[0]
assert len(xshp) == node.inputs[0].ndim
outshp = []
actual_idx_list = list(get_idx_list(node.inputs, self.idx_list))
padded = (actual_idx_list +
[slice(None, None, None)] * (len(xshp) - len(self.idx_list)))
i = 0
for idx, xl in izip(padded, xshp):
if isinstance(idx, slice):
# If it is the default (None, None, None) slice, or a variant,
# the shape will be xl
if ((idx.start in [None, 0]) and
(idx.stop in [None, sys.maxsize]) and
(idx.step is None or idx.step == 1)):
outshp.append(xl)
else:
cnf = get_canonical_form_slice(idx, xl)[0]
if cnf.step == 1:
length = cnf.stop - cnf.start
else:
length = (cnf.stop - cnf.start - 1) // cnf.step + 1
outshp.append(length)
i += 1
else:
# That dimension is dropped
pass
assert i == node.outputs[0].ndim
assert len(outshp) == node.outputs[0].ndim
return [outshp]
def grad(self, inputs, grads):
gz, = grads
x = inputs[0]
rest = inputs[1:]
output = self(*inputs)
if output.dtype.find('int') != -1:
first = x.zeros_like().astype(theano.config.floatX)
else:
first = IncSubtensor(self.idx_list)(x.zeros_like(), gz, *rest)
return ([first] + [DisconnectedType()()] * len(rest))
def connection_pattern(self, node):
rval = [[True]]
for ipt in node.inputs[1:]:
rval.append([False])
return rval
def __hash__(self):
# TODO: optimize by cache this hash value
msg = []
for entry in self.idx_list:
if isinstance(entry, slice):
msg += [(entry.start, entry.stop, entry.step)]
else:
msg += [entry]
idx_list = tuple(msg)
# backport
# idx_list = tuple((entry.start, entry.stop, entry.step)
# if isinstance(entry, slice)
# else entry
# for entry in self.idx_list)
return hash(idx_list)
@staticmethod
def str_from_slice(entry):
msg = []
for x in [entry.start, entry.stop, entry.step]:
if x is None:
msg.append("")
else:
msg.append(str(x))
return ":".join(msg)
def __str__(self):
indices = []
for entry in self.idx_list:
if isinstance(entry, slice):
indices.append(self.str_from_slice(entry))
else:
indices.append(str(entry))
return "%s{%s}" % (self.__class__.__name__, ", ".join(indices))
@staticmethod
def default_helper_c_code_args():
"""
Returns a dictionary of default arguments to
helper_c_code
"""
return {"c_prefix": "PyArray",
"strides_mul": 1}
@staticmethod
def helper_c_code(node, name, inputs, outputs, sub, idx_list, view_ndim,
c_prefix=None,
strides_mul=None):
"""
        The c_prefix and strides_mul parameters allow this function to be
        reused for both PyArray and CudaNdarray objects.
        It takes as input the tensor x followed by the scalar index inputs.
"""
default_args = Subtensor.default_helper_c_code_args()
if strides_mul is None:
strides_mul = default_args['strides_mul']
if c_prefix is None:
c_prefix = default_args['c_prefix']
#
# two arrays are created in C code:
# is_slice: len == ndim, 0 means int, 1 means slice
# subtensor_spec: len = n_ints + 3 * n_slices
#
fail = sub['fail']
init_cmds = [] # initialization for subtensor_spec
is_slice = []
# TODO: change that, it might lead to unexpected results,
# see assembla-#767
NONE_CODE = sys.maxsize - 1
pos = [0, 1] # annoying version of global variable for init_entry
def inc_spec_pos(amt):
pos[0] += amt
def inc_input_pos(amt):
pos[1] += amt
def spec_pos():
return pos[0]
def input_pos():
return pos[1]
def init_entry(entry, depth=0):
if isinstance(entry, (numpy.integer, int)):
init_cmds.append(
"subtensor_spec[%i] = %i;" % (spec_pos(),
entry))
inc_spec_pos(1)
if depth == 0:
is_slice.append(0)
elif isinstance(entry, Type):
init_cmds.append(
"subtensor_spec[%i] = %s;" % (spec_pos(),
inputs[input_pos()]))
inc_spec_pos(1)
inc_input_pos(1)
if depth == 0:
is_slice.append(0)
elif entry is None:
init_cmds.append(
"subtensor_spec[%i] = %i;" % (spec_pos(),
NONE_CODE))
inc_spec_pos(1)
if depth == 0:
is_slice.append(0)
elif depth == 0 and isinstance(entry, slice):
init_entry(entry.start, depth + 1)
init_entry(entry.stop, depth + 1)
init_entry(entry.step, depth + 1)
is_slice.append(1)
else:
assert 0, entry
for entry in idx_list:
init_entry(entry)
# make sure we used all inputs
assert input_pos() == len(inputs), input_pos()
assert len(is_slice) <= node.inputs[0].ndim, node.inputs[0].ndim
len_is_slice = len(is_slice)
len_subtensor_spec = spec_pos()
subensor_spec = "npy_intp subtensor_spec[%(len_subtensor_spec)s];" % locals()
if len_subtensor_spec == 0:
subensor_spec = "npy_intp * subtensor_spec = NULL;"
if is_slice:
is_slice_init = "int is_slice[] = {" + ",".join([str(s) for s in
is_slice]) + "};"
else:
is_slice_init = "int* is_slice = NULL;"
subtensor_init = "\n".join(init_cmds)
x, = inputs[:1]
z, = outputs
if view_ndim:
rval = """
// Argument of the view
npy_intp xview_dims[%(view_ndim)s];
npy_intp xview_strides[%(view_ndim)s];
""" % locals()
else:
rval = """
// Argument of the view
npy_intp* xview_dims = NULL;
npy_intp* xview_strides = NULL;
"""
rval += """
// One more argument of the view
npy_intp xview_offset = 0;
// The subtensor is created by iterating over the dimensions
// and updating stride, shape, and data pointers
%(is_slice_init)s
%(subensor_spec)s
%(subtensor_init)s;
int spec_pos = 0; //position in subtensor_spec
int inner_ii = 0; // the current dimension of zview
int outer_ii = 0; // current dimension of z
for (; outer_ii < %(len_is_slice)s; ++outer_ii)
{
if (is_slice[outer_ii])
{
npy_intp length = %(c_prefix)s_DIMS(%(x)s)[outer_ii];
npy_intp slicelength;
npy_intp start = subtensor_spec[spec_pos+0];
npy_intp stop = subtensor_spec[spec_pos+1];
npy_intp step = subtensor_spec[spec_pos+2];
if (step == %(NONE_CODE)s) step = 1;
npy_intp defstart = step < 0 ? length-1 : 0;
npy_intp defstop = step < 0 ? -1 : length;
// logic adapted from
// PySlice_GetIndicesEx in python source
if (!step)
{
PyErr_Format(PyExc_ValueError,
"slice step cannot be zero");
%(fail)s;
}
if (start == %(NONE_CODE)s)
{
start = defstart;
}
else
{
if (start < 0) start += length;
if (start < 0) start = (step < 0) ? -1 : 0;
if (start >= length)
start = (step < 0) ? length - 1 : length;
}
if (stop == %(NONE_CODE)s)
{
stop = defstop;
}
else
{
if (stop < 0) stop += length;
if (stop < 0) stop = (step < 0) ? -1 : 0;
if (stop >= length)
stop = (step < 0) ? length - 1 : length;
}
if ((step < 0 && stop >= start)
|| (step > 0 && start >= stop)) {
slicelength = 0;
}
else if (step < 0) {
slicelength = (stop-start+1)/step+1;
}
else {
slicelength = (stop-start-1)/step+1;
}
if (0){
fprintf(stdout, "start %%zi\\n", start);
fprintf(stdout, "stop %%zi\\n", stop);
fprintf(stdout, "step %%zi\\n", step);
fprintf(stdout, "length %%zi\\n", length);
fprintf(stdout, "slicelength %%zi\\n", slicelength);
}
assert (slicelength <= length);
xview_offset += (npy_intp)%(c_prefix)s_STRIDES(%(x)s)[outer_ii]
* start * %(strides_mul)s;
xview_dims[inner_ii] = slicelength;
xview_strides[inner_ii] = (npy_intp)%(c_prefix)s_STRIDES(%(x)s)[outer_ii] * step;
inner_ii += 1;
spec_pos += 3;
}
else // tuple coord `outer_ii` is an int
{
int idx = subtensor_spec[spec_pos];
if (idx < 0) idx += %(c_prefix)s_DIMS(%(x)s)[outer_ii];
if (idx >= 0)
{
if (idx < %(c_prefix)s_DIMS(%(x)s)[outer_ii])
{
xview_offset += (npy_intp)%(c_prefix)s_STRIDES(%(x)s)[outer_ii] * idx *
%(strides_mul)s;
}
else
{
PyErr_Format(PyExc_IndexError,"index out of bounds");
%(fail)s;
}
}
else
{
PyErr_Format(PyExc_IndexError,"index out of bounds");
%(fail)s;
}
spec_pos += 1;
}
}
assert (inner_ii <= %(view_ndim)s);
while (inner_ii < %(view_ndim)s)
{
assert (outer_ii < %(c_prefix)s_NDIM(%(x)s));
xview_dims[inner_ii] = %(c_prefix)s_DIMS(%(x)s)[outer_ii];
xview_strides[inner_ii] = %(c_prefix)s_STRIDES(%(x)s)[outer_ii];
inner_ii += 1;
outer_ii += 1;
}
""" % locals()
# print rval
return rval
@staticmethod
def helper_c_code_cache_version():
return (9,)
def c_code(self, node, name, inputs, outputs, sub): # DEBUG
if not isinstance(node.inputs[0].type, theano.tensor.TensorType):
raise NotImplementedError()
x = inputs[0]
z, = outputs
ndim = node.inputs[0].ndim
view_ndim = node.outputs[0].ndim
fail = sub['fail']
decl = "PyArrayObject * xview = NULL;"
checkNDim = """
if (PyArray_NDIM(%(x)s) != %(ndim)s){
PyErr_SetString(PyExc_ValueError,
"Expected %(ndim)s dimensions input"
);
%(fail)s
}
""" % locals()
get_xview = self.helper_c_code(node, name, inputs, outputs, sub,
self.idx_list, view_ndim)
build_view = """
//TODO: give this Op a second output so that this view can be cached
//TODO: alternatively, fix the memory leak on failure
Py_INCREF(PyArray_DESCR(%(x)s));
xview = (PyArrayObject*)PyArray_NewFromDescr(
&PyArray_Type,
PyArray_DESCR(%(x)s),
%(view_ndim)s,
xview_dims,
xview_strides,
PyArray_BYTES(%(x)s) + xview_offset,
PyArray_FLAGS(%(x)s),
NULL);
assert (PyArray_NDIM(xview) == %(view_ndim)s);
if (!xview)
{
%(fail)s;
}
""" % locals()
finish_view = """
//This is needed for NumPy 1.5, but not 1.7.2
PyArray_UpdateFlags(xview, NPY_ARRAY_C_CONTIGUOUS| NPY_ARRAY_F_CONTIGUOUS);
Py_XDECREF(%(z)s);
Py_INCREF(py_%(x)s);
#if NPY_API_VERSION < 0x00000007
PyArray_BASE(xview) = py_%(x)s;
#else
PyArray_SetBaseObject(xview, py_%(x)s);
#endif
assert(py_%(x)s == (PyObject*)%(x)s);
%(z)s = xview;
""" % locals()
return (decl + checkNDim +
"{" + get_xview + build_view + finish_view + "}")
def c_code_cache_version(self):
hv = self.helper_c_code_cache_version()
# If `helper_c_code_cache_version` is not versioned we do not want to
# have a versioned version of this op's C code.
if len(hv) == 0:
return ()
return (4, hv)
def R_op(self, inputs, eval_points):
# Subtensor is not differentiable wrt to its indices, therefore we
# do not even need to consider the eval_points provided for those
# (they should be defaulted to zeros_like by the global R_op)
if eval_points[0] is None:
return [None]
return self(eval_points[0], *inputs[1:], **dict(return_list=True))
class SubtensorPrinter:
def process(self, r, pstate):
if r.owner is None:
raise TypeError("Can only print Subtensor.")
elif isinstance(r.owner.op, Subtensor):
idxs = r.owner.op.idx_list
inputs = list(r.owner.inputs)
input = inputs.pop()
sidxs = []
inbrack_pstate = pstate.clone(precedence=-1000)
for entry in idxs:
if isinstance(entry, int):
sidxs.append(str(entry))
elif isinstance(entry, scal.Scalar):
sidxs.append(inbrack_pstate.pprinter.process(inputs.pop()))
elif isinstance(entry, slice):
if entry.start is None or entry.start == 0:
msg1 = ""
else:
msg1 = entry.start
if entry.stop is None or entry.stop == sys.maxsize:
msg2 = ""
else:
msg2 = entry.stop
if entry.step is None:
msg3 = ""
else:
msg3 = ":%s" % entry.step
sidxs.append("%s:%s%s" % (msg1, msg2, msg3))
return "%s[%s]" % (pstate.pprinter.process(
input,
pstate.clone(precedence=1000)),
", ".join(sidxs))
else:
raise TypeError("Can only print Subtensor.")
pprint.assign(lambda pstate, r: r.owner and isinstance(r.owner.op, Subtensor),
SubtensorPrinter())
def set_subtensor(x, y, inplace=False,
tolerate_inplace_aliasing=False):
"""Return x with the given subtensor overwritten by y.
Example: To replicate the numpy expression "r[10:] = 5", type
>>> r = ivector()
>>> new_r = set_subtensor(r[10:], 5)
:param x: symbolic variable for the lvalue of = operation
:param y: symbolic variable for the rvalue of = operation
:param tolerate_inplace_aliasing: see inc_subtensor for documentation.
"""
return inc_subtensor(x, y, inplace, set_instead_of_inc=True,
tolerate_inplace_aliasing=tolerate_inplace_aliasing)
def inc_subtensor(x, y, inplace=False, set_instead_of_inc=False,
tolerate_inplace_aliasing=False):
"""Return x with the given subtensor incremented by y.
:param x: the symbolic result of a Subtensor operation.
:param y: the amount by which to increment ths subtensor in question
:param inplace: Don't use. Theano will do it when possible.
:param set_instead_of_inc: If True, do a set_subtensor instead.
:param tolerate_inplace_aliasing: allow x and y to be views of a single
underlying array even while working inplace. For correct results,
x and y must not be overlapping views; if they overlap, the result
of this Op will generally be incorrect. This value has no effect if
inplace=False.
Example: To replicate the numpy expression "r[10:] += 5", type
>>> r = ivector()
>>> new_r = inc_subtensor(r[10:], 5)
"""
# First of all, y cannot have a higher dimension than x,
# nor have non-broadcastable dimensions where x is broadcastable.
x = theano.tensor.as_tensor_variable(x)
y = theano.tensor.as_tensor_variable(y)
if y.ndim > x.ndim:
raise TypeError(("Trying to increment a %d-dimensional "
"subtensor with a %d-dimensional value.") % (x.ndim,
y.ndim))
dim_offset = x.ndim - y.ndim
for dim in xrange(y.ndim):
if (x.broadcastable[dim + dim_offset] and not y.broadcastable[dim]):
# It is acceptable to try to increment a subtensor with a
# broadcastable dim with a tensor that is not broadcastable
# on that dimension. However, its length must then be 1.
# We insert a Rebroadcast Op to make sure it is the case.
y = addbroadcast(y, dim)
if not x.owner:
raise TypeError('x must be the result of a subtensor operation')
# retrieve idx_list from x.owner
if isinstance(x.owner.op, Subtensor):
if tolerate_inplace_aliasing:
destroyhandler_tolerate_aliased = [[0, 1]]
else:
destroyhandler_tolerate_aliased = []
the_op = IncSubtensor(
x.owner.op.idx_list, inplace, set_instead_of_inc,
destroyhandler_tolerate_aliased=destroyhandler_tolerate_aliased)
real_x = x.owner.inputs[0]
real_idxargs = x.owner.inputs[1:]
return the_op(real_x, y, *real_idxargs)
elif isinstance(x.owner.op, AdvancedSubtensor1):
real_x = x.owner.inputs[0]
ilist = x.owner.inputs[1]
the_op = AdvancedIncSubtensor1(inplace,
set_instead_of_inc=set_instead_of_inc)
return the_op(real_x, y, ilist)
elif isinstance(x.owner.op, AdvancedSubtensor):
real_x = x.owner.inputs[0]
ilist = x.owner.inputs[1:]
the_op = AdvancedIncSubtensor(inplace,
set_instead_of_inc=set_instead_of_inc)
return the_op(real_x, y, *ilist)
elif isinstance(x.owner.op, DimShuffle):
inner_x = x.owner.inputs[0]
# In the dimshuffle case, there are in fact two dimshuffles:
# one to make the indexed dimension the last one,
# and one to put it back where it was. So, in the case where we have
# inc_subtensor(x[:,i], y), the graph is actually
# inc_subtensor((x.T)[i].T, y).
# We could get all the way to x, and then get rid of the dimshuffles
# completely, but the problem is that advanced_inc_subtensor1 can only
# work on the first (outer-most, left-most) dimension of x,
# just like advanced_subtensor1.
# So we call advanced_inc_subtensor1(x.T, i, y.T) (as we also need to
# transpose y if it is not a scalar or a vector), but then we need to
# return something that has the same shape as x, not as x.T (inner_x).
# So re-apply the outer dimshuffle on the new inc_subtensor,
# and return advanced_inc_subtensor1(x.T, i, y.T).T.
# Get the dimshuffle pattern to apply to y.
x_order = x.owner.op.new_order
y_order = ['x'] * x.ndim
for i, v in enumerate(x_order):
if v != 'x' and (v - dim_offset) >= 0:
y_order[v - dim_offset] = i
# Warn if this code path would have produced wrong results in the past
if config.warn.inc_set_subtensor1:
# Dimshuffle pattern for y that would be equivalent to past code
prev_y_order = ['x'] * (dim_offset) + list(range(y.ndim))
if y_order != prev_y_order:
warnings.warn(
'Although your current code is fine, please note that '
'earlier versions prior to 0.7 (or this development '
'version) may have yielded an incorrect result in '
'this `inc_subtensor` or `set_subtensor` operation. '
'To remove this warning, you can either set the '
'`warn.inc_set_subtensor1` config option to `False`, '
'or `warn.ignore_bug_before` to at least "0.7".',
stacklevel=2)
inner_incsubtensor = inc_subtensor(
inner_x,
y.dimshuffle(y_order),
inplace=inplace,
set_instead_of_inc=set_instead_of_inc,
tolerate_inplace_aliasing=tolerate_inplace_aliasing)
return x.owner.op(inner_incsubtensor, *x.owner.inputs[1:])
elif isinstance(x.owner.op, theano.tensor.Reshape):
# This case happens when the indices are not arranged as a vector, but
# as a higher-dimensional array. This is handled by the subtensor
# by flattening this list, taking the subtensor, then reshaping the
# result.
inner_x = x.owner.inputs[0]
# Try to apply inc_subtensor on inner_x.
# If it works, there is no need to reshape, as the inc_subtensor
# will have the same shape as inner_x, which is what we want.
# We also explicitly duplicate y to its broadcasted shape
# before we partially flatten it to inner_x dimension. This is
# not strictly needed in all cases, but it is easier this way.
if y.ndim > 0:
            # This check is needed to prevent a useless warning about an
            # old code bug.
expanded_y = alloc(y, *[x.shape[i] for i in xrange(x.ndim)])
flattened_y = expanded_y.flatten(inner_x.ndim)
else:
flattened_y = y
# Warn if this code path would have produced wrong results in the past
if config.warn.inc_set_subtensor1:
if inner_x.ndim > 1 and sum(y.broadcastable) > 0:
warnings.warn(
'Although your current code is fine, please note that '
'earlier versions prior to 0.7 (or this development '
'version) may have yielded an incorrect result in '
'this `inc_subtensor` or `set_subtensor` operation. '
'To remove this warning, you can either set the '
'`warn.inc_set_subtensor1` config option to `False`, '
'or `warn.ignore_bug_before` to at least "0.7".',
stacklevel=2)
inner_incsubtensor = inc_subtensor(
inner_x,
flattened_y,
inplace=inplace,
set_instead_of_inc=set_instead_of_inc,
tolerate_inplace_aliasing=tolerate_inplace_aliasing)
return inner_incsubtensor
else:
raise TypeError('x must be the result of a subtensor operation')
class IncSubtensor(Op):
"""Increment a subtensor.
This is like numpy's
x[i,j,k] += y
It is used internally to implement the gradient on SubTensor.
:param set_instead_of_inc: if True set the subtensor to the value instead
of incrementing it by that value.
"""
check_input = False
__props__ = ("idx_list", "inplace", "set_instead_of_inc")
def __init__(self, idx_list, inplace=False, set_instead_of_inc=False,
destroyhandler_tolerate_aliased=None):
if destroyhandler_tolerate_aliased is None:
destroyhandler_tolerate_aliased = []
self.idx_list = list(map(Subtensor.convert, idx_list))
self.inplace = inplace
if inplace:
self.destroy_map = {0: [0]}
self.destroyhandler_tolerate_aliased = list(
destroyhandler_tolerate_aliased)
self.set_instead_of_inc = set_instead_of_inc
def __hash__(self):
msg = []
for entry in self.idx_list:
if isinstance(entry, slice):
msg += [(entry.start, entry.stop, entry.step)]
else:
msg += [entry]
idx_list = tuple(msg)
# backport
# idx_list = tuple((entry.start, entry.stop, entry.step)
# if isinstance(entry, slice)
# else entry
# for entry in self.idx_list)
return (hashtype(self) ^ hash(idx_list) ^ hash(self.inplace) ^
hash(self.set_instead_of_inc))
def __str__(self):
indices = []
for entry in self.idx_list:
if isinstance(entry, slice):
indices.append(Subtensor.str_from_slice(entry))
else:
indices.append(str(entry))
if self.inplace:
msg = 'Inplace'
else:
msg = ''
if not self.set_instead_of_inc:
msg += 'Inc'
else:
msg += 'Set'
return "%s{%s;%s}" % (
self.__class__.__name__,
msg,
", ".join(indices))
def make_node(self, x, y, *inputs):
"""
x: the tensor to increment
y: the value to increment by
inputs: TODO WRITEME
"""
x, y = map(theano.tensor.as_tensor_variable, [x, y])
if y.ndim > x.ndim:
raise ValueError(("Trying to increment a %d-dimensional "
"subtensor with a %d-dimensional value.") % (
x.ndim, y.ndim))
inputs = tuple(map(Subtensor.my_as_scalar, inputs))
idx_list = list(self.idx_list)
if len(idx_list) > x.type.ndim:
exception = ValueError(
Subtensor.e_invalid % (
len(idx_list),
x.type.ndim))
exception.subtensor_invalid = True
raise exception
input_types = Subtensor.collapse(
idx_list,
lambda entry: isinstance(entry, gof.Type))
if len(inputs) != len(input_types):
raise IndexError(
"Not enough inputs to fill in the Subtensor template.",
inputs, idx_list)
for input, expected_type in izip(inputs, input_types):
if input.type != expected_type:
raise TypeError(
"Wrong type for Subtensor template. Expected %s, got %s."
% (input.type, expected_type))
return gof.Apply(self,
(x, y) + inputs,
[x.type()])
def decl_view(self):
return "PyArrayObject * zview = NULL;"
def perform(self, node, inputs, out_):
out, = out_
x, y = inputs[:2]
indices = list(reversed(inputs[2:]))
def convert(entry):
if isinstance(entry, gof.Type):
rval = indices.pop()
if sys.version_info < (2, 5):
# Before Python 2.5, PySlice_GetIndicesEx requires
# Python int to be passed.
rval_ = int(rval)
if rval_ != rval:
raise IndexError((
"Invalid value for indexing: %s. "
"That value may be too big.") % rval)
return rval_
return rval
elif isinstance(entry, slice):
return slice(convert(entry.start),
convert(entry.stop),
convert(entry.step))
else:
return entry
cdata = tuple(map(convert, self.idx_list))
if len(cdata) == 1:
cdata = cdata[0]
if not self.inplace:
x = x.copy()
sub_x = x.__getitem__(cdata)
if sub_x.shape:
# we've sliced out an N-D tensor with N > 0
if not self.set_instead_of_inc:
sub_x += y
else:
# sub_x += -sub_x + y
x.__setitem__(cdata, y)
else:
# scalar case
if not self.set_instead_of_inc:
x.__setitem__(cdata, sub_x + y)
else:
x.__setitem__(cdata, y)
out[0] = x
def c_code(self, node, name, inputs, outputs, sub):
# This method delegates much of the work to helper
# methods. This method implements the main logic
# but subclasses may override the helper methods
# to change the particulars, e.g. GpuIncSubtensor
# turns the view/copy operations on numpy arrays
# into the same operations on cuda arrays.
self.do_type_checking(node)
if self.inplace: # convert bool to int
inplace = 1
else:
inplace = 0
x = inputs[0]
y = inputs[1]
z, = outputs
if self.set_instead_of_inc: # convert bool to int
op_is_set = 1
else:
op_is_set = 0
fail = sub['fail']
view_ndim = (node.inputs[0].ndim -
numpy.sum([not isinstance(idx, slice)
for idx in self.idx_list]))
copy_of_x = self.copy_of_x(x)
copy_input_if_necessary = """
if (%(inplace)s)
{
if (%(x)s != %(z)s)
{
Py_XDECREF(%(z)s);
Py_INCREF(%(x)s);
%(z)s = %(x)s;
}
}
else
{
Py_XDECREF(%(z)s);
%(z)s = %(copy_of_x)s;
}
""" % locals()
# get info needed to make zview: a view of %(z)s
helper_args = self.get_helper_c_code_args()
get_zview = Subtensor.helper_c_code(
node=node,
name=name,
inputs=outputs[:1] + inputs[2:],
outputs=outputs,
sub=sub,
idx_list=self.idx_list,
view_ndim=view_ndim,
** helper_args
)
# Make a view on the output, as we will write into it.
alloc_zview = self.make_view_array(z, view_ndim)
build_view = """
//TODO: give this Op a second output so that this view can be cached
//TODO: alternatively, fix the memory leak on failure
%(alloc_zview)s;
if (!zview)
{
%(fail)s;
}
""" % locals()
copy_into = self.copy_into("zview", y)
add_to_zview = self.add_to_zview(name, y, fail)
make_modification = """
if (%(op_is_set)s)
{
if (%(copy_into)s) // does broadcasting
{
Py_DECREF(zview);
%(fail)s;
}
}
else
{
%(add_to_zview)s
}
""" % locals()
return (self.decl_view() +
copy_input_if_necessary +
get_zview +
build_view +
make_modification +
"Py_DECREF(zview);"
)
def do_type_checking(self, node):
""" Should raise NotImplementedError if c_code does not support
the types involved in this node.
"""
if not isinstance(node.inputs[0].type, theano.tensor.TensorType):
raise NotImplementedError()
def c_code_cache_version(self):
hv = Subtensor.helper_c_code_cache_version()
if hv:
return (1, hv)
else:
return ()
def copy_of_x(self, x):
"""
:param x: a string giving the name of a C variable
pointing to an array
:return: C code expression to make a copy of x
Base class uses PyArrayObject *, subclasses may override for
different types of arrays.
"""
        # Parameters of PyArray_FromAny are:
# array
# dtype: we pass NULL to say any dtype is acceptable, so the existing
# dtype will be copied
# min_depth: we pass 0 to have this parameter ignored
# max_depth: we pass 0 to have this parameter ignored
# requirements: here we pass NPY_ARRAY_ENSURECOPY to force a copy
# context: this is almost always NULL, I'm not sure what it's used for
return """(PyArrayObject*)PyArray_FromAny(py_%(x)s, NULL, 0, 0,
NPY_ARRAY_ENSURECOPY, NULL)""" % locals()
def make_view_array(self, x, view_ndim):
"""
:param x: a string identifying an array to be viewed
:param view_ndim: a string specifying the number of dimensions
to have in the view
This doesn't need to actually set up the view with the
right indexing; we'll do that manually later.
"""
return """Py_INCREF(PyArray_DESCR(%(x)s));
zview = (PyArrayObject*)PyArray_NewFromDescr(
&PyArray_Type,
PyArray_DESCR(%(x)s),
%(view_ndim)s,
xview_dims, //PyArray_DIMS(%(x)s),
xview_strides, //PyArray_STRIDES(%(x)s),
PyArray_BYTES(%(x)s) + xview_offset, //PyArray_DATA(%(x)s),
PyArray_FLAGS(%(x)s),
NULL);
//This is needed for NumPy 1.5, but not 1.7.2
PyArray_UpdateFlags(zview, NPY_ARRAY_C_CONTIGUOUS| NPY_ARRAY_F_CONTIGUOUS);
""" % locals()
def get_helper_c_code_args(self):
""" Return a dictionary of arguments to pass to helper_c_code."""
return Subtensor.default_helper_c_code_args()
def copy_into(self, view, source):
"""
view: string, C code expression for an array
source: string, C code expression for an array
returns a C code expression to copy source into view, and
return 0 on success
"""
return """PyArray_CopyInto(%(view)s, %(source)s)""" % locals()
def add_to_zview(self, name, x, fail):
""" Return C code to add x to zview. Should DECREF zview if the
add fails."""
return """
PyArrayObject * add_rval = (PyArrayObject*)PyNumber_InPlaceAdd(
(PyObject*)zview, py_%(x)s);
if (add_rval)
{
assert (PyArray_Check((PyObject*)add_rval));
assert (PyArray_DATA(add_rval) == PyArray_DATA(zview));
Py_DECREF(add_rval);
}
else
{
Py_DECREF(zview);
%(fail)s;
}""" % locals()
def infer_shape(self, node, shapes):
return [shapes[0]]
def R_op(self, inputs, eval_points):
if eval_points[0] is None or eval_points[1] is None:
return [None]
# Again we ignore eval points for indices because incsubtensor is
# not differentiable wrt to those
return self(eval_points[0], eval_points[1], *inputs[2:],
**dict(return_list=True))
def connection_pattern(self, node):
rval = [[True], [True]]
for ipt in node.inputs[2:]:
rval.append([False])
return rval
def grad(self, inputs, grads):
g_output, = grads
x, y = inputs[:2]
idx_list = inputs[2:]
if x.dtype in theano.tensor.discrete_dtypes:
# The output dtype is the same as x
gx = x.zeros_like(dtype=theano.config.floatX)
if y.dtype in theano.tensor.discrete_dtypes:
gy = y.zeros_like(dtype=theano.config.floatX)
else:
gy = y.zeros_like()
elif x.dtype in theano.tensor.complex_dtypes:
raise NotImplementedError("No support for complex grad yet")
else:
if self.set_instead_of_inc:
gx = set_subtensor(
Subtensor(idx_list=self.idx_list)(g_output, *idx_list),
theano.tensor.zeros_like(y))
else:
gx = g_output
gy = Subtensor(idx_list=self.idx_list)(g_output, *idx_list)
gy = _sum_grad_over_bcasted_dims(y, gy)
return [gx, gy] + [DisconnectedType()()] * len(idx_list)
def _sum_grad_over_bcasted_dims(x, gx):
"""Sum of gx over dimensions to reproduce x.broadcastable.
This is useful to sum gradients over certain dimensions when
x has been broadcasted, and we need to sum the gradient contributions
over all duplications.
"""
if gx.broadcastable != x.broadcastable:
x_dim_added = gx.ndim - x.ndim
x_broad = (True,) * x_dim_added + x.broadcastable
assert sum(gx.broadcastable) < sum(x_broad)
axis_to_sum = []
for i in xrange(gx.ndim):
if gx.broadcastable[i] is False and x_broad[i] is True:
axis_to_sum.append(i)
elif (gx.broadcastable[i] is True and
x_broad[i] is False):
# This means that Theano was able to infer that
# gx.shape[i] is 1, so x.shape[i] is 1, but we
# didn't know it. It is fine.
pass
else:
assert gx.broadcastable[i] == x_broad[i]
gx = gx.sum(axis=axis_to_sum, keepdims=True)
if gx.ndim != x.ndim:
assert gx.ndim > x.ndim
for i in xrange(x_dim_added):
assert gx.broadcastable[i]
gx = gx.dimshuffle(*list(range(x_dim_added, gx.ndim)))
assert gx.broadcastable == x.broadcastable
return gx
#########################
# Advanced indexing
#########################
#
# Should reproduce numpy's behaviour, see url:
# docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
class AdvancedSubtensor1(Op):
"""Implement x[ilist] where ilist is a vector of integers."""
# sparse_grad doesn't go in here since it only affects the output
# of the grad() method.
__props__ = ()
_f16_ok = True
def __init__(self, sparse_grad=False):
self.sparse_grad = sparse_grad
def make_node(self, x, ilist):
x_ = theano.tensor.as_tensor_variable(x)
ilist_ = theano.tensor.as_tensor_variable(ilist)
if ilist_.type.dtype[:3] not in ('int', 'uin'):
raise TypeError('index must be integers')
if ilist_.type.ndim != 1:
raise TypeError('index must be vector')
if x_.type.ndim == 0:
raise TypeError('cannot index into a scalar')
bcast = (ilist_.broadcastable[0],) + x_.broadcastable[1:]
return Apply(self, [x_, ilist_], [TensorType(dtype=x.dtype,
broadcastable=bcast)()])
def perform(self, node, inp, out_):
x, i = inp
out, = out_
# Copy always implied by numpy advanced indexing semantic.
if out[0] is not None and out[0].shape == (len(i),) + x.shape[1:]:
o = out[0]
else:
o = None
# If i.dtype is more precise than numpy.intp (int32 on 32-bit machines,
# int64 on 64-bit machines), numpy may raise the following error:
# TypeError: array cannot be safely cast to required type.
# We need to check if values in i can fit in numpy.intp, because
# if they don't, that should be an error (no array can have that
# many elements on a 32-bit arch).
if i.dtype != numpy.intp:
i_ = theano._asarray(i, dtype=numpy.intp)
if not numpy.can_cast(i.dtype, numpy.intp):
# Check if there was actually an incorrect conversion
if numpy.any(i != i_):
raise IndexError(
'index contains values that are bigger '
'than the maximum array size on this system.', i)
i = i_
out[0] = x.take(i, axis=0, out=o)
def connection_pattern(self, node):
rval = [[True]]
for ipt in node.inputs[1:]:
rval.append([False])
return rval
def grad(self, inputs, grads):
global sparse_module_ref
x, ilist = inputs
gz, = grads
assert len(inputs) == 2
if self.sparse_grad:
if x.type.ndim != 2:
raise TypeError(
"AdvancedSubtensor1: you can't take the sparse grad"
" from a tensor with ndim != 2. ndim is " +
str(x.type.ndim))
if sparse_module_ref is None:
import theano.sparse as sparse_module_ref
rval1 = [sparse_module_ref.construct_sparse_from_list(x, gz,
ilist)]
else:
rval1 = [advanced_inc_subtensor1(x.zeros_like(), gz, ilist)]
return rval1 + [DisconnectedType()()] * (len(inputs) - 1)
def R_op(self, inputs, eval_points):
if eval_points[0] is None:
return [None]
return self.make_node(eval_points[0], *inputs[1:]).outputs
def infer_shape(self, node, ishapes):
x, ilist = ishapes
return [ilist + x[1:]]
def c_support_code(self):
# In some versions of numpy, NPY_MIN_INTP is defined as MIN_LONG,
# which is not defined. It should be NPY_MIN_LONG instead in that case.
return dedent("""\
#ifndef MIN_LONG
#define MIN_LONG NPY_MIN_LONG
#endif""")
def c_code(self, node, name, input_names, output_names, sub):
if self.__class__ is not AdvancedSubtensor1:
raise MethodNotDefined(
"c_code defined for AdvancedSubtensor1,"
" not for child class", type(self))
a_name, i_name = input_names[0], input_names[1]
output_name = output_names[0]
fail = sub['fail']
return """
PyArrayObject *indices;
int i_type = PyArray_TYPE(%(i_name)s);
if (i_type != NPY_INTP) {
// Cast %(i_name)s to NPY_INTP (expected by PyArray_TakeFrom),
// if all values fit.
if (!PyArray_CanCastSafely(i_type, NPY_INTP)) {
npy_int64 min_val, max_val;
PyObject* py_min_val = PyArray_Min(%(i_name)s, NPY_MAXDIMS,
NULL);
if (py_min_val == NULL) {
%(fail)s;
}
min_val = PyLong_AsLongLong(py_min_val);
Py_DECREF(py_min_val);
if (min_val == -1 && PyErr_Occurred()) {
%(fail)s;
}
PyObject* py_max_val = PyArray_Max(%(i_name)s, NPY_MAXDIMS,
NULL);
if (py_max_val == NULL) {
%(fail)s;
}
max_val = PyLong_AsLongLong(py_max_val);
Py_DECREF(py_max_val);
if (max_val == -1 && PyErr_Occurred()) {
%(fail)s;
}
if (min_val < NPY_MIN_INTP || max_val > NPY_MAX_INTP) {
PyErr_SetString(PyExc_IndexError,
"Index contains values "
"that are bigger than the maximum array "
"size on this system.");
%(fail)s;
}
}
indices = (PyArrayObject*) PyArray_Cast(%(i_name)s, NPY_INTP);
if (indices == NULL) {
%(fail)s;
}
}
else {
indices = %(i_name)s;
Py_INCREF(indices);
}
if (%(output_name)s != NULL) {
npy_intp nd, i, *shape;
nd = PyArray_NDIM(%(a_name)s) + PyArray_NDIM(indices) - 1;
if (PyArray_NDIM(%(output_name)s) != nd) {
Py_CLEAR(%(output_name)s);
}
else {
shape = PyArray_DIMS(%(output_name)s);
for (i = 0; i < PyArray_NDIM(indices); i++) {
if (shape[i] != PyArray_DIMS(indices)[i]) {
Py_CLEAR(%(output_name)s);
break;
}
}
if (%(output_name)s != NULL) {
for (; i < nd; i++) {
if (shape[i] != PyArray_DIMS(%(a_name)s)[
i-PyArray_NDIM(indices)+1]) {
Py_CLEAR(%(output_name)s);
break;
}
}
}
}
}
%(output_name)s = (PyArrayObject*)PyArray_TakeFrom(
%(a_name)s, (PyObject*)indices, 0, %(output_name)s, NPY_RAISE);
Py_DECREF(indices);
if (%(output_name)s == NULL) %(fail)s;
""" % locals()
def c_code_cache_version(self):
return (0, 1, 1)
advanced_subtensor1 = AdvancedSubtensor1()
class AdvancedIncSubtensor1(Op):
"""Increments a subtensor using advanced slicing (list of index)"""
__props__ = ('inplace', 'set_instead_of_inc')
def __init__(self, inplace=False, set_instead_of_inc=False):
self.inplace = inplace
self.set_instead_of_inc = set_instead_of_inc
if inplace:
self.destroy_map = {0: [0]}
def clone_inplace(self):
return self.__class__(
inplace=True,
set_instead_of_inc=self.set_instead_of_inc)
def __str__(self):
if self.inplace:
msg = "inplace"
else:
msg = "no_inplace"
if self.set_instead_of_inc:
msg += ",set"
else:
msg += ",inc"
return self.__class__.__name__ + "{%s}" % msg
def make_node(self, x, y, ilist):
x_ = theano.tensor.as_tensor_variable(x)
y_ = theano.tensor.as_tensor_variable(y)
ilist_ = theano.tensor.as_tensor_variable(ilist)
if ilist_.type.dtype[:3] not in ('int', 'uin'):
raise TypeError('index must be integers')
if ilist_.type.ndim != 1:
raise TypeError('index must be vector')
if x_.type.ndim == 0:
raise TypeError('cannot index into a scalar')
if y_.type.ndim > x_.type.ndim:
if self.set_instead_of_inc:
opname = 'set'
else:
opname = 'increment'
            raise TypeError(
                'cannot %s x subtensor with ndim=%s'
                ' by y with ndim=%s' % (
                    opname, x_.type.ndim, y_.type.ndim))
return Apply(self, [x_, y_, ilist_], [x_.type()])
def copy_of_x(self, x):
"""
:param x: a string giving the name of a C variable
pointing to an array
:return: C code expression to make a copy of x
Base class uses PyArrayObject *, subclasses may override for
different types of arrays.
"""
        # Parameters of PyArray_FromAny are:
# array
# dtype: we pass NULL to say any dtype is acceptable, so the existing
# dtype will be copied
# min_depth: we pass 0 to have this parameter ignored
# max_depth: we pass 0 to have this parameter ignored
# requirements: here we pass NPY_ARRAY_ENSURECOPY to force a copy
# context: this is almost always NULL, I'm not sure what it's used for
return """(PyArrayObject*)PyArray_FromAny(py_%(x)s, NULL, 0, 0,
NPY_ARRAY_ENSURECOPY, NULL)""" % locals()
def c_support_code(self):
from theano.gof.cutils import compile_cutils_code
return compile_cutils_code()
def c_code(self, node, name, input_names, output_names, sub):
numpy_ver = [int(n) for n in numpy.__version__.split('.')[:2]]
if bool(numpy_ver < [1, 8]):
raise NotImplementedError
x, y, idx = input_names
out = output_names[0]
fail = sub['fail']
inc_or_set = 1 - self.set_instead_of_inc
if self.inplace: # convert bool to int
inplace = 1
else:
inplace = 0
copy_of_x = self.copy_of_x(x)
return """
if (%(inplace)s)
{
if (%(x)s != %(out)s)
{
Py_XDECREF(%(out)s);
Py_INCREF(%(x)s);
%(out)s = %(x)s;
}
}
else
{
Py_XDECREF(%(out)s);
%(out)s = %(copy_of_x)s;
}
PyObject *arglist = Py_BuildValue("OOOi",%(out)s, %(idx)s, %(y)s, %(inc_or_set)d);
inplace_increment(NULL, arglist);
Py_XDECREF(arglist);
""" % locals()
def c_code_cache_version(self):
return (1,)
def perform(self, node, inp, out_):
# TODO opt to make this inplace
x, y, idx = inp
out, = out_
if not self.inplace:
x = x.copy()
# In Numpy, x[idx] += y doesn't work if the same index is present
# many times: it does it only once. Is it a bug? In any case, for
# this reason we implement our own 'inc' iteration.
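        # e.g. in plain numpy, a = zeros(3); a[[0, 0]] += 1 leaves a[0] == 1,
        # not 2, because the fancy-indexed in-place add is buffered.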
if self.set_instead_of_inc:
x[idx] = y
else:
increment = inplace_increment
if increment is None:
increment = self.inplace_increment1d_slow
increment(x, idx, y)
out[0] = x
def inplace_increment1d_slow(self, x, idx, y):
# If `y` has as many dimensions as `x`, then we want to iterate
# jointly on `x` and `y`. Otherwise, it means `y` should be
# broadcasted to fill all relevant rows of `x`.
assert y.ndim <= x.ndim # Should be guaranteed by `make_node`
if y.ndim == x.ndim:
if len(y) == 1:
# Allow broadcasting of y[0]
y_0 = y[0]
for i in idx:
x[i] += y_0
else:
assert len(y) == len(idx)
j = 0
for i in idx:
x[i] += y[j]
j += 1
else:
for i in idx:
x[i] += y
def infer_shape(self, node, ishapes):
x, y, ilist = ishapes
return [x]
def R_op(self, inputs, eval_points):
if None in eval_points[:2]:
return [None]
return self.make_node(eval_points[0], eval_points[1],
*inputs[2:]).outputs
def connection_pattern(self, node):
rval = [[True], [True], [False]]
return rval
def grad(self, inputs, grads):
g_output, = grads
x, y, idx_list = inputs
if x.dtype in theano.tensor.discrete_dtypes:
# The output dtype is the same as x
gx = x.zeros_like(dtype=theano.config.floatX)
if y.dtype in theano.tensor.discrete_dtypes:
gy = y.zeros_like(dtype=theano.config.floatX)
else:
gy = y.zeros_like()
elif x.dtype in theano.tensor.complex_dtypes:
raise NotImplementedError("No support for complex grad yet")
else:
if self.set_instead_of_inc:
gx = advanced_set_subtensor1(
g_output,
y.zeros_like(),
idx_list)
else:
gx = g_output
gy = advanced_subtensor1(g_output, idx_list)
gy = _sum_grad_over_bcasted_dims(y, gy)
return [gx, gy] + [DisconnectedType()()]
advanced_inc_subtensor1 = AdvancedIncSubtensor1()
advanced_set_subtensor1 = AdvancedIncSubtensor1(set_instead_of_inc=True)
def as_index_variable(idx):
if idx is None:
return NoneConst.clone()
if isinstance(idx, slice):
return make_slice(idx)
if isinstance(idx, gof.Variable) and isinstance(idx.type, SliceType):
return idx
idx = theano.tensor.as_tensor_variable(idx)
if idx.type.dtype[:3] not in ('int', 'uin'):
raise TypeError('index must be integers')
return idx
def adv_index_broadcastable_pattern(a, idx):
"""
This function is only used to determine the broadcast pattern for
AdvancedSubtensor output variable.
    For this, we make a fake ndarray and a fake idx and ask numpy for
    the output. From this, we find the output broadcast pattern.
"""
def replace_slice(v):
if isinstance(v, gof.Apply):
if len(v.outputs) != 1:
raise ValueError(
"It is ambiguous which output of a multi-output Op has"
" to be fetched.", v)
else:
v = v.outputs[0]
if NoneConst.equals(v):
return None
if isinstance(v.type, SliceType):
return slice(None, None)
return numpy.zeros((2,) * v.ndim, int)
newidx = tuple(map(replace_slice, idx))
# 2 - True = 1; 2 - False = 2
fakeshape = [2 - bc for bc in a.broadcastable]
retshape = numpy.empty(fakeshape)[newidx].shape
return tuple([dim == 1 for dim in retshape])
class AdvancedSubtensor(Op):
"""Return a subtensor copy, using advanced indexing.
"""
# Should be used by __getitem__ and __getslice__, as follow:
# AdvancedSubtensor()(self, *args),
# if args contains and advanced indexing pattern
__props__ = ()
def make_node(self, x, *index):
x = theano.tensor.as_tensor_variable(x)
index = tuple(map(as_index_variable, index))
bcast = adv_index_broadcastable_pattern(x, index)
return gof.Apply(self,
(x,) + index,
[theano.tensor.tensor(dtype=x.type.dtype,
broadcastable=bcast)])
def R_op(self, inputs, eval_points):
if eval_points[0] is None:
return [None]
return self.make_node(eval_points[0], *inputs[1:]).outputs
def infer_shape(self, node, ishapes):
# Really special case
if len(ishapes) == 3:
xshp, ind1shp, ind2shp = ishapes
if (len(xshp) == 2 and
ind1shp is not None and len(ind1shp) == 1 and
ind2shp is not None and len(ind2shp) == 1):
# if the graph is correct, we can assume ind1shp[0] and
# ind2shp[0] will have the same value.
# Try to return the one closest to the graph input.
if node.inputs[2].owner is None:
return [ind2shp]
else:
return [ind1shp]
# Default case, we don't know
raise theano.tensor.basic.ShapeError("case not implemented")
def perform(self, node, inputs, out_):
out, = out_
# TODO: in general, we need to re-pack the inputs into a valid
# index, just like subtensor
out[0] = inputs[0].__getitem__(inputs[1:])
if (numpy.__version__ <= '1.6.1' and
out[0].size != numpy.uint32(out[0].size)):
warnings.warn(
'Numpy versions 1.6.1 and below have a bug preventing '
'advanced indexing from correctly filling arrays that '
'are too big (>= 2^32 elements). It is possible that '
'out[0] (%s), with shape %s, is not correctly filled.'
% (out[0], out[0].shape))
def connection_pattern(self, node):
rval = [[True]]
for ipt in node.inputs[1:]:
rval.append([False])
return rval
def grad(self, inputs, grads):
gz, = grads
x = inputs[0]
rest = inputs[1:]
return [advanced_inc_subtensor(theano.tensor.zeros_like(x), gz,
*rest)] + \
[DisconnectedType()()] * len(rest)
advanced_subtensor = AdvancedSubtensor()
class AdvancedIncSubtensor(Op):
"""Increments a subtensor using advanced indexing.
    :note: We need the numpy.inplace_increment() function (currently in
        numpy's PR 326) to be able to make an inplace version of this
        op.
"""
__props__ = ("inplace", "set_instead_of_inc")
def __init__(self, inplace=False, set_instead_of_inc=False):
self.inplace = inplace
self.set_instead_of_inc = set_instead_of_inc
        # The assert is needed as in the past the first argument was
        # something else that was not used.
assert isinstance(inplace, bool)
if self.inplace:
raise NotImplementedError('In place computation is not'
' implemented')
self.allow_legacy_perform = False
def __str__(self):
return "%s{%s, %s}" % (self.__class__.__name__,
"inplace=" + str(self.inplace),
" set_instead_of_inc=" +
                               str(self.set_instead_of_inc))
def make_node(self, x, y, *inputs):
x = theano.tensor.as_tensor_variable(x)
y = theano.tensor.as_tensor_variable(y)
op = self
# If we are incrementing, but the increment compiled function is not
# available, we need to support legacy cases.
if not self.set_instead_of_inc and inplace_increment is None:
legacy_conditions = False
if x.ndim == 2 and y.ndim == 1 and len(inputs) == 2:
ind1 = theano.tensor.as_tensor_variable(inputs[0])
ind2 = theano.tensor.as_tensor_variable(inputs[1])
if ind1.ndim == 1 and ind2.ndim == 1:
if ind1.owner and isinstance(ind1.owner.op, ARange):
legacy_conditions = True
elif isinstance(ind1, Constant):
# Make sure no index is duplicated
val = ind1.value
if numpy.unique(val).size == val.size:
legacy_conditions = True
elif ind2.owner and isinstance(ind2.owner.op, ARange):
legacy_conditions = True
elif isinstance(ind2, Constant):
# Make sure no index is duplicated
val = ind2.value
if numpy.unique(val).size == val.size:
legacy_conditions = True
if legacy_conditions:
op = copy(self)
op.allow_legacy_perform = True
else:
raise NotImplementedError(
'Could not import inplace_increment, so some advanced '
'indexing features are disabled. They will be '
'available if you update NumPy to version 1.8 or '
'later, or to the latest development version. '
'You may need to clear the cache (theano-cache clear) '
'afterwards.')
new_inputs = []
for inp in inputs:
if isinstance(inp, (list, tuple)):
inp = theano.tensor.as_tensor_variable(inp)
new_inputs.append(inp)
return gof.Apply(op,
(x, y) + tuple(new_inputs),
[theano.tensor.tensor(
dtype=x.type.dtype,
broadcastable=x.type.broadcastable)])
def perform(self, node, inputs, out_):
# TODO: 1. opt to make this in place 2. generalize as described in
# AdvancedSubtensor's perform TODO
out, = out_
if not self.inplace:
out[0] = inputs[0].copy()
else:
out[0] = inputs[0]
if self.set_instead_of_inc:
out[0][inputs[2:]] = inputs[1]
elif inplace_increment is not None:
inplace_increment(out[0], tuple(inputs[2:]), inputs[1])
elif self.allow_legacy_perform:
out[0][inputs[2:]] += inputs[1]
else:
raise NotImplementedError(
'Could not import inplace_increment, so some advanced '
'indexing features are disabled. They will be '
'available if you update NumPy to version 1.8 or '
'later, or to the latest development version. '
'You may need to clear the cache (theano-cache clear) '
'afterwards.')
if (numpy.__version__ <= '1.6.1' and
out[0].size != numpy.uint32(out[0].size)):
warnings.warn(
'Numpy versions 1.6.1 and below have a bug preventing '
'advanced indexing from correctly filling arrays that '
'are too big (>= 2^32 elements). It is possible that '
'out[0] (%s), with shape %s, is not correctly filled.'
% (out[0], out[0].shape))
def infer_shape(self, node, ishapes):
return [ishapes[0]]
def connection_pattern(self, node):
rval = [[True], [True]]
for ipt in node.inputs[2:]:
rval.append([False])
return rval
def grad(self, inpt, output_gradients):
x, y = inpt[:2]
idxs = inpt[2:]
outgrad, = output_gradients
if x.dtype in theano.tensor.discrete_dtypes:
# The output dtype is the same as x
gx = x.zeros_like(dtype=theano.config.floatX)
if y.dtype in theano.tensor.discrete_dtypes:
gy = y.zeros_like(dtype=theano.config.floatX)
else:
gy = y.zeros_like()
elif x.dtype in theano.tensor.complex_dtypes:
raise NotImplementedError("No support for complex grad yet")
else:
if self.set_instead_of_inc:
gx = advanced_set_subtensor(
outgrad,
y.zeros_like(),
*idxs)
else:
gx = outgrad
gy = advanced_subtensor(outgrad, *idxs)
# Make sure to sum gy over the dimensions of y that have been
# added or broadcasted
gy = _sum_grad_over_bcasted_dims(y, gy)
return [gx, gy] + \
[DisconnectedType()() for _ in idxs]
def R_op(self, inputs, eval_points):
if None in eval_points[:2]:
return [None]
return self.make_node(eval_points[0], eval_points[1],
*inputs[2:]).outputs
advanced_inc_subtensor = AdvancedIncSubtensor()
advanced_set_subtensor = AdvancedIncSubtensor(set_instead_of_inc=True)
def take(a, indices, axis=None, mode='raise'):
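    """Symbolic counterpart of numpy.take: select elements of `a` along `axis`
    at the positions given by `indices`.
    Sketch of the dispatch below: 1-d indices reuse advanced_subtensor1 (after
    clipping or wrapping for mode='clip'/'wrap'); otherwise the indices are
    flattened, taken, and the result is reshaped to the expected shape.
    """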
a = theano.tensor.as_tensor_variable(a)
indices = theano.tensor.as_tensor_variable(indices)
# Reuse advanced_subtensor1 if indices is a vector
if indices.ndim == 1:
if mode == 'clip':
indices = clip(indices, 0, a.shape[axis] - 1)
elif mode == 'wrap':
indices = indices % a.shape[axis]
if axis is None:
return advanced_subtensor1(a.flatten(), indices)
elif axis == 0:
return advanced_subtensor1(a, indices)
else:
if axis < 0:
axis += a.ndim
assert axis >= 0
shuffle = list(range(a.ndim))
shuffle[0] = axis
shuffle[axis] = 0
return advanced_subtensor1(
a.dimshuffle(shuffle), indices).dimshuffle(shuffle)
if axis is None:
shape = indices.shape
ndim = indices.ndim
else:
# If axis is 0, don't generate a useless concatenation.
if axis == 0:
shape = theano.tensor.concatenate(
[indices.shape, a.shape[axis + 1:]])
else:
shape = theano.tensor.concatenate(
[a.shape[:axis], indices.shape, a.shape[axis + 1:]])
ndim = a.ndim + indices.ndim - 1
return take(a, indices.flatten(), axis, mode).reshape(shape, ndim)
| nke001/attention-lvcsr | libs/Theano/theano/tensor/subtensor.py | Python | mit | 84,816 |
__all__ = ["melfilterbank", "windowing", "spectrogram", "resample"]
import melfilterbank
import windowing
import spectrogram
import resample | twerkmeister/iLID | preprocessing/audio/__init__.py | Python | mit | 141 |
from models.team import Team
from models.tournament import Tournament
from models.tree import ProbableTournamentTree
import unittest
import pdb
class TestTeam(unittest.TestCase):
def setUp(self):
self.tournament = Tournament()
self.teams = self.tournament.teams
self.usa = Team.get_for_country(self.teams, 'United States')
self.brazil = Team.get_for_country(self.teams, 'Brazil')
def test_add_friendly_result(self):
self.usa.add_friendly_result(opponent=self.brazil)
self.usa.add_friendly_result(opponent=self.brazil, result=Team.DRAW)
self.usa.add_friendly_result(opponent=self.brazil, result=Team.LOSS)
self.assertIn(self.brazil, self.usa.friendly_results['wins'])
self.assertIn(self.brazil, self.usa.friendly_results['draws'])
self.assertIn(self.brazil, self.usa.friendly_results['losses'])
# try adding a friendly result for a team not in the tourney
prev_draws = len(self.usa.friendly_results['draws'])
self.usa.add_friendly_result(opponent=Team.get_for_country(self.teams, "Israel"), result=Team.DRAW)
self.assertEqual(prev_draws + 1, len(self.usa.friendly_results['draws']))
def test_base_score(self):
# these tests operate using some basic, commonly held assumptions (which could actually be a source of human error)
self.assertGreater(self.brazil.base_score, self.usa.base_score)
self.assertGreater(self.usa.base_score, Team.get_for_country(self.teams, "Ghana").base_score)
def test_get_for_country(self):
self.assertEqual(Team.get_for_country(self.teams, 'Brazil').country, 'Brazil')
def test_get_for_group(self):
self.assertIn(Team.get_for_country(self.teams, 'Brazil'), Team.get_for_group(self.teams, 'A'))
self.assertNotIn(Team.get_for_country(self.teams, 'Brazil'), Team.get_for_group(self.teams, 'B'))
class TestTournament(unittest.TestCase):
def setUp(self):
self.tournament = Tournament()
self.teams = self.tournament.teams
def test_get_group_winners(self):
winners = self.tournament.get_group_winners('A')
self.assertEqual(winners[0].country, 'Brazil')
self.assertEqual(winners[1].country, 'Mexico')
class TestTree(unittest.TestCase):
def setUp(self):
self.tournament = Tournament()
self.teams = self.tournament.teams
self.tree = ProbableTournamentTree(self.tournament)
def test_get_opponent_at_stage(self):
brazil = Team.get_for_country(self.teams, 'Brazil')
mexico = Team.get_for_country(self.teams, 'Mexico')
cameroon = Team.get_for_country(self.teams, 'Cameroon')
spain = Team.get_for_country(self.teams, 'Spain')
netherlands = Team.get_for_country(self.teams, 'Netherlands')
opp = self.tree.get_opponent_at_stage(brazil, 0)
self.assertEqual(opp.country, netherlands.country)
opp = self.tree.get_opponent_at_stage(brazil, 1)
self.assertEqual(opp.country, Team.get_for_country(self.teams, 'Colombia').country)
opp = self.tree.get_opponent_at_stage(brazil, 3)
self.assertEqual(opp.country, spain.country)
opp = self.tree.get_opponent_at_stage(netherlands, 0)
self.assertEqual(opp.country, brazil.country)
opp = self.tree.get_opponent_at_stage(mexico, 0)
self.assertEqual(opp.country, spain.country)
# test for a team that isn't in the probability tree
self.assertEqual(self.tree.get_opponent_at_stage(cameroon, 0).country, self.tree.get_opponent_at_stage(mexico, 0).country)
if __name__ == '__main__':
unittest.main() | steinbachr/world-cup-challenge | tests/test_models.py | Python | mit | 3,657 |
import pytest
from click.testing import CliRunner
from parkour import cli
import md5
def file_checksums_equal(file1, file2):
with open(file1) as f:
checksum1 = md5.new(f.read()).digest()
with open(file2) as f:
checksum2 = md5.new(f.read()).digest()
    return checksum1 == checksum2
def test_trimmed_output():
runner = CliRunner()
result = runner.invoke(cli.main, ['-a', 'fastq/s3_1.fastq.gz', '-b', 'fastq/s3_2.fastq.gz', '-u', 'trim'])
print(result.output)
assert file_checksums_equal('p.s3_1.trim.fastq', 'correct_output/p.s3_1.trim.fastq')
| buenrostrolab/proatac | tests/test_cli.py | Python | mit | 575 |
from flask import Flask
app = Flask(__name__)
@app.route('/')
def CMC():
return 'Welcome to the Container Master Class by Cerulean Canvas'
if __name__ == '__main__':
app.run(host='0.0.0.0')
| tarsoqueiroz/Docker | Study/Oreilly Kubernetes and Docker/s2d9/app.py | Python | mit | 198 |
from flask import (Flask, session, render_template, request, redirect,
url_for, make_response, Blueprint, current_app)
import requests
import json
from datetime import datetime, timedelta
from flask.ext.cors import CORS, cross_origin
bp = Blueprint('audioTag', __name__)
def create_app(blueprint=bp):
app = Flask(__name__)
app.register_blueprint(blueprint)
app.config.from_pyfile('config.py')
CORS(app, allow_headers=('Content-Type', 'Authorization'))
return app
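# create_app() wires the blueprint, config.py settings and CORS together; a WSGI
# server (or the __main__ block at the bottom) can simply call app = create_app().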
@bp.route('/', methods=['GET'])
@cross_origin()
def index():
# if auth_tok is in session already..
if 'auth_tok' in session:
auth_tok = session['auth_tok']
# check if it has expired
oauth_token_expires_in_endpoint = current_app.config.get(
'SWTSTORE_URL')+'/oauth/token-expires-in'
resp = requests.get(oauth_token_expires_in_endpoint)
expires_in = json.loads(resp.text)['expires_in']
        # added for backward compatibility. previous session stores did not
        # have an issued key
try:
check = datetime.utcnow() - auth_tok['issued']
if check > timedelta(seconds=expires_in):
# TODO: try to refresh the token before signing out the user
auth_tok = {'access_token': '', 'refresh_token': ''}
else:
"""access token did not expire"""
pass
# if issued key is not there, reset the session
except KeyError:
auth_tok = {'access_token': '', 'refresh_token': ''}
else:
auth_tok = {'access_token': '', 'refresh_token': ''}
# print 'existing tokens'
# print auth_tok
# payload = {'what': 'img-anno',
# 'access_token': auth_tok['access_token']}
# req = requests.get(current_app.config.get(
# 'SWTSTORE_URL', 'SWTSTORE_URL') + '/api/sweets/q', params=payload)
# sweets = req.json()
return render_template('index.html', access_token=auth_tok['access_token'],
refresh_token=auth_tok['refresh_token'],
config=current_app.config,
url=request.args.get('where'))
@bp.route('/authenticate', methods=['GET'])
def authenticateWithOAuth():
auth_tok = None
code = request.args.get('code')
# prepare the payload
payload = {
'scopes': 'email context',
'client_secret': current_app.config.get('APP_SECRET'),
'code': code,
'redirect_uri': current_app.config.get('REDIRECT_URI'),
'grant_type': 'authorization_code',
'client_id': current_app.config.get('APP_ID')
}
# token exchange endpoint
oauth_token_x_endpoint = current_app.config.get(
'SWTSTORE_URL', 'SWTSTORE_URL') + '/oauth/token'
resp = requests.post(oauth_token_x_endpoint, data=payload)
auth_tok = json.loads(resp.text)
if 'error' in auth_tok:
return make_response(auth_tok['error'], 200)
# set sessions etc
session['auth_tok'] = auth_tok
session['auth_tok']['issued'] = datetime.utcnow()
return redirect(url_for('audioTag.index'))
@bp.route('/admin', methods=['GET', 'POST'])
def admin():
if request.method == 'POST':
phone = request.form.get('usertel')
print repr(phone)
return render_template('admin.html')
@bp.route('/upload', methods=['GET', 'POST'])
def upload():
return render_template('upload_url.html')
if __name__ == '__main__':
app = create_app()
app.run(debug=app.config.get('DEBUG'),
host=app.config.get('HOST'))
| janastu/audio-tagger | servers/audioApp.py | Python | mit | 3,589 |
class Solution(object):
def titleToNumber(self, s):
"""
:type s: str
:rtype: int
"""
number = 0
for i in range(len(s)):
number = number * 26 + ord(s[i]) - ord('A') + 1
return number
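# Quick check of the base-26 conversion above:
#   Solution().titleToNumber("A") -> 1, "Z" -> 26, "AA" -> 27, "AB" -> 28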
| FeiZhan/Algo-Collection | answers/leetcode/Excel Sheet Column Number/Excel Sheet Column Number.py | Python | mit | 255 |
import unittest
class PilhaVaziaErro(Exception):
pass
class Pilha():
    def __init__(self):
        self.lista = []
    def empilhar(self, valor):
        self.lista.append(valor)
    def vazia(self):
        return not bool(self.lista)
    def topo(self):
        try:
            return self.lista[-1]
        except IndexError:
            raise PilhaVaziaErro
    def desempilhar(self):
        if self.lista:
            return self.lista.pop(-1)
        else:
            raise PilhaVaziaErro
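# Minimal usage sketch (LIFO semantics, exercised by the tests below):
#   p = Pilha(); p.empilhar('A'); p.topo()   # -> 'A'
#   p.desempilhar()                          # -> 'A'; raises PilhaVaziaErro when empty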
class PilhaTestes(unittest.TestCase):
def test_topo_lista_vazia(self):
pilha = Pilha()
self.assertTrue(pilha.vazia())
self.assertRaises(PilhaVaziaErro, pilha.topo)
def test_empilhar_um_elemento(self):
pilha = Pilha()
pilha.empilhar('A')
self.assertFalse(pilha.vazia())
self.assertEqual('A', pilha.topo())
def test_empilhar_dois_elementos(self):
pilha = Pilha()
pilha.empilhar('A')
pilha.empilhar('B')
self.assertFalse(pilha.vazia())
self.assertEqual('B', pilha.topo())
def test_desempilhar_pilha_vazia(self):
pilha = Pilha()
self.assertRaises(PilhaVaziaErro, pilha.desempilhar)
def test_desempilhar(self):
pilha = Pilha()
letras = 'ABCDE'
for letra in letras:
pilha.empilhar(letra)
for letra_em_ordem_reversa in reversed(letras):
letra_desempilhada = pilha.desempilhar()
self.assertEqual(letra_em_ordem_reversa, letra_desempilhada) | lucas2109/estruturaDados | pilha.py | Python | mit | 1,546 |
# -*- coding: utf-8 -*-
import unittest
import mock
class DynamicFieldsMixinTestCase(unittest.TestCase):
"""Test functionality of the DynamicFieldsMixin class."""
def test_restrict_dynamic_fields(self):
| chewse/djangorestframework-dynamic-fields | test_dynamicfields.py | Python | mit | 218 |
#####################################################
#
# A library for getting match information for a given team at a given event
# out of the Blue Alliance API
#
# Authors: Andrew Merrill and Jacob Bendicksen (Fall 2014)
#
# Requires the blueapi.py library
######################################################
#this doesn't currently fully work
import blueapi
teamNumber = 1540
eventKey = '2014pncmp'
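# Example usage sketch (assumes blueapi can fetch data for this team and event):
#   getTeamQualMatches(teamNumber, eventKey)   # -> sorted qualification match numbers
#   getMatchRedScore(1, eventKey)              # -> red alliance score for that match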
#returns a list of qualification matches that the team played in
def getTeamQualMatches(teamNumber, eventKey):
    matches = []
    teamMatches = blueapi.getTeamEventMatches(eventKey, teamNumber)
    for n in range(0, len(teamMatches)):
        if teamMatches[n]['comp_level'] == 'qm':
            matches.append(teamMatches[n]['match_number'])
    matches.sort()
    return matches
#returns a list of qualification matches played at the event
def getQualMatches(eventKey):
matches = []
for n in range(0,len(blueapi.getEventMatches(eventKey))):
if blueapi.getEventMatches(eventKey)[n]['comp_level'] == 'qm':
matches.append(blueapi.getEventMatches(eventKey)[n]['match_number'])
matches.sort()
return matches
#returns a list of quarterfinal matches that the team played in
def getTeamQFMatches(teamNumber, eventKey):
    matches = []
    teamMatches = blueapi.getTeamEventMatches(eventKey, teamNumber)
    for n in range(0, len(teamMatches)):
        if teamMatches[n]['comp_level'] == 'qf':
            matches.append(teamMatches[n]['match_number'])
    matches.sort()
    return matches
#returns a list of quarterfinal matches played at the event
def getQFMatches(eventKey):
matches = []
for n in range(0,len(blueapi.getEventMatches(eventKey))):
if blueapi.getEventMatches(eventKey)[n]['comp_level'] == 'qf':
matches.append(blueapi.getEventMatches(eventKey)[n]['match_number'])
matches.sort()
return matches
#returns a list of semifinal matches that the team played in
def getTeamSFMatches(teamNumber, eventKey):
    matches = []
    teamMatches = blueapi.getTeamEventMatches(eventKey, teamNumber)
    for n in range(0, len(teamMatches)):
        if teamMatches[n]['comp_level'] == 'sf':
            matches.append(teamMatches[n]['match_number'])
    matches.sort()
    return matches
#returns a list of semifinal matches played at the event
def getSFMatches(eventKey):
matches = []
for n in range(0,len(blueapi.getEventMatches(eventKey))):
if blueapi.getEventMatches(eventKey)[n]['comp_level'] == 'sf':
matches.append(blueapi.getEventMatches(eventKey)[n]['match_number'])
matches.sort()
return matches
#returns a list of finals matches that the team played in
def getTeamFMatches(teamNumber, eventKey):
    matches = []
    teamMatches = blueapi.getTeamEventMatches(eventKey, teamNumber)
    for n in range(0, len(teamMatches)):
        if teamMatches[n]['comp_level'] == 'f':
            matches.append(teamMatches[n]['match_number'])
    matches.sort()
    return matches
#returns a list of finals matches played at the event
def getFMatches(eventKey):
matches = []
for n in range(0,len(blueapi.getEventMatches(eventKey))):
if blueapi.getEventMatches(eventKey)[n]['comp_level'] == 'f':
matches.append(blueapi.getEventMatches(eventKey)[n]['match_number'])
matches.sort()
return matches
def getMatchRedScore(matchNumber,eventKey):
return blueapi.getEventMatches(eventKey)[matchNumber]['alliances']['red']['score']
def getMatchBlueScore(matchNumber,eventKey):
    return blueapi.getEventMatches(eventKey)[matchNumber]['alliances']['blue']['score']
def getMatchRedTeams(matchNumber,eventKey):
return blueapi.getEventMatches(eventKey)[matchNumber]['alliances']['red']['teams']
def getMatchBlueTeams(matchNumber,eventKey):
return blueapi.getEventMatches(eventKey)[matchNumber]['alliances']['blue']['teams']
def getMatchVideo(matchNumber,eventKey):
videos = blueapi.getEventMatches(eventKey)[matchNumber]['videos']
    for n in range(len(videos)):
if videos[n]['type'] == 'youtube':
return "youtu.be/" + videos[n]['key']
elif videos[n]['type'] == 'tba':
return videos[n]['key']
def getSetNumber(matchNumber,eventKey):
return blueapi.getEventMatches(eventKey)[matchNumber]['set_number']
def getTimeString(matchNumber,eventKey):
return blueapi.getEventMatches(eventKey)[matchNumber]['time_string']
def getMatchKey(matchNumber,eventKey):
return blueapi.getEventMatches(eventKey)[matchNumber]['key']
def getMatchTime(matchNumber,eventKey):
return blueapi.getEventMatches(eventKey)[matchNumber]['time']
def getScoreBreakdown(matchNumber,eventKey):
return blueapi.getEventMatches(eventKey)[matchNumber]['score_breakdown']
| jacobbendicksen/BlueAPI | matchinfo.py | Python | mit | 4,831 |
"""
WSGI config for gevsckio project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "gevsckio.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| arthuralvim/django-gevsckio-example | gevsckio/wsgi.py | Python | mit | 391 |
# -*- coding: utf-8 -*-
u'''\
:mod:`ecoxipy.pyxom` - Pythonic XML Object Model (PyXOM)
========================================================
This module implements the *Pythonic XML Object Model* (PyXOM) for the
representation of XML structures. To conveniently create PyXOM data structures
use :mod:`ecoxipy.pyxom.output`, for indexing use
:mod:`ecoxipy.pyxom.indexing` (if :attr:`Document.element_by_id` and
:attr:`Document.elements_by_name` are not enough for you).
.. _ecoxipy.pyxom.examples:
Examples
--------
XML Creation
^^^^^^^^^^^^
If you use the constructors be sure to supply the right data types, otherwise
use the :meth:`create` methods or use :class:`ecoxipy.MarkupBuilder`, which
take care of conversion.
>>> from ecoxipy import MarkupBuilder
>>> b = MarkupBuilder()
>>> document = Document.create(
... b.article(
... b.h1(
... b & '<Example>',
... data='to quote: <&>"\\''
... ),
... b.p(
... {'umlaut-attribute': u'äöüß'},
... 'Hello', Element.create('em', ' World',
... attributes={'count':1}), '!'
... ),
... None,
... b.div(
... Element.create('data-element', Text.create(u'äöüß <&>')),
... b(
... '<p attr="value">raw content</p>Some Text',
... b.br,
... (i for i in range(3))
... ),
... (i for i in range(3, 6))
... ),
... Comment.create('<This is a comment!>'),
... ProcessingInstruction.create('pi-target', '<PI content>'),
... ProcessingInstruction.create('pi-without-content'),
... b['foo:somexml'](
... b['foo:somexml']({'foo:bar': 1, 't:test': 2}),
... b['somexml']({'xmlns': ''}),
... b['bar:somexml'],
... {'xmlns:foo': 'foo://bar', 'xmlns:t': '',
... 'foo:bar': 'Hello', 'id': 'foo'}
... ),
... {'xmlns': 'http://www.w3.org/1999/xhtml/'}
... ), doctype_name='article', omit_xml_declaration=True
... )
Enforcing Well-Formedness
^^^^^^^^^^^^^^^^^^^^^^^^^
Using the :meth:`create` methods or passing the parameter
``check_well_formedness`` as :const:`True` to the appropriate constructors
enforces that the element, attribute and document type names are valid XML
names, and that processing instruction target and content as well as comment
contents conform to their constraints:
>>> from ecoxipy import XMLWellFormednessException
>>> def catch_not_well_formed(cls, *args, **kargs):
... try:
... return cls.create(*args, **kargs)
... except XMLWellFormednessException as e:
... print(e)
>>> t = catch_not_well_formed(Document, [], doctype_name='1nvalid-xml-name')
The value "1nvalid-xml-name" is not a valid XML name.
>>> t = catch_not_well_formed(Document, [], doctype_name='html', doctype_publicid='"')
The value "\\"" is not a valid document type public ID.
>>> t = catch_not_well_formed(Document, [], doctype_name='html', doctype_systemid='"\\'')
The value "\\"'" is not a valid document type system ID.
>>> t = catch_not_well_formed(Element, '1nvalid-xml-name', [], {})
The value "1nvalid-xml-name" is not a valid XML name.
>>> t = catch_not_well_formed(Element, 't', [], attributes={'1nvalid-xml-name': 'content'})
The value "1nvalid-xml-name" is not a valid XML name.
>>> t = catch_not_well_formed(ProcessingInstruction, '1nvalid-xml-name')
The value "1nvalid-xml-name" is not a valid XML processing instruction target.
>>> t = catch_not_well_formed(ProcessingInstruction, 'target', 'invalid PI content ?>')
The value "invalid PI content ?>" is not a valid XML processing instruction content because it contains "?>".
>>> t = catch_not_well_formed(Comment, 'invalid XML comment --')
The value "invalid XML comment --" is not a valid XML comment because it contains "--".
Navigation
^^^^^^^^^^
Use list semantics to retrieve child nodes and attribute access to retrieve
node information:
>>> print(document.doctype.name)
article
>>> print(document[0].name)
article
>>> print(document[0].attributes['xmlns'].value)
http://www.w3.org/1999/xhtml/
>>> print(document[0][-3].target)
pi-target
>>> document[0][1].parent is document[0]
True
>>> document[0][0] is document[0][1].previous and document[0][1].next is document[0][2]
True
>>> document.parent is None and document[0].previous is None and document[0].next is None
True
>>> document[0].attributes.parent is document[0]
True
You can retrieve iterators for navigation through the tree:
>>> list(document[0][0].ancestors)
[ecoxipy.pyxom.Element['article', {...}], ecoxipy.pyxom.Document[ecoxipy.pyxom.DocumentType('article', None, None), True, 'UTF-8']]
>>> list(document[0][1].children())
[ecoxipy.pyxom.Text('Hello'), ecoxipy.pyxom.Element['em', {...}], ecoxipy.pyxom.Text('!')]
>>> list(document[0][2].descendants())
[ecoxipy.pyxom.Element['data-element', {...}], ecoxipy.pyxom.Text('\\xe4\\xf6\\xfc\\xdf <&>'), ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Text('raw content'), ecoxipy.pyxom.Text('Some Text'), ecoxipy.pyxom.Element['br', {...}], ecoxipy.pyxom.Text('0'), ecoxipy.pyxom.Text('1'), ecoxipy.pyxom.Text('2'), ecoxipy.pyxom.Text('3'), ecoxipy.pyxom.Text('4'), ecoxipy.pyxom.Text('5')]
>>> list(document[0][-2].preceding_siblings)
[ecoxipy.pyxom.ProcessingInstruction('pi-target', '<PI content>'), ecoxipy.pyxom.Comment('<This is a comment!>'), ecoxipy.pyxom.Element['div', {...}], ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Element['h1', {...}]]
>>> list(document[0][2][-1].preceding)
[ecoxipy.pyxom.Text('4'), ecoxipy.pyxom.Text('3'), ecoxipy.pyxom.Text('2'), ecoxipy.pyxom.Text('1'), ecoxipy.pyxom.Text('0'), ecoxipy.pyxom.Element['br', {...}], ecoxipy.pyxom.Text('Some Text'), ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Element['data-element', {...}], ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Element['h1', {...}]]
>>> list(document[0][0].following_siblings)
[ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Element['div', {...}], ecoxipy.pyxom.Comment('<This is a comment!>'), ecoxipy.pyxom.ProcessingInstruction('pi-target', '<PI content>'), ecoxipy.pyxom.ProcessingInstruction('pi-without-content', None), ecoxipy.pyxom.Element['foo:somexml', {...}]]
>>> list(document[0][1][0].following)
[ecoxipy.pyxom.Element['em', {...}], ecoxipy.pyxom.Text('!'), ecoxipy.pyxom.Element['div', {...}], ecoxipy.pyxom.Comment('<This is a comment!>'), ecoxipy.pyxom.ProcessingInstruction('pi-target', '<PI content>'), ecoxipy.pyxom.ProcessingInstruction('pi-without-content', None), ecoxipy.pyxom.Element['foo:somexml', {...}]]
Descendants and children can also be retrieved in reverse document order:
>>> list(document[0][1].children(True)) == list(reversed(list(document[0][1].children())))
True
>>> list(document[0][2].descendants(True))
[ecoxipy.pyxom.Text('5'), ecoxipy.pyxom.Text('4'), ecoxipy.pyxom.Text('3'), ecoxipy.pyxom.Text('2'), ecoxipy.pyxom.Text('1'), ecoxipy.pyxom.Text('0'), ecoxipy.pyxom.Element['br', {...}], ecoxipy.pyxom.Text('Some Text'), ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Text('raw content'), ecoxipy.pyxom.Element['data-element', {...}], ecoxipy.pyxom.Text('\\xe4\\xf6\\xfc\\xdf <&>')]
Normally :meth:`~ContainerNode.descendants` traverses the XML tree depth-first,
but you can also use breadth-first traversal:
>>> list(document[0][2].descendants(depth_first=False))
[ecoxipy.pyxom.Element['data-element', {...}], ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Text('Some Text'), ecoxipy.pyxom.Element['br', {...}], ecoxipy.pyxom.Text('0'), ecoxipy.pyxom.Text('1'), ecoxipy.pyxom.Text('2'), ecoxipy.pyxom.Text('3'), ecoxipy.pyxom.Text('4'), ecoxipy.pyxom.Text('5'), ecoxipy.pyxom.Text('\\xe4\\xf6\\xfc\\xdf <&>'), ecoxipy.pyxom.Text('raw content')]
>>> list(document[0][2].descendants(True, False))
[ecoxipy.pyxom.Text('5'), ecoxipy.pyxom.Text('4'), ecoxipy.pyxom.Text('3'), ecoxipy.pyxom.Text('2'), ecoxipy.pyxom.Text('1'), ecoxipy.pyxom.Text('0'), ecoxipy.pyxom.Element['br', {...}], ecoxipy.pyxom.Text('Some Text'), ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Element['data-element', {...}], ecoxipy.pyxom.Text('raw content'), ecoxipy.pyxom.Text('\\xe4\\xf6\\xfc\\xdf <&>')]
:meth:`~ContainerNode.descendants` can also be given a depth limit:
>>> list(document[0].descendants(max_depth=2))
[ecoxipy.pyxom.Element['h1', {...}], ecoxipy.pyxom.Text('<Example>'), ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Text('Hello'), ecoxipy.pyxom.Element['em', {...}], ecoxipy.pyxom.Text('!'), ecoxipy.pyxom.Element['div', {...}], ecoxipy.pyxom.Element['data-element', {...}], ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Text('Some Text'), ecoxipy.pyxom.Element['br', {...}], ecoxipy.pyxom.Text('0'), ecoxipy.pyxom.Text('1'), ecoxipy.pyxom.Text('2'), ecoxipy.pyxom.Text('3'), ecoxipy.pyxom.Text('4'), ecoxipy.pyxom.Text('5'), ecoxipy.pyxom.Comment('<This is a comment!>'), ecoxipy.pyxom.ProcessingInstruction('pi-target', '<PI content>'), ecoxipy.pyxom.ProcessingInstruction('pi-without-content', None), ecoxipy.pyxom.Element['foo:somexml', {...}], ecoxipy.pyxom.Element['foo:somexml', {...}], ecoxipy.pyxom.Element['somexml', {...}], ecoxipy.pyxom.Element['bar:somexml', {...}]]
>>> list(document[0].descendants(depth_first=False, max_depth=2))
[ecoxipy.pyxom.Element['h1', {...}], ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Element['div', {...}], ecoxipy.pyxom.Comment('<This is a comment!>'), ecoxipy.pyxom.ProcessingInstruction('pi-target', '<PI content>'), ecoxipy.pyxom.ProcessingInstruction('pi-without-content', None), ecoxipy.pyxom.Element['foo:somexml', {...}], ecoxipy.pyxom.Text('<Example>'), ecoxipy.pyxom.Text('Hello'), ecoxipy.pyxom.Element['em', {...}], ecoxipy.pyxom.Text('!'), ecoxipy.pyxom.Element['data-element', {...}], ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Text('Some Text'), ecoxipy.pyxom.Element['br', {...}], ecoxipy.pyxom.Text('0'), ecoxipy.pyxom.Text('1'), ecoxipy.pyxom.Text('2'), ecoxipy.pyxom.Text('3'), ecoxipy.pyxom.Text('4'), ecoxipy.pyxom.Text('5'), ecoxipy.pyxom.Element['foo:somexml', {...}], ecoxipy.pyxom.Element['somexml', {...}], ecoxipy.pyxom.Element['bar:somexml', {...}]]
Namespaces
""""""""""
PyXOM supports the interpretation of `Namespaces in XML
<http://www.w3.org/TR/REC-xml-names/>`_. Namespace prefixes and local names are
calculated from :class:`Element` and :class:`Attribute` names:
>>> document[0].namespace_prefix == None
True
>>> print(document[0].local_name)
article
>>> print(document[0][-1].namespace_prefix)
foo
>>> print(document[0][-1].local_name)
somexml
>>> attr = document[0][-1].attributes['foo:bar']
>>> print(attr.namespace_prefix)
foo
>>> print(attr.local_name)
bar
The namespace URI is available as :attr:`Element.namespace_uri` and
:attr:`Attribute.namespace_uri` (both inherited from
:attr:`NamespaceNameMixin.namespace_uri`); these properties look up the
namespace prefix of the node in the parent elements (the result is cached,
so repeated retrieval is cheap):
>>> xhtml_namespace_uri = u'http://www.w3.org/1999/xhtml/'
>>> document[0][1].namespace_uri == xhtml_namespace_uri
True
>>> document[0][1][1].namespace_uri == xhtml_namespace_uri
True
>>> document[0][-1][0].namespace_uri == u'foo://bar'
True
>>> document[0][-1][0].attributes['foo:bar'].namespace_uri == u'foo://bar'
True
The namespace prefixes active on an element are available as the iterator
:attr:`Element.namespace_prefixes`:
>>> prefixes = sorted(list(document[0][-1][0].namespace_prefixes),
... key=lambda value: '' if value is None else value)
>>> prefixes[0] == None
True
>>> print(u', '.join(prefixes[1:]))
foo, t
>>> document[0][-1][0].get_namespace_uri(u'foo') == u'foo://bar'
True
>>> print(list(document[0].namespace_prefixes))
[None]
>>> document[0].get_namespace_uri(None) == u'http://www.w3.org/1999/xhtml/'
True
If an element or attribute is in no namespace, ``namespace_uri`` is
:const:`None`:
>>> document[0][-1][0].attributes['t:test'].namespace_uri == None
True
>>> document[0][-1][1].namespace_uri == None
True
If an undefined namespace prefix is used, the ``namespace_uri`` is
:const:`False`:
>>> document[0][-1][2].namespace_uri == False
True
Indexes
"""""""
On :class:`Document` instances :class:`ecoxipy.pyxom.indexing.IndexDescriptor`
attributes are defined for fast retrieval (after initially building the
index).
Use :attr:`~Document.element_by_id` to get elements by the value of their
``id`` attribute:
>>> document.element_by_id['foo'] is document[0][-1]
True
>>> 'bar' in document.element_by_id
False
:attr:`~Document.elements_by_name` allows retrieval of elements by their name:
>>> document[0][-1] in list(document.elements_by_name['foo:somexml'])
True
>>> 'html' in document.elements_by_name
False
Retrieve elements and attributes by their namespace data by using
:attr:`~Document.nodes_by_namespace`:
>>> from functools import reduce
>>> elements_and_attributes = set(
... filter(lambda node: isinstance(node, Element),
... document.descendants()
... )
... ).union(
... reduce(lambda x, y: x.union(y),
... map(lambda node: set(node.attributes.values()),
... filter(lambda node: isinstance(node, Element),
... document.descendants()
... )
... )
... )
... )
>>> set(document.nodes_by_namespace()) == set(filter(
... lambda node: node.namespace_uri is not False,
... elements_and_attributes
... ))
True
>>> set(document.nodes_by_namespace('foo://bar')) == set(filter(
... lambda node: node.namespace_uri == u'foo://bar',
... elements_and_attributes
... ))
True
>>> set(document.nodes_by_namespace(local_name='bar')) == set(filter(
... lambda node: node.local_name == u'bar',
... elements_and_attributes
... ))
True
>>> set(document.nodes_by_namespace('foo://bar', 'bar')) == set(filter(
... lambda node: node.namespace_uri == u'foo://bar' and node.local_name == u'bar',
... elements_and_attributes
... ))
True
Manipulation and Equality
^^^^^^^^^^^^^^^^^^^^^^^^^
All :class:`XMLNode` instances have attributes that allow for modification.
:class:`Document` and :class:`Element` instances additionally allow their
contents to be modified like Python sequences.
Duplication and Comparisons
"""""""""""""""""""""""""""
Use :meth:`XMLNode.duplicate` to create a deep copy of an XML node:
>>> document_copy = document.duplicate()
>>> document is document_copy
False
Equality and inequality recursively compare XML nodes:
>>> document == document_copy
True
>>> document != document_copy
False
Attributes
""""""""""
The attributes of an :class:`Element` instance are available as
:attr:`Element.attributes`. This is an :class:`Attributes` instance which
contains :class:`Attribute` instances:
>>> document_copy[0][0].attributes['data']
ecoxipy.pyxom.Attribute('data', 'to quote: <&>"\\'')
>>> old_data = document_copy[0][0].attributes['data'].value
>>> document_copy[0][0].attributes['data'].value = 'foo bar'
>>> document_copy[0][0].attributes['data'].value == u'foo bar'
True
>>> 'data' in document_copy[0][0].attributes
True
>>> document == document_copy
False
>>> document != document_copy
True
>>> document_copy[0][0].attributes['data'].value = old_data
>>> document == document_copy
True
>>> document != document_copy
False
:class:`Attributes` instances allow for creation of :class:`Attribute`
instances:
>>> somexml = document_copy[0][-1]
>>> foo_attr = somexml[0].attributes.create_attribute('foo:foo', 'bar')
>>> foo_attr is somexml[0].attributes['foo:foo']
True
>>> foo_attr == somexml[0].attributes['foo:foo']
True
>>> foo_attr != somexml[0].attributes['foo:foo']
False
>>> 'foo:foo' in somexml[0].attributes
True
>>> foo_attr.namespace_uri == u'foo://bar'
True
Attributes may be removed:
>>> somexml[0].attributes.remove(foo_attr)
>>> 'foo:foo' in somexml[0].attributes
False
>>> foo_attr.parent == None
True
>>> foo_attr.namespace_uri == False
True
You can also add an attribute to an element's attributes; if it already
belongs to another element's attributes, it is automatically moved:
>>> somexml[0].attributes.add(foo_attr)
>>> 'foo:foo' in somexml[0].attributes
True
>>> foo_attr.parent == somexml[0].attributes
True
>>> foo_attr.parent != somexml[0].attributes
False
>>> foo_attr.namespace_uri == u'foo://bar'
True
>>> del somexml[0].attributes['foo:foo']
>>> 'foo:foo' in somexml[0].attributes
False
>>> attr = document[0][-1].attributes['foo:bar']
>>> attr.name = 'test'
>>> attr.namespace_prefix is None
True
>>> print(attr.local_name)
test
Documents and Elements
""""""""""""""""""""""
>>> document_copy[0].insert(1, document_copy[0][0])
>>> document_copy[0][0] == document[0][1]
True
>>> document_copy[0][0] != document[0][1]
False
>>> document_copy[0][1] == document[0][0]
True
>>> document_copy[0][1] != document[0][0]
False
>>> p_element = document_copy[0][0]
>>> document_copy[0].remove(p_element)
>>> document_copy[0][0].name == u'h1' and p_element.parent is None
True
>>> p_element in document_copy[0]
False
>>> p_element.namespace_uri == False
True
>>> document_copy[0][0].append(p_element)
>>> document_copy[0][0][-1] is p_element
True
>>> p_element in document_copy[0][0]
True
>>> p_element.namespace_uri == u'http://www.w3.org/1999/xhtml/'
True
>>> p_element in document[0]
False
>>> document[0][1] in document_copy[0][0]
False
>>> document[0][1] is document_copy[0][0][-1]
False
>>> document[0][1] == document_copy[0][0][-1]
True
>>> document[0][1] != document_copy[0][0][-1]
False
>>> document[0][-1].name = 'foo'
>>> document[0][-1].namespace_prefix is None
True
>>> print(document[0][-1].local_name)
foo
Indexes and Manipulation
""""""""""""""""""""""""
If a document is modified, the indexes should be deleted. This can be done
by using the ``del`` statement on the index attribute or by calling
:meth:`~Document.delete_indexes`.
>>> del document_copy[0][-1]
>>> document_copy.delete_indexes()
>>> 'foo' in document_copy.element_by_id
False
>>> 'foo:somexml' in document_copy.elements_by_name
False
XML Serialization
^^^^^^^^^^^^^^^^^
First we remove the embedded non-HTML XML, as that element has multiple
attributes and the order in which they are rendered is nondeterministic, which
makes the output hard to compare:
>>> del document[0][-1]
Getting the Unicode value of a document yields the XML document serialized as
a Unicode string:
>>> document_string = u"""<!DOCTYPE article><article xmlns="http://www.w3.org/1999/xhtml/"><h1 data="to quote: <&>"'"><Example></h1><p umlaut-attribute="äöüß">Hello<em count="1"> World</em>!</p><div><data-element>äöüß <&></data-element><p attr="value">raw content</p>Some Text<br/>012345</div><!--<This is a comment!>--><?pi-target <PI content>?><?pi-without-content?></article>"""
>>> import sys
>>> if sys.version_info[0] < 3:
... unicode(document) == document_string
... else:
... str(document) == document_string
True
Getting the :func:`bytes` value of a :class:`Document` creates a byte string
of the serialized XML using the encoding specified on creation of the
instance, which defaults to "UTF-8":
>>> bytes(document) == document_string.encode('UTF-8')
True
:class:`XMLNode` instances can also generate SAX events; see
:meth:`XMLNode.create_sax_events` (note that the default
:class:`xml.sax.ContentHandler` is :class:`xml.sax.saxutils.XMLGenerator`,
which does not support comments):
>>> document_string = u"""<?xml version="1.0" encoding="UTF-8"?>\\n<article xmlns="http://www.w3.org/1999/xhtml/"><h1 data="to quote: <&>"'"><Example></h1><p umlaut-attribute="äöüß">Hello<em count="1"> World</em>!</p><div><data-element>äöüß <&></data-element><p attr="value">raw content</p>Some Text<br></br>012345</div><?pi-target <PI content>?><?pi-without-content ?></article>"""
>>> import sys
>>> from io import BytesIO
>>> string_out = BytesIO()
>>> content_handler = document.create_sax_events(out=string_out)
>>> string_out.getvalue() == document_string.encode('UTF-8')
True
>>> string_out.close()
You can also create indented XML when calling
:meth:`XMLNode.create_sax_events` by supplying the ``indent_incr`` argument:
>>> indented_document_string = u"""\\
... <?xml version="1.0" encoding="UTF-8"?>
... <article xmlns="http://www.w3.org/1999/xhtml/">
... <h1 data="to quote: <&>"'">
... <Example>
... </h1>
... <p umlaut-attribute="äöüß">
... Hello
... <em count="1">
... World
... </em>
... !
... </p>
... <div>
... <data-element>
... äöüß <&>
... </data-element>
... <p attr="value">
... raw content
... </p>
... Some Text
... <br></br>
... 012345
... </div>
... <?pi-target <PI content>?>
... <?pi-without-content ?>
... </article>
... """
>>> string_out = BytesIO()
>>> content_handler = document.create_sax_events(indent_incr=' ', out=string_out)
>>> string_out.getvalue() == indented_document_string.encode('UTF-8')
True
>>> string_out.close()
Classes
-------
Document
^^^^^^^^
.. autoclass:: Document
.. autoclass:: DocumentType
Element
^^^^^^^
.. autoclass:: Element
.. autoclass:: Attribute
.. autoclass:: Attributes
Other Nodes
^^^^^^^^^^^
.. autoclass:: Text
.. autoclass:: Comment
.. autoclass:: ProcessingInstruction
Base Classes
^^^^^^^^^^^^
.. autoclass:: XMLNode
.. autoclass:: ContainerNode
.. autoclass:: ContentNode
.. autoclass:: NamespaceNameMixin
'''
from ._common import XMLNode, ContainerNode
from ._attributes import NamespaceNameMixin, Attribute, Attributes
from ._document import DocumentType, Document
from ._element import Element
from ._content_nodes import ContentNode, Text, Comment, ProcessingInstruction
| IvIePhisto/ECoXiPy | ecoxipy/pyxom/__init__.py | Python | mit | 21,834 |
from abc import ABCMeta, abstractmethod
from threading import Timer
from ctx.uncertainty.measurers import clear_dobson_paddy
class Event:
def __init__(self, type, **kwargs):
self.type = type
self.properties = kwargs
class Observer:
    def update(self, event):
        # Observers receive the event passed in by Observable.notify.
        raise NotImplementedError("Not implemented")
class Observable:
def __init__(self):
self._observers = []
def register(self, observer):
self._observers.append(observer)
def notify(self, event):
event.source = self
for observer in self._observers:
observer.update(event)
class Widget(Observable, Observer, metaclass=ABCMeta):
    @abstractmethod
    def update(self, event):
        # Concrete widgets react here to events pushed by their generators.
        pass
    def __init__(self, type, status_name, *generators):
        super().__init__()
        self.type = type
        self.generators = generators
        self.status = None
        self.status_name = status_name
        # Subscribe to every generator so that property changes reach this widget.
        for generator in generators:
            generator.register(self)
    def get_property(self, type):
        # Return the last accepted value of the generator producing `type`.
        for generator in self.generators:
            if generator.type == type:
                return generator.property
class Generator(Observable):
def __init__(self, type, relevance, threshold, certainty_measurer=clear_dobson_paddy):
super().__init__()
self.certainty_measurer = certainty_measurer
self.property = None
self.type = type
self.relevance = relevance
self.threshold = threshold
    def generate(self):
        # Concrete generators return a dict, e.g.: {"value": 12, "accuracy": 0.9}
        raise NotImplementedError("Not implemented")
    def has_acceptable_certainty(self, new_property):
        # Combine the generator's relevance with the reading's accuracy; the
        # value is accepted only if the certainty level exceeds the threshold.
        certainty_level = self.certainty_measurer(self.relevance, new_property['accuracy'])
        is_acceptable = certainty_level > self.threshold
        return is_acceptable
    def start(self, delay=5):
        # Poll the source; notify observers only if the value changed and the
        # reading has acceptable certainty, then reschedule after `delay` seconds.
        new_property = self.generate()
        if new_property['value'] != self.property and self.has_acceptable_certainty(new_property):
self.property = new_property['value']
event = Event(self.type, property=new_property['value'])
super().notify(event)
timer_task = Timer(delay, lambda: self.start(delay), ())
timer_task.start()
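# The sketch below is illustrative only: `TemperatureGenerator` and
# `ThermostatWidget` are hypothetical subclasses invented for this example;
# only the classes defined above come from this module. The dict returned by
# `generate` follows the {"value": ..., "accuracy": ...} shape that
# `has_acceptable_certainty` expects.
if __name__ == '__main__':
    import random

    class TemperatureGenerator(Generator):
        def generate(self):
            # Pretend to read a sensor: a value plus the accuracy of the reading.
            return {'value': round(20 + random.random(), 2), 'accuracy': 0.95}

    class ThermostatWidget(Widget):
        def update(self, event):
            # Store the latest accepted value pushed by a generator.
            self.status = event.properties['property']

    temperature = TemperatureGenerator('temperature', relevance=0.9, threshold=0.5)
    thermostat = ThermostatWidget('thermostat', 'current_temperature', temperature)

    # One manual poll cycle; calling `temperature.start()` instead would keep
    # polling every `delay` seconds on a threading.Timer.
    reading = temperature.generate()
    if temperature.has_acceptable_certainty(reading):
        temperature.property = reading['value']
        temperature.notify(Event(temperature.type, property=reading['value']))
    print(thermostat.status, thermostat.get_property('temperature'))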
| fmca/ctxpy | ctx/toolkit.py | Python | mit | 2,280 |