text
stringlengths 4
1.02M
| meta
dict |
---|---|
from . import VecEnvWrapper
from baselines.common.running_mean_std import RunningMeanStd
import numpy as np
class VecNormalize(VecEnvWrapper):
"""
A vectorized wrapper that normalizes the observations
and returns from an environment.
"""
def __init__(self, venv, ob=True, ret=True, clipob=10., cliprew=10., gamma=0.99, epsilon=1e-8, eval=False):
VecEnvWrapper.__init__(self, venv)
self.ob_rms = RunningMeanStd(shape=self.observation_space.shape) if ob else None
self.ret_rms = RunningMeanStd(shape=()) if ret else None
self.clipob = clipob
self.cliprew = cliprew
self.ret = np.zeros(self.num_envs)
self.gamma = gamma
self.epsilon = epsilon
self.eval = eval
def step_wait(self):
obs, rews, news, infos = self.venv.step_wait()
self.ret = self.ret * self.gamma + rews
obs = self._obfilt(obs)
if self.ret_rms:
if not self.eval :
self.ret_rms.update(self.ret)
rews = np.clip(rews / np.sqrt(self.ret_rms.var + self.epsilon), -self.cliprew, self.cliprew)
self.ret[news] = 0.
return obs, rews, news, infos
def _obfilt(self, obs):
if self.ob_rms:
if not self.eval :
self.ob_rms.update(obs)
obs = np.clip((obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon), -self.clipob, self.clipob)
return obs
else:
return obs
def reset(self):
self.ret = np.zeros(self.num_envs)
obs = self.venv.reset()
return self._obfilt(obs)
def save(self,loc):
s = {}
if self.ret_rms :
s['ret_rms'] = self.ret_rms
if self.ob_rms :
s['ob_rms'] = self.ob_rms
import pickle
with open(loc+'.env_stat.pkl', 'wb') as f :
pickle.dump(s,f)
def load(self,loc):
import pickle
with open(loc+'.env_stat.pkl', 'rb') as f :
s = pickle.load(f)
if self.ret_rms :
self.ret_rms = s['ret_rms']
if self.ob_rms :
self.ob_rms = s['ob_rms']
| {
"content_hash": "f457987fa124ece910dc82c7b2acde4c",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 120,
"avg_line_length": 31.691176470588236,
"alnum_prop": 0.5591647331786543,
"repo_name": "dsbrown1331/CoRL2019-DREX",
"id": "50ef59e14360d959e5c81bc0818e10f5fd4c8db1",
"size": "2155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drex-mujoco/learner/baselines/baselines/common/vec_env/vec_normalize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "918"
},
{
"name": "HTML",
"bytes": "591968"
},
{
"name": "Jupyter Notebook",
"bytes": "1160596"
},
{
"name": "Python",
"bytes": "1438389"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import sys, os
import logging
from ginga import AstroImage, colors
import ginga.toolkit as ginga_toolkit
from ginga.canvas.CanvasObject import get_canvas_types
from ginga.util.toolbox import ModeIndicator
from ginga.misc import log
class FitsViewer(object):
def __init__(self, logger):
self.logger = logger
self.drawcolors = colors.get_colors()
self.dc = get_canvas_types()
from ginga.gw import Widgets, Viewers
self.app = Widgets.Application()
self.app.add_callback('shutdown', self.quit)
self.top = self.app.window("Ginga example2")
self.top.add_callback('closed', self.closed)
vbox = Widgets.VBox()
vbox.set_border_width(2)
vbox.set_spacing(1)
hbox = Widgets.HBox()
hbox.set_border_width(2)
hbox.set_spacing(4)
v1 = Viewers.CanvasView(logger)
v1.enable_autocuts('on')
v1.set_autocut_params('zscale')
v1.enable_autozoom('on')
v1.set_zoom_algorithm('rate')
v1.set_zoomrate(1.4)
v1.show_pan_mark(True)
v1.set_callback('drag-drop', self.drop_file)
v1.set_callback('none-move', self.motion)
v1.set_bg(0.2, 0.2, 0.2)
v1.ui_setActive(True)
self.viewer1 = v1
self._mi1 = ModeIndicator(v1)
bd = v1.get_bindings()
bd.enable_all(True)
# shared canvas between the two viewers
canvas = self.dc.DrawingCanvas()
canvas.enable_draw(True)
canvas.enable_edit(True)
canvas.set_drawtype('rectangle', color='lightblue')
self.canvas = canvas
# Tell viewer1 to use this canvas
v1.set_canvas(canvas)
self.drawtypes = canvas.get_drawtypes()
self.drawtypes.sort()
hbox.add_widget(v1, stretch=1)
# Add a second viewer viewing the same canvas
v2 = Viewers.CanvasView(logger)
v2.enable_autocuts('on')
v2.set_autocut_params('zscale')
v2.enable_autozoom('on')
v2.set_zoom_algorithm('rate')
v2.set_zoomrate(1.4)
v2.show_pan_mark(True)
v2.set_callback('drag-drop', self.drop_file)
v2.set_callback('none-move', self.motion)
v2.set_bg(0.2, 0.2, 0.2)
v2.ui_setActive(True)
self.viewer2 = v2
self._mi2 = ModeIndicator(v2)
# Tell viewer2 to use this same canvas
v2.set_canvas(canvas)
bd = v2.get_bindings()
bd.enable_all(True)
hbox.add_widget(v2, stretch=1)
vbox.add_widget(hbox, stretch=1)
self.readout = Widgets.Label("")
vbox.add_widget(self.readout, stretch=0)
hbox = Widgets.HBox()
hbox.set_border_width(2)
wdrawtype = Widgets.ComboBox()
for name in self.drawtypes:
wdrawtype.append_text(name)
index = self.drawtypes.index('rectangle')
wdrawtype.set_index(index)
wdrawtype.add_callback('activated', lambda w, idx: self.set_drawparams())
self.wdrawtype = wdrawtype
wdrawcolor = Widgets.ComboBox()
for name in self.drawcolors:
wdrawcolor.append_text(name)
index = self.drawcolors.index('lightblue')
wdrawcolor.set_index(index)
wdrawcolor.add_callback('activated', lambda w, idx: self.set_drawparams())
self.wdrawcolor = wdrawcolor
wfill = Widgets.CheckBox("Fill")
wfill.add_callback('activated', lambda w, tf: self.set_drawparams())
self.wfill = wfill
walpha = Widgets.SpinBox(dtype=float)
walpha.set_limits(0.0, 1.0, incr_value=0.1)
walpha.set_value(1.0)
walpha.set_decimals(2)
walpha.add_callback('value-changed', lambda w, val: self.set_drawparams())
self.walpha = walpha
wclear = Widgets.Button("Clear Canvas")
wclear.add_callback('activated', lambda w: self.clear_canvas())
wopen = Widgets.Button("Open File")
wopen.add_callback('activated', lambda w: self.open_file())
wquit = Widgets.Button("Quit")
wquit.add_callback('activated', lambda w: self.quit())
hbox.add_widget(Widgets.Label(''), stretch=1)
for w in (wopen, wdrawtype, wdrawcolor, wfill,
Widgets.Label('Alpha:'), walpha, wclear, wquit):
hbox.add_widget(w, stretch=0)
vbox.add_widget(hbox, stretch=0)
self.top.set_widget(vbox)
def set_drawparams(self):
index = self.wdrawtype.get_index()
kind = self.drawtypes[index]
index = self.wdrawcolor.get_index()
fill = self.wfill.get_state()
alpha = self.walpha.get_value()
coord = 'data'
params = { 'color': self.drawcolors[index],
'alpha': alpha,
'coord': coord,
}
if kind in ('circle', 'rectangle', 'polygon', 'triangle',
'righttriangle', 'ellipse', 'square', 'box'):
params['fill'] = fill
params['fillalpha'] = alpha
self.canvas.set_drawtype(kind, **params)
def clear_canvas(self):
self.canvas.deleteAllObjects()
def load_file(self, viewer, filepath):
image = AstroImage.AstroImage(logger=self.logger)
image.load_file(filepath)
viewer.set_image(image)
self.top.set_title(filepath)
def open_file(self):
res = Widgets.FileDialog.getOpenFileName(self, "Open FITS file",
".", "FITS files (*.fits)")
if isinstance(res, tuple):
fileName = res[0]
else:
fileName = str(res)
if len(fileName) != 0:
self.load_file(self.viewer1, fileName)
def drop_file(self, viewer, paths):
fileName = paths[0]
#print(fileName)
self.load_file(viewer, fileName)
def motion(self, viewer, button, data_x, data_y):
# Get the value under the data coordinates
try:
#value = viewer.get_data(data_x, data_y)
# We report the value across the pixel, even though the coords
# change halfway across the pixel
value = viewer.get_data(int(data_x+0.5), int(data_y+0.5))
except Exception:
value = None
fits_x, fits_y = data_x + 1, data_y + 1
# Calculate WCS RA
try:
# NOTE: image function operates on DATA space coords
image = viewer.get_image()
if image is None:
# No image loaded
return
ra_txt, dec_txt = image.pixtoradec(fits_x, fits_y,
format='str', coords='fits')
except Exception as e:
self.logger.warn("Bad coordinate conversion: %s" % (
str(e)))
ra_txt = 'BAD WCS'
dec_txt = 'BAD WCS'
text = "RA: %s DEC: %s X: %.2f Y: %.2f Value: %s" % (
ra_txt, dec_txt, fits_x, fits_y, value)
self.readout.set_text(text)
def closed(self, w):
self.logger.info("Top window closed.")
self.top = None
sys.exit()
def quit(self, *args):
self.logger.info("Attempting to shut down the application...")
if not self.top is None:
self.top.close()
sys.exit()
def main(options, args):
logger = log.get_logger("example2", options=options)
if options.toolkit is None:
logger.error("Please choose a GUI toolkit with -t option")
# decide our toolkit, then import
ginga_toolkit.use(options.toolkit)
viewer = FitsViewer(logger)
viewer.top.resize(700, 540)
if len(args) > 0:
viewer.load_file(viewer.viewer1, args[0])
viewer.top.show()
viewer.top.raise_()
try:
viewer.app.mainloop()
except KeyboardInterrupt:
print("Terminating viewer...")
if viewer.top is not None:
viewer.top.close()
if __name__ == "__main__":
# Parse command line options with nifty optparse module
from optparse import OptionParser
usage = "usage: %prog [options] cmd [args]"
optprs = OptionParser(usage=usage, version=('%%prog'))
optprs.add_option("--debug", dest="debug", default=False, action="store_true",
help="Enter the pdb debugger on main()")
optprs.add_option("--log", dest="logfile", metavar="FILE",
help="Write logging output to FILE")
optprs.add_option("--loglevel", dest="loglevel", metavar="LEVEL",
type='int', default=logging.INFO,
help="Set logging level to LEVEL")
optprs.add_option("--stderr", dest="logstderr", default=False,
action="store_true",
help="Copy logging also to stderr")
optprs.add_option("-t", "--toolkit", dest="toolkit", metavar="NAME",
default='qt',
help="Choose GUI toolkit (gtk|qt)")
optprs.add_option("--profile", dest="profile", action="store_true",
default=False,
help="Run the profiler on main()")
(options, args) = optprs.parse_args(sys.argv[1:])
# Are we debugging this?
if options.debug:
import pdb
pdb.run('main(options, args)')
# Are we profiling this?
elif options.profile:
import profile
print(("%s profile:" % sys.argv[0]))
profile.run('main(options, args)')
else:
main(options, args)
# END
| {
"content_hash": "b0b31ecd50c09875a389e3589fd87760",
"timestamp": "",
"source": "github",
"line_count": 299,
"max_line_length": 82,
"avg_line_length": 31.929765886287626,
"alnum_prop": 0.572745365036137,
"repo_name": "eteq/ginga",
"id": "bd233016416cb2689697ab9bb4f93ff443258d41",
"size": "9682",
"binary": false,
"copies": "1",
"ref": "refs/heads/staging",
"path": "ginga/examples/gw/shared_canvas.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2151"
},
{
"name": "JavaScript",
"bytes": "82354"
},
{
"name": "Python",
"bytes": "2725923"
}
],
"symlink_target": ""
} |
import multiprocessing
from subprocess import check_call
import sys
from helpers import LuigiTestCase
import mock
from psutil import Process
from time import sleep
import luigi
import luigi.date_interval
import luigi.notifications
from luigi.worker import TaskException, TaskProcess
from luigi.scheduler import DONE, FAILED
luigi.notifications.DEBUG = True
class WorkerTaskTest(LuigiTestCase):
def test_constructor(self):
class MyTask(luigi.Task):
# Test overriding the constructor without calling the superconstructor
# This is a simple mistake but caused an error that was very hard to understand
def __init__(self):
pass
def f():
luigi.build([MyTask()], local_scheduler=True)
self.assertRaises(TaskException, f)
def test_run_none(self):
def f():
luigi.build([None], local_scheduler=True)
self.assertRaises(TaskException, f)
class TaskProcessTest(LuigiTestCase):
def test_update_result_queue_on_success(self):
# IMO this test makes no sense as it tests internal behavior and have
# already broken once during internal non-changing refactoring
class SuccessTask(luigi.Task):
def on_success(self):
return "test success expl"
task = SuccessTask()
result_queue = multiprocessing.Queue()
task_process = TaskProcess(task, 1, result_queue)
with mock.patch.object(result_queue, 'put') as mock_put:
task_process.run()
mock_put.assert_called_once_with((task.task_id, DONE, "test success expl", [], None))
def test_update_result_queue_on_failure(self):
# IMO this test makes no sense as it tests internal behavior and have
# already broken once during internal non-changing refactoring
class FailTask(luigi.Task):
def run(self):
raise BaseException("Uh oh.")
def on_failure(self, exception):
return "test failure expl"
task = FailTask()
result_queue = multiprocessing.Queue()
task_process = TaskProcess(task, 1, result_queue)
with mock.patch.object(result_queue, 'put') as mock_put:
task_process.run()
mock_put.assert_called_once_with((task.task_id, FAILED, "test failure expl", [], []))
def test_cleanup_children_on_terminate(self):
"""
Subprocesses spawned by tasks should be terminated on terminate
"""
class HangingSubprocessTask(luigi.Task):
def run(self):
python = sys.executable
check_call([python, '-c', 'while True: pass'])
task = HangingSubprocessTask()
queue = mock.Mock()
worker_id = 1
task_process = TaskProcess(task, worker_id, queue)
task_process.start()
parent = Process(task_process.pid)
while not parent.children():
# wait for child process to startup
sleep(0.01)
[child] = parent.children()
task_process.terminate()
child.wait(timeout=1.0) # wait for terminate to complete
self.assertFalse(parent.is_running())
self.assertFalse(child.is_running())
| {
"content_hash": "3ac854ce4bd1d46252c2207c424f952e",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 97,
"avg_line_length": 32.59,
"alnum_prop": 0.6333231052470083,
"repo_name": "humanlongevity/luigi",
"id": "02c9627f3279c0a842d8489aa487f6c7c39824f3",
"size": "3862",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/worker_task_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2162"
},
{
"name": "HTML",
"bytes": "34521"
},
{
"name": "JavaScript",
"bytes": "82786"
},
{
"name": "Python",
"bytes": "1355595"
},
{
"name": "Shell",
"bytes": "2627"
}
],
"symlink_target": ""
} |
"""utils for ipython notebooks"""
from __future__ import division
import os, json
from ut.pstr.to import file as str_to_file
import re
from ut.pfile.iter import recursive_file_walk_iterator_with_filepath_filter
default_link_root = 'http://localhost:8888/notebooks/'
def _link_root_from_port(port):
return 'http://localhost:{}/notebooks/'.format(port)
def _mk_link_root(x):
if isinstance(x, int):
return _link_root_from_port(x)
elif x.startswith('http'):
return x
else:
return 'http://' + x
def max_common_prefix(a):
"""
Given a list of strings, returns the longest common prefix
:param a: list-like of strings
:return: the smallest common prefix of all strings in a
"""
if not a:
return ''
# Note: Try to optimize by using a min_max function to give me both in one pass. The current version is still faster
s1 = min(a)
s2 = max(a)
for i, c in enumerate(s1):
if c != s2[i]:
return s1[:i]
return s1
def all_table_of_contents_html_from_notebooks(
notebooks,
title=None,
link_root=default_link_root,
recursive: bool = False,
save_to_file='table_of_contents.html',
):
"""
Make an html page containing the table of contents of the listed notebooks, with
links that will open the notebook and bring you to that section.
Just wow.
:param notebooks: List of notebook filepaths, or folder that contains notebooks.
:param title: Title of html page
:param link_root: Root url to use for links
:param recursive: Whether to explore subfolders recursively
:param save_to_file: File where the html should be saved.
:return:
"""
folder = None
if isinstance(notebooks, str) and os.path.isdir(notebooks):
folder = os.path.abspath(os.path.expanduser(notebooks))
notebooks = ipynb_filepath_list(folder, recursive=recursive)
title = title or folder
s = '<b>{}</b><br><br>\n\n'.format(title)
elif title:
s = '<b>{}</b><br><br>\n\n'.format(title)
else:
s = '' ''
for f in notebooks:
if folder is not None:
_link_root = os.path.join(
link_root, os.path.dirname(f[(len(folder) + 1) :])
)
else:
_link_root = link_root
ss = table_of_contents_html_from_notebook(f, link_root=_link_root)
if ss is not None:
s += ss + '<br>\n\n'
if save_to_file is None:
return s
else:
if not isinstance(save_to_file, str):
save_to_file = 'table_of_contents.html'
str_to_file(s, save_to_file)
def _mk_link_html(filename, link_root=default_link_root):
link_root = _mk_link_root(link_root)
url = os.path.join(link_root, filename)
if filename.endswith('.ipynb'):
filename = filename[: -len('.ipynb')]
return '<b><a href="{}">{}</a></b>'.format(url, filename)
def _append_link_root_to_all_pound_hrefs(html, link_root=default_link_root):
return re.sub(r'href="(#[^"]+)"', r'href="{}\1"'.format(link_root), html)
def table_of_contents_html_from_notebook(ipynb_filepath, link_root=default_link_root):
filename = os.path.basename(ipynb_filepath)
d = json.load(open(ipynb_filepath)).get('cells', None)
if (
d is not None
and isinstance(d, list)
and len(d) > 0
and 'source' in d[0]
and len(d[0]['source']) >= 2
):
if d[0]['source'][0] == '# Table of Contents\n':
link_root = _mk_link_root(link_root)
link_root_for_file = os.path.join(link_root, filename)
return (
_mk_link_html(filename, link_root_for_file)
+ '\n\n'
+ _append_link_root_to_all_pound_hrefs(
d[0]['source'][1], link_root_for_file
)
)
def ipynb_filepath_list(root_folder='.', recursive=False):
root_folder = os.path.expanduser(root_folder)
if recursive:
return recursive_file_walk_iterator_with_filepath_filter(
root_folder, filt=lambda x: x.endswith('.ipynb')
)
else:
return map(
lambda x: os.path.abspath(os.path.join(root_folder, x)),
filter(lambda x: x.endswith('.ipynb'), os.listdir(root_folder)),
)
if __name__ == '__main__':
import argh
argh.dispatch_command(all_table_of_contents_html_from_notebooks)
# parser = argh.ArghParser()
# parser.add_commands([all_table_of_contents_html_from_notebooks,
# table_of_contents_html_from_notebook,
# ipynb_filepath_list])
# parser.dispatch()
| {
"content_hash": "a22f764df8f1a2e70fd8e9bbdf533938",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 120,
"avg_line_length": 31.816326530612244,
"alnum_prop": 0.5950395552704725,
"repo_name": "thorwhalen/ut",
"id": "68ec1edb005b2545b6253d8bb2e71604e1548958",
"size": "4677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ut/util/ipython.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1174"
},
{
"name": "Python",
"bytes": "2258941"
}
],
"symlink_target": ""
} |
from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
pre_meta = MetaData()
post_meta = MetaData()
movie = Table('movie', pre_meta,
Column('id', INTEGER, primary_key=True, nullable=False),
Column('name', VARCHAR(length=64)),
Column('avguserscore', VARCHAR(length=10)),
Column('cast', VARCHAR(length=300)),
Column('genre', VARCHAR(length=64)),
Column('rating', VARCHAR(length=10)),
Column('rlsdate', VARCHAR(length=10)),
Column('runtime', VARCHAR(length=10)),
Column('score', VARCHAR(length=10)),
Column('summary', VARCHAR(length=300)),
Column('url', VARCHAR(length=500)),
Column('api_rate', VARCHAR(length=300)),
Column('api_review', VARCHAR(length=300)),
Column('api_type', VARCHAR(length=30)),
)
movie = Table('movie', post_meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('api_collection', String(length=30)),
Column('api_review', String(length=300)),
Column('api_rate', String(length=300)),
Column('name', String(length=64)),
Column('url', String(length=500)),
Column('rlsdate', String(length=10)),
Column('score', String(length=10)),
Column('summary', String(length=300)),
Column('rating', String(length=10)),
Column('cast', String(length=300)),
Column('genre', String(length=64)),
Column('avguserscore', String(length=10)),
Column('runtime', String(length=10)),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
pre_meta.tables['movie'].columns['api_type'].drop()
post_meta.tables['movie'].columns['api_collection'].create()
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
pre_meta.tables['movie'].columns['api_type'].create()
post_meta.tables['movie'].columns['api_collection'].drop()
| {
"content_hash": "781df1ac45ff977618c700b1ea52cad0",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 68,
"avg_line_length": 35.89473684210526,
"alnum_prop": 0.66911045943304,
"repo_name": "fixpy/watched_movies",
"id": "e72ede03c0c6cfc723eb68043d20b54fe59d1500",
"size": "2046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "db_repository/versions/009_migration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2544"
},
{
"name": "HTML",
"bytes": "11619"
},
{
"name": "JavaScript",
"bytes": "29094"
},
{
"name": "Python",
"bytes": "36751"
},
{
"name": "Shell",
"bytes": "537"
}
],
"symlink_target": ""
} |
import angr
######################################
# openlog
######################################
class closelog(angr.SimProcedure):
#pylint:disable=arguments-differ
def run(self, ident, option, facility):
# A stub for closelog that does not do anything yet.
return
| {
"content_hash": "72f9aa56909c64209d1a013a26217e81",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 60,
"avg_line_length": 24.583333333333332,
"alnum_prop": 0.5050847457627119,
"repo_name": "iamahuman/angr",
"id": "80e0458d13c8d29eeae0e46c669e7507bfdaf1ff",
"size": "296",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "angr/procedures/libc/closelog.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6375"
},
{
"name": "C++",
"bytes": "39420"
},
{
"name": "Dockerfile",
"bytes": "493"
},
{
"name": "Makefile",
"bytes": "620"
},
{
"name": "Python",
"bytes": "4842037"
}
],
"symlink_target": ""
} |
from flexget import options
from flexget.event import event
from flexget.manager import Session
from flexget.terminal import TerminalTable, console, table_parser
from . import db
def do_cli(manager, options):
if options.action == 'clear':
num = db.clear_entries(options.task, all=True)
console('%s entries cleared from backlog.' % num)
else:
header = ['Title', 'Task', 'Expires']
table_data = []
with Session() as session:
entries = db.get_entries(options.task, session=session)
for entry in entries:
table_data.append(
[entry.title, entry.task, entry.expire.strftime('%Y-%m-%d %H:%M')]
)
table = TerminalTable(*header, table_type=options.table_type)
for row in table_data:
table.add_row(*row)
console(table)
@event('options.register')
def register_options():
parser = options.register_command(
'backlog', do_cli, help='View or clear entries from backlog plugin', parents=[table_parser]
)
parser.add_argument(
'action',
choices=['list', 'clear'],
help='Choose to show items in backlog, or clear all of them',
)
parser.add_argument('task', nargs='?', help='Limit to specific task (if supplied)')
| {
"content_hash": "97f28bbe3cce3b177c1d871df9db16d3",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 99,
"avg_line_length": 34.526315789473685,
"alnum_prop": 0.6158536585365854,
"repo_name": "Flexget/Flexget",
"id": "5ccf3f272833ea8c8bc51499f7670d8272804b62",
"size": "1312",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "flexget/components/backlog/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1237"
},
{
"name": "HTML",
"bytes": "82565"
},
{
"name": "JavaScript",
"bytes": "263723"
},
{
"name": "Python",
"bytes": "3797883"
},
{
"name": "SCSS",
"bytes": "11875"
},
{
"name": "SRecode Template",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "1568"
}
],
"symlink_target": ""
} |
from app.airport.sale_airports_parser import build_airports_list
from app.common.http_methods_unittests import get_request
from app.common.target_urls import SALE_1_AIRPORT_TEST_URL, SALE_2_AIRPORTS_TEST_URL
import unittest
class TestParserStaff(unittest.TestCase):
def test_sale_1_airport(self):
html_page = get_request(SALE_1_AIRPORT_TEST_URL)
airports_list = build_airports_list(html_page)
self.assertEqual(1, len(airports_list))
first_airport = airports_list[0]
self.assertEqual(first_airport['airport_id'], 128773)
self.assertEqual(first_airport['cash'], 0)
self.assertEqual(first_airport['capacity'], 27)
self.assertEqual(first_airport['reputation'], 188124)
self.assertEqual(first_airport['price'], 724145248)
self.assertEqual(first_airport['vendor'], "Max333")
def test_sale_2_airports(self):
html_page = get_request(SALE_2_AIRPORTS_TEST_URL)
airports_list = build_airports_list(html_page)
self.assertEqual(2, len(airports_list))
first_airport = airports_list[0]
self.assertEqual(first_airport['airport_id'], 106982)
self.assertEqual(first_airport['cash'], 1092038031)
self.assertEqual(first_airport['capacity'], 41)
self.assertEqual(first_airport['reputation'], 1784487)
self.assertEqual(first_airport['price'], 8332773945)
self.assertEqual(first_airport['vendor'], "Mix456")
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "d9c8bfde08cc07b89cec46e948639ffa",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 84,
"avg_line_length": 40.75675675675676,
"alnum_prop": 0.6836870026525199,
"repo_name": "egenerat/flight-manager",
"id": "d4334a6370be27807a2db6ade7c98825421d0104",
"size": "1532",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "app/test/airport/test_sale_airport.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "37815"
},
{
"name": "Dockerfile",
"bytes": "100"
},
{
"name": "HTML",
"bytes": "89501"
},
{
"name": "JavaScript",
"bytes": "94035"
},
{
"name": "Python",
"bytes": "4946659"
},
{
"name": "Shell",
"bytes": "930"
}
],
"symlink_target": ""
} |
def getcallargs(func, *args, **kwds):
'''Get the actual value bounded to each formal parameter when calling
`func(*args,**kwds)`.
It works for methods too (bounded, unbounded, staticmethods, classmethods).
@returns: `(bindings, missing_args)`, where:
- `bindings` is a mapping of every formal parameter (including *varargs
and **kwargs if present) of the function to the respective bounded value.
- `missing_args` is a tuple of the formal parameters whose value was not
provided (i.e. using the respective default value)
Examples::
>>> def func(a, b='foo', c=None, *x, **y):
... pass
>>> getcallargs(func, 5)
({'a': 5, 'y': {}, 'c': None, 'b': 'foo', 'x': ()}, ('b', 'c'))
>>> getcallargs(func, 5, 'foo')
({'a': 5, 'y': {}, 'c': None, 'b': 'foo', 'x': ()}, ('c',))
>>> getcallargs(func, 5, c=['a', 'b'])
({'a': 5, 'y': {}, 'c': ['a', 'b'], 'b': 'foo', 'x': ()}, ('b',))
>>> getcallargs(func, 5, 6, 7, 8)
({'a': 5, 'y': {}, 'c': 7, 'b': 6, 'x': (8,)}, ())
>>> getcallargs(func, 5, z=3, b=2)
({'a': 5, 'y': {'z': 3}, 'c': None, 'b': 2, 'x': ()}, ('c',))
'''
arg2value = {}
f_name = func.func_name
spec_args, varargs, varkw, defaults = getargspec(func)
# handle methods
if ismethod(func):
# implicit 'self' (or 'cls' for classmethods) argument: func.im_self
if func.im_self is not None:
arg2value[spec_args.pop(0)] = func.im_self
elif not args or not isinstance(args[0], func.im_class):
got = args and ('%s instance' % type(args[0]).__name__) or 'nothing'
raise TypeError('unbound method %s() must be called with %s instance '
'as first argument (got %s instead)' %
(f_name, func.im_class.__name__, got))
num_args = len(args)
has_kwds = bool(kwds)
num_spec_args = len(spec_args)
num_defaults = len(defaults or ())
# get the expected arguments passed positionally
arg2value.update(izip(spec_args,args))
# get the expected arguments passed by name
for arg in spec_args:
if arg in kwds:
if arg in arg2value:
raise TypeError("%s() got multiple values for keyword "
"argument '%s'" % (f_name,arg))
else:
arg2value[arg] = kwds.pop(arg)
# fill in any missing values with the defaults
missing = []
if defaults:
for arg,val in izip(spec_args[-num_defaults:],defaults):
if arg not in arg2value:
arg2value[arg] = val
missing.append(arg)
# ensure that all required args have a value
for arg in spec_args:
if arg not in arg2value:
num_required = num_spec_args - num_defaults
raise TypeError('%s() takes at least %d %s argument%s (%d given)'
% (f_name, num_required,
has_kwds and 'non-keyword ' or '',
num_required>1 and 's' or '', num_args))
# handle any remaining named arguments
if varkw:
arg2value[varkw] = kwds
elif kwds:
raise TypeError("%s() got an unexpected keyword argument '%s'" %
(f_name, iter(kwds).next()))
# handle any remaining positional arguments
if varargs:
if num_args > num_spec_args:
arg2value[varargs] = args[-(num_args-num_spec_args):]
else:
arg2value[varargs] = ()
elif num_spec_args < num_args:
raise TypeError('%s() takes %s %d argument%s (%d given)' %
(f_name, defaults and 'at most' or 'exactly',
num_spec_args, num_spec_args>1 and 's' or '', num_args))
return arg2value, tuple(missing)
if __name__ == '__main__':
import doctest
doctest.testmod()
| {
"content_hash": "7f9e2b1eb8f211ee135f8260995ff1b2",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 84,
"avg_line_length": 43.96666666666667,
"alnum_prop": 0.5241344452868335,
"repo_name": "ActiveState/code",
"id": "7f6a69a0087300bd03c6fe52d86aee0a346352ef",
"size": "3957",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/551779_Introspecting_call_arguments/recipe-551779.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
class AssopyApphook(CMSApp):
name = _("Assopy")
urls = ["assopy.urls"]
class ConferenceApphook(CMSApp):
name = _("Conference")
urls = ["conference.urls"]
class P3Apphook(CMSApp):
name = _("P3")
urls = ["p3.urls"]
class BlogApphook(CMSApp):
name = _("Blog")
urls = ["microblog.urls"]
apphook_pool.register(AssopyApphook)
apphook_pool.register(ConferenceApphook)
apphook_pool.register(P3Apphook)
apphook_pool.register(BlogApphook)
| {
"content_hash": "7208c1ba8300513a76bc42cb63e4216e",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 55,
"avg_line_length": 20.724137931034484,
"alnum_prop": 0.7054908485856906,
"repo_name": "barrachri/epcon",
"id": "e6105f44ef483483f8fb36612c3adb87ddb3915a",
"size": "601",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "cms_utils/cms_app.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ASP",
"bytes": "1490"
},
{
"name": "CSS",
"bytes": "4751434"
},
{
"name": "HTML",
"bytes": "2177936"
},
{
"name": "JavaScript",
"bytes": "3465605"
},
{
"name": "Makefile",
"bytes": "3338"
},
{
"name": "PHP",
"bytes": "4506"
},
{
"name": "Python",
"bytes": "1255065"
},
{
"name": "Ruby",
"bytes": "1870"
},
{
"name": "Shell",
"bytes": "1679"
},
{
"name": "XSLT",
"bytes": "5122"
}
],
"symlink_target": ""
} |
from django.core.management.sql import sql_delete, sql_all
def sql_reset(app, style, connection):
"Returns a list of the DROP TABLE SQL, then the CREATE TABLE SQL, for the given module."
return sql_delete(app, style, connection) + sql_all(app, style, connection)
| {
"content_hash": "9d4ff31a64a8e3f25941fcf2b476b3f1",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 92,
"avg_line_length": 54.4,
"alnum_prop": 0.7389705882352942,
"repo_name": "gregmuellegger/django-reset",
"id": "e240c5765633bde1a262a772c590018ad1251143",
"size": "272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_reset/management/sql.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "5457"
}
],
"symlink_target": ""
} |
'''
ooiservices.config
Configuration Management for OOI Services
'''
import yaml
import pkg_resources
def reload():
__config_file__ = pkg_resources.resource_string(__name__, "config.yml")
__config_dict__ = yaml.load(__config_file__)
globals().update(__config_dict__)
reload() # Initialize the globals on the first load of this module
| {
"content_hash": "fdd12a865ed4da4bc57a098aa6b4c30f",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 75,
"avg_line_length": 21.9375,
"alnum_prop": 0.6923076923076923,
"repo_name": "ednad/ooi-ui-services",
"id": "3aa34566bde6e91ea34c2e9c0a2e95cf41e0cd47",
"size": "374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ooiservices/config/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26287"
}
],
"symlink_target": ""
} |
import hashlib
import os.path
from .compat import IO_ERRORS
class FileHashCache:
def __init__(self, initial_data):
self.file_hashes = {}
self.previous_file_hashes = {}
if initial_data:
self.previous_file_hashes = {
os.path.abspath(f): h
for f, h in initial_data.items()
}
def hash_missing_files(self, filenames):
self.file_hashes.update({
filename: _hash_file(filename)
for filename in filenames
if filename not in self.file_hashes
})
def is_identical(self, filename):
if filename not in self.file_hashes:
self.file_hashes[filename] = _hash_file(filename)
old_hash = self.previous_file_hashes.get(filename)
new_hash = self.file_hashes[filename]
return old_hash and new_hash and old_hash == new_hash
def to_json(self):
return {
os.path.relpath(f): h
for f, h in self.file_hashes.items()
}
def _hash_file(filename):
try:
with open(filename, 'rb') as f:
return hashlib \
.new('sha1', f.read()) \
.hexdigest()
except IO_ERRORS:
return None
| {
"content_hash": "7c167a5eb200d013a489b8e96135c0dc",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 61,
"avg_line_length": 25.46938775510204,
"alnum_prop": 0.5512820512820513,
"repo_name": "mhallin/cov-exclude-py",
"id": "232e280ea9d82c9217176bb3d93bd0e6d3945a88",
"size": "1248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "covexclude/filehashcache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22378"
}
],
"symlink_target": ""
} |
from yowsup.structs import ProtocolEntity, ProtocolTreeNode
class StreamErrorProtocolEntity(ProtocolEntity):
TYPE_CONFLICT = "conflict"
'''
<stream:error>
<conflict></conflict>
<text>Replaced by new connection</text>
</stream:error>
'''
TYPE_ACK = "ack"
'''
<stream:error>
<ack></ack>
</stream:error>
'''
TYPE_XML_NOT_WELL_FORMED = "xml-not-well-formed"
'''
<stream:error>
<xml-not-well-formed>
</xml-not-well-formed>
</stream:error>
'''
TYPES = (TYPE_CONFLICT, TYPE_ACK, TYPE_XML_NOT_WELL_FORMED)
def __init__(self, data = None):
super(StreamErrorProtocolEntity, self).__init__("stream:error")
data = data or {}
self.setErrorData(data)
def setErrorData(self, data):
self.data = data
def getErrorData(self):
return self.data
def getErrorType(self):
for k in self.data.keys():
if k in self.__class__.TYPES:
return k
def __str__(self):
out = "Stream Error type: %s\n" % self.getErrorType()
out += "%s" % self.getErrorData()
out += "\n"
return out
def toProtocolTreeNode(self):
node = super(StreamErrorProtocolEntity, self).toProtocolTreeNode()
type = self.getErrorType()
node.addChild(ProtocolTreeNode(type))
if type == self.__class__.TYPE_CONFLICT and "text" in self.data:
node.addChild(ProtocolTreeNode("text", data=self.data["text"]))
return node
@staticmethod
def fromProtocolTreeNode(protocolTreeNode):
data = {}
for child in protocolTreeNode.getAllChildren():
data[child.tag] = child.data
return StreamErrorProtocolEntity(data)
| {
"content_hash": "547e040fb7b22967fde1dce4145d71d6",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 75,
"avg_line_length": 27,
"alnum_prop": 0.590347923681257,
"repo_name": "svub/whatsapp-rest-webservice",
"id": "10882e8dd0be231f70b9f41719ef5686e833a0b4",
"size": "1782",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "service/yowsup/yowsup/layers/auth/protocolentities/stream_error.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "529740"
},
{
"name": "Shell",
"bytes": "76"
}
],
"symlink_target": ""
} |
from themis.group import Groups
import re, yaml
from themis.utils import is_valid_redis_key, BaseData, Features, ThemisMetaData
from themis.static import POLICY_CUSTOM_CALLBACK, FEATURES_CUSTOM_CALLBACK, \
METADATA_CUSTOM_CALLBACK, DEFAULT_FEATURES_VALUES, RESERVERD_KEYWORDS
class PolicyError(Exception): pass
class Policy(Groups):
POLICY_TYPES = ['bypass', 'regular', 'bypass+']
JAILBY_VALUES = [ 'SASLUsername', 'SenderIP', 'Sender', 'Sender+', 'SenderDomain', 'SenderDomain+' ]
JAILACTION_VALUES = ['block', 'hold', 'monitor']
# TODO: Add params policy_name and policy_namespace
POLICY_PARAMS = ['Source', 'Destination', 'Enable', 'Type', 'Priority', 'JailBy', 'JailSpec', 'JailAction', 'ReplyData', 'OnlyHeaders',
'CountRCPT', 'StopHere', 'RequestsMon', 'SubjectProbation', 'CountSentProbation', 'IpProbation', 'BlockProbation', 'ActionHeaders', 'SPF']
def __init__(self, redis):
super(Policy, self).__init__(redis)
def setpolicy(self, pdata):
try:
if self.getpolicy(pdata.policy_name):
raise PolicyError('Policy "%s" already exists' % pdata.policy_name)
except ValueError, e:
# Policy does not exists
pass
except Exception, e:
raise PolicyError('Error setting policy %s: %s' % (pdata.policy_name, e))
pdata.validate_policy_args(self)
# If it's a pool policy should be listed only in list:policies:pool_name
if pdata.pool_policy:
list_policy_name = ':'.join(('list', 'policies', pdata.pool_name))
else:
list_policy_name = ':'.join(('list', 'policies'))
with self.redis.pipeline() as pipe:
pipe.hmset(pdata.policy_namespace, pdata.as_dict, POLICY_CUSTOM_CALLBACK)
pipe.zadd(list_policy_name, pdata.priority, pdata.policy_namespace)
pipe.execute()
def modifypolicy(self, json_data):
pdata = self.getpolicy(json_data['policy_name'])
pdata.validate_policy_args(self)
try:
for key in json_data:
pdata.__dict__[key.lower()] = json_data[key]
except KeyError, k:
raise PolicyError('Could not find key for record: %s' % k)
# VALIDATE NEW DATA
pdata.do_init()
pdata.validate_policy_args(self)
if pdata.pool_policy:
list_policy_name = ':'.join(('list', 'policies', pdata.pool_name))
else:
list_policy_name = ':'.join(('list', 'policies'))
with self.redis.pipeline() as pipe:
pipe.hmset(pdata.policy_namespace, pdata.as_dict, POLICY_CUSTOM_CALLBACK)
pipe.zadd(list_policy_name, pdata.priority, pdata.policy_namespace)
pipe.execute()
# TODO: Fix
def get_requests(self, target, messagesBySecStoreDays, sleep_time=None):
from datetime import datetime
from time import sleep
namespace = 'requestsbysec:%s' % target
messagesBySecStoreDays = self.redis.hget('config:themis:features:global', 'messagesBySecStoreDays')
now = datetime.now()
now_in_seconds = now.hour * 60 * (int(messagesBySecStoreDays) * 60) + now.second
if not sleep_time:
response = self.redis.hget(namespace, 'second:%s' % now_in_seconds)
if not response:
raise ValueError('Could not find any request: second:%s' % now_in_seconds)
return response
else:
while True:
now = datetime.now()
now_in_seconds = now.hour * 60 * (int(messagesBySecStoreDays) * 60) + now.second
print self.redis.hget(namespace, 'second:%s' % now_in_seconds)
sleep(sleep_time)
def config_features(self, namespace, config_file):
if namespace in RESERVERD_KEYWORDS:
raise ValueError('Reserved word found: %s. Use another name' % ', '.join(RESERVERD_KEYWORDS))
if not config_file:
global_config = DEFAULT_FEATURES_VALUES
else:
with open(config_file) as f:
_, global_config, _ = yaml.load_all(f)
feats = Features(**global_config)
# sanity check for key items in config file
feats.strict_check()
self.redis.hmset('config:themis:features:%s' % namespace, feats.as_dict, FEATURES_CUSTOM_CALLBACK)
def edit_features(self, namespace, feature, value):
try:
feat = Features(**self.redis.hgetall('config:themis:features:%s' % namespace))
feat.strict_check()
except Exception, e:
raise ValueError('Strict check error, inconsistent features. ERROR: %s' % e)
self.redis.hset('config:themis:features:%s' % namespace, feature, value, feat_mapping=True)
def del_features(self, namespace):
self.redis.delete('config:themis:features:%s' % namespace)
def get_features(self, namespace):
if namespace == 'list':
callback = {}
[callback.update({key : str(value)}) for key, value in FEATURES_CUSTOM_CALLBACK.items()]
return callback
return self.redis.hgetall('config:themis:features:%s' % namespace)
def get_metadata(self, target):
if target == 'list':
callback = {}
[callback.update({key : str(value)}) for key, value in METADATA_CUSTOM_CALLBACK.items()]
return callback
return self.redis.hgetall(target)
def edit_metadata(self, target, key, value):
try:
tmetadata = ThemisMetaData(**self.redis.hgetall(target))
tmetadata.strict_check()
except Exception, e:
raise ValueError('Strict check error, inconsistent metadata key. ERROR: %s' % e)
# If get here it is safe to edit
self.redis.hset(target, key, value, feat_mapping=True)
def search_keys(self, target_lookup):
return self.scan(target_lookup) or []
def lookup_delete(self, target_lookup, debug=False):
if re.match(r'^policy.*|^list.*|^group.*|^config.*|^pool.*', target_lookup):
raise ValueError('Cannot delete keys with the starting names: list, group, config and pool.')
scan_result = self.scan(target_lookup)
if not scan_result:
raise ValueError('Could not find any keys to delete')
total = len(scan_result)
for key in scan_result:
if debug:
print 'Deleting key:', key
self.redis.delete(key)
return 'SUCCESS - Deleted %s key(s)' % total
def add_default_metadata(self, target, config_file):
if target in RESERVERD_KEYWORDS:
raise ValueError('Reserved word found: %s. Use another name' % ', '.join(RESERVERD_KEYWORDS))
tmetadata = ThemisMetaData(**ThemisMetaData.METADATA_DEFAULT_VALUES)
if not config_file:
global_config = DEFAULT_FEATURES_VALUES
else:
with open(config_file) as f:
_, global_config, _ = yaml.load_all(f)
tmetadata.update_features(**global_config)
self.redis.hmset(target, tmetadata.as_dict, dict(FEATURES_CUSTOM_CALLBACK.items() + METADATA_CUSTOM_CALLBACK.items()))
def add_actionheaders(self, policy_name, sourcehdr, regexp, actionheaders):
"""
:param sourcehdr: A string with the lookup header
:param regexp: A string with the regexp that will be applied to the sourcehdr if exists
:param action: A tuple of new headers to add in case of a match
"""
pdata = self.getpolicy(policy_name)
try:
# Check if this header is already set
pdata.actionheaders[sourcehdr]
raise ValueError('Source header already exists: %s' % sourcehdr)
except KeyError:
pdata.actionheaders[sourcehdr] = [regexp] + actionheaders
pdata._check_action_headers()
self.redis.hmset(pdata.policy_namespace, pdata.as_dict, POLICY_CUSTOM_CALLBACK)
def modify_actionheaders(self, policy_name, sourcehdr, regexp, actionheaders):
"""
:param sourcehdr: A string with the lookup header
:param regexp: A string with the regexp that will be applied to the sourcehdr if exists
:param action: A tuple of new headers to add in case of a match
"""
pdata = self.getpolicy(policy_name)
try:
# Check if this header is already set
pdata.actionheaders[sourcehdr]
pdata.actionheaders[sourcehdr] = [regexp] + actionheaders
pdata._check_action_headers()
self.redis.hmset(pdata.policy_namespace, pdata.as_dict, POLICY_CUSTOM_CALLBACK)
except KeyError:
raise ValueError('Source Header %s does not exists. Cannot modify a header that does not exists' % sourcehdr)
def del_actionheaders(self, policy_name, sourcehdr, clear=False):
pdata = self.getpolicy(policy_name)
if clear:
pdata.actionheaders = {}
else:
if sourcehdr not in pdata.actionheaders:
raise ValueError('Could not find source header %s' % sourcehdr)
del pdata.actionheaders[sourcehdr]
self.redis.hmset(pdata.policy_namespace, pdata.as_dict, POLICY_CUSTOM_CALLBACK)
def addpool(self, pool_name, servers):
is_valid_redis_key(pool_name)
if pool_name in RESERVERD_KEYWORDS:
raise ValueError('Reserved word found: %s. Use another name' % ', '.join(RESERVERD_KEYWORDS))
try:
self.getpool(pool_name)
raise ValueError('Pool "%s" already exists' % pool_name)
except Exception:
pass
for server in servers:
# http://stackoverflow.com/questions/11809631/fully-qualified-domain-name-validation?answertab=votes#tab-top
if not re.match(r'(?=^.{4,255}$)(^((?!-)[a-zA-Z0-9-]{1,63}(?<!-)\.)+[a-zA-Z]{2,63}$)', server):
raise ValueError('Is not a qualified server: %s' % server)
pool_namespace = ':'.join(('pool', pool_name))
with self.redis.pipeline() as pipe:
pipe.sadd(pool_namespace, *servers)
pipe.sadd('list:pools', pool_namespace)
pipe.execute()
def getpool(self, pool_name):
pool_namespace = ':'.join(('pool', pool_name))
pool = list(self.redis.smembers(pool_namespace))
if not pool:
raise ValueError('Pool "%s" does not exists' % pool_name)
return { pool_name : pool }
def editpool(self, pool_name, servers):
is_valid_redis_key(pool_name)
self.getpool(pool_name)
for server in servers:
# http://stackoverflow.com/questions/11809631/fully-qualified-domain-name-validation?answertab=votes#tab-top
if not re.match(r'(?=^.{4,255}$)(^((?!-)[a-zA-Z0-9-]{1,63}(?<!-)\.)+[a-zA-Z]{2,63}$)', server):
raise ValueError('Is not a qualified server: %s' % server)
pool_namespace = ':'.join(('pool', pool_name))
with self.redis.pipeline() as pipe:
pipe.sadd(pool_namespace, *servers)
pipe.sadd('list:pools', pool_namespace)
pipe.execute()
def list_pools(self):
pools = {}
for pool_name in self.scan('pool:*'):
pools[pool_name] = list(self.redis.smembers(pool_name))
if not pools:
raise ValueError('Could not find any pools')
return pools
def remove_pool(self, pool_name):
is_valid_redis_key(pool_name)
pool_namespace = ':'.join(('pool', pool_name))
with self.redis.pipeline() as pipe:
pipe.delete(pool_namespace)
pipe.srem('list:pools', pool_namespace)
del_result, _ = pipe.execute()
if not del_result:
raise ValueError('Could not find pool name %s' % pool_name)
def remove_server_from_pool(self, pool_name, servers):
is_valid_redis_key(pool_name)
pool_namespace = ':'.join(('pool', pool_name))
if type(servers) is not list:
raise TypeError('Wrong type of parameters, expect list, found: %s' % type(servers))
if len(servers) >= len(self.redis.smembers(pool_namespace)):
raise IndexError('You MUST NOT remove all the servers, remove the pool instead')
result = self.redis.srem(pool_namespace, *servers)
if not result:
raise ValueError('Could not find any servers to delete: %s' % servers)
return result
def get_all_policies(self):
"""
Get all policies searching by string that starts with 'policy:'
return a list of policies
"""
policies = {}
for policy_name in self.scan('policy:*'):
# Extract word 'policy' from policy_name
policy_name = ':'.join(policy_name.split(':')[1:])
policies[policy_name] = self.getpolicy(policy_name)
#pdata = PolicyData(**self.redis.hgetall(policy_name, POLICY_CUSTOM_CALLBACK))
#policies[pdata.policy_name] = pdata
if not policies:
raise ValueError('Could not find any policy, using base search "policy:*"')
return policies
#return self.redis.zrange('list:policies', 0, -1)
def delete(self, policy_name):
""" Delete a single policy
:param policy_name: The name of the policy
"""
pdata = self.getpolicy(policy_name)
with self.redis.pipeline() as pipe:
pipe.delete(pdata.policy_namespace)
if pdata.pool_policy:
pipe.zrem('list:policies:%s' % pdata.pool_name, pdata.policy_namespace)
else:
pipe.zrem('list:policies', pdata.policy_namespace)
del_result, _ = pipe.execute()
if not del_result:
raise ValueError('Could not find policy by the name: ' + policy_name)
def get_all_data_policies(self, mta_hostname=None, fallback=False):
""" Get all enabled policies
Returns a list of PolicyData objects
"""
search = ['list:policies']
if mta_hostname:
# ['pool:pool_name01', 'pool:pool_name02', ...]
pools = list(self.redis.smembers('list:pools'))
ismember = []
with self.redis.pipeline() as pipe:
for pool in pools:
pipe.sismember(pool, mta_hostname)
# [True, False, ...]
ismember = pipe.execute()
pool = [pool for m, pool in zip(ismember, pools) if m]
if pool:
# Expect pool:pool_name from 'pool'
pool = pool[0].split(':')[1]
search.insert(0, 'list:policies:%s' % pool)
if not fallback:
search.remove('list:policies')
policies = []
try:
for search_pattern in search:
for policy_name in self.redis.zrange(search_pattern, 0, -1):
#policy_data = self.redis.hgetall(policy_name)
pdata = PolicyData(**self.redis.hgetall(policy_name, POLICY_CUSTOM_CALLBACK))
if not pdata.enable:
continue
pdata.priority = self.redis.zscore(search_pattern, pdata.policy_namespace)
if pdata.priority is None:
raise ValueError('Could not extract priority from policy: %s Search pattern: %s' % (pdata.policy_namespace, search_pattern))
policies.append(pdata)
# validate if group exists
self.getgroup(pdata.source)
self.getgroup(pdata.destination)
except Exception, e:
raise PolicyError('Error parsing policies, check database consistency: %s' % e)
return policies
def getpolicy(self, policy_name):
policy_namespace = ':'.join(('policy', policy_name))
data = self.redis.hgetall(policy_namespace, POLICY_CUSTOM_CALLBACK)
if not data:
raise ValueError('Cant find any policy for name: %s' % policy_name)
try:
pdata = PolicyData(**data)
list_policy_name = 'list:policies'
if pdata.pool_policy:
list_policy_name = ':'.join((list_policy_name, pdata.pool_name))
pdata.priority = self.redis.zscore(list_policy_name, pdata.policy_namespace)
if pdata.priority is None:
raise ValueError('Could not extract priority from policy: %s. \
List Policies Namespace: %s' % (list_policy_name, pdata.policy_namespace))
except Exception, e:
raise PolicyError('Inconsistency policy data, check stored data. %s' % e)
return pdata
class PolicyData(BaseData):
def __init__(self, **entries):
# convert keys to lower
entries = dict((k.lower(), v) for k,v in entries.iteritems())
self.__dict__.update(entries)
self.do_init()
# Override
@property
def as_dict(self):
if 'inverted_source' in self.__dict__ and self.inverted_source:
self.source = '!' + self.source
if 'inverted_destination' in self.__dict__ and self.inverted_destination:
self.destination = '!' + self.destination
return super(PolicyData, self).as_dict
def do_init(self):
is_valid_redis_key(self.policy_name)
if self.policy_name in RESERVERD_KEYWORDS:
raise ValueError('Reserved word found: %s. Use another name' % ', '.join(RESERVERD_KEYWORDS))
self.policy_namespace = ':'.join(('policy', self.policy_name))
self.pool_policy = False
if ':' in self.policy_name:
self.pool_policy = True
split_policy = self.policy_name.split(':')
if len(split_policy) > 2:
raise ValueError('Accept only one colon for policy name')
self.pool_name, _ = split_policy
self._validate()
self._check_jailspec()
self._check_inverted()
self._check_action_headers()
self.is_destination_any = 'any' in self.destination
self.is_source_any = 'any' in self.source
def _validate(self):
for entry in self.as_dict.keys():
if not entry in [param.lower() for param in Policy.POLICY_PARAMS + ['policy_name']]:
raise NameError('Wrong key found: %s' % entry)
def _check_jailspec(self):
for spec in self.jailspec:
if type(spec) is not tuple:
raise ValueError('JailSpec in wrong format. Should be requests:time. E.g.: 1:1000')
def _check_inverted(self):
self.inverted_source = '!' in self.source
self.inverted_destination = '!' in self.destination
try:
# Extract only numbers, characters and underscore.
self.source = re.search(r'[\w]+', self.source).group()
self.destination = re.search(r'[\w]+', self.destination).group()
except Exception, e:
raise ValueError('Error extracting data from source or destination: %s' % e)
def _check_action_headers(self):
if self.actionheaders:
# Expected: {'X-HDR01' : ['REGEX', ('X-NEW-HDR', 'X-VAL'), ('X-NEW-HDR', 'X-VAL'), ...], 'X-HDR02' : [...]}
actionheaders = dict(self.actionheaders)
try:
for hdr, hdrlist in actionheaders.items():
# regexp value
hdrlist.pop(0)
for hdrtuple in hdrlist:
if type(hdrtuple) is not tuple:
raise ValueError('Expected tuple')
except Exception, e:
raise ValueError('ActionHeaders in wrong format. Should be ... %s' % e)
def validate_policy_args(self, grp):
for key, value in self.__dict__.items():
if key in ['source', 'destination']:
# Ignore any start symbols !#@...
value = re.search(r'[\w.]+$', value)
if not value:
raise ValueError('Could not find pattern: %s' % value)
if not 'any' == value.group():
grp.getgroup(value.group())
elif key in ['countrcpt', 'stophere', 'requestsmon', 'enable', 'spf']:
if value not in ['TRUE', 'FALSE', True, False]:
raise ValueError('Enable, StopHere, RequestsMon and CountRCPT only accepts TRUE or FALSE')
elif key == 'type':
if value not in Policy.POLICY_TYPES:
raise TypeError('Invalid argument, Type must be: ' + ' '.join(Policy.POLICY_TYPES))
elif key == 'jailby':
if value not in [param for param in Policy.JAILBY_VALUES]:
raise TypeError('Invalid argument, JailBy must be: ' + ' '.join(Policy.JAILBY_VALUES))
elif key == 'jailaction':
if value not in Policy.JAILACTION_VALUES:
raise TypeError('Invalid argument, JailAction must be: ' + ' '.join(Policy.JAILACTION_VALUES))
elif key == 'replydata':
if len(value) > 60 or type(value) is not str:
raise ValueError('ReplyData accepts string type with 60 characters only.')
elif len(re.findall('%s', value)) > 1:
raise ValueError('Too many format strings detected, accepts only one') | {
"content_hash": "d484b370f7cfa9b0281fc63a5ef623d9",
"timestamp": "",
"source": "github",
"line_count": 471,
"max_line_length": 140,
"avg_line_length": 40.938428874734605,
"alnum_prop": 0.653874079452339,
"repo_name": "sandromello/themis-py",
"id": "d4476d3aa8385aea36b9c3e8f142b5254ab4b778",
"size": "19282",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/themis/policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "847"
},
{
"name": "Python",
"bytes": "117949"
},
{
"name": "Shell",
"bytes": "2932"
}
],
"symlink_target": ""
} |
import asyncio
import time
from typing import Awaitable, Callable, Dict, Generic, TypeVar
import prometheus_client as pc # type: ignore
import sortedcontainers
from prometheus_async.aio import time as prom_async_time # type: ignore
CACHE_HITS = pc.Counter('cache_hits_count', 'Number of Cache Hits', ['cache_name'])
CACHE_MISSES = pc.Counter('cache_misses_count', 'Number of Cache Hits', ['cache_name'])
CACHE_EVICTIONS = pc.Counter('cache_evictions_count', 'Number of Cache Hits', ['cache_name'])
CACHE_LOAD_LATENCY = pc.Summary(
'cache_load_latency_seconds', 'Latency of loading cache values in seconds', ['cache_name']
)
T = TypeVar('T')
U = TypeVar('U')
class TimeLimitedMaxSizeCache(Generic[T, U]):
def __init__(self, load: Callable[[T], Awaitable[U]], lifetime_ns: int, num_slots: int, cache_name: str):
assert lifetime_ns > 0
assert num_slots > 0
self.load = load
self.lifetime_ns = lifetime_ns
self.num_slots = num_slots
self.cache_name = cache_name
self._futures: Dict[T, asyncio.Future] = {}
self._cache: Dict[T, U] = {}
self._expiry_time: Dict[T, int] = {}
self._keys_by_expiry = sortedcontainers.SortedSet(key=lambda k: self._expiry_time[k])
self._shutting_down = False
async def shutdown(self):
"""Wait for all outstanding futures to complete and prevent new lookups.
This class does not manage any resources itself and this function is *not required* to be
called.
"""
self._shutting_down = True
await asyncio.wait(self._futures.values())
assert len(self._futures) == 0
async def lookup(self, k: T) -> U:
if self._shutting_down:
raise ValueError('Cache is shutting down.')
if k in self._expiry_time:
assert k in self._cache
if self._expiry_time[k] <= time.monotonic_ns():
self._remove(k)
if k in self._cache:
CACHE_HITS.labels(cache_name=self.cache_name).inc()
return self._cache[k]
CACHE_MISSES.labels(cache_name=self.cache_name).inc()
if k in self._futures:
return await self._futures[k]
self._futures[k] = asyncio.create_task(self.load(k))
try:
v = await prom_async_time(CACHE_LOAD_LATENCY.labels(cache_name=self.cache_name), self._futures[k])
finally:
del self._futures[k]
self._put(k, v)
if self._over_capacity():
CACHE_EVICTIONS.labels(cache_name=self.cache_name).inc()
self._evict_oldest()
return v
def _put(self, k: T, v: U) -> None:
expiry_time = time.monotonic_ns() + self.lifetime_ns
self._cache[k] = v
self._expiry_time[k] = expiry_time
self._keys_by_expiry.add(k)
def _remove(self, k: T) -> None:
del self._cache[k]
self._keys_by_expiry.remove(k)
del self._expiry_time[k]
def _over_capacity(self) -> bool:
return len(self._keys_by_expiry) > self.num_slots
def _evict_oldest(self) -> None:
oldest_key = self._keys_by_expiry[0]
self._remove(oldest_key)
| {
"content_hash": "ce9fb047c642530d28963881bf7a76a1",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 110,
"avg_line_length": 34.56521739130435,
"alnum_prop": 0.6091194968553459,
"repo_name": "hail-is/hail",
"id": "e577d4801a21e5f7ea7346f4782f2547b09d86d0",
"size": "3180",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "gear/gear/time_limited_max_size_cache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7729"
},
{
"name": "C",
"bytes": "779"
},
{
"name": "C++",
"bytes": "171899"
},
{
"name": "CMake",
"bytes": "3045"
},
{
"name": "CSS",
"bytes": "666"
},
{
"name": "Dockerfile",
"bytes": "10056"
},
{
"name": "Emacs Lisp",
"bytes": "377"
},
{
"name": "HCL",
"bytes": "54923"
},
{
"name": "HTML",
"bytes": "155946"
},
{
"name": "Java",
"bytes": "38401"
},
{
"name": "JavaScript",
"bytes": "877"
},
{
"name": "Jupyter Notebook",
"bytes": "305748"
},
{
"name": "MLIR",
"bytes": "20"
},
{
"name": "Makefile",
"bytes": "61284"
},
{
"name": "Python",
"bytes": "5635857"
},
{
"name": "R",
"bytes": "3038"
},
{
"name": "SCSS",
"bytes": "33487"
},
{
"name": "Scala",
"bytes": "5050997"
},
{
"name": "Shell",
"bytes": "75539"
},
{
"name": "XSLT",
"bytes": "5748"
}
],
"symlink_target": ""
} |
"""Provide some utilities"""
import logging
from os import PathLike
from pathlib import Path
from io import StringIO
from typing import (
Any, Callable, Iterable, List, Mapping, Optional, Tuple, Union
)
# pylint: disable=unused-import
try: # pragma: no cover
from functools import cached_property
except ImportError: # pragma: no cover
from cached_property import cached_property
# pylint: enable=unused-import
try: # pragma: no cover
import importlib_metadata
except ImportError: # pragma: no cover
# pylint: disable=ungrouped-imports
from importlib import metadata as importlib_metadata
from rich.logging import RichHandler as _RichHandler
from rich.console import Console, RenderableType
from rich.highlighter import ReprHighlighter
from rich.table import Table
from rich.text import Text
from rich.panel import Panel
from rich.pretty import Pretty
from more_itertools import consecutive_groups
from simplug import SimplugContext
from .defaults import (LOGGER_NAME,
DEFAULT_CONSOLE_WIDTH,
DEFAULT_CONSOLE_WIDTH_SHIFT)
from .exceptions import ConfigurationError
from .plugin import plugin
# pylint: disable=invalid-name
class RichHandler(_RichHandler):
"""Subclass of rich.logging.RichHandler, showing log levels as a single
character"""
def get_level_text(self, record: logging.LogRecord) -> Text:
"""Get the level name from the record.
Args:
record (LogRecord): LogRecord instance.
Returns:
Text: A tuple of the style and level name.
"""
level_name = record.levelname
level_text = Text.styled(
level_name[0].upper(), f"logging.level.{level_name.lower()}"
)
return level_text
_logger_handler = RichHandler(show_path=False,
show_level=True,
console=Console(),
rich_tracebacks=True,
markup=True)
_logger_handler.setFormatter(
logging.Formatter('/%(plugin_name)-7s %(message)s')
)
def get_logger(name: str = LOGGER_NAME,
level: Optional[Union[str, int]] = None) -> logging.Logger:
"""Get the logger by given plugin name
Args:
level: The initial level of the logger
Returns:
The logger
"""
log = logging.getLogger(f'pipen.{name}')
log.addHandler(_logger_handler)
if level is not None:
log.setLevel(level.upper() if isinstance(level, str) else level)
return logging.LoggerAdapter(log, {'plugin_name': name})
logger = get_logger()
def get_console_width(default: int = DEFAULT_CONSOLE_WIDTH,
shift: int = DEFAULT_CONSOLE_WIDTH_SHIFT) -> int:
"""Get the console width
Args:
default: The default console width if failed to get
shift: The shift to subtract from the width
as we have time, level, plugin name in log
"""
try:
return logger.logger.handlers[0].console.width - shift
except (AttributeError, IndexError): # pragma: no cover
return default - shift
def get_plugin_context(plugins: Optional[List[Any]]) -> SimplugContext:
"""Get the plugin context to enable and disable plugins per pipeline
Args:
plugins: A list of plugins to enable or a list of names with 'no:'
as prefix to disable
Returns:
The plugin context manager
"""
if plugins is None:
return plugin.plugins_only_context(None)
no_plugins = [isinstance(plug, str) and plug.startswith('no:')
for plug in plugins]
if any(no_plugins) and not all(no_plugins):
raise ConfigurationError(
'Either all plugin names start with "no:" or '
'none of them does.'
)
if all(no_plugins):
return plugin.plugins_but_context(
plug[3:] for plug in plugins
)
return plugin.plugins_only_context(plugins)
def log_rich_renderable(
renderable: RenderableType,
color: str,
logfunc: Callable,
*args,
**kwargs
) -> None:
"""Log a rich renderable to logger
Args:
renderable: The rich renderable
splitline: Whether split the lines or log the entire message
logfunc: The log function, if message is not the first argument,
use functools.partial to wrap it
*args: The arguments to the log function
**kwargs: The keyword arguments to the log function
"""
console = Console(file=StringIO())
console.print(renderable)
for line in console.file.getvalue().splitlines():
logfunc(f'[{color}]{line}[/{color}]' if color else line,
*args,
**kwargs)
def render_scope(scope: Mapping, title: str) -> RenderableType:
"""Log a mapping to console
Args:
scope: The mapping object
title: The title of the scope
"""
highlighter = ReprHighlighter()
items_table = Table.grid(padding=(0, 1), expand=False)
items_table.add_column(justify="left")
for key, value in sorted(scope.items()):
items_table.add_row(
Text.assemble((key, "scope.key")),
Text.assemble(('=', 'scope.equals')),
Pretty(value, highlighter=highlighter, overflow='fold')
)
return Panel(
items_table,
title=title,
width=min(DEFAULT_CONSOLE_WIDTH, get_console_width()),
border_style="scope.border",
padding=(0, 1),
)
def pipen_banner() -> RenderableType:
"""The banner for pipen"""
from . import __version__
table = Table(width=min(DEFAULT_CONSOLE_WIDTH, get_console_width()),
show_header=False,
show_edge=False,
show_footer=False,
show_lines=False,
caption=f"version: {__version__}")
table.add_column(justify='center')
table.add_row(r" _____________________________________ __")
table.add_row(r" ___ __ \___ _/__ __ \__ ____/__ | / /")
table.add_row(r" __ /_/ /__ / __ /_/ /_ __/ __ |/ / ")
table.add_row(r"_ ____/__/ / _ ____/_ /___ _ /| / ")
table.add_row(r"/_/ /___/ /_/ /_____/ /_/ |_/ ")
table.add_row("")
return table
def brief_list(blist: List[int]) -> str:
"""Briefly show an integer list, combine the continuous numbers.
Args:
blist: The list
Returns:
The string to show for the briefed list.
"""
ret = []
for group in consecutive_groups(blist):
group = list(group)
if len(group) > 1:
ret.append(f'{group[0]}-{group[1]}')
else:
ret.append(str(group[0]))
return ', '.join(ret)
def get_mtime(path: PathLike, dir_depth: int = 1) -> float:
"""Get the modification time of a path.
If path is a directory, try to get the last modification time of the
contents in the directory at given dir_depth
Args:
dir_depth: The depth of the directory to check the
last modification time
Returns:
The last modification time of path
"""
path = Path(path)
if not path.is_dir() or dir_depth == 0:
return path.stat().st_mtime
mtime = 0
for file in path.glob('*'):
mtime = max(mtime, get_mtime(file, dir_depth-1))
return mtime
def is_subclass(obj: Any, cls: type) -> bool:
"""Tell if obj is a subclass of cls
Differences with issubclass is that we don't raise Type error if obj
is not a class
Args:
obj: The object to check
cls: The class to check
Returns:
True if obj is a subclass of cls otherwise False
"""
try:
return issubclass(obj, cls)
except TypeError:
return False
def load_entrypoints(group: str) -> Iterable[Tuple[str, Any]]:
"""Load objects from setuptools entrypoints by given group name
Args:
group: The group name of the entrypoints
Returns:
An iterable of tuples with name and the loaded object
"""
for dist in importlib_metadata.distributions(): # pragma: no cover
for epoint in dist.entry_points:
if epoint.group != group:
continue
obj = epoint.load()
yield (epoint.name, obj)
def get_shebang(script: str) -> Optional[str]:
"""Get the shebang of the script
Args:
script: The script string
Returns:
None if the script does not contain a shebang, otherwise the shebang
without `#!` prefix
"""
if '\n' not in script:
script += '\n'
shebang_line, _ = script.split('\n', 1)
if not shebang_line.startswith('#!'):
return None
return shebang_line[2:].strip()
def truncate_text(text: str, width: int, end: str = '…') -> str:
"""Truncate a text not based on words/whitespaces
Otherwise, we could use textwrap.shorten.
Args:
text: The text to be truncated
width: The max width of the the truncated text
end: The end string of the truncated text
Returns:
The truncated text with end appended.
"""
if len(text) <= width:
return text
return text[:(width - len(end))] + end
| {
"content_hash": "e29017cf9d7f4b74a228784b9166a78d",
"timestamp": "",
"source": "github",
"line_count": 306,
"max_line_length": 76,
"avg_line_length": 30.281045751633986,
"alnum_prop": 0.6025253615368013,
"repo_name": "pwwang/pyppl",
"id": "c43930fb5e21d5e9b90277b75e1c44ec03eb4ac0",
"size": "9268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pipen/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "234192"
}
],
"symlink_target": ""
} |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='encrypted_app_ticket.proto',
package='',
serialized_pb=_b('\n\x1a\x65ncrypted_app_ticket.proto\"\xad\x01\n\x12\x45ncryptedAppTicket\x12\x19\n\x11ticket_version_no\x18\x01 \x01(\r\x12\x1b\n\x13\x63rc_encryptedticket\x18\x02 \x01(\r\x12\x1c\n\x14\x63\x62_encrypteduserdata\x18\x03 \x01(\r\x12\'\n\x1f\x63\x62_encrypted_appownershipticket\x18\x04 \x01(\r\x12\x18\n\x10\x65ncrypted_ticket\x18\x05 \x01(\x0c\x42\x05H\x01\x80\x01\x00')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_ENCRYPTEDAPPTICKET = _descriptor.Descriptor(
name='EncryptedAppTicket',
full_name='EncryptedAppTicket',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ticket_version_no', full_name='EncryptedAppTicket.ticket_version_no', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='crc_encryptedticket', full_name='EncryptedAppTicket.crc_encryptedticket', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cb_encrypteduserdata', full_name='EncryptedAppTicket.cb_encrypteduserdata', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cb_encrypted_appownershipticket', full_name='EncryptedAppTicket.cb_encrypted_appownershipticket', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='encrypted_ticket', full_name='EncryptedAppTicket.encrypted_ticket', index=4,
number=5, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=31,
serialized_end=204,
)
DESCRIPTOR.message_types_by_name['EncryptedAppTicket'] = _ENCRYPTEDAPPTICKET
EncryptedAppTicket = _reflection.GeneratedProtocolMessageType('EncryptedAppTicket', (_message.Message,), dict(
DESCRIPTOR = _ENCRYPTEDAPPTICKET,
__module__ = 'encrypted_app_ticket_pb2'
# @@protoc_insertion_point(class_scope:EncryptedAppTicket)
))
_sym_db.RegisterMessage(EncryptedAppTicket)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\001\200\001\000'))
# @@protoc_insertion_point(module_scope)
| {
"content_hash": "2c2cba75e4e6eede7f36eac0fae35745",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 390,
"avg_line_length": 39.07446808510638,
"alnum_prop": 0.7247481622651784,
"repo_name": "adamb70/CSGO-Market-Float-Finder",
"id": "0628009d7d61134b498691c7f0f34c71addf7d75",
"size": "3770",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pysteamkit/protobuf/encrypted_app_ticket_pb2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "163661"
}
],
"symlink_target": ""
} |
import mock
import netaddr
import testtools
from neutron.agent.common import utils # noqa
from neutron.agent.linux import ip_lib
from neutron.common import exceptions
from neutron.tests import base
NETNS_SAMPLE = [
'12345678-1234-5678-abcd-1234567890ab',
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'cccccccc-cccc-cccc-cccc-cccccccccccc']
LINK_SAMPLE = [
'1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN \\'
'link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 promiscuity 0',
'2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP '
'qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff'
'\ alias openvswitch',
'3: br-int: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN '
'\ link/ether aa:bb:cc:dd:ee:ff brd ff:ff:ff:ff:ff:ff promiscuity 0',
'4: gw-ddc717df-49: <BROADCAST,MULTICAST> mtu 1500 qdisc noop '
'state DOWN \ link/ether fe:dc:ba:fe:dc:ba brd ff:ff:ff:ff:ff:ff '
'promiscuity 0',
'5: foo:foo: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state '
'UP qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff '
'promiscuity 0',
'6: foo@foo: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state '
'UP qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff '
'promiscuity 0',
'7: foo:foo@foo: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq '
'state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0',
'8: foo@foo:foo: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq '
'state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0',
'9: bar.9@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc '
' noqueue master brq0b24798c-07 state UP mode DEFAULT'
'\ link/ether ab:04:49:b6:ab:a0 brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan protocol 802.1q id 9 <REORDER_HDR>',
'10: bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc '
' noqueue master brq0b24798c-07 state UP mode DEFAULT'
'\ link/ether ab:04:49:b6:ab:a0 brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan protocol 802.1Q id 10 <REORDER_HDR>',
'11: bar:bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq '
'state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan id 11 <REORDER_HDR>',
'12: bar@bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq '
'state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan id 12 <REORDER_HDR>',
'13: bar:bar@bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 '
'qdisc mq state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan protocol 802.1q id 13 <REORDER_HDR>',
'14: bar@bar:bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 '
'qdisc mq state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan protocol 802.1Q id 14 <REORDER_HDR>']
ADDR_SAMPLE = ("""
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff
inet 172.16.77.240/24 brd 172.16.77.255 scope global eth0
inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 fe80::3023:39ff:febc:22ae/64 scope link tentative
valid_lft forever preferred_lft forever
inet6 fe80::3023:39ff:febc:22af/64 scope link tentative dadfailed
valid_lft forever preferred_lft forever
inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link
valid_lft forever preferred_lft forever
""")
ADDR_SAMPLE2 = ("""
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff
inet 172.16.77.240/24 scope global eth0
inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 fe80::3023:39ff:febc:22ae/64 scope link tentative
valid_lft forever preferred_lft forever
inet6 fe80::3023:39ff:febc:22af/64 scope link tentative dadfailed
valid_lft forever preferred_lft forever
inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link
valid_lft forever preferred_lft forever
""")
GATEWAY_SAMPLE1 = ("""
default via 10.35.19.254 metric 100
10.35.16.0/22 proto kernel scope link src 10.35.17.97
""")
GATEWAY_SAMPLE2 = ("""
default via 10.35.19.254 metric 100
""")
GATEWAY_SAMPLE3 = ("""
10.35.16.0/22 proto kernel scope link src 10.35.17.97
""")
GATEWAY_SAMPLE4 = ("""
default via 10.35.19.254
""")
GATEWAY_SAMPLE5 = ("""
default via 192.168.99.1 proto static
""")
GATEWAY_SAMPLE6 = ("""
default via 192.168.99.1 proto static metric 100
""")
GATEWAY_SAMPLE7 = ("""
default dev qg-31cd36 metric 1
""")
IPv6_GATEWAY_SAMPLE1 = ("""
default via 2001:470:9:1224:4508:b885:5fb:740b metric 100
2001:db8::/64 proto kernel scope link src 2001:470:9:1224:dfcc:aaff:feb9:76ce
""")
IPv6_GATEWAY_SAMPLE2 = ("""
default via 2001:470:9:1224:4508:b885:5fb:740b metric 100
""")
IPv6_GATEWAY_SAMPLE3 = ("""
2001:db8::/64 proto kernel scope link src 2001:470:9:1224:dfcc:aaff:feb9:76ce
""")
IPv6_GATEWAY_SAMPLE4 = ("""
default via fe80::dfcc:aaff:feb9:76ce
""")
IPv6_GATEWAY_SAMPLE5 = ("""
default via 2001:470:9:1224:4508:b885:5fb:740b metric 1024
""")
DEVICE_ROUTE_SAMPLE = ("10.0.0.0/24 scope link src 10.0.0.2")
SUBNET_SAMPLE1 = ("10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1\n"
"10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2")
SUBNET_SAMPLE2 = ("10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2\n"
"10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1")
RULE_V4_SAMPLE = ("""
0: from all lookup local
32766: from all lookup main
32767: from all lookup default
101: from 192.168.45.100 lookup 2
""")
RULE_V6_SAMPLE = ("""
0: from all lookup local
32766: from all lookup main
32767: from all lookup default
201: from 2001:db8::1 lookup 3
""")
class TestSubProcessBase(base.BaseTestCase):
def setUp(self):
super(TestSubProcessBase, self).setUp()
self.execute_p = mock.patch('neutron.agent.common.utils.execute')
self.execute = self.execute_p.start()
def test_execute_wrapper(self):
ip_lib.SubProcessBase._execute(['o'], 'link', ('list',),
run_as_root=True)
self.execute.assert_called_once_with(['ip', '-o', 'link', 'list'],
run_as_root=True,
log_fail_as_error=True)
def test_execute_wrapper_int_options(self):
ip_lib.SubProcessBase._execute([4], 'link', ('list',))
self.execute.assert_called_once_with(['ip', '-4', 'link', 'list'],
run_as_root=False,
log_fail_as_error=True)
def test_execute_wrapper_no_options(self):
ip_lib.SubProcessBase._execute([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'link', 'list'],
run_as_root=False,
log_fail_as_error=True)
def test_run_no_namespace(self):
base = ip_lib.SubProcessBase()
base._run([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'link', 'list'],
run_as_root=False,
log_fail_as_error=True)
def test_run_namespace(self):
base = ip_lib.SubProcessBase(namespace='ns')
base._run([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns',
'ip', 'link', 'list'],
run_as_root=True,
log_fail_as_error=True)
def test_as_root_namespace(self):
base = ip_lib.SubProcessBase(namespace='ns')
base._as_root([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns',
'ip', 'link', 'list'],
run_as_root=True,
log_fail_as_error=True)
class TestIpWrapper(base.BaseTestCase):
def setUp(self):
super(TestIpWrapper, self).setUp()
self.execute_p = mock.patch.object(ip_lib.IPWrapper, '_execute')
self.execute = self.execute_p.start()
@mock.patch('os.path.islink')
@mock.patch('os.listdir', return_value=['lo'])
def test_get_devices(self, mocked_listdir, mocked_islink):
retval = ip_lib.IPWrapper().get_devices()
mocked_islink.assert_called_once_with('/sys/class/net/lo')
self.assertEqual(retval, [ip_lib.IPDevice('lo')])
@mock.patch('neutron.agent.common.utils.execute')
def test_get_devices_namespaces(self, mocked_execute):
fake_str = mock.Mock()
fake_str.split.return_value = ['lo']
mocked_execute.return_value = fake_str
retval = ip_lib.IPWrapper(namespace='foo').get_devices()
mocked_execute.assert_called_once_with(
['ip', 'netns', 'exec', 'foo', 'find', '/sys/class/net',
'-maxdepth', '1', '-type', 'l', '-printf', '%f '],
run_as_root=True, log_fail_as_error=True)
self.assertTrue(fake_str.split.called)
self.assertEqual(retval, [ip_lib.IPDevice('lo', namespace='foo')])
def test_get_namespaces(self):
self.execute.return_value = '\n'.join(NETNS_SAMPLE)
retval = ip_lib.IPWrapper.get_namespaces()
self.assertEqual(retval,
['12345678-1234-5678-abcd-1234567890ab',
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'cccccccc-cccc-cccc-cccc-cccccccccccc'])
self.execute.assert_called_once_with([], 'netns', ('list',))
def test_add_tuntap(self):
ip_lib.IPWrapper().add_tuntap('tap0')
self.execute.assert_called_once_with([], 'tuntap',
('add', 'tap0', 'mode', 'tap'),
run_as_root=True, namespace=None,
log_fail_as_error=True)
def test_add_veth(self):
ip_lib.IPWrapper().add_veth('tap0', 'tap1')
self.execute.assert_called_once_with([], 'link',
('add', 'tap0', 'type', 'veth',
'peer', 'name', 'tap1'),
run_as_root=True, namespace=None,
log_fail_as_error=True)
def test_del_veth(self):
ip_lib.IPWrapper().del_veth('fpr-1234')
self.execute.assert_called_once_with([], 'link',
('del', 'fpr-1234'),
run_as_root=True, namespace=None,
log_fail_as_error=True)
def test_add_veth_with_namespaces(self):
ns2 = 'ns2'
with mock.patch.object(ip_lib.IPWrapper, 'ensure_namespace') as en:
ip_lib.IPWrapper().add_veth('tap0', 'tap1', namespace2=ns2)
en.assert_has_calls([mock.call(ns2)])
self.execute.assert_called_once_with([], 'link',
('add', 'tap0', 'type', 'veth',
'peer', 'name', 'tap1',
'netns', ns2),
run_as_root=True, namespace=None,
log_fail_as_error=True)
def test_add_dummy(self):
ip_lib.IPWrapper().add_dummy('dummy0')
self.execute.assert_called_once_with([], 'link',
('add', 'dummy0',
'type', 'dummy'),
run_as_root=True, namespace=None,
log_fail_as_error=True)
def test_get_device(self):
dev = ip_lib.IPWrapper(namespace='ns').device('eth0')
self.assertEqual(dev.namespace, 'ns')
self.assertEqual(dev.name, 'eth0')
def test_ensure_namespace(self):
with mock.patch.object(ip_lib, 'IPDevice') as ip_dev:
ip = ip_lib.IPWrapper()
with mock.patch.object(ip.netns, 'exists') as ns_exists:
with mock.patch('neutron.agent.common.utils.execute'):
ns_exists.return_value = False
ip.ensure_namespace('ns')
self.execute.assert_has_calls(
[mock.call([], 'netns', ('add', 'ns'),
run_as_root=True, namespace=None,
log_fail_as_error=True)])
ip_dev.assert_has_calls([mock.call('lo', namespace='ns'),
mock.call().link.set_up()])
def test_ensure_namespace_existing(self):
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd:
ip_ns_cmd.exists.return_value = True
ns = ip_lib.IPWrapper().ensure_namespace('ns')
self.assertFalse(self.execute.called)
self.assertEqual(ns.namespace, 'ns')
def test_namespace_is_empty_no_devices(self):
ip = ip_lib.IPWrapper(namespace='ns')
with mock.patch.object(ip, 'get_devices') as get_devices:
get_devices.return_value = []
self.assertTrue(ip.namespace_is_empty())
get_devices.assert_called_once_with(exclude_loopback=True)
def test_namespace_is_empty(self):
ip = ip_lib.IPWrapper(namespace='ns')
with mock.patch.object(ip, 'get_devices') as get_devices:
get_devices.return_value = [mock.Mock()]
self.assertFalse(ip.namespace_is_empty())
get_devices.assert_called_once_with(exclude_loopback=True)
def test_garbage_collect_namespace_does_not_exist(self):
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
ip_ns_cmd_cls.return_value.exists.return_value = False
ip = ip_lib.IPWrapper(namespace='ns')
with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
self.assertFalse(ip.garbage_collect_namespace())
ip_ns_cmd_cls.assert_has_calls([mock.call().exists('ns')])
self.assertNotIn(mock.call().delete('ns'),
ip_ns_cmd_cls.return_value.mock_calls)
self.assertEqual(mock_is_empty.mock_calls, [])
def test_garbage_collect_namespace_existing_empty_ns(self):
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
ip_ns_cmd_cls.return_value.exists.return_value = True
ip = ip_lib.IPWrapper(namespace='ns')
with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
mock_is_empty.return_value = True
self.assertTrue(ip.garbage_collect_namespace())
mock_is_empty.assert_called_once_with()
expected = [mock.call().exists('ns'),
mock.call().delete('ns')]
ip_ns_cmd_cls.assert_has_calls(expected)
def test_garbage_collect_namespace_existing_not_empty(self):
lo_device = mock.Mock()
lo_device.name = 'lo'
tap_device = mock.Mock()
tap_device.name = 'tap1'
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
ip_ns_cmd_cls.return_value.exists.return_value = True
ip = ip_lib.IPWrapper(namespace='ns')
with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
mock_is_empty.return_value = False
self.assertFalse(ip.garbage_collect_namespace())
mock_is_empty.assert_called_once_with()
expected = [mock.call(ip),
mock.call().exists('ns')]
self.assertEqual(ip_ns_cmd_cls.mock_calls, expected)
self.assertNotIn(mock.call().delete('ns'),
ip_ns_cmd_cls.mock_calls)
def test_add_vxlan_valid_port_length(self):
retval = ip_lib.IPWrapper().add_vxlan('vxlan0', 'vni0',
group='group0',
dev='dev0', ttl='ttl0',
tos='tos0',
local='local0', proxy=True,
port=('1', '2'))
self.assertIsInstance(retval, ip_lib.IPDevice)
self.assertEqual(retval.name, 'vxlan0')
self.execute.assert_called_once_with([], 'link',
['add', 'vxlan0', 'type',
'vxlan', 'id', 'vni0', 'group',
'group0', 'dev', 'dev0',
'ttl', 'ttl0', 'tos', 'tos0',
'local', 'local0', 'proxy',
'port', '1', '2'],
run_as_root=True, namespace=None,
log_fail_as_error=True)
def test_add_vxlan_invalid_port_length(self):
wrapper = ip_lib.IPWrapper()
self.assertRaises(exceptions.NetworkVxlanPortRangeError,
wrapper.add_vxlan, 'vxlan0', 'vni0', group='group0',
dev='dev0', ttl='ttl0', tos='tos0',
local='local0', proxy=True,
port=('1', '2', '3'))
def test_add_device_to_namespace(self):
dev = mock.Mock()
ip_lib.IPWrapper(namespace='ns').add_device_to_namespace(dev)
dev.assert_has_calls([mock.call.link.set_netns('ns')])
def test_add_device_to_namespace_is_none(self):
dev = mock.Mock()
ip_lib.IPWrapper().add_device_to_namespace(dev)
self.assertEqual(dev.mock_calls, [])
class TestIPDevice(base.BaseTestCase):
def test_eq_same_name(self):
dev1 = ip_lib.IPDevice('tap0')
dev2 = ip_lib.IPDevice('tap0')
self.assertEqual(dev1, dev2)
def test_eq_diff_name(self):
dev1 = ip_lib.IPDevice('tap0')
dev2 = ip_lib.IPDevice('tap1')
self.assertNotEqual(dev1, dev2)
def test_eq_same_namespace(self):
dev1 = ip_lib.IPDevice('tap0', 'ns1')
dev2 = ip_lib.IPDevice('tap0', 'ns1')
self.assertEqual(dev1, dev2)
def test_eq_diff_namespace(self):
dev1 = ip_lib.IPDevice('tap0', namespace='ns1')
dev2 = ip_lib.IPDevice('tap0', namespace='ns2')
self.assertNotEqual(dev1, dev2)
def test_eq_other_is_none(self):
dev1 = ip_lib.IPDevice('tap0', namespace='ns1')
self.assertIsNotNone(dev1)
def test_str(self):
self.assertEqual(str(ip_lib.IPDevice('tap0')), 'tap0')
class TestIPCommandBase(base.BaseTestCase):
def setUp(self):
super(TestIPCommandBase, self).setUp()
self.ip = mock.Mock()
self.ip.namespace = 'namespace'
self.ip_cmd = ip_lib.IpCommandBase(self.ip)
self.ip_cmd.COMMAND = 'foo'
def test_run(self):
self.ip_cmd._run([], ('link', 'show'))
self.ip.assert_has_calls([mock.call._run([], 'foo', ('link', 'show'))])
def test_run_with_options(self):
self.ip_cmd._run(['o'], ('link'))
self.ip.assert_has_calls([mock.call._run(['o'], 'foo', ('link'))])
def test_as_root_namespace_false(self):
self.ip_cmd._as_root([], ('link'))
self.ip.assert_has_calls(
[mock.call._as_root([],
'foo',
('link'),
use_root_namespace=False)])
def test_as_root_namespace_true(self):
self.ip_cmd._as_root([], ('link'), use_root_namespace=True)
self.ip.assert_has_calls(
[mock.call._as_root([],
'foo',
('link'),
use_root_namespace=True)])
def test_as_root_namespace_true_with_options(self):
self.ip_cmd._as_root('o', 'link', use_root_namespace=True)
self.ip.assert_has_calls(
[mock.call._as_root('o',
'foo',
('link'),
use_root_namespace=True)])
class TestIPDeviceCommandBase(base.BaseTestCase):
def setUp(self):
super(TestIPDeviceCommandBase, self).setUp()
self.ip_dev = mock.Mock()
self.ip_dev.name = 'eth0'
self.ip_dev._execute = mock.Mock(return_value='executed')
self.ip_cmd = ip_lib.IpDeviceCommandBase(self.ip_dev)
self.ip_cmd.COMMAND = 'foo'
def test_name_property(self):
self.assertEqual(self.ip_cmd.name, 'eth0')
class TestIPCmdBase(base.BaseTestCase):
def setUp(self):
super(TestIPCmdBase, self).setUp()
self.parent = mock.Mock()
self.parent.name = 'eth0'
def _assert_call(self, options, args):
self.parent._run.assert_has_calls([
mock.call(options, self.command, args)])
def _assert_sudo(self, options, args, use_root_namespace=False):
self.parent._as_root.assert_has_calls(
[mock.call(options, self.command, args,
use_root_namespace=use_root_namespace)])
class TestIpRuleCommand(TestIPCmdBase):
def setUp(self):
super(TestIpRuleCommand, self).setUp()
self.parent._as_root.return_value = ''
self.command = 'rule'
self.rule_cmd = ip_lib.IpRuleCommand(self.parent)
def _test_add_rule(self, ip, table, priority):
ip_version = netaddr.IPNetwork(ip).version
self.rule_cmd.add(ip, table=table, priority=priority)
self._assert_sudo([ip_version], (['show']))
self._assert_sudo([ip_version], ('add', 'from', ip,
'priority', str(priority),
'table', str(table),
'type', 'unicast'))
def _test_add_rule_exists(self, ip, table, priority, output):
self.parent._as_root.return_value = output
ip_version = netaddr.IPNetwork(ip).version
self.rule_cmd.add(ip, table=table, priority=priority)
self._assert_sudo([ip_version], (['show']))
def _test_delete_rule(self, ip, table, priority):
ip_version = netaddr.IPNetwork(ip).version
self.rule_cmd.delete(ip, table=table, priority=priority)
self._assert_sudo([ip_version],
('del', 'priority', str(priority),
'table', str(table), 'type', 'unicast'))
def test__parse_line(self):
def test(ip_version, line, expected):
actual = self.rule_cmd._parse_line(ip_version, line)
self.assertEqual(expected, actual)
test(4, "4030201:\tfrom 1.2.3.4/24 lookup 10203040",
{'from': '1.2.3.4/24',
'table': '10203040',
'type': 'unicast',
'priority': '4030201'})
test(6, "1024: from all iif qg-c43b1928-48 lookup noscope",
{'priority': '1024',
'from': '::/0',
'type': 'unicast',
'iif': 'qg-c43b1928-48',
'table': 'noscope'})
def test__make_canonical_all_v4(self):
actual = self.rule_cmd._make_canonical(4, {'from': 'all'})
self.assertEqual({'from': '0.0.0.0/0', 'type': 'unicast'}, actual)
def test__make_canonical_all_v6(self):
actual = self.rule_cmd._make_canonical(6, {'from': 'all'})
self.assertEqual({'from': '::/0', 'type': 'unicast'}, actual)
def test__make_canonical_lookup(self):
actual = self.rule_cmd._make_canonical(6, {'lookup': 'table'})
self.assertEqual({'table': 'table', 'type': 'unicast'}, actual)
def test__make_canonical_iif(self):
actual = self.rule_cmd._make_canonical(6, {'iif': 'iface_name'})
self.assertEqual({'iif': 'iface_name', 'type': 'unicast'}, actual)
def test__make_canonical_fwmark(self):
actual = self.rule_cmd._make_canonical(6, {'fwmark': '0x400'})
self.assertEqual({'fwmark': '0x400/0xffffffff',
'type': 'unicast'}, actual)
def test__make_canonical_fwmark_with_mask(self):
actual = self.rule_cmd._make_canonical(6, {'fwmark': '0x400/0x00ff'})
self.assertEqual({'fwmark': '0x400/0xff', 'type': 'unicast'}, actual)
def test__make_canonical_fwmark_integer(self):
actual = self.rule_cmd._make_canonical(6, {'fwmark': 0x400})
self.assertEqual({'fwmark': '0x400/0xffffffff',
'type': 'unicast'}, actual)
def test__make_canonical_fwmark_iterable(self):
actual = self.rule_cmd._make_canonical(6, {'fwmark': (0x400, 0xffff)})
self.assertEqual({'fwmark': '0x400/0xffff', 'type': 'unicast'}, actual)
def test_add_rule_v4(self):
self._test_add_rule('192.168.45.100', 2, 100)
def test_add_rule_v4_exists(self):
self._test_add_rule_exists('192.168.45.100', 2, 101, RULE_V4_SAMPLE)
def test_add_rule_v6(self):
self._test_add_rule('2001:db8::1', 3, 200)
def test_add_rule_v6_exists(self):
self._test_add_rule_exists('2001:db8::1', 3, 201, RULE_V6_SAMPLE)
def test_delete_rule_v4(self):
self._test_delete_rule('192.168.45.100', 2, 100)
def test_delete_rule_v6(self):
self._test_delete_rule('2001:db8::1', 3, 200)
class TestIpLinkCommand(TestIPCmdBase):
def setUp(self):
super(TestIpLinkCommand, self).setUp()
self.parent._run.return_value = LINK_SAMPLE[1]
self.command = 'link'
self.link_cmd = ip_lib.IpLinkCommand(self.parent)
def test_set_address(self):
self.link_cmd.set_address('aa:bb:cc:dd:ee:ff')
self._assert_sudo([], ('set', 'eth0', 'address', 'aa:bb:cc:dd:ee:ff'))
def test_set_mtu(self):
self.link_cmd.set_mtu(1500)
self._assert_sudo([], ('set', 'eth0', 'mtu', 1500))
def test_set_up(self):
observed = self.link_cmd.set_up()
self.assertEqual(self.parent._as_root.return_value, observed)
self._assert_sudo([], ('set', 'eth0', 'up'))
def test_set_down(self):
observed = self.link_cmd.set_down()
self.assertEqual(self.parent._as_root.return_value, observed)
self._assert_sudo([], ('set', 'eth0', 'down'))
def test_set_netns(self):
self.link_cmd.set_netns('foo')
self._assert_sudo([], ('set', 'eth0', 'netns', 'foo'))
self.assertEqual(self.parent.namespace, 'foo')
def test_set_name(self):
self.link_cmd.set_name('tap1')
self._assert_sudo([], ('set', 'eth0', 'name', 'tap1'))
self.assertEqual(self.parent.name, 'tap1')
def test_set_alias(self):
self.link_cmd.set_alias('openvswitch')
self._assert_sudo([], ('set', 'eth0', 'alias', 'openvswitch'))
def test_delete(self):
self.link_cmd.delete()
self._assert_sudo([], ('delete', 'eth0'))
def test_address_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.address, 'cc:dd:ee:ff:ab:cd')
def test_mtu_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.mtu, 1500)
def test_qdisc_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.qdisc, 'mq')
def test_qlen_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.qlen, 1000)
def test_alias_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.alias, 'openvswitch')
def test_state_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.state, 'UP')
def test_settings_property(self):
expected = {'mtu': 1500,
'qlen': 1000,
'state': 'UP',
'qdisc': 'mq',
'brd': 'ff:ff:ff:ff:ff:ff',
'link/ether': 'cc:dd:ee:ff:ab:cd',
'alias': 'openvswitch'}
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.attributes, expected)
self._assert_call(['o'], ('show', 'eth0'))
class TestIpAddrCommand(TestIPCmdBase):
def setUp(self):
super(TestIpAddrCommand, self).setUp()
self.parent.name = 'tap0'
self.command = 'addr'
self.addr_cmd = ip_lib.IpAddrCommand(self.parent)
def test_add_address(self):
self.addr_cmd.add('192.168.45.100/24')
self._assert_sudo([4],
('add', '192.168.45.100/24',
'scope', 'global',
'dev', 'tap0',
'brd', '192.168.45.255'))
def test_add_address_scoped(self):
self.addr_cmd.add('192.168.45.100/24', scope='link')
self._assert_sudo([4],
('add', '192.168.45.100/24',
'scope', 'link',
'dev', 'tap0',
'brd', '192.168.45.255'))
def test_del_address(self):
self.addr_cmd.delete('192.168.45.100/24')
self._assert_sudo([4],
('del', '192.168.45.100/24', 'dev', 'tap0'))
def test_flush(self):
self.addr_cmd.flush(6)
self._assert_sudo([6], ('flush', 'tap0'))
def test_list(self):
expected = [
dict(scope='global', dadfailed=False, tentative=False,
dynamic=False, cidr='172.16.77.240/24'),
dict(scope='global', dadfailed=False, tentative=False,
dynamic=True, cidr='2001:470:9:1224:5595:dd51:6ba2:e788/64'),
dict(scope='link', dadfailed=False, tentative=True,
dynamic=False, cidr='fe80::3023:39ff:febc:22ae/64'),
dict(scope='link', dadfailed=True, tentative=True,
dynamic=False, cidr='fe80::3023:39ff:febc:22af/64'),
dict(scope='global', dadfailed=False, tentative=False,
dynamic=True, cidr='2001:470:9:1224:fd91:272:581e:3a32/64'),
dict(scope='global', dadfailed=False, tentative=False,
dynamic=True, cidr='2001:470:9:1224:4508:b885:5fb:740b/64'),
dict(scope='global', dadfailed=False, tentative=False,
dynamic=True, cidr='2001:470:9:1224:dfcc:aaff:feb9:76ce/64'),
dict(scope='link', dadfailed=False, tentative=False,
dynamic=False, cidr='fe80::dfcc:aaff:feb9:76ce/64')]
test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2]
for test_case in test_cases:
self.parent._run = mock.Mock(return_value=test_case)
self.assertEqual(expected, self.addr_cmd.list())
self._assert_call([], ('show', 'tap0'))
def test_wait_until_address_ready(self):
self.parent._run.return_value = ADDR_SAMPLE
# this address is not tentative or failed so it should return
self.assertIsNone(self.addr_cmd.wait_until_address_ready(
'2001:470:9:1224:fd91:272:581e:3a32'))
def test_wait_until_address_ready_non_existent_address(self):
self.addr_cmd.list = mock.Mock(return_value=[])
with testtools.ExpectedException(ip_lib.AddressNotReady):
self.addr_cmd.wait_until_address_ready('abcd::1234')
def test_wait_until_address_ready_timeout(self):
tentative_address = 'fe80::3023:39ff:febc:22ae'
self.addr_cmd.list = mock.Mock(return_value=[
dict(scope='link', dadfailed=False, tentative=True, dynamic=False,
cidr=tentative_address + '/64')])
with testtools.ExpectedException(ip_lib.AddressNotReady):
self.addr_cmd.wait_until_address_ready(tentative_address,
wait_time=1)
def test_list_filtered(self):
expected = [
dict(scope='global', tentative=False, dadfailed=False,
dynamic=False, cidr='172.16.77.240/24')]
test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2]
for test_case in test_cases:
output = '\n'.join(test_case.split('\n')[0:4])
self.parent._run.return_value = output
self.assertEqual(self.addr_cmd.list('global',
filters=['permanent']), expected)
self._assert_call([], ('show', 'tap0', 'permanent', 'scope',
'global'))
class TestIpRouteCommand(TestIPCmdBase):
def setUp(self):
super(TestIpRouteCommand, self).setUp()
self.parent.name = 'eth0'
self.command = 'route'
self.route_cmd = ip_lib.IpRouteCommand(self.parent)
self.ip_version = 4
self.table = 14
self.metric = 100
self.cidr = '192.168.45.100/24'
self.ip = '10.0.0.1'
self.gateway = '192.168.45.100'
self.test_cases = [{'sample': GATEWAY_SAMPLE1,
'expected': {'gateway': '10.35.19.254',
'metric': 100}},
{'sample': GATEWAY_SAMPLE2,
'expected': {'gateway': '10.35.19.254',
'metric': 100}},
{'sample': GATEWAY_SAMPLE3,
'expected': None},
{'sample': GATEWAY_SAMPLE4,
'expected': {'gateway': '10.35.19.254'}},
{'sample': GATEWAY_SAMPLE5,
'expected': {'gateway': '192.168.99.1'}},
{'sample': GATEWAY_SAMPLE6,
'expected': {'gateway': '192.168.99.1',
'metric': 100}},
{'sample': GATEWAY_SAMPLE7,
'expected': {'metric': 1}}]
def test_add_gateway(self):
self.route_cmd.add_gateway(self.gateway, self.metric, self.table)
self._assert_sudo([self.ip_version],
('replace', 'default',
'via', self.gateway,
'metric', self.metric,
'dev', self.parent.name,
'table', self.table))
def test_add_gateway_subtable(self):
self.route_cmd.table(self.table).add_gateway(self.gateway, self.metric)
self._assert_sudo([self.ip_version],
('replace', 'default',
'via', self.gateway,
'metric', self.metric,
'dev', self.parent.name,
'table', self.table))
def test_del_gateway_success(self):
self.route_cmd.delete_gateway(self.gateway, table=self.table)
self._assert_sudo([self.ip_version],
('del', 'default',
'via', self.gateway,
'dev', self.parent.name,
'table', self.table))
def test_del_gateway_success_subtable(self):
self.route_cmd.table(table=self.table).delete_gateway(self.gateway)
self._assert_sudo([self.ip_version],
('del', 'default',
'via', self.gateway,
'dev', self.parent.name,
'table', self.table))
def test_del_gateway_cannot_find_device(self):
self.parent._as_root.side_effect = RuntimeError("Cannot find device")
exc = self.assertRaises(exceptions.DeviceNotFoundError,
self.route_cmd.delete_gateway,
self.gateway, table=self.table)
self.assertIn(self.parent.name, str(exc))
def test_del_gateway_other_error(self):
self.parent._as_root.side_effect = RuntimeError()
self.assertRaises(RuntimeError, self.route_cmd.delete_gateway,
self.gateway, table=self.table)
def test_get_gateway(self):
for test_case in self.test_cases:
self.parent._run = mock.Mock(return_value=test_case['sample'])
self.assertEqual(self.route_cmd.get_gateway(),
test_case['expected'])
def test_pullup_route(self):
# NOTE(brian-haley) Currently we do not have any IPv6-specific usecase
# for pullup_route, hence skipping. Revisit, if required, in future.
if self.ip_version == 6:
return
# interface is not the first in the list - requires
# deleting and creating existing entries
output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE1]
def pullup_side_effect(self, *args):
result = output.pop(0)
return result
self.parent._run = mock.Mock(side_effect=pullup_side_effect)
self.route_cmd.pullup_route('tap1d7888a7-10', ip_version=4)
self._assert_sudo([4], ('del', '10.0.0.0/24', 'dev', 'qr-23380d11-d2'))
self._assert_sudo([4], ('append', '10.0.0.0/24', 'proto', 'kernel',
'src', '10.0.0.1', 'dev', 'qr-23380d11-d2'))
def test_pullup_route_first(self):
# NOTE(brian-haley) Currently we do not have any IPv6-specific usecase
# for pullup_route, hence skipping. Revisit, if required, in future.
if self.ip_version == 6:
return
# interface is first in the list - no changes
output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE2]
def pullup_side_effect(self, *args):
result = output.pop(0)
return result
self.parent._run = mock.Mock(side_effect=pullup_side_effect)
self.route_cmd.pullup_route('tap1d7888a7-10', ip_version=4)
# Check two calls - device get and subnet get
self.assertEqual(len(self.parent._run.mock_calls), 2)
def test_add_route(self):
self.route_cmd.add_route(self.cidr, self.ip, self.table)
self._assert_sudo([self.ip_version],
('replace', self.cidr,
'via', self.ip,
'dev', self.parent.name,
'table', self.table))
def test_add_route_no_via(self):
self.route_cmd.add_route(self.cidr, table=self.table)
self._assert_sudo([self.ip_version],
('replace', self.cidr,
'dev', self.parent.name,
'table', self.table))
def test_add_route_with_scope(self):
self.route_cmd.add_route(self.cidr, scope='link')
self._assert_sudo([self.ip_version],
('replace', self.cidr,
'dev', self.parent.name,
'scope', 'link'))
def test_add_route_no_device(self):
self.parent._as_root.side_effect = RuntimeError("Cannot find device")
self.assertRaises(exceptions.DeviceNotFoundError,
self.route_cmd.add_route,
self.cidr, self.ip, self.table)
def test_delete_route(self):
self.route_cmd.delete_route(self.cidr, self.ip, self.table)
self._assert_sudo([self.ip_version],
('del', self.cidr,
'via', self.ip,
'dev', self.parent.name,
'table', self.table))
def test_delete_route_no_via(self):
self.route_cmd.delete_route(self.cidr, table=self.table)
self._assert_sudo([self.ip_version],
('del', self.cidr,
'dev', self.parent.name,
'table', self.table))
def test_delete_route_with_scope(self):
self.route_cmd.delete_route(self.cidr, scope='link')
self._assert_sudo([self.ip_version],
('del', self.cidr,
'dev', self.parent.name,
'scope', 'link'))
def test_delete_route_no_device(self):
self.parent._as_root.side_effect = RuntimeError("Cannot find device")
self.assertRaises(exceptions.DeviceNotFoundError,
self.route_cmd.delete_route,
self.cidr, self.ip, self.table)
def test_list_routes(self):
self.parent._run.return_value = (
"default via 172.124.4.1 dev eth0 metric 100\n"
"10.0.0.0/22 dev eth0 scope link\n"
"172.24.4.0/24 dev eth0 proto kernel src 172.24.4.2\n")
routes = self.route_cmd.table(self.table).list_routes(self.ip_version)
self.assertEqual([{'cidr': '0.0.0.0/0',
'dev': 'eth0',
'metric': '100',
'table': 14,
'via': '172.124.4.1'},
{'cidr': '10.0.0.0/22',
'dev': 'eth0',
'scope': 'link',
'table': 14},
{'cidr': '172.24.4.0/24',
'dev': 'eth0',
'proto': 'kernel',
'src': '172.24.4.2',
'table': 14}], routes)
def test_list_onlink_routes_subtable(self):
self.parent._run.return_value = (
"10.0.0.0/22\n"
"172.24.4.0/24 proto kernel src 172.24.4.2\n")
routes = self.route_cmd.table(self.table).list_onlink_routes(
self.ip_version)
self.assertEqual(['10.0.0.0/22'], [r['cidr'] for r in routes])
self._assert_call([self.ip_version],
('list', 'dev', self.parent.name,
'table', self.table, 'scope', 'link'))
def test_add_onlink_route_subtable(self):
self.route_cmd.table(self.table).add_onlink_route(self.cidr)
self._assert_sudo([self.ip_version],
('replace', self.cidr,
'dev', self.parent.name,
'table', self.table,
'scope', 'link'))
def test_delete_onlink_route_subtable(self):
self.route_cmd.table(self.table).delete_onlink_route(self.cidr)
self._assert_sudo([self.ip_version],
('del', self.cidr,
'dev', self.parent.name,
'table', self.table,
'scope', 'link'))
class TestIPv6IpRouteCommand(TestIpRouteCommand):
def setUp(self):
super(TestIPv6IpRouteCommand, self).setUp()
self.ip_version = 6
self.cidr = '2001:db8::/64'
self.ip = '2001:db8::100'
self.gateway = '2001:db8::1'
self.test_cases = [{'sample': IPv6_GATEWAY_SAMPLE1,
'expected':
{'gateway': '2001:470:9:1224:4508:b885:5fb:740b',
'metric': 100}},
{'sample': IPv6_GATEWAY_SAMPLE2,
'expected':
{'gateway': '2001:470:9:1224:4508:b885:5fb:740b',
'metric': 100}},
{'sample': IPv6_GATEWAY_SAMPLE3,
'expected': None},
{'sample': IPv6_GATEWAY_SAMPLE4,
'expected':
{'gateway': 'fe80::dfcc:aaff:feb9:76ce'}},
{'sample': IPv6_GATEWAY_SAMPLE5,
'expected':
{'gateway': '2001:470:9:1224:4508:b885:5fb:740b',
'metric': 1024}}]
def test_list_routes(self):
self.parent._run.return_value = (
"default via 2001:db8::1 dev eth0 metric 100\n"
"2001:db8::/64 dev eth0 proto kernel src 2001:db8::2\n")
routes = self.route_cmd.table(self.table).list_routes(self.ip_version)
self.assertEqual([{'cidr': '::/0',
'dev': 'eth0',
'metric': '100',
'table': 14,
'via': '2001:db8::1'},
{'cidr': '2001:db8::/64',
'dev': 'eth0',
'proto': 'kernel',
'src': '2001:db8::2',
'table': 14}], routes)
class TestIPRoute(TestIpRouteCommand):
"""Leverage existing tests for IpRouteCommand for IPRoute
This test leverages the tests written for IpRouteCommand. The difference
is that the 'dev' argument should not be passed for each of the commands.
So, this test removes the dev argument from the expected arguments in each
assert.
"""
def setUp(self):
super(TestIPRoute, self).setUp()
self.parent = ip_lib.IPRoute()
self.parent._run = mock.Mock()
self.parent._as_root = mock.Mock()
self.route_cmd = self.parent.route
self.check_dev_args = False
def _remove_dev_args(self, args):
def args_without_dev():
previous = None
for arg in args:
if 'dev' not in (arg, previous):
yield arg
previous = arg
return tuple(arg for arg in args_without_dev())
def _assert_call(self, options, args):
if not self.check_dev_args:
args = self._remove_dev_args(args)
super(TestIPRoute, self)._assert_call(options, args)
def _assert_sudo(self, options, args, use_root_namespace=False):
if not self.check_dev_args:
args = self._remove_dev_args(args)
super(TestIPRoute, self)._assert_sudo(options, args)
def test_pullup_route(self):
# This method gets the interface name passed to it as an argument. So,
# don't remove it from the expected arguments.
self.check_dev_args = True
super(TestIPRoute, self).test_pullup_route()
def test_del_gateway_cannot_find_device(self):
# This test doesn't make sense for this case since dev won't be passed
pass
class TestIpNetnsCommand(TestIPCmdBase):
def setUp(self):
super(TestIpNetnsCommand, self).setUp()
self.command = 'netns'
self.netns_cmd = ip_lib.IpNetnsCommand(self.parent)
def test_add_namespace(self):
with mock.patch('neutron.agent.common.utils.execute') as execute:
ns = self.netns_cmd.add('ns')
self._assert_sudo([], ('add', 'ns'), use_root_namespace=True)
self.assertEqual(ns.namespace, 'ns')
execute.assert_called_once_with(
['ip', 'netns', 'exec', 'ns',
'sysctl', '-w', 'net.ipv4.conf.all.promote_secondaries=1'],
run_as_root=True, check_exit_code=True, extra_ok_codes=None)
def test_delete_namespace(self):
with mock.patch('neutron.agent.common.utils.execute'):
self.netns_cmd.delete('ns')
self._assert_sudo([], ('delete', 'ns'), use_root_namespace=True)
def test_namespace_exists_use_helper(self):
self.config(group='AGENT', use_helper_for_ns_read=True)
retval = '\n'.join(NETNS_SAMPLE)
# need another instance to avoid mocking
netns_cmd = ip_lib.IpNetnsCommand(ip_lib.SubProcessBase())
with mock.patch('neutron.agent.common.utils.execute') as execute:
execute.return_value = retval
self.assertTrue(
netns_cmd.exists('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'))
execute.assert_called_once_with(['ip', '-o', 'netns', 'list'],
run_as_root=True,
log_fail_as_error=True)
def test_namespace_doest_not_exist_no_helper(self):
self.config(group='AGENT', use_helper_for_ns_read=False)
retval = '\n'.join(NETNS_SAMPLE)
# need another instance to avoid mocking
netns_cmd = ip_lib.IpNetnsCommand(ip_lib.SubProcessBase())
with mock.patch('neutron.agent.common.utils.execute') as execute:
execute.return_value = retval
self.assertFalse(
netns_cmd.exists('bbbbbbbb-1111-2222-3333-bbbbbbbbbbbb'))
execute.assert_called_once_with(['ip', '-o', 'netns', 'list'],
run_as_root=False,
log_fail_as_error=True)
def test_execute(self):
self.parent.namespace = 'ns'
with mock.patch('neutron.agent.common.utils.execute') as execute:
self.netns_cmd.execute(['ip', 'link', 'list'])
execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns', 'ip',
'link', 'list'],
run_as_root=True,
check_exit_code=True,
extra_ok_codes=None)
def test_execute_env_var_prepend(self):
self.parent.namespace = 'ns'
with mock.patch('neutron.agent.common.utils.execute') as execute:
env = dict(FOO=1, BAR=2)
self.netns_cmd.execute(['ip', 'link', 'list'], env)
execute.assert_called_once_with(
['ip', 'netns', 'exec', 'ns', 'env'] +
['%s=%s' % (k, v) for k, v in env.items()] +
['ip', 'link', 'list'],
run_as_root=True, check_exit_code=True, extra_ok_codes=None)
def test_execute_nosudo_with_no_namespace(self):
with mock.patch('neutron.agent.common.utils.execute') as execute:
self.parent.namespace = None
self.netns_cmd.execute(['test'])
execute.assert_called_once_with(['test'],
check_exit_code=True,
extra_ok_codes=None,
run_as_root=False)
class TestDeviceExists(base.BaseTestCase):
def test_device_exists(self):
with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute:
_execute.return_value = LINK_SAMPLE[1]
self.assertTrue(ip_lib.device_exists('eth0'))
_execute.assert_called_once_with(['o'], 'link', ('show', 'eth0'),
log_fail_as_error=False)
def test_device_does_not_exist(self):
with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute:
_execute.return_value = ''
_execute.side_effect = RuntimeError
self.assertFalse(ip_lib.device_exists('eth0'))
def test_ensure_device_is_ready(self):
ip_lib_mock = mock.Mock()
with mock.patch.object(ip_lib, 'IPDevice', return_value=ip_lib_mock):
self.assertTrue(ip_lib.ensure_device_is_ready("eth0"))
self.assertTrue(ip_lib_mock.link.set_up.called)
ip_lib_mock.reset_mock()
# device doesn't exists
ip_lib_mock.link.set_up.side_effect = RuntimeError
self.assertFalse(ip_lib.ensure_device_is_ready("eth0"))
class TestIpNeighCommand(TestIPCmdBase):
def setUp(self):
super(TestIpNeighCommand, self).setUp()
self.parent.name = 'tap0'
self.command = 'neigh'
self.neigh_cmd = ip_lib.IpNeighCommand(self.parent)
def test_add_entry(self):
self.neigh_cmd.add('192.168.45.100', 'cc:dd:ee:ff:ab:cd')
self._assert_sudo([4],
('replace', '192.168.45.100',
'lladdr', 'cc:dd:ee:ff:ab:cd',
'nud', 'permanent',
'dev', 'tap0'))
def test_delete_entry(self):
self.neigh_cmd.delete('192.168.45.100', 'cc:dd:ee:ff:ab:cd')
self._assert_sudo([4],
('del', '192.168.45.100',
'lladdr', 'cc:dd:ee:ff:ab:cd',
'dev', 'tap0'))
def test_flush(self):
self.neigh_cmd.flush(4, '192.168.0.1')
self._assert_sudo([4], ('flush', 'to', '192.168.0.1'))
class TestArpPing(TestIPCmdBase):
@mock.patch.object(ip_lib, 'IPWrapper')
@mock.patch('eventlet.spawn_n')
def test_send_ipv4_addr_adv_notif(self, spawn_n, mIPWrapper):
spawn_n.side_effect = lambda f: f()
ARPING_COUNT = 3
address = '20.0.0.1'
config = mock.Mock()
config.send_arp_for_ha = ARPING_COUNT
ip_lib.send_ip_addr_adv_notif(mock.sentinel.ns_name,
mock.sentinel.iface_name,
address,
config)
self.assertTrue(spawn_n.called)
mIPWrapper.assert_called_once_with(namespace=mock.sentinel.ns_name)
ip_wrapper = mIPWrapper(namespace=mock.sentinel.ns_name)
# Just test that arping is called with the right arguments
arping_cmd = ['arping', '-A',
'-I', mock.sentinel.iface_name,
'-c', ARPING_COUNT,
'-w', mock.ANY,
address]
ip_wrapper.netns.execute.assert_any_call(arping_cmd,
check_exit_code=True)
@mock.patch('eventlet.spawn_n')
def test_no_ipv6_addr_notif(self, spawn_n):
ipv6_addr = 'fd00::1'
config = mock.Mock()
config.send_arp_for_ha = 3
ip_lib.send_ip_addr_adv_notif(mock.sentinel.ns_name,
mock.sentinel.iface_name,
ipv6_addr,
config)
self.assertFalse(spawn_n.called)
class TestAddNamespaceToCmd(base.BaseTestCase):
def test_add_namespace_to_cmd_with_namespace(self):
cmd = ['ping', '8.8.8.8']
self.assertEqual(['ip', 'netns', 'exec', 'tmp'] + cmd,
ip_lib.add_namespace_to_cmd(cmd, 'tmp'))
def test_add_namespace_to_cmd_without_namespace(self):
cmd = ['ping', '8.8.8.8']
self.assertEqual(cmd, ip_lib.add_namespace_to_cmd(cmd, None))
| {
"content_hash": "18fcbd0bbfbc9c222880a02a96bdadf3",
"timestamp": "",
"source": "github",
"line_count": 1303,
"max_line_length": 79,
"avg_line_length": 43.116653875671524,
"alnum_prop": 0.53644470550542,
"repo_name": "SamYaple/neutron",
"id": "af205002da3851e94d3e0ed7687ae61ca445e85e",
"size": "56817",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/agent/linux/test_ip_lib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "7687444"
},
{
"name": "Shell",
"bytes": "14690"
}
],
"symlink_target": ""
} |
from __future__ import division
import base64
import os
import vistrails.core.db.action
from vistrails.core.modules.basic_modules import File, Boolean, String, Directory
from vistrails.core.modules.vistrails_module import Module, ModuleError, NotCacheable
from vistrails.core.vistrail.vistrail import Vistrail
from vistrails.db.services.locator import DBLocator
from vistrails.core.system import get_elementtree_library
from vistrails.db.services import io
from vistrails.db.versions import currentVersion
ElementTree = get_elementtree_library()
class VtlFileCreator(NotCacheable, Module):
"""This Module creates a vtl file for the workflow where it is
present.
By default it generates a string with an <vtlink> </vtlink> XML
element or it can write the vtl file to disk if filename is
provided. If directory is provided, the final filename will be
the concatenation of directory and filename.
Other input ports:
execute: Boolean
tells VisTrails to execute the workflow when the vtl is open
showSpreadsheetOnly: Boolean
tells VisTrails to hide the builder window and show only the
spreadsheet when the vtl is open
embedWorkflow: Boolean
it will embed a vistrail encoded as base64 containing only
the workflow where the module is. It can be loaded by
VisTrails.
"""
def __init__(self):
Module.__init__(self)
self.locator = None
self.version = -1
self.pipeline = None
self.execute = False
self.embedWorkflow = False
self.showSpreadsheetOnly = False
self.forceDB = False
def get_locator_and_version(self):
self.locator = self.moduleInfo['locator']
self.version = self.moduleInfo['version']
self.pipeline = self.moduleInfo['pipeline']
if self.locator is None:
raise ModuleError(self, 'could not get the locator for this pipeline')
if self.version is None:
raise ModuleError(self, 'could not get the version number of this peline')
@staticmethod
def generate_vtl(locator,version,pipeline,execute=False,forceDB=False,
showSpreadsheetOnly=False,embedWorkflow=False):
"""generate_vtl(locator:DBLocator or XMLLocator,
version: str, pipeline:Pipeline, execute:boolean,
forceDB:boolean, showspreadsheetOnly:boolean,
embedWorkflow: boolean) -> str
It generates the contents of a .vtl file with the information
given.
"""
node = ElementTree.Element('vtlink')
if isinstance(locator, DBLocator):
node.set('host', str(locator.host))
node.set('port', str(locator.port))
node.set('database', str(locator.db))
node.set('vtid', str(locator.obj_id))
elif locator is not None:
node.set('filename', str(locator.name))
node.set('version', str(version))
node.set('execute', str(execute))
node.set('forceDB', str(forceDB))
node.set('showSpreadsheetOnly', str(showSpreadsheetOnly))
if embedWorkflow == True:
vistrail = Vistrail()
action_list = []
for module in pipeline.module_list:
action_list.append(('add', module))
for connection in pipeline.connection_list:
action_list.append(('add', connection))
action = vistrails.core.db.action.create_action(action_list)
vistrail.add_action(action, 0L)
vistrail.addTag("Imported workflow", action.id)
if not forceDB:
node.set('version', str(action.id))
if not vistrail.db_version:
vistrail.db_version = currentVersion
pipxmlstr = io.serialize(vistrail)
vtcontent = base64.b64encode(pipxmlstr)
node.set('vtcontent',vtcontent)
return ElementTree.tostring(node)
def compute(self):
self.get_locator_and_version()
if self.has_input('execute'):
self.execute = self.get_input('execute')
if self.has_input('forceDB'):
self.forceDB = self.get_input('forceDB')
if self.has_input('showSpreadsheetOnly'):
self.showSpreadsheetOnly = self.get_input('showSpreadsheetOnly')
if self.has_input('embedWorkflow'):
self.embedWorkflow = self.get_input('embedWorkflow')
xmlstring = self.generate_vtl(self.locator,self.version,self.pipeline,
self.execute,self.forceDB,
self.showSpreadsheetOnly,self.embedWorkflow)
if self.has_input('filename'):
filename = self.get_input('filename')
if self.has_input('directory'):
directory = self.get_input('directory').name
filename = os.path.join(directory,filename)
file_ = open(filename,'w')
file_.write(xmlstring)
file_.close()
self.set_output("xmlstring", xmlstring)
_input_ports = [('execute', Boolean, True),
('showspreadsheetOnly', Boolean, True),
('embedWorkflow', Boolean, True),
('forceDB', Boolean, True),
('filename', String),
('directory', Directory)]
_output_ports = [('xmlstring', String)]
_modules = [VtlFileCreator]
| {
"content_hash": "300ca60ca9f13da138b6f13dbdc663a0",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 86,
"avg_line_length": 39.479166666666664,
"alnum_prop": 0.5980650835532102,
"repo_name": "VisTrails/VisTrails",
"id": "e528e11e04e342f98d3a60052b05548084527b68",
"size": "7598",
"binary": false,
"copies": "2",
"ref": "refs/heads/v2.2",
"path": "vistrails/packages/vtlcreator/init.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1129"
},
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Mako",
"bytes": "66613"
},
{
"name": "PHP",
"bytes": "49302"
},
{
"name": "Python",
"bytes": "19779006"
},
{
"name": "R",
"bytes": "782836"
},
{
"name": "Ruby",
"bytes": "875"
},
{
"name": "SQLPL",
"bytes": "2323"
},
{
"name": "Shell",
"bytes": "26542"
},
{
"name": "TeX",
"bytes": "147247"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
} |
import copy
from core import actor
from core.actor import *
from ui import text, infobubble
from utils import utility
def load_data():
Balloon.pickup_sound = utility.load_sound('pop')
Bonus250.master_animation_list.build_animation('Idle', ['balloonblue'])
Bonus500.master_animation_list.build_animation('Idle', ['balloongreen'])
BonusX2.master_animation_list.build_animation('Idle', ['balloonred'])
BonusX2.pickup_sound = utility.load_sound('doublePoints')
BonusCombo.master_animation_list.build_animation('Idle', ['balloonyellow'])
BonusCombo.pickup_sound = utility.load_sound('combo')
class Balloon(actor.Actor):
def __init__(self):
actor.Actor.__init__(self)
self.actor_type = ACTOR_TYPE_PICKUP
self.bound_style = BOUND_STYLE_KILL
self.bounds = -32, -32, SCREEN_WIDTH + 32, SCREEN_HEIGHT + 32
# MOVEMENT VARIABLES
self.wave = 0
self.move_right = True
self.x_movement = 1
self.velocity = vector.Vector2d.zero
def actor_update(self):
self.active = True
if self.move_right:
self.wave -= 1
if self.wave < -0.20 * FRAMES_PER_SECOND:
self.move_right = False
else:
self.wave += 1
if self.wave > FRAMES_PER_SECOND:
self.move_right = True
self.x_movement = (self.wave / FRAMES_PER_SECOND) * 1.5
self.velocity = vector.Vector2d(self.x_movement, -3)
def collide(self):
if self.object_collided_with.actor_type == ACTOR_PLAYER:
utility.play_sound(self.pickup_sound)
self.die()
class Bonus250(Balloon):
master_animation_list = animation.Animation()
def __init__(self, position, text_group):
Balloon.__init__(self)
self.animation_list = copy.copy(self.master_animation_list)
self.animation_list.set_parent(self)
self.animation_list.play('Idle')
self.rect = self.image.get_rect()
self.hitrect = pygame.Rect(0,0,60,60)
self.hitrect_offset_y = -5
self.text_group = text_group
self.position = vector.Vector2d(position)
def die(self):
if self.object_collided_with.actor_type == ACTOR_PLAYER:
self.object_collided_with.increment_score(250, self.position, self.text_group)
self.active = False
self.kill()
del self
class Bonus500(Balloon):
master_animation_list = animation.Animation()
def __init__(self, position, text_group):
Balloon.__init__(self)
self.animation_list = copy.copy(self.master_animation_list)
self.animation_list.set_parent(self)
self.animation_list.play('Idle')
self.rect = self.image.get_rect()
self.hitrect = pygame.Rect(0, 0, 60, 60)
self.hitrect_offset_y = -5
self.text_group = text_group
self.position = vector.Vector2d(position)
def die(self):
if self.object_collided_with.actor_type == ACTOR_PLAYER:
self.object_collided_with.increment_score(500, self.position, self.text_group)
self.active = False
self.kill()
del self
class BonusX2(Balloon):
master_animation_list = animation.Animation()
def __init__(self, position, text_group):
Balloon.__init__(self)
self.animation_list = copy.copy(self.master_animation_list)
self.animation_list.set_parent(self)
self.animation_list.play('Idle')
self.rect = self.image.get_rect()
self.hitrect = pygame.Rect(0, 0, 60, 60)
self.hitrect_offset_y = -5
self.position = vector.Vector2d(position)
self.text_group = text_group
def die(self):
if self.object_collided_with.actor_type == ACTOR_PLAYER:
self.object_collided_with.point_bonus += 5 * FRAMES_PER_SECOND
temp_image = text.Text(FONT_PATH, 30, FONT_COLOR, 'Double Points!', 1).image
help_bubble = infobubble.InfoBubble(temp_image, self.object_collided_with, 1.5 * FRAMES_PER_SECOND)
help_bubble.offset = vector.Vector2d(0.0, -100.0)
self.text_group.add(help_bubble)
self.active = False
self.kill()
del self
class BonusCombo(Balloon):
master_animation_list = animation.Animation()
def __init__(self, position, text_group):
Balloon.__init__(self)
self.animation_list = copy.copy(self.master_animation_list)
self.animation_list.set_parent(self)
self.animation_list.play('Idle')
self.rect = self.image.get_rect()
self.hitrect = pygame.Rect(0, 0, 60, 60)
self.hitrect_offset_y = -5
self.position = vector.Vector2d(position)
self.text_group = text_group
def die(self):
if self.object_collided_with.actor_type == ACTOR_PLAYER:
self.object_collided_with.combo_bonus += 5 * FRAMES_PER_SECOND
temp_image = text.Text(FONT_PATH, 30, FONT_COLOR, 'Combo Time!', 1).image
help_bubble = infobubble.InfoBubble(temp_image, self.object_collided_with, 1.5 * FRAMES_PER_SECOND)
help_bubble.offset = vector.Vector2d(0.0, -100.0)
self.text_group.add(help_bubble)
self.active = False
self.kill()
del self
| {
"content_hash": "edf20c206c61fa50c1ee244e78b76a60",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 111,
"avg_line_length": 33.084848484848486,
"alnum_prop": 0.6021249313061,
"repo_name": "JoshuaSkelly/TroubleInCloudLand",
"id": "539ccda95391968012de8cf470bb4028b0aa25e1",
"size": "5459",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "core/balloon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "60"
},
{
"name": "Python",
"bytes": "244790"
}
],
"symlink_target": ""
} |
"""
tests.test_component_mqtt_eventstream
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests MQTT eventstream component.
"""
import json
import unittest
from unittest.mock import ANY, patch
import homeassistant.components.mqtt_eventstream as eventstream
from homeassistant.const import EVENT_STATE_CHANGED
from homeassistant.core import State
from homeassistant.remote import JSONEncoder
import homeassistant.util.dt as dt_util
from tests.common import (
get_test_home_assistant,
mock_mqtt_component,
fire_mqtt_message,
mock_state_change_event,
fire_time_changed
)
class TestMqttEventStream(unittest.TestCase):
""" Test the MQTT eventstream module. """
def setUp(self): # pylint: disable=invalid-name
super(TestMqttEventStream, self).setUp()
self.hass = get_test_home_assistant()
self.mock_mqtt = mock_mqtt_component(self.hass)
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.hass.stop()
def add_eventstream(self, sub_topic=None, pub_topic=None):
""" Add a mqtt_eventstream component to the hass. """
config = {}
if sub_topic:
config['subscribe_topic'] = sub_topic
if pub_topic:
config['publish_topic'] = pub_topic
return eventstream.setup(self.hass, {eventstream.DOMAIN: config})
def test_setup_succeeds(self):
self.assertTrue(self.add_eventstream())
def test_setup_with_pub(self):
# Should start off with no listeners for all events
self.assertEqual(self.hass.bus.listeners.get('*'), None)
self.assertTrue(self.add_eventstream(pub_topic='bar'))
self.hass.pool.block_till_done()
# Verify that the event handler has been added as a listener
self.assertEqual(self.hass.bus.listeners.get('*'), 1)
@patch('homeassistant.components.mqtt.subscribe')
def test_subscribe(self, mock_sub):
sub_topic = 'foo'
self.assertTrue(self.add_eventstream(sub_topic=sub_topic))
self.hass.pool.block_till_done()
# Verify that the this entity was subscribed to the topic
mock_sub.assert_called_with(self.hass, sub_topic, ANY)
@patch('homeassistant.components.mqtt.publish')
@patch('homeassistant.core.dt_util.datetime_to_str')
def test_state_changed_event_sends_message(self, mock_datetime, mock_pub):
now = '00:19:19 11-01-2016'
e_id = 'fake.entity'
pub_topic = 'bar'
mock_datetime.return_value = now
# Add the eventstream component for publishing events
self.assertTrue(self.add_eventstream(pub_topic=pub_topic))
self.hass.pool.block_till_done()
# Reset the mock because it will have already gotten calls for the
# mqtt_eventstream state change on initialization, etc.
mock_pub.reset_mock()
# Set a state of an entity
mock_state_change_event(self.hass, State(e_id, 'on'))
self.hass.pool.block_till_done()
# The order of the JSON is indeterminate,
# so first just check that publish was called
mock_pub.assert_called_with(self.hass, pub_topic, ANY)
self.assertTrue(mock_pub.called)
# Get the actual call to publish and make sure it was the one
# we were looking for
msg = mock_pub.call_args[0][2]
event = {}
event['event_type'] = EVENT_STATE_CHANGED
new_state = {
"last_updated": now,
"state": "on",
"entity_id": e_id,
"attributes": {},
"last_changed": now
}
event['event_data'] = {"new_state": new_state, "entity_id": e_id}
# Verify that the message received was that expected
self.assertEqual(json.loads(msg), event)
@patch('homeassistant.components.mqtt.publish')
def test_time_event_does_not_send_message(self, mock_pub):
self.assertTrue(self.add_eventstream(pub_topic='bar'))
self.hass.pool.block_till_done()
# Reset the mock because it will have already gotten calls for the
# mqtt_eventstream state change on initialization, etc.
mock_pub.reset_mock()
fire_time_changed(self.hass, dt_util.utcnow())
self.assertFalse(mock_pub.called)
def test_receiving_remote_event_fires_hass_event(self):
sub_topic = 'foo'
self.assertTrue(self.add_eventstream(sub_topic=sub_topic))
self.hass.pool.block_till_done()
calls = []
self.hass.bus.listen_once('test_event', lambda _: calls.append(1))
self.hass.pool.block_till_done()
payload = json.dumps(
{'event_type': 'test_event', 'event_data': {}},
cls=JSONEncoder
)
fire_mqtt_message(self.hass, sub_topic, payload)
self.hass.pool.block_till_done()
self.assertEqual(1, len(calls))
| {
"content_hash": "cacf6393278bd62bb40d228244ae237b",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 78,
"avg_line_length": 35.143884892086334,
"alnum_prop": 0.6374616171954964,
"repo_name": "sfam/home-assistant",
"id": "5e1680ad2a424e302a31bd38632cab1bcff1f5c2",
"size": "4885",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/test_mqtt_eventstream.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1338771"
},
{
"name": "Python",
"bytes": "1400448"
},
{
"name": "Shell",
"bytes": "4573"
}
],
"symlink_target": ""
} |
from selenium.webdriver.common.keys import Keys
from tests.integration.tests.test_textarea import Test as TestTextarea
from . import VisualTest
class Test(VisualTest):
urls = TestTextarea.urls
def test_test_default_usecase(self):
self.driver.get('%s%s' % (self.live_server_url, TestTextarea.test_default_usecase.url))
self.assertScreenshot('form', 'textarea_default_usecase', threshold=1)
def test_missing_value_error(self):
self.driver.get('%s%s' % (self.live_server_url, TestTextarea.test_missing_value_error.url))
self.driver.find_element_by_css_selector("button").send_keys(Keys.RETURN)
self.assertScreenshot('form', 'textarea_missing_value_error', threshold=1)
def test_render_with_value(self):
self.driver.get('%s%s' % (self.live_server_url, TestTextarea.test_render_with_value.url))
self.driver.find_element_by_css_selector("textarea").send_keys('1234')
self.driver.find_element_by_css_selector("button").send_keys(Keys.RETURN)
self.assertScreenshot('form', 'textarea_render_with_value', threshold=1)
def test_part_group_class(self):
self.driver.get('%s%s' % (self.live_server_url, TestTextarea.test_part_group_class.url))
self.assertScreenshot('form', 'textarea_part_group_class', threshold=1)
def test_part_add_group_class(self):
self.driver.get('%s%s' % (self.live_server_url, TestTextarea.test_part_add_group_class.url))
self.assertScreenshot('form', 'textarea_part_add_group_class', threshold=1)
def test_part_prefix(self):
self.driver.get('%s%s' % (self.live_server_url, TestTextarea.test_part_prefix.url))
self.assertScreenshot('form', 'textarea_part_prefix', threshold=1)
def test_part_add_control_class(self):
self.driver.get('%s%s' % (self.live_server_url, TestTextarea.test_part_add_control_class.url))
self.assertScreenshot('form', 'textarea_part_add_control_class', threshold=1)
def test_part_label(self):
self.driver.get('%s%s' % (self.live_server_url, TestTextarea.test_part_label.url))
self.assertScreenshot('form', 'textarea_part_label', threshold=1)
def test_part_add_label_class(self):
self.driver.get('%s%s' % (self.live_server_url, TestTextarea.test_part_add_label_class.url))
self.assertScreenshot('form', 'textarea_part_add_label_class', threshold=1)
def test_part_help_text(self):
self.driver.get('%s%s' % (self.live_server_url, TestTextarea.test_part_help_text.url))
self.assertScreenshot('form', 'textarea_part_help_text', threshold=1)
def test_part_errors(self):
self.driver.get('%s%s' % (self.live_server_url, TestTextarea.test_part_errors.url))
self.assertScreenshot('form', 'textarea_part_errors', threshold=1)
| {
"content_hash": "56048ecece5b7f85267807adf337eea0",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 102,
"avg_line_length": 50.19642857142857,
"alnum_prop": 0.6905016008537886,
"repo_name": "afifnz/django-material",
"id": "6d3d3c26c69371187cbe6f395a47c1fc9b6dd83a",
"size": "2811",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "tests/visual/tests/test_textarea.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "218692"
},
{
"name": "HTML",
"bytes": "111577"
},
{
"name": "JavaScript",
"bytes": "361169"
},
{
"name": "Python",
"bytes": "173377"
}
],
"symlink_target": ""
} |
import datetime
from controler import webControl
from models import messageModel
import logging
__author__ = 'Jesse'
class JSON(webControl):
""" Forwards all JSON related HTTP requests to controller methods"""
@classmethod
def getMessages(cls, self):
"""Handles text JSON GET requests
GETS should be in the format server/raw/numberOfPostsToGetViaJSON"""
logging.debug("getMessages")
output = ['']
path = self.environ['PATH_INFO']
path = str(path)
if path is not "/": path = path.split('/')
# MAIN PROCESSING HERE!
numberToGet = int(path[2])
logging.debug('Number To Get:' + str(numberToGet))
output.append(cls.getMessagesFromDBasJSONObjectArray(numberToGet)) # Calls controller
output_len = sum(len(line) for line in output)
status = '200 OK'
response_headers = [('Content-type', 'text/html'), ('Content-Length', str(output_len))]
self.start(status, response_headers)
yield ''.join(output)
@classmethod
def getMessage(cls, self):
"""Handles all text JSON GET requests
GETS should be in the format /msg/indexToGet"""
logging.debug("getSingleMessage")
output = ['']
path = self.environ['PATH_INFO']
path = str(path)
if path is not "/": path = path.split('/')
# MAIN PROCESSING HERE!
indexToGet = int(path[2])
logging.debug('Index To Get:' + str(indexToGet))
output.append(str(cls.getMessageAsJSONObject(indexToGet))) # Calls controller
output_len = sum(len(line) for line in output)
status = '200 OK'
response_headers = [('Content-type', 'text/html'), ('Content-Length', str(output_len))]
self.start(status, response_headers)
yield ''.join(output)
@classmethod
def POST_Messages(cls, self):
"""Handles all text JSON PUT requests
PUT should be in the format create=message, edit=index+message=newmessage, delete=index """
# logging.debug('JSON PUTs')
output = ['']
try:
request_body_size = int(self.environ.get('CONTENT_LENGTH', 0))
except ValueError:
request_body_size = 0
request_body = self.environ['wsgi.input'].read(request_body_size)
if request_body_size != 0:
returnValue = cls.postControl(request_body)
output.append('Request Received (' + str(cls.cleanInput(request_body)) + ') : ' + str(returnValue))
else:
output.append('Empty Request')
logging.warning('Empty Request Body')
output_len = sum(len(line) for line in output)
status = '200 OK'
response_headers = [('Content-type', 'text/html'), ('Content-Length', str(output_len))]
self.start(status, response_headers)
yield ''.join(output)
@classmethod
def getMessagesSearch(cls, self):
""" Calls search function and adds returned JSON to HTML output
GETS should be in the format /search/messagesToSearchFor"""
logging.debug("searchMessageJSON")
output = ['']
path = self.environ['PATH_INFO']
path = str(path)
if path is not "/": path = path.split('/')
# MAIN PROCESSING HERE!
msgToSearchFor = path[2]
logging.debug('Messages To Get:' + str(msgToSearchFor))
output.append(str(webControl.searchForMessagesJSON(msgToSearchFor))) # Calls controller
output_len = sum(len(line) for line in output)
status = '200 OK'
response_headers = [('Content-type', 'text/html'), ('Content-Length', str(output_len))]
self.start(status, response_headers)
yield ''.join(output)
class HTMLHelper(webControl):
@staticmethod
def getHeader():
# Might need to generate separate page to handle request?
output = []
output.append('<pre>')
output.append("ForgeLand Message Board: ")
output.append(' <a href="/">Create</a>')
output.append(' <a href="/edit">Edit</a>')
output.append(' <a href="/delete">Delete</a>')
output.append(' <a href="/search">Search</a>')
output.append('<hr>')
return output
@staticmethod
def getForm(formType, output):
if formType == "create":
output.append('<form method="post">'
'<input type="text" name="create" value="Message">'
'<input type="submit" value="Create" onclick="reloadPage()"></form>')
if formType == "edit":
output.append('<form method="post">'
'<input type="text" name="edit" value="New message">'
'<input type="text" name="index" value="Index">'
'<input type="submit" value="Edit"></form>')
if formType == "delete":
output.append('<form method="post">'
'<input type="text" name="delete" value="Index">'
'<input type="submit" value="Delete"></form>')
if formType == "search":
output.append('<form method="get">'
'<input type="text" name="q">'
'<input type="submit" value="Search"></form>')
return output
@classmethod
def getMessagesTable(cls, output, search=None):
""" Adds all messages to the HTML output for display"""
logging.debug("Getting messages")
# TODO Should move this code to the controller! (It's really short though -_-)
output.append("<table><tr><th>Message</th><th>Timestamp</th><th>Index</th></tr>")
if search is None:
indexList = cls.getMessagesFromDB()
else:
indexList = cls.getMessagesFromDBSearch(search)
for x in indexList:
message = str(messageModel.message(x)) # Fields stored as unicode, just to make life hard -_-
timeStamp = str(messageModel.getTimestamp(x))
msgIndex = str(messageModel.getIndex(x))
# Cannot use cls.message to call, it needs to directly access its associated class
output.append('<tr>')
output.append('<td>' + message + '</td>')
output.append('<td>' + timeStamp + '</td>')
output.append('<td>' + msgIndex + '</td>')
output.append('</tr>')
output.append('</table>')
return output
@staticmethod
def getFooter(output):
str(datetime.datetime.now().replace(microsecond=0))
output.append('<hr>Retrieved @ ' + str(datetime.datetime.now().replace(microsecond=0)))
return output
class HTTP(HTMLHelper):
"""Handles all web and text requests over HTTP"""
@classmethod
def GET_MainIndex(cls, self):
""" HTML for create new message view + POST controller"""
output = HTMLHelper.getHeader()
output = HTMLHelper.getForm("create", output)
output = HTMLHelper.getMessagesTable(output)
# command=create&input=someTextHere
# If we detect input, do this
if self.environ['REQUEST_METHOD'] == 'POST':
try:
request_body_size = int(self.environ.get('CONTENT_LENGTH', 0))
except ValueError:
request_body_size = 0
request_body = self.environ['wsgi.input'].read(request_body_size)
cls.postControl(request_body)
output = HTMLHelper.getFooter(output)
output = ''.join(output)
output_len = sum(len(line) for line in output)
status = '200 OK'
response_headers = [('Content-type', 'text/html'), ('Content-Length', str(output_len))]
self.start(status, response_headers)
yield ''.join(output)
@classmethod
def GET_edit(cls, self):
""" HTML for create new message view + POST controller"""
output = HTMLHelper.getHeader()
output = HTMLHelper.getForm("edit", output)
output = HTMLHelper.getMessagesTable(output)
# command=create&input=someTextHere
# If we detect input, do this
if self.environ['REQUEST_METHOD'] == 'POST':
try:
request_body_size = int(self.environ.get('CONTENT_LENGTH', 0))
except ValueError:
request_body_size = 0
request_body = self.environ['wsgi.input'].read(request_body_size)
cls.postControl(request_body)
output = HTMLHelper.getFooter(output)
output = ''.join(output)
output_len = sum(len(line) for line in output)
status = '200 OK'
response_headers = [('Content-type', 'text/html'), ('Content-Length', str(output_len))]
self.start(status, response_headers)
yield ''.join(output)
@classmethod
def GET_delete(cls, self):
""" HTML for create new message view + POST controller"""
output = HTMLHelper.getHeader()
output = HTMLHelper.getForm("delete", output)
output = HTMLHelper.getMessagesTable(output)
# command=create&input=someTextHere
# If we detect input, do this
if self.environ['REQUEST_METHOD'] == 'POST':
try:
request_body_size = int(self.environ.get('CONTENT_LENGTH', 0))
except ValueError:
request_body_size = 0
request_body = self.environ['wsgi.input'].read(request_body_size)
cls.postControl(request_body)
output = HTMLHelper.getFooter(output)
output = ''.join(output)
output_len = sum(len(line) for line in output)
status = '200 OK'
response_headers = [('Content-type', 'text/html'), ('Content-Length', str(output_len))]
self.start(status, response_headers)
yield ''.join(output)
@classmethod
def GET_search(cls, self):
""" HTML for create new message view + POST controller"""
output = HTMLHelper.getHeader()
output = HTMLHelper.getForm('search', output)
if self.environ['REQUEST_METHOD'] == 'GET':
query = str(self.environ['QUERY_STRING'])
if query.find('=') is not -1:
query = query.split('=')
search = query[1]
if search == "":
output = HTMLHelper.getMessagesTable(output)
else:
output.append('Searching for: ' + search)
output = HTMLHelper.getMessagesTable(output, search)
else:
output = HTMLHelper.getMessagesTable(output)
output = HTMLHelper.getFooter(output)
output = ''.join(output)
output_len = sum(len(line) for line in output)
status = '200 OK'
response_headers = [('Content-type', 'text/html'), ('Content-Length', str(output_len))]
self.start(status, response_headers)
yield ''.join(output)
@staticmethod
def notFound(self):
status = '404 Not Found'
response_headers = [('Content-type', 'text/plain')]
self.start(status, response_headers)
yield "Not Found"
| {
"content_hash": "9f4f29bc7ebecfabf40543b2ed63cbcc",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 102,
"avg_line_length": 33.47457627118644,
"alnum_prop": 0.6548860759493671,
"repo_name": "Jelloeater/forgeLandWall",
"id": "9b997d70e21521f148072665ab481d148f038ed4",
"size": "9875",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "forgeLandWall/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Lua",
"bytes": "22094"
},
{
"name": "Python",
"bytes": "32257"
}
],
"symlink_target": ""
} |
"""
This is the single point of entry to generate the sample configuration
file for dragonflow. It collects all the necessary info from the other modules
in this package. It is assumed that:
* every other module in this package has a 'list_opts' function which
return a dict where
* the keys are strings which are the group names
* the value of each key is a list of config options for that group
* the dragonflow.conf package doesn't have further packages with config options
* this module is only used in the context of sample file generation
"""
import collections
import imp
import os
import pkgutil
from dragonflow._i18n import _ as _i18
LIST_OPTS_FUNC_NAME = "list_opts"
def list_opts():
opts = collections.defaultdict(list)
imported_modules = _import_modules()
_append_config_options(imported_modules, opts)
return [(key, val) for key, val in opts.items()]
def _import_modules():
imported_modules = []
package_path = os.path.dirname(os.path.abspath(__file__))
for _, modname, ispkg in pkgutil.iter_modules(path=[package_path]):
if modname == __name__.split('.')[-1] or ispkg:
continue
path = ('%s/%s.py' % (package_path, modname))
mod = imp.load_source(modname, path)
if not hasattr(mod, LIST_OPTS_FUNC_NAME):
msg = _i18("The module '%s' should have a '%s' function which "
"returns the config options." % (mod.__name__,
LIST_OPTS_FUNC_NAME))
raise Exception(msg)
else:
imported_modules.append(mod)
return imported_modules
def _append_config_options(imported_modules, config_options):
for mod in imported_modules:
configs = mod.list_opts()
for key, val in configs.items():
config_options[key].extend(val)
| {
"content_hash": "bed2bd656c27ea301c22c5338cdbbe06",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 79,
"avg_line_length": 33.836363636363636,
"alnum_prop": 0.645889306824288,
"repo_name": "openstack/dragonflow",
"id": "f107ab4ea6b059b80b6de60a0f73681f914ea894",
"size": "2416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dragonflow/conf/opts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "2386"
},
{
"name": "Dockerfile",
"bytes": "690"
},
{
"name": "Mako",
"bytes": "1053"
},
{
"name": "Python",
"bytes": "1740942"
},
{
"name": "Ruby",
"bytes": "4449"
},
{
"name": "Shell",
"bytes": "70410"
}
],
"symlink_target": ""
} |
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class CreateInvite(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the CreateInvite Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(CreateInvite, self).__init__(temboo_session, '/Library/Fitbit/Social/CreateInvite')
def new_input_set(self):
return CreateInviteInputSet()
def _make_result_set(self, result, path):
return CreateInviteResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return CreateInviteChoreographyExecution(session, exec_id, path)
class CreateInviteInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the CreateInvite
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessTokenSecret(self, value):
"""
Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret retrieved during the OAuth process.)
"""
super(CreateInviteInputSet, self)._set_input('AccessTokenSecret', value)
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token retrieved during the OAuth process.)
"""
super(CreateInviteInputSet, self)._set_input('AccessToken', value)
def set_ConsumerKey(self, value):
"""
Set the value of the ConsumerKey input for this Choreo. ((required, string) The Consumer Key provided by Fitbit.)
"""
super(CreateInviteInputSet, self)._set_input('ConsumerKey', value)
def set_ConsumerSecret(self, value):
"""
Set the value of the ConsumerSecret input for this Choreo. ((required, string) The Consumer Secret provided by Fitbit.)
"""
super(CreateInviteInputSet, self)._set_input('ConsumerSecret', value)
def set_InvitedUserEmail(self, value):
"""
Set the value of the InvitedUserEmail input for this Choreo. ((conditional, string) The email address of the user to invite; user can be a Fitbit member already. Required unless providing the InvitedUserID.)
"""
super(CreateInviteInputSet, self)._set_input('InvitedUserEmail', value)
def set_InvitedUserID(self, value):
"""
Set the value of the InvitedUserID input for this Choreo. ((conditional, string) The Fitbit user id of the user to send an invite to. Required unless providing the InvitedUserEmail.)
"""
super(CreateInviteInputSet, self)._set_input('InvitedUserID', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that you want the response to be in: xml or json. Defaults to json.)
"""
super(CreateInviteInputSet, self)._set_input('ResponseFormat', value)
def set_UserID(self, value):
"""
Set the value of the UserID input for this Choreo. ((optional, string) The user's encoded id. Defaults to "-" (dash) which will return data for the user associated with the token credentials provided.)
"""
super(CreateInviteInputSet, self)._set_input('UserID', value)
class CreateInviteResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the CreateInvite Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Fitbit.)
"""
return self._output.get('Response', None)
class CreateInviteChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return CreateInviteResultSet(response, path)
| {
"content_hash": "f497abea5dc448278845c47ba2c162a2",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 215,
"avg_line_length": 46.40659340659341,
"alnum_prop": 0.6940563580393085,
"repo_name": "jordanemedlock/psychtruths",
"id": "ad0fa7c6517b0cc3e0e04b248b8071b8d35346b8",
"size": "5095",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "temboo/core/Library/Fitbit/Social/CreateInvite.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "HTML",
"bytes": "34650"
},
{
"name": "JavaScript",
"bytes": "423"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "23444578"
}
],
"symlink_target": ""
} |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class ZapWalletTXesTest (BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test (self):
print("Mining blocks...")
self.nodes[0].generate(1)
self.sync_all()
self.nodes[1].generate(61)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 500000)
txid0 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
txid1 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
txid2 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
txid3 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
tx0 = self.nodes[0].gettransaction(txid0)
assert_equal(tx0['txid'], txid0) #tx0 must be available (confirmed)
tx1 = self.nodes[0].gettransaction(txid1)
assert_equal(tx1['txid'], txid1) #tx1 must be available (confirmed)
tx2 = self.nodes[0].gettransaction(txid2)
assert_equal(tx2['txid'], txid2) #tx2 must be available (unconfirmed)
tx3 = self.nodes[0].gettransaction(txid3)
assert_equal(tx3['txid'], txid3) #tx3 must be available (unconfirmed)
#restart bitcoind
self.nodes[0].stop()
bitcoind_processes[0].wait()
self.nodes[0] = start_node(0,self.options.tmpdir)
tx3 = self.nodes[0].gettransaction(txid3)
assert_equal(tx3['txid'], txid3) #tx must be available (unconfirmed)
self.nodes[0].stop()
bitcoind_processes[0].wait()
#restart bitcoind with zapwallettxes
self.nodes[0] = start_node(0,self.options.tmpdir, ["-zapwallettxes=1"])
assert_raises(JSONRPCException, self.nodes[0].gettransaction, [txid3])
#there must be a expection because the unconfirmed wallettx0 must be gone by now
tx0 = self.nodes[0].gettransaction(txid0)
assert_equal(tx0['txid'], txid0) #tx0 (confirmed) must still be available because it was confirmed
if __name__ == '__main__':
ZapWalletTXesTest ().main ()
| {
"content_hash": "c680bc04de433ce2000e1900866cbab9",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 106,
"avg_line_length": 37,
"alnum_prop": 0.612987987987988,
"repo_name": "psionin/smartcoin",
"id": "c5a329d0c6e9586968d80c250391197005d776d1",
"size": "2879",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/zapwallettxes.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1102755"
},
{
"name": "C++",
"bytes": "3857098"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "135902"
},
{
"name": "Makefile",
"bytes": "85486"
},
{
"name": "Objective-C",
"bytes": "3275"
},
{
"name": "Objective-C++",
"bytes": "7240"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "415970"
},
{
"name": "QMake",
"bytes": "2019"
},
{
"name": "Roff",
"bytes": "19837"
},
{
"name": "Shell",
"bytes": "38142"
}
],
"symlink_target": ""
} |
from pyearth._record import ForwardPassRecord, ForwardPassIteration
from pyearth._util import gcv
from ..testing_utils import assert_list_almost_equal
num_samples = 1000
num_variables = 10
penalty = 3.0
sst = 100.0
varnames = ['x' + str(i) for i in range(num_variables)]
record = ForwardPassRecord(num_samples, num_variables,
penalty, sst, varnames)
record.append(ForwardPassIteration(0, 3, 3, 63.0, 3))
record.append(ForwardPassIteration(0, 3, 14, 34.0, 5))
record.append(ForwardPassIteration(3, 6, 12, 18.0, 7))
mses = [sst, 63.0, 34.0, 18.0]
sizes = [1, 3, 5, 7]
def test_statistics():
mses = [record.mse(i) for i in range(len(record))]
mses_ = [mses[i] for i in range(len(record))]
gcvs = [record.gcv(i) for i in range(len(record))]
gcvs_ = [gcv(mses[i], sizes[i], num_samples, penalty)
for i in range(len(record))]
rsqs = [record.rsq(i) for i in range(len(record))]
rsqs_ = [1 - (mses[i] / sst)
for i in range(len(record))]
grsqs = [record.grsq(i) for i in range(len(record))]
grsqs_ = [1 - (record.gcv(i) / gcv(sst, 1, num_samples, penalty))
for i in range(len(record))]
assert_list_almost_equal(mses, mses_)
assert_list_almost_equal(gcvs, gcvs_)
assert_list_almost_equal(rsqs, rsqs_)
| {
"content_hash": "9d9c40007fd1682d41049e2549a4e7d7",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 69,
"avg_line_length": 38.411764705882355,
"alnum_prop": 0.6378254211332313,
"repo_name": "DucQuang1/py-earth",
"id": "86ea5d990227e6b8efc18d1e915252f2a29637bd",
"size": "1306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyearth/test/record/test_forward_pass.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "171874"
}
],
"symlink_target": ""
} |
"""QwikSwitch USB Modem library for Python.
See: http://www.qwikswitch.co.za/qs-usb.php
Currently supports relays, buttons and LED dimmers
Source: http://www.github.com/kellerza/pyqwikswitch
"""
import logging
import math
from enum import Enum
import attr
_LOGGER = logging.getLogger(__name__)
QS_CMD = 'cmd'
CMD_BUTTONS = ['TOGGLE', 'SCENE EXE', 'LEVEL']
"""
Commands ['cmd'] strings used by QS buttons
Toggle - Normal button
Scene exe - execute scene
Level - switch all lights off
""" # pylint: disable=W0105
QS_ID = 'id'
QS_VALUE = 'val'
QS_TYPE = 'type'
QS_NAME = 'name'
QSVAL = 'qs_val'
QSDATA = 'data'
CMD_UPDATE = 'update'
URL_LISTEN = "{}/&listen"
URL_VERSION = "{}/&version?"
URL_SET = "{}/{}={}"
URL_DEVICES = "{}/&device"
class QSType(Enum):
"""Supported QSUSB types."""
relay = 1 # rel
dimmer = 2 # dim
humidity_temperature = 3 # hum
unknown = 9
_MAX = 255
# pylint: disable=too-many-return-statements, too-many-branches
def _legacy_status(stat):
"""Legacy status method from the 'qsmobile.js' library.
Pass in the 'val' from &devices or the
'data' received after calling a specific ID.
"""
# 2d0c00002a0000
if stat[:2] == '30' or stat[:2] == '47': # RX1 CT
ooo = stat[4:5]
# console.log("legstat. " + o);
if ooo == '0':
return 0
if ooo == '8':
return 100
if stat == '7e':
return 0
if stat == '7f':
return 100
if len(stat) == 6: # old
try:
val = int(stat[4:], 16)
except ValueError:
val = 0
hwt = stat[:2]
if hwt == '01': # old dim
return round(((125 - val) / 125) * 100)
if hwt == '02': # old rel
return 100 if val == 127 else 0
if hwt == '28': # LED DIM
if stat[2:4] == '01':
if stat[4:] == '78':
return 0
return round(((120 - val) / 120) * 100)
# Additional decodes not part of qsmobile.js
if stat.upper().find('ON') >= 0: # Relay
return 100
if (not stat) or stat.upper().find('OFF') >= 0:
return 0
if stat.endswith('%'): # New style dimmers
if stat[:-1].isdigit:
return int(stat[:-1])
_LOGGER.debug("val='%s' used a -1 fallback in legacy_status", stat)
return -1 # fallback to return an int
# return stat
@attr.s(slots=True)
class QSDev():
"""A single QS device."""
data = attr.ib(validator=attr.validators.instance_of(dict))
qstype = attr.ib(init=False, validator=attr.validators.instance_of(QSType))
value = attr.ib(
default=-5, validator=attr.validators.instance_of((float, int)))
def __attrs_post_init__(self):
"""Init."""
# pylint: disable=no-member
_types = {'rel': QSType.relay, 'dim': QSType.dimmer,
'hum': QSType.humidity_temperature}
self.qstype = _types.get(self.data.get(QS_TYPE, ''), QSType.unknown)
@property
def name(self):
"""Return the name from the qsusb data."""
# pylint: disable=unsubscriptable-object
try:
return self.data[QS_NAME]
except IndexError:
return self.data[QS_ID]
@property
def qsid(self):
"""Return the name from the qsusb data."""
return self.data.get(QS_ID, '') # pylint: disable=no-member
@property
def is_dimmer(self):
"""Return the name from the qsusb data."""
return self.qstype == QSType.dimmer
class QSDevices(dict):
"""Represent the devices from QS Mobile."""
def __init__(self, cb_value_changed, cb_set_qsvalue, dim_adj=1):
"""Initialize."""
self.dim_adj = dim_adj
self._cb_value_changed = cb_value_changed
self._cb_set_qsvalue = cb_set_qsvalue
super().__init__()
def set_value(self, qsid, new):
# Set value & encode new to be passed to QSUSB
"""Set a value."""
try:
dev = self[qsid]
except KeyError:
raise KeyError("Device {} not found".format(qsid))
if new < 0:
new = 0
if new == dev.value:
return
if dev.is_dimmer:
new = _MAX if new > (_MAX * .9) else new
else: # QSType.relay and any other
new = _MAX if new > 0 else 0
def success():
"""Success closure to update value."""
self[qsid].value = new
_LOGGER.debug("set success %s=%s", qsid, new)
self._cb_value_changed(self, qsid, new)
newqs = round(math.pow(round(new / _MAX * 100), 1 / self.dim_adj))
_LOGGER.debug("%s hass=%s --> %s", qsid, new, newqs)
self._cb_set_qsvalue(qsid, newqs, success)
def update_devices(self, devices):
"""Update values from response of URL_DEVICES, callback if changed."""
for qspacket in devices:
try:
qsid = qspacket[QS_ID]
except KeyError:
_LOGGER.debug("Device without ID: %s", qspacket)
continue
if qsid not in self:
self[qsid] = QSDev(data=qspacket)
dev = self[qsid]
dev.data = qspacket
# Decode value from QSUSB
newqs = _legacy_status(qspacket[QS_VALUE])
if dev.is_dimmer:
# Adjust dimmer exponentially to get a smoother effect
newqs = min(round(math.pow(newqs, self.dim_adj)), 100)
newin = round(newqs * _MAX / 100)
if abs(dev.value - newin) > 1: # Significant change
_LOGGER.debug("%s qs=%s --> %s", qsid, newqs, newin)
dev.value = newin
self._cb_value_changed(self, qsid, newin)
def decode_qwikcord(packet, channel=1):
"""Extract the qwikcord current measurements from val (CTavg, CTsum)."""
val = str(packet.get('val', ''))
if len(val) != 16:
return None
if channel == 1:
return int(val[6:12], 16) # CTavg
return int(val[12:], 16) # CTsum
def decode_door(packet, channel=1):
"""Decode a door sensor."""
val = str(packet.get(QSDATA, ''))
if len(val) == 6 and val.startswith('46') and channel == 1:
return val[-1] == '0'
return None
# byte 0:
# 4e = imod
# 46 = Door sensor
# byte 1: firmware
# byte 2: bit values
# 00/64: Door open / Close
# 17/xx: All open / Channels 1-4 at 0004 0321
# byte 3: last change (imod)
def decode_imod(packet, channel=1):
"""Decode an 4 channel imod. May support 6 channels."""
val = str(packet.get(QSDATA, ''))
if len(val) == 8 and val.startswith('4e'):
try:
_map = ((5, 1), (5, 2), (5, 4), (4, 1), (5, 1), (5, 2))[
channel - 1]
return (int(val[_map[0]], 16) & _map[1]) == 0
except IndexError:
return None
return None
# byte 0: 0f = pir
# byte 1: firmware
# byte 2 and 3: number of seconds (in hex) that the PIR sends
# until a device should react.
def decode_pir(packet, channel=1):
"""Decode a PIR."""
val = str(packet.get(QSDATA, ''))
if len(val) == 8 and val.startswith('0f') and channel == 1:
return int(val[-4:], 16) > 0
return None
# byte 0: 34 = temperature / humidity
# byte 1: firmware
# byte 2-3: humidity
# byte 4-5: temperature
def decode_temperature(packet, channel=1):
"""Decode the temperature."""
val = str(packet.get(QSDATA, ''))
if len(val) == 12 and val.startswith('34') and channel == 1:
temperature = int(val[-4:], 16)
return round(float((-46.85 + (175.72 * (temperature / pow(2, 16))))))
return None
def decode_humidity(packet, channel=1):
"""Decode the humidity."""
val = str(packet.get(QSDATA, ''))
if len(val) == 12 and val.startswith('34') and channel == 1:
humidity = int(val[4:-4], 16)
return round(float(-6 + (125 * (humidity / pow(2, 16)))))
return None
SENSORS = {
'imod': (decode_imod, bool),
'door': (decode_door, bool),
'pir': (decode_pir, bool),
'temperature': (decode_temperature, '°C'),
'humidity': (decode_humidity, '%'),
'qwikcord': (decode_qwikcord, 'A/s'),
}
| {
"content_hash": "a3777ade45dd1fe8750a8dbef0c0f750",
"timestamp": "",
"source": "github",
"line_count": 284,
"max_line_length": 79,
"avg_line_length": 29.035211267605632,
"alnum_prop": 0.5511763279165656,
"repo_name": "kellerza/pyqwikswitch",
"id": "5ddf845b0adcb9c8f293764524ae550119bcdca4",
"size": "8247",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyqwikswitch/qwikswitch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21932"
}
],
"symlink_target": ""
} |
from six import with_metaclass, text_type as unicode
import inspect
import os.path
from robot.errors import DataError
from robot.utils import (get_error_details, is_string, is_list_like,
is_dict_like, split_args_from_name_or_path,
type_name, Importer)
from .loggerhelper import AbstractLoggerProxy
from .logger import LOGGER
class _RecursionAvoidingMetaclass(type):
"""Metaclass to wrap listener methods so that they cannot cause recursion.
Recursion would otherwise happen if one listener logs something and that
message is received and logged again by log_message or message method.
"""
def __new__(cls, name, bases, dct):
for attr, value in dct.items():
if not attr.startswith('_') and inspect.isroutine(value):
dct[attr] = cls._wrap_listener_method(value)
dct['_calling_method'] = False
return type.__new__(cls, name, bases, dct)
@staticmethod
def _wrap_listener_method(method):
def wrapped(self, *args):
if not self._calling_method:
self._calling_method = True
method(self, *args)
self._calling_method = False
return wrapped
class Listeners(with_metaclass(_RecursionAvoidingMetaclass, object)):
_start_attrs = ('id', 'doc', 'starttime', 'longname')
_end_attrs = _start_attrs + ('endtime', 'elapsedtime', 'status', 'message')
_kw_extra_attrs = ('args', 'assign', 'kwname', 'libname',
'-id', '-longname', '-message')
def __init__(self, listeners):
self._listeners = self._import_listeners(listeners)
self._running_test = False
self._setup_or_teardown_type = None
def __bool__(self):
return bool(self._listeners)
#PY2
def __nonzero__(self):
return self.__bool__()
def _import_listeners(self, listener_data):
listeners = []
for listener in listener_data:
try:
listeners.append(ListenerProxy(listener))
except DataError as err:
if not is_string(listener):
listener = type_name(listener)
LOGGER.error("Taking listener '%s' into use failed: %s"
% (listener, unicode(err)))
return listeners
def start_suite(self, suite):
for listener in self._listeners:
if listener.version == 1:
listener.call_method(listener.start_suite, suite.name, suite.doc)
else:
attrs = self._get_start_attrs(suite, 'metadata')
attrs.update(self._get_suite_attrs(suite))
listener.call_method(listener.start_suite, suite.name, attrs)
def _get_suite_attrs(self, suite):
return {
'tests' : [t.name for t in suite.tests],
'suites': [s.name for s in suite.suites],
'totaltests': suite.test_count,
'source': suite.source or ''
}
def end_suite(self, suite):
for listener in self._listeners:
self._notify_end_suite(listener, suite)
def _notify_end_suite(self, listener, suite):
if listener.version == 1:
listener.call_method(listener.end_suite, suite.status,
suite.full_message)
else:
attrs = self._get_end_attrs(suite, 'metadata')
attrs['statistics'] = suite.stat_message
attrs.update(self._get_suite_attrs(suite))
listener.call_method(listener.end_suite, suite.name, attrs)
def start_test(self, test):
self._running_test = True
for listener in self._listeners:
if listener.version == 1:
listener.call_method(listener.start_test, test.name, test.doc,
list(test.tags))
else:
attrs = self._get_start_attrs(test, 'tags')
attrs['critical'] = 'yes' if test.critical else 'no'
attrs['template'] = test.template or ''
listener.call_method(listener.start_test, test.name, attrs)
def end_test(self, test):
self._running_test = False
for listener in self._listeners:
self._notify_end_test(listener, test)
def _notify_end_test(self, listener, test):
if listener.version == 1:
listener.call_method(listener.end_test, test.status, test.message)
else:
attrs = self._get_end_attrs(test, 'tags')
attrs['critical'] = 'yes' if test.critical else 'no'
attrs['template'] = test.template or ''
listener.call_method(listener.end_test, test.name, attrs)
def start_keyword(self, kw):
for listener in self._listeners:
if listener.version == 1:
listener.call_method(listener.start_keyword, kw.name, kw.args)
else:
attrs = self._get_start_attrs(kw, *self._kw_extra_attrs)
attrs['type'] = self._get_keyword_type(kw, start=True)
listener.call_method(listener.start_keyword, kw.name, attrs)
def end_keyword(self, kw):
for listener in self._listeners:
if listener.version == 1:
listener.call_method(listener.end_keyword, kw.status)
else:
attrs = self._get_end_attrs(kw, *self._kw_extra_attrs)
attrs['type'] = self._get_keyword_type(kw, start=False)
listener.call_method(listener.end_keyword, kw.name, attrs)
def _get_keyword_type(self, kw, start=True):
# When running setup or teardown, only the top level keyword has type
# set to setup/teardown but we want to pass that type also to all
# start/end_keyword listener methods called below that keyword.
if kw.type == 'kw':
return self._setup_or_teardown_type or 'Keyword'
kw_type = self._get_setup_or_teardown_type(kw)
self._setup_or_teardown_type = kw_type if start else None
return kw_type
def _get_setup_or_teardown_type(self, kw):
return '%s %s' % (('Test' if self._running_test else 'Suite'),
kw.type.title())
def imported(self, import_type, name, attrs):
for listener in self._listeners:
method = getattr(listener, '%s_import' % import_type.lower())
listener.call_method(method, name, attrs)
def log_message(self, msg):
for listener in self._listeners:
if listener.version == 2:
listener.call_method(listener.log_message, self._create_msg_dict(msg))
def message(self, msg):
for listener in self._listeners:
if listener.version == 2:
listener.call_method(listener.message, self._create_msg_dict(msg))
def _create_msg_dict(self, msg):
return {'timestamp': msg.timestamp, 'message': msg.message,
'level': msg.level, 'html': 'yes' if msg.html else 'no'}
def output_file(self, file_type, path):
for listener in self._listeners:
method = getattr(listener, '%s_file' % file_type.lower())
listener.call_method(method, path)
def close(self):
for listener in self._listeners:
listener.call_method(listener.close)
def _get_start_attrs(self, item, *extra):
return self._get_attrs(item, self._start_attrs, extra)
def _get_end_attrs(self, item, *extra):
return self._get_attrs(item, self._end_attrs, extra)
def _get_attrs(self, item, default, extra):
names = self._get_attr_names(default, extra)
return dict((n, self._get_attr_value(item, n)) for n in names)
def _get_attr_names(self, default, extra):
names = list(default)
for name in extra:
if not name.startswith('-'):
names.append(name)
elif name[1:] in names:
names.remove(name[1:])
return names
def _get_attr_value(self, item, name):
value = getattr(item, name)
return self._take_copy_of_mutable_value(value)
def _take_copy_of_mutable_value(self, value):
if is_dict_like(value):
return dict(value)
if is_list_like(value):
return list(value)
return value
class ListenerProxy(AbstractLoggerProxy):
_methods = ['start_suite', 'end_suite', 'start_test', 'end_test',
'start_keyword', 'end_keyword', 'log_message', 'message',
'output_file', 'report_file', 'log_file', 'debug_file',
'xunit_file', 'close', 'library_import', 'resource_import',
'variables_import']
def __init__(self, listener):
if is_string(listener):
name, args = split_args_from_name_or_path(listener)
listener = self._import_listener(name, args)
else:
name = type_name(listener)
AbstractLoggerProxy.__init__(self, listener)
self.name = name
self.version = self._get_version(listener)
if self.version == 1:
LOGGER.warn("Listener '%s' uses deprecated API version 1. "
"Switch to API version 2 instead." % self.name)
def _import_listener(self, name, args):
importer = Importer('listener')
return importer.import_class_or_module(os.path.normpath(name),
instantiate_with_args=args)
def _get_version(self, listener):
try:
return int(getattr(listener, 'ROBOT_LISTENER_API_VERSION', 1))
except ValueError:
return 1
def call_method(self, method, *args):
try:
method(*args)
except:
message, details = get_error_details()
LOGGER.error("Calling listener method '%s' of listener '%s' "
"failed: %s" % (method.__name__, self.name, message))
LOGGER.info("Details:\n%s" % details)
| {
"content_hash": "2a0e9e1bcada69626892bfb075e58cb7",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 86,
"avg_line_length": 39.32156862745098,
"alnum_prop": 0.5768425251820086,
"repo_name": "userzimmermann/robotframework",
"id": "034b7a6de231e5c6f2504a5636fc35f8c1512417",
"size": "10635",
"binary": false,
"copies": "1",
"ref": "refs/heads/python3",
"path": "src/robot/output/listeners.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "210"
},
{
"name": "CSS",
"bytes": "23490"
},
{
"name": "HTML",
"bytes": "140949"
},
{
"name": "Java",
"bytes": "59815"
},
{
"name": "JavaScript",
"bytes": "160761"
},
{
"name": "Python",
"bytes": "2179296"
},
{
"name": "RobotFramework",
"bytes": "2033202"
},
{
"name": "Shell",
"bytes": "281"
}
],
"symlink_target": ""
} |
import numpy as np
def swap_signs(cameras, x3d):
"""
Swaps signs of the camera and 3D points
so the projective depths are positive
Parameters
----------
camera: list
Camera matrices
x3d: numpy array
array containing 3D points
Returns
-------
camera: cameras with the correct sign. empty if error
x3d: points with the correct sign. empty if error
"""
n_views = len(cameras)
n_points = x3d.shape[1]
signs = np.zeros((n_views, n_points))
for i in range(n_views):
signs[i, :] = np.sign(np.dot(cameras[i], x3d))[2, :]
signp = signs[:, 0]
signs *= signp
signx = signs[0, :]
signs *= signx
if np.any(signs < 0):
return [], []
x3d_signed = x3d * signx
cameras_signed = [cameras[i]*signp[i] for i in range(n_views)]
return cameras_signed, x3d_signed | {
"content_hash": "08946ee10e791b94cd26c54f7b9a4953",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 66,
"avg_line_length": 20.045454545454547,
"alnum_prop": 0.5816326530612245,
"repo_name": "guillempalou/scikit-cv",
"id": "b22128667339457c8ec2b4b2145af5f0e6abb55f",
"size": "882",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skcv/multiview/n_views/projective_functions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "16403"
},
{
"name": "JavaScript",
"bytes": "54513"
},
{
"name": "Python",
"bytes": "107299"
},
{
"name": "Shell",
"bytes": "6705"
},
{
"name": "TeX",
"bytes": "63238"
}
],
"symlink_target": ""
} |
from reporter.emailing import (
RECIPIENT_SPIRAL_ADMIN as RECIPIENT_ADMIN
)
from reporter.uhl_reports.civicrm.enrolment_dq import (
DuplicateStudyIdReport,
MissingStudyNumber,
MultipleRecruitementsReport,
CivicrmInvalidCaseStatus
)
from reporter.core import Schedule
CASE_TYPE_ID = 25
class SpiralCiviCrmMissingStudyNumber(MissingStudyNumber):
def __init__(self):
super().__init__(
CASE_TYPE_ID,
recipients=[RECIPIENT_ADMIN],
schedule=Schedule.never,
)
class SpiralCiviCrmDuplicateStudyNumber(DuplicateStudyIdReport):
def __init__(self):
super().__init__(
CASE_TYPE_ID,
recipients=[RECIPIENT_ADMIN],
schedule=Schedule.never,
)
class SpiralCiviCrmMultipleRecruitments(MultipleRecruitementsReport):
def __init__(self):
super().__init__(
CASE_TYPE_ID,
recipients=[RECIPIENT_ADMIN],
schedule=Schedule.never,
)
class SpiralCivicrmInvalidCaseStatus(CivicrmInvalidCaseStatus):
def __init__(self):
super().__init__(
CASE_TYPE_ID,
[
'Recruited',
'Excluded',
'Withdrawn'
],
[RECIPIENT_ADMIN],
schedule=Schedule.never,
)
| {
"content_hash": "cf0758f991c6ebf5722d2658cf01d4af",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 69,
"avg_line_length": 25.74074074074074,
"alnum_prop": 0.5719424460431655,
"repo_name": "LCBRU/reporter",
"id": "5c7de2b7b288b25b2785ff9cc398c06104628308",
"size": "1414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reporter/uhl_reports/spiral/data_quality/civicrm.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "386"
},
{
"name": "HTML",
"bytes": "3199"
},
{
"name": "Python",
"bytes": "600192"
}
],
"symlink_target": ""
} |
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova.compute import utils as compute_utils
from nova import exception
from nova import network
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'floating_ips')
def make_float_ip(elem):
elem.set('id')
elem.set('ip')
elem.set('pool')
elem.set('fixed_ip')
elem.set('instance_id')
class FloatingIPTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('floating_ip',
selector='floating_ip')
make_float_ip(root)
return xmlutil.MasterTemplate(root, 1)
class FloatingIPsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('floating_ips')
elem = xmlutil.SubTemplateElement(root, 'floating_ip',
selector='floating_ips')
make_float_ip(elem)
return xmlutil.MasterTemplate(root, 1)
def _translate_floating_ip_view(floating_ip):
result = {
'id': floating_ip['id'],
'ip': floating_ip['address'],
'pool': floating_ip['pool'],
}
try:
result['fixed_ip'] = floating_ip['fixed_ip']['address']
except (TypeError, KeyError):
result['fixed_ip'] = None
try:
result['instance_id'] = floating_ip['instance']['uuid']
except (TypeError, KeyError):
result['instance_id'] = None
return {'floating_ip': result}
def _translate_floating_ips_view(floating_ips):
return {'floating_ips': [_translate_floating_ip_view(ip)['floating_ip']
for ip in floating_ips]}
def get_instance_by_floating_ip_addr(self, context, address):
snagiibfa = self.network_api.get_instance_id_by_floating_address
instance_id = snagiibfa(context, address)
if instance_id:
return self.compute_api.get(context, instance_id)
def disassociate_floating_ip(self, context, instance, address):
try:
self.network_api.disassociate_floating_ip(context, instance, address)
except exception.NotAuthorized:
raise webob.exc.HTTPUnauthorized()
except exception.FloatingIpNotAssociated:
msg = _('Floating ip is not associated')
raise webob.exc.HTTPBadRequest(explanation=msg)
class FloatingIPController(object):
"""The Floating IPs API controller for the OpenStack API."""
def __init__(self):
self.compute_api = compute.API()
self.network_api = network.API()
super(FloatingIPController, self).__init__()
def _get_fixed_ip(self, context, fixed_ip_id):
if fixed_ip_id is None:
return None
try:
return self.network_api.get_fixed_ip(context, fixed_ip_id)
except exception.FixedIpNotFound:
return None
def _get_instance(self, context, instance_id):
return self.compute_api.get(context, instance_id)
def _set_metadata(self, context, floating_ip):
fixed_ip_id = floating_ip['fixed_ip_id']
floating_ip['fixed_ip'] = self._get_fixed_ip(context,
fixed_ip_id)
instance_uuid = None
if floating_ip['fixed_ip']:
instance_uuid = floating_ip['fixed_ip']['instance_uuid']
if instance_uuid:
floating_ip['instance'] = self._get_instance(context,
instance_uuid)
else:
floating_ip['instance'] = None
@wsgi.serializers(xml=FloatingIPTemplate)
def show(self, req, id):
"""Return data about the given floating ip."""
context = req.environ['nova.context']
authorize(context)
try:
floating_ip = self.network_api.get_floating_ip(context, id)
except exception.NotFound:
raise webob.exc.HTTPNotFound()
self._set_metadata(context, floating_ip)
return _translate_floating_ip_view(floating_ip)
@wsgi.serializers(xml=FloatingIPsTemplate)
def index(self, req):
"""Return a list of floating ips allocated to a project."""
context = req.environ['nova.context']
authorize(context)
floating_ips = self.network_api.get_floating_ips_by_project(context)
for floating_ip in floating_ips:
self._set_metadata(context, floating_ip)
return _translate_floating_ips_view(floating_ips)
@wsgi.serializers(xml=FloatingIPTemplate)
def create(self, req, body=None):
context = req.environ['nova.context']
authorize(context)
pool = None
if body and 'pool' in body:
pool = body['pool']
try:
address = self.network_api.allocate_floating_ip(context, pool)
ip = self.network_api.get_floating_ip_by_address(context, address)
except exception.NoMoreFloatingIps, nmfi:
if pool:
nmfi.message = _("No more floating ips in pool %s.") % pool
else:
nmfi.message = _("No more floating ips available.")
raise nmfi
return _translate_floating_ip_view(ip)
def delete(self, req, id):
context = req.environ['nova.context']
authorize(context)
# get the floating ip object
floating_ip = self.network_api.get_floating_ip(context, id)
address = floating_ip['address']
# get the associated instance object (if any)
instance = get_instance_by_floating_ip_addr(self, context, address)
# disassociate if associated
if floating_ip.get('fixed_ip_id'):
disassociate_floating_ip(self, context, instance, address)
# release ip from project
self.network_api.release_floating_ip(context, address)
return webob.Response(status_int=202)
def _get_ip_by_id(self, context, value):
"""Checks that value is id and then returns its address."""
return self.network_api.get_floating_ip(context, value)['address']
class FloatingIPActionController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(FloatingIPActionController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
self.network_api = network.API()
@wsgi.action('addFloatingIp')
def _add_floating_ip(self, req, id, body):
"""Associate floating_ip to an instance."""
context = req.environ['nova.context']
authorize(context)
try:
address = body['addFloatingIp']['address']
except TypeError:
msg = _("Missing parameter dict")
raise webob.exc.HTTPBadRequest(explanation=msg)
except KeyError:
msg = _("Address not specified")
raise webob.exc.HTTPBadRequest(explanation=msg)
instance = self.compute_api.get(context, id)
cached_nwinfo = compute_utils.get_nw_info_for_instance(instance)
if not cached_nwinfo:
msg = _('No nw_info cache associated with instance')
raise webob.exc.HTTPBadRequest(explanation=msg)
fixed_ips = cached_nwinfo.fixed_ips()
if not fixed_ips:
msg = _('No fixed ips associated to instance')
raise webob.exc.HTTPBadRequest(explanation=msg)
# TODO(tr3buchet): this will associate the floating IP with the
# first fixed_ip an instance has. This should be
# changed to support specifying a particular fixed_ip if
# multiple exist.
if len(fixed_ips) > 1:
msg = _('multiple fixed_ips exist, using the first: %s')
LOG.warning(msg, fixed_ips[0]['address'])
try:
self.network_api.associate_floating_ip(context, instance,
floating_address=address,
fixed_address=fixed_ips[0]['address'])
except exception.FloatingIpAssociated:
msg = _('floating ip is already associated')
raise webob.exc.HTTPBadRequest(explanation=msg)
except exception.NoFloatingIpInterface:
msg = _('l3driver call to add floating ip failed')
raise webob.exc.HTTPBadRequest(explanation=msg)
except Exception:
msg = _('Error. Unable to associate floating ip')
LOG.exception(msg)
raise webob.exc.HTTPBadRequest(explanation=msg)
return webob.Response(status_int=202)
@wsgi.action('removeFloatingIp')
def _remove_floating_ip(self, req, id, body):
"""Dissociate floating_ip from an instance."""
context = req.environ['nova.context']
authorize(context)
try:
address = body['removeFloatingIp']['address']
except TypeError:
msg = _("Missing parameter dict")
raise webob.exc.HTTPBadRequest(explanation=msg)
except KeyError:
msg = _("Address not specified")
raise webob.exc.HTTPBadRequest(explanation=msg)
# get the floating ip object
floating_ip = self.network_api.get_floating_ip_by_address(context,
address)
# get the associated instance object (if any)
instance = get_instance_by_floating_ip_addr(self, context, address)
# disassociate if associated
if instance and floating_ip.get('fixed_ip_id'):
disassociate_floating_ip(self, context, instance, address)
return webob.Response(status_int=202)
else:
return webob.Response(status_int=404)
class Floating_ips(extensions.ExtensionDescriptor):
"""Floating IPs support"""
name = "Floating_ips"
alias = "os-floating-ips"
namespace = "http://docs.openstack.org/compute/ext/floating_ips/api/v1.1"
updated = "2011-06-16T00:00:00+00:00"
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-floating-ips',
FloatingIPController(),
member_actions={})
resources.append(res)
return resources
def get_controller_extensions(self):
controller = FloatingIPActionController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
| {
"content_hash": "c9edb78b1ce199bcff507e3150df598b",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 79,
"avg_line_length": 35.68027210884354,
"alnum_prop": 0.6147759771210677,
"repo_name": "NoBodyCam/TftpPxeBootBareMetal",
"id": "6fb9f0cc1a0c67513cd79e0496c284b0bef7846f",
"size": "11296",
"binary": false,
"copies": "1",
"ref": "refs/heads/tftp_pxe_boot",
"path": "nova/api/openstack/compute/contrib/floating_ips.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "6568288"
},
{
"name": "Shell",
"bytes": "17010"
}
],
"symlink_target": ""
} |
from typing import Dict
from ezdxf.document import Drawing
from ezdxf.tools import codepage
from ezdxf.sections.header import HeaderSection
from ezdxf.sections.classes import ClassesSection
from ezdxf.sections.tables import TablesSection
from ezdxf.sections.blocks import BlocksSection
from ezdxf.sections.entities import EntitySection
from ezdxf.sections.objects import ObjectsSection
from ezdxf.sections.acdsdata import AcDsDataSection
from .const import *
from .fileheader import FileHeader
from .header_section import load_header_section
from .classes_section import load_classes_section
__all__ = ["readfile", "load"]
def readfile(filename: str, crc_check=False) -> "Drawing":
data = open(filename, "rb").read()
return load(data, crc_check)
def load(data: bytes, crc_check=False) -> Drawing:
doc = DwgDocument(data, crc_check=crc_check)
doc.load()
return doc.doc
class DwgDocument:
def __init__(self, data: Bytes, crc_check=False):
self.data = memoryview(data)
self.crc_check = crc_check
self.specs = FileHeader(data, crc_check=crc_check)
self.doc: Drawing = self._setup_doc()
# Store DXF object types by class number:
self.dxf_object_types: Dict[int, str] = dict()
def _setup_doc(self) -> Drawing:
doc = Drawing(dxfversion=self.specs.version)
doc.encoding = self.specs.encoding
doc.header = HeaderSection.new()
# Setup basic header variables not stored in the header section of the DWG file.
doc.header["$ACADVER"] = self.specs.version
doc.header["$ACADMAINTVER"] = self.specs.maintenance_release_version
doc.header["$DWGCODEPAGE"] = codepage.tocodepage(self.specs.encoding)
doc.classes = ClassesSection(doc)
# doc.tables = TablesSection(doc)
# doc.blocks = BlocksSection(doc)
# doc.entities = EntitySection(doc)
# doc.objects = ObjectsSection(doc)
# doc.acdsdata = AcDsDataSection(doc)
return doc
def load(self):
self.load_header()
self.load_classes()
self.load_objects()
self.store_objects()
def load_header(self) -> None:
hdr_section = load_header_section(self.specs, self.data, self.crc_check)
hdr_vars = hdr_section.load_header_vars()
self.set_header_vars(hdr_vars)
def set_header_vars(self, hdr_vars: Dict):
pass
def load_classes(self) -> None:
cls_section = load_classes_section(
self.specs, self.data, self.crc_check
)
for class_num, dxfclass in cls_section.load_classes():
self.doc.classes.register(dxfclass)
self.dxf_object_types[class_num] = dxfclass.dxf.name
def load_objects(self) -> None:
pass
def store_objects(self) -> None:
pass
| {
"content_hash": "f89c8b91b8e88987f86e6cd4884ecdeb",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 88,
"avg_line_length": 32.825581395348834,
"alnum_prop": 0.6687920651788877,
"repo_name": "mozman/ezdxf",
"id": "a198fa081cec619c666b3720a2e16aa98d6e4961",
"size": "2888",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ezdxf/addons/dwg/loader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "5745"
},
{
"name": "CSS",
"bytes": "3565"
},
{
"name": "Common Lisp",
"bytes": "727"
},
{
"name": "Cython",
"bytes": "111923"
},
{
"name": "HTML",
"bytes": "1417"
},
{
"name": "JavaScript",
"bytes": "11132"
},
{
"name": "Python",
"bytes": "6336553"
}
],
"symlink_target": ""
} |
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pyatv.protocols.mrp.protobuf import ProtocolMessage_pb2 as pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n7pyatv/protocols/mrp/protobuf/CryptoPairingMessage.proto\x1a\x32pyatv/protocols/mrp/protobuf/ProtocolMessage.proto\"|\n\x14\x43ryptoPairingMessage\x12\x13\n\x0bpairingData\x18\x01 \x01(\x0c\x12\x0e\n\x06status\x18\x02 \x01(\x05\x12\x12\n\nisRetrying\x18\x03 \x01(\x08\x12\x1c\n\x14isUsingSystemPairing\x18\x04 \x01(\x08\x12\r\n\x05state\x18\x05 \x01(\x05:E\n\x14\x63ryptoPairingMessage\x12\x10.ProtocolMessage\x18\' \x01(\x0b\x32\x15.CryptoPairingMessage')
CRYPTOPAIRINGMESSAGE_FIELD_NUMBER = 39
cryptoPairingMessage = DESCRIPTOR.extensions_by_name['cryptoPairingMessage']
_CRYPTOPAIRINGMESSAGE = DESCRIPTOR.message_types_by_name['CryptoPairingMessage']
CryptoPairingMessage = _reflection.GeneratedProtocolMessageType('CryptoPairingMessage', (_message.Message,), {
'DESCRIPTOR' : _CRYPTOPAIRINGMESSAGE,
'__module__' : 'pyatv.protocols.mrp.protobuf.CryptoPairingMessage_pb2'
# @@protoc_insertion_point(class_scope:CryptoPairingMessage)
})
_sym_db.RegisterMessage(CryptoPairingMessage)
if _descriptor._USE_C_DESCRIPTORS == False:
pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2.ProtocolMessage.RegisterExtension(cryptoPairingMessage)
DESCRIPTOR._options = None
_CRYPTOPAIRINGMESSAGE._serialized_start=111
_CRYPTOPAIRINGMESSAGE._serialized_end=235
# @@protoc_insertion_point(module_scope)
| {
"content_hash": "7653d79cdc7d081e6a60c03ec3af39ca",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 518,
"avg_line_length": 54.77142857142857,
"alnum_prop": 0.8069900886802295,
"repo_name": "postlund/pyatv",
"id": "0ed694ba2dec66f502c6b47e817adad22d46a70f",
"size": "2066",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyatv/protocols/mrp/protobuf/CryptoPairingMessage_pb2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "456"
},
{
"name": "Python",
"bytes": "1432120"
},
{
"name": "Shell",
"bytes": "2108"
}
],
"symlink_target": ""
} |
from gi.repository import Gtk, Gdk, Pango, PangoCairo, GdkPixbuf, GLib
import cairo
import sys
import os
import numpy
from threading import Thread, Condition, Event, Lock
from time import sleep, time
try:
import cv2
except:
print("python-opencv is required for video access. Please install.")
from cave.libcave.video import Video
from cave.libcave.cameralink import CameraLink
from cave.libcave.tags.registered_tags import get_class_from_tagtype
from cave.libcave.videoutils import verify_video
from misc.log import with_logging, supress_output
MAX_FPS = 30 #max FPS to attempt display
@with_logging
class VideoManagerThread(Thread):
def __init__(self, callback):
Thread.__init__(self)
self.next_frame = None
self.camlink = None
self.callback = callback
self.video = None
self.c = Condition()
self.kill = Event()
self.export_to_vision = False
self.enable_display = True
self.width = 640
self.height = 480
self.limit_speed = True #Disable speed limiting only in testing
self.grab_lock = Lock() #Preview manager may be grabbing images concurrently
self.last_frame = None #Last frame number captured
self.new_link = None
self.start()
def new_camlink(self, name, height, width, nchannels):
with self.c:
self.new_link = (name, height, width, nchannels)
self.c.notify()
#self.camlink = CameraLink(name, height=height, width=width, nChannels=nchannels)
def set_cap(self, cap, video):
self.log.info("Capture source changed")
with self.c:
self.cap = cap
self.video = video
self.last_frame = None
def request_frame(self, frame):
with self.c:
self.next_frame = frame
self.c.notify()
def destroy(self):
with self.c:
self.kill.set()
self.c.notify()
# Grabs a frame from the currently loaded video
# None if image grab failed, otherwise a tuple is returned:
# img: raw opencv image
# frame: cairo drawing imagesurface data (can be None if display disabled (testing))
# width: width of image returned
# height: height of image returned
def grab_frame(self, frame_number, resize=None):
with self.grab_lock:
#Grab frame twice to avoid internal buffering (only seems to be a problem on some computers)
# UPDATE: this doesn't seem necessary anymore; verify
#self.cap.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
#self.cap.grab()
#Seek to correct frame
if self.last_frame is None or frame_number != self.last_frame + 1:
if frame_number < 0: #DEBUG DEBUG DEBUG
self.log.error("Frame number was %d" % frame_number)
else:
self.cap.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
success, img = self.cap.read()
self.last_frame = frame_number
if not success or img is None:
#failure
self.log.error("Failed to grab frame %d" % frame_number)
return None
else:
if resize is not None:
ow = len(img[0]) #input frame width
oh = len(img) #input frame height
w,h = resize
if w is None:
assert(h)
w = int(float(h) * ow / oh)
if h is None:
assert(w)
h = int(float(w) * oh / ow)
img = cv2.resize(img, (w,h))
width = len(img[0])
height = len(img)
frame = None
if self.enable_display:
#cairo requires a 4th channel; add one
z = numpy.zeros((height, width, 1), dtype=numpy.uint8)
aimg = numpy.concatenate((img, z), 2).flatten()
frame = cairo.ImageSurface.create_for_data(aimg, cairo.FORMAT_RGB24,
width, height, width*4)
return (img, frame, width, height)
def cleanup(self):
if self.camlink is not None:
self.camlink.cleanup()
def run(self):
while True:
with self.c:
if self.kill.is_set():
break
quit = False
while self.next_frame is None:
self.c.wait()
if self.kill.is_set():
quit = True
break
# We want to do all CameraLink calls from the same thread.
if self.new_link is not None:
self.cleanup()
name, height, width, nchannels = self.new_link
self.camlink = CameraLink(name, height=height, width=width, nChannels=nchannels)
self.new_link = None
if quit:
break
frame_to_process = self.next_frame
self.next_frame = None
t1 = time()
image_result = self.grab_frame(frame_to_process)
if image_result is None:
#Should never be null unless something is wrong with the filesystem
self.log.error("Unable to read frame %d from file!" % frame_to_process)
else:
img, self.frame, self.width, self.height = image_result
if self.frame is not None:
Gdk.threads_enter()
self.callback(self.frame)
Gdk.threads_leave()
if self.export_to_vision:
self.camlink.send_image(img)
if self.limit_speed:
dt = time() - t1
self.kill.wait(min(0, 1.0 / MAX_FPS - dt)) #sleep
self.cleanup()
@with_logging
class VideoBox(Gtk.DrawingArea):
"""
Box for displaying videos
"""
def __init__ (self, parent):
Gtk.DrawingArea.__init__(self)
self.parent = parent
self.connect('draw', self._do_expose)
self.set_events(Gdk.EventMask.BUTTON_PRESS_MASK | Gdk.EventMask.BUTTON_MOTION_MASK | Gdk.EventMask.POINTER_MOTION_MASK | Gdk.EventMask.LEAVE_NOTIFY_MASK | Gdk.EventMask.ENTER_NOTIFY_MASK | Gdk.EventMask.SCROLL_MASK)
self.connect("button-press-event", self._click) #mouse button is pressed (either left or right)
self.connect("motion-notify-event", self._hover) #mouse is held down and in motion
self.connect("enter-notify-event", self._enter)
self.connect("leave-notify-event", self._exit)
self.connect("key-press-event", self._keypress)
self.connect("scroll-event", self._scroll)
self.set_size_request(320, 240)
self.frame = None
self.video = None
self.width = 640
self.height = 480
self.length = 100
self.cap_source_cache = {}
self.tag_type_instance = None
def get_frame_xy(self, sx, sy):
allocation = self.get_allocation()
frame_width = allocation.width
frame_height = allocation.height
#Video origin
vid_x_o = frame_width / 2 - self.width / 2
vid_y_o = frame_height / 2 - self.height / 2
return int(sx - vid_x_o), int(sy - vid_y_o)
def _click(self, area, event):
if self.tag_type_instance is not None:
x,y = self.get_frame_xy(event.x, event.y)
if self.tag_type_instance.click(area, event, x, y):
#Increment the played frame
self.parent.increment_frame(1)
else:
self.queue_draw()
def _hover(self, area, event):
if self.tag_type_instance is not None:
x,y = self.get_frame_xy(event.x, event.y)
self.tag_type_instance.hover(area, event, x, y)
self.queue_draw()
def _enter(self, area, event):
if self.tag_type_instance is not None:
x,y = self.get_frame_xy(event.x, event.y)
self.tag_type_instance.enter(area, event, x, y)
def _exit(self, area, event):
if self.tag_type_instance is not None:
x,y = self.get_frame_xy(event.x, event.y)
self.tag_type_instance.exit(area, event, x, y)
self.queue_draw()
def _scroll(self, area, event):
if self.tag_type_instance is not None:
x,y = self.get_frame_xy(event.x, event.y)
self.tag_type_instance.scroll(area, event, x, y)
self.queue_draw()
def _keypress(self, area, event):
if self.tag_type_instance is not None:
self.tag_type_instance.keypress(area, event)
self.queue_draw()
def export_to_vision(self, val):
self.vmt.export_to_vision = val
if val:
self.log.info("Now exporting frames to vision")
else:
self.log.info("Stopping export of frames to vision")
def enable_display(self, val):
self.vmt.enable_display = val
#Start the rendering thread
def start_thread(self):
self.vmt = VideoManagerThread(self._new_frame_arrived)
#Callback from the rendering thread
def _new_frame_arrived(self, frame):
self.frame = frame
self.width = self.vmt.width
self.height = self.vmt.height
self.queue_draw()
#Kill the rendering thread
def kill_thread(self):
if self.vmt is not None:
self.vmt.destroy()
#Sets a tag for use here
def set_tag(self, tag):
if tag is None:
self.tag_type_instance = None
else:
TagTypeClass = get_class_from_tagtype(tag.tag_type)
self.tag_type_instance = TagTypeClass(tag, lambda : self.parent.timeline.cursor)
#Loads a new video for playback
#@supress_output
def load_video(self, video):
if video is None:
return
filename = Video.db.get_absolute_path(video.video_path)
self.log.info("Load filename %s" % filename)
self.video = video
#opencv seems to be doing some indexing or something as it performs seeks
#if video not in self.cap_source_cache:
# This causes problems when multiple databases are loaded.
#cap = cv2.VideoCapture(filename)
#else:
# self.log.info("Loaded capture source from cache")
# self.cap = self.cap_source_cache[video]
cap, self.width, self.height, self.length, self.nchannels = \
verify_video(filename)
self.cap_source_cache[video] = cap
self.vmt.new_camlink(video.linked_camera, self.height, self.width, self.nchannels)
self.vmt.set_cap(cap, video)
self.vmt.request_frame(0)
#Set the frame displayed
def set_frame(self, frame):
frame = max(0, min(self.length, frame)) #bounds check
self.vmt.request_frame(frame)
#Carries out on-screen drawing when queue_draw is called
def _do_expose(self, widget, cr):
# Get the size of the area that cairo has allocated for our drawing
allocation = self.get_allocation()
frame_width = allocation.width
frame_height = allocation.height
if self.tag_type_instance is None:
scale_factor = min(float(frame_height) / self.height,
float(frame_width) / self.width)
else:
scale_factor = 1.0
#Video origin
vid_x_o = (frame_width / 2 / scale_factor - self.width / 2)
vid_y_o = (frame_height / 2 / scale_factor - self.height / 2)
if self.frame is None:
# Nothing to draw!
return
#Draw the video frame
cr.scale(scale_factor, scale_factor)
cr.set_source_surface(self.frame, vid_x_o, vid_y_o)
cr.paint()
#Draw tag-related components
if self.tag_type_instance is not None:
self.tag_type_instance.draw(widget, cr, vid_x_o, vid_y_o)
# Warn that auto-scale was disabled.
cr.set_source_rgb(0.7,0,0)
cr.move_to(0, frame_height - 5)
# This is mostly because I'm too lazy right now to implement
# the rescaling logic needed for tagging.
cr.show_text("Video auto-scaling disabled in tagging mode.")
| {
"content_hash": "4aed5241b1b89e9507ccd0d1a5d1dd49",
"timestamp": "",
"source": "github",
"line_count": 356,
"max_line_length": 223,
"avg_line_length": 34.97752808988764,
"alnum_prop": 0.5672984259556698,
"repo_name": "cuauv/software",
"id": "6cb12bee1f72b9d21f0f41a039c85d267b1766fe",
"size": "12452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cave/videobox.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "271780"
},
{
"name": "C++",
"bytes": "2831785"
},
{
"name": "CMake",
"bytes": "5365"
},
{
"name": "CSS",
"bytes": "5082"
},
{
"name": "Dockerfile",
"bytes": "2758"
},
{
"name": "Emacs Lisp",
"bytes": "19028"
},
{
"name": "GLSL",
"bytes": "6783"
},
{
"name": "HTML",
"bytes": "3642"
},
{
"name": "Haskell",
"bytes": "4770"
},
{
"name": "JavaScript",
"bytes": "113413"
},
{
"name": "Makefile",
"bytes": "12887"
},
{
"name": "Nix",
"bytes": "16335"
},
{
"name": "OCaml",
"bytes": "3804"
},
{
"name": "PureBasic",
"bytes": "58"
},
{
"name": "Python",
"bytes": "2141765"
},
{
"name": "Scheme",
"bytes": "129544"
},
{
"name": "Shell",
"bytes": "68820"
},
{
"name": "TeX",
"bytes": "25243"
},
{
"name": "Vim script",
"bytes": "125505"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import posixpath
import errno
from urlparse import urlparse
class ProxyFS(object):
def __init__(self, filesystems_dict, default_scheme):
if default_scheme not in filesystems_dict:
raise ValueError(
'Default scheme "%s" is not a member of provided schemes: %s' % (default_scheme, filesystems_dict.keys()))
self._fs_dict = filesystems_dict
self._fs_set = set(self._fs_dict.values())
self._default_scheme = default_scheme
self._default_fs = self._fs_dict[self._default_scheme]
def __getattr__(self, item):
if hasattr(self, "_default_fs"):
return getattr(object.__getattribute__(self, "_default_fs"), item)
else:
raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, item))
def __setattr__(self, key, value):
if hasattr(self, "_default_fs") and hasattr(self._default_fs, key):
setattr(self._default_fs, key, value)
else:
object.__setattr__(self, key, value)
def _get_scheme(self, path):
split = urlparse(path)
if split.scheme:
return split.scheme
if path and path[0] == posixpath.sep:
return self._default_scheme
def _get_fs(self, path):
scheme = self._get_scheme(path)
if not scheme:
raise IOError(errno.EINVAL, 'Can not figure out scheme for path "%s"' % path)
try:
return self._fs_dict[scheme]
except KeyError:
raise IOError(errno.EINVAL, 'Unknown scheme %s, available schemes: %s' % (scheme, self._fs_dict.keys()))
def _get_fs_pair(self, src, dst):
"""
Returns two FS for source and destination paths respectively.
If `dst` is not self-contained path assumes it's relative path to `src`.
"""
src_fs = self._get_fs(src)
dst_scheme = self._get_scheme(dst)
if not dst_scheme:
return src_fs, src_fs
return src_fs, self._get_fs(dst)
def setuser(self, user):
"""Set a new user. Return the current user."""
curr = self.user
for fs in self._fs_set:
fs.setuser(user)
return curr
def do_as_user(self, username, fn, *args, **kwargs):
prev = self.user
try:
self.setuser(username)
return fn(*args, **kwargs)
finally:
self.setuser(prev)
def do_as_superuser(self, fn, *args, **kwargs):
return self.do_as_user(self._default_fs.superuser, fn, *args, **kwargs)
# Proxy methods to suitable filesystem
# ------------------------------------
def isdir(self, path):
return self._get_fs(path).isdir(path)
def isfile(self, path):
return self._get_fs(path).isfile(path)
def stats(self, path):
return self._get_fs(path).stats(path)
def listdir_stats(self, path, **kwargs):
return self._get_fs(path).listdir_stats(path, **kwargs)
def listdir(self, path, glob=None):
return self._get_fs(path).listdir(path, glob)
def normpath(self, path):
return self._get_fs(path).normpath(path)
def open(self, path, *args, **kwargs):
return self._get_fs(path).open(path, *args, **kwargs)
def exists(self, path):
return self._get_fs(path).exists(path)
def isroot(self, path):
return self._get_fs(path).isroot(path)
def join(self, first, *comp_list):
return self._get_fs(first).join(first, *comp_list)
def mkdir(self, path, *args, **kwargs):
return self._get_fs(path).mkdir(path, *args, **kwargs)
def read(self, path, *args, **kwargs):
return self._get_fs(path).read(path, *args, **kwargs)
def append(self, path, *args, **kwargs):
return self._get_fs(path).append(path, *args, **kwargs)
def rmtree(self, path, *args, **kwargs):
self._get_fs(path).rmtree(path, *args, **kwargs)
def remove(self, path, skip_trash=False):
self._get_fs(path).remove(path, skip_trash)
def restore(self, path):
self._get_fs(path).restore(path)
def create(self, path, *args, **kwargs):
self._get_fs(path).create(path, *args, **kwargs)
def create_home_dir(self, home_path=None):
if home_path is None:
home_path = self.get_home_dir()
self._get_fs(home_path).create_home_dir(home_path)
def chown(self, path, *args, **kwargs):
self._get_fs(path).chown(path, *args, **kwargs)
def chmod(self, path, *args, **kwargs):
self._get_fs(path).chmod(path, *args, **kwargs)
def copyFromLocal(self, local_src, remote_dst, *args, **kwargs):
self._get_fs(remote_dst).copyFromLocal(local_src, remote_dst, *args, **kwargs)
def mktemp(self, subdir='', prefix='tmp', basedir=None):
fs = basedir and self._get_fs(basedir) or self.default_fs
return fs.mktemp(subdir=subdir, prefix=prefix, basedir=basedir)
def purge_trash(self):
for fs in self.fs_set:
if hasattr(fs, 'purge_trash'):
fs.purge_trash()
# Handle file systems interactions
# --------------------------------
def copy(self, src, dst, *args, **kwargs):
src_fs, dst_fs = self._get_fs_pair(src, dst)
op = src_fs.copy if src_fs is dst_fs else self._copy_between_filesystems
return op(src, dst, *args, **kwargs)
def _copy_between_filesystems(self, src, dst, recursive=False, *args, **kwargs):
raise NotImplementedError("Will be addressed in HUE-2934")
def copyfile(self, src, dst, *args, **kwargs):
src_fs, dst_fs = self._get_fs_pair(src, dst)
op = src_fs.copyfile if src_fs is dst_fs else self._copyfile_between_filesystems
return op(src, dst, *args, **kwargs)
def _copyfile_between_filesystems(self, src, dst, *args, **kwargs):
raise NotImplementedError("Will be addressed in HUE-2934")
def copy_remote_dir(self, src, dst, *args, **kwargs):
src_fs, dst_fs = self._get_fs_pair(src, dst)
op = src_fs.copy_remote_dir if src_fs is dst_fs else self._copy_remote_dir_between_filesystems
return op(src, dst, *args, **kwargs)
def _copy_remote_dir_between_filesystems(self, src, dst, *args, **kwargs):
raise NotImplementedError("Will be addressed in HUE-2934")
def rename(self, old, new):
old_fs, new_fs = self._get_fs_pair(old, new)
op = old_fs.rename if old_fs is new_fs else self._rename_between_filesystems
return op(old, new)
def _rename_between_filesystems(self, old, new):
raise NotImplementedError("Will be addressed in HUE-2934")
def rename_star(self, old_dir, new_dir):
old_fs, new_fs = self._get_fs_pair(old_dir, new_dir)
op = old_fs.rename_star if old_fs is new_fs else self._rename_star_between_filesystems
return op(old_dir, new_dir)
def _rename_star_between_filesystems(self, old, new):
raise NotImplementedError("Will be addressed in HUE-2934")
| {
"content_hash": "a9017fbea3a1f76bdf2e3ea11a40e94c",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 114,
"avg_line_length": 33.943005181347154,
"alnum_prop": 0.648450618226225,
"repo_name": "MobinRanjbar/hue",
"id": "e6cd18a9f8e5bdb6e05719256d7371a1c65f721a",
"size": "7321",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "desktop/core/src/desktop/lib/fs/proxyfs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13685"
},
{
"name": "C",
"bytes": "2397157"
},
{
"name": "C++",
"bytes": "177090"
},
{
"name": "CSS",
"bytes": "453436"
},
{
"name": "Emacs Lisp",
"bytes": "12145"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Groff",
"bytes": "14877"
},
{
"name": "HTML",
"bytes": "24042046"
},
{
"name": "Java",
"bytes": "575404"
},
{
"name": "JavaScript",
"bytes": "3220761"
},
{
"name": "KiCad",
"bytes": "475"
},
{
"name": "Makefile",
"bytes": "114862"
},
{
"name": "Mako",
"bytes": "2450286"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "13774"
},
{
"name": "Perl",
"bytes": "138710"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Python",
"bytes": "38423121"
},
{
"name": "Scala",
"bytes": "215057"
},
{
"name": "Shell",
"bytes": "54810"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TeX",
"bytes": "126420"
},
{
"name": "Thrift",
"bytes": "259222"
},
{
"name": "XSLT",
"bytes": "516845"
}
],
"symlink_target": ""
} |
"""
Created on Fri Oct 9 14:59:18 2015
@author: radar
"""
import sys
from distutils.core import setup, Extension
compile_args = [-03]
setup(name='APE',
version = '0.1.0',
description = 'Automated PEST Enviornment',
author = 'Nick Voss',
author_email = '[email protected]',
licsence = 'MIT',
py_modules = ['pcf'],
zip_safe = False,
) | {
"content_hash": "0152e64838c2a8d7f8887778a47afae3",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 49,
"avg_line_length": 20.263157894736842,
"alnum_prop": 0.5922077922077922,
"repo_name": "nvoss12838/APE",
"id": "635b860df659079394a7c36649f652cd1c29856c",
"size": "409",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35959"
}
],
"symlink_target": ""
} |
from matplotlib import colors as mcolors
import matplotlib.pyplot as plt
import scipy.io
import numpy as np
from ..features import build_features
firebrick=mcolors.CSS4_COLORS['firebrick']
red=mcolors.CSS4_COLORS['red']
coral=mcolors.CSS4_COLORS['coral']
seagreen=mcolors.CSS4_COLORS['seagreen']
grey=mcolors.CSS4_COLORS['grey']
royalblue=mcolors.CSS4_COLORS['royalblue']
color_ctgry=(grey, royalblue, red, coral, seagreen)
spat_corr={'grey': 'None', 'royalblue': 'Delta---', 'red':'Delta--', 'coral':'Delta-', 'seagreen':'None+' }
def plot_stim(stim, data_categories, file):
""" Plot retinal stimulus with color encodings for correlation categories """
NUM_COLORS = len(data_categories)
#cm = plt.get_cmap('gist_rainbow')
fig = plt.figure()
ax = fig.add_subplot(111)
#ax.set_color_cycle([cm(1.*i/NUM_COLORS) for i in data_categories])
for i in range(NUM_COLORS):
ax.plot(stim[i][0,:], 150*np.ones(stim[i].shape[1]), '.', color=color_ctgry[data_categories[i]])#(np.arange(10)*(i+1))
plt.xticks(rotation='vertical')
plt.title(file)
return None
def plot_file_stim(file):
""" Plot retinal stimulus data and assign encodings to correlation categories"""
data_retina=scipy.io.loadmat(dir+file)
stim=data_retina['stimTimes'][0,0]
data_categories=build_features.correlation_categories(data_retina)
print(file, data_categories)
try:
plot_stim(stim, data_categories, file)
except:
pass
def plot_cp_results(sum_diff_corr,stim, data_retina, params):
""" Plot dynamics of summary statistic"""
stim=data_retina['stimTimes'][0,0]
data_categories=build_features.correlation_categories(data_retina)
res=params['res']
block_width=params['block_width']
gauss_width=params['gauss_width']
file=data_retina['file']
method_corr=params['methods']
for method in method_corr:
#color=iter(cm.Dark2(np.linspace(0,1,len(stim.dtype.names))))
NUM_COLORS=len(stim.dtype.names)
cm = plt.get_cmap('Dark2')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_color_cycle([cm(1.*i/NUM_COLORS) for i in range(NUM_COLORS)])
if method=='pop_sum':
for count,i in enumerate(stim.dtype.names):
#subc=next(color)
plt.plot(stim[i][0,:], 150*np.ones(stim[i].shape[1]), '.', color=color_ctgry[data_categories[count]])
for i in sum_diff_corr[method]:
time_pt=np.arange(0,res*i.size,res)
plt.plot(time_pt,i)
plt.xticks(rotation='vertical')
else:
time_pt=np.arange(0,res*block_width*sum_diff_corr[method].size,res*block_width)
plt.plot(time_pt, sum_diff_corr[method])
for count,i in enumerate(stim.dtype.names):
#subc=next(color)
#print(i) printing stim names
plt.plot(stim[i][0,:], np.nanmax(sum_diff_corr[method][np.isfinite(sum_diff_corr[method])])*np.ones(stim[i].shape[1]), '.', color=color_ctgry[data_categories[count]])
plt.xticks(rotation='vertical')
plt.title(method + ", " + file) | {
"content_hash": "f5972198e0424ba073d9fb5037992503",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 182,
"avg_line_length": 38.01190476190476,
"alnum_prop": 0.6313811462574381,
"repo_name": "curious-abhinav/change-point",
"id": "a94c0c0a1df82c04a65d30bfce258e1b1fd1c718",
"size": "3193",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/visualization/visualize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "983807"
},
{
"name": "Makefile",
"bytes": "4330"
},
{
"name": "Python",
"bytes": "19794"
}
],
"symlink_target": ""
} |
import os
from oslo_log import log as logging
from oslo_utils import netutils
from trove.common import cfg
from trove.common.db.postgresql import models
from trove.common import exception
from trove.common.i18n import _
from trove.common import stream_codecs
from trove.common import utils
from trove.guestagent.backup.backupagent import BackupAgent
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
from trove.guestagent.strategies import backup
from trove.guestagent.strategies.replication import base
AGENT = BackupAgent()
CONF = cfg.CONF
REPL_BACKUP_NAMESPACE = 'trove.guestagent.strategies.backup.experimental' \
'.postgresql_impl'
LOG = logging.getLogger(__name__)
TRIGGER_FILE = '/tmp/postgresql.trigger'
REPL_USER = 'replicator'
SLAVE_STANDBY_OVERRIDE = 'SlaveStandbyOverride'
class PostgresqlReplicationStreaming(base.Replication):
def __init__(self, *args, **kwargs):
super(PostgresqlReplicationStreaming, self).__init__(*args, **kwargs)
@property
def repl_backup_runner(self):
return backup.get_backup_strategy('PgBaseBackup',
REPL_BACKUP_NAMESPACE)
@property
def repl_incr_backup_runner(self):
return backup.get_backup_strategy('PgBaseBackupIncremental',
REPL_BACKUP_NAMESPACE)
@property
def repl_backup_extra_opts(self):
return CONF.backup_runner_options.get('PgBaseBackup', '')
def get_master_ref(self, service, snapshot_info):
master_ref = {
'host': netutils.get_my_ipv4(),
'port': cfg.get_configuration_property('postgresql_port')
}
return master_ref
def backup_required_for_replication(self):
return True
def snapshot_for_replication(self, context, service,
location, snapshot_info):
snapshot_id = snapshot_info['id']
replica_number = snapshot_info.get('replica_number', 1)
LOG.debug("Acquiring backup for replica number %d.", replica_number)
# Only create a backup if it's the first replica
if replica_number == 1:
AGENT.execute_backup(
context, snapshot_info, runner=self.repl_backup_runner,
extra_opts=self.repl_backup_extra_opts,
incremental_runner=self.repl_incr_backup_runner)
else:
LOG.info(_("Using existing backup created for previous replica."))
repl_user_info = self._get_or_create_replication_user(service)
log_position = {
'replication_user': repl_user_info
}
return snapshot_id, log_position
def _get_or_create_replication_user(self, service):
"""There are three scenarios we need to deal with here:
- This is a fresh master, with no replicator user created.
Generate a new u/p
- We are attaching a new slave and need to give it the login creds
Send the creds we have stored in PGDATA/.replpass
- This is a failed-over-to slave, who will have the replicator user
but not the credentials file. Recreate the repl user in this case
"""
LOG.debug("Checking for replicator user")
pwfile = os.path.join(service.pgsql_data_dir, ".replpass")
admin = service.build_admin()
if admin.user_exists(REPL_USER):
if operating_system.exists(pwfile, as_root=True):
LOG.debug("Found existing .replpass, returning pw")
pw = operating_system.read_file(pwfile, as_root=True)
else:
LOG.debug("Found user but not .replpass, recreate")
u = models.PostgreSQLUser(REPL_USER)
admin._drop_user(context=None, user=u)
pw = self._create_replication_user(service, admin, pwfile)
else:
LOG.debug("Found no replicator user, create one")
pw = self._create_replication_user(service, admin, pwfile)
repl_user_info = {
'name': REPL_USER,
'password': pw
}
return repl_user_info
def _create_replication_user(self, service, admin, pwfile):
"""Create the replication user. Unfortunately, to be able to
run pg_rewind, we need SUPERUSER, not just REPLICATION privilege
"""
pw = utils.generate_random_password()
operating_system.write_file(pwfile, pw, as_root=True)
operating_system.chown(pwfile, user=service.pgsql_owner,
group=service.pgsql_owner, as_root=True)
operating_system.chmod(pwfile, FileMode.SET_USR_RWX(),
as_root=True)
repl_user = models.PostgreSQLUser(name=REPL_USER, password=pw)
admin._create_user(context=None, user=repl_user)
admin.alter_user(None, repl_user, True,
'REPLICATION', 'SUPERUSER', 'LOGIN')
return pw
def enable_as_master(self, service, master_config, for_failover=False):
"""For a server to be a master in postgres, we need to enable
the replication user in pg_hba and ensure that WAL logging is
at the appropriate level (use the same settings as backups)
"""
LOG.debug("Enabling as master, with cfg: %s ", master_config)
self._get_or_create_replication_user(service)
hba_entry = "host replication replicator 0.0.0.0/0 md5 \n"
tmp_hba = '/tmp/pg_hba'
operating_system.copy(service.pgsql_hba_config, tmp_hba,
force=True, as_root=True)
operating_system.chmod(tmp_hba, FileMode.SET_ALL_RWX(),
as_root=True)
with open(tmp_hba, 'a+') as hba_file:
hba_file.write(hba_entry)
operating_system.copy(tmp_hba, service.pgsql_hba_config,
force=True, as_root=True)
operating_system.chmod(service.pgsql_hba_config,
FileMode.SET_USR_RWX(),
as_root=True)
operating_system.remove(tmp_hba, as_root=True)
service.reload_configuration()
def enable_as_slave(self, service, snapshot, slave_config):
"""Adds appropriate config options to postgresql.conf, and writes out
the recovery.conf file used to set up replication
"""
LOG.debug("Got slave_config: %s", str(slave_config))
self._write_standby_recovery_file(service, snapshot, sslmode='prefer')
self.enable_hot_standby(service)
# Ensure the WAL arch is empty before restoring
service.recreate_wal_archive_dir()
def detach_slave(self, service, for_failover):
"""Touch trigger file in to disable recovery mode"""
LOG.info(_("Detaching slave, use trigger to disable recovery mode"))
operating_system.write_file(TRIGGER_FILE, '')
operating_system.chown(TRIGGER_FILE, user=service.pgsql_owner,
group=service.pgsql_owner, as_root=True)
def _wait_for_failover():
"""Wait until slave has switched out of recovery mode"""
return not service.pg_is_in_recovery()
try:
utils.poll_until(_wait_for_failover, time_out=120)
except exception.PollTimeOut:
raise RuntimeError(_("Timeout occurred waiting for slave to exit"
"recovery mode"))
def cleanup_source_on_replica_detach(self, admin_service, replica_info):
pass
def _rewind_against_master(self, service):
"""Call pg_rewind to resync datadir against state of new master
We should already have a recovery.conf file in PGDATA
"""
rconf = operating_system.read_file(
service.pgsql_recovery_config,
codec=stream_codecs.KeyValueCodec(line_terminator='\n'),
as_root=True)
conninfo = rconf['primary_conninfo'].strip()
# The recovery.conf file we want should already be there, but pg_rewind
# will delete it, so copy it out first
rec = service.pgsql_recovery_config
tmprec = "/tmp/recovery.conf.bak"
operating_system.move(rec, tmprec, as_root=True)
cmd_full = " ".join(["pg_rewind",
'--target-pgdata=' + service.pgsql_data_dir,
'--source-server=' + conninfo])
out, err = utils.execute("sudo", "su", "-", service.pgsql_owner,
"-c", "%s" % cmd_full, check_exit_code=0)
LOG.debug("Got stdout %(out)s and stderr %(err)s from pg_rewind",
{'out': str(out), 'err': str(err)})
operating_system.move(tmprec, rec, as_root=True)
def demote_master(self, service):
"""In order to demote a master we need to shutdown the server and call
pg_rewind against the new master to enable a proper timeline
switch.
"""
service.stop_db()
self._rewind_against_master(service)
service.start_db()
def connect_to_master(self, service, snapshot):
"""All that is required in postgresql to connect to a slave is to
restart with a recovery.conf file in the data dir, which contains
the connection information for the master.
"""
assert operating_system.exists(service.pgsql_recovery_config,
as_root=True)
service.restart()
def _remove_recovery_file(self, service):
operating_system.remove(service.pgsql_recovery_config, as_root=True)
def _write_standby_recovery_file(self, service, snapshot,
sslmode='prefer'):
LOG.info(_("Snapshot data received: %s"), str(snapshot))
logging_config = snapshot['log_position']
conninfo_params = \
{'host': snapshot['master']['host'],
'port': snapshot['master']['port'],
'repl_user': logging_config['replication_user']['name'],
'password': logging_config['replication_user']['password'],
'sslmode': sslmode}
conninfo = 'host=%(host)s ' \
'port=%(port)s ' \
'dbname=os_admin ' \
'user=%(repl_user)s ' \
'password=%(password)s ' \
'sslmode=%(sslmode)s ' % conninfo_params
recovery_conf = "standby_mode = 'on'\n"
recovery_conf += "primary_conninfo = '" + conninfo + "'\n"
recovery_conf += "trigger_file = '/tmp/postgresql.trigger'\n"
recovery_conf += "recovery_target_timeline='latest'\n"
operating_system.write_file(service.pgsql_recovery_config,
recovery_conf,
codec=stream_codecs.IdentityCodec(),
as_root=True)
operating_system.chown(service.pgsql_recovery_config,
user=service.pgsql_owner,
group=service.pgsql_owner, as_root=True)
def enable_hot_standby(self, service):
opts = {'hot_standby': 'on',
'wal_level': 'hot_standby'}
# wal_log_hints for pg_rewind is only supported in 9.4+
if service.pg_version[1] in ('9.4', '9.5'):
opts['wal_log_hints'] = 'on'
service.configuration_manager.\
apply_system_override(opts, SLAVE_STANDBY_OVERRIDE)
def get_replica_context(self, service):
LOG.debug("Calling get_replica_context")
repl_user_info = self._get_or_create_replication_user(service)
log_position = {
'replication_user': repl_user_info
}
return {
'master': self.get_master_ref(None, None),
'log_position': log_position
}
| {
"content_hash": "9f3a7c9b6931dffdee7c1c151bdbde62",
"timestamp": "",
"source": "github",
"line_count": 290,
"max_line_length": 79,
"avg_line_length": 41.144827586206894,
"alnum_prop": 0.5952899765336909,
"repo_name": "zhangg/trove",
"id": "cdfd4649fb66459f0a564e72e54248375a291e1a",
"size": "12559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trove/guestagent/strategies/replication/experimental/postgresql_impl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4546016"
},
{
"name": "Shell",
"bytes": "145524"
}
],
"symlink_target": ""
} |
from bs4 import BeautifulSoup
import string
import sys
from setting import proxy_enable,proxy_url
import urllib2
import mechanize
def get_lyricsmint(url):
""" get_lyrics received a lyricspagepage that fetch fro url provide by
get_link ,and give user a lyrics user wish"""
lyric=""
if(proxy_enable):
proxy = urllib2.ProxyHandler({'http':proxy_url})
auth = urllib2.HTTPBasicAuthHandler()
opener = urllib2.build_opener(proxy, auth, urllib2.HTTPHandler)
urllib2.install_opener(opener)
req = urllib2.Request(url, headers={'User-Agent' : "Magic Browser"})
req = urllib2.urlopen(req)
html_lyrics = req.read()
soup = BeautifulSoup(html_lyrics, 'html.parser')
songlyric=soup.find_all("div", id="lyric")
# lyrics= str(songlyric[0].find_all("p"))
lyrics=""
for phase in songlyric[0].find_all("p"):
lyrics=lyrics+str(phase)+"\n"
lyrics=lyrics.replace("</p>","\n")
lyrics=lyrics.replace("<br/>","\n")
return BeautifulSoup(lyrics).text
def get_azlyrics(url):
""" get_lyrics received a lyricspagepage that fetch fro url provide by
get_link ,and give user a lyrics user wish"""
lyric=""
if(proxy_enable):
proxy = urllib2.ProxyHandler({'http':proxy_url})
auth = urllib2.HTTPBasicAuthHandler()
opener = urllib2.build_opener(proxy, auth, urllib2.HTTPHandler)
urllib2.install_opener(opener)
req = urllib2.urlopen(url)
html_lyrics = req.read()
soup = BeautifulSoup(html_lyrics, 'html.parser')
songlyric=soup.find_all("div" ,class_=None)
soup = BeautifulSoup(str(songlyric[1]), 'html.parser')
return soup.get_text()
| {
"content_hash": "5817f00d5cd2f29c8e0b8de44c2a2bdf",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 78,
"avg_line_length": 37.52173913043478,
"alnum_prop": 0.6477404403244496,
"repo_name": "ceaniket/lyricsar",
"id": "fab34a4df4bf36ac5443c53f36eb20230ecd4f83",
"size": "1726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lyricsar/plugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10974"
}
],
"symlink_target": ""
} |
import logging
from jcli import exception
from server import Server
logging.basicConfig(level=logging.INFO, format='%(message)s')
logger = logging.getLogger()
class Plugin(Server):
"""Manages plugin command execution"""
def __init__(self, action, url, user, password, plugin_args=None):
super(Plugin, self).__init__(url, user, password)
self.action = action
self.plugin_args = plugin_args
def list_plugins(self):
"""Print list of all the plugins"""
try:
plugins = self.server.get_plugins()
for name, info in plugins.items():
logger.info(name[0])
except Exception as e:
raise exception.JcliException(e)
def plugin_info(self):
"""Print information on a specific plugin."""
plugin_name = self.plugin_args.name[0]
try:
plugin_json = self.server.get_plugin_info(plugin_name)
if plugin_json:
logger.info("Name: %s", plugin_name)
logger.info("Version: %s", plugin_json['version'])
logger.info("Enabled?: %s", plugin_json['enabled'])
logger.info("Has update?: %s", plugin_json['hasUpdate'])
logger.info("Official page: %s", plugin_json['url'])
logger.info("Dependencies:")
for dep in plugin_json['dependencies']:
logger.info("\tName: %s", dep['shortName'])
logger.info("\tVersion: %s\n", dep['version'])
else:
logger.info("No such plugin: %s", plugin_name)
except Exception as e:
raise exception.JcliException(e)
def run(self):
"""Executes chosen action."""
if self.action == 'list':
self.list_plugins()
if self.action == 'info':
self.plugin_info()
| {
"content_hash": "df87bbbbcfe26e62dd55cf21a9a60060",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 72,
"avg_line_length": 31.216666666666665,
"alnum_prop": 0.5568606513614522,
"repo_name": "bregman-arie/jcli",
"id": "832566ce1dc8e0d50073cbec2e33d958c3ae256c",
"size": "2499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jcli/executor/plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "44362"
},
{
"name": "Shell",
"bytes": "85"
}
],
"symlink_target": ""
} |
from JumpScale import j
class ActorsInfo():
def getActorMethodCall(self, appname, actor, method):
"""
used for during error show links to methods in browser
"""
url = "/rest/%s/%s/%s?" % (appname, actor, method)
auth = j.portal.server.active.ws.routes["%s_%s_%s" % (appname, actor, method)]['auth']
if auth:
params = ["authkey"]
else:
params = []
params.extend(list(j.portal.server.active.ws.routes["%s_%s_%s" % (appname, actor, method)]['params'].keys()))
for param in params:
url += "%s=&" % param
url += "format=text"
if url[-1] == "&":
url = url[:-1]
if url[-1] == "?":
url = url[:-1]
# url="<a href=\"%s\">%s</a> " % (url,url)
return url
def getActorInfoPage(self, appname, actorname, methodname, page=None):
"""
used for during error show info about 1 actor
"""
if appname == "" or actorname == "" or methodname == "":
txt = "getActorInfo need 3 params: appname, actorname, methoname, got: %s, %s,%s" % (
appname, actorname, methodname)
return txt
if page is None:
page = j.portal.server.active.pageprocessor.getpage()
page.addHeading("%s.%s.%s" % (appname, actorname, methodname), 5)
url = getActorMethodCall(appname, actorname, methodname)
routekey = "%s_%s_%s" % (appname, actorname, methodname)
if routekey not in j.portal.server.active.routes:
j.portal.server.active.activateActor(appname, actorname)
routeData = j.portal.server.active.routes[routekey]
# routedata: function,paramvalidation,paramdescription,paramoptional,description
description = routeData[4]
if description.strip() != "":
page.addMessage(description)
# param info
params = routeData[1]
descriptions = routeData[2]
# optional = routeData[3]
page.addLink("%s" % (methodname), url)
if len(list(params.keys())) > 0:
page.addBullet("Params:\n", 1)
for key in list(params.keys()):
if key in descriptions:
descr = descriptions[key].strip()
else:
descr = ""
page.addBullet("- %s : %s \n" % (key, descr), 2)
return page
def getActorsInfoPage(appname="", actor="", page=None, extraParams={}):
actorsloader = j.portal.server.active.actorsloader
if appname != "" and actor != "":
result = j.portal.server.active.activateActor(appname, actor)
if result is False:
# actor was not there
page = j.portal.server.active.pageprocessor.getpage()
page.addHeading("Could not find actor %s %s." % (appname, actor), 4)
return page
if page is None:
page = j.portal.server.active.pageprocessor.getpage()
if appname == "":
page.addHeading("Applications in appserver.", 4)
appnames = {}
# [item.split("_", 1) for item in self.app_actor_dict.keys()]:
for appname, actorname in actorsloader.getAppActors():
appnames[appname] = 1
appnames = sorted(appnames.keys())
for appname in appnames:
link = page.getLink("%s" % (appname), getActorInfoUrl(appname, ""))
page.addBullet(link)
return page
if actor == "":
page.addHeading("Actors for application %s" % (appname), 4)
actornames = []
# [item.split("_", 1) for item in self.app_actor_dict.keys()]:
for appname2, actorname2 in actorsloader.getAppActors():
if appname2 == appname:
actornames.append(actorname2)
actornames.sort()
for actorname in actornames:
link = page.getLink("%s" % (actorname), getActorInfoUrl(appname, actorname))
page.addBullet(link)
return page
keys = sorted(j.portal.server.active.routes.keys())
page.addHeading("list", 2)
for item in keys:
app2, actor2, method = item.split("_")
if app2 == appname and actor2 == actor:
url = getActorMethodCall(appname, actor, method)
link = page.getLink(item, url)
page.addBullet(link)
page.addHeading("details", 2)
for item in keys:
app2, actor2, method = item.split("_")
if app2 == appname and actor2 == actor:
page = getActorInfoPage(appname, actor, method, page=page)
| {
"content_hash": "eebc2642fe70d56f4ed595506cf6708a",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 117,
"avg_line_length": 39.608333333333334,
"alnum_prop": 0.5409215232484746,
"repo_name": "Jumpscale/jumpscale_portal8",
"id": "3dafbfce7fc2532e796f6b119b69d6b4ac0a2394",
"size": "4753",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/portal/portalloaders/ActorsInfo.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "482591"
},
{
"name": "HTML",
"bytes": "313255"
},
{
"name": "JavaScript",
"bytes": "8815099"
},
{
"name": "PHP",
"bytes": "205758"
},
{
"name": "Python",
"bytes": "974012"
},
{
"name": "Ruby",
"bytes": "28925"
},
{
"name": "Shell",
"bytes": "291"
}
],
"symlink_target": ""
} |
from django.views.generic import TemplateView
from opendata.catalog.models import UrlImage
class Home(TemplateView):
template_name = 'home.html'
def get_context_data(self, **kwargs):
context = super(Home, self).get_context_data(**kwargs)
random_feature = UrlImage.objects.select_random()
context["feature_images"] = random_feature
return context
| {
"content_hash": "50d00279ed26a2d3445f01f0261adf0f",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 62,
"avg_line_length": 30,
"alnum_prop": 0.7,
"repo_name": "openrural/open-data-nc",
"id": "f2824aea9e3483d3bec704648156316c844439d5",
"size": "390",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "opendata/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "508367"
},
{
"name": "Elixir",
"bytes": "536"
},
{
"name": "JavaScript",
"bytes": "333168"
},
{
"name": "Python",
"bytes": "346034"
},
{
"name": "Scheme",
"bytes": "12750"
},
{
"name": "Shell",
"bytes": "96367"
}
],
"symlink_target": ""
} |
import unittest
from .fake_api_client import make_fake_client
from .fake_api import FAKE_SECRET_NAME
class CreateServiceTest(unittest.TestCase):
def test_secrets_repr(self):
client = make_fake_client()
secret = client.secrets.create(name="super_secret", data="secret")
assert secret.__repr__() == f"<Secret: '{FAKE_SECRET_NAME}'>"
| {
"content_hash": "d9bd42a434dfe2e1b1a48fdabbb2245f",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 74,
"avg_line_length": 32.90909090909091,
"alnum_prop": 0.6878453038674033,
"repo_name": "docker/docker-py",
"id": "1c261a871ff20e6af03aa235aa99df2213287eb7",
"size": "362",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "tests/unit/models_secrets_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2114"
},
{
"name": "Makefile",
"bytes": "4612"
},
{
"name": "Python",
"bytes": "1073920"
},
{
"name": "Shell",
"bytes": "1165"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import bluebottle.utils.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0078_auto_20180528_1414'),
]
operations = [
migrations.AlterField(
model_name='projectlocation',
name='latitude',
field=models.DecimalField(blank=True, decimal_places=18, max_digits=21, null=True, verbose_name='latitude'),
),
migrations.AlterField(
model_name='projectlocation',
name='longitude',
field=models.DecimalField(blank=True, decimal_places=18, max_digits=21, null=True, verbose_name='longitude'),
)
]
| {
"content_hash": "6fd3f402dadec85e72a4e30f71e6f855",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 121,
"avg_line_length": 30.25,
"alnum_prop": 0.6322314049586777,
"repo_name": "onepercentclub/bluebottle",
"id": "9980988b6ab8afc0decde3a19f66f2f9a2501ded",
"size": "799",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluebottle/projects/migrations/0079_auto_20180626_1225.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41694"
},
{
"name": "HTML",
"bytes": "246695"
},
{
"name": "Handlebars",
"bytes": "63"
},
{
"name": "JavaScript",
"bytes": "139123"
},
{
"name": "PHP",
"bytes": "35"
},
{
"name": "PLpgSQL",
"bytes": "1369882"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "4983116"
},
{
"name": "Rich Text Format",
"bytes": "39109"
},
{
"name": "SCSS",
"bytes": "99555"
},
{
"name": "Shell",
"bytes": "3068"
},
{
"name": "Smarty",
"bytes": "3814"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.contrib.auth import authenticate
class OpenIDProfile(models.Model):
user = models.ForeignKey(User)
site = models.ForeignKey(Site, default=Site.objects.get_current)
identity = models.TextField(unique=True)
def __unicode__(self):
try:
return 'OpenID profile for %s, via provider %s' % (self.user, self.identity)
except User.DoesNotExist:
return 'OpenID profile for None, via provider None'
def authenticate(self):
return authenticate(identity=self.identity)
class OpenIDStore(models.Model):
site = models.ForeignKey(Site, default=Site.objects.get_current)
server_url = models.CharField(max_length=255)
handle = models.CharField(max_length=255)
secret = models.TextField()
issued = models.IntegerField()
lifetime = models.IntegerField()
assoc_type = models.TextField()
def __unicode__(self):
return u'OpenID Store %s for %s' % (self.server_url, self.site)
class OpenIDNonce(models.Model):
server_url = models.CharField(max_length=255)
timestamp = models.IntegerField()
salt = models.CharField(max_length=255)
date_created = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return u'OpenID Nonce for %s' % self.server_url
| {
"content_hash": "6bba658988d812fa48ab2ffc0d26170e",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 88,
"avg_line_length": 35.175,
"alnum_prop": 0.6986496090973703,
"repo_name": "Soovox/django-socialregistration",
"id": "964ade80793bc31d48e4fba2ba58f12d6a76fcab",
"size": "1407",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "socialregistration/contrib/openid/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "129905"
}
],
"symlink_target": ""
} |
"""Jacobian ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gradients_impl as gradient_ops
from tensorflow.python.ops.parallel_for import control_flow_ops
from tensorflow.python.util import nest
def jacobian(output, inputs, use_pfor=True, parallel_iterations=None):
"""Computes jacobian of `output` w.r.t. `inputs`.
Args:
output: A tensor.
inputs: A tensor or a nested structure of tensor objects.
use_pfor: If true, uses pfor for computing the jacobian. Else uses
tf.while_loop.
parallel_iterations: A knob to control how many iterations and dispatched in
parallel. This knob can be used to control the total memory usage.
Returns:
A tensor or a nested structure of tensors with the same structure as
`inputs`. Each entry is the jacobian of `output` w.r.t. to the corresponding
value in `inputs`. If output has shape [y_1, ..., y_n] and inputs_i has
shape [x_1, ..., x_m], the corresponding jacobian has shape
[y_1, ..., y_n, x_1, ..., x_m]. Note that in cases where the gradient is
sparse (IndexedSlices), jacobian function currently makes it dense and
returns a Tensor instead. This may change in the future.
"""
flat_inputs = nest.flatten(inputs)
output_tensor_shape = output.shape
output_shape = array_ops.shape(output)
output = array_ops.reshape(output, [-1])
def loop_fn(i):
y = array_ops.gather(output, i)
return gradient_ops.gradients(y, flat_inputs)
try:
output_size = int(output.shape[0])
except TypeError:
output_size = array_ops.shape(output)[0]
if use_pfor:
pfor_outputs = control_flow_ops.pfor(
loop_fn, output_size, parallel_iterations=parallel_iterations)
else:
pfor_outputs = control_flow_ops.for_loop(
loop_fn,
[output.dtype] * len(flat_inputs),
output_size,
parallel_iterations=parallel_iterations)
for i, out in enumerate(pfor_outputs):
if isinstance(out, ops.Tensor):
new_shape = array_ops.concat(
[output_shape, array_ops.shape(out)[1:]], axis=0)
out = array_ops.reshape(out, new_shape)
out.set_shape(output_tensor_shape.concatenate(flat_inputs[i].shape))
pfor_outputs[i] = out
return nest.pack_sequence_as(inputs, pfor_outputs)
def batch_jacobian(output, inp, use_pfor=True, parallel_iterations=None):
"""Computes and stacks jacobians of `output[i,...]` w.r.t. `input[i,...]`.
e.g.
x = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
y = x * x
jacobian = batch_jacobian(y, x)
# => [[[2, 0], [0, 4]], [[6, 0], [0, 8]]]
Args:
output: A tensor with shape [b, y1, ..., y_n]. `output[i,...]` should
only depend on `inp[i,...]`.
inp: A tensor with shape [b, x1, ..., x_m]
use_pfor: If true, uses pfor for computing the Jacobian. Else uses a
tf.while_loop.
parallel_iterations: A knob to control how many iterations are vectorized
and dispatched in parallel. The default value of None, when use_pfor is
true, corresponds to vectorizing all the iterations. When use_pfor is
false, the default value of None corresponds to parallel_iterations=10.
This knob can be used to control the total memory usage.
Returns:
A tensor `t` with shape [b, y_1, ..., y_n, x1, ..., x_m] where `t[i, ...]`
is the jacobian of `output[i, ...]` w.r.t. `inp[i, ...]`, i.e. stacked
per-example jacobians.
Raises:
ValueError: if first dimension of `output` and `inp` do not match.
"""
output_shape = output.shape
if not output_shape[0].is_compatible_with(inp.shape[0]):
raise ValueError("Need first dimension of output shape (%s) and inp shape "
"(%s) to match." % (output.shape, inp.shape))
if output_shape.is_fully_defined():
batch_size = int(output_shape[0])
output_row_size = output_shape.num_elements() // batch_size
else:
output_shape = array_ops.shape(output)
batch_size = output_shape[0]
output_row_size = array_ops.size(output) // batch_size
inp_shape = array_ops.shape(inp)
# Flatten output to 2-D.
with ops.control_dependencies(
[check_ops.assert_equal(batch_size, inp_shape[0])]):
output = array_ops.reshape(output, [batch_size, output_row_size])
def loop_fn(i):
y = array_ops.gather(output, i, axis=1)
return gradient_ops.gradients(y, inp)[0]
if use_pfor:
pfor_output = control_flow_ops.pfor(loop_fn, output_row_size,
parallel_iterations=parallel_iterations)
else:
pfor_output = control_flow_ops.for_loop(
loop_fn, output.dtype,
output_row_size,
parallel_iterations=parallel_iterations)
if pfor_output is None:
return None
pfor_output = array_ops.reshape(pfor_output,
[output_row_size, batch_size, -1])
output = array_ops.transpose(pfor_output, [1, 0, 2])
new_shape = array_ops.concat([output_shape, inp_shape[1:]], axis=0)
return array_ops.reshape(output, new_shape)
| {
"content_hash": "7dd1e38cf9ee8a29d919106fb088110d",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 80,
"avg_line_length": 39.1578947368421,
"alnum_prop": 0.6610983102918587,
"repo_name": "freedomtan/tensorflow",
"id": "81a00d4ab692e2a96a9acdf7d8a7f81b3afde3c9",
"size": "5897",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/parallel_for/gradients.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "32479"
},
{
"name": "Batchfile",
"bytes": "38366"
},
{
"name": "C",
"bytes": "1035837"
},
{
"name": "C#",
"bytes": "13395"
},
{
"name": "C++",
"bytes": "99324075"
},
{
"name": "CMake",
"bytes": "107781"
},
{
"name": "Dockerfile",
"bytes": "283435"
},
{
"name": "Go",
"bytes": "2013128"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "928595"
},
{
"name": "Jupyter Notebook",
"bytes": "981916"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "4489624"
},
{
"name": "Makefile",
"bytes": "97500"
},
{
"name": "NASL",
"bytes": "8048"
},
{
"name": "Objective-C",
"bytes": "141623"
},
{
"name": "Objective-C++",
"bytes": "360423"
},
{
"name": "PHP",
"bytes": "20570"
},
{
"name": "Pawn",
"bytes": "32277"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42762396"
},
{
"name": "RobotFramework",
"bytes": "2661"
},
{
"name": "Roff",
"bytes": "2515"
},
{
"name": "Ruby",
"bytes": "6723"
},
{
"name": "Shell",
"bytes": "647623"
},
{
"name": "Smarty",
"bytes": "52687"
},
{
"name": "Starlark",
"bytes": "4632847"
},
{
"name": "Swift",
"bytes": "56924"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
from ghhu.telefone import Telefone
class Telefonista:
def __init__(self):
self._telefone = Telefone()
self._contatos = []
def adicionar_contato(self, nome, numero):
self._contatos.append((nome, numero))
def ligar_para_todos_contatos(self, telefone=None):
tel = self._telefone if telefone is None else telefone
return ['{msg_telefone} - {contato}'.format(msg_telefone=tel.telefonar(numero), contato=nome)
for nome, numero in self._contatos]
| {
"content_hash": "81a8aa3246fbf5743b8fb619bfa10eb7",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 101,
"avg_line_length": 34.13333333333333,
"alnum_prop": 0.646484375,
"repo_name": "renzon/hotel-urbano",
"id": "8a5a790502d5bac022acb8dd6265682a9ceb059a",
"size": "512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ghhu/telefonista.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22184"
}
],
"symlink_target": ""
} |
from designate.objects.blacklist import Blacklist # noqa
from designate.objects.domain import Domain # noqa
from designate.objects.quota import Quota # noqa
from designate.objects.rrdata_a import RRData_A # noqa
from designate.objects.rrdata_aaaa import RRData_AAAA # noqa
from designate.objects.rrdata_cname import RRData_CNAME # noqa
from designate.objects.rrdata_mx import RRData_MX # noqa
from designate.objects.rrdata_ns import RRData_NS # noqa
from designate.objects.rrdata_ptr import RRData_PTR # noqa
from designate.objects.rrdata_soa import RRData_SOA # noqa
from designate.objects.rrdata_spf import RRData_SPF # noqa
from designate.objects.rrdata_srv import RRData_SRV # noqa
from designate.objects.rrdata_sshfp import RRData_SSHFP # noqa
from designate.objects.rrdata_txt import RRData_TXT # noqa
from designate.objects.record import Record # noqa
from designate.objects.recordset import RecordSet # noqa
from designate.objects.server import Server # noqa
from designate.objects.tenant import Tenant # noqa
from designate.objects.tld import Tld # noqa
from designate.objects.tsigkey import TsigKey # noqa
| {
"content_hash": "664e2bbf4e28c8c397442ca8ef1e8e48",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 63,
"avg_line_length": 56.8,
"alnum_prop": 0.8045774647887324,
"repo_name": "richm/designate",
"id": "a49d2befaa7ac0a4c77154633b2f3a08081a3fc3",
"size": "1774",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "designate/objects/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1272656"
},
{
"name": "Shell",
"bytes": "3809"
}
],
"symlink_target": ""
} |
"""
Goal: simplify the code when interacting with entities
Usage when declaring a model:
import db
class MyEntity(db.Model, CRUDMixin):
id = db.Column('myID', db.Integer, primary_key=True)
data = db.Column('myData', db.String(255))
MyTableEntity.create(data="abc")
my = MyTableEntity(data="abc")
db.session.save(my, commit=False)
found = MyTableEntity.get_by_id(1) is not None
"""
from redidropper.main import db
class CRUDMixin(object):
""" Helper class flask-sqlalchemy entities """
__table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True)
@classmethod
def get_by_id(cls, id):
if any(
(isinstance(id, basestring) and id.isdigit(),
isinstance(id, (int, float))),
):
return cls.query.get(int(id))
return None
@classmethod
def create(cls, **kwargs):
""" Helper for session.add() + session.commit() """
instance = cls(**kwargs)
return instance.save()
def update(self, commit=True, **kwargs):
for attr, value in kwargs.iteritems():
setattr(self, attr, value)
return commit and self.save() or self
def save(self, commit=True):
db.session.add(self)
if commit:
db.session.commit()
return self
def delete(self, commit=True):
db.session.delete(self)
return commit and db.session.commit()
| {
"content_hash": "9ffbd555d6a8d003e8d29a4e8b042d29",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 59,
"avg_line_length": 24.423728813559322,
"alnum_prop": 0.6141568355308813,
"repo_name": "indera/redi-dropper-client",
"id": "90b023243400c030e027fc1817ecefd5ee2c6d9f",
"size": "1441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/redidropper/database/crud_mixin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "13363"
},
{
"name": "HTML",
"bytes": "61929"
},
{
"name": "JavaScript",
"bytes": "103072"
},
{
"name": "Makefile",
"bytes": "1028"
},
{
"name": "Python",
"bytes": "146815"
},
{
"name": "Ruby",
"bytes": "934"
},
{
"name": "Shell",
"bytes": "10259"
},
{
"name": "VimL",
"bytes": "1716"
}
],
"symlink_target": ""
} |
import os
import argparse
import ConfigParser
configfile = None
logfile = None
is_daemon = False
be_verbose = False
def parse_args() :
global configfile, logfile, is_daemon, be_verbose
ap = argparse.ArgumentParser(description="Collector and correlator of Netflow v5, v9 and IPFIX flows and Syslog messages")
ap.add_argument('-c', metavar='configfile', default='/usr/local/etc/collectord.conf', help="collectors' config file")
ap.add_argument('-l', metavar='logfile', default='/var/log/collectord.log', help='log file for collector own messages')
ap.add_argument('-d', action='store_true', help='start as daemon')
ap.add_argument('-v', action='store_true', help='verbose debug messages')
args = ap.parse_args()
configfile = args.c
logfile = args.l
is_daemon = args.d
be_verbose = args.v
return args
def parse_config(filename) :
if not os.path.isfile(filename):
print("File {0} not found".format(filename))
quit()
cf = ConfigParser.SafeConfigParser()
cf.read(filename)
res = {}
res['sections'] = cf.sections()
for sect in res['sections'] :
opts = {}
for opt in ['address', 'port', 'type'] :
opts[opt] = cf.get(sect, opt)
res[sect] = opts
return res
def print_args_config(config) :
print("Running the following config:")
print(" logfile name: {0}".format(logfile))
print(" config file name: {0}".format(configfile))
print(" is daemon: {0}".format(is_daemon))
print(" be verbose: {0}".format(be_verbose))
print('Config file is:')
for s in config['sections']:
print("Section {0}:".format(s))
print(" Collector type: {0}".format(config[s]['type']))
print(" Listening on : {0}:{1}".format(config[s]['address'], config[s]['port']))
if __name__ == "__main__":
parse_args()
c = parse_config(configfile)
if c == None :
print('Error parsing config file')
else :
print_args_config(c)
| {
"content_hash": "4b199ff8516e56aa8b181cc3e5ab868a",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 126,
"avg_line_length": 32.75806451612903,
"alnum_prop": 0.6149679960610537,
"repo_name": "ilmarh/collectors",
"id": "9d00fe9972a72be12fe081297f46eadc28e75def",
"size": "2054",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "collector_config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "29967"
}
],
"symlink_target": ""
} |
from .rowobjects import Row, BaseRow, cleanup_name, banned_columns
from .selections import Selection
| {
"content_hash": "dc21981cf1ab8a1be240c1089f745ef7",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 66,
"avg_line_length": 50.5,
"alnum_prop": 0.8217821782178217,
"repo_name": "DolphDev/PSV",
"id": "308c45d5169ce0d1fbcb44b7ed3fc43bf41108d8",
"size": "101",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "psv/core/objects/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "124853"
}
],
"symlink_target": ""
} |
"""Provide some handy classes for user to implement a simple computation module
in Python easily.
"""
import logging
from .base_module import BaseModule
from ..initializer import Uniform
from .. import ndarray as nd
class PythonModule(BaseModule):
"""A convenient module class that implements many of the module APIs as
empty functions.
Parameters
----------
data_names : list of str
Names of the data expected by the module.
label_names : list of str
Names of the labels expected by the module. Could be ``None`` if the
module does not need labels.
output_names : list of str
Names of the outputs.
"""
def __init__(self, data_names, label_names, output_names, logger=logging):
super(PythonModule, self).__init__(logger=logger)
if isinstance(data_names, tuple):
data_names = list(data_names)
if isinstance(label_names, tuple):
label_names = list(label_names)
self._data_names = data_names
self._label_names = label_names
self._output_names = output_names
self._data_shapes = None
self._label_shapes = None
self._output_shapes = None
################################################################################
# Symbol information
################################################################################
@property
def data_names(self):
"""A list of names for data required by this module."""
return self._data_names
@property
def output_names(self):
"""A list of names for the outputs of this module."""
return self._output_names
################################################################################
# Input/Output information
################################################################################
@property
def data_shapes(self):
"""A list of (name, shape) pairs specifying the data inputs to this module."""
return self._data_shapes
@property
def label_shapes(self):
"""A list of (name, shape) pairs specifying the label inputs to this module.
If this module does not accept labels -- either it is a module without loss
function, or it is not bound for training, then this should return an empty
list ``[]```.
"""
return self._label_shapes
@property
def output_shapes(self):
"""A list of (name, shape) pairs specifying the outputs of this module."""
return self._output_shapes
################################################################################
# Parameters of a module
################################################################################
def get_params(self):
"""Gets parameters, those are potentially copies of the the actual parameters used
to do computation on the device. Subclass should override this method if contains
parameters.
Returns
-------
``({}, {})``, a pair of empty dict.
"""
return (dict(), dict())
def init_params(self, initializer=Uniform(0.01), arg_params=None, aux_params=None,
allow_missing=False, force_init=False):
"""Initializes the parameters and auxiliary states. By default this function
does nothing. Subclass should override this method if contains parameters.
Parameters
----------
initializer : Initializer
Called to initialize parameters if needed.
arg_params : dict
If not ``None``, should be a dictionary of existing `arg_params`. Initialization
will be copied from that.
aux_params : dict
If not ``None``, should be a dictionary of existing `aux_params`. Initialization
will be copied from that.
allow_missing : bool
If ``True``, params could contain missing values, and the initializer will be
called to fill those missing params.
force_init : bool
If ``True``, will force re-initialize even if already initialized.
"""
pass
def update(self):
"""Updates parameters according to the installed optimizer and the gradients computed
in the previous forward-backward batch. Currently we do nothing here. Subclass should
override this method if contains parameters.
"""
pass
def update_metric(self, eval_metric, labels):
"""Evaluates and accumulates evaluation metric on outputs of the last forward computation.
Subclass should override this method if needed.
Parameters
----------
eval_metric : EvalMetric
labels : list of NDArray
Typically ``data_batch.label``.
"""
if self._label_shapes is None:
# since we do not need labels, we are probably not a module with a loss
# function or predictions, so just ignore this call
return
# by default we expect our outputs are some scores that could be evaluated
eval_metric.update(labels, self.get_outputs())
################################################################################
# module setup
################################################################################
def bind(self, data_shapes, label_shapes=None, for_training=True,
inputs_need_grad=False, force_rebind=False, shared_module=None,
grad_req='write'):
"""Binds the symbols to construct executors. This is necessary before one
can perform computation with the module.
Parameters
----------
data_shapes : list of (str, tuple)
Typically is ``data_iter.provide_data``.
label_shapes : list of (str, tuple)
Typically is ``data_iter.provide_label``.
for_training : bool
Default is ``True``. Whether the executors should be bind for training.
inputs_need_grad : bool
Default is ``False``. Whether the gradients to the input data need to be computed.
Typically this is not needed. But this might be needed when implementing composition
of modules.
force_rebind : bool
Default is ``False``. This function does nothing if the executors are already
bound. But with this ``True``, the executors will be forced to rebind.
shared_module : Module
Default is ``None``. This is used in bucketing. When not ``None``, the shared module
essentially corresponds to a different bucket -- a module with different symbol
but with the same sets of parameters (e.g. unrolled RNNs with different lengths).
grad_req : str, list of str, dict of str to str
Requirement for gradient accumulation. Can be 'write', 'add', or 'null'
(default to 'write').
Can be specified globally (str) or for each argument (list, dict).
"""
if self.binded and not force_rebind:
self.logger.warning('Already bound, ignoring bind()')
return
assert grad_req == 'write', "Python module only support write gradient"
self.for_training = for_training
self.inputs_need_grad = inputs_need_grad
assert len(data_shapes) == len(self._data_names)
assert [x[0] for x in data_shapes] == self._data_names
self._data_shapes = data_shapes
self._label_shapes = label_shapes
if label_shapes is not None:
assert self._label_names is not None
assert len(self._label_names) == len(label_shapes)
assert [x[0] for x in label_shapes] == self._label_names
self._output_shapes = self._compute_output_shapes()
def _compute_output_shapes(self):
"""The subclass should implement this method to compute the shape of
outputs. This method can assume that the ``data_shapes`` and ``label_shapes``
are already initialized.
"""
raise NotImplementedError()
def init_optimizer(self, kvstore='local', optimizer='sgd',
optimizer_params=(('learning_rate', 0.01),), force_init=False):
"""Installs and initializes optimizers. By default we do nothing. Subclass should
override this method if needed.
Parameters
----------
kvstore : str or KVStore
Default `'local'`.
optimizer : str or Optimizer
Default `'sgd'`
optimizer_params : dict
Default `(('learning_rate', 0.01),)`. The default value is not a dictionary,
just to avoid pylint warning of dangerous default values.
force_init : bool
Default `False`, indicating whether we should force re-initializing the
optimizer in the case an optimizer is already installed.
"""
pass
class PythonLossModule(PythonModule):
"""A convenient module class that implements many of the module APIs as
empty functions.
Parameters
----------
name : str
Names of the module. The outputs will be named `[name + '_output']`.
data_names : list of str
Defaults to ``['data']``. Names of the data expected by this module.
Should be a list of only one name.
label_names : list of str
Default ``['softmax_label']``. Names of the labels expected by the module.
Should be a list of only one name.
grad_func : function
Optional. If not ``None``, should be a function that takes `scores`
and `labels`, both of type `NDArray`, and return the gradients with
respect to the scores according to this loss function. The return
value could be a numpy array or an `NDArray`.
"""
def __init__(self, name='pyloss', data_names=('data',), label_names=('softmax_label',),
logger=logging, grad_func=None):
super(PythonLossModule, self).__init__(data_names, label_names,
[name + '_output'], logger=logger)
self._name = name
assert len(data_names) == 1
assert len(label_names) == 1
self._scores = None
self._labels = None
self._scores_grad = None
if grad_func is not None:
assert callable(grad_func)
self._grad_func = grad_func
def _compute_output_shapes(self):
"""Computes the shapes of outputs. As a loss module with outputs, we simply
output whatever we receive as inputs (i.e. the scores).
"""
return [(self._name + '_output', self._data_shapes[0][1])]
def forward(self, data_batch, is_train=None):
"""Forward computation. Here we do nothing but to keep a reference to
the scores and the labels so that we can do backward computation.
Parameters
----------
data_batch : DataBatch
Could be anything with similar API implemented.
is_train : bool
Default is ``None``, which means `is_train` takes the value of ``self.for_training``.
"""
self._scores = data_batch.data[0]
if is_train is None:
is_train = self.for_training
if is_train:
self._labels = data_batch.label[0]
def get_outputs(self, merge_multi_context=True):
"""Gets outputs of the previous forward computation. As a output loss module,
we treat the inputs to this module as scores, and simply return them.
Parameters
----------
merge_multi_context : bool
Should always be ``True``, because we do not use multiple contexts for computing.
"""
assert merge_multi_context is True
return [self._scores]
def backward(self, out_grads=None):
"""Backward computation.
Parameters
----------
out_grads : NDArray or list of NDArray, optional
Gradient on the outputs to be propagated back.
This parameter is only needed when bind is called
on outputs that are not a loss function.
"""
assert out_grads is None, 'For a loss module, out_grads should be None'
assert self.for_training
self._backward_impl()
def _backward_impl(self):
"""Actual implementation of the backward computation. The computation
should take ``self._scores`` and ``self._labels`` and then compute the
gradients with respect to the scores, store it as an `NDArray` in
``self._scores_grad``.
Instead of defining a subclass and overriding this function,
a more convenient way is to pass in a `grad_func` when constructing
the module object. Then it will be called to compute the gradients.
"""
if self._grad_func is not None:
grad = self._grad_func(self._scores, self._labels)
if not isinstance(grad, nd.NDArray):
grad = nd.array(grad)
self._scores_grad = grad
else:
raise NotImplementedError()
def get_input_grads(self, merge_multi_context=True):
"""Gets the gradients to the inputs, computed in the previous backward computation.
Parameters
----------
merge_multi_context : bool
Should always be ``True`` because we do not use multiple context for computation.
"""
assert merge_multi_context is True
return [self._scores_grad]
def install_monitor(self, mon):
"""Installs monitor on all executors."""
raise NotImplementedError()
| {
"content_hash": "a950fc0e82597f405cd1b0cbe6c5e410",
"timestamp": "",
"source": "github",
"line_count": 337,
"max_line_length": 98,
"avg_line_length": 40.50148367952522,
"alnum_prop": 0.5830463770239578,
"repo_name": "arikpoz/mxnet",
"id": "f46ea280aaff7bf939029e5e1597363255a63ca0",
"size": "13716",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "python/mxnet/module/python_module.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10619"
},
{
"name": "C",
"bytes": "84504"
},
{
"name": "C++",
"bytes": "3026637"
},
{
"name": "CMake",
"bytes": "46661"
},
{
"name": "Cuda",
"bytes": "492497"
},
{
"name": "Java",
"bytes": "2868"
},
{
"name": "Jupyter Notebook",
"bytes": "1229390"
},
{
"name": "Makefile",
"bytes": "38000"
},
{
"name": "Matlab",
"bytes": "30187"
},
{
"name": "Perl",
"bytes": "575202"
},
{
"name": "Perl6",
"bytes": "21768"
},
{
"name": "Protocol Buffer",
"bytes": "77256"
},
{
"name": "Python",
"bytes": "2618370"
},
{
"name": "R",
"bytes": "255240"
},
{
"name": "Scala",
"bytes": "852052"
},
{
"name": "Shell",
"bytes": "106373"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django_lean.experiments.models import GoalRecord
from django_lean.experiments.signals import goal_recorded, user_enrolled
from django_lean.lean_analytics import get_all_analytics
def analytics_goalrecord(sender, goal_record, experiment_user, *args, **kwargs):
if getattr(settings, 'LEAN_ANALYTICS_FOR_EXPERIMENTS', False):
for analytics in get_all_analytics():
analytics.record(goal_record=goal_record,
experiment_user=experiment_user)
goal_recorded.connect(analytics_goalrecord, sender=GoalRecord)
def analytics_enrolled(sender, experiment, experiment_user, group_id,
*args, **kwargs):
if getattr(settings, 'LEAN_ANALYTICS_FOR_EXPERIMENTS', False):
for analytics in get_all_analytics():
analytics.enroll(experiment=experiment,
experiment_user=experiment_user,
group_id=group_id)
user_enrolled.connect(analytics_enrolled)
| {
"content_hash": "9ef1b7ad6f59c077655f5d04e09e43e8",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 80,
"avg_line_length": 42.666666666666664,
"alnum_prop": 0.68359375,
"repo_name": "MontmereLimited/django-lean",
"id": "664f83068e6034b50470488fa17ce638acd8e002",
"size": "1024",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "django_lean/lean_analytics/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "14189"
},
{
"name": "JavaScript",
"bytes": "2774"
},
{
"name": "Python",
"bytes": "301048"
}
],
"symlink_target": ""
} |
from django.db import models
class User(models.Model):
name = models.CharField(max_length=32)
mail = models.EmailField()
def __repr__(self):
return "{}: {}".format(self.pk, self.name)
__str__ = __repr__
class Entry(models.Model):
STATUS_DRAFT = "draft"
STATUS_PUBLIC = "public"
STATUS_SET = (
(STATUS_DRAFT, "下書き"),
(STATUS_PUBLIC, "公開中"),
)
title = models.CharField(max_length=128)
body = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
status = models.CharField(choices=STATUS_SET, default=STATUS_DRAFT, max_length=8)
author = models.ForeignKey(User, related_name='entries')
| {
"content_hash": "f98f6634f5196b127a8340353bb058d1",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 85,
"avg_line_length": 30.916666666666668,
"alnum_prop": 0.6361185983827493,
"repo_name": "t-yanaka/zabbix-report",
"id": "288a240c9707a59e79c4ea2a3063d4cc0e096b74",
"size": "754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graph/api/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "325516"
},
{
"name": "HTML",
"bytes": "151753"
},
{
"name": "JavaScript",
"bytes": "697732"
},
{
"name": "Python",
"bytes": "2557181"
},
{
"name": "Shell",
"bytes": "4285"
}
],
"symlink_target": ""
} |
"""
py_vollib.ref_python.black_scholes_merton.greeks.numerical
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A library for option pricing, implied volatility, and
greek calculation. py_vollib is based on lets_be_rational,
a Python wrapper for LetsBeRational by Peter Jaeckel as
described below.
:copyright: © 2017 Gammon Capital LLC
:license: MIT, see LICENSE for more details.
py_vollib.ref_python is a pure python version of py_vollib without any dependence on LetsBeRational. It is provided purely as a reference implementation for sanity checking. It is not recommended for industrial use.
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
"""
# -----------------------------------------------------------------------------
# IMPORTS
# Standard library imports
# Related third party imports
# Local application/library specific imports
from py_vollib.ref_python.black_scholes_merton import black_scholes_merton
from py_vollib.helpers.numerical_greeks import delta as numerical_delta
from py_vollib.helpers.numerical_greeks import vega as numerical_vega
from py_vollib.helpers.numerical_greeks import theta as numerical_theta
from py_vollib.helpers.numerical_greeks import rho as numerical_rho
from py_vollib.helpers.numerical_greeks import gamma as numerical_gamma
from py_vollib.ref_python.black_scholes_merton.greeks.analytical import gamma as agamma
from py_vollib.ref_python.black_scholes_merton.greeks.analytical import delta as adelta
from py_vollib.ref_python.black_scholes_merton.greeks.analytical import vega as avega
from py_vollib.ref_python.black_scholes_merton.greeks.analytical import rho as arho
from py_vollib.ref_python.black_scholes_merton.greeks.analytical import theta as atheta
# -----------------------------------------------------------------------------
# FUNCTIONS - NUMERICAL GREEK CALCULATION
f = lambda flag, S, K, t, r, sigma, b: black_scholes_merton(flag, S, K, t, r, sigma, r-b)
def delta(flag, S, K, t, r, sigma, q):
"""Returns the Black-Scholes-Merton delta of an option.
:param flag: 'c' or 'p' for call or put.
:type flag: str
:param S: underlying asset price
:type S: float
:param K: strike price
:type K: float
:param t: time to expiration in years
:type t: float
:param r: annual risk-free interest rate
:type r: float
:param sigma: volatility
:type sigma: float
:param q: annualized continuous dividend yield
:type q: float
:returns: float
"""
return numerical_delta(flag, S, K, t, r, sigma, r-q, f)
def theta(flag, S, K, t, r, sigma, q):
"""Returns the Black-Scholes-Merton theta of an option.
:param flag: 'c' or 'p' for call or put.
:type flag: str
:param S: underlying asset price
:type S: float
:param K: strike price
:type K: float
:param t: time to expiration in years
:type t: float
:param r: annual risk-free interest rate
:type r: float
:param sigma: volatility
:type sigma: float
:param q: annualized continuous dividend yield
:type q: float
:returns: float
"""
return numerical_theta(flag, S, K, t, r, sigma, r-q, f)
def vega(flag, S, K, t, r, sigma, q):
"""Returns the Black-Scholes-Merton vega of an option.
:param flag: 'c' or 'p' for call or put.
:type flag: str
:param S: underlying asset price
:type S: float
:param K: strike price
:type K: float
:param t: time to expiration in years
:type t: float
:param r: annual risk-free interest rate
:type r: float
:param sigma: volatility
:type sigma: float
:param q: annualized continuous dividend yield
:type q: float
:returns: float
"""
return numerical_vega(flag, S, K, t, r, sigma, r-q, f)
def rho(flag, S, K, t, r, sigma, q):
"""Returns the Black-Scholes-Merton rho of an option.
:param flag: 'c' or 'p' for call or put.
:type flag: str
:param S: underlying asset price
:type S: float
:param K: strike price
:type K: float
:param t: time to expiration in years
:type t: float
:param r: annual risk-free interest rate
:type r: float
:param sigma: volatility
:type sigma: float
:param q: annualized continuous dividend yield
:type q: float
:returns: float
"""
return numerical_rho(flag, S, K, t, r, sigma, r-q, f)
def gamma(flag, S, K, t, r, sigma, q):
"""Returns the Black-Scholes-Merton gamma of an option.
:param flag: 'c' or 'p' for call or put.
:type flag: str
:param S: underlying asset price
:type S: float
:param K: strike price
:type K: float
:param t: time to expiration in years
:type t: float
:param r: annual risk-free interest rate
:type r: float
:param sigma: volatility
:type sigma: float
:param q: annualized continuous dividend yield
:type q: float
:returns: float
"""
return numerical_gamma(flag, S, K, t, r, sigma, r-q, f)
def test_analytical_vs_numerical():
"""Test by comparing analytical and numerical values.
>>> S = 49
>>> K = 50
>>> r = .05
>>> q = .05
>>> t = 0.3846
>>> sigma = 0.2
>>> flag = 'c'
>>> epsilon = .0001
>>> v1 = delta(flag, S, K, t, r, sigma, q)
>>> v2 = adelta(flag, S, K, t, r, sigma, q)
>>> abs(v1-v2)<epsilon
True
>>> v1 = gamma(flag, S, K, t, r, sigma, q)
>>> v2 = agamma(flag, S, K, t, r, sigma, q)
>>> abs(v1-v2)<epsilon
True
>>> v1 = rho(flag, S, K, t, r, sigma, q)
>>> v2 = arho(flag, S, K, t, r, sigma, q)
>>> abs(v1-v2)<epsilon
True
>>> v1 = vega(flag, S, K, t, r, sigma, q)
>>> v2 = avega(flag, S, K, t, r, sigma, q)
>>> abs(v1-v2)<epsilon
True
>>> v1 = theta(flag, S, K, t, r, sigma, q)
>>> v2 = atheta(flag, S, K, t, r, sigma, q)
>>> abs(v1-v2)<epsilon
True
"""
pass
if __name__ == "__main__":
from py_vollib.helpers.doctest_helper import run_doctest
run_doctest()
| {
"content_hash": "7a04a4e03da375bdcda44247ef43fc63",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 215,
"avg_line_length": 29.729468599033815,
"alnum_prop": 0.601559961000975,
"repo_name": "vollib/py_vollib",
"id": "2976160e3407c0dc19c086a9d70a2ba297af86d1",
"size": "6180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py_vollib/ref_python/black_scholes_merton/greeks/numerical.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "226337"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division
from hscom import __common__
(print, print_, print_on, print_off, rrr, profile,
printDBG) = __common__.init(__name__, '[encounter]', DEBUG=False)
# Python
from itertools import izip
# Science
import networkx as netx
import numpy as np
from scipy.cluster.hierarchy import fclusterdata
# HotSpotter
from hotspotter import match_chips3 as mc3
from hscom import fileio as io
from hscom import helpers as util
from hsviz import draw_func2 as df2
def compute_encounters(hs, seconds_thresh=15):
'''
clusters encounters togethers (by time, not space)
An encounter is a meeting, localized in time and space between a camera and
a group of animals. Animals are identified within each encounter.
'''
if not 'seconds_thresh' in vars():
seconds_thresh = 3
# For each image
gx_list = hs.get_valid_gxs()
# TODO: Get image GPS location
#gps_info_list = hs.gx2_exif(gx_list, tag='GPSInfo')
#gps_lat_list = hs.gx2_exif(gx_list, tag='GPSLatitude')
#gps_lon_list = hs.gx2_exif(gx_list, tag='GPSLongitude')
#gps_latref_list = hs.gx2_exif(gx_list, tag='GPSLatitudeRef')
#gps_lonref_list = hs.gx2_exif(gx_list, tag='GPSLongitudeRef')
# Get image timestamps
datetime_list = hs.gx2_exif(gx_list, tag='DateTime')
nImgs = len(datetime_list)
valid_listx = [ix for ix, dt in enumerate(datetime_list) if dt is not None]
nWithExif = len(valid_listx)
nWithoutExif = nImgs - nWithExif
print('[encounter] %d / %d images with exif data' % (nWithExif, nImgs))
print('[encounter] %d / %d images without exif data' % (nWithoutExif, nImgs))
# Convert datetime objects to unixtime scalars
unixtime_list = [io.exiftime_to_unixtime(datetime_str) for datetime_str in datetime_list]
unixtime_list = np.array(unixtime_list)
# Agglomerative clustering of unixtimes
print('[encounter] clustering')
X_data = np.vstack([unixtime_list, np.zeros(len(unixtime_list))]).T
gx2_clusterid = fclusterdata(X_data, seconds_thresh, criterion='distance')
# Reverse the image to cluster index mapping
clusterx2_gxs = [[] for _ in xrange(gx2_clusterid.max())]
for gx, clusterx in enumerate(gx2_clusterid):
clusterx2_gxs[clusterx - 1].append(gx) # IDS are 1 based
# Print images per encouter statistics
clusterx2_nGxs = np.array(map(len, clusterx2_gxs))
print('[encounter] image per encounter stats:\n %s'
% util.pstats(clusterx2_nGxs, True))
# Sort encounters by images per encounter
ex2_clusterx = clusterx2_nGxs.argsort()
gx2_ex = [None] * len(gx2_clusterid)
ex2_gxs = [None] * len(ex2_clusterx)
for ex, clusterx in enumerate(ex2_clusterx):
gxs = clusterx2_gxs[clusterx]
ex2_gxs[ex] = gxs
for gx in gxs:
gx2_ex[gx] = ex
return gx2_ex, ex2_gxs
def build_encounter_ids(ex2_gxs, gx2_clusterid):
USE_STRING_ID = True
gx2_eid = [None] * len(gx2_clusterid)
for ex, gxs in enumerate(ex2_gxs):
for gx in gxs:
nGx = len(gxs)
gx2_eid[gx] = ('ex=%r_nGxs=%d' % (ex, nGx)
if USE_STRING_ID else
ex + (nGx / 10 ** np.ceil(np.log(nGx) / np.log(10))))
def get_chip_encounters(hs):
gx2_ex, ex2_gxs = compute_encounters(hs)
# Build encounter to chips from encounter to images
ex2_cxs = [None for _ in xrange(len(ex2_gxs))]
for ex, gxs in enumerate(ex2_gxs):
ex2_cxs[ex] = util.flatten(hs.gx2_cxs(gxs))
# optional
# resort encounters by number of chips
ex2_nCxs = map(len, ex2_cxs)
ex2_cxs = [y for (x, y) in sorted(zip(ex2_nCxs, ex2_cxs))]
return ex2_cxs
def get_fmatch_iter(res):
# USE res.get_fmatch_iter()
fmfsfk_enum = enumerate(izip(res.cx2_fm, res.cx2_fs, res.cx2_fk))
fmatch_iter = ((cx, fx_tup, score, rank)
for cx, (fm, fs, fk) in fmfsfk_enum
for (fx_tup, score, rank) in izip(fm, fs, fk))
return fmatch_iter
def get_cxfx_enum(qreq):
ax2_cxs = qreq._data_index.ax2_cx
ax2_fxs = qreq._data_index.ax2_fx
cxfx_enum = enumerate(izip(ax2_cxs, ax2_fxs))
return cxfx_enum
def make_feature_graph(qreq, qcx2_res, use_networkx=True):
# Make a graph between the chips
cxfx2_ax = {(cx, fx): ax for ax, (cx, fx) in get_cxfx_enum(qreq)}
def w_edge(cx1, cx2, fx1, fx2, score, rank):
ax1 = cxfx2_ax[(cx1, fx1)]
ax2 = cxfx2_ax[(cx2, fx2)]
attr_dict = {'score': score, 'rank': rank}
return (ax1, ax2, attr_dict)
nodes = [(ax, {'fx': fx, 'cx': cx}) for ax, (cx, fx) in get_cxfx_enum(qreq)]
weighted_edges = [w_edge(cx1, cx2, fx1, fx2, score, rank)
for (cx1, res) in qcx2_res.iteritems()
for (cx2, (fx1, fx2), score, rank) in get_fmatch_iter(res)
if score > 0]
if use_networkx:
graph = netx.DiGraph()
graph.add_nodes_from(nodes)
graph.add_edges_from(weighted_edges)
else:
vx2_ax = cxfx2_ax.values()
import graph_tool
graph = graph_tool.Graph(g=None, directed=True, prune=False, vorder=None)
vertex_list = graph.add_vertex(n=len(nodes))
v_fx = graph.new_vertex_property("int")
v_cx = graph.new_vertex_property("int")
e_score = graph.new_edge_property("float")
e_rank = graph.new_edge_property("int")
for v, (ax, vprops) in zip(vertex_list, nodes):
v_cx[v] = int(vprops['cx'])
v_fx[v] = int(vprops['fx'])
mark_prog, end_prog = util.progress_func(len(weighted_edges))
count = 0
for ax1, ax2, prop_dict in weighted_edges:
mark_prog(count)
count += 1
vx1 = vx2_ax.index(ax1)
vx2 = vx2_ax.index(ax2)
v1 = graph.vertex(vx1)
v2 = graph.vertex(vx2)
e = graph.add_edge(v1, v2)
e_score[e] = float(prop_dict['score'])
e_rank[e] = int(prop_dict['rank'])
mark_prog(count)
end_prog()
#import graph_tool.draw
graph.save('test_graph.dot')
return graph
def make_chip_graph(qcx2_res):
# Make a graph between the chips
nodes = qcx2_res.keys()
#attr_edges = [(res.qcx, cx, {'score': score})
#for res in qcx2_res.itervalues()
#for cx, score in enumerate(res.cx2_score) if score > 0]
weighted_edges = [(res.qcx, cx, score)
for res in qcx2_res.itervalues()
for cx, score in enumerate(res.cx2_score) if score > 0]
graph = netx.DiGraph()
graph.add_nodes_from(nodes)
graph.add_weighted_edges_from(weighted_edges)
return graph
def viz_graph(graph):
netx.draw(graph)
def viz_chipgraph(hs, graph, fnum=1, with_images=False):
# Adapated from
# https://gist.github.com/shobhit/3236373
print('[encounter] drawing chip graph')
df2.figure(fnum=fnum, pnum=(1, 1, 1))
ax = df2.gca()
#pos = netx.spring_layout(graph)
pos = netx.graphviz_layout(graph)
netx.draw(graph, pos=pos, ax=ax)
if with_images:
cx_list = graph.nodes()
pos_list = [pos[cx] for cx in cx_list]
thumb_list = hs.get_thumb(cx_list, 16, 16)
draw_images_at_positions(thumb_list, pos_list)
def draw_images_at_positions(img_list, pos_list):
print('[encounter] drawing %d images' % len(img_list))
# Thumb stack
ax = df2.gca()
fig = df2.gcf()
trans = ax.transData.transform
trans2 = fig.transFigure.inverted().transform
mark_progress, end_progress = util.progress_func(len(pos_list), lbl='drawing img')
for ix, ((x, y), img) in enumerate(izip(pos_list, img_list)):
mark_progress(ix)
xx, yy = trans((x, y)) # figure coordinates
xa, ya = trans2((xx, yy)) # axes coordinates
#
width, height = img.shape[0:2]
tlx = xa - (width / 2.0)
tly = ya - (height / 2.0)
img_bbox = [tlx, tly, width, height]
# Make new axis for the image
img_ax = df2.plt.axes(img_bbox)
img_ax.imshow(img)
img_ax.set_aspect('equal')
img_ax.axis('off')
end_progress()
def intra_query_cxs(hs, cxs):
dcxs = qcxs = cxs
qreq = mc3.prep_query_request(qreq=hs.qreq,
qcxs=qcxs,
dcxs=dcxs,
query_cfg=hs.prefs.query_cfg)
qcx2_res = mc3.process_query_request(hs, qreq)
return qcx2_res
def intra_encounter_match(hs, cxs, **kwargs):
# Make a graph between the chips
qcx2_res = intra_query_cxs(cxs)
graph = make_chip_graph(qcx2_res)
# TODO: Make a super cool algorithm which does this correctly
#graph.cutEdges(**kwargs)
# Get a temporary name id
# TODO: ensure these name indexes do not conflict with other encounters
#cx2_nx, nx2_cxs = graph.getConnectedComponents()
return graph
def execute_all_intra_encounter_match(hs, **kwargs):
# Group images / chips into encounters
ex2_cxs = get_chip_encounters(hs)
# For each encounter
ex2_names = {}
for ex, cxs in enumerate(ex2_cxs):
pass
# Perform Intra-Encounter Matching
nx2_cxs = intra_encounter_match(hs, cxs)
ex2_names[ex] = nx2_cxs
return ex2_names
def inter_encounter_match(hs, eid2_names=None, **kwargs):
# Perform Inter-Encounter Matching
if eid2_names is None:
eid2_names = intra_encounter_match(hs, **kwargs)
all_nxs = util.flatten(eid2_names.values())
for nx2_cxs in eid2_names:
qnxs = nx2_cxs
dnxs = all_nxs
name_result = hs.query(qnxs=qnxs, dnxs=dnxs)
qcx2_res = name_result.chip_results()
graph = netx.Graph()
graph.add_nodes_from(range(len(qcx2_res)))
graph.add_edges_from([res.cx2_fm for res in qcx2_res.itervalues()])
graph.setWeights([(res.cx2_fs, res.cx2_fk) for res in qcx2_res.itervalues()])
graph.cutEdges(**kwargs)
cx2_nx, nx2_cxs = graph.getConnectedComponents()
return cx2_nx
| {
"content_hash": "5788208835c832d9d116e6b81e5a8ece",
"timestamp": "",
"source": "github",
"line_count": 282,
"max_line_length": 93,
"avg_line_length": 35.819148936170215,
"alnum_prop": 0.6094446094446094,
"repo_name": "SU-ECE-17-7/hotspotter",
"id": "83c58fb55f9693b2548ffbe4dd2db3b2d7c800db",
"size": "10101",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hotspotter/encounter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3321"
},
{
"name": "C++",
"bytes": "175"
},
{
"name": "CMake",
"bytes": "870"
},
{
"name": "Inno Setup",
"bytes": "3248"
},
{
"name": "Python",
"bytes": "2044804"
},
{
"name": "Shell",
"bytes": "17534"
}
],
"symlink_target": ""
} |
"""Module containing utils to get and check lab status."""
import json
import logging
import re
import time
import urllib
from chromite.cbuildbot import constants
logger = logging.getLogger('chromite')
class LabIsDownException(Exception):
"""Raised when the Lab is Down."""
class BoardIsDisabledException(Exception):
"""Raised when a certain board is disabled in the lab."""
def GetLabStatus(max_attempts=5):
"""Grabs the current lab status and message.
Adopted from autotest/files/client/common_lib/site_utils.py
Args:
max_attempts: max attempts to hit the lab status url.
Returns:
a dict with keys 'lab_is_up' and 'message'. lab_is_up points
to a boolean and message points to a string.
"""
status_url = constants.LAB_STATUS_URL
result = {'lab_is_up': True, 'message': ''}
retry_waittime = 1
for _ in range(max_attempts):
try:
response = urllib.urlopen(status_url)
except IOError as e:
logger.log(logging.WARNING,
'Error occurred when grabbing the lab status: %s.', e)
time.sleep(retry_waittime)
continue
# Check for successful response code.
code = response.getcode()
if code == 200:
data = json.load(response)
result['lab_is_up'] = data['general_state'] in ('open', 'throttled')
result['message'] = data['message']
return result
else:
logger.log(logging.WARNING,
'Get HTTP code %d when grabbing the lab status from %s',
code, status_url)
time.sleep(retry_waittime)
# We go ahead and say the lab is open if we can't get the status.
logger.log(logging.WARNING, 'Could not get a status from %s', status_url)
return result
def CheckLabStatus(board=None):
"""Check if the lab is up and if we can schedule suites to run.
Also checks if the lab is disabled for that particular board, and if so
will raise an error to prevent new suites from being scheduled for that
board. Adopted from autotest/files/client/common_lib/site_utils.py
Args:
board: board name that we want to check the status of.
Raises:
LabIsDownException if the lab is not up.
BoardIsDisabledException if the desired board is currently disabled.
"""
# First check if the lab is up.
lab_status = GetLabStatus()
if not lab_status['lab_is_up']:
raise LabIsDownException('Chromium OS Lab is currently not up: '
'%s.' % lab_status['message'])
# Check if the board we wish to use is disabled.
# Lab messages should be in the format of:
# Lab is 'status' [boards not to be ran] (comment). Example:
# Lab is Open [stumpy, kiev, x86-alex] (power_resume rtc causing duts to go
# down)
boards_are_disabled = re.search('\[(.*)\]', lab_status['message'])
if board and boards_are_disabled:
if board in boards_are_disabled.group(1):
raise BoardIsDisabledException('Chromium OS Lab is '
'currently not allowing suites to be scheduled on board '
'%s: %s' % (board, lab_status['message']))
return
| {
"content_hash": "ab073b5ec5f4629e74d8897f239f6054",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 77,
"avg_line_length": 32.45744680851064,
"alnum_prop": 0.6719108489019994,
"repo_name": "bpsinc-native/src_third_party_chromite",
"id": "708f171083d937c28e43ee3fc84ba2e98de096ca",
"size": "3221",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cbuildbot/lab_status.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "85"
},
{
"name": "HTML",
"bytes": "2661"
},
{
"name": "Python",
"bytes": "3534807"
},
{
"name": "Shell",
"bytes": "24031"
}
],
"symlink_target": ""
} |
from six import StringIO
from django.core.files.base import ContentFile
from django_downloadview import VirtualDownloadView
from django_downloadview import VirtualFile
from django_downloadview import TextIteratorIO
class TextDownloadView(VirtualDownloadView):
def get_file(self):
"""Return :class:`django.core.files.base.ContentFile` object."""
return ContentFile(b"Hello world!\n", name='hello-world.txt')
class StringIODownloadView(VirtualDownloadView):
def get_file(self):
"""Return wrapper on ``six.StringIO`` object."""
file_obj = StringIO(u"Hello world!\n")
return VirtualFile(file_obj, name='hello-world.txt')
def generate_hello():
yield u'Hello '
yield u'world!'
yield u'\n'
class GeneratedDownloadView(VirtualDownloadView):
def get_file(self):
"""Return wrapper on ``StringIteratorIO`` object."""
file_obj = TextIteratorIO(generate_hello())
return VirtualFile(file_obj, name='hello-world.txt')
| {
"content_hash": "df97cd35b870ad0f56c53e3ee349f49e",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 72,
"avg_line_length": 30.333333333333332,
"alnum_prop": 0.7092907092907093,
"repo_name": "fladi/django-downloadview",
"id": "3dc8ed2f2274e57e4fbb547a9b2c94d9fcbe205e",
"size": "1001",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "demo/demoproject/virtual/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "923"
},
{
"name": "Makefile",
"bytes": "2041"
},
{
"name": "Python",
"bytes": "145516"
}
],
"symlink_target": ""
} |
"""[DEPRECATED] Utilities for connecting to kernels
Moved to IPython.kernel.connect
"""
import warnings
warnings.warn("IPython.lib.kernel moved to IPython.kernel.connect in IPython 1.0",
DeprecationWarning
)
from IPython.kernel.connect import *
| {
"content_hash": "aa0d4bf4d86e23d6f9457f35a9343db5",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 82,
"avg_line_length": 25.09090909090909,
"alnum_prop": 0.7101449275362319,
"repo_name": "mattvonrocketstein/smash",
"id": "f1e623fc82ed1c18873acced9acb4f9069c0ea2c",
"size": "276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smashlib/ipy3x/lib/kernel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "162188"
},
{
"name": "HTML",
"bytes": "32106"
},
{
"name": "JavaScript",
"bytes": "1615935"
},
{
"name": "Makefile",
"bytes": "550"
},
{
"name": "Python",
"bytes": "4934398"
},
{
"name": "Shell",
"bytes": "2990"
}
],
"symlink_target": ""
} |
import os
from app import create_app, db
from app.models import User, Role, Permission
from flask.ext.script import Manager, Shell
from flask.ext.migrate import Migrate, MigrateCommand
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db, User=User, Role=Role, Permission=Permission)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if __name__ == '__main__':
manager.run()
| {
"content_hash": "1834841b534d1a2a33e66ebeda1dde74",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 76,
"avg_line_length": 27.444444444444443,
"alnum_prop": 0.7219973009446694,
"repo_name": "jeromefiot/FEED2TW",
"id": "30e9a36a2474a79b742932d9e48a8101ab43ec59",
"size": "763",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2764"
},
{
"name": "HTML",
"bytes": "28805"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "59055"
}
],
"symlink_target": ""
} |
from mycompany.common.auth import api
# Blocking calls
user_info = api.get_user(username="testuser")
password_ok = api.check_passsword(password="Passw0rd1")
api.user_registered.listen(my_callback)
# Parallel calls
async_result1 = api.get_user.async(username="testuser")
async_result2 = api.check_passsword.async(password="Passw0rd1")
user_info = async_result1.wait()
password_ok = async_result2.wait()
# Pro
# - Presence of 'api' indicates that this is an external service
# - As api is an object, one cannot access the methods/events without the prefix
# - Simple, readable
# Con
# - Use of 'api' is by convention only
# - ...but we're all consenting adults.
# Developer tools
api.debug.enable()
api.debug.disable()
# Enables/disables debugging for calls using this api instance
api.debug.info("get_user")
# Shows: number of consumers, consumer information, last call timestamp, last call args, last call response,
# last handled by
api.debug.trace("get_user", username="testuser")
# Shows:
# - "WARNING: Only works when working with other warren consumers & producers"
# - Total number of handlers listening for this api call/event [1]
# - Raw message as sent to broker
# - Expecting response to tcp://10.1.2.3:54142
# - Raw message received by rabbitmq [1]
# - "Waiting for debug information from handlers..." [3]
# - Consumer ---> Message received from by PID at HOST
# - Consumer ---> Raw message as received from broker: ...
# - Consumer ---> Message being handled by implementation mycompany.common.auth.AuthApi.get_user()
# - Consumer ---> Implementation returned result: ...
# - Consumer ---> Returning result to tcp://10.1.2.3:54142
# - Consumer ---> Acknowledgement sent to broker
# - Consumer ---> All done
# - Response received from consumer: ...
# - Done
# [1] Requires a debug back chanel of some sort (i.e. a debug exchange)
# [2] Creates a temporary queue on the relevant exchange.
# [3] The debugger sends the message with a 'debug-to' header, which logs back
# via ZeroMQ
| {
"content_hash": "09000a5c821ed218cb45a12092296f4c",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 108,
"avg_line_length": 37.142857142857146,
"alnum_prop": 0.6990384615384615,
"repo_name": "adamcharnock/lightbus",
"id": "34c2f4805b19f2c7b9c84b7aff22f23b3434899c",
"size": "2128",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lightbus_experiments/potential_use.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "710699"
}
],
"symlink_target": ""
} |
"""Estimator classes for BoostedTrees."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import functools
import numpy as np
from tensorflow.core.kernels.boosted_trees import boosted_trees_pb2
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.canned import boosted_trees_utils
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import boosted_trees_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.array_ops import identity as tf_identity
from tensorflow.python.ops.losses import losses
from tensorflow.python.summary import summary
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
from tensorflow.python.util.tf_export import estimator_export
# TODO(nponomareva): Reveal pruning params here.
_TreeHParams = collections.namedtuple('TreeHParams', [
'n_trees', 'max_depth', 'learning_rate', 'l1', 'l2', 'tree_complexity',
'min_node_weight', 'center_bias', 'pruning_mode'
])
_HOLD_FOR_MULTI_CLASS_SUPPORT = object()
_HOLD_FOR_MULTI_DIM_SUPPORT = object()
_DUMMY_NUM_BUCKETS = -1
_DUMMY_NODE_ID = -1
def _get_transformed_features(features, sorted_feature_columns):
"""Gets the transformed features from features/feature_columns pair.
Args:
features: a dicionary of name to Tensor.
sorted_feature_columns: a list/set of tf.feature_column, sorted by name.
Returns:
result_features: a list of the transformed features, sorted by the name.
Raises:
ValueError: when unsupported features/columns are tried.
"""
# pylint:disable=protected-access
transformed_features = feature_column_lib._transform_features(
features, sorted_feature_columns)
result_features = []
for column in sorted_feature_columns:
if isinstance(column, feature_column_lib._BucketizedColumn):
source_name = column.source_column.name
squeezed_tensor = array_ops.squeeze(transformed_features[column], axis=1)
if len(squeezed_tensor.shape) > 1:
raise ValueError('For now, only supports features equivalent to rank 1 '
'but column `{}` got: {}'.format(
source_name, features[source_name].shape))
result_features.append(squeezed_tensor)
elif isinstance(column, feature_column_lib._IndicatorColumn):
source_name = column.categorical_column.name
tensor = math_ops.to_int32(transformed_features[column])
if len(tensor.shape) > 2:
raise ValueError('Rank of indicator column must be no more than 2, '
'but column `{}` got: {}'.format(
source_name, features[source_name].shape))
unstacked = array_ops.unstack(tensor, axis=1)
result_features.extend(unstacked)
else:
raise ValueError(
'For now, only bucketized_column and indicator_column is supported '
'but got: {}'.format(column))
# pylint:enable=protected-access
return result_features
def _local_variable(initial_value, name=None):
"""Stores a tensor as a local Variable for faster read."""
result = variable_scope.variable(
initial_value=initial_value,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
validate_shape=False,
name=name)
if isinstance(initial_value, ops.Tensor):
# Match the resulting variable's shape if the initial_value is a Tensor.
result.set_shape(initial_value.shape)
return result
def _group_features_by_num_buckets(sorted_feature_columns):
"""Groups feature ids by the number of buckets.
Derives the feature ids based on iterating through ordered feature columns
and groups them by the number of buckets each feature require. Returns a
sorted list of buckets and a list of lists of feature ids for each of those
buckets.
Args:
sorted_feature_columns: a list/set of tf.feature_column sorted by name.
Returns:
bucket_size_list: a list of required bucket sizes.
feature_ids_list: a list of lists of feature ids for each bucket size.
Raises:
ValueError: when unsupported features columns are provided.
"""
bucket_size_to_feature_ids_dict = collections.OrderedDict()
# TODO(nponomareva) for now we preserve the previous functionality and bucket
# all numeric into the same num of buckets. Can be easily changed to using
# each numeric's real buckets num, but we need to test that it does not cause
# a performance hit.
# We will replace this dummy key with the real max after we calculate it.
bucket_size_to_feature_ids_dict[_DUMMY_NUM_BUCKETS] = []
max_buckets_for_bucketized = 2
max_buckets_for_indicator = 2
feature_idx = 0
# pylint:disable=protected-access
for column in sorted_feature_columns:
if isinstance(column, feature_column_lib._IndicatorColumn):
num_categorical_features = column.categorical_column._num_buckets
if max_buckets_for_indicator not in bucket_size_to_feature_ids_dict:
bucket_size_to_feature_ids_dict[max_buckets_for_indicator] = []
for _ in range(num_categorical_features):
# We use bucket size of 2 for categorical.
bucket_size_to_feature_ids_dict[max_buckets_for_indicator].append(
feature_idx)
feature_idx += 1
elif isinstance(column, feature_column_lib._BucketizedColumn):
max_buckets_for_bucketized = max(max_buckets_for_bucketized,
len(column.boundaries) + 1)
bucket_size_to_feature_ids_dict[_DUMMY_NUM_BUCKETS].append(feature_idx)
feature_idx += 1
elif not isinstance(column, feature_column_lib._IndicatorColumn): # pylint:disable=protected-access
raise ValueError(
'For now, only bucketized_column and indicator column are supported '
'but got: {}'.format(column))
# pylint:enable=protected-access
# Replace the dummy key with the real max num of buckets for all bucketized
# columns.
if max_buckets_for_bucketized not in bucket_size_to_feature_ids_dict:
bucket_size_to_feature_ids_dict[max_buckets_for_bucketized] = []
bucket_size_to_feature_ids_dict[max_buckets_for_bucketized].extend(
bucket_size_to_feature_ids_dict[_DUMMY_NUM_BUCKETS])
del bucket_size_to_feature_ids_dict[_DUMMY_NUM_BUCKETS]
feature_ids_list = list(bucket_size_to_feature_ids_dict.values())
bucket_size_list = list(bucket_size_to_feature_ids_dict.keys())
return bucket_size_list, feature_ids_list
def _calculate_num_features(sorted_feature_columns):
num_features = 0
for column in sorted_feature_columns:
if isinstance(column, feature_column_lib._IndicatorColumn): # pylint:disable=protected-access
num_features += column.categorical_column._num_buckets # pylint:disable=protected-access
else:
num_features += 1
return num_features
def _generate_feature_name_mapping(sorted_feature_columns):
"""Return a list of feature name for feature ids.
Args:
sorted_feature_columns: a list/set of tf.feature_column sorted by name.
Returns:
feature_name_mapping: a list of feature names indexed by the feature ids.
Raises:
ValueError: when unsupported features/columns are tried.
"""
names = []
for column in sorted_feature_columns:
if isinstance(column, feature_column_lib._IndicatorColumn): # pylint:disable=protected-access
categorical_column = column.categorical_column
if isinstance(categorical_column,
feature_column_lib._VocabularyListCategoricalColumn): # pylint:disable=protected-access
for value in categorical_column.vocabulary_list:
names.append('{}:{}'.format(column.name, value))
elif isinstance(categorical_column,
feature_column_lib._BucketizedColumn): # pylint:disable=protected-access
boundaries = [-np.inf] + list(categorical_column.boundaries) + [np.inf]
for pair in zip(boundaries[:-1], boundaries[1:]):
names.append('{}:{}'.format(column.name, pair))
else:
for num in range(categorical_column._num_buckets): # pylint:disable=protected-access
names.append('{}:{}'.format(column.name, num))
elif isinstance(column, feature_column_lib._BucketizedColumn):
names.append(column.name)
else:
raise ValueError(
'For now, only bucketized_column and indicator_column is supported '
'but got: {}'.format(column))
return names
def _cache_transformed_features(features, sorted_feature_columns, batch_size):
"""Transform features and cache, then returns (cached_features, cache_op)."""
num_features = _calculate_num_features(sorted_feature_columns)
cached_features = [
_local_variable(
array_ops.zeros([batch_size], dtype=dtypes.int32),
name='cached_feature_{}'.format(i)) for i in range(num_features)
]
are_features_cached = _local_variable(False, name='are_features_cached')
def cache_features_and_return():
"""Caches transformed features.
The intention is to hide get_transformed_features() from the graph by
caching the result except the first step, since bucketize operation
(inside get_transformed_features) is expensive.
Returns:
input_feature_list: a list of input features.
cache_flip_op: op to add to graph to make sure cache update is included to
the graph.
"""
transformed_features = _get_transformed_features(features,
sorted_feature_columns)
cached = [
state_ops.assign(cached_features[i], transformed_features[i])
for i in range(num_features)
]
# TODO(youngheek): Try other combination of dependencies so that the
# function returns a single result, not a tuple.
with ops.control_dependencies(cached):
cache_flip_op = are_features_cached.assign(True)
return cached, cache_flip_op
input_feature_list, cache_flip_op = control_flow_ops.cond(
are_features_cached, lambda: (cached_features, control_flow_ops.no_op()),
cache_features_and_return)
return input_feature_list, cache_flip_op
class _CacheTrainingStatesUsingHashTable(object):
"""Caching logits, etc. using MutableHashTable."""
def __init__(self, example_ids, logits_dimension):
"""Creates a cache with the given configuration.
It maintains a MutableDenseHashTable for all values.
The API lookup() and insert() would have those specs,
tree_ids: shape=[batch_size], dtype=int32
node_ids: shape=[batch_size], dtype=int32
logits: shape=[batch_size, logits_dimension], dtype=float32
However in the MutableDenseHashTable, ids are bitcasted into float32 and
all values are concatenated as a single tensor (of float32).
Hence conversion happens internally before inserting to the HashTable and
after lookup from it.
Args:
example_ids: a Rank 1 tensor to be used as a key of the cache.
logits_dimension: a constant (int) for the dimension of logits.
Raises:
ValueError: if example_ids is other than int64 or string.
"""
if dtypes.as_dtype(dtypes.int64).is_compatible_with(example_ids.dtype):
empty_key = -1 << 62
elif dtypes.as_dtype(dtypes.string).is_compatible_with(example_ids.dtype):
empty_key = ''
else:
raise ValueError(
'Unsupported example_id_feature dtype %s.' % example_ids.dtype)
# Cache holds latest <tree_id, node_id, logits> for each example.
# tree_id and node_id are both int32 but logits is a float32.
# To reduce the overhead, we store all of them together as float32 and
# bitcast the ids to int32.
self._table_ref = lookup_ops.mutable_dense_hash_table_v2(
empty_key=empty_key, value_dtype=dtypes.float32, value_shape=[3])
self._example_ids = ops.convert_to_tensor(example_ids)
if self._example_ids.shape.ndims not in (None, 1):
raise ValueError(
'example_id should have rank 1, but got %s' % self._example_ids)
self._logits_dimension = logits_dimension
def lookup(self):
"""Returns cached_tree_ids, cached_node_ids, cached_logits."""
cached_tree_ids, cached_node_ids, cached_logits = array_ops.split(
lookup_ops.lookup_table_find_v2(
self._table_ref,
self._example_ids,
default_value=[0.0, _DUMMY_NODE_ID, 0.0]),
[1, 1, self._logits_dimension],
axis=1)
cached_tree_ids = array_ops.squeeze(
array_ops.bitcast(cached_tree_ids, dtypes.int32))
cached_node_ids = array_ops.squeeze(
array_ops.bitcast(cached_node_ids, dtypes.int32))
if self._example_ids.shape.ndims is not None:
cached_logits.set_shape(
[self._example_ids.shape[0], self._logits_dimension])
return (cached_tree_ids, cached_node_ids, cached_logits)
def insert(self, tree_ids, node_ids, logits):
"""Inserts values and returns the op."""
insert_op = lookup_ops.lookup_table_insert_v2(
self._table_ref, self._example_ids,
array_ops.concat(
[
array_ops.expand_dims(
array_ops.bitcast(tree_ids, dtypes.float32), 1),
array_ops.expand_dims(
array_ops.bitcast(node_ids, dtypes.float32), 1),
logits,
],
axis=1,
name='value_concat_for_cache_insert'))
return insert_op
class _CacheTrainingStatesUsingVariables(object):
"""Caching logits, etc. using Variables."""
def __init__(self, batch_size, logits_dimension):
"""Creates a cache with the given configuration.
It maintains three variables, tree_ids, node_ids, logits, for caching.
tree_ids: shape=[batch_size], dtype=int32
node_ids: shape=[batch_size], dtype=int32
logits: shape=[batch_size, logits_dimension], dtype=float32
Note, this can be used only with in-memory data setting.
Args:
batch_size: `int`, the size of the cache.
logits_dimension: a constant (int) for the dimension of logits.
"""
self._logits_dimension = logits_dimension
self._tree_ids = _local_variable(
array_ops.zeros([batch_size], dtype=dtypes.int32),
name='tree_ids_cache')
self._node_ids = _local_variable(
_DUMMY_NODE_ID * array_ops.ones([batch_size], dtype=dtypes.int32),
name='node_ids_cache')
self._logits = _local_variable(
array_ops.zeros([batch_size, logits_dimension], dtype=dtypes.float32),
name='logits_cache')
def lookup(self):
"""Returns cached_tree_ids, cached_node_ids, cached_logits."""
return (self._tree_ids, self._node_ids, self._logits)
def insert(self, tree_ids, node_ids, logits):
"""Inserts values and returns the op."""
return control_flow_ops.group(
[
self._tree_ids.assign(tree_ids),
self._node_ids.assign(node_ids),
self._logits.assign(logits)
],
name='cache_insert')
class _StopAtAttemptsHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at the number of attempts."""
def __init__(self, num_finalized_trees_tensor, num_attempted_layers_tensor,
max_trees, max_depth):
self._num_finalized_trees_tensor = num_finalized_trees_tensor
self._num_attempted_layers_tensor = num_attempted_layers_tensor
self._max_trees = max_trees
self._max_depth = max_depth
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(
[self._num_finalized_trees_tensor, self._num_attempted_layers_tensor])
def after_run(self, run_context, run_values):
# num_* tensors should be retrieved by a separate session than the training
# one, in order to read the values after growing.
# So, if it's approaching to the limit, get the actual value by additional
# session.
num_finalized_trees, num_attempted_layers = run_values.results
if (num_finalized_trees >= self._max_trees - 1 or
num_attempted_layers > 2 * self._max_trees * self._max_depth - 1):
num_finalized_trees, num_attempted_layers = run_context.session.run(
[self._num_finalized_trees_tensor, self._num_attempted_layers_tensor])
if (num_finalized_trees >= self._max_trees or
num_attempted_layers > 2 * self._max_trees * self._max_depth):
run_context.request_stop()
def _get_max_splits(tree_hparams):
"""Calculates the max possible number of splits based on tree params."""
# maximum number of splits possible in the whole tree =2^(D-1)-1
max_splits = (1 << tree_hparams.max_depth) - 1
return max_splits
class _EnsembleGrower(object):
"""Abstract base class for different types of ensemble growers.
Use it to receive training ops for growing and centering bias, depending
on the implementation (for example, in memory or accumulator-based
distributed):
grower = ...create subclass grower(tree_ensemble, tree_hparams)
grow_op = grower.grow_tree(stats_summaries_list, feature_ids_list,
last_layer_nodes_range)
training_ops.append(grow_op)
"""
def __init__(self, tree_ensemble, tree_hparams, feature_ids_list):
"""Initializes a grower object.
Args:
tree_ensemble: A TreeEnsemble variable.
tree_hparams: TODO. collections.namedtuple for hyper parameters.
feature_ids_list: a list of lists of feature ids for each bucket size.
Raises:
ValueError: when pruning mode is invalid or pruning is used and no tree
complexity is set.
"""
self._tree_ensemble = tree_ensemble
self._tree_hparams = tree_hparams
self._feature_ids_list = feature_ids_list
# pylint: disable=protected-access
self._pruning_mode_parsed = boosted_trees_ops.PruningMode.from_str(
tree_hparams.pruning_mode)
if tree_hparams.tree_complexity > 0:
if self._pruning_mode_parsed == boosted_trees_ops.PruningMode.NO_PRUNING:
raise ValueError(
'Tree complexity have no effect unless pruning mode is chosen.')
else:
if self._pruning_mode_parsed != boosted_trees_ops.PruningMode.NO_PRUNING:
raise ValueError('For pruning, tree_complexity must be positive.')
# pylint: enable=protected-access
@abc.abstractmethod
def center_bias(self, center_bias_var, gradients, hessians):
"""Centers bias, if ready, based on statistics.
Args:
center_bias_var: A variable that will be updated when bias centering
finished.
gradients: A rank 2 tensor of gradients.
hessians: A rank 2 tensor of hessians.
Returns:
An operation for centering bias.
"""
@abc.abstractmethod
def grow_tree(self, stats_summaries_list, last_layer_nodes_range):
"""Grows a tree, if ready, based on provided statistics.
Args:
stats_summaries_list: List of stats summary tensors, representing sums of
gradients and hessians for each feature bucket.
last_layer_nodes_range: A tensor representing ids of the nodes in the
current layer, to be split.
Returns:
An op for growing a tree.
"""
def chief_init_op(self):
"""Ops that chief needs to run to initialize the state."""
return control_flow_ops.no_op()
# ============= Helper methods ===========
def _center_bias_fn(self, center_bias_var, mean_gradients, mean_hessians):
"""Updates the ensembles and cache (if needed) with logits prior."""
continue_centering = boosted_trees_ops.center_bias(
self._tree_ensemble.resource_handle,
mean_gradients=mean_gradients,
mean_hessians=mean_hessians,
l1=self._tree_hparams.l1,
l2=self._tree_hparams.l2)
return center_bias_var.assign(continue_centering)
def _grow_tree_from_stats_summaries(self, stats_summaries_list,
last_layer_nodes_range):
"""Updates ensemble based on the best gains from stats summaries."""
node_ids_per_feature = []
gains_list = []
thresholds_list = []
left_node_contribs_list = []
right_node_contribs_list = []
all_feature_ids = []
assert len(stats_summaries_list) == len(self._feature_ids_list)
max_splits = _get_max_splits(self._tree_hparams)
for i, feature_ids in enumerate(self._feature_ids_list):
(numeric_node_ids_per_feature, numeric_gains_list,
numeric_thresholds_list, numeric_left_node_contribs_list,
numeric_right_node_contribs_list) = (
boosted_trees_ops.calculate_best_gains_per_feature(
node_id_range=last_layer_nodes_range,
stats_summary_list=stats_summaries_list[i],
l1=self._tree_hparams.l1,
l2=self._tree_hparams.l2,
tree_complexity=self._tree_hparams.tree_complexity,
min_node_weight=self._tree_hparams.min_node_weight,
max_splits=max_splits))
all_feature_ids += feature_ids
node_ids_per_feature += numeric_node_ids_per_feature
gains_list += numeric_gains_list
thresholds_list += numeric_thresholds_list
left_node_contribs_list += numeric_left_node_contribs_list
right_node_contribs_list += numeric_right_node_contribs_list
grow_op = boosted_trees_ops.update_ensemble(
# Confirm if local_tree_ensemble or tree_ensemble should be used.
self._tree_ensemble.resource_handle,
feature_ids=all_feature_ids,
node_ids=node_ids_per_feature,
gains=gains_list,
thresholds=thresholds_list,
left_node_contribs=left_node_contribs_list,
right_node_contribs=right_node_contribs_list,
learning_rate=self._tree_hparams.learning_rate,
max_depth=self._tree_hparams.max_depth,
pruning_mode=self._pruning_mode_parsed)
return grow_op
class _InMemoryEnsembleGrower(_EnsembleGrower):
"""An in-memory ensemble grower."""
def __init__(self, tree_ensemble, tree_hparams, feature_ids_list):
super(_InMemoryEnsembleGrower, self).__init__(
tree_ensemble=tree_ensemble, tree_hparams=tree_hparams,
feature_ids_list=feature_ids_list)
def center_bias(self, center_bias_var, gradients, hessians):
# For in memory, we already have a full batch of gradients and hessians,
# so just take a mean and proceed with centering.
mean_gradients = array_ops.expand_dims(
math_ops.reduce_mean(gradients, 0), 0)
mean_heassians = array_ops.expand_dims(math_ops.reduce_mean(hessians, 0), 0)
return self._center_bias_fn(center_bias_var, mean_gradients, mean_heassians)
def grow_tree(self, stats_summaries_list, last_layer_nodes_range):
# For in memory, we already have full data in one batch, so we can grow the
# tree immediately.
return self._grow_tree_from_stats_summaries(
stats_summaries_list, last_layer_nodes_range)
class _AccumulatorEnsembleGrower(_EnsembleGrower):
"""An accumulator based ensemble grower."""
def __init__(self, tree_ensemble, tree_hparams, stamp_token,
n_batches_per_layer, bucket_size_list, is_chief, center_bias,
feature_ids_list):
super(_AccumulatorEnsembleGrower, self).__init__(
tree_ensemble=tree_ensemble, tree_hparams=tree_hparams,
feature_ids_list=feature_ids_list)
self._stamp_token = stamp_token
self._n_batches_per_layer = n_batches_per_layer
self._bucket_size_list = bucket_size_list
self._is_chief = is_chief
self._growing_accumulators = []
self._chief_init_ops = []
max_splits = _get_max_splits(self._tree_hparams)
for i, feature_ids in enumerate(self._feature_ids_list):
accumulator = data_flow_ops.ConditionalAccumulator(
dtype=dtypes.float32,
# The stats consist of grads and hessians (the last dimension).
shape=[len(feature_ids), max_splits, self._bucket_size_list[i], 2],
shared_name='numeric_stats_summary_accumulator_' + str(i))
self._chief_init_ops.append(
accumulator.set_global_step(self._stamp_token))
self._growing_accumulators.append(accumulator)
self._center_bias = center_bias
if center_bias:
self._bias_accumulator = data_flow_ops.ConditionalAccumulator(
dtype=dtypes.float32,
# The stats consist of grads and hessians means only.
# TODO(nponomareva): this will change for a multiclass
shape=[2, 1],
shared_name='bias_accumulator')
self._chief_init_ops.append(
self._bias_accumulator.set_global_step(self._stamp_token))
def center_bias(self, center_bias_var, gradients, hessians):
# For not in memory situation, we need to accumulate enough of batches first
# before proceeding with centering bias.
# Create an accumulator.
if not self._center_bias:
raise RuntimeError('center_bias called but bias centering is disabled.')
bias_dependencies = []
grads_and_hess = array_ops.stack([gradients, hessians], axis=0)
grads_and_hess = math_ops.reduce_mean(grads_and_hess, axis=1)
apply_grad = self._bias_accumulator.apply_grad(
grads_and_hess, self._stamp_token)
bias_dependencies.append(apply_grad)
# Center bias if enough batches were processed.
with ops.control_dependencies(bias_dependencies):
if not self._is_chief:
return control_flow_ops.no_op()
def _set_accumulators_stamp():
return control_flow_ops.group(
[acc.set_global_step(self._stamp_token + 1) for acc in
self._growing_accumulators])
def center_bias_from_accumulator():
accumulated = array_ops.unstack(self._bias_accumulator.take_grad(1),
axis=0)
center_bias_op = self._center_bias_fn(
center_bias_var,
array_ops.expand_dims(accumulated[0], 0),
array_ops.expand_dims(accumulated[1], 0))
with ops.control_dependencies([center_bias_op]):
return control_flow_ops.cond(center_bias_var,
control_flow_ops.no_op,
_set_accumulators_stamp)
center_bias_op = control_flow_ops.cond(
math_ops.greater_equal(self._bias_accumulator.num_accumulated(),
self._n_batches_per_layer),
center_bias_from_accumulator,
control_flow_ops.no_op,
name='wait_until_n_batches_for_bias_accumulated')
return center_bias_op
def grow_tree(self, stats_summaries_list, last_layer_nodes_range):
dependencies = []
for i in range(len(self._feature_ids_list)):
stats_summaries = stats_summaries_list[i]
apply_grad = self._growing_accumulators[i].apply_grad(
array_ops.stack(stats_summaries, axis=0), self._stamp_token)
dependencies.append(apply_grad)
# Grow the tree if enough batches is accumulated.
with ops.control_dependencies(dependencies):
if not self._is_chief:
return control_flow_ops.no_op()
min_accumulated = math_ops.reduce_min(
array_ops.stack([acc.num_accumulated() for acc in
self._growing_accumulators]))
def grow_tree_from_accumulated_summaries_fn():
"""Updates tree with the best layer from accumulated summaries."""
# Take out the accumulated summaries from the accumulator and grow.
stats_summaries_list = []
stats_summaries_list = [
array_ops.unstack(accumulator.take_grad(1), axis=0)
for accumulator in self._growing_accumulators
]
grow_op = self._grow_tree_from_stats_summaries(
stats_summaries_list, last_layer_nodes_range
)
return grow_op
grow_model = control_flow_ops.cond(
math_ops.greater_equal(min_accumulated, self._n_batches_per_layer),
grow_tree_from_accumulated_summaries_fn,
control_flow_ops.no_op,
name='wait_until_n_batches_accumulated')
return grow_model
def chief_init_op(self):
"""Ops that chief needs to run to initialize the state."""
return control_flow_ops.group(self._chief_init_ops)
def _bt_model_fn(
features,
labels,
mode,
head,
feature_columns,
tree_hparams,
n_batches_per_layer,
config,
closed_form_grad_and_hess_fn=None,
example_id_column_name=None,
# TODO(youngheek): replace this later using other options.
train_in_memory=False,
name='boosted_trees'):
"""Gradient Boosted Trees model_fn.
Args:
features: dict of `Tensor`.
labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
dtype `int32` or `int64` in the range `[0, n_classes)`.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
head: A `head_lib._Head` instance.
feature_columns: Iterable of `feature_column._FeatureColumn` model inputs.
tree_hparams: TODO. collections.namedtuple for hyper parameters.
n_batches_per_layer: A `Tensor` of `int64`. Each layer is built after at
least n_batches_per_layer accumulations.
config: `RunConfig` object to configure the runtime settings.
closed_form_grad_and_hess_fn: a function that accepts logits and labels
and returns gradients and hessians. By default, they are created by
tf.gradients() from the loss.
example_id_column_name: Name of the feature for a unique ID per example.
Currently experimental -- not exposed to public API.
train_in_memory: `bool`, when true, it assumes the dataset is in memory,
i.e., input_fn should return the entire dataset as a single batch, and
also n_batches_per_layer should be set as 1.
name: Name to use for the model.
Returns:
An `EstimatorSpec` instance.
Raises:
ValueError: mode or params are invalid, or features has the wrong type.
"""
sorted_feature_columns = sorted(feature_columns, key=lambda tc: tc.name)
with ops.name_scope(name) as name:
# Prepare.
global_step = training_util.get_or_create_global_step()
bucket_size_list, feature_ids_list = _group_features_by_num_buckets(
sorted_feature_columns)
# Create Ensemble resources.
tree_ensemble = boosted_trees_ops.TreeEnsemble(name=name)
# Create logits.
if mode != model_fn_lib.ModeKeys.TRAIN:
input_feature_list = _get_transformed_features(features,
sorted_feature_columns)
logits = boosted_trees_ops.predict(
# For non-TRAIN mode, ensemble doesn't change after initialization,
# so no local copy is needed; using tree_ensemble directly.
tree_ensemble_handle=tree_ensemble.resource_handle,
bucketized_features=input_feature_list,
logits_dimension=head.logits_dimension)
return head.create_estimator_spec(
features=features,
mode=mode,
labels=labels,
train_op_fn=control_flow_ops.no_op,
logits=logits)
# ============== Training graph ==============
center_bias = tree_hparams.center_bias
is_single_machine = (config.num_worker_replicas <= 1)
if train_in_memory:
assert n_batches_per_layer == 1, (
'When train_in_memory is enabled, input_fn should return the entire '
'dataset as a single batch, and n_batches_per_layer should be set as '
'1.')
if (not config.is_chief or config.num_worker_replicas > 1 or
config.num_ps_replicas > 0):
raise ValueError('train_in_memory is supported only for '
'non-distributed training.')
worker_device = control_flow_ops.no_op().device
train_op = []
# Extract input features and set up cache for training.
training_state_cache = None
if train_in_memory:
# cache transformed features as well for in-memory training.
batch_size = array_ops.shape(labels)[0]
input_feature_list, input_cache_op = (
_cache_transformed_features(features, sorted_feature_columns,
batch_size))
train_op.append(input_cache_op)
training_state_cache = _CacheTrainingStatesUsingVariables(
batch_size, head.logits_dimension)
else:
input_feature_list = _get_transformed_features(features,
sorted_feature_columns)
if example_id_column_name:
example_ids = features[example_id_column_name]
training_state_cache = _CacheTrainingStatesUsingHashTable(
example_ids, head.logits_dimension)
if training_state_cache:
cached_tree_ids, cached_node_ids, cached_logits = (
training_state_cache.lookup())
else:
# Always start from the beginning when no cache is set up.
batch_size = array_ops.shape(labels)[0]
cached_tree_ids, cached_node_ids, cached_logits = (
array_ops.zeros([batch_size], dtype=dtypes.int32),
_DUMMY_NODE_ID * array_ops.ones([batch_size], dtype=dtypes.int32),
array_ops.zeros(
[batch_size, head.logits_dimension], dtype=dtypes.float32))
if is_single_machine:
local_tree_ensemble = tree_ensemble
ensemble_reload = control_flow_ops.no_op()
else:
# Have a local copy of ensemble for the distributed setting.
with ops.device(worker_device):
local_tree_ensemble = boosted_trees_ops.TreeEnsemble(
name=name + '_local', is_local=True)
# TODO(soroush): Do partial updates if this becomes a bottleneck.
ensemble_reload = local_tree_ensemble.deserialize(
*tree_ensemble.serialize())
with ops.control_dependencies([ensemble_reload]):
(stamp_token, num_trees, num_finalized_trees, num_attempted_layers,
last_layer_nodes_range) = local_tree_ensemble.get_states()
partial_logits, tree_ids, node_ids = boosted_trees_ops.training_predict(
tree_ensemble_handle=local_tree_ensemble.resource_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=input_feature_list,
logits_dimension=head.logits_dimension)
logits = cached_logits + partial_logits
if train_in_memory:
grower = _InMemoryEnsembleGrower(tree_ensemble, tree_hparams,
feature_ids_list=feature_ids_list)
else:
grower = _AccumulatorEnsembleGrower(tree_ensemble, tree_hparams,
stamp_token, n_batches_per_layer,
bucket_size_list, config.is_chief,
center_bias=center_bias,
feature_ids_list=feature_ids_list)
summary.scalar('ensemble/num_trees', num_trees)
summary.scalar('ensemble/num_finalized_trees', num_finalized_trees)
summary.scalar('ensemble/num_attempted_layers', num_attempted_layers)
# Variable that determines whether bias centering is needed.
center_bias_var = variable_scope.variable(
initial_value=center_bias, name='center_bias_needed', trainable=False,
use_resource=True)
# Create training graph.
def _train_op_fn(loss):
"""Run one training iteration."""
if training_state_cache:
# Cache logits only after center_bias is complete, if it's in progress.
train_op.append(
control_flow_ops.cond(
center_bias_var, control_flow_ops.no_op,
lambda: training_state_cache.insert(tree_ids, node_ids, logits))
)
if closed_form_grad_and_hess_fn:
gradients, hessians = closed_form_grad_and_hess_fn(logits, labels)
else:
gradients = gradients_impl.gradients(loss, logits, name='Gradients')[0]
hessians = gradients_impl.gradients(
gradients, logits, name='Hessians')[0]
# TODO(youngheek): perhaps storage could be optimized by storing stats
# with the dimension max_splits_per_layer, instead of max_splits (for the
# entire tree).
max_splits = _get_max_splits(tree_hparams)
stats_summaries_list = []
for i, feature_ids in enumerate(feature_ids_list):
num_buckets = bucket_size_list[i]
summaries = [
array_ops.squeeze(
boosted_trees_ops.make_stats_summary(
node_ids=node_ids,
gradients=gradients,
hessians=hessians,
bucketized_features_list=[input_feature_list[f]],
max_splits=max_splits,
num_buckets=num_buckets),
axis=0) for f in feature_ids
]
stats_summaries_list.append(summaries)
if center_bias:
update_model = control_flow_ops.cond(
center_bias_var,
functools.partial(
grower.center_bias,
center_bias_var,
gradients,
hessians,
),
functools.partial(grower.grow_tree, stats_summaries_list,
last_layer_nodes_range))
else:
update_model = grower.grow_tree(stats_summaries_list,
last_layer_nodes_range)
train_op.append(update_model)
with ops.control_dependencies([update_model]):
increment_global = state_ops.assign_add(global_step, 1).op
train_op.append(increment_global)
return control_flow_ops.group(train_op, name='train_op')
estimator_spec = head.create_estimator_spec(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
# Add an early stop hook.
estimator_spec = estimator_spec._replace(
training_hooks=estimator_spec.training_hooks +
(_StopAtAttemptsHook(num_finalized_trees, num_attempted_layers,
tree_hparams.n_trees, tree_hparams.max_depth),),
training_chief_hooks=[GrowerInitializationHook(grower.chief_init_op())] +
list(estimator_spec.training_chief_hooks))
return estimator_spec
class GrowerInitializationHook(session_run_hook.SessionRunHook):
"""A SessionRunHook handles initialization of `_EnsembleGrower`."""
def __init__(self, init_op):
self._init_op = init_op
def after_create_session(self, session, coord):
session.run(self._init_op)
def _create_classification_head(n_classes,
weight_column=None,
label_vocabulary=None):
"""Creates a classification head. Refer to canned.head for details on args."""
# TODO(nponomareva): Support multi-class cases.
if n_classes == 2:
# pylint: disable=protected-access
return head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
weight_column=weight_column,
label_vocabulary=label_vocabulary,
loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
# pylint: enable=protected-access
else:
raise ValueError('For now only binary classification is supported.'
'n_classes given as {}'.format(n_classes))
def _create_classification_head_and_closed_form(n_classes, weight_column,
label_vocabulary):
"""Creates a head for classifier and the closed form gradients/hessians."""
head = _create_classification_head(n_classes, weight_column, label_vocabulary)
if (n_classes == 2 and head.logits_dimension == 1 and
weight_column is None and label_vocabulary is None):
# Use the closed-form gradients/hessians for 2 class.
def _grad_and_hess_for_logloss(logits, labels):
"""A closed form gradient and hessian for logistic loss."""
# TODO(youngheek): add weights handling.
predictions = math_ops.reciprocal(math_ops.exp(-logits) + 1.0)
normalizer = math_ops.reciprocal(
math_ops.cast(array_ops.size(predictions), dtypes.float32))
labels = math_ops.cast(labels, dtypes.float32)
labels = head_lib._check_dense_labels_match_logits_and_reshape( # pylint: disable=protected-access
labels, logits, head.logits_dimension)
gradients = (predictions - labels) * normalizer
hessians = predictions * (1.0 - predictions) * normalizer
return gradients, hessians
closed_form = _grad_and_hess_for_logloss
else:
closed_form = None
return (head, closed_form)
def _create_regression_head(label_dimension, weight_column=None):
if label_dimension != 1:
raise ValueError('For now only 1 dimension regression is supported.'
'label_dimension given as {}'.format(label_dimension))
# pylint: disable=protected-access
return head_lib._regression_head(
label_dimension=label_dimension,
weight_column=weight_column,
loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
# pylint: enable=protected-access
def _compute_feature_importances_per_tree(tree, num_features):
"""Computes the importance of each feature in the tree."""
importances = np.zeros(num_features)
for node in tree.nodes:
node_type = node.WhichOneof('node')
if node_type == 'bucketized_split':
feature_id = node.bucketized_split.feature_id
importances[feature_id] += node.metadata.gain
elif node_type == 'leaf':
assert node.metadata.gain == 0
else:
raise ValueError('Unexpected split type %s', node_type)
return importances
def _compute_feature_importances(tree_ensemble, num_features, normalize):
"""Computes gain-based feature importances.
The higher the value, the more important the feature.
Args:
tree_ensemble: a trained tree ensemble, instance of proto
boosted_trees.TreeEnsemble.
num_features: The total number of feature ids.
normalize: If True, normalize the feature importances.
Returns:
sorted_feature_idx: A list of feature_id which is sorted
by its feature importance.
feature_importances: A list of corresponding feature importances.
Raises:
AssertionError: When normalize = True, if feature importances
contain negative value, or if normalization is not possible
(e.g. ensemble is empty or trees contain only a root node).
"""
tree_importances = [_compute_feature_importances_per_tree(tree, num_features)
for tree in tree_ensemble.trees]
tree_importances = np.array(tree_importances)
tree_weights = np.array(tree_ensemble.tree_weights).reshape(-1, 1)
feature_importances = np.sum(tree_importances * tree_weights, axis=0)
if normalize:
assert np.all(feature_importances >= 0), ('feature_importances '
'must be non-negative.')
normalizer = np.sum(feature_importances)
assert normalizer > 0, 'Trees are all empty or contain only a root node.'
feature_importances /= normalizer
sorted_feature_idx = np.argsort(feature_importances)[::-1]
return sorted_feature_idx, feature_importances[sorted_feature_idx]
def _bt_explanations_fn(features,
head,
sorted_feature_columns,
name='boosted_trees'):
"""Gradient Boosted Trees predict with explanations model_fn.
Args:
features: dict of `Tensor`.
head: A `head_lib._Head` instance.
sorted_feature_columns: Sorted iterable of `feature_column._FeatureColumn`
model inputs.
name: Name used for the model.
Returns:
An `EstimatorSpec` instance.
Raises:
ValueError: mode or params are invalid, or features has the wrong type.
"""
mode = model_fn_lib.ModeKeys.PREDICT
with ops.name_scope(name) as name:
# Create Ensemble resources.
tree_ensemble = boosted_trees_ops.TreeEnsemble(name=name)
input_feature_list = _get_transformed_features(features,
sorted_feature_columns)
logits = boosted_trees_ops.predict(
# For non-TRAIN mode, ensemble doesn't change after initialization,
# so no local copy is needed; using tree_ensemble directly.
tree_ensemble_handle=tree_ensemble.resource_handle,
bucketized_features=input_feature_list,
logits_dimension=head.logits_dimension)
estimator_spec = head.create_estimator_spec(
features=features,
mode=mode,
labels=None,
train_op_fn=control_flow_ops.no_op,
logits=logits)
debug_op = boosted_trees_ops.example_debug_outputs(
tree_ensemble.resource_handle,
bucketized_features=input_feature_list,
logits_dimension=head.logits_dimension)
estimator_spec.predictions[boosted_trees_utils._DEBUG_PROTO_KEY] = debug_op # pylint: disable=protected-access
return estimator_spec
class _BoostedTreesBase(estimator.Estimator):
"""Base class for boosted trees estimators.
This class is intended to keep tree-specific functions (E.g., methods for
feature importances and directional feature contributions) in one central
place.
It is not a valid (working) Estimator on its own and should only be used as a
base class.
"""
def __init__(self, model_fn, model_dir, config, feature_columns, head,
center_bias, is_classification):
"""Initializes a `_BoostedTreesBase` instance.
Args:
model_fn: model_fn: Model function. See base class for more detail.
model_dir: Directory to save model parameters, graph and etc. See base
class for more detail.
config: `estimator.RunConfig` configuration object.
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`
head: A `head_lib._Head` instance.
center_bias: Whether bias centering needs to occur. Bias centering refers
to the first node in the very first tree returning the prediction that
is aligned with the original labels distribution. For example, for
regression problems, the first node will return the mean of the labels.
For binary classification problems, it will return a logit for a prior
probability of label 1.
is_classification: If the estimator is for classification.
"""
super(_BoostedTreesBase, self).__init__(
model_fn=model_fn, model_dir=model_dir, config=config)
self._sorted_feature_columns = sorted(
feature_columns, key=lambda tc: tc.name)
self._head = head
self._n_features = _calculate_num_features(self._sorted_feature_columns)
self._names_for_feature_id = np.array(
_generate_feature_name_mapping(self._sorted_feature_columns))
self._center_bias = center_bias
self._is_classification = is_classification
def experimental_feature_importances(self, normalize=False):
"""Computes gain-based feature importances.
The higher the value, the more important the corresponding feature.
Args:
normalize: If True, normalize the feature importances.
Returns:
sorted_feature_names: 1-D array of feature name which is sorted
by its feature importance.
feature_importances: 1-D array of the corresponding feature importance.
Raises:
ValueError: When attempting to normalize on an empty ensemble
or an ensemble of trees which have no splits. Or when attempting
to normalize and feature importances have negative values.
"""
reader = checkpoint_utils.load_checkpoint(self._model_dir)
serialized = reader.get_tensor('boosted_trees:0_serialized')
if not serialized:
raise ValueError('Found empty serialized string for TreeEnsemble.'
'You should only call this method after training.')
ensemble_proto = boosted_trees_pb2.TreeEnsemble()
ensemble_proto.ParseFromString(serialized)
sorted_feature_id, importances = _compute_feature_importances(
ensemble_proto, self._n_features, normalize)
return self._names_for_feature_id[sorted_feature_id], importances
def experimental_predict_with_explanations(self,
input_fn,
predict_keys=None,
hooks=None,
checkpoint_path=None):
"""Computes model explainability outputs per example along with predictions.
Currently supports directional feature contributions (DFCs). For each
instance, DFCs indicate the aggregate contribution of each feature. See
https://arxiv.org/abs/1312.1121 and
http://blog.datadive.net/interpreting-random-forests/ for more details.
Args:
input_fn: A function that provides input data for predicting as
minibatches. See [Premade Estimators](
https://tensorflow.org/guide/premade_estimators#create_input_functions)
for more information. The function should construct and return one of
the following: * A `tf.data.Dataset` object: Outputs of `Dataset`
object must be a tuple `(features, labels)` with same constraints as
below. * A tuple `(features, labels)`: Where `features` is a `tf.Tensor`
or a dictionary of string feature name to `Tensor` and `labels` is a
`Tensor` or a dictionary of string label name to `Tensor`. Both
`features` and `labels` are consumed by `model_fn`. They should
satisfy the expectation of `model_fn` from inputs.
predict_keys: list of `str`, name of the keys to predict. It is used if
the `tf.estimator.EstimatorSpec.predictions` is a `dict`. If
`predict_keys` is used then rest of the predictions will be filtered
from the dictionary, with the exception of 'bias' and 'dfc', which will
always be in the dictionary. If `None`, returns all keys in prediction
dict, as well as two new keys 'dfc' and 'bias'.
hooks: List of `tf.train.SessionRunHook` subclass instances. Used for
callbacks inside the prediction call.
checkpoint_path: Path of a specific checkpoint to predict. If `None`, the
latest checkpoint in `model_dir` is used. If there are no checkpoints
in `model_dir`, prediction is run with newly initialized `Variables`
instead of ones restored from checkpoint.
Yields:
Evaluated values of `predictions` tensors. The `predictions` tensors will
contain at least two keys 'dfc' and 'bias' for model explanations. The
`dfc` value corresponds to the contribution of each feature to the overall
prediction for this instance (positive indicating that the feature makes
it more likely to select class 1 and negative less likely). The 'bias'
value will be the same across all the instances, corresponding to the
probability (classification) or prediction (regression) of the training
data distribution.
Raises:
ValueError: when wrong arguments are given or unsupported functionalities
are requested.
"""
if not self._center_bias:
raise ValueError('center_bias must be enabled during estimator '
'instantiation when using '
'experimental_predict_with_explanations.')
# pylint: disable=protected-access
if not self._is_classification:
identity_inverse_link_fn = self._head._inverse_link_fn in (None,
tf_identity)
# pylint:enable=protected-access
if not identity_inverse_link_fn:
raise ValueError(
'For now only identity inverse_link_fn in regression_head is '
'supported for experimental_predict_with_explanations.')
# pylint:disable=unused-argument
def new_model_fn(features, labels, mode):
return _bt_explanations_fn(features, self._head,
self._sorted_feature_columns)
# pylint:enable=unused-argument
est = estimator.Estimator(
model_fn=new_model_fn,
model_dir=self.model_dir,
config=self.config,
warm_start_from=self._warm_start_settings)
# Make sure bias and dfc will be in prediction dict.
user_supplied_predict_keys = predict_keys is not None
if user_supplied_predict_keys:
predict_keys = set(predict_keys)
predict_keys.add(boosted_trees_utils._DEBUG_PROTO_KEY)
predictions = est.predict(
input_fn,
predict_keys=predict_keys,
hooks=hooks,
checkpoint_path=checkpoint_path,
yield_single_examples=True)
for pred in predictions:
bias, dfcs = boosted_trees_utils._parse_explanations_from_prediction(
pred[boosted_trees_utils._DEBUG_PROTO_KEY], self._n_features,
self._is_classification)
pred['bias'] = bias
pred['dfc'] = dfcs
# Don't need to expose serialized proto to end user.
del pred[boosted_trees_utils._DEBUG_PROTO_KEY]
yield pred
# pylint: disable=protected-access
@estimator_export('estimator.BoostedTreesClassifier')
class BoostedTreesClassifier(_BoostedTreesBase):
"""A Classifier for Tensorflow Boosted Trees models.
@compatibility(eager)
Estimators can be used while eager execution is enabled. Note that `input_fn`
and all hooks are executed inside a graph context, so they have to be written
to be compatible with graph mode. Note that `input_fn` code using `tf.data`
generally works in both graph and eager modes.
@end_compatibility
"""
def __init__(self,
feature_columns,
n_batches_per_layer,
model_dir=None,
n_classes=_HOLD_FOR_MULTI_CLASS_SUPPORT,
weight_column=None,
label_vocabulary=None,
n_trees=100,
max_depth=6,
learning_rate=0.1,
l1_regularization=0.,
l2_regularization=0.,
tree_complexity=0.,
min_node_weight=0.,
config=None,
center_bias=False,
pruning_mode='none'):
"""Initializes a `BoostedTreesClassifier` instance.
Example:
```python
bucketized_feature_1 = bucketized_column(
numeric_column('feature_1'), BUCKET_BOUNDARIES_1)
bucketized_feature_2 = bucketized_column(
numeric_column('feature_2'), BUCKET_BOUNDARIES_2)
# Need to see a large portion of the data before we can build a layer, for
# example half of data n_batches_per_layer = 0.5 * NUM_EXAMPLES / BATCH_SIZE
classifier = estimator.BoostedTreesClassifier(
feature_columns=[bucketized_feature_1, bucketized_feature_2],
n_batches_per_layer=n_batches_per_layer,
n_trees=100,
... <some other params>
)
def input_fn_train():
...
return dataset
classifier.train(input_fn=input_fn_train)
def input_fn_eval():
...
return dataset
metrics = classifier.evaluate(input_fn=input_fn_eval)
```
Args:
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
n_batches_per_layer: the number of batches to collect statistics per
layer. The total number of batches is total number of data divided by
batch size.
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator
to continue training a previously saved model.
n_classes: number of label classes. Default is binary classification.
Multiclass support is not yet implemented.
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to downweight or boost examples during training. It
will be multiplied by the loss of the example. If it is a string, it is
used as a key to fetch weight tensor from the `features`. If it is a
`_NumericColumn`, raw tensor is fetched by key `weight_column.key`,
then weight_column.normalizer_fn is applied on it to get weight tensor.
label_vocabulary: A list of strings represents possible label values. If
given, labels must be string type and have any value in
`label_vocabulary`. If it is not given, that means labels are
already encoded as integer or float within [0, 1] for `n_classes=2` and
encoded as integer values in {0, 1,..., n_classes-1} for `n_classes`>2 .
Also there will be errors if vocabulary is not provided and labels are
string.
n_trees: number trees to be created.
max_depth: maximum depth of the tree to grow.
learning_rate: shrinkage parameter to be used when a tree added to the
model.
l1_regularization: regularization multiplier applied to the absolute
weights of the tree leafs.
l2_regularization: regularization multiplier applied to the square weights
of the tree leafs.
tree_complexity: regularization factor to penalize trees with more leaves.
min_node_weight: min_node_weight: minimum hessian a node must have for a
split to be considered. The value will be compared with
sum(leaf_hessian)/(batch_size * n_batches_per_layer).
config: `RunConfig` object to configure the runtime settings.
center_bias: Whether bias centering needs to occur. Bias centering refers
to the first node in the very first tree returning the prediction that
is aligned with the original labels distribution. For example, for
regression problems, the first node will return the mean of the labels.
For binary classification problems, it will return a logit for a prior
probability of label 1.
pruning_mode: one of 'none', 'pre', 'post' to indicate no pruning, pre-
pruning (do not split a node if not enough gain is observed) and post
pruning (build the tree up to a max depth and then prune branches with
negative gain). For pre and post pruning, you MUST provide
tree_complexity >0.
Raises:
ValueError: when wrong arguments are given or unsupported functionalities
are requested.
"""
# TODO(nponomareva): Support multi-class cases.
if n_classes == _HOLD_FOR_MULTI_CLASS_SUPPORT:
n_classes = 2
head, closed_form = _create_classification_head_and_closed_form(
n_classes, weight_column, label_vocabulary=label_vocabulary)
# HParams for the model.
tree_hparams = _TreeHParams(
n_trees, max_depth, learning_rate, l1_regularization, l2_regularization,
tree_complexity, min_node_weight, center_bias, pruning_mode)
def _model_fn(features, labels, mode, config):
return _bt_model_fn(
features,
labels,
mode,
head,
feature_columns,
tree_hparams,
n_batches_per_layer,
config,
closed_form_grad_and_hess_fn=closed_form)
super(BoostedTreesClassifier, self).__init__(
model_fn=_model_fn,
model_dir=model_dir,
config=config,
feature_columns=feature_columns,
head=head,
center_bias=center_bias,
is_classification=True)
@estimator_export('estimator.BoostedTreesRegressor')
class BoostedTreesRegressor(_BoostedTreesBase):
"""A Regressor for Tensorflow Boosted Trees models.
@compatibility(eager)
Estimators can be used while eager execution is enabled. Note that `input_fn`
and all hooks are executed inside a graph context, so they have to be written
to be compatible with graph mode. Note that `input_fn` code using `tf.data`
generally works in both graph and eager modes.
@end_compatibility
"""
def __init__(self,
feature_columns,
n_batches_per_layer,
model_dir=None,
label_dimension=_HOLD_FOR_MULTI_DIM_SUPPORT,
weight_column=None,
n_trees=100,
max_depth=6,
learning_rate=0.1,
l1_regularization=0.,
l2_regularization=0.,
tree_complexity=0.,
min_node_weight=0.,
config=None,
center_bias=False,
pruning_mode='none'):
"""Initializes a `BoostedTreesRegressor` instance.
Example:
```python
bucketized_feature_1 = bucketized_column(
numeric_column('feature_1'), BUCKET_BOUNDARIES_1)
bucketized_feature_2 = bucketized_column(
numeric_column('feature_2'), BUCKET_BOUNDARIES_2)
# Need to see a large portion of the data before we can build a layer, for
# example half of data n_batches_per_layer = 0.5 * NUM_EXAMPLES / BATCH_SIZE
regressor = estimator.BoostedTreesRegressor(
feature_columns=[bucketized_feature_1, bucketized_feature_2],
n_batches_per_layer=n_batches_per_layer,
n_trees=100,
... <some other params>
)
def input_fn_train():
...
return dataset
regressor.train(input_fn=input_fn_train)
def input_fn_eval():
...
return dataset
metrics = regressor.evaluate(input_fn=input_fn_eval)
```
Args:
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
n_batches_per_layer: the number of batches to collect statistics per
layer. The total number of batches is total number of data divided by
batch size.
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator
to continue training a previously saved model.
label_dimension: Number of regression targets per example.
Multi-dimensional support is not yet implemented.
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to downweight or boost examples during training. It
will be multiplied by the loss of the example. If it is a string, it is
used as a key to fetch weight tensor from the `features`. If it is a
`_NumericColumn`, raw tensor is fetched by key `weight_column.key`,
then weight_column.normalizer_fn is applied on it to get weight tensor.
n_trees: number trees to be created.
max_depth: maximum depth of the tree to grow.
learning_rate: shrinkage parameter to be used when a tree added to the
model.
l1_regularization: regularization multiplier applied to the absolute
weights of the tree leafs.
l2_regularization: regularization multiplier applied to the square weights
of the tree leafs.
tree_complexity: regularization factor to penalize trees with more leaves.
min_node_weight: min_node_weight: minimum hessian a node must have for a
split to be considered. The value will be compared with
sum(leaf_hessian)/(batch_size * n_batches_per_layer).
config: `RunConfig` object to configure the runtime settings.
center_bias: Whether bias centering needs to occur. Bias centering refers
to the first node in the very first tree returning the prediction that
is aligned with the original labels distribution. For example, for
regression problems, the first node will return the mean of the labels.
For binary classification problems, it will return a logit for a prior
probability of label 1.
pruning_mode: one of 'none', 'pre', 'post' to indicate no pruning, pre-
pruning (do not split a node if not enough gain is observed) and post
pruning (build the tree up to a max depth and then prune branches with
negative gain). For pre and post pruning, you MUST provide
tree_complexity >0.
Raises:
ValueError: when wrong arguments are given or unsupported functionalities
are requested.
"""
# TODO(nponomareva): Extend it to multi-dimension cases.
if label_dimension == _HOLD_FOR_MULTI_DIM_SUPPORT:
label_dimension = 1
head = _create_regression_head(label_dimension, weight_column)
# HParams for the model.
tree_hparams = _TreeHParams(
n_trees, max_depth, learning_rate, l1_regularization, l2_regularization,
tree_complexity, min_node_weight, center_bias, pruning_mode)
def _model_fn(features, labels, mode, config):
return _bt_model_fn(features, labels, mode, head, feature_columns,
tree_hparams, n_batches_per_layer, config)
super(BoostedTreesRegressor, self).__init__(
model_fn=_model_fn,
model_dir=model_dir,
config=config,
feature_columns=feature_columns,
head=head,
center_bias=center_bias,
is_classification=False)
# pylint: enable=protected-access
| {
"content_hash": "0a876fde6a74555680e57ecd561a795e",
"timestamp": "",
"source": "github",
"line_count": 1550,
"max_line_length": 115,
"avg_line_length": 42.42193548387097,
"alnum_prop": 0.6659518812543723,
"repo_name": "girving/tensorflow",
"id": "0278990cfc9bfcf18912886aa752da262e805573",
"size": "66443",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/estimator/canned/boosted_trees.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3325"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "343258"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "50036869"
},
{
"name": "CMake",
"bytes": "196127"
},
{
"name": "Dockerfile",
"bytes": "36386"
},
{
"name": "Go",
"bytes": "1254086"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "867313"
},
{
"name": "Jupyter Notebook",
"bytes": "2604735"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "58787"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "42041620"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "477299"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/item/shared_item_con_glass_beaker.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "0f473ed998f3f8ce04f199d4bf2493c0",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 72,
"avg_line_length": 23.153846153846153,
"alnum_prop": 0.6877076411960132,
"repo_name": "anhstudios/swganh",
"id": "b59cb16f6d794c80595d5a6c02a752ecd8945b71",
"size": "446",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/static/item/shared_item_con_glass_beaker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
"""
weasyprint.tests.test_css_properties
------------------------------------
Test expanders for shorthand properties.
:copyright: Copyright 2011-2014 Simon Sapin and contributors, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division, unicode_literals
import math
from .testing_utils import assert_no_logs, capture_logs, almost_equal
from ..css import PARSER, preprocess_declarations
from ..css.properties import INITIAL_VALUES
from ..images import LinearGradient, RadialGradient
def expand_to_dict(css, expected_error=None):
"""Helper to test shorthand properties expander functions."""
declarations, errors = PARSER.parse_style_attr(css)
assert not errors
with capture_logs() as logs:
base_url = 'http://weasyprint.org/foo/'
declarations = list(preprocess_declarations(base_url, declarations))
if expected_error:
assert len(logs) == 1
assert expected_error in logs[0]
else:
assert not logs
return dict(
(name, value) for name, value, _priority in declarations
if value != 'initial')
def assert_invalid(css, message='invalid'):
assert expand_to_dict(css, message) == {}
@assert_no_logs
def test_not_print():
assert expand_to_dict(
'volume: 42', 'the property does not apply for the print media') == {}
@assert_no_logs
def test_function():
assert expand_to_dict('clip: rect(1px, 3em, auto, auto)') == {
'clip': [(1, 'px'), (3, 'em'), 'auto', 'auto']}
assert_invalid('clip: square(1px, 3em, auto, auto)')
assert_invalid('clip: rect(1px, 3em, auto auto)', 'invalid')
assert_invalid('clip: rect(1px, 3em, auto)')
assert_invalid('clip: rect(1px, 3em / auto)')
@assert_no_logs
def test_counters():
assert expand_to_dict('counter-reset: foo bar 2 baz') == {
'counter_reset': [('foo', 0), ('bar', 2), ('baz', 0)]}
assert expand_to_dict('counter-increment: foo bar 2 baz') == {
'counter_increment': [('foo', 1), ('bar', 2), ('baz', 1)]}
assert expand_to_dict('counter-reset: foo') == {
'counter_reset': [('foo', 0)]}
assert expand_to_dict('counter-reset: FoO') == {
'counter_reset': [('FoO', 0)]}
assert expand_to_dict('counter-increment: foo bAr 2 Bar') == {
'counter_increment': [('foo', 1), ('bAr', 2), ('Bar', 1)]}
assert expand_to_dict('counter-reset: none') == {
'counter_reset': []}
assert expand_to_dict(
'counter-reset: foo none', 'Invalid counter name') == {}
assert expand_to_dict(
'counter-reset: foo initial', 'Invalid counter name') == {}
assert_invalid('counter-reset: foo 3px')
assert_invalid('counter-reset: 3')
@assert_no_logs
def test_spacing():
assert expand_to_dict('letter-spacing: normal') == {
'letter_spacing': 'normal'}
assert expand_to_dict('letter-spacing: 3px') == {
'letter_spacing': (3, 'px')}
assert_invalid('letter-spacing: 3')
assert expand_to_dict(
'letter_spacing: normal', 'did you mean letter-spacing') == {}
assert expand_to_dict('word-spacing: normal') == {
'word_spacing': 'normal'}
assert expand_to_dict('word-spacing: 3px') == {
'word_spacing': (3, 'px')}
assert_invalid('word-spacing: 3')
@assert_no_logs
def test_decoration():
assert expand_to_dict('text-decoration: none') == {
'text_decoration': 'none'}
assert expand_to_dict('text-decoration: overline') == {
'text_decoration': frozenset(['overline'])}
# blink is accepted but ignored
assert expand_to_dict('text-decoration: overline blink line-through') == {
'text_decoration': frozenset(['line-through', 'overline'])}
@assert_no_logs
def test_size():
assert expand_to_dict('size: 200px') == {
'size': ((200, 'px'), (200, 'px'))}
assert expand_to_dict('size: 200px 300pt') == {
'size': ((200, 'px'), (300, 'pt'))}
assert expand_to_dict('size: auto') == {
'size': ((210, 'mm'), (297, 'mm'))}
assert expand_to_dict('size: portrait') == {
'size': ((210, 'mm'), (297, 'mm'))}
assert expand_to_dict('size: landscape') == {
'size': ((297, 'mm'), (210, 'mm'))}
assert expand_to_dict('size: A3 portrait') == {
'size': ((297, 'mm'), (420, 'mm'))}
assert expand_to_dict('size: A3 landscape') == {
'size': ((420, 'mm'), (297, 'mm'))}
assert expand_to_dict('size: portrait A3') == {
'size': ((297, 'mm'), (420, 'mm'))}
assert expand_to_dict('size: landscape A3') == {
'size': ((420, 'mm'), (297, 'mm'))}
assert_invalid('size: A3 landscape A3')
assert_invalid('size: A9')
assert_invalid('size: foo')
assert_invalid('size: foo bar')
assert_invalid('size: 20%')
@assert_no_logs
def test_transforms():
assert expand_to_dict('transform: none') == {
'transform': []}
assert expand_to_dict(
'transform: translate(6px) rotate(90deg)'
) == {'transform': [('translate', ((6, 'px'), (0, 'px'))),
('rotate', math.pi / 2)]}
assert expand_to_dict(
'transform: translate(-4px, 0)'
) == {'transform': [('translate', ((-4, 'px'), (0, None)))]}
assert expand_to_dict(
'transform: translate(6px, 20%)'
) == {'transform': [('translate', ((6, 'px'), (20, '%')))]}
assert expand_to_dict(
'transform: scale(2)'
) == {'transform': [('scale', (2, 2))]}
assert_invalid('transform: translate(6px 20%)') # missing comma
assert_invalid('transform: lipsumize(6px)')
assert_invalid('transform: foo')
assert_invalid('transform: scale(2) foo')
assert_invalid('transform: 6px')
assert_invalid('-weasy-transform: none',
'the property was unprefixed, use transform')
@assert_no_logs
def test_expand_four_sides():
"""Test the 4-value properties."""
assert expand_to_dict('margin: inherit') == {
'margin_top': 'inherit',
'margin_right': 'inherit',
'margin_bottom': 'inherit',
'margin_left': 'inherit',
}
assert expand_to_dict('margin: 1em') == {
'margin_top': (1, 'em'),
'margin_right': (1, 'em'),
'margin_bottom': (1, 'em'),
'margin_left': (1, 'em'),
}
assert expand_to_dict('margin: -1em auto 20%') == {
'margin_top': (-1, 'em'),
'margin_right': 'auto',
'margin_bottom': (20, '%'),
'margin_left': 'auto',
}
assert expand_to_dict('padding: 1em 0') == {
'padding_top': (1, 'em'),
'padding_right': (0, None),
'padding_bottom': (1, 'em'),
'padding_left': (0, None),
}
assert expand_to_dict('padding: 1em 0 2%') == {
'padding_top': (1, 'em'),
'padding_right': (0, None),
'padding_bottom': (2, '%'),
'padding_left': (0, None),
}
assert expand_to_dict('padding: 1em 0 2em 5px') == {
'padding_top': (1, 'em'),
'padding_right': (0, None),
'padding_bottom': (2, 'em'),
'padding_left': (5, 'px'),
}
assert expand_to_dict(
'padding: 1 2 3 4 5',
'Expected 1 to 4 token components got 5') == {}
assert_invalid('margin: rgb(0, 0, 0)')
assert_invalid('padding: auto')
assert_invalid('padding: -12px')
assert_invalid('border-width: -3em')
assert_invalid('border-width: 12%')
@assert_no_logs
def test_expand_borders():
"""Test the ``border`` property."""
assert expand_to_dict('border-top: 3px dotted red') == {
'border_top_width': (3, 'px'),
'border_top_style': 'dotted',
'border_top_color': (1, 0, 0, 1), # red
}
assert expand_to_dict('border-top: 3px dotted') == {
'border_top_width': (3, 'px'),
'border_top_style': 'dotted',
}
assert expand_to_dict('border-top: 3px red') == {
'border_top_width': (3, 'px'),
'border_top_color': (1, 0, 0, 1), # red
}
assert expand_to_dict('border-top: solid') == {
'border_top_style': 'solid',
}
assert expand_to_dict('border: 6px dashed lime') == {
'border_top_width': (6, 'px'),
'border_top_style': 'dashed',
'border_top_color': (0, 1, 0, 1), # lime
'border_left_width': (6, 'px'),
'border_left_style': 'dashed',
'border_left_color': (0, 1, 0, 1), # lime
'border_bottom_width': (6, 'px'),
'border_bottom_style': 'dashed',
'border_bottom_color': (0, 1, 0, 1), # lime
'border_right_width': (6, 'px'),
'border_right_style': 'dashed',
'border_right_color': (0, 1, 0, 1), # lime
}
assert_invalid('border: 6px dashed left')
@assert_no_logs
def test_expand_list_style():
"""Test the ``list_style`` property."""
assert expand_to_dict('list-style: inherit') == {
'list_style_position': 'inherit',
'list_style_image': 'inherit',
'list_style_type': 'inherit',
}
assert expand_to_dict('list-style: url(../bar/lipsum.png)') == {
'list_style_image': ('url', 'http://weasyprint.org/bar/lipsum.png'),
}
assert expand_to_dict('list-style: square') == {
'list_style_type': 'square',
}
assert expand_to_dict('list-style: circle inside') == {
'list_style_position': 'inside',
'list_style_type': 'circle',
}
assert expand_to_dict('list-style: none circle inside') == {
'list_style_position': 'inside',
'list_style_image': ('none', None),
'list_style_type': 'circle',
}
assert expand_to_dict('list-style: none inside none') == {
'list_style_position': 'inside',
'list_style_image': ('none', None),
'list_style_type': 'none',
}
assert_invalid('list-style: none inside none none')
assert_invalid('list-style: red')
assert_invalid('list-style: circle disc',
'got multiple type values in a list-style shorthand')
def assert_background(css, **expected):
"""Helper checking the background properties."""
expanded = expand_to_dict('background: ' + css)
assert expanded.pop('background_color') == expected.pop(
'background_color', INITIAL_VALUES['background_color'])
nb_layers = len(expanded['background_image'])
for name, value in expected.items():
assert expanded.pop(name) == value
for name, value in expanded.items():
assert value == INITIAL_VALUES[name] * nb_layers
@assert_no_logs
def test_expand_background():
"""Test the ``background`` property."""
assert_background('red', background_color=(1, 0, 0, 1))
assert_background(
'url(lipsum.png)',
background_image=[('url', 'http://weasyprint.org/foo/lipsum.png')])
assert_background(
'no-repeat',
background_repeat=[('no-repeat', 'no-repeat')])
assert_background('fixed', background_attachment=['fixed'])
assert_background(
'repeat no-repeat fixed',
background_repeat=[('repeat', 'no-repeat')],
background_attachment=['fixed'])
assert_background(
'top',
background_position=[('left', (50, '%'), 'top', (0, '%'))])
assert_background(
'top right',
background_position=[('left', (100, '%'), 'top', (0, '%'))])
assert_background(
'top right 20px',
background_position=[('right', (20, 'px'), 'top', (0, '%'))])
assert_background(
'top 1% right 20px',
background_position=[('right', (20, 'px'), 'top', (1, '%'))])
assert_background(
'top no-repeat',
background_repeat=[('no-repeat', 'no-repeat')],
background_position=[('left', (50, '%'), 'top', (0, '%'))])
assert_background(
'top right no-repeat',
background_repeat=[('no-repeat', 'no-repeat')],
background_position=[('left', (100, '%'), 'top', (0, '%'))])
assert_background(
'top right 20px no-repeat',
background_repeat=[('no-repeat', 'no-repeat')],
background_position=[('right', (20, 'px'), 'top', (0, '%'))])
assert_background(
'top 1% right 20px no-repeat',
background_repeat=[('no-repeat', 'no-repeat')],
background_position=[('right', (20, 'px'), 'top', (1, '%'))])
assert_background(
'url(bar) #f00 repeat-y center left fixed',
background_color=(1, 0, 0, 1),
background_image=[('url', 'http://weasyprint.org/foo/bar')],
background_repeat=[('no-repeat', 'repeat')],
background_attachment=['fixed'],
background_position=[('left', (0, '%'), 'top', (50, '%'))])
assert_background(
'#00f 10% 200px',
background_color=(0, 0, 1, 1),
background_position=[('left', (10, '%'), 'top', (200, 'px'))])
assert_background(
'right 78px fixed',
background_attachment=['fixed'],
background_position=[('left', (100, '%'), 'top', (78, 'px'))])
assert_background(
'center / cover red',
background_size=['cover'],
background_position=[('left', (50, '%'), 'top', (50, '%'))],
background_color=(1, 0, 0, 1))
assert_background(
'center / auto red',
background_size=[('auto', 'auto')],
background_position=[('left', (50, '%'), 'top', (50, '%'))],
background_color=(1, 0, 0, 1))
assert_background(
'center / 42px',
background_size=[((42, 'px'), 'auto')],
background_position=[('left', (50, '%'), 'top', (50, '%'))])
assert_background(
'center / 7% 4em',
background_size=[((7, '%'), (4, 'em'))],
background_position=[('left', (50, '%'), 'top', (50, '%'))])
assert_background(
'red content-box',
background_color=(1, 0, 0, 1),
background_origin=['content-box'],
background_clip=['content-box'])
assert_background(
'red border-box content-box',
background_color=(1, 0, 0, 1),
background_origin=['border-box'],
background_clip=['content-box'])
assert_background(
'url(bar) center, no-repeat',
background_color=(0, 0, 0, 0),
background_image=[('url', 'http://weasyprint.org/foo/bar'),
('none', None)],
background_position=[('left', (50, '%'), 'top', (50, '%')),
('left', (0, '%'), 'top', (0, '%'))],
background_repeat=[('repeat', 'repeat'), ('no-repeat', 'no-repeat')])
assert_invalid('background: 10px lipsum')
assert_invalid('background-position: 10px lipsum')
assert_invalid('background: content-box red content-box')
assert_invalid('background-image: inexistent-gradient(blue, green)')
# Color must be in the last layer:
assert_invalid('background: red, url(foo)')
@assert_no_logs
def test_expand_background_position():
"""Test the ``background-position`` property."""
def position(css, *expected):
[(name, [value])] = expand_to_dict(
'background-position:' + css).items()
assert name == 'background_position'
assert value == expected
for css_x, val_x in [
('left', (0, '%')), ('center', (50, '%')), ('right', (100, '%')),
('4.5%', (4.5, '%')), ('12px', (12, 'px'))
]:
for css_y, val_y in [
('top', (0, '%')), ('center', (50, '%')), ('bottom', (100, '%')),
('7%', (7, '%')), ('1.5px', (1.5, 'px'))
]:
# Two tokens:
position('%s %s' % (css_x, css_y), 'left', val_x, 'top', val_y)
# One token:
position(css_x, 'left', val_x, 'top', (50, '%'))
# One token, vertical
position('top', 'left', (50, '%'), 'top', (0, '%'))
position('bottom', 'left', (50, '%'), 'top', (100, '%'))
# Three tokens:
position('center top 10%', 'left', (50, '%'), 'top', (10, '%'))
position('top 10% center', 'left', (50, '%'), 'top', (10, '%'))
position('center bottom 10%', 'left', (50, '%'), 'bottom', (10, '%'))
position('bottom 10% center', 'left', (50, '%'), 'bottom', (10, '%'))
position('right top 10%', 'right', (0, '%'), 'top', (10, '%'))
position('top 10% right', 'right', (0, '%'), 'top', (10, '%'))
position('right bottom 10%', 'right', (0, '%'), 'bottom', (10, '%'))
position('bottom 10% right', 'right', (0, '%'), 'bottom', (10, '%'))
position('center left 10%', 'left', (10, '%'), 'top', (50, '%'))
position('left 10% center', 'left', (10, '%'), 'top', (50, '%'))
position('center right 10%', 'right', (10, '%'), 'top', (50, '%'))
position('right 10% center', 'right', (10, '%'), 'top', (50, '%'))
position('bottom left 10%', 'left', (10, '%'), 'bottom', (0, '%'))
position('left 10% bottom', 'left', (10, '%'), 'bottom', (0, '%'))
position('bottom right 10%', 'right', (10, '%'), 'bottom', (0, '%'))
position('right 10% bottom', 'right', (10, '%'), 'bottom', (0, '%'))
# Four tokens:
position('left 10% bottom 3px', 'left', (10, '%'), 'bottom', (3, 'px'))
position('bottom 3px left 10%', 'left', (10, '%'), 'bottom', (3, 'px'))
position('right 10% top 3px', 'right', (10, '%'), 'top', (3, 'px'))
position('top 3px right 10%', 'right', (10, '%'), 'top', (3, 'px'))
assert_invalid('background-position: left center 3px')
assert_invalid('background-position: 3px left')
assert_invalid('background-position: bottom 4%')
assert_invalid('background-position: bottom top')
@assert_no_logs
def test_font():
"""Test the ``font`` property."""
assert expand_to_dict('font: 12px My Fancy Font, serif') == {
'font_size': (12, 'px'),
'font_family': ['My Fancy Font', 'serif'],
}
assert expand_to_dict('font: small/1.2 "Some Font", serif') == {
'font_size': 'small',
'line_height': (1.2, None),
'font_family': ['Some Font', 'serif'],
}
assert expand_to_dict('font: small-caps italic 700 large serif') == {
'font_style': 'italic',
'font_variant': 'small-caps',
'font_weight': 700,
'font_size': 'large',
'font_family': ['serif'],
}
assert expand_to_dict(
'font: small-caps condensed normal 700 large serif'
) == {
# 'font_style': 'normal', XXX shouldn’t this be here?
'font_stretch': 'condensed',
'font_variant': 'small-caps',
'font_weight': 700,
'font_size': 'large',
'font_family': ['serif'],
}
assert_invalid('font-family: "My" Font, serif')
assert_invalid('font-family: "My" "Font", serif')
assert_invalid('font-family: "My", 12pt, serif')
assert_invalid('font: menu', 'System fonts are not supported')
assert_invalid('font: 12deg My Fancy Font, serif')
assert_invalid('font: 12px')
assert_invalid('font: 12px/foo serif')
assert_invalid('font: 12px "Invalid" family')
@assert_no_logs
def test_line_height():
"""Test the ``line-height`` property."""
assert expand_to_dict('line-height: 1px') == {'line_height': (1, 'px')}
assert expand_to_dict('line-height: 1.1%') == {'line_height': (1.1, '%')}
assert expand_to_dict('line-height: 1em') == {'line_height': (1, 'em')}
assert expand_to_dict('line-height: 1') == {'line_height': (1, None)}
assert expand_to_dict('line-height: 1.3') == {'line_height': (1.3, None)}
assert expand_to_dict('line-height: -0') == {'line_height': (0, None)}
assert expand_to_dict('line-height: 0px') == {'line_height': (0, 'px')}
assert_invalid('line-height: 1deg')
assert_invalid('line-height: -1px')
assert_invalid('line-height: -1')
assert_invalid('line-height: -0.5%')
assert_invalid('line-height: 1px 1px')
@assert_no_logs
def test_string_set():
"""Test the ``string-set`` property."""
assert expand_to_dict('-weasy-string-set: test content(text)') == {
'string_set': [('test', [('content', 'text')])]}
assert expand_to_dict('-weasy-string-set: test content(before)') == {
'string_set': [('test', [('content', 'before')])]}
assert expand_to_dict('-weasy-string-set: test "string"') == {
'string_set': [('test', [('STRING', 'string')])]}
assert expand_to_dict(
'-weasy-string-set: test1 "string", test2 "string"') == {
'string_set': [
('test1', [('STRING', 'string')]),
('test2', [('STRING', 'string')])]}
assert expand_to_dict('-weasy-string-set: test attr(class)') == {
'string_set': [('test', [('attr', 'class')])]}
assert expand_to_dict('-weasy-string-set: test counter(count)') == {
'string_set': [('test', [('counter', ['count', 'decimal'])])]}
assert expand_to_dict(
'-weasy-string-set: test counter(count, upper-roman)') == {
'string_set': [('test', [('counter', ['count', 'upper-roman'])])]}
assert expand_to_dict('-weasy-string-set: test counters(count, ".")') == {
'string_set': [('test', [('counters', ['count', '.', 'decimal'])])]}
assert expand_to_dict(
'-weasy-string-set: test counters(count, ".", upper-roman)') == {
'string_set': [
('test', [('counters', ['count', '.', 'upper-roman'])])]}
assert expand_to_dict(
'-weasy-string-set: test content(text) "string" '
'attr(title) attr(title) counter(count)') == {
'string_set': [('test', [
('content', 'text'), ('STRING', 'string'),
('attr', 'title'), ('attr', 'title'),
('counter', ['count', 'decimal'])])]}
assert_invalid('-weasy-string-set: test')
assert_invalid('-weasy-string-set: test test1')
assert_invalid('-weasy-string-set: test content(test)')
assert_invalid('-weasy-string-set: test unknown()')
assert_invalid('-weasy-string-set: test attr(id, class)')
@assert_no_logs
def test_linear_gradient():
red = (1, 0, 0, 1)
lime = (0, 1, 0, 1)
blue = (0, 0, 1, 1)
pi = math.pi
def gradient(css, direction, colors=[blue], stop_positions=[None]):
for repeating, prefix in ((False, ''), (True, 'repeating-')):
expanded = expand_to_dict(
'background-image: %slinear-gradient(%s)' % (prefix, css))
[(_, [(type_, image)])] = expanded.items()
assert type_ == 'linear-gradient'
assert isinstance(image, LinearGradient)
assert image.repeating == repeating
assert almost_equal((image.direction_type, image.direction),
direction)
assert almost_equal(image.colors, colors)
assert almost_equal(image.stop_positions, stop_positions)
def invalid(css):
assert_invalid('background-image: linear-gradient(%s)' % css)
assert_invalid('background-image: repeating-linear-gradient(%s)' % css)
invalid(' ')
invalid('1% blue')
invalid('blue 10deg')
invalid('blue 4')
invalid('soylent-green 4px')
invalid('red 4px 2px')
gradient('blue', ('angle', pi))
gradient('red', ('angle', pi), [red], [None])
gradient('blue 1%, lime,red 2em ', ('angle', pi),
[blue, lime, red], [(1, '%'), None, (2, 'em')])
invalid('18deg')
gradient('18deg, blue', ('angle', pi / 10))
gradient('4rad, blue', ('angle', 4))
gradient('.25turn, blue', ('angle', pi / 2))
gradient('100grad, blue', ('angle', pi / 2))
gradient('12rad, blue 1%, lime,red 2em ', ('angle', 12),
[blue, lime, red], [(1, '%'), None, (2, 'em')])
invalid('10arc-minutes, blue')
invalid('10px, blue')
invalid('to 90deg, blue')
gradient('to top, blue', ('angle', 0))
gradient('to right, blue', ('angle', pi / 2))
gradient('to bottom, blue', ('angle', pi))
gradient('to left, blue', ('angle', pi * 3 / 2))
gradient('to right, blue 1%, lime,red 2em ', ('angle', pi / 2),
[blue, lime, red], [(1, '%'), None, (2, 'em')])
invalid('to the top, blue')
invalid('to up, blue')
invalid('into top, blue')
invalid('top, blue')
gradient('to top left, blue', ('corner', 'top_left'))
gradient('to left top, blue', ('corner', 'top_left'))
gradient('to top right, blue', ('corner', 'top_right'))
gradient('to right top, blue', ('corner', 'top_right'))
gradient('to bottom left, blue', ('corner', 'bottom_left'))
gradient('to left bottom, blue', ('corner', 'bottom_left'))
gradient('to bottom right, blue', ('corner', 'bottom_right'))
gradient('to right bottom, blue', ('corner', 'bottom_right'))
invalid('to bottom up, blue')
invalid('bottom left, blue')
@assert_no_logs
def test_overflow_wrap():
assert expand_to_dict('overflow-wrap: normal') == {
'overflow_wrap': 'normal'}
assert expand_to_dict('overflow-wrap: break-word') == {
'overflow_wrap': 'break-word'}
assert_invalid('overflow-wrap: none')
assert_invalid('overflow-wrap: normal, break-word')
@assert_no_logs
def test_expand_word_wrap():
assert expand_to_dict('word-wrap: normal') == {
'overflow_wrap': 'normal'}
assert expand_to_dict('word-wrap: break-word') == {
'overflow_wrap': 'break-word'}
assert_invalid('word-wrap: none')
assert_invalid('word-wrap: normal, break-word')
@assert_no_logs
def test_radial_gradient():
red = (1, 0, 0, 1)
lime = (0, 1, 0, 1)
blue = (0, 0, 1, 1)
def gradient(css, shape='ellipse', size=('keyword', 'farthest-corner'),
center=('left', (50, '%'), 'top', (50, '%')),
colors=[blue], stop_positions=[None]):
for repeating, prefix in ((False, ''), (True, 'repeating-')):
expanded = expand_to_dict(
'background-image: %sradial-gradient(%s)' % (prefix, css))
[(_, [(type_, image)])] = expanded.items()
assert type_ == 'radial-gradient'
assert isinstance(image, RadialGradient)
assert image.repeating == repeating
assert image.shape == shape
assert almost_equal((image.size_type, image.size), size)
assert almost_equal(image.center, center)
assert almost_equal(image.colors, colors)
assert almost_equal(image.stop_positions, stop_positions)
def invalid(css):
assert_invalid('background-image: radial-gradient(%s)' % css)
assert_invalid('background-image: repeating-radial-gradient(%s)' % css)
invalid(' ')
invalid('1% blue')
invalid('blue 10deg')
invalid('blue 4')
invalid('soylent-green 4px')
invalid('red 4px 2px')
gradient('blue')
gradient('red', colors=[red])
gradient('blue 1%, lime,red 2em ', colors=[blue, lime, red],
stop_positions=[(1, '%'), None, (2, 'em')])
gradient('circle, blue', 'circle')
gradient('ellipse, blue', 'ellipse')
invalid('circle')
invalid('square, blue')
invalid('closest-triangle, blue')
invalid('center, blue')
gradient('ellipse closest-corner, blue',
'ellipse', ('keyword', 'closest-corner'))
gradient('circle closest-side, blue',
'circle', ('keyword', 'closest-side'))
gradient('farthest-corner circle, blue',
'circle', ('keyword', 'farthest-corner'))
gradient('farthest-side, blue',
'ellipse', ('keyword', 'farthest-side'))
gradient('5ch, blue',
'circle', ('explicit', ((5, 'ch'), (5, 'ch'))))
gradient('5ch circle, blue',
'circle', ('explicit', ((5, 'ch'), (5, 'ch'))))
gradient('circle 5ch, blue',
'circle', ('explicit', ((5, 'ch'), (5, 'ch'))))
invalid('ellipse 5ch')
invalid('5ch ellipse')
gradient('10px 50px, blue',
'ellipse', ('explicit', ((10, 'px'), (50, 'px'))))
gradient('10px 50px ellipse, blue',
'ellipse', ('explicit', ((10, 'px'), (50, 'px'))))
gradient('ellipse 10px 50px, blue',
'ellipse', ('explicit', ((10, 'px'), (50, 'px'))))
invalid('circle 10px 50px, blue')
invalid('10px 50px circle, blue')
invalid('10%, blue')
invalid('10% circle, blue')
invalid('circle 10%, blue')
gradient('10px 50px, blue',
'ellipse', ('explicit', ((10, 'px'), (50, 'px'))))
invalid('at appex, blue')
gradient('at top 10% right, blue',
center=('right', (0, '%'), 'top', (10, '%')))
gradient('circle at bottom, blue', shape='circle',
center=('left', (50, '%'), 'top', (100, '%')))
gradient('circle at 10px, blue', shape='circle',
center=('left', (10, 'px'), 'top', (50, '%')))
gradient('closest-side circle at right 5em, blue',
shape='circle', size=('keyword', 'closest-side'),
center=('left', (100, '%'), 'top', (5, 'em')))
| {
"content_hash": "5ba6f4fa698c5e497e3aabf53a663387",
"timestamp": "",
"source": "github",
"line_count": 722,
"max_line_length": 79,
"avg_line_length": 39.677285318559555,
"alnum_prop": 0.549167452089224,
"repo_name": "marclaporte/WeasyPrint",
"id": "c35f79e757db549b84a43fdce1e25c6dac554e95",
"size": "28664",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "weasyprint/tests/test_css_validation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "31587"
},
{
"name": "HTML",
"bytes": "18676"
},
{
"name": "Python",
"bytes": "986854"
}
],
"symlink_target": ""
} |
class beautyConsole:
"""This class defines properties and methods to manipulate console output"""
# Black 0;30 Dark Gray 1;30
# Blue 0;34 Light Blue 1;34
# Green 0;32 Light Green 1;32
# Cyan 0;36 Light Cyan 1;36
# Red 0;31 Light Red 1;31
# Purple 0;35 Light Purple 1;35
# Brown 0;33 Yellow 1;33
# Light Gray 0;37 White 1;37
colors = {
"black": '\33[30m',
"white": '\33[37m',
"red": '\33[31m',
"green": '\33[32m',
"yellow": '\33[33m',
"blue": '\33[34m',
"magenta": '\33[35m',
"cyan": '\33[36m',
"grey": '\33[90m',
"lightgrey": '\33[37m',
"lightblue": '\33[36'
}
characters = {
"endline": '\33[0m'
}
def __init__(self):
return None
@staticmethod
def getColor(color_name):
"""returns color identified by color_name or white as default value"""
if color_name in beautyConsole.colors:
return beautyConsole.colors[color_name]
return beautyConsole.colors["white"]
@staticmethod
def getSpecialChar(char_name):
"""returns special character identified by char_name"""
if char_name in beautyConsole.characters:
return beautyConsole.characters[char_name]
return ""
efMsgFound = "exploitable function call"
eKeyWordFound = "keyword with possibly critical meaning in code"
efMsgGlobalFound = "global variable explicit call"
fiMsgFound = "file include pattern found; potential LFI/RFI detected"
eReflFound = "reflected property found; check for XSS"
| {
"content_hash": "fdc92a85b97b86b8c08469789f0c8dbb",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 80,
"avg_line_length": 33.705882352941174,
"alnum_prop": 0.5607911576497964,
"repo_name": "bl4de/security-tools",
"id": "603135fd038fe4617380a804100a75504b11213a",
"size": "1719",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nodestructor/imports/beautyConsole.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1386"
},
{
"name": "JavaScript",
"bytes": "217"
},
{
"name": "PHP",
"bytes": "223"
},
{
"name": "Python",
"bytes": "105700"
},
{
"name": "Shell",
"bytes": "49415"
}
],
"symlink_target": ""
} |
import pymel.core as pm
import System.utils as utils
reload(utils)
from functools import partial
class ControlModule:
def __init__(self, _moduleNamespace):
# Class variables
self.moduleContainer = None
self.blueprintNamespace = ''
self.moduleNamespace = ''
self.characterNamespace = ''
self.publishedNames = []
if _moduleNamespace == None:
return
# Break down namespace info for easy access
moduleNamespaceInfo = utils.StripAllNamespaces(_moduleNamespace)
self.blueprintNamespace = moduleNamespaceInfo[0]
self.moduleNamespace = moduleNamespaceInfo[1]
self.characterNamespace = utils.StripLeadingNamespace(self.blueprintNamespace)[0]
self.moduleContainer = "%s:%s:module_container" %(self.blueprintNamespace, self.moduleNamespace)
# DERIVED CLASS METHODS
def Install_custom(self, _joints, _moduleGrp, _moduleContainer):
print "Install_custom() method not implemented bt derived module"
def CompatibleBlueprintModules(self):
return ("-1")
def UI(self, _parentLayout):
print "No custom user interface provided"
def UI_preferences(self, _parentLayout):
print "No custom user interface provided"
def Match(self, *args):
print "No matching functionality provided"
# BASE CLASS METHODS
def Install(self):
nodes = self.Install_init()
joints = nodes[0]
moduleGrp = nodes[1]
moduleContainer = nodes[2]
self.Install_custom(joints, moduleGrp, moduleContainer)
self.Install_finalize()
def Install_init(self):
pm.namespace(setNamespace = self.blueprintNamespace)
pm.namespace(add = self.moduleNamespace)
pm.namespace(setNamespace = ":")
characterContainer = "%s:character_container" %self.characterNamespace
blueprintContainer = "%s:module_container" %self.blueprintNamespace
container = [characterContainer, blueprintContainer]
for c in container:
pm.lockNode(c, lock = False, lockUnpublished = False)
self.joints = self.DuplicateAndRenameCreationPose()
moduleJointsGrp = self.joints[0]
moduleGrp = pm.group(empty = True, name = "%s:%s:module_grp" %(self.blueprintNamespace, self.moduleNamespace))
hookIn = "%s:HOOK_IN" %self.blueprintNamespace
pm.parent(moduleGrp, hookIn, relative = True)
pm.parent(moduleJointsGrp, moduleGrp, absolute = True)
pm.select(moduleGrp, replace = True)
pm.addAttr(attributeType = "float", longName = "iconScale", minValue = 0.001, softMaxValue = 10.0, defaultValue = 1.0, keyable = True)
pm.setAttr("%s.overrideEnabled" %moduleGrp, 1)
pm.setAttr("%s.overrideColor" %moduleGrp, 6)
utilityNodes = self.SetupBlueprintWeightBasedBlending()
self.SetupModuleVisibility(moduleGrp)
containedNodes = []
containedNodes.extend(self.joints)
containedNodes.append(moduleGrp)
containedNodes.extend(utilityNodes)
self.moduleContainer = pm.container(name = self.moduleContainer)
utils.AddNodeToContainer(self.moduleContainer, containedNodes, True)
utils.AddNodeToContainer(blueprintContainer, self.moduleContainer)
index = 0
for joint in self.joints:
if index > 0:
niceJointName = utils.StripAllNamespaces(joint)[1]
self.PublishNameToModuleContainer("%s.rotate" %joint, "%s_R" %niceJointName, False)
index += 1
self.PublishNameToModuleContainer("%s.levelOfDetail" %moduleGrp, "Control_LOD")
self.PublishNameToModuleContainer("%s.iconScale" %moduleGrp, "Icon_Scale")
self.PublishNameToModuleContainer("%s.overrideColor" %moduleGrp, "Icon_Color")
self.PublishNameToModuleContainer("%s.visibility" %moduleGrp, "Visibility", False)
return (self.joints, moduleGrp, self.moduleContainer)
def Install_finalize(self):
self.PublishModuleContainerNamesToOuterContainers()
pm.setAttr("%s:blueprint_joints_grp.controlModulesInstalled" %self.blueprintNamespace, True)
characterContainer = "%s:character_container" %self.characterNamespace
blueprintContainer = "%s:module_container" %self.blueprintNamespace
containers = [characterContainer, blueprintContainer, self.moduleContainer]
for c in containers:
pm.lockNode(c, lock = True, lockUnpublished = True)
def DuplicateAndRenameCreationPose(self):
joints = pm.duplicate("%s:creationPose_joints_grp" %self.blueprintNamespace, renameChildren = True)
pm.select(joints, hierarchy = True)
joints = pm.ls(selection = True)
for i in range(len(joints)):
nameSuffix = joints[i].rpartition("creationPose_")[2]
joints[i] = pm.rename(joints[i], "%s:%s:%s" %(self.blueprintNamespace, self.moduleNamespace, nameSuffix))
return joints
def SetupBlueprintWeightBasedBlending(self):
settingsLocator = "%s:SETTINGS" %self.blueprintNamespace
attributes = pm.listAttr(settingsLocator, keyable = False)
weightAttributes = []
for attr in attributes:
if attr.find("_weight") != -1:
weightAttributes.append(attr)
value = 0
if len(weightAttributes) == 0:
value = 1
pm.setAttr("%s.creationPoseWeight" %settingsLocator, 0)
pm.select(settingsLocator, replace = True)
weightAttributeName = "%s_weight" %self.moduleNamespace
pm.addAttr(longName = weightAttributeName, attributeType = "double", minValue = 0, maxValue = 1, defaultValue = value, keyable = False)
pm.container("%s:module_container" %self.blueprintNamespace, edit = True, publishAndBind = ["%s.%s" %(settingsLocator, weightAttributeName), weightAttributeName])
currentEntries = pm.attributeQuery("activeModule", node = settingsLocator, listEnum = True)
newEntry = self.moduleNamespace
if currentEntries[0] == "None":
pm.addAttr("%s.activeModule" %settingsLocator, edit = True, enumName = newEntry)
pm.setAttr("%s.activeModule" %settingsLocator, 0)
else:
pm.addAttr("%s.activeModule" %settingsLocator, edit = True, enumName = "%s:%s" %(currentEntries[0], newEntry))
utilityNodes = []
for i in range(1, len(self.joints)):
joint = self.joints[i]
nameSuffix = utils.StripAllNamespaces(joint)[1]
blueprintJoint = "%s:blueprint_%s" %(self.blueprintNamespace, nameSuffix)
weightNodeAttr = "%s.%s" %(settingsLocator, weightAttributeName)
if i < len(self.joints) - 1 or len(self.joints) == 2:
multiplyRotations = pm.shadingNode("multiplyDivide", name = "%s_multiplyRotationsWeight" %joint, asUtility = True)
utilityNodes.append(multiplyRotations)
pm.connectAttr("%s.rotate" %joint, "%s.input1" %multiplyRotations, force = True)
for attr in ["input2X", "input2Y", "input2Z"]:
pm.connectAttr(weightNodeAttr, "%s.%s" %(multiplyRotations, attr), force = True)
index = utils.FindFirstFreeConnection("%s_addRotations.input3D" %blueprintJoint)
pm.connectAttr("%s.output" %multiplyRotations, "%s_addRotations.input3D[%d]" %(blueprintJoint, index), force = True)
if i == 1:
addNode = "%s_addTranslate" %blueprintJoint
if pm.objExists(addNode):
multiplyTranslation = pm.shadingNode("multiplyDivide", name = "%s_multiplyTranslationWeight" %joint, asUtility = True)
utilityNodes.append(multiplyTranslation)
pm.connectAttr("%s.translate" %joint, "%s.input1" %multiplyTranslation, force = True)
for attr in ["input2X", "input2Y", "input2Z"]:
pm.connectAttr(weightNodeAttr, "%s.%s" %(multiplyTranslation, attr), force = True)
index = utils.FindFirstFreeConnection("%s.input3D" %addNode)
pm.connectAttr("%s.output" %multiplyTranslation, "%s.input3D[%d]" %(addNode, index), force = True)
addNode = "%s_addScale" %blueprintJoint
if pm.objExists(addNode):
multiplyScale = pm.shadingNode("multiplyDivide", name = "%s_multiplyScaleWeight" %joint, asUtility = True)
utilityNodes.append(multiplyScale)
pm.connectAttr("%s.scale" %joint, "%s.input1" %multiplyScale, force = True)
for attr in ["input2X", "input2Y", "input2Z"]:
pm.connectAttr(weightNodeAttr, "%s.%s" %(multiplyScale, attr), force = True)
index = utils.FindFirstFreeConnection("%s.input3D" %addNode)
pm.connectAttr("%s.output" %multiplyScale, "%s.input3D[%d]" %(addNode, index), force = True)
else:
multiplyTranslation = pm.shadingNode("multiplyDivide", name = "%s_multiplyTranslationWeight" %joint, asUtility = True)
utilityNodes.append(multiplyTranslation)
pm.connectAttr("%s.translateX" %joint, "%s.input1X" %multiplyTranslation, force = True)
pm.connectAttr(weightNodeAttr, "%s.input2X" %multiplyTranslation, force = True)
addNode = "%s_addTx" %blueprintJoint
index = utils.FindFirstFreeConnection("%s.input1D" %addNode)
pm.connectAttr("%s.outputX" %multiplyTranslation, "%s.input1D[%d]" %(addNode, index), force = True)
return utilityNodes
def SetupModuleVisibility(self, _moduleGrp):
pm.select(_moduleGrp, replace = True)
pm.addAttr(attributeType = "byte", defaultValue = 1, minValue = 0, softMaxValue = 3, longName = "levelOfDetail", keyable = True)
moduleVisibilityMultiply = "%s:moduleVisibilityMultiply" %self.characterNamespace
pm.connectAttr("%s.outputX" %moduleVisibilityMultiply, "%s.visibility" %_moduleGrp, force = True)
def PublishNameToModuleContainer(self, _attribute, _attributeNiceName, _publishToOuterContainer = True):
if self.moduleContainer == None:
return
blueprintName = utils.StripLeadingNamespace(self.blueprintNamespace)[1].partition("__")[2]
attributePrefix = "%s_%s_" %(blueprintName, self.moduleNamespace)
publishedName = "%s%s" %(attributePrefix, _attributeNiceName)
if _publishToOuterContainer:
self.publishedNames.append(publishedName)
pm.container(self.moduleContainer, edit = True, publishAndBind = [_attribute, publishedName])
def PublishModuleContainerNamesToOuterContainers(self):
if self.moduleContainer == None:
return
characterContainer = "%s:character_container" %self.characterNamespace
blueprintContainer = "%s:module_container" %self.blueprintNamespace
for publishedNames in self.publishedNames:
outerPublishedNames = pm.container(blueprintContainer, query = True, publishName = True)
if publishedNames in outerPublishedNames:
continue
pm.container(blueprintContainer, edit = True, publishAndBind = ["%s.%s" %(self.moduleContainer, publishedNames), publishedNames])
pm.container(characterContainer, edit = True, publishAndBind = ["%s.%s" %(blueprintContainer, publishedNames), publishedNames])
def Uninstall(self):
characterContainer = "%s:character_container" %self.characterNamespace
blueprintContainer = "%s:module_container" %self.blueprintNamespace
moduleContainer = self.moduleContainer
containers = [characterContainer, blueprintContainer, moduleContainer]
for c in containers:
pm.lockNode(c, lock = False, lockUnpublished = False)
containers.pop()
blueprintJointsGrp = "%s:blueprint_joints_grp" %self.blueprintNamespace
blueprintJoints = utils.FindJointChain(blueprintJointsGrp)
blueprintJoints.pop(0)
settingsLocator = "%s:SETTINGS" %self.blueprintNamespace
connections = pm.listConnections("%s_addRotations" %blueprintJoints[0], source = True, destination = False)
if len(connections) == 2:
pm.setAttr("%s.controlModulesInstalled" %blueprintJointsGrp, False)
publishedNames = pm.container(moduleContainer, query = True, publishName = True)
publishedNames.sort()
for name in publishedNames:
outerPublishedNames = pm.container(characterContainer, query = True, publishName = True)
if name in outerPublishedNames:
pm.container(characterContainer, edit = True, unbindAndUnpublish = "%s.%s" %(blueprintContainer, name))
pm.container(blueprintContainer, edit = True, unbindAndUnpublish = "%s.%s" %(moduleContainer, name))
pm.delete(moduleContainer)
weightAttributeName = "%s_weight" %self.moduleNamespace
pm.deleteAttr("%s.%s" %(settingsLocator, weightAttributeName))
attributes = pm.listAttr(settingsLocator, keyable = False)
weightAttributes = []
for attr in attributes:
if attr.find("_weight") != -1:
weightAttributes.append(attr)
totalWeight = 0
for attr in weightAttributes:
totalWeight += pm.getAttr("%s.%s" %(settingsLocator, attr))
pm.setAttr("%s.creationPoseWeight" %settingsLocator, 1-totalWeight)
currentEntries = pm.attributeQuery("activeModule", node = settingsLocator, listEnum = True)
currentEntriesList = currentEntries[0].split(":")
ourEntry = self.moduleNamespace
currentEntriesString = ""
for entry in currentEntriesList:
if entry != ourEntry:
currentEntriesString += "%s:" %entry
if currentEntriesString == "":
currentEntriesString = "None"
pm.addAttr("%s.activeModule" %settingsLocator, edit = True, enumName = currentEntriesString)
pm.setAttr("%s.activeModule" %settingsLocator, 0)
pm.namespace(setNamespace = self.blueprintNamespace)
pm.namespace(removeNamespace = self.moduleNamespace)
pm.namespace(setNamespace = ":")
for c in containers:
pm.lockNode(c, lock = True, lockUnpublished = True) | {
"content_hash": "1d01a8a31720eb6731379c81ab984068",
"timestamp": "",
"source": "github",
"line_count": 364,
"max_line_length": 164,
"avg_line_length": 35.83791208791209,
"alnum_prop": 0.7222690686086624,
"repo_name": "Shadowtags/ModularRiggingTool",
"id": "89642b13d11413cecacf29dd226208a4da8b003e",
"size": "13045",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nwModularRiggingTool/Modules/System/controlModule.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mathematica",
"bytes": "3100880"
},
{
"name": "Python",
"bytes": "263312"
}
],
"symlink_target": ""
} |
"""The tests for the Template switch platform."""
import pytest
from homeassistant import setup
from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE
from homeassistant.core import CoreState, State
from homeassistant.setup import async_setup_component
from tests.common import (
assert_setup_component,
async_mock_service,
mock_component,
mock_restore_cache,
)
from tests.components.switch import common
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_template_state_text(hass):
"""Test the state text of a template."""
with assert_setup_component(1, "switch"):
assert await async_setup_component(
hass,
"switch",
{
"switch": {
"platform": "template",
"switches": {
"test_template_switch": {
"value_template": "{{ states.switch.test_state.state }}",
"turn_on": {
"service": "switch.turn_on",
"entity_id": "switch.test_state",
},
"turn_off": {
"service": "switch.turn_off",
"entity_id": "switch.test_state",
},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
hass.states.async_set("switch.test_state", STATE_ON)
await hass.async_block_till_done()
state = hass.states.get("switch.test_template_switch")
assert state.state == STATE_ON
hass.states.async_set("switch.test_state", STATE_OFF)
await hass.async_block_till_done()
state = hass.states.get("switch.test_template_switch")
assert state.state == STATE_OFF
async def test_template_state_boolean_on(hass):
"""Test the setting of the state with boolean on."""
with assert_setup_component(1, "switch"):
assert await async_setup_component(
hass,
"switch",
{
"switch": {
"platform": "template",
"switches": {
"test_template_switch": {
"value_template": "{{ 1 == 1 }}",
"turn_on": {
"service": "switch.turn_on",
"entity_id": "switch.test_state",
},
"turn_off": {
"service": "switch.turn_off",
"entity_id": "switch.test_state",
},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("switch.test_template_switch")
assert state.state == STATE_ON
async def test_template_state_boolean_off(hass):
"""Test the setting of the state with off."""
with assert_setup_component(1, "switch"):
assert await async_setup_component(
hass,
"switch",
{
"switch": {
"platform": "template",
"switches": {
"test_template_switch": {
"value_template": "{{ 1 == 2 }}",
"turn_on": {
"service": "switch.turn_on",
"entity_id": "switch.test_state",
},
"turn_off": {
"service": "switch.turn_off",
"entity_id": "switch.test_state",
},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("switch.test_template_switch")
assert state.state == STATE_OFF
async def test_icon_template(hass):
"""Test icon template."""
with assert_setup_component(1, "switch"):
assert await async_setup_component(
hass,
"switch",
{
"switch": {
"platform": "template",
"switches": {
"test_template_switch": {
"value_template": "{{ states.switch.test_state.state }}",
"turn_on": {
"service": "switch.turn_on",
"entity_id": "switch.test_state",
},
"turn_off": {
"service": "switch.turn_off",
"entity_id": "switch.test_state",
},
"icon_template": "{% if states.switch.test_state.state %}"
"mdi:check"
"{% endif %}",
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("switch.test_template_switch")
assert state.attributes.get("icon") == ""
hass.states.async_set("switch.test_state", STATE_ON)
await hass.async_block_till_done()
state = hass.states.get("switch.test_template_switch")
assert state.attributes["icon"] == "mdi:check"
async def test_entity_picture_template(hass):
"""Test entity_picture template."""
with assert_setup_component(1, "switch"):
assert await async_setup_component(
hass,
"switch",
{
"switch": {
"platform": "template",
"switches": {
"test_template_switch": {
"value_template": "{{ states.switch.test_state.state }}",
"turn_on": {
"service": "switch.turn_on",
"entity_id": "switch.test_state",
},
"turn_off": {
"service": "switch.turn_off",
"entity_id": "switch.test_state",
},
"entity_picture_template": "{% if states.switch.test_state.state %}"
"/local/switch.png"
"{% endif %}",
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("switch.test_template_switch")
assert state.attributes.get("entity_picture") == ""
hass.states.async_set("switch.test_state", STATE_ON)
await hass.async_block_till_done()
state = hass.states.get("switch.test_template_switch")
assert state.attributes["entity_picture"] == "/local/switch.png"
async def test_template_syntax_error(hass):
"""Test templating syntax error."""
with assert_setup_component(0, "switch"):
assert await async_setup_component(
hass,
"switch",
{
"switch": {
"platform": "template",
"switches": {
"test_template_switch": {
"value_template": "{% if rubbish %}",
"turn_on": {
"service": "switch.turn_on",
"entity_id": "switch.test_state",
},
"turn_off": {
"service": "switch.turn_off",
"entity_id": "switch.test_state",
},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert hass.states.async_all() == []
async def test_invalid_name_does_not_create(hass):
"""Test invalid name."""
with assert_setup_component(0, "switch"):
assert await async_setup_component(
hass,
"switch",
{
"switch": {
"platform": "template",
"switches": {
"test INVALID switch": {
"value_template": "{{ rubbish }",
"turn_on": {
"service": "switch.turn_on",
"entity_id": "switch.test_state",
},
"turn_off": {
"service": "switch.turn_off",
"entity_id": "switch.test_state",
},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert hass.states.async_all() == []
async def test_invalid_switch_does_not_create(hass):
"""Test invalid switch."""
with assert_setup_component(0, "switch"):
assert await async_setup_component(
hass,
"switch",
{
"switch": {
"platform": "template",
"switches": {"test_template_switch": "Invalid"},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert hass.states.async_all() == []
async def test_no_switches_does_not_create(hass):
"""Test if there are no switches no creation."""
with assert_setup_component(0, "switch"):
assert await async_setup_component(
hass, "switch", {"switch": {"platform": "template"}}
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert hass.states.async_all() == []
async def test_missing_on_does_not_create(hass):
"""Test missing on."""
with assert_setup_component(0, "switch"):
assert await async_setup_component(
hass,
"switch",
{
"switch": {
"platform": "template",
"switches": {
"test_template_switch": {
"value_template": "{{ states.switch.test_state.state }}",
"not_on": {
"service": "switch.turn_on",
"entity_id": "switch.test_state",
},
"turn_off": {
"service": "switch.turn_off",
"entity_id": "switch.test_state",
},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert hass.states.async_all() == []
async def test_missing_off_does_not_create(hass):
"""Test missing off."""
with assert_setup_component(0, "switch"):
assert await async_setup_component(
hass,
"switch",
{
"switch": {
"platform": "template",
"switches": {
"test_template_switch": {
"value_template": "{{ states.switch.test_state.state }}",
"turn_on": {
"service": "switch.turn_on",
"entity_id": "switch.test_state",
},
"not_off": {
"service": "switch.turn_off",
"entity_id": "switch.test_state",
},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert hass.states.async_all() == []
async def test_on_action(hass, calls):
"""Test on action."""
assert await async_setup_component(
hass,
"switch",
{
"switch": {
"platform": "template",
"switches": {
"test_template_switch": {
"value_template": "{{ states.switch.test_state.state }}",
"turn_on": {"service": "test.automation"},
"turn_off": {
"service": "switch.turn_off",
"entity_id": "switch.test_state",
},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
hass.states.async_set("switch.test_state", STATE_OFF)
await hass.async_block_till_done()
state = hass.states.get("switch.test_template_switch")
assert state.state == STATE_OFF
await common.async_turn_on(hass, "switch.test_template_switch")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_on_action_optimistic(hass, calls):
"""Test on action in optimistic mode."""
assert await async_setup_component(
hass,
"switch",
{
"switch": {
"platform": "template",
"switches": {
"test_template_switch": {
"turn_on": {"service": "test.automation"},
"turn_off": {
"service": "switch.turn_off",
"entity_id": "switch.test_state",
},
}
},
}
},
)
await hass.async_start()
await hass.async_block_till_done()
hass.states.async_set("switch.test_template_switch", STATE_OFF)
await hass.async_block_till_done()
state = hass.states.get("switch.test_template_switch")
assert state.state == STATE_OFF
await common.async_turn_on(hass, "switch.test_template_switch")
await hass.async_block_till_done()
state = hass.states.get("switch.test_template_switch")
assert len(calls) == 1
assert state.state == STATE_ON
async def test_off_action(hass, calls):
"""Test off action."""
assert await async_setup_component(
hass,
"switch",
{
"switch": {
"platform": "template",
"switches": {
"test_template_switch": {
"value_template": "{{ states.switch.test_state.state }}",
"turn_on": {
"service": "switch.turn_on",
"entity_id": "switch.test_state",
},
"turn_off": {"service": "test.automation"},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
hass.states.async_set("switch.test_state", STATE_ON)
await hass.async_block_till_done()
state = hass.states.get("switch.test_template_switch")
assert state.state == STATE_ON
await common.async_turn_off(hass, "switch.test_template_switch")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_off_action_optimistic(hass, calls):
"""Test off action in optimistic mode."""
assert await async_setup_component(
hass,
"switch",
{
"switch": {
"platform": "template",
"switches": {
"test_template_switch": {
"turn_off": {"service": "test.automation"},
"turn_on": {
"service": "switch.turn_on",
"entity_id": "switch.test_state",
},
}
},
}
},
)
await hass.async_start()
await hass.async_block_till_done()
hass.states.async_set("switch.test_template_switch", STATE_ON)
await hass.async_block_till_done()
state = hass.states.get("switch.test_template_switch")
assert state.state == STATE_ON
await common.async_turn_off(hass, "switch.test_template_switch")
await hass.async_block_till_done()
state = hass.states.get("switch.test_template_switch")
assert len(calls) == 1
assert state.state == STATE_OFF
async def test_restore_state(hass):
"""Test state restoration."""
mock_restore_cache(
hass,
(
State("switch.s1", STATE_ON),
State("switch.s2", STATE_OFF),
),
)
hass.state = CoreState.starting
mock_component(hass, "recorder")
await async_setup_component(
hass,
"switch",
{
"switch": {
"platform": "template",
"switches": {
"s1": {
"turn_on": {"service": "test.automation"},
"turn_off": {"service": "test.automation"},
},
"s2": {
"turn_on": {"service": "test.automation"},
"turn_off": {"service": "test.automation"},
},
},
}
},
)
await hass.async_block_till_done()
state = hass.states.get("switch.s1")
assert state
assert state.state == STATE_ON
state = hass.states.get("switch.s2")
assert state
assert state.state == STATE_OFF
async def test_available_template_with_entities(hass):
"""Test availability templates with values from other entities."""
await setup.async_setup_component(
hass,
"switch",
{
"switch": {
"platform": "template",
"switches": {
"test_template_switch": {
"value_template": "{{ 1 == 1 }}",
"turn_on": {
"service": "switch.turn_on",
"entity_id": "switch.test_state",
},
"turn_off": {
"service": "switch.turn_off",
"entity_id": "switch.test_state",
},
"availability_template": "{{ is_state('availability_state.state', 'on') }}",
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
hass.states.async_set("availability_state.state", STATE_ON)
await hass.async_block_till_done()
assert hass.states.get("switch.test_template_switch").state != STATE_UNAVAILABLE
hass.states.async_set("availability_state.state", STATE_OFF)
await hass.async_block_till_done()
assert hass.states.get("switch.test_template_switch").state == STATE_UNAVAILABLE
async def test_invalid_availability_template_keeps_component_available(hass, caplog):
"""Test that an invalid availability keeps the device available."""
await setup.async_setup_component(
hass,
"switch",
{
"switch": {
"platform": "template",
"switches": {
"test_template_switch": {
"value_template": "{{ true }}",
"turn_on": {
"service": "switch.turn_on",
"entity_id": "switch.test_state",
},
"turn_off": {
"service": "switch.turn_off",
"entity_id": "switch.test_state",
},
"availability_template": "{{ x - 12 }}",
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert hass.states.get("switch.test_template_switch").state != STATE_UNAVAILABLE
assert ("UndefinedError: 'x' is undefined") in caplog.text
async def test_unique_id(hass):
"""Test unique_id option only creates one switch per id."""
await setup.async_setup_component(
hass,
"switch",
{
"switch": {
"platform": "template",
"switches": {
"test_template_switch_01": {
"unique_id": "not-so-unique-anymore",
"value_template": "{{ true }}",
"turn_on": {
"service": "switch.turn_on",
"entity_id": "switch.test_state",
},
"turn_off": {
"service": "switch.turn_off",
"entity_id": "switch.test_state",
},
},
"test_template_switch_02": {
"unique_id": "not-so-unique-anymore",
"value_template": "{{ false }}",
"turn_on": {
"service": "switch.turn_on",
"entity_id": "switch.test_state",
},
"turn_off": {
"service": "switch.turn_off",
"entity_id": "switch.test_state",
},
},
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
| {
"content_hash": "e0dba52a1fcf6ff3affcbc961ac61690",
"timestamp": "",
"source": "github",
"line_count": 701,
"max_line_length": 100,
"avg_line_length": 32.497860199714694,
"alnum_prop": 0.4374259251130328,
"repo_name": "tchellomello/home-assistant",
"id": "6dab2569e59129d1929974ba88f41de000d0f7ec",
"size": "22781",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/template/test_switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "26713364"
},
{
"name": "Shell",
"bytes": "4528"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
for download in orm['downloads.Download'].objects.all():
download.visible = not download.do_not_list
download.save()
def backwards(self, orm):
for download in orm['downloads.Download'].objects.all():
download.do_not_list = not download.visible
download.save()
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'category.category': {
'Meta': {'ordering': "('title',)", 'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'category.tag': {
'Meta': {'ordering': "('title',)", 'object_name': 'Tag'},
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'downloads.download': {
'Meta': {'ordering': "['primary_category', 'title']", 'object_name': 'Download', '_ormbases': ['jmbo.ModelBase']},
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'modelbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['jmbo.ModelBase']", 'unique': 'True', 'primary_key': 'True'}),
'do_not_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'downloads.textoverlaytemporarydownload': {
'Meta': {'object_name': 'TextOverlayTemporaryDownload'},
'background_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'colour': ('downloads.fields.ColourField', [], {'max_length': '7'}),
'download_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['downloads.Download']", 'unique': 'True', 'primary_key': 'True'}),
'font': ('django.db.models.fields.FilePathField', [], {'path': "'/usr/share/fonts/truetype/'", 'max_length': '100', 'recursive': 'True'}),
'font_size': ('django.db.models.fields.PositiveIntegerField', [], {}),
'height': ('django.db.models.fields.PositiveIntegerField', [], {}),
'text': ('django.db.models.fields.TextField', [], {}),
'unique_per_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'width': ('django.db.models.fields.PositiveIntegerField', [], {}),
'x': ('django.db.models.fields.PositiveIntegerField', [], {}),
'y': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'jmbo.modelbase': {
'Meta': {'ordering': "('-created',)", 'object_name': 'ModelBase'},
'anonymous_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'anonymous_likes': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'comments_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'comments_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modelbase_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'likes_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'likes_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'primary_category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'primary_modelbase_set'", 'null': 'True', 'to': "orm['category.Category']"}),
'publish_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'publishers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['publisher.Publisher']", 'null': 'True', 'blank': 'True'}),
'retract_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'unpublished'", 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'subtitle': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Tag']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'photologue.photoeffect': {
'Meta': {'object_name': 'PhotoEffect'},
'background_color': ('django.db.models.fields.CharField', [], {'default': "'#FFFFFF'", 'max_length': '7'}),
'brightness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'color': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'contrast': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'filters': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'reflection_size': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'reflection_strength': ('django.db.models.fields.FloatField', [], {'default': '0.6'}),
'sharpness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'transpose_method': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'})
},
'publisher.publisher': {
'Meta': {'object_name': 'Publisher'},
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'secretballot.vote': {
'Meta': {'unique_together': "(('token', 'content_type', 'object_id'),)", 'object_name': 'Vote'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['downloads']
symmetrical = True
| {
"content_hash": "47fab5d0feb386839bc0439fbaee4709",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 195,
"avg_line_length": 80.52147239263803,
"alnum_prop": 0.5525333333333333,
"repo_name": "praekelt/jmbo-downloads",
"id": "4c83ff0c8481adc2f68063897bbd4f1ce608d11f",
"size": "13149",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "downloads/migrations/0003_rename_imagemod_to_temporarydownload.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1907"
},
{
"name": "Python",
"bytes": "78529"
}
],
"symlink_target": ""
} |
import sys
from os import path
from subprocess import call
from shutil import rmtree
import json
from re import sub
from pprint import pprint
" Terminal Colors "
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
" GSM "
class GSM(object):
version = str('1.0.0')
json_file = str('gitsubmodule.json')
gitmodules_file = str('.gitmodules')
gitconfig_file = path.join('.git', 'config')
dependencies = dict()
devDependencies = dict()
cmd = str('install');
_jsonExists = bool(False)
" Property. "
@property
def jsonExists(self):
if path.isfile(self.json_file):
exists = True
else:
exists = False
self._jsonExists = exists
return self._jsonExists
" Initialise. "
def __init__(self):
# self.message(value="git submodule manager %s" % self.version)
pass
" Run. "
def run(self):
# parse args
if self.parseArgs() == True:
# install
if self.cmd == 'install':
if self.readJson() == True:
self.install()
# update
elif self.cmd == 'update':
self.update()
# remove
elif self.cmd == 'remove':
# e.g: test/gsm_test
plugin_path = sys.argv[2]
self.remove(plugin_path)
else:
pass
" Message "
def message(self, value, code=None):
if code:
if code == 'OK':
color = bcolors.OKGREEN
elif code == 'ERR':
color = bcolors.FAIL
print("gsm %s%s!%s %s" % (color, code, bcolors.ENDC, value))
else:
print(value)
" Parse Arguments. "
def parseArgs(self):
# check argv length
if len(sys.argv) < 2:
self.message(code='ERR', value="invalid command, try -h for help")
return False
# if command argument
cmd = sys.argv[1]
if cmd:
if cmd == '-h':
self.message(value="- install git submodules:")
self.message(value=" python gsm.py install")
return False
elif cmd in ['install', 'update', 'remove']:
self.cmd = cmd
return True
else:
self.message(code='ERR', value="unknown command `%s`" % cmd)
return False
else:
self.message(code='ERR', value="no command given")
return False
" Read JSON. "
def readJson(self):
if self.jsonExists == True:
with open(self.json_file) as data_file:
try:
data = json.load(data_file)
except ValueError as e:
self.message(code='ERR', value="no JSON object could be decoded, please check `%s`" % self.json_file)
return False
self.dependencies = data["dependencies"].items()
self.devDependencies = data["devDependencies"].items()
# self.message(code='OK', value="%s" % self.json_file)
return True
else:
self.message(code='ERR', value="could not find `%s`" % self.json_file)
return False
" Install (Add) Git Submodules. "
def install(self):
for dst, src in self.dependencies:
self.message(value="- Installing %s" % (dst))
self.message(value=" Source: %s" % (src))
call(["git", "submodule", "add", "-f", src, dst])
# check if all submodules installed
self.message(code='OK', value='install')
" Update Git Submodules. "
def update(self):
self.message(value="- Updating")
call(["git", "submodule", "update", "--init", "--recursive"])
self.message(code='OK', value='update')
" Remove Git Submodule. "
def remove(self, plugin_path):
self.message(value="- Removing %s%s%s" % (bcolors.BOLD, plugin_path, bcolors.ENDC))
if self.removeModuleEntry(plugin_path) == True:
pass
if self.removeModuleConfig(plugin_path) == True:
pass
if self.removeModuleCached(plugin_path) == True:
pass
if self.removeModuleDirectory(plugin_path) == True:
pass
# Remove Module Entry
def removeModuleEntry(self, plugin_path):
# remove the module's entry in the .gitmodules file
data = ''
skip = 0
with open (self.gitmodules_file, "r") as gitmodules_file:
for line in gitmodules_file:
if skip == 0:
if line.rstrip() == "[submodule \"%s\"]" % plugin_path:
# skip next 2 lines (path, url)
skip = 2
else:
data += "".join(line)
else:
skip = skip -1
# update file
try:
f = open(self.gitmodules_file, "w")
f.write(data)
f.close()
self.message(code='OK', value='removed from %s' % (self.gitmodules_file))
return True
except IOError as e:
self.message(code='ERR', value="I/O error: %s" % e)
return False
# Remove Module Config
def removeModuleConfig(self, plugin_path):
# remove the module's entry in the .git/config file
data = ''
skip = 0
with open (self.gitconfig_file, "r") as gitconfig_file:
for line in gitconfig_file:
if skip == 0:
if line.rstrip() == "[submodule \"%s\"]" % plugin_path:
# skip next line (url)
skip = 1
else:
data += "".join(line)
else:
skip = skip -1
# update file
try:
f = open(self.gitconfig_file, "w")
f.write(data)
f.close()
self.message(code='OK', value='removed from %s' % (self.gitconfig_file))
return True
except IOError as e:
self.message(code='ERR', value="I/O error: %s" % e)
return False
# Remove Module Cached
def removeModuleCached(self, plugin_path):
call(["git", "rm", "--cached", plugin_path])
self.message(code='OK', value='removed from cached')
return True
# Remove Module Directory
def removeModuleDirectory(self, plugin_path):
if path.exists(plugin_path):
rmtree(plugin_path)
self.message(code='OK', value='removed directory %s' % plugin_path)
return True
else:
return False
" Main "
def main():
gsm = GSM()
gsm.run()
sys.exit()
" Enter Main "
if __name__ == '__main__':
main() | {
"content_hash": "bb9fdc00093cd108f3f6f3fc614d242e",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 121,
"avg_line_length": 31.972602739726028,
"alnum_prop": 0.506426735218509,
"repo_name": "ericmdev/gsm",
"id": "9ddb16dbaa93d5ffab54775230fdd5df5dc75d89",
"size": "7043",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gsm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7043"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from math import sqrt
from os.path import abspath, dirname, join
import sys
from time import localtime, strftime
from git import Repo
import json
sys.path.append(abspath(dirname(dirname(__file__))))
from utils.data_io import load_numpy_array_from_file
from utils.data_stats import load_stats_from_file
from utils.data_paths import DATA_DIR_PATH, RESULTS_DIR_PATH
def calculate_rmse(true_ratings, predictions):
return sqrt(((predictions - true_ratings) ** 2).mean())
def predict_and_save_rmse(model, test_points, rmse_file_path,
keep_predictions=False,
predictions_file_name='noname'):
predictions = model.predict(test_points)
true_ratings = test_points[:, 3]
rmse = calculate_rmse(true_ratings, predictions)
print('RMSE:', rmse)
save_rmse(rmse, rmse_file_path, append=True)
if keep_predictions:
save_predictions(predictions, predictions_file_name)
def save_model(model, model_file_name):
print('Saving model to {}'.format(model_file_name))
model.save(model_file_name)
def save_run_info(model, test_set_name, train_set_name, date_string,
time_string, feature_epoch_order, create_files,
epochs, run_multi, run_name, commit):
info_file_name = ('{model_class}_{run_name}_{short_commit}_{start_time}'
'_info.json'
.format(model_class=model.__class__.__name__,
short_commit=commit[:5],
run_name=run_name,
start_time=time_string)
)
info_file_path = join(RESULTS_DIR_PATH, info_file_name)
# Create a dict of data
excluded_params = ['users', 'movies', 'train_points', 'residuals',
'stats', 'max_movie', 'max_user']
run_info = {key: value for key, value in model.__dict__.items()
if key not in excluded_params}
run_info['algorithm'] = model.__class__.__name__
run_info['last_commit'] = commit
run_info['train_set_name'] = train_set_name
run_info['name'] = run_name
run_info['time'] = time_string
run_info['date'] = date_string
run_info['test_set_name'] = test_set_name
run_info['num_epochs'] = epochs
run_info['create_files'] = create_files
run_info['run_multi'] = run_multi
run_info['feature_epoch_order'] = feature_epoch_order
json.dump(run_info, open(info_file_path, 'w'), indent=4,
sort_keys=True)
return info_file_path
def run(model, train_set_name, test_set_name, run_name, epochs=None,
feature_epoch_order=False, create_files=True, run_multi=False):
print('Training {model_class} on "{train}" ratings'
.format(model_class=model.__class__.__name__, train=train_set_name))
if not create_files:
print("WARNING: 'nofile' flag detected. No model file will be " +
"saved to disk after this run.\n***MODEL WILL BE LOST.")
confirm = input("Are you sure you want to continue? [Y/n]")
if confirm == 'Y' or confirm == 'y' or confirm == '':
pass
else:
return
if epochs is not None:
print('Number of epochs:', epochs)
if model.num_features is not None:
print('Number of features:', model.num_features)
train_file_path = join(DATA_DIR_PATH, train_set_name + '.npy')
stats_file_path = join(DATA_DIR_PATH, 'old_stats', train_set_name +
'_stats.p')
model.debug = True
train_points = load_numpy_array_from_file(train_file_path)
stats = load_stats_from_file(stats_file_path)
test_file_path = join(DATA_DIR_PATH, test_set_name + '.npy')
test_points = load_numpy_array_from_file(test_file_path)
# Save run information in [...]_info.txt file
date_format = '%b-%d'
time_format = '%H%M'
latest_commit = Repo('.').commit('HEAD').hexsha
date_string = strftime(date_format, localtime())
time_string = strftime(time_format, localtime())
run_info_file_path = save_run_info(
model=model, test_set_name=test_set_name,
train_set_name=train_set_name,
epochs=epochs,
time_string=time_string,
date_string=date_string,
feature_epoch_order=feature_epoch_order,
create_files=create_files,
run_multi=run_multi,
run_name=run_name,
commit=latest_commit
)
print('Wrote run info to ', run_info_file_path)
rmse_file_path = run_info_file_path.replace('info.json', 'rmse.txt')
predictions_file_name = (run_info_file_path.split('/')[-1]
.replace('info.json', 'predictions.dta'))
if not run_multi:
if not feature_epoch_order:
model.train(train_points, stats=stats, epochs=epochs)
else:
model.train_feature_epoch(train_points=train_points, stats=stats,
epochs=epochs)
else:
print("Training multi!")
for epoch in range(epochs):
if epoch == 0:
model.train(train_points, stats=stats, epochs=1)
else:
model.train_more(epochs=1)
if create_files:
print('Predicting "{test}" ratings'.format(test=test_set_name))
predict_and_save_rmse(
model, test_points=test_points,
rmse_file_path=rmse_file_path,
keep_predictions=(create_files and epoch == epochs-1),
predictions_file_name=predictions_file_name
)
model.train_points = None
if create_files:
model_file_name = (run_info_file_path.split('/')[-1]
.replace('info.json', 'model.p'))
save_model(model, model_file_name)
if not run_multi:
# duplicate save if run_multi
print('Predicting "{test}" ratings'.format(test=test_set_name))
predict_and_save_rmse(model, test_points=test_points,
rmse_file_path=rmse_file_path,
keep_predictions=True,
predictions_file_name=predictions_file_name)
def save_predictions(predictions, predictions_file_name):
print('Saving predictions to {}'.format(predictions_file_name))
predictions_file_path = join(RESULTS_DIR_PATH, predictions_file_name)
with open(predictions_file_path, 'w+') as predictions_file:
predictions_file.writelines(['{:.3f}\n'.format(p) for p in predictions])
def save_rmse(rmse, rmse_file_path, append=True):
write_format = 'w+'
if append:
write_format = 'a+'
with open(rmse_file_path, write_format) as rmse_file:
rmse_file.write('{}\n'.format(rmse))
| {
"content_hash": "3a681b2a7bd295a019ca7b65cd753043",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 80,
"avg_line_length": 41.877300613496935,
"alnum_prop": 0.5947846469381776,
"repo_name": "jvanbrug/netflix",
"id": "ca41b87e720f7330451c77c2bace9f8bcd59addd",
"size": "6826",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/run_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5381"
},
{
"name": "Makefile",
"bytes": "727"
},
{
"name": "Python",
"bytes": "97404"
}
],
"symlink_target": ""
} |
r"""
Fourier transform
=================
The graph Fourier transform :meth:`pygsp.graphs.Graph.gft` transforms a
signal from the vertex domain to the spectral domain. The smoother the signal
(see :meth:`pygsp.graphs.Graph.dirichlet_energy`), the lower in the frequencies
its energy is concentrated.
"""
import numpy as np
from matplotlib import pyplot as plt
import pygsp as pg
G = pg.graphs.Sensor(seed=42)
G.compute_fourier_basis()
scales = [10, 3, 0]
limit = 0.44
fig, axes = plt.subplots(2, len(scales), figsize=(12, 4))
fig.subplots_adjust(hspace=0.5)
x0 = np.random.default_rng(1).normal(size=G.N)
for i, scale in enumerate(scales):
g = pg.filters.Heat(G, scale)
x = g.filter(x0).squeeze()
x /= np.linalg.norm(x)
x_hat = G.gft(x).squeeze()
assert np.all((-limit < x) & (x < limit))
G.plot(x, limits=[-limit, limit], ax=axes[0, i])
axes[0, i].set_axis_off()
axes[0, i].set_title('$x^T L x = {:.2f}$'.format(G.dirichlet_energy(x)))
axes[1, i].plot(G.e, np.abs(x_hat), '.-')
axes[1, i].set_xticks(range(0, 16, 4))
axes[1, i].set_xlabel(r'graph frequency $\lambda$')
axes[1, i].set_ylim(-0.05, 0.95)
axes[1, 0].set_ylabel(r'frequency content $\hat{x}(\lambda)$')
# axes[0, 0].set_title(r'$x$: signal in the vertex domain')
# axes[1, 0].set_title(r'$\hat{x}$: signal in the spectral domain')
fig.tight_layout()
| {
"content_hash": "de19f507e7e89e0c1428f2f46cd084ae",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 79,
"avg_line_length": 29.804347826086957,
"alnum_prop": 0.6447848285922684,
"repo_name": "epfl-lts2/pygsp",
"id": "ac89563c611960ba9eda8f79a335498b8563cd25",
"size": "1371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/fourier_transform.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2796"
},
{
"name": "Makefile",
"bytes": "966"
},
{
"name": "Python",
"bytes": "392112"
},
{
"name": "Shell",
"bytes": "90"
}
],
"symlink_target": ""
} |
import numpy as np
# Project
from astropy import units as u
from astropy.utils import ShapedLikeNDArray
__all__ = ['Attribute', 'TimeAttribute', 'QuantityAttribute',
'EarthLocationAttribute', 'CoordinateAttribute',
'CartesianRepresentationAttribute',
'DifferentialAttribute']
class Attribute:
"""A non-mutable data descriptor to hold a frame attribute.
This class must be used to define frame attributes (e.g. ``equinox`` or
``obstime``) that are included in a frame class definition.
Examples
--------
The `~astropy.coordinates.FK4` class uses the following class attributes::
class FK4(BaseCoordinateFrame):
equinox = TimeAttribute(default=_EQUINOX_B1950)
obstime = TimeAttribute(default=None,
secondary_attribute='equinox')
This means that ``equinox`` and ``obstime`` are available to be set as
keyword arguments when creating an ``FK4`` class instance and are then
accessible as instance attributes. The instance value for the attribute
must be stored in ``'_' + <attribute_name>`` by the frame ``__init__``
method.
Note in this example that ``equinox`` and ``obstime`` are time attributes
and use the ``TimeAttributeFrame`` class. This subclass overrides the
``convert_input`` method to validate and convert inputs into a ``Time``
object.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
name = '<unbound>'
def __init__(self, default=None, secondary_attribute=''):
self.default = default
self.secondary_attribute = secondary_attribute
super().__init__()
def __set_name__(self, owner, name):
self.name = name
def convert_input(self, value):
"""
Validate the input ``value`` and convert to expected attribute class.
The base method here does nothing, but subclasses can implement this
as needed. The method should catch any internal exceptions and raise
ValueError with an informative message.
The method returns the validated input along with a boolean that
indicates whether the input value was actually converted. If the input
value was already the correct type then the ``converted`` return value
should be ``False``.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
output_value
The ``value`` converted to the correct type (or just ``value`` if
``converted`` is False)
converted : bool
True if the conversion was actually performed, False otherwise.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
return value, False
def __get__(self, instance, frame_cls=None):
if instance is None:
out = self.default
else:
out = getattr(instance, '_' + self.name, self.default)
if out is None:
out = getattr(instance, self.secondary_attribute, self.default)
out, converted = self.convert_input(out)
if instance is not None:
instance_shape = getattr(instance, 'shape', None) # None if instance (frame) has no data!
if instance_shape is not None and (getattr(out, 'shape', ()) and
out.shape != instance_shape):
# If the shapes do not match, try broadcasting.
try:
if isinstance(out, ShapedLikeNDArray):
out = out._apply(np.broadcast_to, shape=instance_shape,
subok=True)
else:
out = np.broadcast_to(out, instance_shape, subok=True)
except ValueError:
# raise more informative exception.
raise ValueError(
"attribute {} should be scalar or have shape {}, "
"but is has shape {} and could not be broadcast."
.format(self.name, instance_shape, out.shape))
converted = True
if converted:
setattr(instance, '_' + self.name, out)
return out
def __set__(self, instance, val):
raise AttributeError('Cannot set frame attribute')
class TimeAttribute(Attribute):
"""
Frame attribute descriptor for quantities that are Time objects.
See the `~astropy.coordinates.Attribute` API doc for further
information.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def convert_input(self, value):
"""
Convert input value to a Time object and validate by running through
the Time constructor. Also check that the input was a scalar.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
from astropy.time import Time
if value is None:
return None, False
if isinstance(value, Time):
out = value
converted = False
else:
try:
out = Time(value)
except Exception as err:
raise ValueError(
f'Invalid time input {self.name}={value!r}\n{err}')
converted = True
# Set attribute as read-only for arrays (not allowed by numpy
# for array scalars)
if out.shape:
out.writeable = False
return out, converted
class CartesianRepresentationAttribute(Attribute):
"""
A frame attribute that is a CartesianRepresentation with specified units.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
unit : unit object or None
Name of a unit that the input will be converted into. If None, no
unit-checking or conversion is performed
"""
def __init__(self, default=None, secondary_attribute='', unit=None):
super().__init__(default, secondary_attribute)
self.unit = unit
def convert_input(self, value):
"""
Checks that the input is a CartesianRepresentation with the correct
unit, or the special value ``[0, 0, 0]``.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if (isinstance(value, list) and len(value) == 3 and
all(v == 0 for v in value) and self.unit is not None):
return CartesianRepresentation(np.zeros(3) * self.unit), True
else:
# is it a CartesianRepresentation with correct unit?
if hasattr(value, 'xyz') and value.xyz.unit == self.unit:
return value, False
converted = True
# if it's a CartesianRepresentation, get the xyz Quantity
value = getattr(value, 'xyz', value)
if not hasattr(value, 'unit'):
raise TypeError('tried to set a {} with something that does '
'not have a unit.'
.format(self.__class__.__name__))
value = value.to(self.unit)
# now try and make a CartesianRepresentation.
cartrep = CartesianRepresentation(value, copy=False)
return cartrep, converted
class QuantityAttribute(Attribute):
"""
A frame attribute that is a quantity with specified units and shape
(optionally).
Can be `None`, which should be used for special cases in associated
frame transformations like "this quantity should be ignored" or similar.
Parameters
----------
default : value or Quantity or None
Default value for the attribute if the user does not supply one. If a
Quantity, it must be consistent with ``unit``, or if a value, ``unit``
cannot be None.
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
unit : unit object or None
Name of a unit that the input will be converted into. If None, no
unit-checking or conversion is performed
shape : tuple or None
If given, specifies the shape the attribute must be
"""
def __init__(self, default=None, secondary_attribute='', unit=None,
shape=None):
if default is None and unit is None:
raise ValueError('Either a default quantity value must be '
'provided, or a unit must be provided to define a '
'QuantityAttribute.')
if default is not None and unit is None:
unit = default.unit
self.unit = unit
self.shape = shape
default = self.convert_input(default)[0]
super().__init__(default, secondary_attribute)
def convert_input(self, value):
"""
Checks that the input is a Quantity with the necessary units (or the
special value ``0``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
if (not hasattr(value, 'unit') and self.unit != u.dimensionless_unscaled
and np.any(value != 0)):
raise TypeError('Tried to set a QuantityAttribute with '
'something that does not have a unit.')
oldvalue = value
value = u.Quantity(oldvalue, self.unit, copy=False)
if self.shape is not None and value.shape != self.shape:
if value.shape == () and oldvalue == 0:
# Allow a single 0 to fill whatever shape is needed.
value = np.broadcast_to(value, self.shape, subok=True)
else:
raise ValueError(
f'The provided value has shape "{value.shape}", but '
f'should have shape "{self.shape}"')
converted = oldvalue is not value
return value, converted
class EarthLocationAttribute(Attribute):
"""
A frame attribute that can act as a `~astropy.coordinates.EarthLocation`.
It can be created as anything that can be transformed to the
`~astropy.coordinates.ITRS` frame, but always presents as an `EarthLocation`
when accessed after creation.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def convert_input(self, value):
"""
Checks that the input is a Quantity with the necessary units (or the
special value ``0``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
elif isinstance(value, EarthLocation):
return value, False
else:
# we have to do the import here because of some tricky circular deps
from .builtin_frames import ITRS
if not hasattr(value, 'transform_to'):
raise ValueError('"{}" was passed into an '
'EarthLocationAttribute, but it does not have '
'"transform_to" method'.format(value))
itrsobj = value.transform_to(ITRS())
return itrsobj.earth_location, True
class CoordinateAttribute(Attribute):
"""
A frame attribute which is a coordinate object. It can be given as a
`~astropy.coordinates.SkyCoord` or a low-level frame instance. If a
low-level frame instance is provided, it will always be upgraded to be a
`~astropy.coordinates.SkyCoord` to ensure consistent transformation
behavior. The coordinate object will always be returned as a low-level
frame instance when accessed.
Parameters
----------
frame : a coordinate frame class
The type of frame this attribute can be
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def __init__(self, frame, default=None, secondary_attribute=''):
self._frame = frame
super().__init__(default, secondary_attribute)
def convert_input(self, value):
"""
Checks that the input is a SkyCoord with the necessary units (or the
special value ``None``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
from astropy.coordinates import SkyCoord
if value is None:
return None, False
elif isinstance(value, self._frame):
return value, False
else:
value = SkyCoord(value) # always make the value a SkyCoord
transformedobj = value.transform_to(self._frame)
return transformedobj.frame, True
class DifferentialAttribute(Attribute):
"""A frame attribute which is a differential instance.
The optional ``allowed_classes`` argument allows specifying a restricted
set of valid differential classes to check the input against. Otherwise,
any `~astropy.coordinates.BaseDifferential` subclass instance is valid.
Parameters
----------
default : object
Default value for the attribute if not provided
allowed_classes : tuple, optional
A list of allowed differential classes for this attribute to have.
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def __init__(self, default=None, allowed_classes=None,
secondary_attribute=''):
if allowed_classes is not None:
self.allowed_classes = tuple(allowed_classes)
else:
self.allowed_classes = BaseDifferential
super().__init__(default, secondary_attribute)
def convert_input(self, value):
"""
Checks that the input is a differential object and is one of the
allowed class types.
Parameters
----------
value : object
Input value.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
if not isinstance(value, self.allowed_classes):
if len(self.allowed_classes) == 1:
value = self.allowed_classes[0](value)
else:
raise TypeError('Tried to set a DifferentialAttribute with '
'an unsupported Differential type {}. Allowed '
'classes are: {}'
.format(value.__class__,
self.allowed_classes))
return value, True
# do this here to prevent a series of complicated circular imports
from .earth import EarthLocation
from .representation import CartesianRepresentation, BaseDifferential
| {
"content_hash": "aa2607ed8bd3e0be6323b877ddafcdca",
"timestamp": "",
"source": "github",
"line_count": 518,
"max_line_length": 102,
"avg_line_length": 34.9980694980695,
"alnum_prop": 0.5936896684869546,
"repo_name": "dhomeier/astropy",
"id": "7e008ac477e6ad741b42555cc168e9a47fc93586",
"size": "18234",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "astropy/coordinates/attributes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "10891881"
},
{
"name": "C++",
"bytes": "55147"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "181654"
},
{
"name": "M4",
"bytes": "18016"
},
{
"name": "Makefile",
"bytes": "51059"
},
{
"name": "Python",
"bytes": "10582251"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
'''
Created on Aug 29 2015
@author: Kevin Chien
'''
import types
def import_resource(full_resource_name):
index = full_resource_name.rfind(".")
module_name = full_resource_name[0:index]
resource_name = full_resource_name[index + 1:len(full_resource_name)]
mod = __import__(module_name, fromlist=[resource_name])
if hasattr(mod, resource_name):
return getattr(mod, resource_name)
else:
return None
def get_class(full_path):
index = full_path.rfind('.')
resource_name = full_path[0:index]
class_name = full_path[index + 1:]
module = import_resource(resource_name)
return getattr(module, class_name)
def enum(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
reverse = dict((value, key) for key, value in enums.iteritems())
enums['reverse'] = reverse
obj = type('Enum', (), enums)
obj.list = list(sequential) + named.values()
return obj
class EqualDict(dict):
'''
Implement __eq__ and __ne__ functions to make dict objects compare each other by key value.
The key value would be empty, so use compare_empty_key flag to determine whether the empty value should be compared.
More examples can be found in test case.
'''
def __init__(self, d, *keys):
super(EqualDict, self).__init__(**d)
self.keys = keys
def __eq__(self, other):
if isinstance(other, dict):
for key in self.keys:
self_value = self.get(key)
other_value = other.get(key)
if self_value != other_value:
return False
return True
return False
def __ne__(self, other):
return not self.__eq__(other)
def merge_nested_dict(x, y):
# store a copy of x, but overwrite with y's values where applicable
merged = dict(x,**y)
# if the value of merged[key] was overwritten with y[key]'s value
# then we need to put back any missing x[key] values
for key in x.keys():
# if this key is a dictionary, recurse
if type(x[key]) is types.DictType and y.has_key(key):
merged[key] = merge_nested_dict(x[key],y[key])
return merged
def dict_copy(d, source_keys, dest_keys):
new_d = {}
for source_key, dest_key in zip(source_keys, dest_keys):
value = d.get(source_key)
if value:
new_d[dest_key] = value
return new_d
| {
"content_hash": "de05b47d52d31badb50fb66cfb6997f8",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 120,
"avg_line_length": 32.67088607594937,
"alnum_prop": 0.5784579620302208,
"repo_name": "cchienhao/data_collector",
"id": "23d76e88bebf75e34b58174b1cd26187b0b8af44",
"size": "2581",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/common/reflectionutil.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "146326"
},
{
"name": "Ruby",
"bytes": "7585"
},
{
"name": "Shell",
"bytes": "589"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('history', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='historyentry',
name='delete_comment_date',
field=models.DateTimeField(default=None, null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='historyentry',
name='delete_comment_user',
field=models.ForeignKey(null=True, default=None, related_name='deleted_comments', to=settings.AUTH_USER_MODEL, blank=True),
preserve_default=True,
),
]
| {
"content_hash": "6e876eb7a31658bcf5da44ed35c758da",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 135,
"avg_line_length": 30.925925925925927,
"alnum_prop": 0.6275449101796408,
"repo_name": "curiosityio/taiga-docker",
"id": "b78c6da437879d192dc8b29a30a92884226cc29d",
"size": "859",
"binary": false,
"copies": "28",
"ref": "refs/heads/master",
"path": "taiga-back/taiga-back/taiga/projects/history/migrations/0002_auto_20140916_0936.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "186988"
},
{
"name": "JavaScript",
"bytes": "2007"
},
{
"name": "Nginx",
"bytes": "4140"
},
{
"name": "Python",
"bytes": "2793020"
},
{
"name": "Shell",
"bytes": "1392"
}
],
"symlink_target": ""
} |
import botocore.session
from derpconf.config import Config
from mock import patch, MagicMock
from thumbor.context import Context
from tornado.testing import gen_test
from fixtures.storage_fixture import IMAGE_PATH, IMAGE_BYTES, s3_bucket
from tc_aws.loaders import s3_loader
from tests import S3MockedAsyncTestCase
class S3LoaderTestCase(S3MockedAsyncTestCase):
@gen_test
def test_can_load_image(self):
client = botocore.session.get_session().create_client('s3')
client.create_bucket(Bucket=s3_bucket)
client.put_object(
Bucket=s3_bucket,
Key=''.join(['root_path', IMAGE_PATH]),
Body=IMAGE_BYTES,
ContentType='image/jpeg', )
conf = Config(
TC_AWS_LOADER_BUCKET=s3_bucket,
TC_AWS_LOADER_ROOT_PATH='root_path'
)
image = yield s3_loader.load(Context(config=conf), IMAGE_PATH)
self.assertEqual(image, IMAGE_BYTES)
@gen_test
def test_can_validate_buckets(self):
conf = Config(
TC_AWS_ALLOWED_BUCKETS=['whitelist_bucket'],
TC_AWS_LOADER_BUCKET=None,
)
image = yield s3_loader.load(Context(config=conf), '/'.join([s3_bucket, IMAGE_PATH]))
self.assertIsNone(image.buffer)
@patch('thumbor.loaders.http_loader.load_sync')
@gen_test
def test_should_use_http_loader(self, load_sync_patch):
def cb(a, b, callback, *args, **kwargs):
callback('foobar')
return None
load_sync_patch.side_effect = cb
conf = Config(TC_AWS_ENABLE_HTTP_LOADER=True)
s3_loader.load(Context(config=conf), 'http://foo.bar')
self.assertTrue(load_sync_patch.called)
@patch('thumbor.loaders.http_loader.load_sync')
@gen_test
def test_should_not_use_http_loader_if_not_prefixed_with_scheme(self, load_sync_patch):
conf = Config(TC_AWS_ENABLE_HTTP_LOADER=True)
yield s3_loader.load(Context(config=conf), 'foo/bar')
self.assertFalse(load_sync_patch.called)
def test_datafunc_loader(self):
def callback(*args, **kwargs):
pass
file_key = {
'Error': 'Error',
'ResponseMetadata': {
'HTTPStatusCode': 502
}
}
self.call_count = 0
def get(key, callback=None):
self.call_count += 1
callback(file_key)
mock_bucket_loader = MagicMock()
mock_bucket_loader.get = get
func = s3_loader.HandleDataFunc.as_func(
'/'.join([s3_bucket, IMAGE_PATH]),
callback=callback,
bucket_loader=mock_bucket_loader,
max_retry=3
)
func(file_key)
self.assertEqual(self.call_count, 3)
| {
"content_hash": "40a2ee9a0d230731152f976f8a5a3a6e",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 93,
"avg_line_length": 30.373626373626372,
"alnum_prop": 0.6067293777134588,
"repo_name": "thumbor-community/aws",
"id": "4da14c633512650c9b55165023f30cd10874d1ad",
"size": "2920",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_s3_loader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "490"
},
{
"name": "Python",
"bytes": "55064"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
__title__ = 'Enclosure Groups'
__version__ = '0.0.1'
__copyright__ = '(C) Copyright (2012-2016) Hewlett Packard Enterprise ' \
' Development LP'
__license__ = 'MIT'
__status__ = 'Development'
from hpOneView.resources.resource import ResourceClient
class EnclosureGroups(object):
URI = '/rest/enclosure-groups'
def __init__(self, con):
self._connection = con
self._client = ResourceClient(con, self.URI)
self.__default_values = {"type": "EnclosureGroupV200"}
def get_all(self, start=0, count=-1, filter='', sort=''):
"""
Gets a list of enclosure groups.
Args:
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return. A count of -1 requests all the items.
The actual number of items in the response may differ from the requested
count if the sum of start and count exceed the total number of items.
filter:
A general filter/query string to narrow the list of items returned. The
default is no filter - all resources are returned.
sort:
The sort order of the returned data set. By default, the sort order is based
on create time, with the oldest entry first.
Returns:
list: A list of enclosure groups.
"""
return self._client.get_all(start, count, filter=filter, sort=sort)
def get(self, id_or_uri):
"""
Gets a enclosure group by ID or by uri
Args:
id_or_uri: Could be either the enclosure group id or the enclosure group uri
Returns:
dict: enclosure group
"""
return self._client.get(id_or_uri)
def get_script(self, id_or_uri):
"""
Gets the configuration script of the enclosure-group resource with the specified URI.
Returns:
dict:
"""
uri = self._client.build_uri(id_or_uri) + "/script"
return self._client.get(uri)
def get_by(self, field, value):
"""
Get all enclosure groups that matches the filter
The search is case insensitive
Args:
field: field name to filter
value: value to filter
Returns:
list: A list of enclosure groups.
"""
return self._client.get_by(field, value)
def create(self, resource, timeout=-1):
"""
Creates an enclosure group. An interconnect bay mapping must be provided for each
of the interconnect bays in the enclosure. For this release, the same logical
interconnect group must be provided for each interconnect bay mapping.
Args:
resource (dict): Object to create
timeout:
Timeout in seconds. Wait task completion by default. The timeout does not abort the operation
in OneView, just stops waiting for its completion.
Returns:
dict: Created enclosure group
"""
data = self.__default_values.copy()
data.update(resource)
return self._client.create(data, timeout=timeout)
def delete(self, resource, timeout=-1):
"""
Deletes an enclosure group. An enclosure group cannot be deleted if any enclosures
are currently part of that enclosure group.
Args:
resource (dict): object to delete
timeout:
Timeout in seconds. Wait task completion by default. The timeout does not abort the operation
in OneView, just stops waiting for its completion.
Returns:
boolean: True when success
"""
return self._client.delete(resource, timeout=timeout)
def update(self, resource, timeout=-1):
"""
Updates an enclosure group with new attributes.
Args:
resource (dict): Object to update
timeout:
Timeout in seconds. Wait task completion by default. The timeout does not abort the operation
in OneView, just stops waiting for its completion.
Returns:
dict: Updated enclosure group
"""
data = self.__default_values.copy()
data.update(resource)
return self._client.update(data, timeout=timeout)
def update_script(self, id_or_uri, script_body):
"""
Updates the configuration script of the enclosure-group with the specified URI.
Args:
id_or_uri: id or resource uri
script_body: configuration script
Returns:
dict: Updated enclosure group
"""
uri = self._client.build_uri(id_or_uri) + "/script"
return self._client.update(script_body, uri=uri)
| {
"content_hash": "74c144a9212713f22ddaa86228333dd9",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 109,
"avg_line_length": 33.55844155844156,
"alnum_prop": 0.6002321981424149,
"repo_name": "andreadean5/python-hpOneView",
"id": "69afb699d9398994fbbe830bcdba8d2ccd8a2eef",
"size": "6328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hpOneView/resources/servers/enclosure_groups.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "920844"
}
],
"symlink_target": ""
} |
class GoudaError(Exception):
pass
| {
"content_hash": "34199ec59b0ac8e3f82f8d78ac4ecd63",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 28,
"avg_line_length": 19,
"alnum_prop": 0.7368421052631579,
"repo_name": "NaturalHistoryMuseum/gouda",
"id": "1cf17d3482dd03da5bf1a97ee32171f95a4885d2",
"size": "38",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gouda/gouda_error.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "873"
},
{
"name": "Java",
"bytes": "1630"
},
{
"name": "Python",
"bytes": "72625"
},
{
"name": "Shell",
"bytes": "904"
}
],
"symlink_target": ""
} |
import os
import warnings
from pathlib import Path
import pytest
import numpy as np
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.iers import iers
from astropy import units as u
from astropy.table import QTable
from astropy.time import Time, TimeDelta
CI = os.environ.get('CI', False)
FILE_NOT_FOUND_ERROR = getattr(__builtins__, 'FileNotFoundError', OSError)
try:
iers.IERS_A.open('finals2000A.all') # check if IERS_A is available
except OSError:
HAS_IERS_A = False
else:
HAS_IERS_A = True
IERS_A_EXCERPT = get_pkg_data_filename(os.path.join('data', 'iers_a_excerpt'))
def setup_module():
# Need auto_download so that IERS_B won't be loaded and cause tests to
# fail. Files to be downloaded are handled appropriately in the tests.
iers.conf.auto_download = True
def teardown_module():
# This setting is to be consistent with astropy/conftest.py
iers.conf.auto_download = False
class TestBasic():
"""Basic tests that IERS_B returns correct values"""
@pytest.mark.parametrize('iers_cls', (iers.IERS_B, iers.IERS))
def test_simple(self, iers_cls):
"""Test the default behaviour for IERS_B and IERS."""
# Arguably, IERS itself should not be used at all, but it used to
# provide IERS_B by default so we check that it continues to do so.
# Eventually, IERS should probably be deprecated.
iers_cls.close()
assert iers_cls.iers_table is None
iers_tab = iers_cls.open()
assert iers_cls.iers_table is not None
assert iers_cls.iers_table is iers_tab
assert isinstance(iers_tab, QTable)
assert isinstance(iers_tab, iers.IERS_B)
assert (iers_tab['UT1_UTC'].unit / u.second).is_unity()
assert (iers_tab['PM_x'].unit / u.arcsecond).is_unity()
assert (iers_tab['PM_y'].unit / u.arcsecond).is_unity()
jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])
jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0., 0.5])
ut1_utc = iers_tab.ut1_utc(jd1, jd2)
assert isinstance(ut1_utc, u.Quantity)
assert (ut1_utc.unit / u.second).is_unity()
# IERS files change at the 0.1 ms level; see gh-6981
assert_quantity_allclose(ut1_utc, [-0.5868211, -0.5868184, -0.5868184,
0.4131816, 0.41328895] * u.s,
atol=0.1*u.ms)
# should be future-proof; surely we've moved to another planet by then
with pytest.raises(IndexError):
ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0.)
# also check it returns the right status
ut1_utc2, status2 = iers_tab.ut1_utc(jd1, jd2, return_status=True)
assert np.all(status2 == iers.FROM_IERS_B)
ut1_utc4, status4 = iers_tab.ut1_utc(1e11, 0., return_status=True)
assert status4 == iers.TIME_BEYOND_IERS_RANGE
# check it works via Time too
t = Time(jd1, jd2, format='jd', scale='utc')
ut1_utc3 = iers_tab.ut1_utc(t)
assert_quantity_allclose(ut1_utc3, [-0.5868211, -0.5868184, -0.5868184,
0.4131816, 0.41328895] * u.s,
atol=0.1*u.ms)
# Table behaves properly as a table (e.g. can be sliced)
assert len(iers_tab[:2]) == 2
def test_open_filename(self):
iers.IERS_B.close()
iers.IERS_B.open(iers.IERS_B_FILE)
assert iers.IERS_B.iers_table is not None
assert isinstance(iers.IERS_B.iers_table, QTable)
iers.IERS_B.close()
with pytest.raises(FILE_NOT_FOUND_ERROR):
iers.IERS_B.open('surely this does not exist')
def test_open_network_url(self):
iers.IERS_A.close()
iers.IERS_A.open(Path(IERS_A_EXCERPT).as_uri())
assert iers.IERS_A.iers_table is not None
assert isinstance(iers.IERS_A.iers_table, QTable)
iers.IERS_A.close()
class TestIERS_AExcerpt():
def test_simple(self):
# Test the IERS A reader. It is also a regression tests that ensures
# values do not get overridden by IERS B; see #4933.
iers_tab = iers.IERS_A.open(IERS_A_EXCERPT)
assert (iers_tab['UT1_UTC'].unit / u.second).is_unity()
assert 'P' in iers_tab['UT1Flag']
assert 'I' in iers_tab['UT1Flag']
assert 'B' in iers_tab['UT1Flag']
assert np.all((iers_tab['UT1Flag'] == 'I') |
(iers_tab['UT1Flag'] == 'P') |
(iers_tab['UT1Flag'] == 'B'))
assert (iers_tab['dX_2000A'].unit / u.marcsec).is_unity()
assert (iers_tab['dY_2000A'].unit / u.marcsec).is_unity()
assert 'P' in iers_tab['NutFlag']
assert 'I' in iers_tab['NutFlag']
assert 'B' in iers_tab['NutFlag']
assert np.all((iers_tab['NutFlag'] == 'P') |
(iers_tab['NutFlag'] == 'I') |
(iers_tab['NutFlag'] == 'B'))
assert (iers_tab['PM_x'].unit / u.arcsecond).is_unity()
assert (iers_tab['PM_y'].unit / u.arcsecond).is_unity()
assert 'P' in iers_tab['PolPMFlag']
assert 'I' in iers_tab['PolPMFlag']
assert 'B' in iers_tab['PolPMFlag']
assert np.all((iers_tab['PolPMFlag'] == 'P') |
(iers_tab['PolPMFlag'] == 'I') |
(iers_tab['PolPMFlag'] == 'B'))
t = Time([57053., 57054., 57055.], format='mjd')
ut1_utc, status = iers_tab.ut1_utc(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
# These values are *exactly* as given in the table, so they should
# match to double precision accuracy.
assert_quantity_allclose(ut1_utc,
[-0.4916557, -0.4925323, -0.4934373] * u.s,
atol=0.1*u.ms)
dcip_x, dcip_y, status = iers_tab.dcip_xy(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
# These values are *exactly* as given in the table, so they should
# match to double precision accuracy.
print(dcip_x)
print(dcip_y)
assert_quantity_allclose(dcip_x,
[-0.086, -0.093, -0.087] * u.marcsec,
atol=1.*u.narcsec)
assert_quantity_allclose(dcip_y,
[0.094, 0.081, 0.072] * u.marcsec,
atol=1*u.narcsec)
pm_x, pm_y, status = iers_tab.pm_xy(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
assert_quantity_allclose(pm_x,
[0.003734, 0.004581, 0.004623] * u.arcsec,
atol=0.1*u.marcsec)
assert_quantity_allclose(pm_y,
[0.310824, 0.313150, 0.315517] * u.arcsec,
atol=0.1*u.marcsec)
# Table behaves properly as a table (e.g. can be sliced)
assert len(iers_tab[:2]) == 2
@pytest.mark.skipif('not HAS_IERS_A')
class TestIERS_A():
def test_simple(self):
"""Test that open() by default reads a 'finals2000A.all' file."""
# Ensure we remove any cached table (gh-5131).
iers.IERS_A.close()
iers_tab = iers.IERS_A.open()
jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])
jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0., 0.5])
ut1_utc, status = iers_tab.ut1_utc(jd1, jd2, return_status=True)
assert np.all(status == iers.FROM_IERS_B)
assert_quantity_allclose(ut1_utc, [-0.5868211, -0.5868184, -0.5868184,
0.4131816, 0.41328895] * u.s,
atol=0.1*u.ms)
ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0., return_status=True)
assert status2 == iers.TIME_BEYOND_IERS_RANGE
tnow = Time.now()
ut1_utc3, status3 = iers_tab.ut1_utc(tnow, return_status=True)
assert status3 == iers.FROM_IERS_A_PREDICTION
assert ut1_utc3 != 0.
class TestIERS_Auto():
def setup_class(self):
"""Set up useful data for the tests.
"""
self.N = 40
self.ame = 30.0
self.iers_a_file_1 = get_pkg_data_filename(
os.path.join('data', 'finals2000A-2016-02-30-test'))
self.iers_a_file_2 = get_pkg_data_filename(
os.path.join('data', 'finals2000A-2016-04-30-test'))
self.iers_a_url_1 = Path(self.iers_a_file_1).as_uri()
self.iers_a_url_2 = Path(self.iers_a_file_2).as_uri()
self.t = Time.now() + TimeDelta(10, format='jd') * np.arange(self.N)
def teardown_method(self, method):
"""Run this after every test.
"""
iers.IERS_Auto.close()
def test_interpolate_error_formatting(self):
"""Regression test: make sure the error message in
IERS_Auto._check_interpolate_indices() is formatted correctly.
"""
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):
with iers.conf.set_temp('iers_auto_url_mirror', self.iers_a_url_1):
with iers.conf.set_temp('auto_max_age', self.ame):
with pytest.raises(ValueError) as err:
iers_table = iers.IERS_Auto.open()
with warnings.catch_warnings():
# Ignoring this if it comes up -- IERS_Auto predictive
# values are older than 30.0 days but downloading the
# latest table did not find newer values
warnings.simplefilter('ignore', iers.IERSStaleWarning)
iers_table.ut1_utc(self.t.jd1, self.t.jd2)
assert str(err.value) == iers.INTERPOLATE_ERROR.format(self.ame)
def test_auto_max_age_none(self):
"""Make sure that iers.INTERPOLATE_ERROR's advice about setting
auto_max_age = None actually works.
"""
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):
with iers.conf.set_temp('auto_max_age', None):
iers_table = iers.IERS_Auto.open()
delta = iers_table.ut1_utc(self.t.jd1, self.t.jd2)
assert isinstance(delta, np.ndarray)
assert delta.shape == (self.N,)
assert_quantity_allclose(delta, np.array([-0.2246227]*self.N)*u.s)
def test_auto_max_age_minimum(self):
"""Check that the minimum auto_max_age is enforced.
"""
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):
with iers.conf.set_temp('auto_max_age', 5.0):
with pytest.raises(ValueError) as err:
iers_table = iers.IERS_Auto.open()
_ = iers_table.ut1_utc(self.t.jd1, self.t.jd2)
assert str(err.value) == 'IERS auto_max_age configuration value must be larger than 10 days'
def test_no_auto_download(self):
with iers.conf.set_temp('auto_download', False):
t = iers.IERS_Auto.open()
assert type(t) is iers.IERS_B
@pytest.mark.remote_data
def test_simple(self):
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):
dat = iers.IERS_Auto.open()
assert dat['MJD'][0] == 57359.0 * u.d
assert dat['MJD'][-1] == 57539.0 * u.d
# Pretend we are accessing at a time 7 days after start of predictive data
predictive_mjd = dat.meta['predictive_mjd']
dat._time_now = Time(predictive_mjd, format='mjd') + 7 * u.d
# Look at times before and after the test file begins. 0.1292905 is
# the IERS-B value from MJD=57359. The value in
# finals2000A-2016-02-30-test has been replaced at this point.
assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1293286)
assert np.allclose(dat.ut1_utc(Time(60000, format='mjd').jd).value, -0.2246227)
# Now pretend we are accessing at time 60 days after start of predictive data.
# There will be a warning when downloading the file doesn't give new data
# and an exception when extrapolating into the future with insufficient data.
dat._time_now = Time(predictive_mjd, format='mjd') + 60 * u.d
assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1293286)
with pytest.warns(iers.IERSStaleWarning, match='IERS_Auto predictive '
'values are older') as warns, \
pytest.raises(ValueError, match='interpolating from IERS_Auto '
'using predictive values'):
dat.ut1_utc(Time(60000, format='mjd').jd)
assert len(warns) == 1
# Warning only if we are getting return status
with pytest.warns(iers.IERSStaleWarning, match='IERS_Auto '
'predictive values are older') as warns:
dat.ut1_utc(Time(60000, format='mjd').jd, return_status=True)
assert len(warns) == 1
# Now set auto_max_age = None which says that we don't care how old the
# available IERS-A file is. There should be no warnings or exceptions.
with iers.conf.set_temp('auto_max_age', None):
dat.ut1_utc(Time(60000, format='mjd').jd)
# Now point to a later file with same values but MJD increased by
# 60 days and see that things work. dat._time_now is still the same value
# as before, i.e. right around the start of predictive values for the new file.
# (In other words this is like downloading the latest file online right now).
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_2):
# Look at times before and after the test file begins. This forces a new download.
assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1293286)
assert np.allclose(dat.ut1_utc(Time(60000, format='mjd').jd).value, -0.3)
# Now the time range should be different.
assert dat['MJD'][0] == 57359.0 * u.d
assert dat['MJD'][-1] == (57539.0 + 60) * u.d
@pytest.mark.remote_data
def test_IERS_B_parameters_loading_into_IERS_Auto():
A = iers.IERS_Auto.open()
B = iers.IERS_B.open()
ok_A = A["MJD"] <= B["MJD"][-1]
assert not np.all(ok_A), "IERS B covers all of IERS A: should not happen"
# We only overwrite IERS_B values in the IERS_A table that were already
# there in the first place. Better take that into account.
ok_A &= np.isfinite(A["UT1_UTC_B"])
i_B = np.searchsorted(B["MJD"], A["MJD"][ok_A])
assert np.all(np.diff(i_B) == 1), "Valid region not contiguous"
assert np.all(A["MJD"][ok_A] == B["MJD"][i_B])
# Check that values are copied correctly. Since units are not
# necessarily the same, we use allclose with very strict tolerance.
for name in ("UT1_UTC", "PM_x", "PM_y", "dX_2000A", "dY_2000A"):
assert_quantity_allclose(
A[name][ok_A], B[name][i_B], rtol=1e-15,
err_msg=("Bug #9206 IERS B parameter {} not copied over "
"correctly to IERS Auto".format(name)))
# Issue with FTP, rework test into previous one when it's fixed
@pytest.mark.skipif("CI", reason="Flaky on CI")
@pytest.mark.remote_data
def test_iers_a_dl():
iersa_tab = iers.IERS_A.open(iers.IERS_A_URL, cache=False)
try:
# some basic checks to ensure the format makes sense
assert len(iersa_tab) > 0
assert 'UT1_UTC_A' in iersa_tab.colnames
finally:
iers.IERS_A.close()
@pytest.mark.remote_data
def test_iers_a_dl_mirror():
iersa_tab = iers.IERS_A.open(iers.IERS_A_URL_MIRROR, cache=False)
try:
# some basic checks to ensure the format makes sense
assert len(iersa_tab) > 0
assert 'UT1_UTC_A' in iersa_tab.colnames
finally:
iers.IERS_A.close()
@pytest.mark.remote_data
def test_iers_b_dl():
iersb_tab = iers.IERS_B.open(iers.IERS_B_URL, cache=False)
try:
# some basic checks to ensure the format makes sense
assert len(iersb_tab) > 0
assert 'UT1_UTC' in iersb_tab.colnames
finally:
iers.IERS_B.close()
| {
"content_hash": "c9a5b102046b388d01f716f5b27fd1ca",
"timestamp": "",
"source": "github",
"line_count": 377,
"max_line_length": 100,
"avg_line_length": 43.86206896551724,
"alnum_prop": 0.5826681180454766,
"repo_name": "aleksandr-bakanov/astropy",
"id": "960c22d6933186a4bcca949b7c15148c19ddcf2b",
"size": "16601",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "astropy/utils/iers/tests/test_iers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "444651"
},
{
"name": "C++",
"bytes": "1057"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Objective-C",
"bytes": "615"
},
{
"name": "Python",
"bytes": "9898093"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cms', '0012_auto_20150607_2207'),
('pages', '0014_socialplugin'),
]
operations = [
migrations.CreateModel(
name='FAQItemPlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='cms.CMSPlugin', on_delete=models.CASCADE)),
('question', models.TextField(blank=True)),
('answer', models.TextField(blank=True)),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.CreateModel(
name='FAQPlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='cms.CMSPlugin', on_delete=models.CASCADE)),
('title', models.CharField(max_length=70, blank=True)),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
| {
"content_hash": "51b812d91546bb6701b2c5f5d6f9ae84",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 174,
"avg_line_length": 34.34285714285714,
"alnum_prop": 0.5316139767054908,
"repo_name": "cjlee112/socraticqs2",
"id": "bb5bfad75f49bf705c16530608107b5a91c3917e",
"size": "1202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysite/pages/migrations/0015_faqitemplugin_faqplugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "138226"
},
{
"name": "Dockerfile",
"bytes": "3865"
},
{
"name": "Gherkin",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "467395"
},
{
"name": "JavaScript",
"bytes": "234788"
},
{
"name": "Makefile",
"bytes": "4696"
},
{
"name": "Python",
"bytes": "1785754"
},
{
"name": "Shell",
"bytes": "2889"
}
],
"symlink_target": ""
} |
"""Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
A module to support the Privet (Local Discovery) API.
Use this module to conduct tests that interact with the Privet protocol. The
Privet class will provide the needed methods to send, receive, and parse
messages from privet clients (for example, a printer).
"""
class Privet(object):
"""Contains methods to send and receive Privet Protocol messages."""
def __init__(self, logger):
"""Get a reference to a logger object. Set some initial dictionaries.
Args:
logger: initialized logger object.
"""
self.logger = logger
self.api_names = ['accesstoken', 'capabilities', 'info', 'INVALID',
'printer']
self.reg_actions = ['start', 'cancel', 'getClaimToken', 'complete',
'invalid']
self.printer_api = ['createjob', 'jobstate', 'submitdoc']
self.required_fields = ['manufacturer', 'model', 'firmware', 'update_url',
'version', 'x-privet-token']
self.headers_empty = {'X-Privet-Token': '""'}
self.headers_invalid = {'X-Privet-Token': 'INVALID'}
self.headers_missing = {}
def SetPrivetUrls(self, device_ip, device_port):
"""Construct a dictionary of URLs that Privet clients provide.
Args:
device_ip: string, IP address of the privet client.
device_port: integer, TCP port number of device.
Returns:
dictionary where key = action and value = URL.
"""
urls = {}
urls['register'] = {} # Register has multiple actions.
device_url = 'http://%s:%s' % (device_ip, device_port)
for name in self.api_names:
urls[name] = '%s/privet/%s' % (device_url, name)
for name in self.printer_api:
urls[name] = '%s/privet/printer/%s' % (device_url, name)
for action in self.reg_actions:
urls['register'][action] = '%s/privet/%s?action=%s' % (
device_url, 'register', action)
return urls
| {
"content_hash": "f5b2725f530f33cf42f23327118ec580",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 78,
"avg_line_length": 36.82089552238806,
"alnum_prop": 0.6643696797730037,
"repo_name": "google/cloudprint_logocert",
"id": "ce15206b7850db220c985c0531c167a0b0d2a1f0",
"size": "2467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_privet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "12177"
},
{
"name": "Python",
"bytes": "302438"
}
],
"symlink_target": ""
} |
import copy
from oslo.config import cfg
from sqlalchemy.orm import exc
from sqlalchemy.orm import joinedload
from sqlalchemy.sql import expression as expr
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron import context as n_context
from neutron.db import agents_db
from neutron.db import extraroute_db
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.db import portbindings_db as p_binding
from neutron.extensions import providernet as pr_net
from neutron.i18n import _LE, _LI
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.plugins.cisco.common import cisco_constants as c_const
from neutron.plugins.cisco.db.l3 import l3_models
from neutron.plugins.cisco.l3.rpc import l3_router_rpc_joint_agent_api
LOG = logging.getLogger(__name__)
ROUTER_APPLIANCE_OPTS = [
cfg.IntOpt('backlog_processing_interval',
default=10,
help=_('Time in seconds between renewed scheduling attempts of '
'non-scheduled routers.')),
]
cfg.CONF.register_opts(ROUTER_APPLIANCE_OPTS, "general")
class RouterCreateInternalError(n_exc.NeutronException):
message = _("Router could not be created due to internal error.")
class RouterInternalError(n_exc.NeutronException):
message = _("Internal error during router processing.")
class RouterBindingInfoError(n_exc.NeutronException):
message = _("Could not get binding information for router %(router_id)s.")
class L3RouterApplianceDBMixin(extraroute_db.ExtraRoute_dbonly_mixin):
"""Mixin class implementing Neutron's routing service using appliances."""
# Dictionary of routers for which new scheduling attempts should
# be made and the refresh setting and heartbeat for that.
_backlogged_routers = {}
_refresh_router_backlog = True
_heartbeat = None
@property
def l3_cfg_rpc_notifier(self):
if not hasattr(self, '_l3_cfg_rpc_notifier'):
self._l3_cfg_rpc_notifier = (l3_router_rpc_joint_agent_api.
L3RouterJointAgentNotifyAPI(self))
return self._l3_cfg_rpc_notifier
@l3_cfg_rpc_notifier.setter
def l3_cfg_rpc_notifier(self, value):
self._l3_cfg_rpc_notifier = value
def create_router(self, context, router):
with context.session.begin(subtransactions=True):
if self.mgmt_nw_id() is None:
raise RouterCreateInternalError()
router_created = (super(L3RouterApplianceDBMixin, self).
create_router(context, router))
r_hd_b_db = l3_models.RouterHostingDeviceBinding(
router_id=router_created['id'],
auto_schedule=True,
hosting_device_id=None)
context.session.add(r_hd_b_db)
# backlog so this new router gets scheduled asynchronously
self.backlog_router(r_hd_b_db['router'])
return router_created
def update_router(self, context, id, router):
r = router['router']
# Check if external gateway has changed so we may have to
# update trunking
o_r_db = self._get_router(context, id)
old_ext_gw = (o_r_db.gw_port or {}).get('network_id')
new_ext_gw = (r.get('external_gateway_info', {}) or {}).get(
'network_id')
with context.session.begin(subtransactions=True):
e_context = context.elevated()
if old_ext_gw is not None and old_ext_gw != new_ext_gw:
o_r = self._make_router_dict(o_r_db, process_extensions=False)
# no need to schedule now since we're only doing this to
# tear-down connectivity and there won't be any if not
# already scheduled.
self._add_type_and_hosting_device_info(e_context, o_r,
schedule=False)
p_drv = self.get_hosting_device_plugging_driver()
if p_drv is not None:
p_drv.teardown_logical_port_connectivity(e_context,
o_r_db.gw_port)
router_updated = (
super(L3RouterApplianceDBMixin, self).update_router(
context, id, router))
routers = [copy.deepcopy(router_updated)]
self._add_type_and_hosting_device_info(e_context, routers[0])
self.l3_cfg_rpc_notifier.routers_updated(context, routers)
return router_updated
def delete_router(self, context, id):
router_db = self._get_router(context, id)
router = self._make_router_dict(router_db)
with context.session.begin(subtransactions=True):
e_context = context.elevated()
r_hd_binding = self._get_router_binding_info(e_context, id)
self._add_type_and_hosting_device_info(
e_context, router, binding_info=r_hd_binding, schedule=False)
if router_db.gw_port is not None:
p_drv = self.get_hosting_device_plugging_driver()
if p_drv is not None:
p_drv.teardown_logical_port_connectivity(e_context,
router_db.gw_port)
# conditionally remove router from backlog just to be sure
self.remove_router_from_backlog(id)
if router['hosting_device'] is not None:
self.unschedule_router_from_hosting_device(context,
r_hd_binding)
super(L3RouterApplianceDBMixin, self).delete_router(context, id)
self.l3_cfg_rpc_notifier.router_deleted(context, router)
def notify_router_interface_action(
self, context, router_interface_info, routers, action):
l3_method = '%s_router_interface' % action
self.l3_cfg_rpc_notifier.routers_updated(context, routers, l3_method)
mapping = {'add': 'create', 'remove': 'delete'}
notifier = n_rpc.get_notifier('network')
router_event = 'router.interface.%s' % mapping[action]
notifier.info(context, router_event,
{'router_interface': router_interface_info})
def add_router_interface(self, context, router_id, interface_info):
with context.session.begin(subtransactions=True):
info = (super(L3RouterApplianceDBMixin, self).
add_router_interface(context, router_id, interface_info))
routers = [self.get_router(context, router_id)]
self._add_type_and_hosting_device_info(context.elevated(),
routers[0])
self.notify_router_interface_action(context, info, routers, 'add')
return info
def remove_router_interface(self, context, router_id, interface_info):
if 'port_id' in (interface_info or {}):
port_db = self._core_plugin._get_port(
context, interface_info['port_id'])
elif 'subnet_id' in (interface_info or {}):
subnet_db = self._core_plugin._get_subnet(
context, interface_info['subnet_id'])
port_db = self._get_router_port_db_on_subnet(
context, router_id, subnet_db)
else:
msg = _("Either subnet_id or port_id must be specified")
raise n_exc.BadRequest(resource='router', msg=msg)
routers = [self.get_router(context, router_id)]
with context.session.begin(subtransactions=True):
e_context = context.elevated()
self._add_type_and_hosting_device_info(e_context, routers[0])
p_drv = self.get_hosting_device_plugging_driver()
if p_drv is not None:
p_drv.teardown_logical_port_connectivity(e_context, port_db)
info = (super(L3RouterApplianceDBMixin, self).
remove_router_interface(context, router_id,
interface_info))
self.notify_router_interface_action(context, info, routers, 'remove')
return info
def create_floatingip(
self, context, floatingip,
initial_status=l3_constants.FLOATINGIP_STATUS_ACTIVE):
with context.session.begin(subtransactions=True):
info = super(L3RouterApplianceDBMixin, self).create_floatingip(
context, floatingip)
if info['router_id']:
routers = [self.get_router(context, info['router_id'])]
self._add_type_and_hosting_device_info(context.elevated(),
routers[0])
self.l3_cfg_rpc_notifier.routers_updated(context, routers,
'create_floatingip')
return info
def update_floatingip(self, context, id, floatingip):
orig_fl_ip = super(L3RouterApplianceDBMixin, self).get_floatingip(
context, id)
before_router_id = orig_fl_ip['router_id']
with context.session.begin(subtransactions=True):
info = super(L3RouterApplianceDBMixin, self).update_floatingip(
context, id, floatingip)
router_ids = []
if before_router_id:
router_ids.append(before_router_id)
router_id = info['router_id']
if router_id and router_id != before_router_id:
router_ids.append(router_id)
routers = []
for router_id in router_ids:
router = self.get_router(context, router_id)
self._add_type_and_hosting_device_info(context.elevated(),
router)
routers.append(router)
self.l3_cfg_rpc_notifier.routers_updated(context, routers,
'update_floatingip')
return info
def delete_floatingip(self, context, id):
floatingip_db = self._get_floatingip(context, id)
router_id = floatingip_db['router_id']
with context.session.begin(subtransactions=True):
super(L3RouterApplianceDBMixin, self).delete_floatingip(
context, id)
if router_id:
routers = [self.get_router(context, router_id)]
self._add_type_and_hosting_device_info(context.elevated(),
routers[0])
self.l3_cfg_rpc_notifier.routers_updated(context, routers,
'delete_floatingip')
def disassociate_floatingips(self, context, port_id, do_notify=True):
with context.session.begin(subtransactions=True):
router_ids = super(L3RouterApplianceDBMixin,
self).disassociate_floatingips(context, port_id)
if router_ids and do_notify:
routers = []
for router_id in router_ids:
router = self.get_router(context, router_id)
self._add_type_and_hosting_device_info(context.elevated(),
router)
routers.append(router)
self.l3_cfg_rpc_notifier.routers_updated(
context, routers, 'disassociate_floatingips')
# since caller assumes that we handled notifications on its
# behalf, return nothing
return
return router_ids
@lockutils.synchronized('routerbacklog', 'neutron-')
def _handle_non_responding_hosting_devices(self, context, hosting_devices,
affected_resources):
"""Handle hosting devices determined to be "dead".
This function is called by the hosting device manager.
Service plugins are supposed to extend the 'affected_resources'
dictionary. Hence, we add the id of Neutron routers that are
hosted in <hosting_devices>.
param: hosting_devices - list of dead hosting devices
param: affected_resources - dict with list of affected logical
resources per hosting device:
{'hd_id1': {'routers': [id1, id2, ...],
'fw': [id1, ...],
...},
'hd_id2': {'routers': [id3, id4, ...],
'fw': [id1, ...],
...},
...}
"""
LOG.debug('Processing affected routers in dead hosting devices')
with context.session.begin(subtransactions=True):
for hd in hosting_devices:
hd_bindings = self._get_hosting_device_bindings(context,
hd['id'])
router_ids = []
for binding in hd_bindings:
router_ids.append(binding['router_id'])
if binding['auto_schedule']:
self.backlog_router(binding['router'])
try:
affected_resources[hd['id']].update(
{'routers': router_ids})
except KeyError:
affected_resources[hd['id']] = {'routers': router_ids}
def get_sync_data_ext(self, context, router_ids=None, active=None):
"""Query routers and their related floating_ips, interfaces.
Adds information about hosting device as well as trunking.
"""
with context.session.begin(subtransactions=True):
sync_data = (super(L3RouterApplianceDBMixin, self).
get_sync_data(context, router_ids, active))
for router in sync_data:
self._add_type_and_hosting_device_info(context, router)
plg_drv = self.get_hosting_device_plugging_driver()
if plg_drv and router['hosting_device']:
self._add_hosting_port_info(context, router, plg_drv)
return sync_data
def schedule_router_on_hosting_device(self, context, r_hd_binding):
LOG.info(_LI('Attempting to schedule router %s.'),
r_hd_binding['router']['id'])
result = self._create_csr1kv_vm_hosting_device(context.elevated())
if result is None:
# CSR1kv hosting device creation was unsuccessful so backlog
# it for another scheduling attempt later.
self.backlog_router(r_hd_binding['router'])
return False
with context.session.begin(subtransactions=True):
router = r_hd_binding['router']
r_hd_binding.hosting_device = result
self.remove_router_from_backlog(router['id'])
LOG.info(_LI('Successfully scheduled router %(r_id)s to '
'hosting device %(d_id)s'),
{'r_id': r_hd_binding['router']['id'],
'd_id': result['id']})
return True
def unschedule_router_from_hosting_device(self, context, r_hd_binding):
LOG.info(_LI('Un-schedule router %s.'),
r_hd_binding['router']['id'])
hosting_device = r_hd_binding['hosting_device']
if r_hd_binding['hosting_device'] is None:
return False
self._delete_service_vm_hosting_device(context.elevated(),
hosting_device)
@lockutils.synchronized('routers', 'neutron-')
def backlog_router(self, router):
if ((router or {}).get('id') is None or
router['id'] in self._backlogged_routers):
return
LOG.info(_LI('Backlogging router %s for renewed scheduling attempt '
'later'), router['id'])
self._backlogged_routers[router['id']] = router
@lockutils.synchronized('routers', 'neutron-')
def remove_router_from_backlog(self, id):
self._backlogged_routers.pop(id, None)
LOG.info(_LI('Router %s removed from backlog'), id)
@lockutils.synchronized('routerbacklog', 'neutron-')
def _process_backlogged_routers(self):
if self._refresh_router_backlog:
self._sync_router_backlog()
if not self._backlogged_routers:
return
context = n_context.get_admin_context()
scheduled_routers = []
LOG.info(_LI('Processing router (scheduling) backlog'))
# try to reschedule
for r_id, router in self._backlogged_routers.items():
self._add_type_and_hosting_device_info(context, router)
if router.get('hosting_device'):
# scheduling attempt succeeded
scheduled_routers.append(router)
self._backlogged_routers.pop(r_id, None)
# notify cfg agents so the scheduled routers are instantiated
if scheduled_routers:
self.l3_cfg_rpc_notifier.routers_updated(context,
scheduled_routers)
def _setup_backlog_handling(self):
self._heartbeat = loopingcall.FixedIntervalLoopingCall(
self._process_backlogged_routers)
self._heartbeat.start(
interval=cfg.CONF.general.backlog_processing_interval)
def _sync_router_backlog(self):
LOG.info(_LI('Synchronizing router (scheduling) backlog'))
context = n_context.get_admin_context()
query = context.session.query(l3_models.RouterHostingDeviceBinding)
query = query.options(joinedload('router'))
query = query.filter(
l3_models.RouterHostingDeviceBinding.hosting_device_id ==
expr.null())
for binding in query:
router = self._make_router_dict(binding.router,
process_extensions=False)
self._backlogged_routers[binding.router_id] = router
self._refresh_router_backlog = False
def _get_router_binding_info(self, context, id, load_hd_info=True):
query = context.session.query(l3_models.RouterHostingDeviceBinding)
if load_hd_info:
query = query.options(joinedload('hosting_device'))
query = query.filter(l3_models.RouterHostingDeviceBinding.router_id ==
id)
try:
return query.one()
except exc.NoResultFound:
# This should not happen
LOG.error(_LE('DB inconsistency: No type and hosting info '
'associated with router %s'), id)
raise RouterBindingInfoError(router_id=id)
except exc.MultipleResultsFound:
# This should not happen either
LOG.error(_LE('DB inconsistency: Multiple type and hosting info '
'associated with router %s'), id)
raise RouterBindingInfoError(router_id=id)
def _get_hosting_device_bindings(self, context, id, load_routers=False,
load_hosting_device=False):
query = context.session.query(l3_models.RouterHostingDeviceBinding)
if load_routers:
query = query.options(joinedload('router'))
if load_hosting_device:
query = query.options(joinedload('hosting_device'))
query = query.filter(
l3_models.RouterHostingDeviceBinding.hosting_device_id == id)
return query.all()
def _add_type_and_hosting_device_info(self, context, router,
binding_info=None, schedule=True):
"""Adds type and hosting device information to a router."""
try:
if binding_info is None:
binding_info = self._get_router_binding_info(context,
router['id'])
except RouterBindingInfoError:
LOG.error(_LE('DB inconsistency: No hosting info associated with '
'router %s'), router['id'])
router['hosting_device'] = None
return
router['router_type'] = {
'id': None,
'name': 'CSR1kv_router',
'cfg_agent_driver': (cfg.CONF.hosting_devices
.csr1kv_cfgagent_router_driver)}
if binding_info.hosting_device is None and schedule:
# This router has not been scheduled to a hosting device
# so we try to do it now.
self.schedule_router_on_hosting_device(context, binding_info)
context.session.expire(binding_info)
if binding_info.hosting_device is None:
router['hosting_device'] = None
else:
router['hosting_device'] = self.get_device_info_for_agent(
binding_info.hosting_device)
def _add_hosting_port_info(self, context, router, plugging_driver):
"""Adds hosting port information to router ports.
We only populate hosting port info, i.e., reach here, if the
router has been scheduled to a hosting device. Hence this
a good place to allocate hosting ports to the router ports.
"""
# cache of hosting port information: {mac_addr: {'name': port_name}}
hosting_pdata = {}
if router['external_gateway_info'] is not None:
h_info, did_allocation = self._populate_hosting_info_for_port(
context, router['id'], router['gw_port'],
router['hosting_device'], hosting_pdata, plugging_driver)
for itfc in router.get(l3_constants.INTERFACE_KEY, []):
h_info, did_allocation = self._populate_hosting_info_for_port(
context, router['id'], itfc, router['hosting_device'],
hosting_pdata, plugging_driver)
def _populate_hosting_info_for_port(self, context, router_id, port,
hosting_device, hosting_pdata,
plugging_driver):
port_db = self._core_plugin._get_port(context, port['id'])
h_info = port_db.hosting_info
new_allocation = False
if h_info is None:
# The port does not yet have a hosting port so allocate one now
h_info = self._allocate_hosting_port(
context, router_id, port_db, hosting_device['id'],
plugging_driver)
if h_info is None:
# This should not happen but just in case ...
port['hosting_info'] = None
return None, new_allocation
else:
new_allocation = True
if hosting_pdata.get('mac') is None:
p_data = self._core_plugin.get_port(
context, h_info.hosting_port_id, ['mac_address', 'name'])
hosting_pdata['mac'] = p_data['mac_address']
hosting_pdata['name'] = p_data['name']
# Including MAC address of hosting port so L3CfgAgent can easily
# determine which VM VIF to configure VLAN sub-interface on.
port['hosting_info'] = {'hosting_port_id': h_info.hosting_port_id,
'hosting_mac': hosting_pdata.get('mac'),
'hosting_port_name': hosting_pdata.get('name')}
plugging_driver.extend_hosting_port_info(
context, port_db, port['hosting_info'])
return h_info, new_allocation
def _allocate_hosting_port(self, context, router_id, port_db,
hosting_device_id, plugging_driver):
net_data = self._core_plugin.get_network(
context, port_db['network_id'], [pr_net.NETWORK_TYPE])
network_type = net_data.get(pr_net.NETWORK_TYPE)
alloc = plugging_driver.allocate_hosting_port(
context, router_id, port_db, network_type, hosting_device_id)
if alloc is None:
LOG.error(_LE('Failed to allocate hosting port for port %s'),
port_db['id'])
return
with context.session.begin(subtransactions=True):
h_info = l3_models.HostedHostingPortBinding(
logical_resource_id=router_id,
logical_port_id=port_db['id'],
network_type=network_type,
hosting_port_id=alloc['allocated_port_id'],
segmentation_id=alloc['allocated_vlan'])
context.session.add(h_info)
context.session.expire(port_db)
# allocation succeeded so establish connectivity for logical port
context.session.expire(h_info)
plugging_driver.setup_logical_port_connectivity(context, port_db)
return h_info
def _get_router_port_db_on_subnet(self, context, router_id, subnet):
try:
rport_qry = context.session.query(models_v2.Port)
ports = rport_qry.filter_by(
device_id=router_id,
device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF,
network_id=subnet['network_id'])
for p in ports:
if p['fixed_ips'][0]['subnet_id'] == subnet['id']:
return p
except exc.NoResultFound:
return
def list_active_sync_routers_on_hosting_devices(self, context, host,
router_ids=None,
hosting_device_ids=None):
agent = self._get_agent_by_type_and_host(
context, c_const.AGENT_TYPE_CFG, host)
if not agent.admin_state_up:
return []
query = context.session.query(
l3_models.RouterHostingDeviceBinding.router_id)
query = query.join(l3_models.HostingDevice)
query = query.filter(l3_models.HostingDevice.cfg_agent_id == agent.id)
if router_ids:
if len(router_ids) == 1:
query = query.filter(
l3_models.RouterHostingDeviceBinding.router_id ==
router_ids[0])
else:
query = query.filter(
l3_models.RouterHostingDeviceBinding.router_id.in_(
router_ids))
if hosting_device_ids:
if len(hosting_device_ids) == 1:
query = query.filter(
l3_models.RouterHostingDeviceBinding.hosting_device_id ==
hosting_device_ids[0])
elif len(hosting_device_ids) > 1:
query = query.filter(
l3_models.RouterHostingDeviceBinding.hosting_device_id.in_(
hosting_device_ids))
router_ids = [item[0] for item in query]
if router_ids:
return self.get_sync_data_ext(context, router_ids=router_ids,
active=True)
else:
return []
def get_active_routers_for_host(self, context, host):
query = context.session.query(
l3_models.RouterHostingDeviceBinding.router_id)
query = query.join(
models_v2.Port,
l3_models.RouterHostingDeviceBinding.hosting_device_id ==
models_v2.Port.device_id)
query = query.join(p_binding.PortBindingPort)
query = query.filter(p_binding.PortBindingPort.host == host)
query = query.filter(models_v2.Port.name == 'mgmt')
router_ids = [item[0] for item in query]
if router_ids:
return self.get_sync_data_ext(context, router_ids=router_ids,
active=True)
else:
return []
@staticmethod
def _agent_state_filter(check_active, last_heartbeat):
"""Filters only active agents, if requested."""
if not check_active:
return True
return not agents_db.AgentDbMixin.is_agent_down(last_heartbeat)
def get_host_for_router(self, context, router, admin_state_up=None,
check_active=False):
query = context.session.query(agents_db.Agent.host,
agents_db.Agent.heartbeat_timestamp)
query = query.join(
p_binding.PortBindingPort,
p_binding.PortBindingPort.host == agents_db.Agent.host)
query = query.join(
models_v2.Port,
models_v2.Port.id == p_binding.PortBindingPort.port_id)
query = query.join(
l3_models.RouterHostingDeviceBinding,
l3_models.RouterHostingDeviceBinding.hosting_device_id ==
models_v2.Port.device_id)
query = query.filter(
agents_db.Agent.topic == topics.L3_AGENT,
l3_models.RouterHostingDeviceBinding.router_id == router)
if admin_state_up is not None:
query = query.filter(
agents_db.Agent.admin_state_up == admin_state_up)
entry = query.first()
if entry and L3RouterApplianceDBMixin._agent_state_filter(check_active,
entry[1]):
return entry[0]
return ""
| {
"content_hash": "96e4d270887b87f05874e2a52bb4483c",
"timestamp": "",
"source": "github",
"line_count": 615,
"max_line_length": 79,
"avg_line_length": 47.608130081300814,
"alnum_prop": 0.5717066839714471,
"repo_name": "leeseulstack/openstack",
"id": "7d0c619ce6e729b83e9f70d019622848f9be2dc5",
"size": "29913",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "neutron/plugins/cisco/db/l3/l3_router_appliance_db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21914"
},
{
"name": "JavaScript",
"bytes": "60527"
},
{
"name": "Makefile",
"bytes": "3295"
},
{
"name": "Python",
"bytes": "8816599"
},
{
"name": "Shell",
"bytes": "11768"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
"""Library for creating transition objects.
"""
import toolz as tz
import utils
import exceptions
def update_fn(trans, config):
"""Consume an update declaration of the form
{
'episode_start': <name of update rule>,
'episode_end': <name of update rule>
}
and return a function that updates the episode start and end times.
An update rule is one of
'advance': set to the data time
'copy': set to the value in the current state
'initialize': set to the initial value (e.g., None)
"""
update_dec = trans['update']
time_key = config['time_key']
state_initial = config['state_initial']
def _update(data, state):
def _rulemap(rule, key):
if rule == 'advance':
if key in ('episode_start', 'episode_end'):
return data[time_key]
elif key == 'episode_status_max':
return trans['final']
elif rule == 'copy':
return state[key]
elif rule == 'initialize':
return state_initial[key]
else:
raise exceptions.FSMError(
"rule ('{}') not recognized".format(rule))
return {k: _rulemap(r, k) for k, r in update_dec.iteritems()}
return _update
def activate(trans, config):
"""Return an active transition object given a transition declaration and
configuration data.
"""
time_key = config['time_key']
params = config['params']
def _triggered(data, state):
return trans['trigger'](data, state, **params)
def _update(data, state):
upd = tz.merge(
{
time_key: data[time_key],
'status': trans['final']},
update_fn(trans, config)(data, state))
return tz.merge(state, upd)
return utils.attrize(
name='transition',
initial=trans['initial'],
final=trans['final'],
triggered=_triggered,
update=_update)
| {
"content_hash": "58c9782cd243d77d0223a319ad99c696",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 76,
"avg_line_length": 27.575342465753426,
"alnum_prop": 0.5603576751117735,
"repo_name": "epfahl/auta",
"id": "323dd55972507ec7ac9fcdbebafab854edf84ad5",
"size": "2013",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "auta/transition.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11860"
}
],
"symlink_target": ""
} |
"""Tests for kernel connection utilities
Authors
-------
* MinRK
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import json
import os
import nose.tools as nt
from IPython.config import Config
from IPython.consoleapp import IPythonConsoleApp
from IPython.core.application import BaseIPythonApplication
from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
from IPython.utils.py3compat import str_to_bytes
from IPython.kernel import connect
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class DummyConsoleApp(BaseIPythonApplication, IPythonConsoleApp):
def initialize(self, argv=[]):
BaseIPythonApplication.initialize(self, argv=argv)
self.init_connection_file()
sample_info = dict(ip='1.2.3.4', transport='ipc',
shell_port=1, hb_port=2, iopub_port=3, stdin_port=4, control_port=5,
key=b'abc123', signature_scheme='hmac-md5',
)
def test_write_connection_file():
with TemporaryDirectory() as d:
cf = os.path.join(d, 'kernel.json')
connect.write_connection_file(cf, **sample_info)
nt.assert_true(os.path.exists(cf))
with open(cf, 'r') as f:
info = json.load(f)
info['key'] = str_to_bytes(info['key'])
nt.assert_equal(info, sample_info)
def test_app_load_connection_file():
"""test `ipython console --existing` loads a connection file"""
with TemporaryDirectory() as d:
cf = os.path.join(d, 'kernel.json')
connect.write_connection_file(cf, **sample_info)
app = DummyConsoleApp(connection_file=cf)
app.initialize(argv=[])
for attr, expected in sample_info.items():
if attr in ('key', 'signature_scheme'):
continue
value = getattr(app, attr)
nt.assert_equal(value, expected, "app.%s = %s != %s" % (attr, value, expected))
def test_get_connection_file():
cfg = Config()
with TemporaryWorkingDirectory() as d:
cfg.ProfileDir.location = d
cf = 'kernel.json'
app = DummyConsoleApp(config=cfg, connection_file=cf)
app.initialize(argv=[])
profile_cf = os.path.join(app.profile_dir.location, 'security', cf)
nt.assert_equal(profile_cf, app.connection_file)
with open(profile_cf, 'w') as f:
f.write("{}")
nt.assert_true(os.path.exists(profile_cf))
nt.assert_equal(connect.get_connection_file(app), profile_cf)
app.connection_file = cf
nt.assert_equal(connect.get_connection_file(app), profile_cf)
def test_find_connection_file():
cfg = Config()
with TemporaryDirectory() as d:
cfg.ProfileDir.location = d
cf = 'kernel.json'
app = DummyConsoleApp(config=cfg, connection_file=cf)
app.initialize(argv=[])
BaseIPythonApplication._instance = app
profile_cf = os.path.join(app.profile_dir.location, 'security', cf)
with open(profile_cf, 'w') as f:
f.write("{}")
for query in (
'kernel.json',
'kern*',
'*ernel*',
'k*',
):
nt.assert_equal(connect.find_connection_file(query), profile_cf)
BaseIPythonApplication._instance = None
def test_get_connection_info():
with TemporaryDirectory() as d:
cf = os.path.join(d, 'kernel.json')
connect.write_connection_file(cf, **sample_info)
json_info = connect.get_connection_info(cf)
info = connect.get_connection_info(cf, unpack=True)
nt.assert_equal(type(json_info), type(""))
nt.assert_equal(info, sample_info)
info2 = json.loads(json_info)
info2['key'] = str_to_bytes(info2['key'])
nt.assert_equal(info2, sample_info)
| {
"content_hash": "f5b0eb53d96a66c49b4c4d3e71ec62d6",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 87,
"avg_line_length": 35.145161290322584,
"alnum_prop": 0.5658558972005507,
"repo_name": "omni5cience/django-inlineformfield",
"id": "847bc7f27c5c754273b83f6847aee6cb278db865",
"size": "4358",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": ".tox/py27/lib/python2.7/site-packages/IPython/kernel/tests/test_connect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "43622"
},
{
"name": "Groff",
"bytes": "3667"
},
{
"name": "HTML",
"bytes": "108126"
},
{
"name": "JavaScript",
"bytes": "853457"
},
{
"name": "Python",
"bytes": "10506732"
},
{
"name": "Shell",
"bytes": "3801"
},
{
"name": "Smarty",
"bytes": "21023"
}
],
"symlink_target": ""
} |
from toee import *
import char_editor
def CheckPrereq(attachee, classLevelled, abilityScoreRaised):
#Req 1, turn undead feat
if not (char_editor.has_feat(feat_turn_undead) or char_editor.has_feat(feat_rebuke_undead)):
return 0
#Req2, shield proficency
if not char_editor.has_feat(feat_shield_proficiency):
return 0
return 1
| {
"content_hash": "46ca2704153668457b73dda182ee1487",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 93,
"avg_line_length": 24.428571428571427,
"alnum_prop": 0.7485380116959064,
"repo_name": "GrognardsFromHell/TemplePlus",
"id": "2b94ff212dad8ca374aa115d0ef927a09d6acafc",
"size": "342",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tpdatasrc/tpgamefiles/scr/feats/feat - Divine Shield.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "683"
},
{
"name": "C",
"bytes": "629718"
},
{
"name": "C#",
"bytes": "167885"
},
{
"name": "C++",
"bytes": "10018792"
},
{
"name": "CMake",
"bytes": "91980"
},
{
"name": "CSS",
"bytes": "1292"
},
{
"name": "HLSL",
"bytes": "18884"
},
{
"name": "HTML",
"bytes": "433942"
},
{
"name": "PowerShell",
"bytes": "5374"
},
{
"name": "Python",
"bytes": "2850350"
}
],
"symlink_target": ""
} |
from setuptools import setup
setup(
name='flask-canvas',
version='0.1',
keywords='python,facebook,canvas,oauth2',
url='https://github.com/demianbrecht/flask-canvas',
license='MIT',
author='Demian Brecht',
author_email='[email protected]',
description='A Flask extension for Facebook canvas-based apps',
py_modules=['flask_canvas'],
include_package_data=True,
platforms='any',
install_requires=['Flask'],
classifiers=[
'Environment :: Web Environment',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Development Status :: 4 - Beta',
])
| {
"content_hash": "3bfd45d4b273cc3ea1537420984168e1",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 71,
"avg_line_length": 34.166666666666664,
"alnum_prop": 0.6390243902439025,
"repo_name": "demianbrecht/flask-canvas",
"id": "47db0358311de4eeb1bdad8298916bb3c5939797",
"size": "820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14730"
},
{
"name": "Shell",
"bytes": "5117"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
import argparse
import os
import stat
import StringIO
import sys
import urlparse
import zipfile
from distutils import version as dist_version
import yaml
import requests
USER_AGENT = 'appengine.py'
VERSION_URL = 'https://appengine.google.com/api/updatecheck'
OLD_VERSION_URL = 'http://googleappengine.googlecode.com/files/google_appengine_%s.zip'
NEW_DEPRECATED_URL = 'http://storage.googleapis.com/appengine-sdks/deprecated/%s/google_appengine_%%s.zip'
CURRENT_VERSION_URL = 'https://storage.googleapis.com/appengine-sdks/featured/google_appengine_%s.zip'
LAST_OLD_VERSION = dist_version.StrictVersion('1.8.9')
sdk_version_key = 'APPENGINEPY_SDK_VERSION'
def _extract_zip(archive, dest=None, members=None):
"""Extract the ZipInfo object to a real file on the path targetpath."""
# Python 2.5 compatibility.
dest = dest or os.getcwd()
members = members or archive.infolist()
for member in members:
if isinstance(member, basestring):
member = archive.getinfo(member)
_extract_zip_member(archive, member, dest)
def _extract_zip_member(archive, member, dest):
# Python 2.5 compatibility.
target = member.filename
if target[:1] == '/':
target = target[1:]
target = os.path.join(dest, target)
# It's a directory.
if target[-1:] == '/':
parent = target[:-1]
target = ''
else:
target = os.path.normpath(target)
parent = os.path.dirname(target)
if not os.path.exists(parent):
os.makedirs(parent)
if target:
with open(target, 'w') as fh:
fh.write(archive.read(member.filename))
def make_parser():
"""Returns a new option parser."""
p = argparse.ArgumentParser()
p.add_argument(
'sdk',
nargs='?',
default=None
)
p.add_argument(
'-p', '--prefix',
metavar='DIR',
help='Install SDK in DIR'
)
p.add_argument(
'-b', '--bindir',
metavar='DIR',
help='Install tools in DIR'
)
p.add_argument(
'-f', '--force',
action='store_true',
help='over-write existing installation',
default=False
)
p.add_argument(
'-n', '--no-bindir',
action='store_true',
default=False,
help='Do not install tools in DIR'
)
return p
def parse_args(argv):
"""Returns a tuple of (opts, args) for arguments."""
parser = make_parser()
args = parser.parse_args(argv[1:])
sdk = args.sdk
# Use APPENGINEPY_SDK_VERSION if set.
if not sdk and (sdk_version_key in os.environ):
sdk = (os.environ[sdk_version_key],)
return args, sdk
def check_version(url=VERSION_URL):
"""Returns the version string for the latest SDK."""
response = requests.get(url)
update_dict = yaml.load(response.text)
release_version = update_dict['release']
return release_version
def parse_sdk_name(name, current_version):
"""Returns a filename or URL for the SDK name.
The name can be a version string, a remote URL or a local path.
"""
# Version like x.y.z, return as-is.
try:
version = dist_version.StrictVersion(name)
if version == current_version:
# get from current.
url = CURRENT_VERSION_URL
elif version > LAST_OLD_VERSION:
# newer SDK, not on code.google.com
url = NEW_DEPRECATED_URL % ''.join(name.split('.'))
else:
# old SDK in code.google.com
url = OLD_VERSION_URL
return url % name
except ValueError:
# this means we couldn't parse as x.y.z
pass
# A network location.
url = urlparse.urlparse(name)
if url.scheme:
return name
# Else must be a filename.
return os.path.abspath(name)
def open_sdk(url):
"""Open the SDK from the URL, which can be either a network location or
a filename path. Returns a file-like object open for reading.
"""
if urlparse.urlparse(url).scheme:
return _download(url)
else:
return open(url)
def _download(url):
"""Downloads an URL and returns a file-like object open for reading,
compatible with zipping.ZipFile (it has a seek() method).
"""
file_download = requests.get(url)
return StringIO.StringIO(file_download.content)
def install_sdk(filename, dest='.', overwrite=False):
archive = zipfile.ZipFile(filename)
_extract_zip(archive, dest=dest)
return dest
def install_tools(src, dest, overwrite=False):
tools = [name for name in os.listdir(src) if name.endswith('.py')]
all_x = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
for name in tools:
src_name = os.path.join(src, name)
new_mode = os.stat(src_name).st_mode | all_x
os.chmod(src_name, new_mode)
dest_name = os.path.join(dest, name)
if overwrite:
try:
os.unlink(dest_name)
except OSError:
pass
os.symlink(src_name, dest_name)
return tools
def main(argv):
args, sdk = parse_args(argv)
current_version = check_version()
version = sdk or current_version
sdk_url = parse_sdk_name(version, current_version)
archive = open_sdk(sdk_url)
install_path = install_sdk(archive, dest=sys.prefix, overwrite=args.force)
src = os.path.join(install_path, 'google_appengine')
dest = args.prefix or os.path.join(sys.prefix, 'bin')
install_tools(src, dest, overwrite=args.force)
if __name__ == "__main__":
sys.exit(main(sys.argv))
| {
"content_hash": "1e34da359dafb226e5745f9ac11810b5",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 106,
"avg_line_length": 26.597156398104264,
"alnum_prop": 0.6218816821097648,
"repo_name": "optimizely/appengine.py",
"id": "9f8c096411582ac38bf37c941cd3d8b3ceb21734",
"size": "5634",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "appengine.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9193"
}
],
"symlink_target": ""
} |
import lxml.etree
from urllib import quote
from copy import deepcopy
from pkg_resources import resource_stream # pylint: disable-msg=E0611
import sys
from swift3.exception import S3Exception
from swift3.utils import LOGGER, camel_to_snake, utf8encode, utf8decode
XMLNS_S3 = 'http://s3.amazonaws.com/doc/2006-03-01/'
XMLNS_XSI = 'http://www.w3.org/2001/XMLSchema-instance'
class XMLSyntaxError(S3Exception):
pass
class DocumentInvalid(S3Exception):
pass
def cleanup_namespaces(elem):
def remove_ns(tag, ns):
if tag.startswith('{%s}' % ns):
tag = tag[len('{%s}' % ns):]
return tag
if not isinstance(elem.tag, basestring):
# elem is a comment element.
return
# remove s3 namespace
elem.tag = remove_ns(elem.tag, XMLNS_S3)
# remove default namespace
if elem.nsmap and None in elem.nsmap:
elem.tag = remove_ns(elem.tag, elem.nsmap[None])
for e in elem.iterchildren():
cleanup_namespaces(e)
def fromstring(text, root_tag=None):
try:
elem = lxml.etree.fromstring(text, parser)
except lxml.etree.XMLSyntaxError as e:
LOGGER.debug(e)
raise XMLSyntaxError(e)
cleanup_namespaces(elem)
if root_tag is not None:
# validate XML
try:
path = 'schema/%s.rng' % camel_to_snake(root_tag)
with resource_stream(__name__, path) as rng:
lxml.etree.RelaxNG(file=rng).assertValid(elem)
except IOError as e:
# Probably, the schema file doesn't exist.
exc_type, exc_value, exc_traceback = sys.exc_info()
LOGGER.error(e)
raise exc_type, exc_value, exc_traceback
except lxml.etree.DocumentInvalid as e:
LOGGER.debug(e)
raise DocumentInvalid(e)
return elem
def tostring(tree, encoding_type=None, use_s3ns=True):
if use_s3ns:
nsmap = tree.nsmap.copy()
nsmap[None] = XMLNS_S3
root = Element(tree.tag, attrib=tree.attrib, nsmap=nsmap)
root.text = tree.text
root.extend(deepcopy(tree.getchildren()))
tree = root
if encoding_type == 'url':
tree = deepcopy(tree)
for e in tree.iter():
# Some elements are not url-encoded even when we specify
# encoding_type=url.
blacklist = ['LastModified', 'ID', 'DisplayName', 'Initiated']
if e.tag not in blacklist:
if isinstance(e.text, basestring):
e.text = quote(e.text)
return lxml.etree.tostring(tree, xml_declaration=True, encoding='UTF-8')
class _Element(lxml.etree.ElementBase):
"""
Wrapper Element class of lxml.etree.Element to support
a utf-8 encoded non-ascii string as a text.
Why we need this?:
Original lxml.etree.Element supports only unicode for the text.
It declines maintainability because we have to call a lot of encode/decode
methods to apply account/container/object name (i.e. PATH_INFO) to each
Element instance. When using this class, we can remove such a redundant
codes from swift3 middleware.
"""
def __init__(self, *args, **kwargs):
# pylint: disable-msg=E1002
super(_Element, self).__init__(*args, **kwargs)
@property
def text(self):
"""
utf-8 wrapper property of lxml.etree.Element.text
"""
return utf8encode(lxml.etree.ElementBase.text.__get__(self))
@text.setter
def text(self, value):
lxml.etree.ElementBase.text.__set__(self, utf8decode(value))
parser_lookup = lxml.etree.ElementDefaultClassLookup(element=_Element)
parser = lxml.etree.XMLParser()
parser.set_element_class_lookup(parser_lookup)
Element = parser.makeelement
SubElement = lxml.etree.SubElement
| {
"content_hash": "3cbe531d0da104df05ad03d556b72f80",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 78,
"avg_line_length": 30.055555555555557,
"alnum_prop": 0.6398204383416952,
"repo_name": "swiftstack/swift3-stackforge",
"id": "98d43c7cfa4c67d9a664f4be674a824427b22343",
"size": "4378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "swift3/etree.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "731693"
},
{
"name": "Shell",
"bytes": "5176"
}
],
"symlink_target": ""
} |
import json
import logging
import redis
from django.shortcuts import render
from django.core.paginator import Paginator
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from rest_framework.views import APIView
from account.decorators import login_required, super_admin_required
from account.models import SUPER_ADMIN, User
from problem.models import Problem
from contest.models import ContestProblem, Contest
from contest.decorators import check_user_contest_permission
from utils.shortcuts import serializer_invalid_response, error_response, success_response, error_page, paginate
from utils.throttling import TokenBucket, BucketController
from judge.result import result as judge_result
from .tasks import _judge
from .models import Submission
from .serializers import (CreateSubmissionSerializer, SubmissionSerializer,
SubmissionhareSerializer, SubmissionRejudgeSerializer,
CreateContestSubmissionSerializer, OpenAPICreateSubmissionSerializer,
OpenAPISubmissionSerializer)
logger = logging.getLogger("app_info")
def _submit_code(user, problem_id, language, code):
controller = BucketController(user_id=user.id,
redis_conn=redis.Redis(host=settings.REDIS_CACHE["host"],
port=settings.REDIS_CACHE["port"],
db=settings.REDIS_CACHE["db"]),
default_capacity=settings.TOKEN_BUCKET_DEFAULT_CAPACITY)
bucket = TokenBucket(fill_rate=settings.TOKEN_BUCKET_FILL_RATE,
capacity=settings.TOKEN_BUCKET_DEFAULT_CAPACITY,
last_capacity=controller.last_capacity,
last_timestamp=controller.last_timestamp)
if bucket.consume():
controller.last_capacity -= 1
else:
return error_response(u"您提交的频率过快, 请等待%d秒" % int(bucket.expected_time() + 1))
try:
problem = Problem.objects.get(id=problem_id)
except Problem.DoesNotExist:
return error_response(u"题目不存在")
submission = Submission.objects.create(user_id=user.id,
language=language,
code=code,
problem_id=problem.id)
try:
_judge.delay(submission.id, problem.time_limit, problem.memory_limit, problem.test_case_id,
problem.spj, problem.spj_language, problem.spj_code, problem.spj_version)
except Exception as e:
logger.error(e)
return error_response(u"提交判题任务失败")
return success_response({"submission_id": submission.id})
class OpenAPISubmitCodeAPI(APIView):
def post(self, request):
"""
openapi 创建提交
"""
serializer = OpenAPICreateSubmissionSerializer(data=request.data)
if serializer.is_valid():
data = serializer.data
try:
user = User.objects.get(openapi_appkey=data["appkey"])
except User.DoesNotExist:
return error_response(u"appkey无效")
return _submit_code(user, data["problem_id"], data["language"], data["code"])
else:
return serializer_invalid_response(serializer)
def get(self, request):
"""
openapi 获取提交详情
"""
submission_id = request.GET.get("submission_id", None)
appkey = request.GET.get("appkey", None)
if not (submission_id and appkey):
return error_response(u"参数错误")
try:
user = User.objects.get(openapi_appkey=appkey)
except User.DoesNotExist:
return error_response(u"appkey无效")
try:
submission = Submission.objects.get(id=submission_id, user_id=user.id)
return success_response(OpenAPISubmissionSerializer(submission).data)
except Submission.DoesNotExist:
return error_response(u"提交不存在")
class SubmissionAPIView(APIView):
@login_required
def post(self, request):
"""
提交代码
---
request_serializer: CreateSubmissionSerializer
"""
serializer = CreateSubmissionSerializer(data=request.data)
if serializer.is_valid():
data = serializer.data
return _submit_code(request.user, data["problem_id"], data["language"], data["code"])
else:
return serializer_invalid_response(serializer)
@login_required
def get(self, request):
submission_id = request.GET.get("submission_id", None)
if not submission_id:
return error_response(u"参数错误")
try:
submission = Submission.objects.get(id=submission_id, user_id=request.user.id)
except Submission.DoesNotExist:
return error_response(u"提交不存在")
response_data = {"result": submission.result}
if submission.result == 0:
response_data["accepted_answer_time"] = submission.accepted_answer_time
return success_response(response_data)
class ContestSubmissionAPIView(APIView):
@check_user_contest_permission
def post(self, request):
"""
创建比赛的提交
---
request_serializer: CreateContestSubmissionSerializer
"""
serializer = CreateContestSubmissionSerializer(data=request.data)
if serializer.is_valid():
data = serializer.data
contest = Contest.objects.get(id=data["contest_id"])
try:
problem = ContestProblem.objects.get(contest=contest, id=data["problem_id"])
except ContestProblem.DoesNotExist:
return error_response(u"题目不存在")
submission = Submission.objects.create(user_id=request.user.id,
language=int(data["language"]),
contest_id=contest.id,
code=data["code"],
problem_id=problem.id)
try:
_judge.delay(submission.id, problem.time_limit, problem.memory_limit, problem.test_case_id,
problem.spj, problem.spj_language, problem.spj_code, problem.spj_version)
except Exception as e:
logger.error(e)
return error_response(u"提交判题任务失败")
return success_response({"submission_id": submission.id})
else:
return serializer_invalid_response(serializer)
@login_required
def problem_my_submissions_list_page(request, problem_id):
"""
我单个题目所有提交的列表页
"""
try:
problem = Problem.objects.get(id=problem_id, visible=True)
except Problem.DoesNotExist:
return error_page(request, u"问题不存在")
submissions = Submission.objects.filter(user_id=request.user.id, problem_id=problem.id, contest_id__isnull=True). \
order_by("-create_time"). \
values("id", "result", "create_time", "accepted_answer_time", "language")
return render(request, "oj/submission/problem_my_submissions_list.html",
{"submissions": submissions, "problem": problem})
def _get_submission(submission_id, user):
"""
判断用户权限 看能否获取这个提交详情页面
"""
submission = Submission.objects.get(id=submission_id)
# 超级管理员或者提交者自己或者是一个分享的提交
if user.admin_type == SUPER_ADMIN or submission.user_id == user.id:
return {"submission": submission, "can_share": True}
if submission.contest_id:
contest = Contest.objects.get(id=submission.contest_id)
# 比赛提交的话,比赛创建者也可见
if contest.created_by == user:
return {"submission": submission, "can_share": True}
if submission.shared:
return {"submission": submission, "can_share": False}
else:
raise Submission.DoesNotExist
@login_required
def my_submission(request, submission_id):
"""
单个题目的提交详情页
"""
try:
result = _get_submission(submission_id, request.user)
submission = result["submission"]
except Submission.DoesNotExist:
return error_page(request, u"提交不存在")
try:
if submission.contest_id:
problem = ContestProblem.objects.get(id=submission.problem_id, visible=True)
else:
problem = Problem.objects.get(id=submission.problem_id, visible=True)
except Exception:
return error_page(request, u"提交不存在")
if submission.result in [judge_result["compile_error"], judge_result["system_error"], judge_result["waiting"]]:
info = submission.info
else:
info = json.loads(submission.info)
if "test_case" in info[0]:
info = sorted(info, key=lambda x: x["test_case"])
user = User.objects.get(id=submission.user_id)
return render(request, "oj/submission/my_submission.html",
{"submission": submission, "problem": problem, "info": info,
"user": user, "can_share": result["can_share"], "website_base_url": settings.WEBSITE_INFO["url"]})
class SubmissionAdminAPIView(APIView):
@super_admin_required
def get(self, request):
problem_id = request.GET.get("problem_id", None)
if not problem_id:
return error_response(u"参数错误")
submissions = Submission.objects.filter(problem_id=problem_id, contest_id__isnull=True).order_by("-create_time")
return paginate(request, submissions, SubmissionSerializer)
@login_required
def my_submission_list_page(request, page=1):
"""
我的所有提交的列表页
"""
# 是否显示所有人的提交
show_all = settings.SHOW_ALL_SUBMISSIONS_LIST or request.GET.get("show_all", False) == "true"
if show_all:
submissions = Submission.objects.filter(contest_id__isnull=True)
else:
submissions = Submission.objects.filter(user_id=request.user.id, contest_id__isnull=True)
submissions = submissions.values("id", "user_id", "problem_id", "result", "create_time", "accepted_answer_time",
"language").order_by("-create_time")
language = request.GET.get("language", None)
filter = None
if language:
submissions = submissions.filter(language=int(language))
filter = {"name": "language", "content": language}
result = request.GET.get("result", None)
if result:
submissions = submissions.filter(result=int(result))
filter = {"name": "result", "content": result}
paginator = Paginator(submissions, 20)
try:
submissions = paginator.page(int(page))
except Exception:
return error_page(request, u"不存在的页码")
# 因为提交页面经常会有重复的题目和用户,缓存一下查询结果
cache_result = {"problem": {}, "user": {}}
for item in submissions:
problem_id = item["problem_id"]
if problem_id not in cache_result["problem"]:
problem = Problem.objects.get(id=problem_id)
cache_result["problem"][problem_id] = problem.title
item["title"] = cache_result["problem"][problem_id]
if show_all:
user_id = item["user_id"]
if user_id not in cache_result["user"]:
user = User.objects.get(id=user_id)
cache_result["user"][user_id] = user
item["user"] = cache_result["user"][user_id]
if item["user_id"] == request.user.id or request.user.admin_type == SUPER_ADMIN:
item["show_link"] = True
else:
item["show_link"] = False
previous_page = next_page = None
try:
previous_page = submissions.previous_page_number()
except Exception:
pass
try:
next_page = submissions.next_page_number()
except Exception:
pass
return render(request, "oj/submission/my_submissions_list.html",
{"submissions": submissions, "page": int(page),
"previous_page": previous_page, "next_page": next_page, "start_id": int(page) * 20 - 20,
"filter": filter, "show_all": show_all})
class SubmissionShareAPIView(APIView):
def post(self, request):
serializer = SubmissionhareSerializer(data=request.data)
if serializer.is_valid():
submission_id = serializer.data["submission_id"]
try:
result = _get_submission(submission_id, request.user)
except Submission.DoesNotExist:
return error_response(u"提交不存在")
if not result["can_share"]:
return error_page(request, u"提交不存在")
submission = result["submission"]
submission.shared = not submission.shared
submission.save()
return success_response(submission.shared)
else:
return serializer_invalid_response(serializer)
class SubmissionRejudgeAdminAPIView(APIView):
@super_admin_required
def post(self, request):
serializer = SubmissionRejudgeSerializer(data=request.data)
if serializer.is_valid():
submission_id = serializer.data["submission_id"]
# 目前只考虑前台公开题目的重新判题
try:
submission = Submission.objects.get(id=submission_id, contest_id__isnull=True)
except Submission.DoesNotExist:
return error_response(u"提交不存在")
try:
problem = Problem.objects.get(id=submission.problem_id)
except Problem.DoesNotExist:
return error_response(u"题目不存在")
try:
_judge.delay(submission.id, problem.time_limit, problem.memory_limit, problem.test_case_id,
problem.spj, problem.spj_language, problem.spj_code, problem.spj_version)
except Exception as e:
logger.error(e)
return error_response(u"提交判题任务失败")
return success_response(u"任务提交成功")
else:
return serializer_invalid_response(serializer)
| {
"content_hash": "f3aff345aaae173870d2386faa752006",
"timestamp": "",
"source": "github",
"line_count": 349,
"max_line_length": 120,
"avg_line_length": 40.12893982808023,
"alnum_prop": 0.6115672973937879,
"repo_name": "mcmdhr/CSOJ",
"id": "9281582ef5dfceea36e9569e8ce05a37d717aede",
"size": "14877",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "submission/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "48167"
},
{
"name": "HTML",
"bytes": "173998"
},
{
"name": "JavaScript",
"bytes": "130711"
},
{
"name": "Python",
"bytes": "247487"
},
{
"name": "Shell",
"bytes": "540"
}
],
"symlink_target": ""
} |
"""
Created on Mon Jul 11 13:15:19 2016
@author: sweel_Rafelski
"""
import sys
import os
import os.path as op
import shutil as sh
import fnmatch as fn
import traceback
import errno
from wrappers import FalseException, UsageError
# pylint: disable=C0103
def mkdir_exist(path):
try:
os.makedirs(path)
except OSError as error:
if error.errno != errno.EEXIST:
raise
def movefilesup(path):
"""
Move files from `Pos0` folder up to top level of `path` folder
"""
count = 0
for root, dirs, _ in os.walk(path, topdown=True):
hasPos0 = [(root, d) for d in dirs if fn.fnmatch(d, "Pos0")]
if hasPos0: # list is not empty
for r, _, files in os.walk(op.join(*hasPos0[0])):
for f in files:
src = op.join(r, f)
dst = op.join(hasPos0[0][0], f)
count += 1
print "Moving {} -->\n{}".format(src, dst)
sh.move(src, dst)
return count
def split_pad(folder):
"""
repad the FOLDER names middle index to 00[0-9] format
"""
for dirs in os.listdir(folder):
olddir = op.join(folder, dirs)
oldstr = op.basename(olddir)
oldstrL = oldstr.split("_")
newstr = "_".join((oldstrL[0], oldstrL[1].zfill(3)))
newdir = olddir.replace(oldstr, newstr)
print "renaming {} -->\n{}".format(olddir, newdir)
try:
os.rename(olddir, newdir)
except WindowsError:
pass
def repad(pth):
"""
Helper function for split_pad
"""
try:
split_pad(pth)
except IndexError:
try: # folder doesnt have "_", try subfolder
for dirs in os.listdir(os.getcwd()):
print "Now in {}".format(dirs)
split_pad(op.join(pth, dirs))
except IndexError:
traceback.print_stack(limit=4)
raise UsageError("Check folder paths")
def switch_labels(pth):
for root, dirs, files in os.walk(pth):
for f in files:
if 'GFP' in f:
try:
old_g = op.join(root, f)
old_r = old_g.replace('GFP', 'RFP')
if op.isfile(old_r):
print "Switchin labels for {}\n".format(old_g)
# save the gfp label to some long number
os.rename(old_g, op.join(root, "_27092450646347351L"))
# rename the rfp label to GFP
os.rename(old_r, old_r.replace('RFP', 'GFP'))
# rename the long number label to RFP
os.rename(op.join(root, "_27092450646347351L"), old_r)
else:
traceback.print_stack(limit=2)
raise FalseException
except FalseException:
raise UsageError("\nRelabeled file does not exist")
def main():
try:
try:
# change this path to where the preprocessed
# raw tif image stacks are
os.chdir(op.expanduser(os.sep.join(
('~', 'Desktop', 'New folder'))))
except WindowsError:
traceback.print_stack(limit=1)
raise UsageError("Couldn't find folder, check path")
path = os.getcwd()
print "Changed to {}".format(path)
cmove = movefilesup(path)
print "Moved {} files!".format(cmove)
repad(path)
while True:
try:
invar = raw_input("Do you wish to switch the labels for GFP/RFP??\ Press Y for yes")
if invar == 'Y':
switch_labels(path)
elif invar == 'Q' or invar == 'N':
print 'Quitting!'
break
except KeyError as e:
print "{} is not a valid input, try again (Y| [Q | N])".format(e)
continue
return 0
except UsageError as e:
print e
return 1
if __name__ == '__main__':
sys.exit(main())
| {
"content_hash": "dbde129374094c201547798d9d33c86d",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 100,
"avg_line_length": 29.392857142857142,
"alnum_prop": 0.5069258809234508,
"repo_name": "moosekaka/sweepython",
"id": "01f28fb0c59d32f006b7d3fb02791fbfb9da1f87",
"size": "4139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pre_mitograph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "701922"
},
{
"name": "Python",
"bytes": "317525"
}
],
"symlink_target": ""
} |
import pandas as pd
# for backward compatability with pandas 0.23 - 0.25
class BackwardCompatibility(pd.api.extensions.ExtensionDtype):
name = "object"
def __init__(self, *args, **kwargs) -> None:
pass # pragma: no cover
if hasattr(pd, "StringDtype"):
StringDtype = pd.StringDtype
else: # pragma: no cover
class StringDtype(BackwardCompatibility): # type: ignore
pass
if hasattr(pd, "DatetimeTZDtype"):
DatetimeTZDtype = pd.DatetimeTZDtype
else: # pragma: no cover
class DatetimeTZDtype(BackwardCompatibility): # type: ignore
pass
if hasattr(pd, "CategoricalDtype"):
CategoricalDtype = pd.CategoricalDtype
else: # pragma: no cover
class CategoricalDtype(BackwardCompatibility): # type: ignore
pass
if hasattr(pd, "PeriodDtype"):
PeriodDtype = pd.PeriodDtype
else: # pragma: no cover
class PeriodDtype(BackwardCompatibility): # type: ignore
pass
if hasattr(pd, "SparseDtype"):
SparseDtype = pd.SparseDtype
else: # pragma: no cover
class SparseDtype(BackwardCompatibility): # type: ignore
pass
if hasattr(pd, "IntervalDtype"):
IntervalDtype = pd.IntervalDtype
else: # pragma: no cover
class IntervalDtype(BackwardCompatibility): # type: ignore
pass
if hasattr(pd, "Int64Dtype"):
Int64Dtype = pd.Int64Dtype
else: # pragma: no cover
class Int64Dtype(BackwardCompatibility): # type: ignore
pass
if hasattr(pd, "BooleanDtype"):
BooleanDtype = pd.BooleanDtype
else: # pragma: no cover
class BooleanDtype(BackwardCompatibility): # type: ignore
pass
| {
"content_hash": "ee5b8d995484f59a40783b56adda8daf",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 66,
"avg_line_length": 24.93846153846154,
"alnum_prop": 0.690314620604565,
"repo_name": "nanne-aben/strictly_typed_pandas",
"id": "bd17cadeff279056b6938e1f433d9c30398b3788",
"size": "1621",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "strictly_typed_pandas/pandas_types.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26502"
}
],
"symlink_target": ""
} |
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib import auth
from django.utils import html
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ValidationError
from django.contrib.auth.password_validation import validate_password
from django.forms import ModelForm
from django.conf import settings
from .models import Book, Subscription, Profile
from .forms import AccountForm, AccountEditForm,ProfileEditForm
# Create your views here.
def index(request):
context = {'nbar': 'home',
'heading': 'Amazing Textbook Store',
'mission': 'home of amazingly cheap college textbooks',
'deals': [('black friday deal', 'https://placehold.it/150x80?text=IMAGE', 'Buy 50 mobiles and get a gift card'),
('christmas deal', 'https://placehold.it/150x80?text=No+Image', 'Buy 1 mobile and get 1 free')]
}
return render(request, 'books/index.html', context)
def book_list(request):
books = Book.objects.filter(available=True)
for book in books:
book.discounted_price = "%.2f"%(book.price - book.discount_percent/100*book.price)
context = {
'nbar': 'books',
'pageTitle': 'Books',
#'books': Book.objects.all(),
'books': books
}
return render(request, 'books/list.html', context)
def book_detail(request, id, slug):
book = get_object_or_404(Book, id=id, slug=slug, available=True)
context = {
'nbar': 'books',
'pageTitle': book.title,
'book': book
}
return render(request, 'books/detail.html', context)
def subscribe(request):
errors = []
context = {}
if 'email' in request.GET:
email_id = request.GET.get('email', '')
if not email_id:
errors.append('Please enter a valid email address.')
else:
subs = Subscription.objects.create(email=email_id)
context['pageTitle']= 'Thank you!'
context['panelTitle'] = 'Thank you!'
context['panelBody'] = 'Thank you for subscribing to our mailing list.'
return render(request, 'books/static.html', context)
else:
return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
def deals(request):
context = {
'nbar': 'deals',
'pageTitle': 'Deals',
'panelTitle': 'Deals',
'panelBody': '<strong>Sorry, no deals at this time. Sign up to get deals delivered right to your inbox...</strong>'
}
return render(request, 'books/static.html', context)
def contact(request):
context = {
'nbar': 'contact',
'pageTitle': 'Contact',
'panelTitle': 'Contact',
'panelBody': """
<!-- List group -->
<ul class="list-group">
<li class="list-group-item"><strong>Corporate Office: </strong><br />
<address>111 University Blvd<br>
Grand Junction, CO 81501 <br>
☎: (970) 123-4567<br>
<span class="glyphicon glyphicon-envelope"></span>: [email protected]<br>
</address>
</li>
<li class="list-group-item"><strong>Denver Office: </strong><br />
<address>123 Amazing Street</br>
Denver, CO 81111 <br>
☎: (970) 123-1234<br>
<span class="glyphicon glyphicon-envelope"></span>: [email protected]<br>
</address>
</li>
<li class="list-group-item">Porta ac consectetur ac</li>
<li class="list-group-item">Vestibulum at eros</li>
</ul>
""",
}
return render(request, 'books/static.html', context)
def login(request):
# print('site = ', request.get_host())
if request.method == 'POST':
username = request.POST.get('username', '')
password = request.POST.get('password', '')
valid = False
error_message = []
if not username or not password:
error_message = ['You must fill in all of the fields.']
else:
user = auth.authenticate(username=username, password=password)
if user is not None:
if user.is_active:
# correct password, and the user is marked active
auth.login(request, user)
request.session['user_id'] = user.id
valid = True
else:
error_message = ["User accocount has not been activated."]
else:
error_message = ["Invalid username or password."]
if valid:
return HttpResponseRedirect(reverse('dashboard'))
else:
return render(request,
'books/login.html',
{
'errorMessage': ' '.join(error_message),
'username': username,
'password': password,
})
else:
# No context variables to pass to the template system, hence blank
# dictionary object...
return render(request,
'books/login.html',
{
'pageTitle': 'Login',
})
def logout(request):
auth.logout(request)
return HttpResponseRedirect(reverse('index'))
def signup(request):
valid = False
error_message = []
message_type = 'info'
# path = request.get_full_path()
# print('path = ', path)
if request.user.is_authenticated():
# user already has an account and is authenticated; don't let them register again
error_message = [u'''You are logged in as {username}. If you'd like to register another account,
<a href="{url}">Logout</a> first.
'''.format(username=html.escape(request.user.username), url=settings.LOGOUT_URL)]
valid = False
# If it's a HTTP POST, we're interested in processing form data.
elif request.method == 'POST':
accForm = AccountForm(data=request.POST)
if accForm.is_valid():
# check for duplicate username
user = auth.models.User.objects.filter(username=accForm.cleaned_data['username'])
if user:
url = '/recover/' # not implemented
error_message = [u'''Account with email {username} already exists. <a href="{url}">
Forgot your password? </a>
'''.format(username=html.escape(accForm.cleaned_data['username']), url=url)]
valid = False
else:
try:
validate_password(accForm.cleaned_data['password'])
valid = True
except ValidationError as ex:
valid = False
for e in ex: #ex is list of error messages
error_message.append(e)
else:
valid = False
for k in accForm.errors:
error_message.append('<br>'.join(accForm.errors[k]))
if valid:
# Save the user's form data to the built-in user table.
user = accForm.save(commit=False)
user.set_password(accForm.cleaned_data['password']) # set the password using default hashing
user.is_active = True #set it to False if verifcation is required
user.is_superuser = False
user.is_staff = False
user.save()
# save user to profile table as well
profile = Profile(user=user)
profile.save()
# generate_activation_key_and_send_email(site_url, user)
# send_mail(subject, message, from_email, to_list, html_message=html_message, fail_silently=True)
# Update our variable to tell the template registration was successful.
error_message = [u'''The account is created. Follow the link to login...<a
href="{url}">Login</a>.
'''.format(url=reverse('login'))]
return render(request,
'books/message.html',
{
'pageTitle': 'Feedback',
'messageType': 'success',
'message': ' '.join(error_message),
})
else:
return render(request,
'books/signup.html',
{
'pageTitle': 'Account Registration',
'panelTitle': 'Account Registration',
'accountForm': accForm,
'errorMessage': '<br>'.join(error_message),
})
else:
accForm = AccountForm()
return render(request,
'books/signup.html',
{
'pageTitle': 'Account Registration',
'panelTitle': 'Account Registration',
'accountForm': accForm,
})
@login_required
def dashboard(request):
context = {
'pageTitle': 'Dashboard',
'panelTitle': 'Dashboard',
'panelBody': '<strong>TBD... Display account dashboard here...</strong>'
}
return render(request, 'books/static.html', context)
@login_required
def account(request):
errorMessage = []
errorType = 'danger'
if request.method == 'POST':
accForm = AccountEditForm(instance=request.user,
data=request.POST,
)
profileForm = ProfileEditForm(instance=request.user.profile,
data=request.POST,
files=request.FILES)
if accForm.is_valid() and profileForm.is_valid():
accForm.save()
profileForm.save()
errorMessage.append('Account update successful!')
errorType = 'success'
else:
for k in accForm.errors:
errorMessage.append(accForm.errors[k])
else:
accForm = AccountEditForm(instance=request.user)
profileForm = ProfileEditForm(instance=request.user.profile)
return render(request, 'books/account.html',
{
'pageTitle': 'Account Update',
'panelTitle': 'Account Update',
'accountForm': accForm,
'profileForm': profileForm,
'errorMessage': '<br>'.join(errorMessage),
'errorType': errorType
})
def search(request):
context = {}
if 'search' in request.GET:
q = request.GET.get('search', '')
if q:
books = Book.objects.filter(title__icontains=q)
context['pageTitle']= 'Search results'
context['panelTitle'] = '%d matching results'%len(books)
context['books'] = books
return render(request, 'books/search_results.html', context)
return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/')) | {
"content_hash": "2e64fb2f8449d80595cbee99d3bf30db",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 128,
"avg_line_length": 40.11764705882353,
"alnum_prop": 0.5288080041400725,
"repo_name": "rambasnet/bookstore",
"id": "efda02eefca2e682ddd9d09d9048113464414ee4",
"size": "11594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "books/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "33"
},
{
"name": "HTML",
"bytes": "15360"
},
{
"name": "Python",
"bytes": "32237"
}
],
"symlink_target": ""
} |
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template5", checking_point=1, faild_point=100000, path_list=[
[TestAction.create_mini_vm, 'vm1', 'cluster=cluster1'],
[TestAction.destroy_vm, 'vm1'],
[TestAction.recover_vm, 'vm1'],
[TestAction.start_vm, 'vm1'],
[TestAction.create_vm_backup, 'vm1', 'vm1-backup1'],
[TestAction.stop_vm, 'vm1'],
[TestAction.create_mini_vm, 'vm2', 'cluster=cluster2'],
[TestAction.start_vm, 'vm2'],
[TestAction.stop_vm, 'vm2'],
[TestAction.resize_volume, 'vm2', 5*1024*1024],
[TestAction.poweroff_only, 'cluster=cluster1'],
[TestAction.create_volume, 'volume1', 'size=random', 'cluster=cluster1', 'flag=scsi'],
[TestAction.create_volume, 'volume2', 'cluster=cluster2', 'flag=thick,scsi'],
[TestAction.add_image, 'image1', 'root', 'http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2'],
[TestAction.attach_volume, 'vm2', 'volume2'],
[TestAction.start_vm, 'vm2'],
[TestAction.create_volume_backup, 'volume2', 'volume2-backup2'],
[TestAction.stop_vm, 'vm2'],
[TestAction.delete_volume_backup, 'volume2-backup2'],
[TestAction.delete_image, 'image1'],
[TestAction.recover_image, 'image1'],
[TestAction.delete_image, 'image1'],
[TestAction.expunge_image, 'image1'],
[TestAction.attach_volume, 'vm1', 'volume1'],
[TestAction.start_vm, 'vm1'],
[TestAction.create_volume_backup, 'volume1', 'volume1-backup3'],
[TestAction.stop_vm, 'vm1'],
[TestAction.change_vm_ha, 'vm2'],
[TestAction.poweroff_only, 'cluster=cluster2'],
[TestAction.resize_volume, 'vm1', 5*1024*1024],
[TestAction.create_volume, 'volume3', 'size=random', 'cluster=cluster1', 'flag=scsi'],
[TestAction.create_volume, 'volume4', 'cluster=cluster2', 'flag=thick,scsi'],
[TestAction.delete_volume_backup, 'volume1-backup3'],
[TestAction.create_mini_vm, 'vm3', 'cpu=random', 'cluster=cluster2'],
[TestAction.delete_volume, 'volume4'],
[TestAction.expunge_volume, 'volume4'],
[TestAction.add_image, 'image2', 'root', os.environ.get('isoForVmUrl')],
[TestAction.create_vm_by_image, 'image2', 'iso', 'vm4', 'cluster=cluster2'],
[TestAction.attach_volume, 'vm1', 'volume3'],
[TestAction.start_vm, 'vm1'],
[TestAction.create_volume_backup, 'volume3', 'volume3-backup4'],
[TestAction.stop_vm, 'vm1'],
[TestAction.create_image_from_volume, 'vm3', 'vm3-image3'],
[TestAction.poweroff_only, 'cluster=cluster1'],
[TestAction.delete_vm_backup, 'vm1-backup1'],
])
'''
The final status:
Running:['vm2', 'vm3', 'vm4']
Stopped:['vm1']
Enadbled:['volume3-backup4', 'image2', 'vm3-image3']
attached:['volume2', 'volume1', 'volume3']
Detached:[]
Deleted:['volume2-backup2', 'volume1-backup3', 'vm1-backup1']
Expunged:['volume4', 'image1']
Ha:['vm2']
Group:
''' | {
"content_hash": "4de62049153d7f219478dc375af68898",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 104,
"avg_line_length": 41.411764705882355,
"alnum_prop": 0.6896306818181818,
"repo_name": "zstackio/zstack-woodpecker",
"id": "4056a72891c028938272e36354964850cae1050f",
"size": "2816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "integrationtest/vm/mini/multiclusters/paths/multi_path81.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2356"
},
{
"name": "Go",
"bytes": "49822"
},
{
"name": "Makefile",
"bytes": "687"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "13070596"
},
{
"name": "Shell",
"bytes": "177861"
}
],
"symlink_target": ""
} |
import os
os.system('python runTrainer.py --agent=KerasNAFAgent --env=Motionv0Env --train-for=10000000 --test-for=0 --random-initial-position --gui --save-file=checkpoints/KerasNAF-Motionv0-$(date +%y%m%d%H%M%S).h5')
| {
"content_hash": "56405e0ed0af9e8866570e9ebbc35037",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 206,
"avg_line_length": 73,
"alnum_prop": 0.7351598173515982,
"repo_name": "benelot/bullet-gym",
"id": "bf4dcbcaf9214a3e8bb0aa82c52dc6ff4f419b28",
"size": "237",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bullet-gym-primitive/trainKerasNAFMotionExample.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "718069"
},
{
"name": "Shell",
"bytes": "190"
}
],
"symlink_target": ""
} |
class Affine():
'''Affine model''' | {
"content_hash": "12a92e41e3b002fbc266c3a904a54bde",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 22,
"avg_line_length": 19,
"alnum_prop": 0.5789473684210527,
"repo_name": "lsbardel/flow",
"id": "f9c6331b1b7eb46754c7f792c617d92d4762ff2e",
"size": "40",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flow/stoch/affine.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "54225"
},
{
"name": "Perl",
"bytes": "2377"
},
{
"name": "Python",
"bytes": "639731"
},
{
"name": "Shell",
"bytes": "4249"
}
],
"symlink_target": ""
} |
from distutils.core import setup
setup(
name='DTest',
version='0.5.0',
description="Dependency-based Threaded Test Framework",
author="Kevin L. Mitchell",
author_email="[email protected]",
url="http://github.com/klmitch/dtest",
scripts=['bin/run-dtests'],
packages=['dtest'],
license="LICENSE.txt",
long_description=open('README.rst').read(),
requires=['eventlet'],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Testing',
],
)
| {
"content_hash": "08c1fc608d9e22bbef16f7007387ec02",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 61,
"avg_line_length": 35.6,
"alnum_prop": 0.6095505617977528,
"repo_name": "klmitch/dtest",
"id": "e3c4e7db8ea09ccafb3209c5d2efc27ecf4628d4",
"size": "1718",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "227433"
},
{
"name": "Shell",
"bytes": "673"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'ErythropoiesisFindings'
db.delete_table('main_erythropoiesisfindings')
# Deleting model 'IronStain'
db.delete_table('main_ironstain')
# Deleting model 'MegakaryocyteFeatures'
db.delete_table('main_megakaryocytefeatures')
# Deleting model 'CellCount'
db.delete_table('main_cellcount')
# Deleting model 'CellCountInstance'
db.delete_table('main_cellcountinstance')
# Deleting model 'BoneMarrowBackground'
db.delete_table('main_bonemarrowbackground')
# Deleting model 'GranulopoiesisFindings'
db.delete_table('main_granulopoiesisfindings')
def backwards(self, orm):
# Adding model 'ErythropoiesisFindings'
db.create_table('main_erythropoiesisfindings', (
('comment', self.gf('django.db.models.fields.TextField')(blank=True)),
('megaloblastic_change', self.gf('django.db.models.fields.BooleanField')(default=False)),
('nuclear_asynchrony', self.gf('django.db.models.fields.BooleanField')(default=False)),
('no_dysplasia', self.gf('django.db.models.fields.BooleanField')(default=True)),
('ragged_haemoglobinisation', self.gf('django.db.models.fields.BooleanField')(default=False)),
('cell_count_instance', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['main.CellCountInstance'], unique=True)),
('multinucleated_forms', self.gf('django.db.models.fields.BooleanField')(default=False)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('main', ['ErythropoiesisFindings'])
# Adding model 'IronStain'
db.create_table('main_ironstain', (
('comment', self.gf('django.db.models.fields.TextField')(blank=True)),
('stain_performed', self.gf('django.db.models.fields.BooleanField')(default=False)),
('iron_content', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('ringed_sideroblasts', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
('cell_count_instance', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['main.CellCountInstance'], unique=True)),
))
db.send_create_signal('main', ['IronStain'])
# Adding model 'MegakaryocyteFeatures'
db.create_table('main_megakaryocytefeatures', (
('comment', self.gf('django.db.models.fields.TextField')(blank=True)),
('fragmented', self.gf('django.db.models.fields.BooleanField')(default=False)),
('relative_count', self.gf('django.db.models.fields.CharField')(max_length=50)),
('no_dysplasia', self.gf('django.db.models.fields.BooleanField')(default=True)),
('hypolobulated', self.gf('django.db.models.fields.BooleanField')(default=False)),
('micromegakaryocytes', self.gf('django.db.models.fields.BooleanField')(default=False)),
('cell_count_instance', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['main.CellCountInstance'], unique=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('main', ['MegakaryocyteFeatures'])
# Adding model 'CellCount'
db.create_table('main_cellcount', (
('comment', self.gf('django.db.models.fields.TextField')(blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('cell', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.CellType'])),
('normal_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
('cell_count_instance', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.CellCountInstance'])),
('abnormal_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
))
db.send_create_signal('main', ['CellCount'])
# Adding model 'CellCountInstance'
db.create_table('main_cellcountinstance', (
('datetime_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('overall_comment', self.gf('django.db.models.fields.TextField')(blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('datetime_submitted', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('tissue_type', self.gf('django.db.models.fields.CharField')(max_length=25)),
))
db.send_create_signal('main', ['CellCountInstance'])
# Adding model 'BoneMarrowBackground'
db.create_table('main_bonemarrowbackground', (
('site', self.gf('django.db.models.fields.CharField')(max_length=50)),
('haemodilution', self.gf('django.db.models.fields.CharField')(max_length=50)),
('trail_cellularity', self.gf('django.db.models.fields.CharField')(max_length=50)),
('particle_cellularity', self.gf('django.db.models.fields.CharField')(max_length=50)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('particulate', self.gf('django.db.models.fields.CharField')(max_length=50)),
('cell_count_instance', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['main.CellCountInstance'], unique=True)),
('ease_of_aspiration', self.gf('django.db.models.fields.CharField')(max_length=50)),
))
db.send_create_signal('main', ['BoneMarrowBackground'])
# Adding model 'GranulopoiesisFindings'
db.create_table('main_granulopoiesisfindings', (
('comment', self.gf('django.db.models.fields.TextField')(blank=True)),
('no_dysplasia', self.gf('django.db.models.fields.BooleanField')(default=True)),
('pelger', self.gf('django.db.models.fields.BooleanField')(default=False)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('nuclear_atypia', self.gf('django.db.models.fields.BooleanField')(default=False)),
('hypogranular', self.gf('django.db.models.fields.BooleanField')(default=False)),
('cell_count_instance', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['main.CellCountInstance'], unique=True)),
('dohle_bodies', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('main', ['GranulopoiesisFindings'])
models = {
'main.cellimage': {
'Meta': {'object_name': 'CellImage'},
'celltype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.CellType']"}),
'description': ('django.db.models.fields.TextField', [], {}),
'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'thumbnail_left': ('django.db.models.fields.IntegerField', [], {}),
'thumbnail_top': ('django.db.models.fields.IntegerField', [], {}),
'thumbnail_width': ('django.db.models.fields.IntegerField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'main.celltype': {
'Meta': {'object_name': 'CellType'},
'abbr_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'machine_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'readable_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'visualisation_colour': ('colorful.fields.RGBColorField', [], {'max_length': '7', 'blank': 'True'})
},
'main.similarlookinggroup': {
'Meta': {'object_name': 'SimilarLookingGroup'},
'cell_image': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.CellImage']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['main']
| {
"content_hash": "169aac8b253ab2c95fe4130050868ab9",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 141,
"avg_line_length": 60.033557046979865,
"alnum_prop": 0.6212409167132477,
"repo_name": "oghm2/hackdayoxford",
"id": "e7df23b064cebd4472ecd52bc18d0d85cfe475d0",
"size": "8963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cellcounter/main/migrations/0005_auto__del_erythropoiesisfindings__del_ironstain__del_megakaryocytefeat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "10096"
},
{
"name": "Python",
"bytes": "54636"
}
],
"symlink_target": ""
} |
"""Support for Homekit climate devices."""
import logging
from aiohomekit.model.characteristics import (
CharacteristicsTypes,
HeatingCoolingCurrentValues,
HeatingCoolingTargetValues,
)
from aiohomekit.utils import clamp_enum_to_char
from homeassistant.components.climate import (
DEFAULT_MAX_HUMIDITY,
DEFAULT_MIN_HUMIDITY,
ClimateEntity,
)
from homeassistant.components.climate.const import (
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
SUPPORT_TARGET_HUMIDITY,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from homeassistant.core import callback
from . import KNOWN_DEVICES, HomeKitEntity
_LOGGER = logging.getLogger(__name__)
# Map of Homekit operation modes to hass modes
MODE_HOMEKIT_TO_HASS = {
HeatingCoolingTargetValues.OFF: HVAC_MODE_OFF,
HeatingCoolingTargetValues.HEAT: HVAC_MODE_HEAT,
HeatingCoolingTargetValues.COOL: HVAC_MODE_COOL,
HeatingCoolingTargetValues.AUTO: HVAC_MODE_HEAT_COOL,
}
# Map of hass operation modes to homekit modes
MODE_HASS_TO_HOMEKIT = {v: k for k, v in MODE_HOMEKIT_TO_HASS.items()}
CURRENT_MODE_HOMEKIT_TO_HASS = {
HeatingCoolingCurrentValues.IDLE: CURRENT_HVAC_IDLE,
HeatingCoolingCurrentValues.HEATING: CURRENT_HVAC_HEAT,
HeatingCoolingCurrentValues.COOLING: CURRENT_HVAC_COOL,
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Homekit climate."""
hkid = config_entry.data["AccessoryPairingID"]
conn = hass.data[KNOWN_DEVICES][hkid]
@callback
def async_add_service(aid, service):
if service["stype"] != "thermostat":
return False
info = {"aid": aid, "iid": service["iid"]}
async_add_entities([HomeKitClimateEntity(conn, info)], True)
return True
conn.add_listener(async_add_service)
class HomeKitClimateEntity(HomeKitEntity, ClimateEntity):
"""Representation of a Homekit climate device."""
def get_characteristic_types(self):
"""Define the homekit characteristics the entity cares about."""
return [
CharacteristicsTypes.HEATING_COOLING_CURRENT,
CharacteristicsTypes.HEATING_COOLING_TARGET,
CharacteristicsTypes.TEMPERATURE_CURRENT,
CharacteristicsTypes.TEMPERATURE_TARGET,
CharacteristicsTypes.RELATIVE_HUMIDITY_CURRENT,
CharacteristicsTypes.RELATIVE_HUMIDITY_TARGET,
]
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
temp = kwargs.get(ATTR_TEMPERATURE)
await self.async_put_characteristics(
{CharacteristicsTypes.TEMPERATURE_TARGET: temp}
)
async def async_set_humidity(self, humidity):
"""Set new target humidity."""
await self.async_put_characteristics(
{CharacteristicsTypes.RELATIVE_HUMIDITY_TARGET: humidity}
)
async def async_set_hvac_mode(self, hvac_mode):
"""Set new target operation mode."""
await self.async_put_characteristics(
{
CharacteristicsTypes.HEATING_COOLING_TARGET: MODE_HASS_TO_HOMEKIT[
hvac_mode
],
}
)
@property
def current_temperature(self):
"""Return the current temperature."""
return self.service.value(CharacteristicsTypes.TEMPERATURE_CURRENT)
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self.service.value(CharacteristicsTypes.TEMPERATURE_TARGET)
@property
def min_temp(self):
"""Return the minimum target temp."""
if self.service.has(CharacteristicsTypes.TEMPERATURE_TARGET):
char = self.service[CharacteristicsTypes.TEMPERATURE_TARGET]
return char.minValue
return super().min_temp
@property
def max_temp(self):
"""Return the maximum target temp."""
if self.service.has(CharacteristicsTypes.TEMPERATURE_TARGET):
char = self.service[CharacteristicsTypes.TEMPERATURE_TARGET]
return char.maxValue
return super().max_temp
@property
def current_humidity(self):
"""Return the current humidity."""
return self.service.value(CharacteristicsTypes.RELATIVE_HUMIDITY_CURRENT)
@property
def target_humidity(self):
"""Return the humidity we try to reach."""
return self.service.value(CharacteristicsTypes.RELATIVE_HUMIDITY_TARGET)
@property
def min_humidity(self):
"""Return the minimum humidity."""
char = self.service[CharacteristicsTypes.RELATIVE_HUMIDITY_TARGET]
return char.minValue or DEFAULT_MIN_HUMIDITY
@property
def max_humidity(self):
"""Return the maximum humidity."""
char = self.service[CharacteristicsTypes.RELATIVE_HUMIDITY_TARGET]
return char.maxValue or DEFAULT_MAX_HUMIDITY
@property
def hvac_action(self):
"""Return the current running hvac operation."""
# This characteristic describes the current mode of a device,
# e.g. a thermostat is "heating" a room to 75 degrees Fahrenheit.
# Can be 0 - 2 (Off, Heat, Cool)
value = self.service.value(CharacteristicsTypes.HEATING_COOLING_CURRENT)
return CURRENT_MODE_HOMEKIT_TO_HASS.get(value)
@property
def hvac_mode(self):
"""Return hvac operation ie. heat, cool mode."""
# This characteristic describes the target mode
# E.g. should the device start heating a room if the temperature
# falls below the target temperature.
# Can be 0 - 3 (Off, Heat, Cool, Auto)
value = self.service.value(CharacteristicsTypes.HEATING_COOLING_TARGET)
return MODE_HOMEKIT_TO_HASS.get(value)
@property
def hvac_modes(self):
"""Return the list of available hvac operation modes."""
valid_values = clamp_enum_to_char(
HeatingCoolingTargetValues,
self.service[CharacteristicsTypes.HEATING_COOLING_TARGET],
)
return [MODE_HOMEKIT_TO_HASS[mode] for mode in valid_values]
@property
def supported_features(self):
"""Return the list of supported features."""
features = 0
if self.service.has(CharacteristicsTypes.TEMPERATURE_TARGET):
features |= SUPPORT_TARGET_TEMPERATURE
if self.service.has(CharacteristicsTypes.RELATIVE_HUMIDITY_TARGET):
features |= SUPPORT_TARGET_HUMIDITY
return features
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
| {
"content_hash": "cb58b1c2978664396e2817bf935b28db",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 82,
"avg_line_length": 34.186868686868685,
"alnum_prop": 0.6732161323681489,
"repo_name": "pschmitt/home-assistant",
"id": "f06063c5fd24671059cb45f8832974130d81ffd8",
"size": "6769",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/homekit_controller/climate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1522"
},
{
"name": "Python",
"bytes": "24807200"
},
{
"name": "Shell",
"bytes": "4342"
}
],
"symlink_target": ""
} |
from ticket import Globals, Bin, Ticket, TicketAttachment, MovedTicket
| {
"content_hash": "5c475e5230e0fb1c0c8b4f2b5807db95",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 70,
"avg_line_length": 71,
"alnum_prop": 0.8309859154929577,
"repo_name": "lym/allura-git",
"id": "2585be9bcb5a880abe9f2af7e2086aa7e97bd409",
"size": "941",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ForgeTracker/forgetracker/model/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7850"
},
{
"name": "CSS",
"bytes": "167419"
},
{
"name": "HTML",
"bytes": "787868"
},
{
"name": "JavaScript",
"bytes": "808388"
},
{
"name": "Makefile",
"bytes": "9792"
},
{
"name": "Puppet",
"bytes": "6872"
},
{
"name": "Python",
"bytes": "4115536"
},
{
"name": "RAML",
"bytes": "23257"
},
{
"name": "Ruby",
"bytes": "5726"
},
{
"name": "Shell",
"bytes": "115283"
},
{
"name": "XSLT",
"bytes": "3357"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.