text | meta |
---|---|
"""
Model admin
"""
from django.contrib import admin
from models import *
class ChannelAdmin(admin.ModelAdmin):
list_display = ('slug', 'title', 'created')
ordering = ('-created',)
class UserPreferencesAdmin(admin.ModelAdmin):
list_display = ('owner', 'created')
list_filter = ('created',)
class FilterEntryAdmin(admin.ModelAdmin):
list_display = ('author', 'target', 'kind', 'value')
list_filter = ('target', 'kind')
ordering = ('author', 'target', 'kind')
class MessageAdmin(admin.ModelAdmin):
list_display = ('channel', 'get_created_date', 'clock', 'get_identity', 'raw', 'ip')
list_display_links = ('get_created_date', 'clock')
list_filter = ('created', 'channel')
search_fields = ['raw','author__username', 'user_agent','ip']
ordering = ('-created',)
class UrlAdmin(admin.ModelAdmin):
list_display = ('created', 'url')
list_filter = ('created',)
raw_id_fields = ("message",)
admin.site.register(Channel, ChannelAdmin)
admin.site.register(UserPreferences, UserPreferencesAdmin)
admin.site.register(FilterEntry, FilterEntryAdmin)
admin.site.register(Message, MessageAdmin)
admin.site.register(Url, UrlAdmin)
| {
"content_hash": "54cca1d93ca46910cfc8177f286da7f6",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 88,
"avg_line_length": 32.69444444444444,
"alnum_prop": 0.6788445199660152,
"repo_name": "sveetch/djangotribune",
"id": "161473fecd59e969ef81f3e59f986fc26c16dbd0",
"size": "1201",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "djangotribune/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "37690"
},
{
"name": "HTML",
"bytes": "8647"
},
{
"name": "JavaScript",
"bytes": "85711"
},
{
"name": "Python",
"bytes": "128219"
},
{
"name": "Ruby",
"bytes": "966"
}
],
"symlink_target": ""
} |
import numpy as np
import os
import datashape as ds
import pytest
from toolz import first
from blaze import into
from blaze.utils import tmpfile
from blaze.compatibility import xfail
from blaze import PyTables, discover
import pandas as pd
tb = pytest.importorskip('tables')
try:
f = pd.HDFStore('foo')
except (RuntimeError, ImportError) as e:
pytest.skip('skipping test_hdfstore.py %s' % e)
else:
f.close()
os.remove('foo')
now = np.datetime64('now').astype('datetime64[us]')
@pytest.fixture
def x():
y = np.array([(1, 'Alice', 100),
(2, 'Bob', -200),
(3, 'Charlie', 300),
(4, 'Denis', 400),
(5, 'Edith', -500)],
dtype=[('id', '<i8'), ('name', 'S7'), ('amount', '<i8')])
return y
@pytest.yield_fixture
def tbfile(x):
with tmpfile('.h5') as filename:
f = tb.open_file(filename, mode='w')
d = f.create_table('/', 'title', x)
d.close()
f.close()
yield filename
@pytest.fixture
def raw_dt_data():
raw_dt_data = [[1, 'Alice', 100, now],
[2, 'Bob', -200, now],
[3, 'Charlie', 300, now],
[4, 'Denis', 400, now],
[5, 'Edith', -500, now]]
for i, d in enumerate(raw_dt_data):
d[-1] += np.timedelta64(i, 'D')
return list(map(tuple, raw_dt_data))
@pytest.fixture
def dt_data(raw_dt_data):
return np.array(raw_dt_data, dtype=np.dtype([('id', 'i8'),
('name', 'S7'),
('amount', 'f8'),
('date', 'M8[ms]')]))
@pytest.yield_fixture
def dt_tb(dt_data):
class Desc(tb.IsDescription):
id = tb.Int64Col(pos=0)
name = tb.StringCol(itemsize=7, pos=1)
amount = tb.Float64Col(pos=2)
date = tb.Time64Col(pos=3)
non_date_types = list(zip(['id', 'name', 'amount'], ['i8', 'S7', 'f8']))
# has to be in microseconds as per pytables spec
dtype = np.dtype(non_date_types + [('date', 'M8[us]')])
rec = dt_data.astype(dtype)
# also has to be a floating point number
dtype = np.dtype(non_date_types + [('date', 'f8')])
rec = rec.astype(dtype)
rec['date'] /= 1e6
with tmpfile('.h5') as filename:
f = tb.open_file(filename, mode='w')
d = f.create_table('/', 'dt', description=Desc)
d.append(rec)
d.close()
f.close()
yield filename
class TestPyTablesLight(object):
def test_read(self, tbfile):
t = PyTables(path=tbfile, datapath='/title')
shape = t.shape
t._v_file.close()
assert shape == (5,)
def test_write_no_dshape(self, tbfile):
with pytest.raises(ValueError):
PyTables(path=tbfile, datapath='/write_this')
@xfail(raises=NotImplementedError,
reason='PyTables does not support object columns')
def test_write_with_bad_dshape(self, tbfile):
dshape = '{id: int, name: string, amount: float32}'
PyTables(path=tbfile, datapath='/write_this', dshape=dshape)
def test_write_with_dshape(self, tbfile):
f = tb.open_file(tbfile, mode='a')
try:
assert '/write_this' not in f
finally:
f.close()
del f
# create our table
dshape = '{id: int, name: string[7, "ascii"], amount: float32}'
t = PyTables(path=tbfile, datapath='/write_this', dshape=dshape)
shape = t.shape
filename = t._v_file.filename
t._v_file.close()
assert filename == tbfile
assert shape == (0,)
@xfail(reason="Don't yet support datetimes")
def test_table_into_ndarray(self, dt_tb, dt_data):
t = PyTables(dt_tb, '/dt')
res = into(np.ndarray, t)
try:
for k in res.dtype.fields:
lhs, rhs = res[k], dt_data[k]
if (issubclass(np.datetime64, lhs.dtype.type) and
issubclass(np.datetime64, rhs.dtype.type)):
lhs, rhs = lhs.astype('M8[us]'), rhs.astype('M8[us]')
assert np.array_equal(lhs, rhs)
finally:
t._v_file.close()
def test_ndarray_into_table(self, dt_tb, dt_data):
dtype = ds.from_numpy(dt_data.shape, dt_data.dtype)
t = PyTables(dt_tb, '/out', dtype)
try:
res = into(np.ndarray, into(t, dt_data, filename=dt_tb, datapath='/out'))
for k in res.dtype.fields:
lhs, rhs = res[k], dt_data[k]
if (issubclass(np.datetime64, lhs.dtype.type) and
issubclass(np.datetime64, rhs.dtype.type)):
lhs, rhs = lhs.astype('M8[us]'), rhs.astype('M8[us]')
assert np.array_equal(lhs, rhs)
finally:
t._v_file.close()
def test_datetime_discovery(self, dt_tb, dt_data):
t = PyTables(dt_tb, '/dt')
lhs, rhs = map(discover, (t, dt_data))
t._v_file.close()
assert lhs == rhs
def test_no_extra_files_around(self, dt_tb):
""" check the context manager auto-closes the resources """
assert not len(tb.file._open_files)
| {
"content_hash": "44c39720e64e28006afeda57b3d54487",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 85,
"avg_line_length": 31.100591715976332,
"alnum_prop": 0.5325342465753424,
"repo_name": "mrocklin/blaze",
"id": "8f8c557c9a46230728869583390dafb38089e7c7",
"size": "5256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blaze/tests/test_pytables.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "581781"
},
{
"name": "Shell",
"bytes": "6532"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_rebel_colonel_sullustan_male_01.iff"
result.attribute_template_id = 9
result.stfName("npc_name","sullustan_base_male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "dc3223d81aef51f40a20ca185327ef88",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 85,
"avg_line_length": 24.846153846153847,
"alnum_prop": 0.7058823529411765,
"repo_name": "anhstudios/swganh",
"id": "889209d9e9e93af01aac2b24f85f51057d0eb357",
"size": "468",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/mobile/shared_dressed_rebel_colonel_sullustan_male_01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
import ROOT
import numpy as np
def makeMomentMorph(w,interpParam, observable, pdfList, paramPoints):
paramVec = ROOT.TVectorD(len(paramPoints))
for i, p in enumerate(paramPoints):
paramVec[i]=p #seems silly, but other constructor gave problems
pdfs = ROOT.RooArgList()
for pdf in pdfList:
pdfs.add(pdf)
setting = ROOT.RooMomentMorph.Linear
morph = ROOT.RooMomentMorph('morph','morph',interpParam,
ROOT.RooArgList(observable),pdfs, paramVec,setting)
print morph
getattr(w,'import')(morph) # work around for morph = w.import(morph)
return w
def testMomentMorph():
#Going to make a few statistical models we want to interpolate
#initialize workspace with some common background part
w = ROOT.RooWorkspace('w')
w.factory('Exponential::e(x[-5,15],tau[-.15,-3,0])')
x = w.var('x')
frame = x.frame()
#center of Gaussian will move along the parameter points
mu = w.factory('mu[0,10]') #this is our continuous interpolation parameter
paramPoints = np.arange(5)
pdfs=[]
# Now make the specific Gaussians to add on top of common background
for i in paramPoints:
w.factory('Gaussian::g{i}(x,mu{i}[{i},-3,5],sigma[1, 0, 2])'.format(i=i))
w.factory('SUM::model{i}(s[50,0,100]*g{i},b[100,0,1000]*e)'.format(i=i))
w.Print() #this isn't displaying in iPython
pdf = w.pdf('model{i}'.format(i=i))
pdfs.append(pdf)
pdf.plotOn(frame)
w = makeMomentMorph(w,mu,x,pdfs,paramPoints)
morph = w.pdf('morph')
w.Print()
morph.Print()
return
#make plots of interpolated pdf
for i in np.arange(5):
mu.setVal(i+.1) #offset from the original point a bit to see morphing
mu.Print()
morph.plotOn(frame, ROOT.RooFit.LineColor(ROOT.kRed))
c1 = ROOT.TCanvas()
frame.Draw()
c1.SaveAs('test.pdf')
def testMomentMorph_orig():
#Going to make a few statistical models we want to interpolate
#initialize workspace with some common background part
w = ROOT.RooWorkspace('w')
w.factory('Exponential::e(x[-5,15],tau[-.15,-3,0])')
x = w.var('x')
frame = x.frame()
#center of Gaussian will move along the parameter points
mu = w.factory('mu[0,10]') #this is our continuous interpolation parameter
paramPoints = np.arange(5)
pdfs = ROOT.RooArgList()
#paramVec = ROOT.TVectorD(len(paramPoints),paramPoints) #this gives problems, why?
paramVec = ROOT.TVectorD(len(paramPoints))
# Now make the specific Gaussians to add on top of common background
for i in paramPoints:
w.factory('Gaussian::g{i}(x,mu{i}[{i},-3,5],sigma[1, 0, 2])'.format(i=i))
w.factory('SUM::model{i}(s[50,0,100]*g{i},b[100,0,1000]*e)'.format(i=i))
w.Print() #this isn't displaying in iPython
pdf = w.pdf('model{i}'.format(i=i))
pdf.plotOn(frame)
pdfs.add(pdf)
paramVec[int(i)]=i
#ok, now construct the MomentMorph, can choose from these settings
# { Linear, NonLinear, NonLinearPosFractions, NonLinearLinFractions, SineLinear } ;
setting = ROOT.RooMomentMorph.Linear
morph = ROOT.RooMomentMorph('morph','morph',mu,ROOT.RooArgList(x),pdfs, paramVec,setting)
getattr(w,'import')(morph) # work around for morph = w.import(morph)
morph.Print('v')
#make plots of interpolated pdf
for i in np.arange(5):
print i, paramVec[1]
mu.setVal(i+.5) #offset from the original point a bit to see morphing
mu.Print()
morph.plotOn(frame, ROOT.RooFit.LineColor(ROOT.kRed))
c1 = ROOT.TCanvas()
frame.Draw()
c1.SaveAs('test.pdf')
if __name__ == '__main__':
testMomentMorph() | {
"content_hash": "23e717507d7e3b9e25fec2d8ddfc1505",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 90,
"avg_line_length": 31.155963302752294,
"alnum_prop": 0.7058303886925795,
"repo_name": "cranmer/parametrized-learning",
"id": "c32b6035d375bbb3eec31249b136218cf0b15560",
"size": "3396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testMomentMorph.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6361"
},
{
"name": "C++",
"bytes": "156194"
},
{
"name": "Jupyter Notebook",
"bytes": "404193"
},
{
"name": "Makefile",
"bytes": "3168"
},
{
"name": "Python",
"bytes": "70037"
},
{
"name": "TeX",
"bytes": "470308"
}
],
"symlink_target": ""
} |
def get_kv_shape(shape, key_axes):
func = lambda axis: shape[axis]
return _get_kv_func(func, shape, key_axes)
def get_kv_axes(shape, key_axes):
func = lambda axis: axis
return _get_kv_func(func, shape, key_axes)
def _get_kv_func(func, shape, key_axes):
key_res = [func(axis) for axis in key_axes]
value_res = [func(axis) for axis in range(len(shape)) if axis not in key_axes]
return key_res, value_res
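# Illustrative example (hypothetical values): for shape (2, 3, 4) with key_axes [0, 2],
# get_kv_shape returns ([2, 4], [3]) and get_kv_axes returns ([0, 2], [1]), i.e. the key
# dimensions/axes first and the remaining value dimensions/axes second.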
def zip_with_index(rdd):
"""
Alternate version of Spark's zipWithIndex that eagerly returns count.
"""
starts = [0]
if rdd.getNumPartitions() > 1:
nums = rdd.mapPartitions(lambda it: [sum(1 for _ in it)]).collect()
count = sum(nums)
for i in range(len(nums) - 1):
starts.append(starts[-1] + nums[i])
else:
count = rdd.count()
def func(k, it):
for i, v in enumerate(it, starts[k]):
yield v, i
return count, rdd.mapPartitionsWithIndex(func)
| {
"content_hash": "b77c9074b3d4dca3dfbd95bb641979c1",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 82,
"avg_line_length": 31.29032258064516,
"alnum_prop": 0.6103092783505155,
"repo_name": "andrewosh/bolt",
"id": "c006dd2012a0246d7a73febdd13b9a0d62a84112",
"size": "970",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bolt/spark/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "122232"
}
],
"symlink_target": ""
} |
from replay import _ZERO_PROPERTIES
from report import _WEAPON_NAMES, _WEAPONS
from nicklog import load_timelines
from utils import Toggler, googlechart_url
import os
from math import log
_LOG_SUM_PROPERTIES = _ZERO_PROPERTIES[:]
_LOG_MAX_PROPERTIES = "kill_streak death_streak cap_streak chat_length".split(" ")
for p in _LOG_MAX_PROPERTIES:
_LOG_SUM_PROPERTIES.remove(p)
def _player_overview(player):
odd = Toggler("even", "odd")
html = "<h2>Totals</h2>"
html += '<table class="overview">'
html += '<tr class="%s"><th>Frags</th><td>%d</td></tr>\n' % (odd, player.kill_count)
html += '<tr class="%s"><th>Deaths</th><td>%d</td></tr>\n' % (odd, player.death_count)
html += '<tr class="%s"><th>Caps</th><td>%d</td></tr>\n' % (odd, player.flag_caps)
html += '<tr class="%s"><th>Suicides</th><td>%d</td></tr>\n' % (odd, player.suicides)
html += '<tr class="%s"><th>Team Kills</th><td>%d</td></tr>\n' % (odd, player.team_kills)
html += '<tr class="%s"><th>Health</th><td>%d</td></tr>\n' % (odd, player.health)
html += '<tr class="%s"><th>Armor</th><td>%d</td></tr>\n' % (odd, player.armor)
html += '<tr class="%s"><th>Best Frag Streak</th><td>%d</td></tr>\n' % (odd, player.kill_streak)
html += '<tr class="%s"><th>Worst Death Streak</th><td>%d</td></tr>\n' % (odd, player.death_streak)
html += '<tr class="%s"><th>Best Cap Streak</th><td>%d</td></tr>\n' % (odd, player.cap_streak)
html += '</table>\n'
return html
def to_level(base, value):
return int(log(2+(float(value)/base)))
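# Worked example for to_level (natural log): to_level(2, 100) = int(log(2 + 50)) = int(3.95...) = 3,
# i.e. levels grow logarithmically with the underlying stat.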
def _player_levels(player):
odd = Toggler("even", "odd")
html = "<h2>Levels</h2>"
html += '<table class="levels">'
html += '<tr class="%s"><th>Fragger</th><td>Level %d</td></tr>\n' %\
(odd, to_level(2, player.kill_count))
html += '<tr class="%s"><th>Capper</th><td>Level %d</td></tr>\n' %\
(odd, to_level(0.7, player.flag_caps))
html += '<tr class="%s"><th>Scorer</th><td>Level %d</td></tr>\n' %\
(odd, to_level(3, player.score))
html += '</table>\n'
return html
def _average_weapon_row(row):
"""Interpolate value series by replaceing zeros with average values"""
i = 0
while i < len(row):
if row[i] > 0.0:
i += 1
continue
j = i+1
while j < len(row): # search end of zero series
if row[j] > 0.0:
break
j += 1
if j == len(row): # edge case: end reached
if j > i+1: # multiple trailing zeros
j -= 1
row[j] = row[i-1]
else: # just one trailing zero
row[j-1] = row[i-1]
break
if i == 0: # edge case: started with zeros
row[i-1] = row[j]
diff = (row[j] - row[i-1]) / (1+j-i)
plus = row[i-1]
for k in xrange(i, j):
plus += diff
row[k] = plus
i = j
return row
def _hitrate_data(player_timeline):
data = []
for p in player_timeline:
datapoint = []
for weapon,x,y in _WEAPONS:
wdata = getattr(p, weapon, {})
datapoint.append(wdata.get('hitrate', 0))
data.append(datapoint)
data = map(list, zip(*data))
avg_data = [_average_weapon_row(lst[:]) for lst in data]
return data, avg_data
def _stat_development(player_timeline):
html = "<h2>Stat Development</h2>\n"
data = [
[p.kill_count for p in player_timeline],
[p.death_count for p in player_timeline],
[p.flag_caps *10 for p in player_timeline]]
url = googlechart_url(data=data, legend=['frags','deaths','caps*10'])
html += '<img src="%s" />\n' % url
return html
def merge(player_into, player_from):
for key in _LOG_SUM_PROPERTIES:
val = getattr(player_from, key)
val_old = getattr(player_into, key)
setattr(player_into, key, val + val_old)
for key in _LOG_MAX_PROPERTIES:
val = getattr(player_from, key)
val_old = getattr(player_into, key)
setattr(player_into, key, max(val, val_old))
for w,x,y in _WEAPONS:
wstats = getattr(player_into, w)
for attr in ['shots', 'hits', 'kills', 'deaths']:
wstats[attr] = getattr(player_from, w)[attr]
return player_into
class Player:
def __init__(self, **inits):
for a,v in inits.items():
setattr(self, a, v)
for a in _ZERO_PROPERTIES:
setattr(self, a, 0)
for w,x,y in _WEAPONS:
setattr(self, w, {})
_ODD_CLASS = {True: 'odd', False: 'even'}
_HTML= """\
<html>
<head>
<title>%s</title>
<link rel="stylesheet" type="text/css" href="media/style.css" /
</head>
<body>
<p>View <a href="players.html">Overview of all players</a>.</p>
<h1>%s profile</h1>
<h2>Hitrate Development</h2>
<script type="text/javascript" src="media/protovis-3.1/protovis-d3.1.js"></script>
<script type="text/javascript" src="media/hitrate_diagram.js"></script>
<script type="text/javascript+protovis">
%s
draw_hitrate(hitrate_points, hitrate_points_interpolated, weapons);
</script>
%s
</body>
</html>
"""
def player_profile(player_timeline):
last = player_timeline[-1]
P = Player(nick=last.nick)
player = reduce(merge, player_timeline, P)
weapon_list = [_WEAPON_NAMES[w].replace(" ", " ") for (w,y,z) in _WEAPONS]
data, avg_data = _hitrate_data(player_timeline)
data = "var hitrate_points = %s;\n" % (str(data))
data += "var hitrate_points_interpolated = %s;\n" % (str(avg_data))
data += "var weapons = %s;\n" % weapon_list
html = ""
html += _stat_development(player_timeline)
html += _player_overview(player)
html += _player_levels(player)
html += '\n<table style="font-size: 0.8em; float: right;">'
odd = False
for prop in _ZERO_PROPERTIES:
html += '<tr class="%s"><th>%s</th><td>%d</td></tr>\n' % (_ODD_CLASS[odd], prop, getattr(player, prop))
odd = not odd
for weapon,x,y in _WEAPONS:
for key, val in getattr(player, weapon).items():
html += '<tr class="%s"><th>%s %s</th><td>%d</td></tr>\n' % (_ODD_CLASS[odd], weapon, key, val)
html += "</table>\n"
return _HTML % (player.nick, player.nick, data, html)
def _player_overview_item(odd, player_timeline):
current = player_timeline[-1]
slug_nick = player_timeline[0].slug_nick
nick = player_timeline[0].nick
return '<tr class="%s"><td class="elo">%d</td><td><a href="p_%s.html">%s</a></td></tr>\n' %\
(odd, current.elo*1000, slug_nick, nick)
def _player_elos(timelines):
elos = [[int(p.elo*1000) for p in line] for line in timelines]
max_elo = max(max(row) for row in elos)
nicks = [p[0].nick for p in timelines]
url = googlechart_url(data=elos, legend=nicks)
return '<img src="%s" alt="player ELO ratings" />\n' % url
def _hitrate_comparison(weapon, timelines):
nicks = list()
hitrates = list()
for line in timelines:
d = [getattr(x, weapon)['hitrate'] for x in line][-30:]
if sum(d) == 0:
continue
_average_weapon_row(d)
nicks.append( line[0].nick )
hitrates.append( d )
url = googlechart_url(data=hitrates, legend=nicks)
html = '<h3>Hitrate with %s</h3>' % (_WEAPON_NAMES[weapon])
html += '<img src="%s" alt="hitrates with %s" />' % (url, weapon)
return html
_OVERVIEW_FILE = "players.html"
_OVERVIEW_HTML= """\
<html>
<head>
<title>Player Overview</title>
<link rel="stylesheet" type="text/css" href="media/style.css" /
</head>
<body>
<h1>Player Overview</h1>
%s
</body>
</html>
"""
def player_overview(timelines, fname):
def cmp_timeline(t1, t2):
return -cmp(t1[-1].elo, t2[-1].elo)
timelines.sort(cmp=cmp_timeline)
html = ""
html += '<h2>Elo Development</h2>'
html += _player_elos(timelines)
html += '<h2>Elo Ranking</h2>'
html += '<table class="ranking">\n'
html += '<tr><th>Elo</th><th>Player</th>\n'
odd = Toggler("even", "odd")
for player_timeline in timelines:
html += _player_overview_item(odd, player_timeline)
html += '</table>\n'
for weapon,x,y in _WEAPONS:
if weapon in ('gauntlet', 'bfg', 'teleport'):
continue
html += _hitrate_comparison(weapon, timelines)
fh = open(fname, 'w')
fh.write(_OVERVIEW_HTML % html)
fh.close()
def write_profiles(options):
timelines = load_timelines(options.nicklog)
for player_timeline in timelines:
fname = os.path.join(options.directory, "p_"+player_timeline[0].slug_nick+".html")
pfh = open(fname, 'w')
pfh.write(player_profile(player_timeline))
pfh.close()
fname = os.path.join(options.directory, _OVERVIEW_FILE)
player_overview(timelines, fname)
| {
"content_hash": "d8eb68493c830154f7f64f2f991ba57c",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 105,
"avg_line_length": 33.41422594142259,
"alnum_prop": 0.635236664162284,
"repo_name": "qznc/arenastats",
"id": "c027864eaa8998403d88b0275254f356de9f3fb5",
"size": "7986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quakelog/profile.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "515093"
},
{
"name": "Python",
"bytes": "58677"
}
],
"symlink_target": ""
} |
import datetime
import logging
import os
from itertools import groupby
from math import ceil
from django.db.models import Max
from django.db.models import Sum
from le_utils.constants import content_kinds
from sqlalchemy import and_
from sqlalchemy import case
from sqlalchemy import cast
from sqlalchemy import exists
from sqlalchemy import false
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import or_
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy.sql.expression import literal
from .paths import get_content_file_name
from .paths import get_content_storage_file_path
from .paths import using_remote_storage
from .sqlalchemybridge import Bridge
from .sqlalchemybridge import filter_by_uuids
from kolibri.core.content.apps import KolibriContentConfig
from kolibri.core.content.errors import InvalidStorageFilenameError
from kolibri.core.content.models import ChannelMetadata
from kolibri.core.content.models import ContentNode
from kolibri.core.content.models import File
from kolibri.core.content.models import LocalFile
from kolibri.core.content.utils.search import get_all_contentnode_label_metadata
from kolibri.core.content.utils.sqlalchemybridge import filter_by_checksums
from kolibri.core.content.utils.tree import get_channel_node_depth
from kolibri.core.device.models import ContentCacheKey
from kolibri.core.utils.lock import db_lock
logger = logging.getLogger(__name__)
CONTENT_APP_NAME = KolibriContentConfig.label
CHUNKSIZE = 10000
def _generate_MPTT_descendants_statement(mptt_values, ContentNodeTable):
"""
This logic is modified from:
https://github.com/django-mptt/django-mptt/blob/38d46c26ca362c471b097ab96a3616b9b20fb883/mptt/managers.py#L137
in order to render the result as a SQL Alchemy expression that we can use
in other queries.
"""
queries = []
# Group the resultant mptt data by tree_id and parent_id,
# this will allow us to consolidate contiguous siblings to reduce
# the total number of constraints.
# This logic is verbatim from Django MPTT, only the query construction
# has been translated from Django Q statements to SQL Alchemy and_ statements.
for group in groupby(
mptt_values,
key=lambda n: (
# tree id
n[0],
# parent id
n[1],
),
):
next_lft = None
for node in list(group[1]):
tree = node[0]
lft = min_val = node[2]
rght = max_val = node[3]
if next_lft is None:
next_lft = rght + 1
min_max = {"min": min_val, "max": max_val}
elif lft == next_lft:
if min_val < min_max["min"]:
min_max["min"] = min_val
if max_val > min_max["max"]:
min_max["max"] = max_val
next_lft = rght + 1
elif lft != next_lft:
queries.append(
and_(
ContentNodeTable.c.tree_id == tree,
ContentNodeTable.c.lft >= min_max["min"],
ContentNodeTable.c.rght <= min_max["max"],
)
)
min_max = {"min": min_val, "max": max_val}
next_lft = rght + 1
queries.append(
and_(
ContentNodeTable.c.tree_id == tree,
ContentNodeTable.c.lft >= min_max["min"],
ContentNodeTable.c.rght <= min_max["max"],
)
)
return queries
def _MPTT_descendant_ids_statement(
bridge, channel_id, node_ids, min_boundary, max_boundary
):
ContentNodeTable = bridge.get_table(ContentNode)
connection = bridge.get_connection()
# Setup list to collect queries
or_queries = []
# First we fetch a list of non-topic ids from the specified node ids
# that match the specified tree boundary ranges
non_topic_results = connection.execute(
select([ContentNodeTable.c.id]).where(
and_(
ContentNodeTable.c.channel_id == channel_id,
filter_by_uuids(ContentNodeTable.c.id, node_ids),
# Also filter by the boundary conditions
# We are only interested in non-topic nodes that
# are inside the range
ContentNodeTable.c.rght >= min_boundary,
ContentNodeTable.c.rght <= max_boundary,
# Produce an id list for non topics
ContentNodeTable.c.kind != content_kinds.TOPIC,
)
)
).fetchall()
non_topic_node_ids = [result[0] for result in non_topic_results]
# If we have any node ids that are for non-topics, then we add an explicit query
# to match against those node ids
if non_topic_node_ids:
or_queries.append(filter_by_uuids(ContentNodeTable.c.id, non_topic_node_ids))
# Now get the relevant MPTT values from the database for the specified node_ids
# for topic nodes in the specified lft/rght range.
# Query modified from:
# https://github.com/django-mptt/django-mptt/blob/38d46c26ca362c471b097ab96a3616b9b20fb883/mptt/managers.py#L123
mptt_values = connection.execute(
select(
[
ContentNodeTable.c.tree_id,
ContentNodeTable.c.parent_id,
ContentNodeTable.c.lft,
ContentNodeTable.c.rght,
]
)
.order_by(
ContentNodeTable.c.tree_id,
ContentNodeTable.c.parent_id,
ContentNodeTable.c.lft,
)
.where(
and_(
ContentNodeTable.c.channel_id == channel_id,
filter_by_uuids(ContentNodeTable.c.id, node_ids),
# Add constraints specific to our requirements, in terms of batching:
# Also filter by the boundary conditions
# We are only interested in nodes that are ancestors of
# the nodes in the range, but they could be ancestors of any node
# in this range, so we filter the lft value by being less than
# or equal to the max_boundary, and the rght value by being
# greater than or equal to the min_boundary.
ContentNodeTable.c.lft <= max_boundary,
ContentNodeTable.c.rght >= min_boundary,
# And topics:
# Only select values for descendant constraints from topics
ContentNodeTable.c.kind == content_kinds.TOPIC,
)
)
).fetchall()
# Extend the constraints we are filtering by with ones generated from the relevant
# MPTT values we have queried above.
or_queries.extend(
_generate_MPTT_descendants_statement(mptt_values, ContentNodeTable)
)
if not or_queries:
# No constraints that apply in this range, so therefore this query should always
# evaluate to False, because nothing can match it.
return select([ContentNodeTable.c.id]).where(false())
# Return a query that ors each of the constraints
return select([ContentNodeTable.c.id]).where(or_(*or_queries))
def _create_batch_update_statement(
bridge, channel_id, min_boundary, max_boundary, node_ids, exclude_node_ids
):
ContentNodeTable = bridge.get_table(ContentNode)
# Restrict the update statement to nodes falling within the boundaries
batch_statement = ContentNodeTable.update().where(
and_(
# Only update leaf nodes (non topics)
ContentNodeTable.c.kind != content_kinds.TOPIC,
# Only update nodes in the channel we specified
ContentNodeTable.c.channel_id == channel_id,
# Only select nodes inside the boundary conditions
ContentNodeTable.c.rght >= min_boundary,
ContentNodeTable.c.rght <= max_boundary,
)
)
if node_ids is not None:
# Construct a statement that restricts which nodes we update
# in this batch by the specified inclusion constraints
node_ids_statement = _MPTT_descendant_ids_statement(
bridge, channel_id, node_ids, min_boundary, max_boundary
)
# Add this statement to the query
batch_statement = batch_statement.where(
ContentNodeTable.c.id.in_(node_ids_statement)
)
if exclude_node_ids is not None:
# Construct a statement that restricts nodes we update
# in this batch by the specified exclusion constraints
exclude_node_ids_statement = _MPTT_descendant_ids_statement(
bridge, channel_id, exclude_node_ids, min_boundary, max_boundary
)
# Add this statement to the query
batch_statement = batch_statement.where(
~ContentNodeTable.c.id.in_(exclude_node_ids_statement)
)
return batch_statement
def _calculate_batch_params(bridge, channel_id, node_ids, exclude_node_ids):
ContentNodeTable = bridge.get_table(ContentNode)
connection = bridge.get_connection()
# To chunk the tree, we first find the full extent of the tree - this gives the
# highest rght value for this channel.
max_rght = connection.execute(
select([func.max(ContentNodeTable.c.rght)]).where(
ContentNodeTable.c.channel_id == channel_id
)
).scalar()
# Count the total number of constraints
constraint_count = len(node_ids or []) + len(exclude_node_ids or [])
# Aim for a constraint per batch count of about 250 on average
# This means that there will be at most 750 parameters from the constraints
# and should therefore also limit the overall SQL expression size.
dynamic_chunksize = int(
min(CHUNKSIZE, ceil(250 * max_rght / (constraint_count or 1)))
)
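    # Illustrative arithmetic (hypothetical numbers): with max_rght = 100000 and 2000
    # constraints, ceil(250 * 100000 / 2000) = 12500, so the batch size is capped at
    # CHUNKSIZE (10000); with 50000 constraints it would shrink to 500 per batch.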
return max_rght, dynamic_chunksize
def set_leaf_nodes_invisible(channel_id, node_ids=None, exclude_node_ids=None):
"""
Set nodes in a channel as unavailable.
With no additional arguments, this will hide an entire channel.
With the additional nodes arguments, it will selectively flag nodes
as unavailable, based on the passed in ids, setting them as unavailable if
they are in node_ids, or descendants of those nodes, but not in
exclude_node_ids or descendants of those nodes.
"""
bridge = Bridge(app_name=CONTENT_APP_NAME)
connection = bridge.get_connection()
# Start a counter for the while loop
min_boundary = 1
# Calculate batch parameters
max_rght, dynamic_chunksize = _calculate_batch_params(
bridge, channel_id, node_ids, exclude_node_ids
)
logger.info(
"Removing availability of non-topic ContentNode objects in {} batches of {}".format(
int(ceil(max_rght / dynamic_chunksize)), dynamic_chunksize
)
)
while min_boundary < max_rght:
batch_statement = _create_batch_update_statement(
bridge,
channel_id,
min_boundary,
min_boundary + dynamic_chunksize,
node_ids,
exclude_node_ids,
)
# Execute the update for this batch
connection.execute(
batch_statement.values(available=False).execution_options(autocommit=True)
)
min_boundary += dynamic_chunksize
bridge.end()
def set_leaf_node_availability_from_local_file_availability(
channel_id, node_ids=None, exclude_node_ids=None
):
"""
Set nodes in a channel as available, based on their required files.
With no additional arguments, this will make every node in the channel
available or unavailable based on whether the files needed to render
those nodes are present on disk.
With the additional nodes arguments, it will selectively flag nodes
based on the passed in ids, marking their availability if
they are in node_ids, or descendants of those nodes, but not in
exclude_node_ids or descendants of those nodes.
Nodes in the channel not captured by the constraints will not have
their availability changed either way.
"""
bridge = Bridge(app_name=CONTENT_APP_NAME)
# SQL Alchemy reference to the content node table
ContentNodeTable = bridge.get_table(ContentNode)
# SQL Alchemy reference to the file table - a mapping from
# contentnodes to the files that they use
FileTable = bridge.get_table(File)
# SQL Alchemy reference to the localfile table which tracks
# information about the files on disk, such as availability
LocalFileTable = bridge.get_table(LocalFile)
connection = bridge.get_connection()
    # This statement defines the update condition for the contentnode:
    # running EXISTS on it (as is done below) will produce either
# True, in the case when the contentnode has the required files
# available for rendering, or False otherwise.
contentnode_statement = (
# We could select any property here, as it's the exist that matters.
select([1]).select_from(
# This does the first step in the many to many lookup for File
# and LocalFile.
FileTable.join(
LocalFileTable,
and_(
# This does the actual correlation between file and local file
FileTable.c.local_file_id == LocalFileTable.c.id,
# This only joins on LocalFile objects that we know
# have associated files on disk.
LocalFileTable.c.available == True, # noqa
),
)
)
# Only look at files that are required (not supplementary)
.where(FileTable.c.supplementary == False) # noqa
# Correlate between the contentnode id and the foreign key
# to the content node on the file table to complete the
# many to many lookup
.where(ContentNodeTable.c.id == FileTable.c.contentnode_id)
)
# Start a counter for the while loop
min_boundary = 1
# Calculate batch parameters
max_rght, dynamic_chunksize = _calculate_batch_params(
bridge, channel_id, node_ids, exclude_node_ids
)
logger.info(
"Setting availability of non-topic ContentNode objects based on LocalFile availability in {} batches of {}".format(
int(ceil(max_rght / dynamic_chunksize)), dynamic_chunksize
)
)
while min_boundary < max_rght:
batch_statement = _create_batch_update_statement(
bridge,
channel_id,
min_boundary,
min_boundary + dynamic_chunksize,
node_ids,
exclude_node_ids,
)
# Execute the update for this batch
connection.execute(
batch_statement.values(
available=exists(contentnode_statement)
).execution_options(autocommit=True)
)
min_boundary += dynamic_chunksize
bridge.end()
def mark_local_files_as_unavailable(checksums, destination=None):
mark_local_files_availability(checksums, False, destination=destination)
def mark_local_files_as_available(checksums, destination=None):
"""
Shortcut method to update database if we are sure that the files are available.
Can be used after successful downloads to flag availability without having to do expensive disk reads.
"""
mark_local_files_availability(checksums, True, destination=destination)
def mark_local_files_availability(checksums, availability, destination=None):
if checksums:
bridge = Bridge(app_name=CONTENT_APP_NAME, sqlite_file_path=destination)
LocalFileTable = bridge.get_table(LocalFile)
logger.info(
"Setting availability to {availability} of {number} LocalFile objects based on passed in checksums".format(
number=len(checksums), availability=availability
)
)
connection = bridge.get_connection()
trans = connection.begin()
for i in range(0, len(checksums), CHUNKSIZE):
connection.execute(
LocalFileTable.update()
.where(
filter_by_checksums(
LocalFileTable.c.id, checksums[i : i + CHUNKSIZE]
)
)
.values(available=availability)
)
trans.commit()
bridge.end()
def _check_file_availability(files):
checksums_to_set_available = []
checksums_to_set_unavailable = []
for file in files:
try:
# Update if the file exists, *and* the localfile is set as unavailable.
if using_remote_storage() or os.path.exists(
get_content_storage_file_path(
get_content_file_name({"id": file[0], "extension": file[2]})
)
):
if not file[1]:
checksums_to_set_available.append(file[0])
# Update if the file does not exist, *and* the localfile is set as available.
else:
if file[1]:
checksums_to_set_unavailable.append(file[0])
except InvalidStorageFilenameError:
continue
return checksums_to_set_available, checksums_to_set_unavailable
def set_local_file_availability_from_disk(checksums=None, destination=None):
if type(checksums) == list and len(checksums) > CHUNKSIZE:
for i in range(0, len(checksums), CHUNKSIZE):
set_local_file_availability_from_disk(
checksums=checksums[i : i + CHUNKSIZE], destination=destination
)
return
bridge = Bridge(app_name=CONTENT_APP_NAME, sqlite_file_path=destination)
LocalFileTable = bridge.get_table(LocalFile)
query = select(
[LocalFileTable.c.id, LocalFileTable.c.available, LocalFileTable.c.extension]
)
if checksums is None:
logger.info(
"Setting availability of LocalFile objects based on disk availability"
)
elif type(checksums) == list:
logger.info(
"Setting availability of {number} LocalFile objects based on disk availability".format(
number=len(checksums)
)
)
query = query.where(filter_by_checksums(LocalFileTable.c.id, checksums))
else:
logger.info(
"Setting availability of LocalFile object with checksum {checksum} based on disk availability".format(
checksum=checksums
)
)
query = query.where(LocalFileTable.c.id == checksums)
connection = bridge.get_connection()
files = connection.execute(query).fetchall()
checksums_to_set_available, checksums_to_set_unavailable = _check_file_availability(
files
)
bridge.end()
mark_local_files_as_available(checksums_to_set_available, destination=destination)
mark_local_files_as_unavailable(
checksums_to_set_unavailable, destination=destination
)
def recurse_annotation_up_tree(channel_id):
bridge = Bridge(app_name=CONTENT_APP_NAME)
ContentNodeTable = bridge.get_table(ContentNode)
connection = bridge.get_connection()
node_depth = get_channel_node_depth(bridge, channel_id)
logger.info(
"Annotating ContentNode objects with children for {levels} levels".format(
levels=node_depth
)
)
child = ContentNodeTable.alias()
# start a transaction
trans = connection.begin()
start = datetime.datetime.now()
# Update all leaf ContentNodes to have num_coach_content to 1 or 0
# Update all leaf ContentNodes to have on_device_resources to 1 or 0
connection.execute(
ContentNodeTable.update()
.where(
and_(
# In this channel
ContentNodeTable.c.channel_id == channel_id,
# That are not topics
ContentNodeTable.c.kind != content_kinds.TOPIC,
)
)
.values(
num_coach_contents=cast(ContentNodeTable.c.coach_content, Integer()),
on_device_resources=cast(ContentNodeTable.c.available, Integer()),
)
)
# Before starting set availability to False on all topics.
connection.execute(
ContentNodeTable.update()
.where(
and_(
# In this channel
ContentNodeTable.c.channel_id == channel_id,
# That are topics
ContentNodeTable.c.kind == content_kinds.TOPIC,
)
)
.values(
available=False,
on_device_resources=0,
)
)
# Expression to capture all available child nodes of a contentnode
available_nodes = select([child.c.available]).where(
and_(
child.c.available == True, # noqa
ContentNodeTable.c.id == child.c.parent_id,
)
)
# Expressions for annotation of coach content
# Expression that will resolve a boolean value for all the available children
# of a content node, whereby if they all have coach_content flagged on them, it will be true,
# but otherwise false.
# Everything after the select statement should be identical to the available_nodes expression above.
if bridge.engine.name == "sqlite":
# Use a min function to simulate an AND.
coach_content_nodes = select([func.min(child.c.coach_content)]).where(
and_(
child.c.available == True, # noqa
ContentNodeTable.c.id == child.c.parent_id,
)
)
elif bridge.engine.name == "postgresql":
# Use the postgres boolean AND operator
coach_content_nodes = select([func.bool_and(child.c.coach_content)]).where(
and_(
child.c.available == True, # noqa
ContentNodeTable.c.id == child.c.parent_id,
)
)
# Expression that sums the total number of coach contents for each child node
# of a contentnode
coach_content_num = select([func.sum(child.c.num_coach_contents)]).where(
and_(
child.c.available == True, # noqa
ContentNodeTable.c.id == child.c.parent_id,
)
)
# Expression that sums the total number of on_device_resources for each child node
# of a contentnode
on_device_num = select([func.sum(child.c.on_device_resources)]).where(
and_(
child.c.available == True, # noqa
ContentNodeTable.c.id == child.c.parent_id,
)
)
# Go from the deepest level to the shallowest
for level in range(node_depth, 0, -1):
logger.info(
"Annotating ContentNode objects with children for level {level}".format(
level=level
)
)
# Only modify topic availability here
connection.execute(
ContentNodeTable.update()
.where(
and_(
ContentNodeTable.c.level == level - 1,
ContentNodeTable.c.channel_id == channel_id,
ContentNodeTable.c.kind == content_kinds.TOPIC,
)
)
# Because we have set availability to False on all topics as a starting point
# we only need to make updates to topics with available children.
.where(exists(available_nodes))
.values(
available=exists(available_nodes),
coach_content=coach_content_nodes,
num_coach_contents=coach_content_num,
on_device_resources=on_device_num,
)
)
# commit the transaction
trans.commit()
elapsed = datetime.datetime.now() - start
logger.debug(
"Recursive topic tree annotation took {} seconds".format(elapsed.seconds)
)
bridge.end()
def calculate_dummy_progress_for_annotation(node_ids, exclude_node_ids, total_progress):
num_annotation_constraints = len(node_ids or []) + len(exclude_node_ids or [])
# Calculate a percentage of the total progress to denote to annotation
# between 1 and 10
annotation_proportion = min(10, max(1, int(num_annotation_constraints / 500)))
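    # Worked example (hypothetical numbers): 3000 constraints give a proportion of 6,
    # so with total_progress = 100 this returns int(6 * 100 / 94) = 6 extra progress units.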
# Create some progress proportional to annotation task
return int(annotation_proportion * total_progress / (100 - annotation_proportion))
def propagate_forced_localfile_removal(localfiles_dict_list):
total = len(localfiles_dict_list)
i = 0
    # Even though we are using the filter_by_uuids method below
# which prevents too many SQL parameters from being passed in to the query
# if we have too many UUIDs it is possible we might still generate too much SQL
# and cause issues - so we batch the ids here.
batch_size = 10000
while i < total:
file_slice = localfiles_dict_list[i : i + batch_size]
files = File.objects.filter(
supplementary=False,
local_file__in=LocalFile.objects.filter_by_uuids(
[f["id"] for f in file_slice]
),
)
ContentNode.objects.filter(files__in=files).update(available=False)
i += batch_size
def reannotate_all_channels():
for channel_id in ChannelMetadata.objects.all().values_list("id", flat=True):
recurse_annotation_up_tree(channel_id)
def update_content_metadata(
channel_id, node_ids=None, exclude_node_ids=None, public=None
):
set_leaf_node_availability_from_local_file_availability(
channel_id, node_ids=node_ids, exclude_node_ids=exclude_node_ids
)
recurse_annotation_up_tree(channel_id)
set_channel_metadata_fields(channel_id, public=public)
ContentCacheKey.update_cache_key()
# Do this call after refreshing the content cache key
# as the caching is dependent on the key.
get_all_contentnode_label_metadata()
def set_content_visibility(
channel_id, checksums, node_ids=None, exclude_node_ids=None, public=None
):
mark_local_files_as_available(checksums)
update_content_metadata(
channel_id, node_ids=node_ids, exclude_node_ids=exclude_node_ids, public=public
)
def set_content_visibility_from_disk(channel_id):
set_local_file_availability_from_disk()
update_content_metadata(channel_id)
def set_content_invisible(channel_id, node_ids, exclude_node_ids):
set_leaf_nodes_invisible(channel_id, node_ids, exclude_node_ids)
recurse_annotation_up_tree(channel_id)
set_channel_metadata_fields(channel_id)
ContentCacheKey.update_cache_key()
# Do this call after refreshing the content cache key
# as the caching is dependent on the key.
get_all_contentnode_label_metadata()
def set_channel_metadata_fields(channel_id, public=None):
with db_lock():
channel = ChannelMetadata.objects.get(id=channel_id)
calculate_published_size(channel)
calculate_total_resource_count(channel)
calculate_included_languages(channel)
calculate_next_order(channel)
if public is not None:
channel.public = public
channel.save()
def files_for_nodes(nodes):
return LocalFile.objects.filter(files__contentnode__in=nodes)
def total_file_size(files_or_nodes):
if issubclass(files_or_nodes.model, LocalFile):
localfiles = files_or_nodes
elif issubclass(files_or_nodes.model, ContentNode):
localfiles = files_for_nodes(files_or_nodes)
else:
raise TypeError("Expected queryset for LocalFile or ContentNode")
return localfiles.distinct().aggregate(Sum("file_size"))["file_size__sum"] or 0
def calculate_published_size(channel):
content_nodes = ContentNode.objects.filter(channel_id=channel.id)
channel.published_size = total_file_size(
files_for_nodes(content_nodes).filter(available=True)
)
channel.save()
def calculate_total_resource_count(channel):
content_nodes = ContentNode.objects.filter(channel_id=channel.id)
channel.total_resource_count = (
content_nodes.filter(available=True).exclude(kind=content_kinds.TOPIC).count()
)
channel.save()
def calculate_included_languages(channel):
content_nodes = ContentNode.objects.filter(
channel_id=channel.id, available=True
).exclude(lang=None)
languages = content_nodes.order_by("lang").values_list("lang", flat=True).distinct()
channel.included_languages.add(*list(languages))
def calculate_next_order(channel, model=ChannelMetadata):
if channel.order is None or channel.order == 0:
max_order = model.objects.aggregate(Max("order")).get("order__max", 0)
if max_order is None:
max_order = 0
channel.order = max_order + 1
channel.save()
def set_channel_ancestors(channel_id):
bridge = Bridge(app_name=CONTENT_APP_NAME)
ContentNodeTable = bridge.get_table(ContentNode)
connection = bridge.get_connection()
node_depth = get_channel_node_depth(bridge, channel_id)
parent = ContentNodeTable.alias()
# start a transaction
trans = connection.begin()
start = datetime.datetime.now()
connection.execute(
ContentNodeTable.update()
.where(
and_(
ContentNodeTable.c.level == 0,
ContentNodeTable.c.channel_id == channel_id,
)
)
.values(ancestors="[]")
)
# Go from the shallowest to deepest
for level in range(1, node_depth + 1):
if bridge.engine.name == "sqlite":
parent_id_expression = ContentNodeTable.c.parent_id
elif bridge.engine.name == "postgresql":
parent_id_expression = func.replace(
cast(ContentNodeTable.c.parent_id, String(length=36)), "-", ""
)
# Statement to generate the ancestors JSON using SQL, to avoid having to load data
# into Python in order to do this.
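        # For illustration, the JSON built up for a node two levels deep looks like
        # (hypothetical ids and titles):
        #   [{"id": "<root id>","title": "Channel"},{"id": "<topic id>","title": "Topic"}]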
ancestors = select(
[
# Get all of the JSON from the parent's ancestors field, but remove the
# closing ]
func.substr(
parent.c.ancestors, 1, func.length(parent.c.ancestors) - literal(1)
)
# Conditionalize how we add new elements depending on whether the parent's
# ancestors are empty or not.
+ case(
[
(
# If the last (and presumably first) character of the parent's
# ancestors field is literal '[' then this is an empty ancestors list
func.substr(
parent.c.ancestors,
func.length(parent.c.ancestors) - literal(1),
1,
)
== literal("["),
# In this case we just open the object without having to prepend a comma.
'{"id": "',
)
],
# Otherwise we are adding a new element to a JSON list that already has elements in it
# so we prepend with a comma in order to separate.
else_=',{"id": "',
)
+ parent_id_expression
+ '","title": "'
+ func.replace(parent.c.title, '"', '\\"')
+ '"}]'
]
).where(
and_(
ContentNodeTable.c.parent_id == parent.c.id,
)
)
connection.execute(
ContentNodeTable.update()
.where(
and_(
ContentNodeTable.c.level == level,
ContentNodeTable.c.channel_id == channel_id,
)
)
.values(
ancestors=ancestors,
)
)
# commit the transaction
trans.commit()
elapsed = datetime.datetime.now() - start
logger.debug(
"Recursive ancestor annotation took {} seconds".format(elapsed.seconds)
)
bridge.end()
| {
"content_hash": "5f0db5fe45f56635c68cb2bebb81ae82",
"timestamp": "",
"source": "github",
"line_count": 891,
"max_line_length": 123,
"avg_line_length": 36.101010101010104,
"alnum_prop": 0.6203133743704533,
"repo_name": "learningequality/kolibri",
"id": "fe901df9240a026689c39782edcd6f3bfb879252",
"size": "32166",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "kolibri/core/content/utils/annotation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3095586"
},
{
"name": "Dockerfile",
"bytes": "3559"
},
{
"name": "Gherkin",
"bytes": "996801"
},
{
"name": "HTML",
"bytes": "22573"
},
{
"name": "JavaScript",
"bytes": "2233801"
},
{
"name": "Makefile",
"bytes": "12972"
},
{
"name": "Python",
"bytes": "3652744"
},
{
"name": "SCSS",
"bytes": "8551"
},
{
"name": "Shell",
"bytes": "3867"
},
{
"name": "Vue",
"bytes": "2193917"
}
],
"symlink_target": ""
} |
import facenet
import numpy as np
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('people_per_batch', 45,
"""Number of people per batch.""")
tf.app.flags.DEFINE_integer('alpha', 0.2,
"""Positive to negative triplet distance margin.""")
embeddings = np.zeros((1800,128))
np.random.seed(123)
for ix in range(embeddings.shape[0]):
for jx in range(embeddings.shape[1]):
rnd = 1.0*np.random.randint(1,2**32)/2**32
embeddings[ix][jx] = rnd
emb_array = embeddings
image_data = np.zeros((1800,96,96,3))
num_per_class = [40 for i in range(45)]
np.random.seed(123)
apn, nrof_random_negs, nrof_triplets = facenet.select_triplets(emb_array, num_per_class, image_data)
| {
"content_hash": "0bec32c1b372f46c3cfda7a0bef4c8bf",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 100,
"avg_line_length": 25.8,
"alnum_prop": 0.6447028423772609,
"repo_name": "lodemo/CATANA",
"id": "149e262b31a02ff7d9d6940990b8c5785eeb6b4f",
"size": "774",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/face_recognition/facenet/tmp/select_triplets_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "4068"
},
{
"name": "HTML",
"bytes": "755393"
},
{
"name": "JavaScript",
"bytes": "1451186"
},
{
"name": "Jupyter Notebook",
"bytes": "12442842"
},
{
"name": "MATLAB",
"bytes": "29584"
},
{
"name": "Python",
"bytes": "5006823"
},
{
"name": "Shell",
"bytes": "154"
}
],
"symlink_target": ""
} |
from django.conf import settings
def viveum(request):
"""
Adds additional context variables to the default context.
"""
return {
'VIVEUM_ORDER_STANDARD_URL': settings.VIVEUM_PAYMENT.get('ORDER_STANDARD_URL'),
}
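# A context processor like this is normally activated in the Django settings; on the
# older Django versions this project appears to target, that would look roughly like
# (hypothetical snippet):
#   TEMPLATE_CONTEXT_PROCESSORS += ('viveum.context_processors.viveum',)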
| {
"content_hash": "30f72ebf9e0eda471c723d0ccb259ec8",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 87,
"avg_line_length": 24.1,
"alnum_prop": 0.6680497925311203,
"repo_name": "philippeowagner/django-shop-viveum",
"id": "6c9805835ddae73e93ee89f1007492c83cf0f038",
"size": "265",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "viveum/context_processors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3839"
},
{
"name": "Python",
"bytes": "35353"
},
{
"name": "Shell",
"bytes": "1209"
}
],
"symlink_target": ""
} |
"""Agent implementing the server side of the AMQP management protocol.
Adapter layer between external attribute-value maps sent/received via the AMQP
management protocol and implementation objects (C or Python) of the dispatch
router. Entity types are as described in the qdrouter.json schema. Reading
configuration files is treated as a set of CREATE operations.
Maintains a set of L{EntityAdapter} that hold attribute maps reflecting the last
known attribute values of the implementation objects. Delegates management
operations to the correct adapter.
EntityAdapters are created/deleted in two ways:
- Externally by CREATE/DELETE operations (or loading config file)
- Internally by creation or deletion of corresponding implementation object.
Memory management: The implementation is responsible for informing the L{Agent}
when an implementation object is created and *before* it is deleted in the case
of a C object.
EntityAdapters can:
- Receive attribute maps via CREATE or UPDATE operations (reading configuration
files is treated as a set of CREATE operations) and set configuration in the
implementation objects.
- Refresh the adapters attribute map to reflect the current state of the
implementation objects, to respond to READ or QUERY operations with up-to-date values.
To avoid confusion the term "update" is only used for the EntityAdapter updating
the implementation object. The term "refresh" is used for the EntityAdapter
getting current information from the implementation object.
## Threading:
The agent is locked to be thread safe, called in the following threads:
- Reading configuration file in initialization thread (no contention).
- Management requests arriving in multiple, concurrent connection threads.
- Implementation objects created/deleted in multiple, concurrent connection threads.
When refreshing attributes, the agent must also read C implementation object
data that may be updated in other threads.
# FIXME aconway 2015-02-09:
Temporary solution is to lock the entire dispatch router lock during full refresh.
Better solution coming soon...
"""
import traceback, json, pstats
from itertools import ifilter, chain
from traceback import format_exc
from threading import Lock
from cProfile import Profile
from cStringIO import StringIO
from ctypes import c_void_p, py_object, c_long
from subprocess import Popen
from ..dispatch import IoAdapter, LogAdapter, LOG_INFO, LOG_WARNING, LOG_DEBUG, LOG_ERROR, TREATMENT_ANYCAST_CLOSEST
from qpid_dispatch.management.error import ManagementError, OK, CREATED, NO_CONTENT, STATUS_TEXT, \
BadRequestStatus, InternalServerErrorStatus, NotImplementedStatus, NotFoundStatus, ForbiddenStatus
from qpid_dispatch.management.entity import camelcase
from .schema import ValidationError, SchemaEntity, EntityType
from .qdrouter import QdSchema
from ..router.message import Message
from ..router.address import Address
from ..policy.policy_manager import PolicyManager
def dictstr(d):
"""Stringify a dict in the form 'k=v, k=v ...' instead of '{k:v, ...}'"""
return ", ".join("%s=%r" % (k, v) for k, v in d.iteritems())
def required_property(prop, request):
"""Raise exception if required property is missing"""
if not request.properties or prop not in request.properties:
raise BadRequestStatus("No '%s' property: %s"%(prop, request))
return request.properties[prop]
def not_implemented(operation, entity_type):
"""Raise NOT_IMPLEMENTED exception"""
raise NotImplementedStatus("Operation '%s' not implemented on %s" % (operation, entity_type))
class AtomicCount(object):
"""Simple atomic counter"""
def __init__(self, count=0):
self.count = count
self.lock = Lock()
def next(self):
with self.lock:
n = self.count
self.count += 1
return n
class Implementation(object):
"""Abstract implementation wrapper"""
def __init__(self, entity_type, key):
self.entity_type, self.key = entity_type, key
class CImplementation(Implementation):
"""Wrapper for a C implementation pointer"""
def __init__(self, qd, entity_type, pointer):
super(CImplementation, self).__init__(entity_type, pointer)
fname = "qd_entity_refresh_" + entity_type.short_name.replace('.', '_')
self.refreshfn = qd.function(fname, c_long, [py_object, c_void_p])
def refresh_entity(self, attributes):
return self.refreshfn(attributes, self.key) or True
class PythonImplementation(Implementation):
"""Wrapper for a Python implementation object"""
def __init__(self, entity_type, impl):
"""impl.refresh_entity(attributes) must be a valid function call"""
super(PythonImplementation, self).__init__(entity_type, id(impl))
self.refresh_entity = impl.refresh_entity
class EntityAdapter(SchemaEntity):
"""
Base class for agent entities with operations as well as attributes.
"""
def __init__(self, agent, entity_type, attributes=None, validate=True):
"""
        @param agent: Containing L{Agent}
@param entity_type: L{EntityType}
@param attributes: Attribute name:value map
@param validate: If true, validate the entity.
"""
super(EntityAdapter, self).__init__(entity_type, attributes or {}, validate=validate)
# Direct __dict__ access to avoid validation as schema attributes
self.__dict__['_agent'] = agent
self.__dict__['_log'] = agent.log
self.__dict__['_qd'] = agent.qd
self.__dict__['_dispatch'] = agent.dispatch
self.__dict__['_policy'] = agent.policy
self.__dict__['_implementations'] = []
def validate(self, **kwargs):
"""Set default identity and name if not already set, then do schema validation"""
identity = self.attributes.get("identity")
name = self.attributes.get("name")
if identity:
if not name:
self.attributes[u"name"] = "%s/%s" % (self.entity_type.short_name, self._identifier())
else:
self.attributes[u"identity"] = "%s/%s" % (self.entity_type.short_name, self._identifier())
if not name:
self.attributes.setdefault(u'name', self.attributes[u'identity'])
super(EntityAdapter, self).validate(**kwargs)
def _identifier(self):
"""
Generate identifier. identity=type/identifier.
Default is per-type counter, derived classes can override.
"""
try: counter = type(self)._identifier_count
except AttributeError: counter = type(self)._identifier_count = AtomicCount()
return str(counter.next())
def _refresh(self):
"""Refresh self.attributes from implementation object(s)."""
for impl in self._implementations:
impl.refresh_entity(self.attributes)
return bool(self._implementations)
def _add_implementation(self, impl):
"""Add an implementaiton object to use to refresh our attributes"""
self._implementations.append(impl)
def create(self):
"""Subclasses can add extra create actions here"""
pass
def read(self, request):
"""Handle read request, default is to return attributes."""
request_type = self.entity_type.schema.long_name(request.properties.get('type'))
if request_type and self.type != request_type:
raise NotFoundStatus("Entity type '%s' does match requested type '%s'" %
(self.type, request_type))
return (OK, self.attributes)
def update(self, request):
"""Handle update request with new attributes from management client"""
self.entity_type.update_check(request.body, self.attributes)
newattrs = dict(self.attributes, **request.body)
self.entity_type.validate(newattrs, update=True)
self.attributes = newattrs
self._update()
return (OK, self.attributes)
def _update(self):
"""Subclasses implement update logic here"""
pass
def delete(self, request):
"""Handle delete request from client"""
self._delete()
self._agent.remove(self)
return (NO_CONTENT, {})
def _delete(self):
"""Subclasses implement delete logic here"""
pass
def __str__(self):
keys = sorted(self.attributes.keys())
# If the attribute is hidden the attribute value will show up as stars ('*******').
return "Entity(%s)" % ", ".join("%s=%s" % (k, '*******' if self.entity_type.attribute(k).hidden else self.attributes[k]) for k in keys)
class ContainerEntity(EntityAdapter):
"""
    The ContainerEntity has been deprecated. Use the RouterEntity instead
"""
def create(self):
self._qd.qd_dispatch_configure_container(self._dispatch, self)
def _identifier(self):
self.attributes.setdefault("containerName", "00000000-0000-0000-0000-000000000000")
return self.attributes["containerName"]
def __str__(self):
return super(ContainerEntity, self).__str__().replace("Entity(", "ContainerEntity(")
class RouterEntity(EntityAdapter):
def __init__(self, agent, entity_type, attributes=None):
super(RouterEntity, self).__init__(agent, entity_type, attributes, validate=False)
# Router is a mix of configuration and operational entity.
# The statistics attributes are operational not configured.
self._add_implementation(
CImplementation(agent.qd, entity_type, self._dispatch))
def _identifier(self): return self.attributes.get('id')
def create(self):
try:
if self.routerId:
self._agent.log(LOG_WARNING, "routerId is deprecated, use id instead")
except:
pass
self._qd.qd_dispatch_configure_router(self._dispatch, self)
def __str__(self):
return super(RouterEntity, self).__str__().replace("Entity(", "RouterEntity(")
class LogEntity(EntityAdapter):
def __init__(self, agent, entity_type, attributes=None, validate=True):
# Special defaults for DEFAULT module.
if attributes.get("module") == "DEFAULT":
defaults = dict(enable="info+", timestamp=True, source=False, output="stderr")
attributes = dict(defaults, **attributes)
super(LogEntity, self).__init__(agent, entity_type, attributes, validate=True)
def _identifier(self): return self.attributes.get('module')
def create(self):
self._qd.qd_log_entity(self)
def _update(self):
self._qd.qd_log_entity(self)
def _delete(self):
"""Can't actually delete a log source but return it to the default state"""
self._qd.qd_log_source_reset(self.attributes['module'])
def __str__(self):
return super(LogEntity, self).__str__().replace("Entity(", "LogEntity(")
class PolicyEntity(EntityAdapter):
def __init__(self, agent, entity_type, attributes=None):
super(PolicyEntity, self).__init__(agent, entity_type, attributes, validate=False)
# Policy is a mix of configuration and operational entity.
# The statistics attributes are operational not configured.
self._add_implementation(
CImplementation(agent.qd, entity_type, self._dispatch))
def create(self):
self._qd.qd_dispatch_configure_policy(self._dispatch, self)
self._qd.qd_dispatch_register_policy_manager(self._dispatch, self._policy)
def _identifier(self):
return self.attributes.get('module')
def __str__(self):
return super(PolicyEntity, self).__str__().replace("Entity(", "PolicyEntity(")
class VhostEntity(EntityAdapter):
def create(self):
self._policy.create_ruleset(self.attributes)
def _identifier(self):
return self.attributes.get('id')
def __str__(self):
return super(VhostEntity, self).__str__().replace("Entity(", "VhostEntity(")
def _delete(self):
self._policy.delete_ruleset(self.id)
def _update(self):
self._policy.update_ruleset(self.attributes)
class VhostStatsEntity(EntityAdapter):
def _identifier(self):
return self.attributes.get('id')
def __str__(self):
return super(VhostStatsEntity, self).__str__().replace("Entity(", "VhostStatsEntity(")
def _host_port_name_identifier(entity):
for attr in ['host', 'port', 'name']: # Set default values if need be
entity.attributes.setdefault(
attr, entity.entity_type.attribute(attr).missing_value())
if entity.attributes.get('name'):
return "%s:%s:%s" % (entity.attributes['host'], entity.attributes['port'], entity.attributes['name'])
else:
return "%s:%s" % (entity.attributes['host'], entity.attributes['port'])
class SslProfileEntity(EntityAdapter):
def create(self):
return self._qd.qd_dispatch_configure_ssl_profile(self._dispatch, self)
def _delete(self):
deleted = self._qd.qd_connection_manager_delete_ssl_profile(self._dispatch, self._implementations[0].key)
# SSL Profiles cannot be deleted if they are referenced by a connector/listener.
if not deleted:
raise ForbiddenStatus("SSL Profile is referenced by other listeners/connectors. Delete the associated "
"listeners/connectors before deleting the SSL Profile")
def _identifier(self):
return self.name
def __str__(self):
return super(SslProfileEntity, self).__str__().replace("Entity(", "SslProfileEntity(")
class ListenerEntity(EntityAdapter):
def create(self):
config_listener = self._qd.qd_dispatch_configure_listener(self._dispatch, self)
self._qd.qd_connection_manager_start(self._dispatch)
return config_listener
def _identifier(self):
return _host_port_name_identifier(self)
def __str__(self):
return super(ListenerEntity, self).__str__().replace("Entity(", "ListenerEntity(")
def _delete(self):
self._qd.qd_connection_manager_delete_listener(self._dispatch, self._implementations[0].key)
class ConnectorEntity(EntityAdapter):
def create(self):
config_connector = self._qd.qd_dispatch_configure_connector(self._dispatch, self)
self._qd.qd_connection_manager_start(self._dispatch)
return config_connector
def _delete(self):
self._qd.qd_connection_manager_delete_connector(self._dispatch, self._implementations[0].key)
def _identifier(self):
return _host_port_name_identifier(self)
def __str__(self):
return super(ConnectorEntity, self).__str__().replace("Entity(", "ConnectorEntity(")
class FixedAddressEntity(EntityAdapter):
def create(self):
self._qd.qd_dispatch_configure_fixed_address(self._dispatch, self)
def __str__(self):
return super(FixedAddressEntity, self).__str__().replace("Entity(", "FixedAddressEntity(")
class WaypointEntity(EntityAdapter):
def create(self):
self._qd.qd_dispatch_configure_waypoint(self._dispatch, self)
#self._qd.qd_waypoint_activate_all(self._dispatch)
def __str__(self):
return super(WaypointEntity, self).__str__().replace("Entity(", "WaypointEntity(")
class LinkRoutePatternEntity(EntityAdapter):
def create(self):
self._qd.qd_dispatch_configure_lrp(self._dispatch, self)
def __str__(self):
return super(LinkRoutePatternEntity, self).__str__().replace("Entity(", "LinkRoutePatternEntity(")
class AddressEntity(EntityAdapter):
def create(self):
self._qd.qd_dispatch_configure_address(self._dispatch, self)
def __str__(self):
return super(AddressEntity, self).__str__().replace("Entity(", "AddressEntity(")
class LinkRouteEntity(EntityAdapter):
def create(self):
self._qd.qd_dispatch_configure_link_route(self._dispatch, self)
def __str__(self):
return super(LinkRouteEntity, self).__str__().replace("Entity(", "LinkRouteEntity(")
class AutoLinkEntity(EntityAdapter):
def create(self):
self._qd.qd_dispatch_configure_auto_link(self._dispatch, self)
def __str__(self):
return super(AutoLinkEntity, self).__str__().replace("Entity(", "AutoLinkEntity(")
class ConsoleEntity(EntityAdapter):
def __str__(self):
return super(ConsoleEntity, self).__str__().replace("Entity(", "ConsoleEntity(")
def create(self):
# if a named listener is present, use its host:port
name = self.attributes.get('listener')
if name:
listeners = self._agent.find_entity_by_type("listener")
for listener in listeners:
if listener.name == name:
try:
#required
host = listener.attributes['host']
port = listener.attributes['port']
#optional
wsport = self.attributes.get('wsport')
home = self.attributes.get('home')
args = self.attributes.get('args')
pargs = []
pargs.append(self.attributes['proxy'])
if args:
# Replace any $port|$host|$wsport|$home
dargs = {'$port': port, '$host': host}
if wsport:
dargs['$wsport'] = wsport
if home:
dargs['$home'] = home
for k,v in dargs.iteritems():
args = args.replace(k,str(v))
pargs += args.split()
#run the external program
Popen(pargs)
except:
self._agent.log(LOG_ERROR, "Can't parse console entity: %s" % (format_exc()))
break
class DummyEntity(EntityAdapter):
def callme(self, request):
return (OK, dict(**request.properties))
class RouterLinkEntity(EntityAdapter):
def __str__(self):
return super(RouterLinkEntity, self).__str__().replace("Entity(", "RouterLinkEntity(")
class RouterNodeEntity(EntityAdapter):
def _identifier(self):
return self.attributes.get('id')
def __str__(self):
return super(RouterNodeEntity, self).__str__().replace("Entity(", "RouterNodeEntity(")
class RouterAddressEntity(EntityAdapter):
def _identifier(self):
return self.attributes.get('key')
def __str__(self):
return super(RouterAddressEntity, self).__str__().replace("Entity(", "RouterAddressEntity(")
class ConnectionEntity(EntityAdapter):
def _identifier(self):
return self.attributes.get('host') + ":" + str(self.attributes.get('identity'))
def __str__(self):
return super(ConnectionEntity, self).__str__().replace("Entity(", "ConnectionEntity(")
class LogStatsEntity(EntityAdapter):
def _identifier(self):
return self.attributes.get('identity')
def __str__(self):
return super(LogStatsEntity, self).__str__().replace("Entity(", "LogStatsEntity(")
class AllocatorEntity(EntityAdapter):
def _identifier(self):
return self.attributes.get('typeName')
def __str__(self):
return super(AllocatorEntity, self).__str__().replace("Entity(", "AllocatorEntity(")
class EntityCache(object):
"""
Searchable cache of entities, can be refreshed from implementation objects.
"""
def __init__(self, agent):
self.entities = []
self.implementations = {}
self.agent = agent
self.qd = self.agent.qd
self.schema = agent.schema
self.log = self.agent.log
def map_filter(self, function, test):
"""Filter with test then apply function."""
return map(function, ifilter(test, self.entities))
def map_type(self, function, type):
"""Apply function to all entities of type, if type is None do all entities"""
if type is None:
return map(function, self.entities)
else:
if not isinstance(type, EntityType): type = self.schema.entity_type(type)
return map(function, ifilter(lambda e: e.entity_type.is_a(type), self.entities))
def add(self, entity):
"""Add an entity to the agent"""
self.log(LOG_DEBUG, "Add entity: %s" % entity)
entity.validate() # Fill in defaults etc.
# Validate in the context of the existing entities for uniqueness
self.schema.validate_full(chain(iter([entity]), iter(self.entities)))
self.entities.append(entity)
def _add_implementation(self, implementation, adapter=None):
"""Create an adapter to wrap the implementation object and add it"""
cls = self.agent.entity_class(implementation.entity_type)
if not adapter:
adapter = cls(self.agent, implementation.entity_type, validate=False)
self.implementations[implementation.key] = adapter
adapter._add_implementation(implementation)
adapter._refresh()
self.add(adapter)
def add_implementation(self, implementation, adapter=None):
self._add_implementation(implementation, adapter=adapter)
def _remove(self, entity):
try:
self.entities.remove(entity)
self.log(LOG_DEBUG, "Remove %s entity: %s" %
(entity.entity_type.short_name, entity.attributes['identity']))
except ValueError: pass
def remove(self, entity):
self._remove(entity)
def _remove_implementation(self, key):
if key in self.implementations:
entity = self.implementations[key]
del self.implementations[key]
self._remove(entity)
def remove_implementation(self, key):
self._remove_implementation(key)
def refresh_from_c(self):
"""Refresh entities from the C dispatch runtime"""
REMOVE, ADD = 0, 1
def remove_redundant(events):
"""Remove redundant add/remove pairs of events."""
add = {} # add[pointer] = index of add event.
redundant = [] # List of redundant event indexes.
for i in xrange(len(events)):
action, type, pointer = events[i]
if action == ADD:
add[pointer] = i
elif pointer in add: # action == REMOVE and there's an ADD
redundant.append(add[pointer])
redundant.append(i)
del add[pointer]
for i in sorted(redundant, reverse=True):
events.pop(i)
# FIXME aconway 2014-10-23: locking is ugly, push it down into C code.
self.qd.qd_dispatch_router_lock(self.agent.dispatch)
try:
events = []
self.qd.qd_entity_refresh_begin(events)
remove_redundant(events)
for action, type, pointer in events:
if action == REMOVE:
self._remove_implementation(pointer)
elif action == ADD:
entity_type = self.schema.entity_type(type)
self._add_implementation(CImplementation(self.qd, entity_type, pointer))
# Refresh the entity values while the lock is still held.
for e in self.entities: e._refresh()
finally:
self.qd.qd_entity_refresh_end()
self.qd.qd_dispatch_router_unlock(self.agent.dispatch)
class ManagementEntity(EntityAdapter):
"""An entity representing the agent itself. It is a singleton created by the agent."""
def __init__(self, agent, entity_type, attributes, validate=True):
attributes = {"identity": "self", "name": "self"}
super(ManagementEntity, self).__init__(agent, entity_type, attributes, validate=validate)
self.__dict__["_schema"] = entity_type.schema
def requested_type(self, request):
type = request.properties.get('entityType')
if type: return self._schema.entity_type(type)
else: return None
def query(self, request):
"""Management node query operation"""
entity_type = self.requested_type(request)
if entity_type:
all_attrs = set(entity_type.attributes.keys())
else:
all_attrs = self._schema.all_attributes
names = set(request.body.get('attributeNames'))
if names:
unknown = names - all_attrs
if unknown:
if entity_type:
for_type = " for type %s" % entity_type.name
else:
for_type = ""
raise NotFoundStatus("Unknown attributes %s%s." % (list(unknown), for_type))
else:
names = all_attrs
results = []
def add_result(entity):
result = []
non_empty = False
for name in names:
result.append(entity.attributes.get(name))
if result[-1] is not None: non_empty = True
if non_empty: results.append(result)
self._agent.entities.map_type(add_result, entity_type)
return (OK, {'attributeNames': list(names), 'results': results})
def get_types(self, request):
type = self.requested_type(request)
return (OK, dict((t.name, [b.name for b in t.all_bases])
for t in self._schema.by_type(type)))
def get_annotations(self, request):
"""
We are not supporting any annotations at the moment.
"""
return (OK, {})
def get_operations(self, request):
type = self.requested_type(request)
return (OK, dict((t, et.operations)
for t, et in self._schema.entity_types.iteritems()
if not type or type.name == t))
def get_attributes(self, request):
type = self.requested_type(request)
return (OK, dict((t, [a for a in et.attributes])
for t, et in self._schema.entity_types.iteritems()
if not type or type.name == t))
def get_mgmt_nodes(self, request):
router = self._agent.entities.map_type(None, 'router')[0]
area = router.attributes['area']
def node_address(node):
return str(Address.topological(node.attributes['id'], "$management", area))
return (OK, self._agent.entities.map_type(node_address, 'router.node'))
def get_schema(self, request):
return (OK, self._schema.dump())
def _intprop(self, request, prop):
value = request.properties.get(prop)
if value is not None: value = int(value)
return value
def get_json_schema(self, request):
return (OK, json.dumps(self._schema.dump(), indent=self._intprop(request, "indent")))
def get_log(self, request):
logs = self._qd.qd_log_recent_py(self._intprop(request, "limit") or -1)
return (OK, logs)
def profile(self, request):
"""Start/stop the python profiler, returns profile results"""
profile = self.__dict__.get("_profile")
if "start" in request.properties:
if not profile:
profile = self.__dict__["_profile"] = Profile()
profile.enable()
self._log(LOG_INFO, "Started python profiler")
return (OK, None)
if not profile:
raise BadRequestStatus("Profiler not started")
if "stop" in request.properties:
profile.create_stats()
self._log(LOG_INFO, "Stopped python profiler")
out = StringIO()
stats = pstats.Stats(profile, stream=out)
try:
stop = request.properties["stop"]
if stop == "kgrind": # Generate kcachegrind output using pyprof2calltree
from pyprof2calltree import convert
convert(stats, out)
elif stop == "visualize": # Start kcachegrind using pyprof2calltree
from pyprof2calltree import visualize
visualize(stats)
else:
stats.print_stats() # Plain python profile stats
return (OK, out.getvalue())
finally:
out.close()
raise BadRequestStatus("Bad profile request %s" % (request))
class Agent(object):
"""AMQP managment agent. Manages entities, directs requests to the correct entity."""
def __init__(self, dispatch, qd):
self.qd = qd
self.dispatch = dispatch
self.schema = QdSchema()
self.entities = EntityCache(self)
self.request_lock = Lock()
self.log_adapter = LogAdapter("AGENT")
self.policy = PolicyManager(self)
self.management = self.create_entity({"type": "management"})
self.add_entity(self.management)
def log(self, level, text):
info = traceback.extract_stack(limit=2)[0] # Caller frame info
self.log_adapter.log(level, text, info[0], info[1])
def activate(self, address):
"""Register the management address to receive management requests"""
self.entities.refresh_from_c()
self.log(LOG_INFO, "Activating management agent on %s" % address)
self.io = IoAdapter(self.receive, address, 'L', '0', TREATMENT_ANYCAST_CLOSEST)
def entity_class(self, entity_type):
"""Return the class that implements entity_type"""
class_name = camelcase(entity_type.short_name, capital=True) + 'Entity'
entity_class = globals().get(class_name)
if not entity_class:
raise InternalServerErrorStatus(
"Can't find implementation '%s' for '%s'" % (class_name, entity_type.name))
return entity_class
def create_entity(self, attributes):
"""Create an instance of the implementation class for an entity"""
if attributes.get('identity') is not None:
raise BadRequestStatus("'identity' attribute cannot be specified %s" % attributes)
if attributes.get('type') is None:
raise BadRequestStatus("No 'type' attribute in %s" % attributes)
entity_type = self.schema.entity_type(attributes['type'])
return self.entity_class(entity_type)(self, entity_type, attributes)
def respond(self, request, status=OK, description=None, body=None):
"""Send a response to the client"""
if body is None: body = {}
description = description or STATUS_TEXT[status]
response = Message(
address=request.reply_to,
correlation_id=request.correlation_id,
properties={'statusCode': status, 'statusDescription': description},
body=body)
self.log(LOG_DEBUG, "Agent response:\n %s\n Responding to: \n %s"%(response, request))
try:
self.io.send(response)
except:
self.log(LOG_ERROR, "Can't respond to %s: %s"%(request, format_exc()))
def receive(self, request, unused_link_id, unused_cost):
"""Called when a management request is received."""
def error(e, trace):
"""Raise an error"""
self.log(LOG_ERROR, "Error performing %s: %s"%(request.properties.get('operation'), e.message))
self.respond(request, e.status, e.description)
# If there's no reply_to, don't bother to process the request.
if not request.reply_to:
return
# Coarse locking, handle one request at a time.
with self.request_lock:
try:
self.entities.refresh_from_c()
self.log(LOG_DEBUG, "Agent request %s"% request)
status, body = self.handle(request)
self.respond(request, status=status, body=body)
except ManagementError, e:
error(e, format_exc())
except ValidationError, e:
error(BadRequestStatus(str(e)), format_exc())
except Exception, e:
error(InternalServerErrorStatus("%s: %s"%(type(e).__name__, e)), format_exc())
def entity_type(self, type):
try: return self.schema.entity_type(type)
except ValidationError, e: raise NotFoundStatus(str(e))
def handle(self, request):
"""
Handle a request.
Dispatch management node requests to self, entity requests to the entity.
@return: (response-code, body)
"""
operation = required_property('operation', request)
if operation.lower() == 'create':
# Create requests are entity requests but must be handled by the agent since
# the entity does not yet exist.
return self.create(request)
else:
target = self.find_entity(request)
target.entity_type.allowed(operation, request.body)
try:
method = getattr(target, operation.lower().replace("-", "_"))
except AttributeError:
not_implemented(operation, target.type)
return method(request)
def _create(self, attributes):
"""Create an entity, called externally or from configuration file."""
entity = self.create_entity(attributes)
pointer = entity.create()
if pointer:
cimplementation = CImplementation(self.qd, entity.entity_type, pointer)
self.entities.add_implementation(cimplementation, entity)
else:
self.add_entity(entity)
return entity
def create(self, request):
"""
Create operation called from an external client.
Create is special: it is directed at an entity but the entity
does not yet exist so it is handled initially by the agent and
then delegated to the new entity.
"""
attributes = request.body
for a in ['type', 'name']:
prop = request.properties.get(a)
if prop:
old = attributes.setdefault(a, prop)
if old is not None and old != prop:
raise BadRequestStatus("Conflicting values for '%s'" % a)
attributes[a] = prop
if attributes.get('type') is None:
raise BadRequestStatus("No 'type' attribute in %s" % attributes)
et = self.schema.entity_type(attributes['type'])
et.allowed("CREATE", attributes)
et.create_check(attributes)
return (CREATED, self._create(attributes).attributes)
def configure(self, attributes):
"""Created via configuration file"""
self._create(attributes)
def add_entity(self, entity):
"""Add an entity adapter"""
self.entities.add(entity)
def remove(self, entity):
self.entities.remove(entity)
def add_implementation(self, implementation, entity_type_name):
"""Add an internal python implementation object, it will be wrapped with an entity adapter"""
self.entities.add_implementation(
PythonImplementation(self.entity_type(entity_type_name), implementation))
def remove_implementation(self, implementation):
"""Remove and internal python implementation object."""
self.entities.remove_implementation(id(implementation))
def find_entity(self, request):
"""Find the entity addressed by request"""
requested_type = request.properties.get('type')
if requested_type:
requested_type = self.schema.entity_type(requested_type)
# ids is a map of identifying attribute values
ids = dict((k, request.properties.get(k))
for k in ['name', 'identity'] if k in request.properties)
# Special case for management object: if no name/id and no conflicting type
# then assume this is for "self"
if not ids:
if not requested_type or self.management.entity_type.is_a(requested_type):
return self.management
else:
raise BadRequestStatus("%s: No name or identity provided" % requested_type)
def attrvals():
"""String form of the id attribute values for error messages"""
return " ".join(["%s=%r" % (k, v) for k, v in ids.iteritems()])
k, v = ids.iteritems().next() # Get the first id attribute
found = self.entities.map_filter(None, lambda e: e.attributes.get(k) == v)
if len(found) == 1:
entity = found[0]
elif len(found) > 1:
raise InternalServerErrorStatus(
"Duplicate (%s) entities with %s=%r" % (len(found), k, v))
else:
raise NotFoundStatus("No entity with %s" % attrvals())
for k, v in ids.iteritems():
if entity[k] != v: raise BadRequestStatus("Conflicting %s" % attrvals())
if requested_type:
if not entity.entity_type.is_a(requested_type):
raise BadRequestStatus("Entity type '%s' does not extend requested type '%s'" %
(entity.entity_type.name, requested_type))
return entity
def find_entity_by_type(self, type):
return self.entities.map_type(None, type)
| {
"content_hash": "ab0851d5b8d38bf2f6f1af799ac58ace",
"timestamp": "",
"source": "github",
"line_count": 941,
"max_line_length": 143,
"avg_line_length": 39.58023379383634,
"alnum_prop": 0.6205933682373473,
"repo_name": "dskarbek/qpid-dispatch",
"id": "7f50cb838ba65e84efd919fd445a0cf74dbcdcfd",
"size": "38034",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/qpid_dispatch_internal/management/agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1081285"
},
{
"name": "C++",
"bytes": "30337"
},
{
"name": "CMake",
"bytes": "20409"
},
{
"name": "CSS",
"bytes": "143052"
},
{
"name": "HTML",
"bytes": "71202"
},
{
"name": "Java",
"bytes": "1940"
},
{
"name": "JavaScript",
"bytes": "1766562"
},
{
"name": "Objective-C",
"bytes": "5351"
},
{
"name": "Python",
"bytes": "714388"
},
{
"name": "Shell",
"bytes": "20397"
}
],
"symlink_target": ""
} |
"""
Datastore Model objects for Compute Instances, with
InstanceDirectory manager.
# Create a new instance?
>>> InstDir = InstanceDirectory()
>>> inst = InstDir.new()
>>> inst.destroy()
True
>>> inst = InstDir['i-123']
>>> inst['ip'] = "192.168.0.3"
>>> inst['project_id'] = "projectA"
>>> inst.save()
True
>>> InstDir['i-123']
<Instance:i-123>
>>> InstDir.all.next()
<Instance:i-123>
>>> inst.destroy()
True
"""
from nova import vendor
from nova import datastore
from nova import flags
from nova import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('instances_prefix', 'compute-',
'prefix for keepers for instances')
# TODO(ja): singleton instance of the directory
class InstanceDirectory(object):
"""an api for interacting with the global state of instances """
def __init__(self):
self.keeper = datastore.Keeper(FLAGS.instances_prefix)
def get(self, instance_id):
""" returns an instance object for a given id """
return Instance(instance_id)
def __getitem__(self, item):
return self.get(item)
def by_project(self, project):
""" returns a list of instance objects for a project """
for instance_id in self.keeper.smembers('project:%s:instances' % project):
yield Instance(instance_id)
def by_node(self, node_id):
""" returns a list of instances for a node """
for instance in self.all:
if instance['node_name'] == node_id:
yield instance
def by_ip(self, ip_address):
""" returns an instance object that is using the IP """
for instance in self.all:
if instance['private_dns_name'] == ip_address:
return instance
return None
def by_volume(self, volume_id):
""" returns the instance a volume is attached to """
pass
def exists(self, instance_id):
return self.keeper.set_is_member('instances', instance_id)
@property
def all(self):
""" returns a list of all instances """
for instance_id in self.keeper.set_members('instances'):
yield Instance(instance_id)
def new(self):
""" returns an empty Instance object, with ID """
instance_id = utils.generate_uid('i')
return self.get(instance_id)
class Instance(object):
""" Wrapper around stored properties of an instance """
def __init__(self, instance_id):
""" loads an instance from the datastore if exists """
self.keeper = datastore.Keeper(FLAGS.instances_prefix)
self.instance_id = instance_id
self.initial_state = {}
self.state = self.keeper[self.__redis_key]
if self.state:
self.initial_state = self.state
else:
self.state = {'state': 'pending',
'instance_id': instance_id,
'node_name': 'unassigned',
'project_id': 'unassigned',
'user_id': 'unassigned'
}
@property
def __redis_key(self):
""" Magic string for instance keys """
return 'instance:%s' % self.instance_id
def __repr__(self):
return "<Instance:%s>" % self.instance_id
def get(self, item, default):
return self.state.get(item, default)
def __getitem__(self, item):
return self.state[item]
def __setitem__(self, item, val):
self.state[item] = val
return self.state[item]
def __delitem__(self, item):
""" We don't support this """
raise Exception("Silly monkey, Instances NEED all their properties.")
def save(self):
""" update the directory with the state from this instance
make sure you've set the project_id and user_id before you call save
for the first time.
"""
# TODO(ja): implement hmset in redis-py and use it
# instead of multiple calls to hset
state = self.keeper[self.__redis_key]
if not state:
state = {}
for key, val in self.state.iteritems():
# if (not self.initial_state.has_key(key)
# or self.initial_state[key] != val):
state[key] = val
self.keeper[self.__redis_key] = state
if self.initial_state == {}:
self.keeper.set_add('project:%s:instances' % self.project,
self.instance_id)
self.keeper.set_add('instances', self.instance_id)
self.initial_state = self.state
return True
@property
def project(self):
if self.state.get('project_id', None):
return self.state['project_id']
return self.state.get('owner_id', 'unassigned')
def destroy(self):
""" deletes all related records from datastore.
does NOT do anything to running libvirt state.
"""
self.keeper.set_remove('project:%s:instances' % self.project,
self.instance_id)
del self.keeper[self.__redis_key]
self.keeper.set_remove('instances', self.instance_id)
return True
@property
def volumes(self):
""" returns a list of attached volumes """
pass
@property
def reservation(self):
""" Returns a reservation object """
pass
# class Reservation(object):
# """ ORM wrapper for a batch of launched instances """
# def __init__(self):
# pass
#
# def userdata(self):
# """ """
# pass
#
#
# class NodeDirectory(object):
# def __init__(self):
# pass
#
if __name__ == "__main__":
import doctest
doctest.testmod()
| {
"content_hash": "f5d6dba42da7c58b14fee68323562eb1",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 82,
"avg_line_length": 28.897435897435898,
"alnum_prop": 0.5811889973380656,
"repo_name": "jxta/cc",
"id": "2754e9e6d1627b235d4d20cc5ea20a586bdcf15b",
"size": "6289",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/compute/model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "315721"
},
{
"name": "Shell",
"bytes": "7870"
}
],
"symlink_target": ""
} |
from pdb import set_trace
from json import loads
from itertools import imap
from twitter_pb2 import *
tweets = Tweets()
with file('twitter.json', 'r') as f:
for line in imap(loads, f):
tweet = tweets.tweets.add()
tweet.is_delete = ('delete' in line)
if 'delete' in line:
status = line['delete']['status']
tweet.delete.id = status['id']
tweet.delete.uid = status['user_id']
else:
insert = tweet.insert
insert.uid = line['user']['id']
insert.truncated = line['truncated']
insert.text = line['text']
if line.get('in_reply_to_status_id', None):
insert.reply_to = line['in_reply_to_status_id']
insert.reply_to_name = line['in_reply_to_screen_name']
insert.id = line['id']
insert.favorite_count = line['favorite_count']
insert.source = line['source']
insert.retweeted = line['retweeted']
if line.get('possibly_sensitive', None):
insert.possibly_sensitive = line['possibly_sensitive']
insert.lang = line['lang']
insert.created_at = line['created_at']
if line.get('coordinates', None):
coords = line['coordinates']
insert.coord.lat = coords['coordinates'][0]
insert.coord.lon = coords['coordinates'][1]
insert.filter_level = line['filter_level']
if line.get('place', None):
place = line['place']
insert.place.url = place['url']
insert.place.country = place['country']
insert.place.country_code = place['country_code']
insert.place.place_type = place['place_type']
insert.place.id = place['id']
insert.place.name = place['name']
if place.get('bounding_box', None):
def add(pair):
coord = insert.place.bounding_box.add()
coord.lat = pair[0]
coord.lon = pair[1]
map(add, place['bounding_box']['coordinates'][0])
with file('twitter.pb', 'w') as f:
f.write(tweets.SerializeToString())
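# Hedged round-trip sketch using the standard protobuf API: the serialized
# file can be parsed back through the same generated Tweets class.
#
#   readback = Tweets()
#   with file('twitter.pb', 'r') as f:
#       readback.ParseFromString(f.read())
#   print len(readback.tweets)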
| {
"content_hash": "099a8bad2a8500a51d5b8a8563c4626d",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 62,
"avg_line_length": 34.06896551724138,
"alnum_prop": 0.6027327935222672,
"repo_name": "mitdbg/asciiclass",
"id": "7cf53d55fbfaf17834f1528f8b1bc9f2dacd4a0b",
"size": "2158",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "labs/lab2/encode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "486"
},
{
"name": "C#",
"bytes": "10857"
},
{
"name": "CSS",
"bytes": "1763"
},
{
"name": "Java",
"bytes": "5764"
},
{
"name": "JavaScript",
"bytes": "1009821"
},
{
"name": "Python",
"bytes": "76278"
},
{
"name": "Shell",
"bytes": "149"
}
],
"symlink_target": ""
} |
import os
import asyncio # noqa: F401
import discord
import logging
from discord.ext import commands
from cogs.utils.dataIO import dataIO
from cogs.utils import checks
log = logging.getLogger('red.CrossQuote')
class CrossQuote:
"""
Cross server quote by message ID.
"""
__author__ = "mikeshardmind"
__version__ = "2.1"
def __init__(self, bot):
self.bot = bot
self.settings = dataIO.load_json('data/crossquote/settings.json')
def save_json(self):
dataIO.save_json("data/crossquote/settings.json", self.settings)
@commands.group(name="crossquoteset",
pass_context=True, no_pm=True, hidden=True)
async def crossquoteset(self, ctx):
"""configuration settings for cross server quotes"""
if ctx.invoked_subcommand is None:
await self.bot.send_cmd_help(ctx)
@checks.admin_or_permissions(Manage_server=True)
@crossquoteset.command(name="bypasstoggle", pass_context=True, no_pm=True)
async def allow_without_permission(self, ctx):
"""allows people with manage server to allow users bypass needing
manage messages to quote from their server to others
bypass should be True or False. The default value is False"""
server = ctx.message.server
if server.id not in self.settings:
            await self.init_settings(server)
self.settings[server.id]['bypass'] = True
self.save_json()
else:
self.settings[server.id]['bypass'] = \
not self.settings[server.id]['bypass']
if self.settings[server.id]['bypass']:
await self.bot.say("Now anyone can quote from this server "
"if they can see the message")
else:
await self.bot.say("Quoting from this server again requires manage"
" messages")
@checks.is_owner()
@crossquoteset.command(name="init", hidden=True)
async def manual_init_settings(self):
"""adds default settings for all servers the bot is in
can be called manually by the bot owner (hidden)"""
serv_ids = map(lambda s: s.id, self.bot.servers)
for serv_id in serv_ids:
if serv_id not in self.settings:
self.settings[serv_id] = {'bypass': False,
'whitelisted': [],
'blacklisted': []
}
self.save_json()
async def init_settings(self, server=None):
"""adds default settings for all servers the bot is in
when needed and on join"""
if server:
if server.id not in self.settings:
self.settings[server.id] = {'bypass': False,
'whitelisted': [],
'blacklisted': []
}
self.save_json()
else:
serv_ids = map(lambda s: s.id, self.bot.servers)
for serv_id in serv_ids:
if serv_id not in self.settings:
self.settings[serv_id] = {'bypass': False,
'whitelisted': [],
'blacklisted': []
}
self.save_json()
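    # Illustrative shape of data/crossquote/settings.json produced by the
    # defaults above (the server id below is made up):
    #   {"123456789012345678": {"bypass": false,
    #                           "whitelisted": [],
    #                           "blacklisted": []}}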
@commands.command(pass_context=True, name='crosschanquote',
aliases=["ccq", "quote"])
async def _ccq(self, ctx, message_id: int):
"""
Quote someone with the message id.
To get the message id you need to enable developer mode.
"""
found = False
server = ctx.message.channel.server
if server.id not in self.settings:
await self.init_settings(server)
for channel in server.channels:
if not found:
try:
message = await self.bot.get_message(channel,
str(message_id))
if message:
found = True
except Exception as error:
log.debug("{}".format(error))
if found:
await self.sendifallowed(ctx.message.author,
ctx.message.channel, message)
else:
em = discord.Embed(description='I\'m sorry, I couldn\'t find '
'that message', color=discord.Color.red())
await self.bot.send_message(ctx.message.channel, embed=em)
@checks.is_owner()
@commands.command(pass_context=True, name="getmsgcontent", hidden=True)
async def _gmsgcontent(self, ctx, x: str):
"""debugging tool"""
message = await self.bot.get_message(ctx.message.channel, x)
if message:
await self.bot.say("```{}```".format(message.content))
await self.bot.say("```{}```".format(message.clean_content))
@commands.command(pass_context=True, name='crossservquote',
aliases=["csq", "crossquote"])
async def _csq(self, ctx, message_id: int):
"""
Quote someone with the message id.
To get the message id you need to enable developer mode.
"""
found = False
for server in self.bot.servers:
if server.id not in self.settings:
await self.init_settings(server)
for channel in server.channels:
if not found:
try:
message = await self.bot.get_message(channel,
str(message_id))
if message:
found = True
except Exception as error:
log.debug("{}".format(error))
if found:
if ctx.message.channel.server == message.channel.server:
em = discord.Embed(description='Using the cross server quote '
'is slow. Use cross channel quote for '
'messages on the same server.',
color=discord.Color.red())
await self.bot.send_message(ctx.message.author, embed=em)
await self.sendifallowed(ctx.message.author,
ctx.message.channel, message)
else:
em = discord.Embed(description='I\'m sorry, I couldn\'t find '
'that message', color=discord.Color.red())
await self.bot.send_message(ctx.message.channel, embed=em)
async def sendifallowed(self, who, where, message=None):
"""checks if a response should be sent
then sends the appropriate response"""
if message:
channel = message.channel
server = channel.server
            await self.init_settings(server)
perms_managechannel = channel.permissions_for(who).manage_messages
can_bypass = self.settings[server.id]['bypass']
source_is_dest = where.server.id == server.id
if perms_managechannel or can_bypass or source_is_dest:
em = self.qform(message)
else:
em = discord.Embed(description='You don\'t have '
'permission to quote from that server',
color=discord.Color.red())
else:
em = discord.Embed(description='I\'m sorry, I couldn\'t '
'find that message', color=discord.Color.red())
await self.bot.send_message(where, embed=em)
def qform(self, message):
channel = message.channel
server = channel.server
content = message.content
author = message.author
sname = server.name
cname = channel.name
avatar = author.avatar_url if author.avatar \
else author.default_avatar_url
footer = 'Said in {} #{}'.format(sname, cname)
em = discord.Embed(description=content, color=author.color,
timestamp=message.timestamp)
em.set_author(name='{}'.format(author.name), icon_url=avatar)
em.set_footer(text=footer)
if message.attachments:
a = message.attachments[0]
fname = a['filename']
url = a['url']
if fname.split('.')[-1] in ['png', 'jpg', 'gif', 'jpeg']:
em.set_image(url=url)
else:
em.add_field(name='Message has an attachment',
value='[{}]({})'.format(fname, url),
inline=True)
return em
def check_folder():
f = 'data/crossquote'
if not os.path.exists(f):
os.makedirs(f)
def check_file():
f = 'data/crossquote/settings.json'
if dataIO.is_valid_json(f) is False:
dataIO.save_json(f, {})
def setup(bot):
check_folder()
check_file()
n = CrossQuote(bot)
bot.add_listener(n.init_settings, "on_server_join")
bot.add_cog(n)
| {
"content_hash": "85cec241adfc32a20fec99e517d152b8",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 79,
"avg_line_length": 39.41025641025641,
"alnum_prop": 0.5227716330513988,
"repo_name": "Garcia1008/tournament",
"id": "9be39d64050cf44698f6c8e1a7dc28930792d2da",
"size": "9222",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crossquote/crossquote.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "177304"
}
],
"symlink_target": ""
} |
import numpy as np
import numpy.random
import matplotlib.pyplot as plt
import sys
import click
# Visualize the progression of an optimization score over iterations
# e.g. python scripts/optimizationviz.py --scores scores.txt --label test --outpath progression.png
@click.command()
@click.option('--scores', required=True, help='A file containing one "iteration score" pair per line.')
@click.option('--label', default="unlabeled", help='Label to place in title of plot.')
@click.option('--outpath', required=True, default="", help='Path to write the output figure.')
def optviz(scores, label, outpath):
"""Visualize a order comparison dotplot for two ordered lists of ids."""
x=[]
y=[]
#y2=[]
last = 0
    # Scan through the scores file
with open(scores, "r") as f:
for line in f:
arr = line.strip().split(" ")
if int(arr[0]) < int(last):
break
else:
last = arr[0]
x.append(int(arr[0]))
y.append(float(arr[1]))
#y2.append(float(arr[2]))
data = {}
id_count = 0
plt.clf()
plt.plot(x, y)
plt.title('Optimization score progression, ' + label)
frame = plt.gca()
plt.xlabel("Iteration")
plt.ylabel("Score")
plt.savefig(outpath)
if __name__ == '__main__':
optviz()
| {
"content_hash": "6cae9d0c4d9bdfe346ae521734b88b9f",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 149,
"avg_line_length": 28.306122448979593,
"alnum_prop": 0.6113914924297044,
"repo_name": "cb01/lxy",
"id": "aab8a50f1681e33d2aa92e14820ce7656ea7e312",
"size": "1387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/optimizationviz.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Go",
"bytes": "77016"
},
{
"name": "Python",
"bytes": "3696"
},
{
"name": "Shell",
"bytes": "8080"
}
],
"symlink_target": ""
} |
import pytest
from pandas.util._validators import (
validate_bool_kwarg,
validate_kwargs,
)
_fname = "func"
def test_bad_kwarg():
good_arg = "f"
bad_arg = good_arg + "o"
compat_args = {good_arg: "foo", bad_arg + "o": "bar"}
kwargs = {good_arg: "foo", bad_arg: "bar"}
msg = rf"{_fname}\(\) got an unexpected keyword argument '{bad_arg}'"
with pytest.raises(TypeError, match=msg):
validate_kwargs(_fname, kwargs, compat_args)
@pytest.mark.parametrize("i", range(1, 3))
def test_not_all_none(i):
bad_arg = "foo"
msg = (
rf"the '{bad_arg}' parameter is not supported "
rf"in the pandas implementation of {_fname}\(\)"
)
compat_args = {"foo": 1, "bar": "s", "baz": None}
kwarg_keys = ("foo", "bar", "baz")
kwarg_vals = (2, "s", None)
kwargs = dict(zip(kwarg_keys[:i], kwarg_vals[:i]))
with pytest.raises(ValueError, match=msg):
validate_kwargs(_fname, kwargs, compat_args)
def test_validation():
# No exceptions should be raised.
compat_args = {"f": None, "b": 1, "ba": "s"}
kwargs = {"f": None, "b": 1}
validate_kwargs(_fname, kwargs, compat_args)
@pytest.mark.parametrize("name", ["inplace", "copy"])
@pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0])
def test_validate_bool_kwarg_fail(name, value):
msg = (
f'For argument "{name}" expected type bool, '
f"received type {type(value).__name__}"
)
with pytest.raises(ValueError, match=msg):
validate_bool_kwarg(value, name)
@pytest.mark.parametrize("name", ["inplace", "copy"])
@pytest.mark.parametrize("value", [True, False, None])
def test_validate_bool_kwarg(name, value):
assert validate_bool_kwarg(value, name) == value
| {
"content_hash": "a6da48a2831abac55dacb792358a2fd6",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 73,
"avg_line_length": 26.59090909090909,
"alnum_prop": 0.6045584045584046,
"repo_name": "pandas-dev/pandas",
"id": "de49cdd5e247d69671229e1813a7b15a1feb4286",
"size": "1755",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "pandas/tests/util/test_validate_kwargs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "512"
},
{
"name": "C",
"bytes": "366145"
},
{
"name": "CSS",
"bytes": "1800"
},
{
"name": "Cython",
"bytes": "1186787"
},
{
"name": "Dockerfile",
"bytes": "1411"
},
{
"name": "HTML",
"bytes": "456531"
},
{
"name": "Python",
"bytes": "18778786"
},
{
"name": "Shell",
"bytes": "10369"
},
{
"name": "Smarty",
"bytes": "8486"
},
{
"name": "XSLT",
"bytes": "1196"
}
],
"symlink_target": ""
} |
import numpy as np
import math
import time
from numba import autojit
# - Start Inner Loop - #
# - bbeta float
# - nGridCapital: int64
# - gridCapitalNextPeriod: int64
# - mOutput: float (17820 x 5)
# - nProductivity: int64
# - vGridCapital: float (17820, )
# - mValueFunction: float (17820 x 5)
# - mPolicyFunction: float (17820 x 5)
@autojit
def innerloop(bbeta, nGridCapital, gridCapitalNextPeriod, mOutput, nProductivity, vGridCapital, expectedValueFunction, mValueFunction, mValueFunctionNew, mPolicyFunction):
for nCapital in xrange(nGridCapital):
valueHighSoFar = -100000.0
capitalChoice = vGridCapital[0]
for nCapitalNextPeriod in xrange(gridCapitalNextPeriod, nGridCapital):
consumption = mOutput[nCapital,nProductivity] - vGridCapital[nCapitalNextPeriod]
valueProvisional = (1-bbeta)*np.log(consumption)+bbeta*expectedValueFunction[nCapitalNextPeriod,nProductivity];
if valueProvisional > valueHighSoFar:
valueHighSoFar = valueProvisional
capitalChoice = vGridCapital[nCapitalNextPeriod]
gridCapitalNextPeriod = nCapitalNextPeriod
else:
break
mValueFunctionNew[nCapital,nProductivity] = valueHighSoFar
mPolicyFunction[nCapital,nProductivity] = capitalChoice
return mValueFunctionNew, mPolicyFunction
def main_func():
# 1. Calibration
aalpha = 1.0/3.0 # Elasticity of output w.r.t. capital
bbeta = 0.95 # Discount factor
# Productivity values
vProductivity = np.array([0.9792, 0.9896, 1.0000, 1.0106, 1.0212],float)
# Transition matrix
mTransition = np.array([[0.9727, 0.0273, 0.0000, 0.0000, 0.0000],
[0.0041, 0.9806, 0.0153, 0.0000, 0.0000],
[0.0000, 0.0082, 0.9837, 0.0082, 0.0000],
[0.0000, 0.0000, 0.0153, 0.9806, 0.0041],
[0.0000, 0.0000, 0.0000, 0.0273, 0.9727]],float)
## 2. Steady State
capitalSteadyState = (aalpha*bbeta)**(1/(1-aalpha))
outputSteadyState = capitalSteadyState**aalpha
consumptionSteadyState = outputSteadyState-capitalSteadyState
print "Output = ", outputSteadyState, " Capital = ", capitalSteadyState, " Consumption = ", consumptionSteadyState
# We generate the grid of capital
vGridCapital = np.arange(0.5*capitalSteadyState,1.5*capitalSteadyState,0.00001)
nGridCapital = len(vGridCapital)
nGridProductivity = len(vProductivity)
## 3. Required matrices and vectors
mOutput = np.zeros((nGridCapital,nGridProductivity),dtype=float)
mValueFunction = np.zeros((nGridCapital,nGridProductivity),dtype=float)
mValueFunctionNew = np.zeros((nGridCapital,nGridProductivity),dtype=float)
mPolicyFunction = np.zeros((nGridCapital,nGridProductivity),dtype=float)
expectedValueFunction = np.zeros((nGridCapital,nGridProductivity),dtype=float)
# 4. We pre-build output for each point in the grid
for nProductivity in range(nGridProductivity):
mOutput[:,nProductivity] = vProductivity[nProductivity]*(vGridCapital**aalpha)
## 5. Main iteration
maxDifference = 10.0
tolerance = 0.0000001
iteration = 0
log = math.log
zeros = np.zeros
dot = np.dot
while(maxDifference > tolerance):
expectedValueFunction = dot(mValueFunction,mTransition.T)
for nProductivity in xrange(nGridProductivity):
# We start from previous choice (monotonicity of policy function)
gridCapitalNextPeriod = 0
# - Start Inner Loop - #
mValueFunctionNew, mPolicyFunction = innerloop(bbeta, nGridCapital, gridCapitalNextPeriod, mOutput, nProductivity, vGridCapital, expectedValueFunction, mValueFunction, mValueFunctionNew, mPolicyFunction)
# - End Inner Loop - #
maxDifference = (abs(mValueFunctionNew-mValueFunction)).max()
mValueFunction = mValueFunctionNew
mValueFunctionNew = zeros((nGridCapital,nGridProductivity),dtype=float)
iteration += 1
if(iteration%10 == 0 or iteration == 1):
print " Iteration = ", iteration, ", Sup Diff = ", maxDifference
return (maxDifference, iteration, mValueFunction, mPolicyFunction)
if __name__ == '__main__':
# - Start Timer - #
t1=time.time()
# - Call Main Function - #
maxDiff, iterate, mValueF, mPolicyFunction = main_func()
# - End Timer - #
t2 = time.time()
print " Iteration = ", iterate, ", Sup Duff = ", maxDiff
print " "
print " My Check = ", mPolicyFunction[1000-1,3-1]
print " "
print "Elapse time = is ", t2-t1 | {
"content_hash": "95f03151f0ce563573b1ed37fc219e5f",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 215,
"avg_line_length": 37.63358778625954,
"alnum_prop": 0.6350912778904665,
"repo_name": "lexu1upenn/Comparison-Programming-Languages-Economics",
"id": "ef3087da4b325c14ea422766752521a10fcd9098",
"size": "5046",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "RBC_Python_Numba.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "16943"
},
{
"name": "C#",
"bytes": "6034"
},
{
"name": "C++",
"bytes": "18766"
},
{
"name": "FORTRAN",
"bytes": "5528"
},
{
"name": "HTML",
"bytes": "135"
},
{
"name": "Java",
"bytes": "7535"
},
{
"name": "JavaScript",
"bytes": "6230"
},
{
"name": "Julia",
"bytes": "3603"
},
{
"name": "Mathematica",
"bytes": "11777"
},
{
"name": "Matlab",
"bytes": "6962"
},
{
"name": "Python",
"bytes": "12944"
},
{
"name": "R",
"bytes": "9411"
},
{
"name": "Swift",
"bytes": "5669"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
def fill_years(apps, schema_editor):
AcademicYear = apps.get_model('schools', 'AcademicYear')
for year in AcademicYear.objects.all():
year.from_year, year.to_year = year.name.split('-')
year.save()
def empty_years(apps, schema_editor):
AcademicYear = apps.get_model('schools', 'AcademicYear')
for year in AcademicYear.objects.all():
year.from_year, year.to_year = '', ''
year.save()
class Migration(migrations.Migration):
dependencies = [
('schools', '0005_auto_20150107_1605'),
]
operations = [
migrations.RunPython(
fill_years, # forward
empty_years # backward
)
]
| {
"content_hash": "87a4762fd6df5183287cf160f9c9602c",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 60,
"avg_line_length": 24.870967741935484,
"alnum_prop": 0.6199740596627756,
"repo_name": "klpdotorg/dubdubdub",
"id": "0e08b8d81580c2253880afd11bc904be9965fadc",
"size": "795",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "apps/schools/migrations/0006_auto_20150107_1605.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "478"
},
{
"name": "CSS",
"bytes": "335110"
},
{
"name": "HTML",
"bytes": "655218"
},
{
"name": "JavaScript",
"bytes": "1941014"
},
{
"name": "PLpgSQL",
"bytes": "156345"
},
{
"name": "Python",
"bytes": "920256"
},
{
"name": "Shell",
"bytes": "10544"
}
],
"symlink_target": ""
} |
__author__ = 'patelm'
import logging
import importlib
# key name that the PluginManager will use
DATASTORE_KEY = 'datastore'
DRIVER_KEY = 'driver'
RESOURCE_LOCATOR_KEY = 'resource-locator'
# There are the required methods for each plugin type.
# The PluginManager will check to make sure any plugin being loaded has the
# required attributes prior to instantiating the implementation
required_attributes = {DATASTORE_KEY: ['add_backend'],
DRIVER_KEY: ['update'],
RESOURCE_LOCATOR_KEY: ['get_resources']}
# todo lots of exception handling.
class PluginManager():
# plugin cache
plugins = {}
def __init__(self, config):
logging.debug('initializing plugins ')
self._load_plugins(config)
def get_datastore(self):
"""Simple 'getter'
:return: The loaded Datastore plugin implementation
:rtype: elasticd.plugins.Datastore
"""
return self.plugins[DATASTORE_KEY]
def get_driver(self):
"""Simple 'getter'
:return: The loaded Driver plugin implementation
:rtype: elasticd.plugins.Driver
"""
return self.plugins[DRIVER_KEY]
def get_resource_locator(self):
"""Simple 'getter'
:return: The loaded ResourceLocator plugin implementation
:rtype: elasticd.plugins.ResourceLocator
"""
return self.plugins[RESOURCE_LOCATOR_KEY]
def _load_plugins(self, config):
self._load_plugin(DATASTORE_KEY, config)
self._load_plugin(DRIVER_KEY, config)
self._load_plugin(RESOURCE_LOCATOR_KEY, config)
def _load_plugin(self, plugin_type, config):
logging.debug('Loading %s' % plugin_type)
module_name = config.get(plugin_type, 'module_name')
plugin_class = config.get(plugin_type, 'plugin_class')
# Load the module and get a handle to the class definition.
module = importlib.import_module(module_name)
plugin_class = getattr(module, plugin_class)
# Validate the class definition is correct.
if self._plugin_is_valid(plugin_class, required_attributes[plugin_type]):
# Instantiate the class and cache it within this plugin manager
self.plugins[plugin_type] = plugin_class(config)
@staticmethod
def _plugin_is_valid(plugin, _required_attributes):
"""Check the plugin implementation to be loaded is valid by looking for the required attributes
:param plugin: The implementation to be loaded
:param _required_attributes: The attributes required for this implementation
:return: True the implementation of the plugin is valid; False otherwise
:rtype: bool
"""
valid = True
for attribute in _required_attributes:
if hasattr(plugin, attribute):
valid = True
else:
return False
return valid
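# Hedged configuration sketch: PluginManager expects a ConfigParser-style
# object with one section per plugin type; the module and class names below
# are hypothetical placeholders.
#
#   [datastore]
#   module_name = myplugins.datastore
#   plugin_class = MyDatastore
#
#   [driver]
#   module_name = myplugins.driver
#   plugin_class = MyDriver
#
#   [resource-locator]
#   module_name = myplugins.locator
#   plugin_class = MyLocator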
| {
"content_hash": "1a2b6ac4ec8b8c67194e34f75f3b496a",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 103,
"avg_line_length": 33.39772727272727,
"alnum_prop": 0.6457978904389248,
"repo_name": "SMxJrz/Elasticd",
"id": "11090f1676d9f0706f908bb4e15e28adfa7fc6fe",
"size": "2939",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "elasticd/plugin_manager.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "552"
},
{
"name": "Python",
"bytes": "18226"
}
],
"symlink_target": ""
} |
import sys, os.path
from boto.s3.connection import S3Connection
from boto.s3.key import Key
import os
import mimetypes
import gzip
import tempfile
import shutil
from optparse import OptionParser
import re
from urllib import quote_plus
from urllib2 import urlopen
from s3config import S3CONFIG
excludes = r'|'.join([r'.*\.git$'])
def deploy_to_s3(directory, bucket_name, key_id, key):
"""
    Deploy the contents of a directory to an s3 bucket.
"""
directory = directory.rstrip('/')
connection = S3Connection(key_id, key)
bucket = connection.get_bucket(bucket_name)
tempdir = tempfile.mkdtemp('s3deploy')
for keyname, absolute_path in find_file_paths(directory):
s3_upload(connection, keyname, absolute_path, bucket, bucket_name, tempdir)
shutil.rmtree(tempdir, True)
return True
def s3_upload(connection, keyname, absolute_path, bucket, bucket_name, tempdir):
"""
Upload a file to s3
"""
bucket = connection.get_bucket(bucket)
mimetype = mimetypes.guess_type(absolute_path)
options = {'Content-Type': mimetype[0]}
# There's a possible race condition if files have the same name
if mimetype[0] is not None and mimetype[0].startswith('text/'):
upload = open(absolute_path)
options['Content-Encoding'] = 'gzip'
key_parts = keyname.split('/')
filename = key_parts.pop()
temp_path = os.path.join(tempdir, filename)
gzfile = gzip.open(temp_path, 'wb')
gzfile.write(upload.read())
gzfile.close()
absolute_path = temp_path
k = Key(bucket)
k.key = keyname
print "Uploading %s" % keyname
k.set_contents_from_filename(absolute_path, options, policy='public-read')
if not keyname.startswith('bootstrap/') and keyname.endswith('.html'):
param = "http://%s/%s?fbrefresh=CANBEANYTHING" % (bucket_name, keyname)
print "Refreshing Facebook info for: %s" % param
fb_url = "http://developers.facebook.com/tools/debug/og/object?q=%s" % quote_plus(param)
urlopen(fb_url)
def find_file_paths(directory):
"""
A generator function that recursively finds all files in the upload directory.
"""
for root, dirs, files in os.walk(directory, topdown=True):
dirs[:] = [d for d in dirs if not re.match(excludes, d)]
rel_path = os.path.relpath(root, directory)
for f in files:
if f.startswith('.'):
continue
if rel_path == '.':
yield (f, os.path.join(root, f))
else:
yield (os.path.join(rel_path, f), os.path.join(root, f))
def parse_args():
parser = OptionParser()
parser.add_option("-d", "--dir", dest="dir", action="store", default="_out",
help="Specify the directory which should be copied to the remote bucket. Default '_out'")
parser.add_option("-b", "--bucket", dest="bucket", action="store", default=None,
help="Specify the S3 bucket to which the files should be deployed")
(options, args) = parser.parse_args()
return options
if __name__ == '__main__':
opts = parse_args()
bucket = S3CONFIG[opts.bucket]
print "Deploying to %s" % bucket['bucket']
deploy_to_s3(opts.dir, bucket['bucket'], bucket['key_id'], bucket['key'])
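# Hedged usage sketch: the bucket alias must be a key in s3config.S3CONFIG
# mapping to a dict with 'bucket', 'key_id' and 'key'; the alias below is
# hypothetical.
#
#   python s3deploy.py --dir _out --bucket staging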
| {
"content_hash": "689b854bd9041e0da0aee34087cbc31c",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 111,
"avg_line_length": 34.635416666666664,
"alnum_prop": 0.6369924812030076,
"repo_name": "newsapps/tarbell-0.8",
"id": "1698c1a8c2fbefb8478c8133be90d4a14f75f0f6",
"size": "3325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "s3deploy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "151986"
},
{
"name": "JavaScript",
"bytes": "78211"
},
{
"name": "Python",
"bytes": "14008"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import logging
import sys
import textwrap
from collections import OrderedDict
from pip.basecommand import Command, SUCCESS
from pip.download import PipXmlrpcTransport
from pip.models import PyPI
from pip.utils import get_terminal_size
from pip.utils.logging import indent_log
from pip.exceptions import CommandError
from pip.status_codes import NO_MATCHES_FOUND
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor import pkg_resources
from pip._vendor.six.moves import xmlrpc_client
logger = logging.getLogger(__name__)
class SearchCommand(Command):
"""Search for PyPI packages whose name or summary contains <query>."""
name = 'search'
usage = """
%prog [options] <query>"""
summary = 'Search PyPI for packages.'
def __init__(self, *args, **kw):
super(SearchCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(
'-i', '--index',
dest='index',
metavar='URL',
default=PyPI.pypi_url,
help='Base URL of Python Package Index (default %default)')
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
if not args:
raise CommandError('Missing required argument (search query).')
query = args
pypi_hits = self.search(query, options)
hits = transform_hits(pypi_hits)
terminal_width = None
if sys.stdout.isatty():
terminal_width = get_terminal_size()[0]
print_results(hits, terminal_width=terminal_width)
if pypi_hits:
return SUCCESS
return NO_MATCHES_FOUND
def search(self, query, options):
index_url = options.index
with self._build_session(options) as session:
transport = PipXmlrpcTransport(index_url, session)
pypi = xmlrpc_client.ServerProxy(index_url, transport)
hits = pypi.search({'name': query, 'summary': query}, 'or')
return hits
def transform_hits(hits):
"""
The list from pypi is really a list of versions. We want a list of
packages with the list of versions stored inline. This converts the
list from pypi into one we can use.
"""
packages = OrderedDict()
for hit in hits:
name = hit['name']
summary = hit['summary']
version = hit['version']
        if name not in packages:
packages[name] = {
'name': name,
'summary': summary,
'versions': [version],
}
else:
packages[name]['versions'].append(version)
            # if this is the highest version, replace the stored summary
if version == highest_version(packages[name]['versions']):
packages[name]['summary'] = summary
return list(packages.values())
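# Sketch of the reshaping performed by transform_hits (hypothetical data, not a
# real PyPI response): the XML-RPC API returns one entry per release, and the
# function folds them into one entry per project, keeping the summary that
# belongs to the highest version.
#
#     hits = [{'name': 'foo', 'summary': 'old', 'version': '1.0'},
#             {'name': 'foo', 'summary': 'new', 'version': '2.0'}]
#     transform_hits(hits)
#     # -> [{'name': 'foo', 'summary': 'new', 'versions': ['1.0', '2.0']}]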
def print_results(hits, name_column_width=None, terminal_width=None):
if not hits:
return
if name_column_width is None:
name_column_width = max([
len(hit['name']) + len(hit.get('versions', ['-'])[-1])
for hit in hits
]) + 4
installed_packages = [p.project_name for p in pkg_resources.working_set]
for hit in hits:
name = hit['name']
summary = hit['summary'] or ''
version = hit.get('versions', ['-'])[-1]
if terminal_width is not None:
target_width = terminal_width - name_column_width - 5
if target_width > 10:
# wrap and indent summary to fit terminal
summary = textwrap.wrap(summary, target_width)
summary = ('\n' + ' ' * (name_column_width + 3)).join(summary)
line = '%-*s - %s' % (name_column_width,
'%s (%s)' % (name, version), summary)
try:
logger.info(line)
if name in installed_packages:
dist = pkg_resources.get_distribution(name)
with indent_log():
latest = highest_version(hit['versions'])
if dist.version == latest:
logger.info('INSTALLED: %s (latest)', dist.version)
else:
logger.info('INSTALLED: %s', dist.version)
logger.info('LATEST: %s', latest)
except UnicodeEncodeError:
pass
def highest_version(versions):
return max(versions, key=parse_version)
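# For example, highest_version(['1.0', '10.0', '2.0']) returns '10.0':
# parse_version applies PEP 440 ordering, so '10.0' outranks '2.0' even though
# it sorts earlier as a plain string.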
| {
"content_hash": "83b7cbabca7e1fe5221a9d73bc5565cf",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 78,
"avg_line_length": 33.61194029850746,
"alnum_prop": 0.5825932504440497,
"repo_name": "fiber-space/pip",
"id": "a9cf8c93218e903da88bcb7c76d11dcfc30ed349",
"size": "4504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pip/commands/search.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2342"
},
{
"name": "Python",
"bytes": "973048"
},
{
"name": "Shell",
"bytes": "1885"
}
],
"symlink_target": ""
} |
import io
import pytest
from freezegun import freeze_time
from FireEyeCM import *
from test_data.result_constants import QUARANTINED_EMAILS_CONTEXT, GET_ALERTS_CONTEXT, GET_ALERTS_DETAILS_CONTEXT, \
GET_ARTIFACTS_METADATA_CONTEXT, GET_EVENTS_CONTEXT
def util_load_json(path):
with io.open(path, mode='r', encoding='utf-8') as f:
return json.loads(f.read())
def test_get_alerts(mocker):
"""Unit test
Given
- get_alerts command
- command args
- command raw response
When
- mock the Client's token generation.
- mock the Client's get_alerts_request response.
Then
- Validate The entry context
"""
mocker.patch.object(FireEyeClient, '_get_token', return_value='token')
client = Client(base_url="https://fireeye.cm.com/", username='user', password='pass', verify=False, proxy=False)
mocker.patch.object(FireEyeClient, 'get_alerts_request',
return_value=util_load_json('test_data/get_alerts.json'))
command_results = get_alerts(client=client,
args={'limit': '2', 'start_time': '8 days', 'src_ip': '2.2.2.2'})
assert command_results.outputs == GET_ALERTS_CONTEXT
def test_get_alert_details(mocker):
"""Unit test
Given
- get_alert_details command
- command args
- command raw response
When
- mock the Client's token generation.
- mock the Client's get_alert_details_request response.
Then
- Validate The entry context
"""
mocker.patch.object(FireEyeClient, '_get_token', return_value='token')
client = Client(base_url="https://fireeye.cm.com/", username='user', password='pass', verify=False, proxy=False)
mocker.patch.object(FireEyeClient, 'get_alert_details_request',
return_value=util_load_json('test_data/get_alert_details.json'))
command_results = get_alert_details(client=client, args={'alert_id': '563'})
assert command_results[0].outputs == GET_ALERTS_DETAILS_CONTEXT
def test_alert_acknowledge(mocker):
"""Unit test
Given
- alert_acknowledge command
- command args
- command raw response
When
- mock the Client's token generation.
- mock the Client's alert_acknowledge_request response.
Then
- Validate the human readable
"""
mocker.patch.object(FireEyeClient, '_get_token', return_value='token')
client = Client(base_url="https://fireeye.cm.com/", username='user', password='pass', verify=False, proxy=False)
mocker.patch.object(FireEyeClient, 'alert_acknowledge_request', return_value=None)
command_results = alert_acknowledge(client=client, args={'uuid': 'uuid'})
assert command_results[0].readable_output == 'Alert uuid was acknowledged successfully.'
def test_alert_acknowledge_already_acknowledged(mocker):
"""Unit test
Given
- alert_acknowledge command
- command args
- command raw response
When
- mock the Client's token generation.
- mock the Client's alert_acknowledge_request response for an already acknowledged alert.
Then
- Validate the human readable
"""
error_msg = 'Error in API call [404] - Not Found' \
'{"fireeyeapis": {"@version": "v2.0.0", "description": "Alert not found or cannot update.' \
' code:ALRTCONF008", "httpStatus": 404, "message": "Alert not found or cannot update"}}'
def error_404_mock(*kwargs):
raise Exception(error_msg)
mocker.patch.object(FireEyeClient, '_get_token', return_value='token')
client = Client(base_url="https://fireeye.cm.com/", username='user', password='pass', verify=False, proxy=False)
mocker.patch('FireEyeCM.FireEyeClient.alert_acknowledge_request', side_effect=error_404_mock)
command_results = alert_acknowledge(client=client, args={'uuid': 'uuid'})
assert command_results[0].readable_output == \
'Alert uuid was not found or cannot update. It may have been acknowledged in the past.'
def test_get_artifacts_metadata(mocker):
"""Unit test
Given
- get_artifacts_metadata_by_uuid command
- command args
- command raw response
When
- mock the Client's token generation.
    - mock the Client's get_artifacts_metadata_by_uuid_request response.
Then
- Validate The entry context
"""
mocker.patch.object(FireEyeClient, '_get_token', return_value='token')
client = Client(base_url="https://fireeye.cm.com/", username='user', password='pass', verify=False, proxy=False)
mocker.patch.object(FireEyeClient, 'get_artifacts_metadata_by_uuid_request',
return_value=util_load_json('test_data/get_artifact_metadata.json'))
command_results = get_artifacts_metadata_by_uuid(client=client, args={'uuid': 'uuid'})
assert command_results[0].outputs == GET_ARTIFACTS_METADATA_CONTEXT
def test_get_quarantined_emails(mocker):
"""Unit test
Given
- get_quarantined_emails command
- command args
- command raw response
When
- mock the Client's token generation.
- mock the Client's get_quarantined_emails_request response.
Then
- Validate The entry context
"""
mocker.patch.object(FireEyeClient, '_get_token', return_value='token')
client = Client(base_url="https://fireeye.cm.com/", username='user', password='pass', verify=False, proxy=False)
mocker.patch.object(FireEyeClient, 'get_quarantined_emails_request',
return_value=util_load_json('test_data/quarantined_emails.json'))
command_results = get_quarantined_emails(client=client, args={})
assert command_results.outputs == QUARANTINED_EMAILS_CONTEXT
def test_get_report_not_found(mocker):
"""Unit test
Given
- get_reports command
- command args
- command raw response
When
- mock the Client's token generation.
- mock the Client's get_reports_request response for a non found report.
Then
- Validate the human readable
"""
error_msg = 'Error in API call [400] - Bad Request ' \
'{"fireeyeapis": {"@version": "v2.0.0", "description": "WSAPI_REPORT_ALERT_NOT_FOUND.' \
' code:WSAPI_WITH_ERRORCODE_2016", "httpStatus": 400,' \
' "message": "parameters{infection_id=34013; infection_type=malware-callback}"}}'
def error_400_mock(*kwargs):
raise Exception(error_msg)
mocker.patch.object(FireEyeClient, '_get_token', return_value='token')
client = Client(base_url="https://fireeye.cm.com/", username='user', password='pass', verify=False, proxy=False)
mocker.patch('FireEyeCM.FireEyeClient.get_reports_request', side_effect=error_400_mock)
command_results = get_reports(client=client, args={'report_type': 'alertDetailsReport', 'infection_id': '34013',
                                                       'infection_type': 'malware-callback'})
assert command_results.readable_output == 'Report alertDetailsReport was not found with the given arguments.'
def test_get_events_no_events(mocker):
"""Unit test
Given
- get_events command
- command args
- command raw response
When
- mock the Client's token generation.
- mock the Client's get_events_request response for no events.
Then
- Validate the human readable
"""
mocker.patch.object(FireEyeClient, '_get_token', return_value='token')
client = Client(base_url="https://fireeye.cm.com/", username='user', password='pass', verify=False, proxy=False)
mocker.patch.object(FireEyeClient, 'get_events_request',
return_value=util_load_json('test_data/get_events_none.json'))
command_results = get_events(client=client, args={'end_time': '2020-05-19T23:00:00.000-00:00',
'duration': '48_hours', 'limit': '3'})
assert command_results.readable_output == 'No events in the given timeframe were found.'
def test_get_events(mocker):
"""Unit test
Given
- get_events command
- command args
- command raw response
When
- mock the Client's token generation.
- mock the Client's get_events_request response.
Then
- Validate The entry context
"""
mocker.patch.object(FireEyeClient, '_get_token', return_value='token')
client = Client(base_url="https://fireeye.cm.com/", username='user', password='pass', verify=False, proxy=False)
mocker.patch.object(FireEyeClient, 'get_events_request',
return_value=util_load_json('test_data/get_events.json'))
command_results = get_events(client=client, args={'end_time': '2021-05-19T23:00:00.000-00:00',
'duration': '48_hours', 'limit': '3'})
assert command_results.outputs == GET_EVENTS_CONTEXT
def test_release_quarantined_emails(mocker):
"""Unit test
Given
- release_quarantined_emails command
- command args
- command raw response
When
- mock the Client's token generation.
- mock the Client's release_quarantined_emails_request response.
Then
- Validate that an error is raised from the command
"""
def mocked_release_quarantined_emails_requests(*args):
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return self.json_data
def text(self):
return '1234'
return MockResponse({"1234": "Unable to release the email:quarantined email does not exist\\n"}, 200)
mocker.patch.object(FireEyeClient, '_get_token', return_value='token')
client = Client(base_url="https://fireeye.cm.com/", username='user', password='pass', verify=False, proxy=False)
mocker.patch.object(FireEyeClient, 'release_quarantined_emails_request',
side_effect=mocked_release_quarantined_emails_requests)
with pytest.raises(DemistoException):
release_quarantined_emails(client=client, args={'sensor_name': 'FireEyeEX', 'queue_ids': '1234'})
def test_delete_quarantined_emails(mocker):
"""Unit test
Given
- delete_quarantined_emails command
- command args
- command raw response
When
- mock the Client's token generation.
- mock the Client's delete_quarantined_emails_request response.
Then
- Validate that an error is raised from the command
"""
def mocked_delete_quarantined_emails_requests(*args):
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return self.json_data
def text(self):
return '1234'
return MockResponse({"1234": "Unable to delete the email:quarantined email does not exist\\n"}, 200)
mocker.patch.object(FireEyeClient, '_get_token', return_value='token')
client = Client(base_url="https://fireeye.cm.com/", username='user', password='pass', verify=False, proxy=False)
mocker.patch.object(FireEyeClient, 'delete_quarantined_emails_request',
side_effect=mocked_delete_quarantined_emails_requests)
with pytest.raises(DemistoException):
delete_quarantined_emails(client=client, args={'sensor_name': 'FireEyeEX', 'queue_ids': '1234'})
def test_fetch_incidents(mocker):
"""Unit test
Given
- fetch incidents command
- command args
- command raw response
When
- mock the Client's token generation.
- mock the Client's get_alerts_request.
Then
- run the fetch incidents command using the Client
Validate The length of the results and the last_run.
"""
mocker.patch.object(FireEyeClient, '_get_token', return_value='token')
client = Client(base_url="https://fireeye.cm.com/", username='user', password='pass', verify=False, proxy=False)
mocker.patch.object(FireEyeClient, 'get_alerts_request', return_value=util_load_json('test_data/alerts.json'))
last_run, incidents = fetch_incidents(client=client,
last_run={},
first_fetch='1 year',
max_fetch=50,
info_level='concise')
assert len(incidents) == 11
assert last_run.get('time') == '2021-05-18 12:02:54 +0000' # occurred time of the last alert
def test_fetch_incidents_with_limit(mocker):
"""Unit test
Given
- fetch incidents command
- command args with a harsh limit
- command raw response
When
- mock the Client's token generation.
- mock the Client's get_alerts_request.
Then
- run the fetch incidents command using the Client
Validate The length of the results and the last_run of the limited incident.
"""
mocker.patch.object(FireEyeClient, '_get_token', return_value='token')
client = Client(base_url="https://fireeye.cm.com/", username='user', password='pass', verify=False, proxy=False)
mocker.patch.object(FireEyeClient, 'get_alerts_request', return_value=util_load_json('test_data/alerts.json'))
last_run, incidents = fetch_incidents(client=client,
last_run={},
first_fetch='1 year',
max_fetch=5,
info_level='concise')
assert len(incidents) == 5
assert last_run.get('time') == '2021-05-18 05:04:36 +0000' # occurred time of the last alert
def test_fetch_incidents_last_alert_ids(mocker):
"""Unit test
Given
- fetch incidents command
- command args
- command raw response
When
- mock the Client's token generation.
- mock the last_event_alert_ids
- mock the Client's get_alerts_request.
Then
- Validate that no incidents will be returned.
- Validate that the last_run is pushed in two days from the latest incident fetched
"""
mocker.patch.object(FireEyeClient, '_get_token', return_value='token')
client = Client(base_url="https://fireeye.cm.com/", username='user', password='pass', verify=False, proxy=False)
mocker.patch.object(FireEyeClient, 'get_alerts_request', return_value=util_load_json('test_data/alerts.json'))
last_run_time = '2021-05-18T12:02:54+00:00'
next_run_time = (dateparser.parse(last_run_time[:-6]) + timedelta(hours=48)).isoformat()
last_alert_ids = '["35267", "35268", "35269", "35272", "35273", "35274", "35275", "35276", "35277", "35278", ' \
'"35279"]'
last_run = {
'time': last_run_time,
'last_alert_ids': last_alert_ids
}
next_run, incidents = fetch_incidents(client=client,
last_run=last_run,
first_fetch='1 year',
max_fetch=50,
info_level='concise')
assert len(incidents) == 0
    # trim milliseconds to avoid glitches such as 2021-05-19T10:21:52.121+00:00 != 2021-05-19T10:21:52.123+00:00
assert next_run.get('time')[:-6] == next_run_time
assert next_run.get('last_alert_ids') == last_alert_ids
# We freeze the time since we are using dateparser.parse('now') in the fetch incidents
@freeze_time('2021-02-15T17:10:00+00:00')
def test_fetch_incidents_no_alerts(mocker):
"""Unit test
Given
- Current time is 2021-02-15 17:10:00 +00:00
- Fetch incidents command is called
When
- No results returned from the search for the start_time = 2021-02-14 17:01:14 +00:00 (no new alerts created until
now)
Then
- Validate that no incident will be returned.
- Validate that the last_run is set to the current time minus ten minutes (2021-02-15 17:00:00 +00:00)
- Validate last_alert_ids is reset to empty list
"""
mocker.patch.object(FireEyeClient, '_get_token', return_value='token')
client = Client(base_url="https://fireeye.cm.com/", username='user', password='pass', verify=False, proxy=False)
mocker.patch.object(FireEyeClient, 'get_alerts_request', return_value=util_load_json('test_data/no_alerts.json'))
last_run_time = '2021-02-14T17:01:14+00:00'
last_alert_ids = '["1", "2", "3", "4", "5"]'
last_run = {
'time': last_run_time,
'last_alert_ids': last_alert_ids
}
next_run, incidents = fetch_incidents(client=client,
last_run=last_run,
first_fetch='1 year',
max_fetch=50,
info_level='concise')
assert len(incidents) == 0
assert next_run.get('time') == '2021-02-15T17:00:00+00:00'
assert next_run.get('last_alert_ids') == []
| {
"content_hash": "f68e92930833da2cc678c8205590a962",
"timestamp": "",
"source": "github",
"line_count": 408,
"max_line_length": 118,
"avg_line_length": 41.5735294117647,
"alnum_prop": 0.633769602641198,
"repo_name": "VirusTotal/content",
"id": "8e7350da10a1e1757259909fed04f6deb09f4e37",
"size": "16962",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/FireEyeCM/Integrations/FireEyeCM/FireEyeCM_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47594464"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
} |
"""
Tests for L{nevow._flat}.
"""
import sys, traceback
from zope.interface import implements
from twisted.trial.unittest import TestCase
from twisted.internet.defer import Deferred, succeed
from nevow.inevow import IRequest, IQ, IRenderable, IData
from nevow._flat import FlattenerError, UnsupportedType, UnfilledSlot
from nevow._flat import flatten, deferflatten
from nevow.tags import Proto, Tag, slot, raw, xml
from nevow.tags import invisible, br, div, directive
from nevow.entities import nbsp
from nevow.url import URL
from nevow.rend import Fragment
from nevow.loaders import stan
from nevow.flat import flatten as oldFlatten, precompile as oldPrecompile
from nevow.flat.ten import registerFlattener
from nevow.testutil import FakeRequest
from nevow.context import WovenContext
# Use the co_filename mechanism (instead of the __file__ mechanism) because
# it is the mechanism traceback formatting uses. The two do not necessarily
# agree with each other. This requires a code object compiled in this file.
# The easiest way to get a code object is with a new function. I'll use a
# lambda to avoid adding anything else to this namespace. The result will
# be a string which agrees with the one the traceback module will put into a
# traceback for frames associated with functions defined in this file.
HERE = (lambda: None).func_code.co_filename
class TrivialRenderable(object):
"""
An object which renders to a parameterized value.
@ivar result: The object which will be returned by the render method.
@ivar requests: A list of all the objects passed to the render method.
"""
implements(IRenderable)
def __init__(self, result):
self.result = result
self.requests = []
def render(self, request):
"""
Give back the canned response and record the given request.
"""
self.requests.append(request)
return self.result
class RenderRenderable(object):
"""
An object which renders to a parameterized value and has a render method
which records how it is invoked.
@ivar renders: A list of two-tuples giving the arguments the render method
has been invoked with.
@ivar name: A string giving the name of the render method.
@ivar tag: The value which will be returned from C{render}.
@ivar result: The value which will be returned from the render method.
"""
implements(IRenderable)
def __init__(self, renders, name, tag, result):
self.renders = renders
self.name = name
self.tag = tag
self.result = result
def renderer(self, name):
if name == self.name:
return self.renderMethod
raise ValueError("Invalid renderer name")
def renderMethod(self, request, tag):
self.renders.append((self, request))
return self.result
def render(self, request):
return self.tag
class FlattenMixin:
"""
Helper defining an assertion method useful for flattener tests.
"""
def assertStringEqual(self, value, expected):
"""
Assert that the given value is a C{str} instance and that it equals the
expected value.
"""
self.assertTrue(isinstance(value, str))
self.assertEqual(value, expected)
class FlattenTests(TestCase, FlattenMixin):
"""
Tests for L{nevow._flat.flatten}.
"""
def flatten(self, root, request=None):
"""
Helper to get a string from L{flatten}.
"""
result = []
# This isn't something shorter because this way is nicer to look at in
# a debugger.
for part in flatten(request, root, False, False):
result.append(part)
return "".join(result)
def test_unflattenable(self):
"""
Flattening an object which references an unflattenable object fails
with L{FlattenerError} which gives the flattener's stack at the
point of failure and which has an L{UnsupportedType} exception in
its arguments.
"""
unflattenable = object()
deepest = [unflattenable]
middlest = (deepest,)
outermost = [middlest]
err = self.assertRaises(FlattenerError, self.flatten, outermost)
self.assertEqual(
err._roots, [outermost, middlest, deepest, unflattenable])
self.assertTrue(isinstance(err.args[0], UnsupportedType))
def test_str(self):
"""
An instance of L{str} is flattened to itself.
"""
self.assertStringEqual(self.flatten('bytes<>&"\0'), 'bytes<>&"\0')
def test_raw(self):
"""
An instance of L{raw} is flattened to itself.
"""
self.assertStringEqual(
self.flatten(raw('bytes<>^&"\0')), 'bytes<>^&"\0')
def test_attributeRaw(self):
"""
An instance of L{raw} is flattened with " quoting if C{True} is passed
for C{inAttribute}. L{raw} instances are expected to have already had
&, <, and > quoted. L{raw} support is primarily for backwards
compatibility.
"""
        self.assertStringEqual(
            "".join(flatten(None, raw('"&<>'), True, True)), '&quot;&<>')
def test_attributeString(self):
"""
An instance of L{str} is flattened with attribute quoting rules if
C{True} is passed for C{inAttribute}.
"""
        self.assertStringEqual(
            "".join(flatten(None, '"&<>', True, False)),
            "&quot;&amp;&lt;&gt;")
def test_textNodeString(self):
"""
An instance of L{str} is flattened with XML quoting rules if C{True} is
passed for C{inXML}.
"""
        self.assertStringEqual(
            "".join(flatten(None, '"&<>', False, True)),
            '"&amp;&lt;&gt;')
def test_unicode(self):
"""
An instance of L{unicode} is flattened to the UTF-8 representation of
itself.
"""
self.assertStringEqual(self.flatten(u'bytes<>&"\0'), 'bytes<>&"\0')
unich = u"\N{LATIN CAPITAL LETTER E WITH GRAVE}"
self.assertStringEqual(self.flatten(unich), unich.encode('utf-8'))
def test_xml(self):
"""
An L{xml} instance is flattened to the UTF-8 representation of itself.
"""
self.assertStringEqual(self.flatten(xml("foo")), "foo")
unich = u"\N{LATIN CAPITAL LETTER E WITH GRAVE}"
self.assertStringEqual(self.flatten(xml(unich)), unich.encode('utf-8'))
def test_entity(self):
"""
An instance of L{Entity} is flattened to the XML representation of an
arbitrary codepoint.
"""
        self.assertStringEqual(self.flatten(nbsp), "&#160;")
def test_entityChild(self):
"""
An instance of L{Entity} which is a child of a L{Tag} is flattened to
the XML representation of an arbitrary codepoint.
"""
self.assertStringEqual(
            self.flatten(div[nbsp]), "<div>&#160;</div>")
def test_entityAttribute(self):
"""
An instance of L{Entity} which is the value of an attribute of a L{Tag}
is flattened to the XML representation of an arbitrary codepoint.
"""
self.assertStringEqual(
            self.flatten(div(foo=nbsp)), '<div foo="&#160;"></div>')
def test_iterable(self):
"""
A C{list}, C{tuple} or C{generator} is flattened to the concatenation
of whatever its elements flatten to, in order.
"""
sequence = ("bytes", "<", ">", "&")
result = "bytes<>&"
self.assertStringEqual(self.flatten(tuple(sequence)), result)
self.assertStringEqual(self.flatten(list(sequence)), result)
def gen():
for e in sequence:
yield e
self.assertStringEqual(self.flatten(gen()), result)
def test_singletonProto(self):
"""
L{Proto} instances corresponding to tags which are allowed to be
self-closing are flattened that way.
"""
self.assertStringEqual(self.flatten(br), "<br />")
def test_nonSingletonProto(self):
"""
L{Proto} instances corresponding to tags which are not allowed to be
self-closing are not flattened that way.
"""
self.assertStringEqual(self.flatten(div), "<div></div>")
def test_invisibleProto(self):
"""
L{Proto} instances with an empty C{name} attribute don't have markup
generated for them.
"""
self.assertStringEqual(self.flatten(invisible), "")
self.assertStringEqual(self.flatten(Proto("")), "")
def test_emptySingletonTag(self):
"""
L{Tag} instances which are allowed to be self-closing are flattened
that way.
"""
self.assertStringEqual(self.flatten(br()), "<br />")
def test_emptyNonSingletonTag(self):
"""
L{Tag} instances which are not allowed to be self-closing are not
flattened that way.
"""
self.assertStringEqual(self.flatten(div()), "<div></div>")
def test_invisibleTag(self):
"""
L{Tag} instances with an empty C{tagName} attribute don't have markup
generated for them, only for their children.
"""
self.assertStringEqual(self.flatten(invisible["foo"]), "foo")
self.assertStringEqual(self.flatten(Tag("")["foo"]), "foo")
def test_unicodeTagName(self):
"""
A L{Tag} with a C{tagName} attribute which is C{unicode} instead of
C{str} is flattened to an XML representation.
"""
self.assertStringEqual(self.flatten(Tag(u'div')), "<div></div>")
self.assertStringEqual(self.flatten(Tag(u'div')['']), "<div></div>")
def test_unicodeAttributeName(self):
"""
A L{Tag} with an attribute name which is C{unicode} instead of C{str}
is flattened to an XML representation.
"""
self.assertStringEqual(
self.flatten(Tag(u'div', {u'foo': 'bar'})), '<div foo="bar"></div>')
def test_stringTagAttributes(self):
"""
C{str} L{Tag} attribute values are flattened by applying XML attribute
value quoting rules.
"""
self.assertStringEqual(
self.flatten(div(foo="bar")), '<div foo="bar"></div>')
        self.assertStringEqual(
            self.flatten(div(foo='"><&')),
            '<div foo="&quot;&gt;&lt;&amp;"></div>')
def test_tupleTagAttributes(self):
"""
C{tuple} L{Tag} attribute values are flattened by flattening the tuple
and applying XML attribute value quoting rules to the result.
"""
        self.assertStringEqual(
            self.flatten(div(foo=('"', ">", "<", "&"))),
            '<div foo="&quot;&gt;&lt;&amp;"></div>')
def test_tagChildren(self):
"""
The children of a L{Tag} are flattened to strings included inside the
XML markup delimiting the tag.
"""
self.assertStringEqual(
self.flatten(div["baz"]), '<div>baz</div>')
self.assertStringEqual(
self.flatten(div[["b", "a", "z"]]), '<div>baz</div>')
def test_nestedTags(self):
"""
The contents of a L{Tag} which is a child of another L{Tag} should be
quoted just once.
"""
        self.assertStringEqual(
            self.flatten(div[div['&']]),
            "<div><div>&amp;</div></div>")
def test_patternTag(self):
"""
A L{Tag} with a I{pattern} special is omitted from the flattened
output.
"""
self.assertStringEqual(self.flatten(div(pattern="foo")), "")
def test_onePatternTag(self):
"""
A L{Tag} returned from L{IQ.onePattern} is represented in the flattened
output.
"""
self.assertStringEqual(
self.flatten(IQ(div(pattern="foo")).onePattern("foo")),
"<div></div>")
def test_renderAttribute(self):
"""
A L{Tag} with a I{render} special is replaced with the return value of
the corresponding render method on the L{IRenderable} above the tag.
"""
result = ("foo", " ", "bar")
renders = []
class RendererRenderable(TrivialRenderable):
def render_foo(self, request, tag):
renders.append((request, tag))
return result
def renderer(self, name):
return getattr(self, 'render_' + name)
request = object()
tag = div(render="foo", bar="baz")["quux"]
renderer = RendererRenderable(tag)
self.assertStringEqual(self.flatten(renderer, request), "".join(result))
self.assertEqual(len(renders), 1)
self.assertIdentical(renders[0][0], request)
self.assertEqual(renders[0][1].tagName, tag.tagName)
self.assertEqual(renders[0][1].attributes, {"bar": "baz"})
self.assertEqual(renders[0][1].children, ["quux"])
def test_renderDirectiveAttribute(self):
"""
A L{Tag} with a I{render} special which is a L{directive} is treated
the same way as if the special value were just a string.
"""
result = ("foo", " ", "bar")
renders = []
class RendererRenderable(TrivialRenderable):
def render_foo(self, request, tag):
renders.append((request, tag))
return result
def renderer(self, name):
return getattr(self, 'render_' + name)
request = object()
tag = div(render=directive("foo"), bar="baz")["quux"]
renderer = RendererRenderable(tag)
self.assertStringEqual(self.flatten(renderer, request), "".join(result))
self.assertEqual(len(renders), 1)
self.assertIdentical(renders[0][0], request)
self.assertEqual(renders[0][1].tagName, tag.tagName)
self.assertEqual(renders[0][1].attributes, {"bar": "baz"})
self.assertEqual(renders[0][1].children, ["quux"])
def test_renderAttributeOnRenderableNestedInRenderable(self):
"""
A L{Tag} with a renderer which returns an L{IRenderable} which renders
to a L{Tag} with a I{render} special is replaced with the return value
of the corresponding render method on the nested L{IRenderable}.
"""
result = ("foo", " ", "bar")
request = object()
renders = []
inner = RenderRenderable(renders, "bar", div(render="bar"), result)
outer = RenderRenderable(renders, "foo", div(render="foo"), inner)
self.assertStringEqual(self.flatten(outer, request), "".join(result))
self.assertEqual(renders, [(outer, request), (inner, request)])
def test_renderAttributeNestedInList(self):
"""
A L{Tag} with a renderer which is in a list which is returned by
L{IRenderable.render} is replaced with the result of the named renderer
on the L{IRenderable} which returned the list.
"""
result = ("foo", " ", "bar")
renders = []
renderable = RenderRenderable(
renders, "foo", [div(render="foo")], result)
self.assertStringEqual(
self.flatten(renderable, None), "".join(result))
def test_renderAttributeNestedInTag(self):
"""
A L{Tag} with a renderer which is a child of a L{Tag} which was
returned by L{IRenderable.render} is replaced with the result of the
named renderer on the L{IRenderable} which returned the L{Tag}.
"""
result = "quux"
renders = []
tag = div[div(render="foo")]
renderable = RenderRenderable(renders, "foo", tag, result)
self.assertStringEqual(
self.flatten(renderable, None), "<div>quux</div>")
def test_renderAttributeNestedInAttributeValue(self):
"""
A L{Tag} with a renderer which is the value of an attribute of a L{Tag}
which was returned by L{IRenderable.render} is replaced with the result
of the named renderer on the L{IRenderable} which returned the L{Tag}.
"""
result = "quux"
renders = []
request = object()
tag = div(foo=invisible(render="bar"))
renderable = RenderRenderable(renders, "bar", tag, result)
self.assertStringEqual(
self.flatten(renderable, request), '<div foo="quux"></div>')
self.assertEqual(renders, [(renderable, request)])
def test_renderAttributeNestedInSlot(self):
"""
A L{Tag} with a renderer which is used as the value of a L{slot} which
was returned by L{IRenderable.render} is replaced with the result of
the named renderer on the L{IRenderable} which returned the L{slot}.
"""
result = "quux"
renders = []
outer = div[slot("bar")]
inner = div(render="foo")
outer.fillSlots("bar", inner)
renderable = RenderRenderable(renders, "foo", outer, result)
self.assertStringEqual(
self.flatten(renderable, None), "<div>quux</div>")
def test_renderAttributeNestedInPrecompiledSlot(self):
"""
A L{Tag} with a renderer which is used as the value of a
L{_PrecompiledSlot} which was returned by L{IRenderable.render} is
replaced with the result of the named renderer on the L{IRenderable}
which returned the L{_PrecompiledSlot}.
"""
result = "quux"
renders = []
request = object()
outer = invisible[stan(div[slot("bar")]).load()]
inner = div(render="foo")
outer.fillSlots("bar", inner)
renderable = RenderRenderable(renders, "foo", outer, result)
self.assertStringEqual(
self.flatten(renderable, request), "<div>quux</div>")
self.assertEqual(renders, [(renderable, request)])
def test_renderAttributedNestedInRenderResult(self):
"""
A L{Tag} with a renderer which is returned by a render method is
replaced with the return value of the named renderer on the
L{IRenderable} which had the render method which returned the L{Tag}.
"""
class TwoRenderers(object):
implements(IRenderable)
def renderer(self, name):
return getattr(self, name)
def foo(self, request, tag):
return div(render="bar")
def bar(self, request, tag):
return "baz"
def render(self, request):
return div(render="foo")
renderable = TwoRenderers()
self.assertStringEqual(self.flatten(renderable), "baz")
def test_slotsNestedInRenderResult(self):
"""
A L{slot} in the return value of a render function is replaced by the
value of that slot as found on the tag which had the render directive.
"""
tag = div(render="foo")
tag.fillSlots("bar", '"&<>')
renderable = RenderRenderable([], "foo", tag, slot("bar"))
        self.assertStringEqual(self.flatten(renderable), '"&amp;&lt;&gt;')
def test_renderTextDataQuoted(self):
"""
Strings returned by a render method on an L{IRenderable} provider which
is a child of a L{Tag} are XML quoted.
"""
tag = div[RenderRenderable([], "foo", div(render="foo"), '"&<>')]
        self.assertStringEqual(self.flatten(tag), '<div>"&amp;&lt;&gt;</div>')
def test_renderMethodReturnsInputTag(self):
"""
If a render method returns the tag it was passed, the tag is flattened
as though it did not have a render directive.
"""
class IdempotentRenderable(object):
implements(IRenderable)
def renderer(self, name):
return getattr(self, name)
def foo(self, request, tag):
return tag
def render(self, request):
return div(render="foo", bar="baz")["hello, world"]
renderable = IdempotentRenderable()
self.assertStringEqual(
self.flatten(renderable), '<div bar="baz">hello, world</div>')
def test_url(self):
"""
An L{URL} object is flattened to the appropriate representation of
itself, whether it is the child of a tag or the value of a tag
attribute.
"""
link = URL.fromString("http://foo/fu?bar=baz&bar=baz#quux%2f")
        self.assertStringEqual(
            self.flatten(link),
            "http://foo/fu?bar=baz&amp;bar=baz#quux%2F")
        self.assertStringEqual(
            self.flatten(div[link]),
            '<div>http://foo/fu?bar=baz&amp;bar=baz#quux%2F</div>')
        self.assertStringEqual(
            self.flatten(div(foo=link)),
            '<div foo="http://foo/fu?bar=baz&amp;bar=baz#quux%2F"></div>')
        self.assertStringEqual(
            self.flatten(div[div(foo=link)]),
            '<div><div foo="http://foo/fu?bar=baz&amp;bar=baz#quux%2F"></div>'
'</div>')
link = URL.fromString("http://foo/fu?%2f=%7f")
self.assertStringEqual(
self.flatten(link),
"http://foo/fu?%2F=%7F")
self.assertStringEqual(
self.flatten(div[link]),
'<div>http://foo/fu?%2F=%7F</div>')
self.assertStringEqual(
self.flatten(div(foo=link)),
'<div foo="http://foo/fu?%2F=%7F"></div>')
def test_unfilledSlot(self):
"""
Flattening a slot which has no known value results in an
L{FlattenerError} exception which has an L{UnfilledSlot} exception
in its arguments.
"""
exc = self.assertRaises(FlattenerError, self.flatten, slot("foo"))
self.assertTrue(isinstance(exc.args[0], UnfilledSlot))
def test_filledSlotTagChild(self):
"""
Flattening a slot as a child of a L{Tag} which has been given a value
for that slot results in the slot being replaced with the value in the
output.
"""
tag = div[slot("foo")]
tag.fillSlots("foo", "bar")
self.assertStringEqual(self.flatten(tag), "<div>bar</div>")
def test_filledSlotTagChildEscaping(self):
"""
Flattening a slot as a child of a L{Tag} which has been given a string
value results in that string value being XML escaped in the output.
"""
tag = div[slot("foo")]
tag.fillSlots("foo", '"&<>')
        self.assertStringEqual(self.flatten(tag), '<div>"&amp;&lt;&gt;</div>')
def test_filledSlotNestedTagChild(self):
"""
Flattening a slot as a child of a L{Tag} which is itself a child of a
L{Tag} which has been given a value for that slot results in the slot
being replaced with the value in the output.
"""
tag = div[div[slot("foo")]]
tag.fillSlots("foo", "bar")
self.assertStringEqual(self.flatten(tag), "<div><div>bar</div></div>")
def test_filledSlotTagAttribute(self):
"""
Flattening a slot which is the value of an attribute of a L{Tag}
results in the value of the slot appearing as the attribute value in
the output.
"""
tag = div(foo=slot("bar"))
tag.fillSlots("bar", "baz")
self.assertStringEqual(self.flatten(tag), '<div foo="baz"></div>')
def test_slotFilledWithProto(self):
"""
Filling a slot with a L{Proto} results in the slot being replaced with
the serialized form of the tag in the output.
"""
tag = div[slot("foo")]
tag.fillSlots("foo", br)
self.assertStringEqual(self.flatten(tag), "<div><br /></div>")
def test_unfilledPrecompiledSlot(self):
"""
Flattening a L{_PrecompiledSlot} for which no value has been supplied
results in an L{FlattenerError} exception.
"""
tag = oldPrecompile(div[slot("foo")])
self.assertRaises(FlattenerError, self.flatten, tag)
def test_precompiledSlot(self):
"""
A L{_PrecompiledSlot} is replaced with the value of that slot when
flattened.
"""
tag = invisible[oldPrecompile(div[slot("foo")])]
tag.fillSlots("foo", '"&<>')
        self.assertStringEqual(self.flatten(tag), '<div>"&amp;&lt;&gt;</div>')
def test_precompiledSlotTagAttribute(self):
"""
A L{_PrecompiledSlot} which is the value of an attribute is replaced
with the value of the slot with XML attribute quoting applied.
"""
tag = invisible[oldPrecompile(div(foo=slot("foo")))]
tag.fillSlots("foo", '"&<>')
        self.assertStringEqual(self.flatten(tag), '<div foo="&quot;&amp;&lt;&gt;"></div>')
def test_precompiledSlotFilledWithSlot(self):
"""
A L{_PrecompiledSlot} slot which is filled with another slot is
replaced with the value the other slot is filled with.
"""
tag = invisible[oldPrecompile(div[slot("foo")])]
tag.fillSlots("foo", slot("bar"))
tag.fillSlots("bar", '"&<>')
        self.assertStringEqual(self.flatten(tag), '<div>"&amp;&lt;&gt;</div>')
def test_renderable(self):
"""
Flattening an object which provides L{IRenderable} results in
something based on the result of the object's C{render} method.
"""
request = object()
renderable = TrivialRenderable("bytes")
self.assertStringEqual(
self.flatten(renderable, request), "bytes")
self.assertEqual(renderable.requests, [request])
def test_renderableNestingRenderable(self):
"""
Flattening an L{IRenderable} provider which returns another
L{IRenderable} from its C{render} method results in the result of
flattening the result of the inner L{IRenderable}'s C{render} method
which is called with the request object.
"""
request = object()
inner = TrivialRenderable("bytes")
outer = TrivialRenderable(inner)
self.assertStringEqual(self.flatten(outer, request), "bytes")
self.assertEqual(inner.requests, [request])
def test_listNestingRenderable(self):
"""
Flattening a C{list} which has an object providing L{IRenderable} as a
child results in the result of the L{IRenderable}'s C{render} method
which is called with the request object.
"""
request = object()
renderable = TrivialRenderable("bytes")
self.assertStringEqual(self.flatten([renderable], request), "bytes")
self.assertEqual(renderable.requests, [request])
def test_tagNestingRenderable(self):
"""
Flattening a L{Tag} which has an object providing L{IRenderable} as a
child results in markup for the tag with child data from the
L{IRenderable}'s C{render} which is called with the request object.
"""
request = object()
inner = TrivialRenderable("bytes")
outer = div[inner]
self.assertStringEqual(
self.flatten(outer, request), "<div>bytes</div>")
self.assertEqual(inner.requests, [request])
def test_slotNestingRenderable(self):
"""
Flattening a L{slot} which is filled with an object providing
L{IRenderable} results in the result of the L{IRenderable}'s C{render}
method which is called with the request object.
"""
request = object()
inner = TrivialRenderable("bytes")
outer = slot("foo")
tag = div[outer]
tag.fillSlots("foo", inner)
self.assertStringEqual(self.flatten(tag, request), "<div>bytes</div>")
self.assertEqual(inner.requests, [request])
def test_slotFromRenderable(self):
"""
An L{IRenderable} provider which returns a C{Tag} inside a C{slot}
from its C{render} method has that slot filled with the slot data
available on the tag.
"""
tag = div[slot("foo")]
tag.fillSlots("foo", "bar")
renderable = TrivialRenderable(tag)
self.assertStringEqual(self.flatten(renderable), "<div>bar</div>")
def _nestingTest(self, nestedObject, expected):
limit = sys.getrecursionlimit()
sys.setrecursionlimit(100)
try:
self.assertStringEqual(self.flatten(nestedObject), expected)
finally:
sys.setrecursionlimit(limit)
def test_deeplyNestedList(self):
"""
Flattening succeeds for an object with a level of list nesting
significantly greater than the Python maximum recursion limit.
"""
obj = ["foo"]
for i in xrange(1000):
obj = [obj]
self._nestingTest(obj, "foo")
def test_deeplyNestedSlot(self):
"""
Flattening succeeds for an object with a level of slot nesting
significantly greater than the Python maximum recursion limit.
"""
tag = div()[slot("foo-0")]
for i in xrange(1000):
tag.fillSlots("foo-" + str(i), slot("foo-" + str(i + 1)))
tag.fillSlots("foo-1000", "bar")
self._nestingTest(tag, "<div>bar</div>")
def test_deeplyNestedTag(self):
"""
Flattening succeeds for a tag with a level of nesting significantly
greater than the Python maximum recursion limit.
"""
n = 1000
tag = div["foo"]
for i in xrange(n - 1):
tag = div[tag]
self._nestingTest(tag, "<div>" * n + "foo" + "</div>" * n)
def test_deeplyNestedRenderables(self):
"""
Flattening succeeds for an object with a level of L{IRenderable}
nesting significantly greater than the Python maximum recursion limit.
"""
obj = TrivialRenderable("foo")
for i in xrange(1000):
obj = TrivialRenderable(obj)
self._nestingTest(obj, "foo")
def test_legacyRenderer(self):
"""
Flattening an L{IRenderer} succeeds with the same result as using the
old flattener.
"""
class Legacy(Fragment):
docFactory = stan(invisible(render=directive('foo')))
def render_foo(self, ctx, data):
return '"&<>'
fragment = Legacy()
self.assertEqual(
self.flatten(fragment), oldFlatten(fragment))
self.assertEqual(
self.flatten(div(foo=fragment)),
oldFlatten(div(foo=fragment)))
def test_legacySerializable(self):
"""
Flattening an object for which a flattener was registered with
L{registerFlattener} succeeds with the result of the registered
flattener function.
"""
request = FakeRequest()
result = 'bytes"'
serializable = LegacySerializable(result)
self.assertEqual(
self.flatten(div(foo=serializable), request),
'<div foo="' + result + '"></div>')
[context] = serializable.flattenedWith
self.assertTrue(isinstance(context, WovenContext))
self.assertFalse(context.precompile)
self.assertTrue(context.isAttrib)
self.assertIdentical(context.locate(IRequest), request)
self.assertIdentical(context.locate(IData), None)
def test_legacySerializableReturnsSlot(self):
"""
A slot returned by a flattener registered with L{registerFlattener} is
filled with the value of a slot from "outside" the L{ISerializable}.
"""
request = FakeRequest()
result = slot('foo')
serializable = LegacySerializable(result)
tag = div(foo=serializable)
tag.fillSlots("foo", "bar")
self.assertEqual(self.flatten(tag, request), '<div foo="bar"></div>')
def test_flattenExceptionStack(self):
"""
If an exception is raised by a render method, L{FlattenerError} is
raised with information about the stack between the flattener and the
frame which raised the exception.
"""
def broken():
raise RuntimeError("foo")
class BrokenRenderable(object):
implements(IRenderable)
def render(self, request):
# insert another stack frame before the exception
broken()
request = object()
renderable = BrokenRenderable()
exc = self.assertRaises(
FlattenerError, self.flatten, renderable, request)
self.assertEqual(
# There are probably some frames above this, but I don't care what
# they are.
exc._traceback[-2:],
[(HERE, 927, 'render', 'broken()'),
(HERE, 920, 'broken', 'raise RuntimeError("foo")')])
class FlattenerErrorTests(TestCase):
"""
Tests for L{FlattenerError}.
"""
def test_string(self):
"""
If a L{FlattenerError} is created with a string root, up to around 40
bytes from that string are included in the string representation of the
exception.
"""
self.assertEqual(
str(FlattenerError(RuntimeError("reason"), ['abc123xyz'], [])),
"Exception while flattening:\n"
" 'abc123xyz'\n"
"RuntimeError: reason\n")
self.assertEqual(
str(FlattenerError(
RuntimeError("reason"), ['0123456789' * 10], [])),
"Exception while flattening:\n"
" '01234567890123456789<...>01234567890123456789'\n"
"RuntimeError: reason\n")
def test_unicode(self):
"""
If a L{FlattenerError} is created with a unicode root, up to around 40
characters from that string are included in the string representation
of the exception.
"""
self.assertEqual(
str(FlattenerError(
RuntimeError("reason"), [u'abc\N{SNOWMAN}xyz'], [])),
"Exception while flattening:\n"
" u'abc\\u2603xyz'\n" # Codepoint for SNOWMAN
"RuntimeError: reason\n")
self.assertEqual(
str(FlattenerError(
RuntimeError("reason"), [u'01234567\N{SNOWMAN}9' * 10],
[])),
"Exception while flattening:\n"
" u'01234567\\u2603901234567\\u26039<...>01234567\\u2603901234567"
"\\u26039'\n"
"RuntimeError: reason\n")
def test_renderable(self):
"""
If a L{FlattenerError} is created with an L{IRenderable} provider root,
the repr of that object is included in the string representation of the
exception.
"""
class Renderable(object):
implements(IRenderable)
def __repr__(self):
return "renderable repr"
self.assertEqual(
str(FlattenerError(
RuntimeError("reason"), [Renderable()], [])),
"Exception while flattening:\n"
" renderable repr\n"
"RuntimeError: reason\n")
def test_tag(self):
"""
If a L{FlattenerError} is created with a L{Tag} instance with source
location information, the source location is included in the string
representation of the exception.
"""
tag = Tag(
'div', filename='/foo/filename.xhtml', lineNumber=17, columnNumber=12)
self.assertEqual(
str(FlattenerError(RuntimeError("reason"), [tag], [])),
"Exception while flattening:\n"
" File \"/foo/filename.xhtml\", line 17, column 12, in \"div\"\n"
"RuntimeError: reason\n")
def test_tagWithoutLocation(self):
"""
If a L{FlattenerError} is created with a L{Tag} instance without source
location information, only the tagName is included in the string
representation of the exception.
"""
self.assertEqual(
str(FlattenerError(RuntimeError("reason"), [Tag('span')], [])),
"Exception while flattening:\n"
" Tag <span>\n"
"RuntimeError: reason\n")
def test_traceback(self):
"""
If a L{FlattenerError} is created with traceback frames, they are
included in the string representation of the exception.
"""
# Try to be realistic in creating the data passed in for the traceback
# frames.
def f():
g()
def g():
raise RuntimeError("reason")
try:
f()
except RuntimeError, exc:
# Get the traceback, minus the info for *this* frame
tbinfo = traceback.extract_tb(sys.exc_info()[2])[1:]
else:
self.fail("f() must raise RuntimeError")
self.assertEqual(
str(FlattenerError(exc, [], tbinfo)),
"Exception while flattening:\n"
" File \"%s\", line %d, in f\n"
" g()\n"
" File \"%s\", line %d, in g\n"
" raise RuntimeError(\"reason\")\n"
"RuntimeError: reason\n" % (
HERE, f.func_code.co_firstlineno + 1,
HERE, g.func_code.co_firstlineno + 1))
class LegacySerializable(object):
"""
An object for which a legacy flattener is registered and which can only be
flattened using that flattener.
"""
def __init__(self, value):
self.value = value
self.flattenedWith = []
def flattenLegacySerializable(legacy, context):
"""
Old-style flattener for L{LegacySerializable}.
"""
legacy.flattenedWith.append(context)
return [legacy.value]
registerFlattener(flattenLegacySerializable, LegacySerializable)
class DeferflattenTests(TestCase, FlattenMixin):
"""
Tests for L{nevow._flat.deferflatten}.
"""
def deferflatten(self, root, request=None):
"""
Helper to get a string from L{deferflatten}.
"""
result = []
d = deferflatten(request, root, False, False, result.append)
def cbFlattened(ignored):
return "".join(result)
d.addCallback(cbFlattened)
return d
def test_unflattenable(self):
"""
L{deferflatten} returns a L{Deferred} which fails with
L{FlattenerError} if it is passed an object which cannot be flattened.
"""
return self.assertFailure(
self.deferflatten(object()), FlattenerError)
def test_unfilledSlotDeferredResult(self):
"""
Flattening a L{Deferred} which results in an unfilled L{slot} results
in a L{FlattenerError} failure.
"""
return self.assertFailure(
self.deferflatten(succeed(slot("foo"))),
FlattenerError)
def test_renderable(self):
"""
Flattening an object which provides L{IRenderable} results in the
result of the object's C{render} method which is called with the
request.
"""
request = object()
renderable = TrivialRenderable("bytes")
def cbFlattened(result):
self.assertStringEqual(result, "bytes")
self.assertEqual(renderable.requests, [request])
flattened = self.deferflatten(renderable, request)
flattened.addCallback(cbFlattened)
return flattened
def test_renderableException(self):
"""
Flattening an object which provides L{IRenderable} with a C{render}
method which synchronously raises an exception results in a L{Deferred}
which fails with L{FlattenerError}.
"""
class TestException(Exception):
pass
class BrokenRenderable(object):
implements(IRenderable)
def render(self, request):
raise TestException()
flattened = self.deferflatten(BrokenRenderable())
return self.assertFailure(flattened, FlattenerError)
def test_deferredRenderAttribute(self):
"""
Flattening an object which provides L{IRenderable} with a C{render}
method which returns a L{Deferred} which is called back with a L{Tag}
with a render attribute results in the return value of the named
renderer from the L{IRenderer} which returned the L{Deferred}.
"""
flattened = self.deferflatten(
RenderRenderable([], "foo", succeed(div(render="foo")), "bar"))
flattened.addCallback(self.assertStringEqual, "bar")
return flattened
def test_synchronousDeferredSlot(self):
"""
Flattening a L{slot} which is filled with a L{Deferred} which has a
result already results in the result of the L{Deferred}.
"""
tag = div[slot("foo")]
tag.fillSlots("foo", succeed("bar"))
flattened = self.deferflatten(tag)
flattened.addCallback(self.assertStringEqual, "<div>bar</div>")
return flattened
def test_asynchronousDeferredSlot(self):
"""
Flattening a L{slot} which is filled with a L{Deferred} which does not
have a result already results in the result of the L{Deferred} when it
becomes available.
"""
tag = div[slot("foo")]
deferred = Deferred()
tag.fillSlots("foo", deferred)
flattened = self.deferflatten(tag)
flattened.addCallback(self.assertStringEqual, "<div>bar</div>")
deferred.callback("bar")
return flattened
def test_deferredNestingRenderable(self):
"""
Flattening a L{Deferred} which has an object providing L{IRenderable}
as the result results in the result of the L{IRenderable}'s C{render}
method.
"""
request = object()
renderable = TrivialRenderable("bytes")
deferred = succeed(renderable)
def cbFlattened(result):
self.assertStringEqual(result, "bytes")
self.assertEqual(renderable.requests, [request])
flattened = self.deferflatten(deferred, request)
flattened.addCallback(cbFlattened)
        return flattened
def test_reusedDeferred(self):
"""
Flattening a C{list} which contains the same L{Deferred} twice results
in the result of the L{Deferred} twice.
"""
deferred = succeed("bytes")
root = [deferred, deferred]
flattened = self.deferflatten(root)
flattened.addCallback(self.assertStringEqual, "bytesbytes")
return flattened
def test_manySynchronousDeferreds(self):
"""
Flattening a structure with many more L{Deferreds} than there are
frames allowed by the Python recursion limit succeeds if all the
L{Deferred}s have results already.
"""
results = [str(i) for i in xrange(1000)]
deferreds = map(succeed, results)
limit = sys.getrecursionlimit()
sys.setrecursionlimit(100)
try:
flattened = self.deferflatten(deferreds)
except:
sys.setrecursionlimit(limit)
raise
else:
def cb(passthrough):
sys.setrecursionlimit(limit)
return passthrough
flattened.addBoth(cb)
flattened.addCallback(self.assertStringEqual, "".join(results))
return flattened
def test_deferredQuoting(self):
"""
Flattening a L{Deferred} results in the result of the L{Deferred}
without any quoting.
"""
flattened = self.deferflatten(succeed('"&<>'))
flattened.addCallback(self.assertStringEqual, '"&<>')
return flattened
def test_deferredAttributeValueQuoting(self):
"""
Flattening a L{Tag} which has an attribute value which is a L{Deferred}
results in the result of the L{Deferred} being XML attribute quoted and
included as the value for that attribute of the tag.
"""
tag = div(foo=succeed('"&<>'))
flattened = self.deferflatten(tag)
flattened.addCallback(
            self.assertStringEqual, '<div foo="&quot;&amp;&lt;&gt;"></div>')
return flattened
def test_deferredTagChildQuoting(self):
"""
Flattening a L{Tag} which has a child which is a L{Deferred} results in
the result of the L{Deferred} being XML quoted and included as a child
value for the tag.
"""
tag = div[succeed('"&<>')]
flattened = self.deferflatten(tag)
flattened.addCallback(
            self.assertStringEqual, '<div>"&amp;&lt;&gt;</div>')
return flattened
def test_slotDeferredResultQuoting(self):
"""
Flattening a L{Tag} with a L{Deferred} as a child which results in a
L{slot} results in the value of the slot being XML quoted and included
as a child value for the tag.
"""
deferred = succeed(slot("foo"))
tag = div[deferred]
tag.fillSlots("foo", '"&<>')
flattened = self.deferflatten(tag)
flattened.addCallback(
            self.assertStringEqual, '<div>"&amp;&lt;&gt;</div>')
return flattened
def test_legacyAsynchronousRenderer(self):
"""
Flattening an L{IRenderer} which returns a L{Deferred} from one of its
        render methods succeeds with the same result as using the old
flattener.
"""
deferredResult = Deferred()
rendererCalled = []
class Legacy(Fragment):
docFactory = stan(invisible(render=directive('foo')))
def render_foo(self, ctx, data):
rendererCalled.append(None)
return deferredResult
fragment = Legacy()
finished = self.deferflatten(fragment)
finished.addCallback(
self.assertStringEqual, "foobarbaz")
# Sanity check - we do not want the Deferred to have been called back
# before it is returned from the render method.
self.assertTrue(rendererCalled)
deferredResult.callback("foobarbaz")
return finished
def test_attributeString(self):
"""
An instance of L{str} is flattened with attribute quoting rules if
C{True} is passed for C{inAttribute}.
"""
result = []
finished = deferflatten(None, '"&<>', True, False, result.append)
finished.addCallback(lambda ignored: "".join(result))
        finished.addCallback(self.assertStringEqual, "&quot;&amp;&lt;&gt;")
return finished
def test_textNodeString(self):
"""
An instance of L{str} is flattened with XML quoting rules if C{True} is
passed for C{inXML}.
"""
result = []
finished = deferflatten(None, '"&<>', False, True, result.append)
finished.addCallback(lambda ignored: "".join(result))
        finished.addCallback(self.assertStringEqual, '"&amp;&lt;&gt;')
return finished
| {
"content_hash": "dafe53b03e5ad03fa62b738a0b5b67db",
"timestamp": "",
"source": "github",
"line_count": 1348,
"max_line_length": 90,
"avg_line_length": 34.76706231454006,
"alnum_prop": 0.5997524858106089,
"repo_name": "perkinslr/pypyjs",
"id": "c3e42d1d886d078061ca14c9ad3780f129327ca1",
"size": "46923",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "addedLibraries/nevow/test/test_newflat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "63586"
},
{
"name": "CSS",
"bytes": "7648"
},
{
"name": "D",
"bytes": "2081"
},
{
"name": "HTML",
"bytes": "7097"
},
{
"name": "JavaScript",
"bytes": "488078"
},
{
"name": "Makefile",
"bytes": "5877"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Python",
"bytes": "26517313"
},
{
"name": "Shell",
"bytes": "1406"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
from .views import LogView
urlpatterns = patterns(
'',
url(r'^', LogView.as_view()),
)
| {
"content_hash": "6d407507513cf6e19c58db65f18dbb69",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 42,
"avg_line_length": 17.5,
"alnum_prop": 0.6642857142857143,
"repo_name": "wq/offline-website-logger",
"id": "6e8d36ba2fa3dac109614548221cf8ebc67e04cd",
"size": "140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "owl/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18527"
},
{
"name": "Shell",
"bytes": "143"
}
],
"symlink_target": ""
} |
import ctypes
import platform
ES_CONTINUOUS = 0x80000000
ES_SYSTEM_REQUIRED = 0x00000001
def _set_thread_execution(state):
ctypes.windll.kernel32.SetThreadExecutionState(state)
def prevent_standby():
if platform.system() == 'Windows':
_set_thread_execution(ES_CONTINUOUS | ES_SYSTEM_REQUIRED)
def allow_standby():
if platform.system() == 'Windows':
_set_thread_execution(ES_CONTINUOUS)
def long_running(func):
    """Decorator that keeps the machine awake while ``func`` runs."""
    def inner(*args, **kwargs):
        prevent_standby()
        try:
            return func(*args, **kwargs)
        finally:
            # Re-allow standby even if the wrapped function raises.
            allow_standby()
    return inner
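# Illustrative usage sketch (an assumption, not part of the original module):
# decorating a hypothetical long-running job so Windows stays awake while it
# runs.  The job body is a placeholder.
def _example_long_job():
    @long_running
    def transcode_files():
        # Placeholder for real work, e.g. converting a batch of MKV files.
        return "done"
    return transcode_files()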
| {
"content_hash": "56e91604d0048a9a1ab91b47d89588cf",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 65,
"avg_line_length": 21.96551724137931,
"alnum_prop": 0.6389324960753532,
"repo_name": "kbarnes3/MKVtoMP4",
"id": "3fa06fd0d2ac0a6172dc917a50c0de8dde0a7e22",
"size": "875",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/MKVtoMP4/powermanagement.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "PowerShell",
"bytes": "3938"
},
{
"name": "Python",
"bytes": "21289"
}
],
"symlink_target": ""
} |
"""
werkzeug.security
~~~~~~~~~~~~~~~~~
Security related helpers such as secure password hashing tools.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import hmac
import hashlib
import posixpath
import codecs
from struct import Struct
from random import SystemRandom
from operator import xor
from itertools import starmap
from werkzeug._compat import range_type, PY2, text_type, izip, to_bytes, \
string_types, to_native
SALT_CHARS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
DEFAULT_PBKDF2_ITERATIONS = 1000
_pack_int = Struct('>I').pack
_builtin_safe_str_cmp = getattr(hmac, 'compare_digest', None)
_sys_rng = SystemRandom()
_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep]
if sep not in (None, '/'))
def _find_hashlib_algorithms():
algos = getattr(hashlib, 'algorithms', None)
if algos is None:
algos = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
rv = {}
for algo in algos:
func = getattr(hashlib, algo, None)
if func is not None:
rv[algo] = func
return rv
_hash_funcs = _find_hashlib_algorithms()
def pbkdf2_hex(data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS,
keylen=None, hashfunc=None):
"""Like :func:`pbkdf2_bin`, but returns a hex-encoded string.
.. versionadded:: 0.9
:param data: the data to derive.
:param salt: the salt for the derivation.
:param iterations: the number of iterations.
:param keylen: the length of the resulting key. If not provided,
the digest size will be used.
:param hashfunc: the hash function to use. This can either be the
string name of a known hash function, or a function
from the hashlib module. Defaults to sha1.
"""
rv = pbkdf2_bin(data, salt, iterations, keylen, hashfunc)
return to_native(codecs.encode(rv, 'hex_codec'))
_has_native_pbkdf2 = hasattr(hashlib, 'pbkdf2_hmac')
def pbkdf2_bin(data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS,
keylen=None, hashfunc=None):
"""Returns a binary digest for the PBKDF2 hash algorithm of `data`
with the given `salt`. It iterates `iterations` times and produces a
key of `keylen` bytes. By default, SHA-1 is used as hash function;
a different hashlib `hashfunc` can be provided.
.. versionadded:: 0.9
:param data: the data to derive.
:param salt: the salt for the derivation.
:param iterations: the number of iterations.
:param keylen: the length of the resulting key. If not provided
the digest size will be used.
:param hashfunc: the hash function to use. This can either be the
string name of a known hash function or a function
from the hashlib module. Defaults to sha1.
"""
if isinstance(hashfunc, string_types):
hashfunc = _hash_funcs[hashfunc]
elif not hashfunc:
hashfunc = hashlib.sha1
data = to_bytes(data)
salt = to_bytes(salt)
# If we're on Python with pbkdf2_hmac we can try to use it for
# compatible digests.
if _has_native_pbkdf2:
_test_hash = hashfunc()
if hasattr(_test_hash, 'name') and \
_test_hash.name in _hash_funcs:
return hashlib.pbkdf2_hmac(_test_hash.name,
data, salt, iterations,
keylen)
mac = hmac.HMAC(data, None, hashfunc)
if not keylen:
keylen = mac.digest_size
def _pseudorandom(x, mac=mac):
h = mac.copy()
h.update(x)
return bytearray(h.digest())
buf = bytearray()
for block in range_type(1, -(-keylen // mac.digest_size) + 1):
rv = u = _pseudorandom(salt + _pack_int(block))
for i in range_type(iterations - 1):
u = _pseudorandom(bytes(u))
rv = bytearray(starmap(xor, izip(rv, u)))
buf.extend(rv)
return bytes(buf[:keylen])
def safe_str_cmp(a, b):
"""This function compares strings in somewhat constant time. This
requires that the length of at least one string is known in advance.
Returns `True` if the two strings are equal, or `False` if they are not.
.. versionadded:: 0.7
"""
if isinstance(a, text_type):
a = a.encode('utf-8')
if isinstance(b, text_type):
b = b.encode('utf-8')
if _builtin_safe_str_cmp is not None:
return _builtin_safe_str_cmp(a, b)
if len(a) != len(b):
return False
rv = 0
if PY2:
for x, y in izip(a, b):
rv |= ord(x) ^ ord(y)
else:
for x, y in izip(a, b):
rv |= x ^ y
return rv == 0
def gen_salt(length):
"""Generate a random string of SALT_CHARS with specified ``length``."""
if length <= 0:
raise ValueError('Salt length must be positive')
return ''.join(_sys_rng.choice(SALT_CHARS) for _ in range_type(length))
def _hash_internal(method, salt, password):
"""Internal password hash helper. Supports plaintext without salt,
unsalted and salted passwords. In case salted passwords are used
hmac is used.
"""
if method == 'plain':
return password, method
if isinstance(password, text_type):
password = password.encode('utf-8')
if method.startswith('pbkdf2:'):
args = method[7:].split(':')
if len(args) not in (1, 2):
raise ValueError('Invalid number of arguments for PBKDF2')
method = args.pop(0)
iterations = args and int(args[0] or 0) or DEFAULT_PBKDF2_ITERATIONS
is_pbkdf2 = True
actual_method = 'pbkdf2:%s:%d' % (method, iterations)
else:
is_pbkdf2 = False
actual_method = method
hash_func = _hash_funcs.get(method)
if hash_func is None:
raise TypeError('invalid method %r' % method)
if is_pbkdf2:
if not salt:
raise ValueError('Salt is required for PBKDF2')
rv = pbkdf2_hex(password, salt, iterations,
hashfunc=hash_func)
elif salt:
if isinstance(salt, text_type):
salt = salt.encode('utf-8')
rv = hmac.HMAC(salt, password, hash_func).hexdigest()
else:
h = hash_func()
h.update(password)
rv = h.hexdigest()
return rv, actual_method
def generate_password_hash(password, method='pbkdf2:sha1', salt_length=8):
"""Hash a password with the given method and salt with with a string of
the given length. The format of the string returned includes the method
that was used so that :func:`check_password_hash` can check the hash.
The format for the hashed string looks like this::
method$salt$hash
This method can **not** generate unsalted passwords but it is possible
to set the method to plain to enforce plaintext passwords. If a salt
is used, hmac is used internally to salt the password.
If PBKDF2 is wanted it can be enabled by setting the method to
``pbkdf2:method:iterations`` where iterations is optional::
pbkdf2:sha1:2000$salt$hash
pbkdf2:sha1$salt$hash
:param password: the password to hash.
:param method: the hash method to use (one that hashlib supports). Can
optionally be in the format ``pbkdf2:<method>[:iterations]``
to enable PBKDF2.
:param salt_length: the length of the salt in letters.
"""
salt = method != 'plain' and gen_salt(salt_length) or ''
h, actual_method = _hash_internal(method, salt, password)
return '%s$%s$%s' % (actual_method, salt, h)
def check_password_hash(pwhash, password):
"""check a password against a given salted and hashed password value.
In order to support unsalted legacy passwords this method supports
plain text passwords, md5 and sha1 hashes (both salted and unsalted).
Returns `True` if the password matched, `False` otherwise.
:param pwhash: a hashed string like returned by
:func:`generate_password_hash`.
:param password: the plaintext password to compare against the hash.
"""
if pwhash.count('$') < 2:
return False
method, salt, hashval = pwhash.split('$', 2)
return safe_str_cmp(_hash_internal(method, salt, password)[0], hashval)
def safe_join(directory, filename):
"""Safely join `directory` and `filename`. If this cannot be done,
this function returns ``None``.
:param directory: the base directory.
:param filename: the untrusted filename relative to that directory.
"""
filename = posixpath.normpath(filename)
for sep in _os_alt_seps:
if sep in filename:
return None
if os.path.isabs(filename) or filename.startswith('../'):
return None
return os.path.join(directory, filename)
| {
"content_hash": "cda12d37af793a37dcd7b1a96024e78f",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 79,
"avg_line_length": 35.01901140684411,
"alnum_prop": 0.6116178067318132,
"repo_name": "maxhawkins/random_diet_club",
"id": "b0f304e0daeff85efcc8ac59bf4454927b89cb71",
"size": "9235",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "lib/werkzeug/security.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "28991"
},
{
"name": "HTML",
"bytes": "23062"
},
{
"name": "JavaScript",
"bytes": "25496"
},
{
"name": "Python",
"bytes": "2533105"
}
],
"symlink_target": ""
} |
import os
import shutil
import time
from threading import Thread
import yaml
from traits.api import TraitError, Float, provides, Str
from pychron.core.progress import open_progress
from pychron.core.yaml import yload
from pychron.furnace.base_furnace_manager import BaseFurnaceManager
from pychron.furnace.configure_dump import ConfigureDump
from pychron.furnace.ifurnace_manager import IFurnaceManager
from pychron.furnace.thermo.stage_manager import ThermoFurnaceStageManager
from pychron.graph.time_series_graph import TimeSeriesStreamStackedGraph
from pychron.hardware.furnace.thermo.furnace_controller import ThermoFurnaceController
from pychron.paths import paths
@provides(IFurnaceManager)
class ThermoFurnaceManager(BaseFurnaceManager):
controller_klass = ThermoFurnaceController
temperature_readback_min = Float(0)
temperature_readback_max = Float(1600.0)
mode = "normal"
settings_name = "furnace_settings"
status_txt = Str
_alive = False
_pid_str = None
_dumper_thread = None
def activate(self):
self.refresh_states()
self._load_sample_states()
self.load_settings()
self.start_update()
self.stage_manager.refresh(warn=True)
def start_update(self):
self.info("Start update")
self.reset_scan_timer(func=self._update_scan)
def stop_update(self):
self.info("Stop update")
self._stop_update()
# def test_furnace_api(self):
# self.info('testing furnace api')
# ret, err = False, ''
# if self.controller:
# ret = self.controller.test_connection()
# return ret, err
#
# def test_connection(self):
# self.info('testing connection')
# return self.test_furnace_api()
def clear_sample_states(self):
self._clear_sample_states()
def refresh_states(self):
self.switch_manager.load_indicator_states()
def prepare_destroy(self):
self.debug("prepare destroy")
self._stop_update()
if self.timer:
self.timer.stop()
def get_setpoint_blob(self):
self.debug("get setpoint blob")
blob = self.response_recorder.get_setpoint_blob()
return blob
def get_response_blob(self):
self.debug("get response blob")
blob = self.response_recorder.get_response_blob()
return blob
def get_output_blob(self):
self.debug("get output blob")
blob = self.response_recorder.get_output_blob()
return blob
def get_achieved_output(self):
self.debug("get achieved output")
return self.response_recorder.max_response
def set_response_recorder_period(self, p):
self.debug("set response recorder period={}".format(p))
self.response_recorder.period = p
def enable(self):
self.debug("enable")
return True
def get_process_value(self):
return self.controller.get_process_value()
def extract(self, v, **kw):
self.debug("extract")
# self.response_recorder.start()
self.debug("set setpoint to {}".format(v))
self.setpoint = v
def disable(self):
self.debug("disable")
# self.response_recorder.stop()
self.setpoint = 0
disable_device = disable
def check_reached_setpoint(self, v, n, tol, std):
return self.response_recorder.check_reached_setpoint(v, n, tol, std)
def start_response_recorder(self):
self.response_recorder.start()
def stop_response_recorder(self):
self.response_recorder.stop()
def move_to_position(self, pos, *args, **kw):
self.debug("move to position {}".format(pos))
self.stage_manager.goto_position(pos)
def dump_sample(self, block=False):
self.debug("dump sample")
if self._dumper_thread is None:
progress = open_progress(n=100)
if block:
return self._dump_sample(progress)
else:
self._dumper_thread = Thread(
name="DumpSample", target=self._dump_sample, args=(progress,)
)
self._dumper_thread.setDaemon(True)
self._dumper_thread.start()
else:
self.warning_dialog("dump already in progress")
def configure_dump(self):
self.debug("configure dump")
v = ConfigureDump(model=self)
v.edit_traits()
def is_dump_complete(self):
ret = self._dumper_thread is None
return ret
def get_active_pid_parameters(self):
result = self._pid_str or ""
self.debug("active pid ={}".format(result))
return result
# def set_pid_parameters(self, v):
# self.debug('setting pid parameters for {}'.format(v))
# from pychron.hardware.eurotherm.base import get_pid_parameters
# params = get_pid_parameters(v)
# if params:
# _, param_str = params
# self._pid_str = param_str
# self.controller.set_pid(param_str)
def set_setpoint(self, v):
self.debug("set setpoint={}".format(v))
# self.set_pid_parameters(v)
self.graph.record(v)
self.graph.record(v)
if self.controller:
self.controller.set_setpoint(v)
d = self.graph.get_data(axis=1)
if not self.graph_y_auto:
self.graph.set_y_limits(
min_=min(d.min(), v) * 0.9, max_=max(d.max(), v) * 1.1
)
self.graph.redraw()
def read_output_percent(self, force=False, verbose=False):
v = 0
if self.controller:
# force = update and not self.controller.is_scanning()
v = self.controller.read_output_percent(force=force, verbose=verbose)
try:
self.output_percent_readback = v
return v
except TraitError:
pass
def read_temperature(self, force=False, verbose=False):
v = 0
if self.controller:
# force = update and not self.controller.is_scanning()
v = self.controller.read_temperature(force=force, verbose=verbose)
try:
self.temperature_readback = v
return v
except TraitError:
pass
# canvas
def set_software_lock(self, name, lock):
if self.switch_manager is not None:
if lock:
self.switch_manager.lock(name)
else:
self.switch_manager.unlock(name)
# def open_valve(self, name, **kw):
# if not self._open_logic(name):
# self.debug('logic failed')
# do_later(self.warning_dialog, 'Open Valve Failed. Prevented by safety logic')
# return False, False
#
# if self.switch_manager:
# return self.switch_manager.open_switch(name, **kw)
#
# def close_valve(self, name, **kw):
# if not self._close_logic(name):
# self.debug('logic failed')
# do_later(self.warning_dialog, 'Close Valve Failed. Prevented by safety logic')
# return False, False
#
# if self.switch_manager:
# return self.switch_manager.close_switch(name, **kw)
def set_selected_explanation_item(self, item):
pass
# logic
# private
def _clear_sample_states(self):
self.debug("clear sample states")
self._backup_sample_states()
self._dump_sample_states(states=[])
def _load_sample_states(self):
self.debug("load sample states")
p = paths.furnace_sample_states
if os.path.isfile(p):
states = yload(p)
self.debug("states={}".format(states))
for si in states:
hole = self.stage_manager.stage_map.get_hole(si)
self.debug("si={} hole={}".format(si, hole))
if hole:
hole.analyzed = True
def _dump_sample_states(self, states=None):
if states is None:
states = self.stage_manager.get_sample_states()
self.debug("dump sample states")
p = paths.furnace_sample_states
with open(p, "w") as wfile:
yaml.dump(states, wfile)
def _backup_sample_states(self):
if os.path.isfile(paths.furnace_sample_states):
root, base = os.path.split(paths.furnace_sample_states)
bp = os.path.join(root, "~{}".format(base))
self.debug("backing up furnace sample states to {}".format(bp))
shutil.copyfile(paths.furnace_sample_states, bp)
def _update_scan(self):
response = self.controller.get_process_value(verbose=False)
self.temperature_readback = response or 0
output = self.controller.get_output(verbose=False)
self.output_percent_readback = output or 0
setpoint = self.controller.get_setpoint(verbose=False)
self._update_scan_graph(response, output, setpoint or 0)
def _stop_update(self):
self.debug("stop update")
self._alive = False
self.timer.stop()
def _update_scan_graph(self, response, output, setpoint):
x = None
update = False
if response is not None:
x = self.graph.record(response, series=1, track_y=False)
update = True
if output is not None:
self.graph.record(output, x=x, series=0, plotid=1, track_y=False)
update = True
if update:
ss = self.graph.get_data(plotid=0, axis=1)
if len(ss) > 1:
xs = self.graph.get_data(plotid=0)
xs[-1] = x
self.graph.set_data(xs, plotid=0)
else:
self.graph.record(setpoint, x=x, track_y=False)
if self.graph_y_auto:
temp_plot = self.graph.plots[0].plots["plot0"][0]
setpoint_plot = self.graph.plots[0].plots["plot1"][0]
temp_data = temp_plot.value.get_data()
setpoint_data = setpoint_plot.value.get_data()
ma = max(temp_data.max(), setpoint_data.max())
if self.setpoint == 0:
mi = 0
else:
mi = min(setpoint_data.min(), temp_data.min())
self.graph.set_y_limits(min_=mi, max_=ma, pad="0.1", plotid=0)
if self._recording:
self.record_data_manager.write_to_frame((x, response or 0, output or 0))
def _start_recording(self):
self._recording = True
self.record_data_manager = dm = self._record_data_manager_factory()
dm.new_frame(directory=paths.furnace_scans_dir)
dm.write_to_frame(("time", "temperature", "output"))
self._start_time = time.time()
def _stop_recording(self):
self._recording = False
def _graph_factory(self, *args, **kw):
g = TimeSeriesStreamStackedGraph()
# g.plotcontainer.padding_top = 5
# g.plotcontainer.padding_right = 5
g.new_plot(
xtitle="Time (s)",
ytitle="Temp. (C)",
padding_top=5,
padding_left=75,
padding_right=5,
)
g.set_scan_width(600, plotid=0)
g.set_data_limits(1.8 * 600, plotid=0)
# setpoint
g.new_series(plotid=0, line_width=2, render_style="connectedhold")
# response
g.new_series(plotid=0)
g.new_plot(ytitle="Output (%)", padding_top=5, padding_left=75, padding_right=5)
g.set_scan_width(600, plotid=1)
g.set_data_limits(1.8 * 600, plotid=1)
g.new_series(plotid=1)
g.set_y_limits(min_=-2, max_=102, plotid=1)
return g
def _dump_sample(self, progress):
"""
:return:
"""
ret = True
self.debug("dump sample started")
# handlers
def _setpoint_changed(self, new):
self.set_setpoint(new)
def _stage_manager_default(self):
sm = ThermoFurnaceStageManager(stage_manager_id="thermo.furnace.stage_map")
return sm
# ============= EOF =============================================
| {
"content_hash": "8182096967a054a2a2827ab1f8b0343d",
"timestamp": "",
"source": "github",
"line_count": 383,
"max_line_length": 92,
"avg_line_length": 31.715404699738905,
"alnum_prop": 0.5824483411541944,
"repo_name": "USGSDenverPychron/pychron",
"id": "610331e31d229553429893fa41a18f3905b407e6",
"size": "12881",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "pychron/furnace/thermo/furnace_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "128"
},
{
"name": "C++",
"bytes": "3706"
},
{
"name": "CSS",
"bytes": "263"
},
{
"name": "Cython",
"bytes": "1692"
},
{
"name": "Fortran",
"bytes": "455875"
},
{
"name": "HTML",
"bytes": "46796"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Processing",
"bytes": "11421"
},
{
"name": "Python",
"bytes": "10773692"
},
{
"name": "Shell",
"bytes": "1003"
}
],
"symlink_target": ""
} |
__all__ = (
'drizzle',
'firebird',
'mssql',
'mysql',
'oracle',
'postgresql',
'sqlite',
'sybase',
)
from .. import util
def _auto_fn(name):
"""default dialect importer.
plugs into the :class:`.PluginLoader`
as a first-hit system.
"""
if "." in name:
dialect, driver = name.split(".")
else:
dialect = name
driver = "base"
try:
module = __import__('sqlalchemy.dialects.%s' % (dialect, )).dialects
except ImportError:
return None
module = getattr(module, dialect)
if hasattr(module, driver):
module = getattr(module, driver)
return lambda: module.dialect
else:
return None
registry = util.PluginLoader("sqlalchemy.dialects", auto_fn=_auto_fn)
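# Illustrative sketch (an assumption, not part of the original module):
# resolving a dialect class by name through the registry.  PluginLoader.load
# falls back to _auto_fn above, so "sqlite" resolves via
# sqlalchemy.dialects.sqlite and "postgresql.psycopg2" picks the psycopg2
# driver module.
def _example_load_dialect(name="sqlite"):
    # Returns the dialect class registered for the given name.
    return registry.load(name)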
| {
"content_hash": "21d9596ebf42375e92b2a0dd3e6871ee",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 76,
"avg_line_length": 20.763157894736842,
"alnum_prop": 0.5665399239543726,
"repo_name": "FRC-Team-3140/north-american-happiness",
"id": "974d4f787567321707589687beee71e48ff17144",
"size": "1028",
"binary": false,
"copies": "77",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/sqlalchemy/dialects/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "6472"
},
{
"name": "JavaScript",
"bytes": "6648"
},
{
"name": "Python",
"bytes": "6901716"
},
{
"name": "Shell",
"bytes": "3801"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
"""
Example sentences to test spaCy and its language models.
>>> from spacy.lang.pt.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Apple está querendo comprar uma startup do Reino Unido por 100 milhões de dólares",
"Carros autônomos empurram a responsabilidade do seguro para os fabricantes."
"São Francisco considera banir os robôs de entrega que andam pelas calçadas",
"Londres é a maior cidade do Reino Unido"
]
| {
"content_hash": "dbfbb07d788fcd3e41339423468c4c9e",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 88,
"avg_line_length": 29.705882352941178,
"alnum_prop": 0.7386138613861386,
"repo_name": "recognai/spaCy",
"id": "239929215c1166ede8aed6d7bc0be17bde9e805f",
"size": "528",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "spacy/lang/pt/examples.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "103274"
},
{
"name": "C++",
"bytes": "161734"
},
{
"name": "CSS",
"bytes": "42943"
},
{
"name": "HTML",
"bytes": "904913"
},
{
"name": "JavaScript",
"bytes": "17993"
},
{
"name": "Python",
"bytes": "199094758"
},
{
"name": "Shell",
"bytes": "1091"
}
],
"symlink_target": ""
} |
"""Constants used by Home Assistant components."""
__version__ = "0.25.0.dev0"
REQUIRED_PYTHON_VER = (3, 4)
PLATFORM_FORMAT = '{}.{}'
# Can be used to specify a catch all when registering state or event listeners.
MATCH_ALL = '*'
# If no name is specified
DEVICE_DEFAULT_NAME = "Unnamed Device"
WEEKDAYS = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
SUN_EVENT_SUNSET = 'sunset'
SUN_EVENT_SUNRISE = 'sunrise'
# #### CONFIG ####
CONF_ABOVE = 'above'
CONF_ACCESS_TOKEN = 'access_token'
CONF_AFTER = 'after'
CONF_ALIAS = 'alias'
CONF_API_KEY = 'api_key'
CONF_BEFORE = 'before'
CONF_BELOW = 'below'
CONF_CONDITION = 'condition'
CONF_CUSTOMIZE = 'customize'
CONF_ELEVATION = 'elevation'
CONF_ENTITY_ID = 'entity_id'
CONF_ENTITY_NAMESPACE = 'entity_namespace'
CONF_EVENT = 'event'
CONF_FILENAME = 'filename'
CONF_HOST = 'host'
CONF_HOSTS = 'hosts'
CONF_ICON = 'icon'
CONF_LATITUDE = 'latitude'
CONF_LONGITUDE = 'longitude'
CONF_MONITORED_CONDITIONS = 'monitored_conditions'
CONF_NAME = 'name'
CONF_OFFSET = 'offset'
CONF_OPTIMISTIC = 'optimistic'
CONF_PASSWORD = 'password'
CONF_PLATFORM = 'platform'
CONF_PORT = 'port'
CONF_SCAN_INTERVAL = 'scan_interval'
CONF_STATE = 'state'
CONF_TEMPERATURE_UNIT = 'temperature_unit'
CONF_TIME_ZONE = 'time_zone'
CONF_USERNAME = 'username'
CONF_VALUE_TEMPLATE = 'value_template'
CONF_WEEKDAY = 'weekday'
CONF_ZONE = 'zone'
# #### EVENTS ####
EVENT_HOMEASSISTANT_START = "homeassistant_start"
EVENT_HOMEASSISTANT_STOP = "homeassistant_stop"
EVENT_STATE_CHANGED = "state_changed"
EVENT_TIME_CHANGED = "time_changed"
EVENT_CALL_SERVICE = "call_service"
EVENT_SERVICE_EXECUTED = "service_executed"
EVENT_PLATFORM_DISCOVERED = "platform_discovered"
EVENT_COMPONENT_LOADED = "component_loaded"
EVENT_SERVICE_REGISTERED = "service_registered"
# #### STATES ####
STATE_ON = 'on'
STATE_OFF = 'off'
STATE_HOME = 'home'
STATE_NOT_HOME = 'not_home'
STATE_UNKNOWN = 'unknown'
STATE_OPEN = 'open'
STATE_CLOSED = 'closed'
STATE_PLAYING = 'playing'
STATE_PAUSED = 'paused'
STATE_IDLE = 'idle'
STATE_STANDBY = 'standby'
STATE_ALARM_DISARMED = 'disarmed'
STATE_ALARM_ARMED_HOME = 'armed_home'
STATE_ALARM_ARMED_AWAY = 'armed_away'
STATE_ALARM_PENDING = 'pending'
STATE_ALARM_TRIGGERED = 'triggered'
STATE_LOCKED = 'locked'
STATE_UNLOCKED = 'unlocked'
STATE_UNAVAILABLE = 'unavailable'
# #### STATE AND EVENT ATTRIBUTES ####
# Contains current time for a TIME_CHANGED event
ATTR_NOW = "now"
# Contains domain, service for a SERVICE_CALL event
ATTR_DOMAIN = "domain"
ATTR_SERVICE = "service"
ATTR_SERVICE_DATA = "service_data"
# Data for a SERVICE_EXECUTED event
ATTR_SERVICE_CALL_ID = "service_call_id"
# Contains one string or a list of strings, each being an entity id
ATTR_ENTITY_ID = 'entity_id'
# String with a friendly name for the entity
ATTR_FRIENDLY_NAME = "friendly_name"
# A picture to represent entity
ATTR_ENTITY_PICTURE = "entity_picture"
# Icon to use in the frontend
ATTR_ICON = "icon"
# The unit of measurement if applicable
ATTR_UNIT_OF_MEASUREMENT = "unit_of_measurement"
# Temperature attribute
ATTR_TEMPERATURE = "temperature"
TEMP_CELCIUS = "°C"
TEMP_CELSIUS = "°C"
TEMP_FAHRENHEIT = "°F"
# Contains the information that is discovered
ATTR_DISCOVERED = "discovered"
# Location of the device/sensor
ATTR_LOCATION = "location"
ATTR_BATTERY_LEVEL = "battery_level"
# For devices which support a code attribute
ATTR_CODE = 'code'
ATTR_CODE_FORMAT = 'code_format'
# For devices which support an armed state
ATTR_ARMED = "device_armed"
# For devices which support a locked state
ATTR_LOCKED = "locked"
# For sensors that support 'tripping', eg. motion and door sensors
ATTR_TRIPPED = "device_tripped"
# For sensors that support 'tripping' this holds the most recent
# time the device was tripped
ATTR_LAST_TRIP_TIME = "last_tripped_time"
# For all entity's, this hold whether or not it should be hidden
ATTR_HIDDEN = "hidden"
# Location of the entity
ATTR_LATITUDE = "latitude"
ATTR_LONGITUDE = "longitude"
# Accuracy of location in meters
ATTR_GPS_ACCURACY = 'gps_accuracy'
# If state is assumed
ATTR_ASSUMED_STATE = 'assumed_state'
# #### SERVICES ####
SERVICE_HOMEASSISTANT_STOP = "stop"
SERVICE_HOMEASSISTANT_RESTART = "restart"
SERVICE_TURN_ON = 'turn_on'
SERVICE_TURN_OFF = 'turn_off'
SERVICE_TOGGLE = 'toggle'
SERVICE_VOLUME_UP = "volume_up"
SERVICE_VOLUME_DOWN = "volume_down"
SERVICE_VOLUME_MUTE = "volume_mute"
SERVICE_VOLUME_SET = "volume_set"
SERVICE_MEDIA_PLAY_PAUSE = "media_play_pause"
SERVICE_MEDIA_PLAY = "media_play"
SERVICE_MEDIA_PAUSE = "media_pause"
SERVICE_MEDIA_STOP = "media_stop"
SERVICE_MEDIA_NEXT_TRACK = "media_next_track"
SERVICE_MEDIA_PREVIOUS_TRACK = "media_previous_track"
SERVICE_MEDIA_SEEK = "media_seek"
SERVICE_ALARM_DISARM = "alarm_disarm"
SERVICE_ALARM_ARM_HOME = "alarm_arm_home"
SERVICE_ALARM_ARM_AWAY = "alarm_arm_away"
SERVICE_ALARM_TRIGGER = "alarm_trigger"
SERVICE_LOCK = "lock"
SERVICE_UNLOCK = "unlock"
SERVICE_OPEN = "open"
SERVICE_CLOSE = "close"
SERVICE_MOVE_UP = 'move_up'
SERVICE_MOVE_DOWN = 'move_down'
SERVICE_STOP = 'stop'
# #### API / REMOTE ####
SERVER_PORT = 8123
URL_ROOT = "/"
URL_API = "/api/"
URL_API_STREAM = "/api/stream"
URL_API_CONFIG = "/api/config"
URL_API_DISCOVERY_INFO = "/api/discovery_info"
URL_API_STATES = "/api/states"
URL_API_STATES_ENTITY = "/api/states/{}"
URL_API_EVENTS = "/api/events"
URL_API_EVENTS_EVENT = "/api/events/{}"
URL_API_SERVICES = "/api/services"
URL_API_SERVICES_SERVICE = "/api/services/{}/{}"
URL_API_EVENT_FORWARD = "/api/event_forwarding"
URL_API_COMPONENTS = "/api/components"
URL_API_ERROR_LOG = "/api/error_log"
URL_API_LOG_OUT = "/api/log_out"
URL_API_TEMPLATE = "/api/template"
HTTP_OK = 200
HTTP_CREATED = 201
HTTP_MOVED_PERMANENTLY = 301
HTTP_BAD_REQUEST = 400
HTTP_UNAUTHORIZED = 401
HTTP_NOT_FOUND = 404
HTTP_METHOD_NOT_ALLOWED = 405
HTTP_UNPROCESSABLE_ENTITY = 422
HTTP_INTERNAL_SERVER_ERROR = 500
HTTP_HEADER_HA_AUTH = "X-HA-access"
HTTP_HEADER_ACCEPT_ENCODING = "Accept-Encoding"
HTTP_HEADER_CONTENT_TYPE = "Content-type"
HTTP_HEADER_CONTENT_ENCODING = "Content-Encoding"
HTTP_HEADER_VARY = "Vary"
HTTP_HEADER_CONTENT_LENGTH = "Content-Length"
HTTP_HEADER_CACHE_CONTROL = "Cache-Control"
HTTP_HEADER_EXPIRES = "Expires"
HTTP_HEADER_ORIGIN = "Origin"
HTTP_HEADER_X_REQUESTED_WITH = "X-Requested-With"
HTTP_HEADER_ACCEPT = "Accept"
HTTP_HEADER_ACCESS_CONTROL_ALLOW_ORIGIN = "Access-Control-Allow-Origin"
HTTP_HEADER_ACCESS_CONTROL_ALLOW_HEADERS = "Access-Control-Allow-Headers"
ALLOWED_CORS_HEADERS = [HTTP_HEADER_ORIGIN, HTTP_HEADER_ACCEPT,
HTTP_HEADER_X_REQUESTED_WITH, HTTP_HEADER_CONTENT_TYPE]
CONTENT_TYPE_JSON = "application/json"
CONTENT_TYPE_MULTIPART = 'multipart/x-mixed-replace; boundary={}'
CONTENT_TYPE_TEXT_PLAIN = 'text/plain'
# The exit code to send to request a restart
RESTART_EXIT_CODE = 100
| {
"content_hash": "6cbbab8c738bbeba4cb054c6bd9b379a",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 79,
"avg_line_length": 27.84081632653061,
"alnum_prop": 0.7240873772174168,
"repo_name": "devdelay/home-assistant",
"id": "5d745765fb74976bfdad4660ab17c2977a616272",
"size": "6840",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/const.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1367764"
},
{
"name": "Python",
"bytes": "2777590"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "6430"
}
],
"symlink_target": ""
} |
"""
Invoice views for gunclub
"""
import datetime
from django.core.mail import EmailMessage
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django.shortcuts import render_to_response, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.http import (HttpResponseRedirect,
HttpResponseForbidden,
HttpResponse)
from gunclub import settings
from member.models import Profile
from invoice.models import Invoice
from invoice.forms import EditInvoiceForm
from invoice import pdf
def lower(string):
""" Lower all caracters and replace spaces to -.
"""
return string.lower().replace(' ', '-')
@login_required
def member_invoice(request, member_id):
if not request.user.is_staff:
return HttpResponseForbidden()
member = get_object_or_404(Profile, id=member_id)
year = datetime.date.today().year
membership = member.date_membership.month
for month in range(membership, 13):
due_date = datetime.date(year, month, member.invoice_due_day)
try:
invoice = Invoice.objects.get(user=member.user,
due_date=due_date)
except Invoice.DoesNotExist:
invoice = Invoice.objects.create(user=member.user,
due_date=due_date,
value=settings.INVOICE_VALUE)
invoices = Invoice.objects.filter(user=member.user).order_by('due_date')
return render_to_response('invoice/list.html',
{'invoices': invoices,
'member': member},
context_instance=RequestContext(request))
@login_required
def edit_invoice(request, invoice_id):
if not request.user.is_staff:
return HttpResponseForbidden()
if invoice_id:
invoice = get_object_or_404(Invoice, id=invoice_id)
form = EditInvoiceForm(instance=invoice)
if request.method == 'POST':
form = EditInvoiceForm(request.POST, instance=invoice)
if form.is_valid():
invoice = form.save()
return HttpResponseRedirect(
reverse('member_invoice', kwargs={'member_id': invoice.user.id}))
return render_to_response(
'invoice/edit.html',
{'form': form,
'invoice_id': invoice_id},
context_instance=RequestContext(request)
)
@login_required
def print_invoice_booklet(request, member_id):
if not request.user.is_staff:
return HttpResponseForbidden()
profile = Profile.objects.get(pk=member_id)
invoices = Invoice.objects.filter(is_paid=False,
user=profile.user).order_by('due_date')
pdf_file = pdf.generate_invoice_booklet_pdf(invoices, profile.user, profile)
response = HttpResponse(mimetype='application/pdf')
response['Content-Disposition'] = 'attachment; filename=%s.pdf' % (
lower(profile.user.get_full_name()))
response.write(pdf_file)
return response
@login_required
def print_invoice(request, invoice_id):
if not request.user.is_staff:
return HttpResponseForbidden()
invoice = get_object_or_404(Invoice, id=invoice_id)
profile = invoice.user.profile
pdf_file = pdf.generate_invoice_pdf(invoice.due_date, invoice.value,
invoice.user, profile)
response = HttpResponse(mimetype='application/pdf')
response['Content-Disposition'] = 'attachment; filename=%s-%s.pdf' % (
lower(invoice.user.get_full_name()), invoice.due_date)
response.write(pdf_file)
return response
@login_required
def send_invoice(request, invoice_id):
if not request.user.is_staff:
return HttpResponseForbidden()
invoice = get_object_or_404(Invoice, id=invoice_id)
profile = invoice.user.profile
user = invoice.user
pdf_file = pdf.generate_invoice_pdf(invoice.due_date,
invoice.value,
user, profile)
email = EmailMessage(subject=settings.EMAIL_SUBJECT,
body=settings.EMAIL_BODY,
from_email=settings.EMAIL_FROM,
to=[user.email])
email.attach('%s_%s.pdf' % ( user.get_full_name(), invoice.due_date),
pdf_file, 'application/pdf')
email.send(fail_silently=False)
return render_to_response(
'invoice/email.html',
context_instance=RequestContext(request)
)
| {
"content_hash": "49c7b4408c375a6b701cd6df4b940cde",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 81,
"avg_line_length": 37.49193548387097,
"alnum_prop": 0.6283071628307163,
"repo_name": "wiliamsouza/gunclub",
"id": "543876cca1ab7b7b65249fa6765c524e161fd8bc",
"size": "4649",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gunclub/invoice/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "56250"
},
{
"name": "Python",
"bytes": "45367"
}
],
"symlink_target": ""
} |
"""
Description:
* An interface is defined for creating an object.
* Comparing to simple factory, subclasses decide which class is instantiated.
@author: Paul Bodean
@date: 12/08/2017
"""
from abc import ABCMeta, abstractmethod
from typing import Union
from selenium.webdriver import Chrome, Firefox
from src.factory.pages.menu import Menu
from src.factory.pages.search import Search
class Component(metaclass=ABCMeta):
"""
Abstract class defining how a tested component will look
"""
@abstractmethod
def set_name(self):
pass
@abstractmethod
def get_actions(self, *args):
pass
class SearchComponent(Component, Search):
"""
Each new product will implement specific actions
"""
def set_name(self):
return 'Youtube search component'
def get_actions(self, *args: list):
"""
:type args: list
"""
if args[0] == 'click_search':
self.search()
elif args[0] == 'set_query':
self.set_query(args[1])
else:
            raise NotImplementedError
class MenuComponent(Component, Menu):
"""
Menu specific component are implemented
"""
def set_name(self):
return 'Youtube menu component'
def get_actions(self, *args: list):
"""
:type args: list
"""
if args[0] == 'click_menu':
self.menu_button()
elif args[0] == 'trend':
self.filter_by_trend()
elif args[0] == 'history':
self.filter_by_history()
elif args[0] == 'browse':
self.browse()
else:
            raise NotImplementedError
class TemplateTest(metaclass=ABCMeta):
"""
TestCase abstract class provide a factory method _create_test which should be implemented by concrete classes
"""
def __init__(self):
self.sections = dict()
self.create_test()
@abstractmethod
def create_test(self):
"""
Factory abstract method
"""
pass
def get_sections(self) -> dict:
"""
        :return: all sections to be tested in a TestCase
        :rtype: dict
"""
return self.sections
def add_sections(self, section_key: str, section: object):
"""
:param section_key: section key name
:type section_key: str
:param section: a section to be tested
:type section: object
        :return: None; the section is added to the sections mapping
        :rtype: None
"""
self.sections.update({section_key: section})
class MenuTest(TemplateTest):
"""
Implement Test Menu class
"""
def __init__(self, driver: Union[Chrome, Firefox]):
"""
:param driver: browser driver
:type driver: object
"""
self.__driver = driver
super().__init__()
def create_test(self):
"""
:return: sections to be tested
:rtype: dict
"""
self.add_sections('menu', MenuComponent(self.__driver))
class MenuAndSearchTest(TemplateTest):
"""
Implement a test case for checking menu and search
"""
def __init__(self, driver: Union[Chrome, Firefox]):
"""
:param driver: browser driver
:type driver: object
"""
self.__driver = driver
super().__init__()
def create_test(self):
"""
:return: sections to be tested
:rtype: dict
"""
self.add_sections('menu', MenuComponent(self.__driver))
self.add_sections('search', SearchComponent(self.__driver))
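# Illustrative usage sketch (an assumption, not part of the original module):
# exercising a concrete factory with a real Selenium driver.  The driver
# construction and the query string are placeholders.
def _example_run(driver):
    test = MenuAndSearchTest(driver)
    sections = test.get_sections()
    sections['menu'].get_actions('click_menu')
    sections['search'].get_actions('set_query', 'design patterns')
    sections['search'].get_actions('click_search')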
| {
"content_hash": "f8c44bd479da22554d6662a0a8eb4c58",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 113,
"avg_line_length": 22.70063694267516,
"alnum_prop": 0.5735129068462402,
"repo_name": "paulbodean88/automation-design-patterns",
"id": "cef3a4b7096fb1111903237bca3840746abedfac",
"size": "3564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/factory/factory_method.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40634"
}
],
"symlink_target": ""
} |
"""
kombu.entity
================
Exchange and Queue declarations.
"""
from __future__ import absolute_import
from .abstract import MaybeChannelBound
from .exceptions import ContentDisallowed
from .serialization import prepare_accept_content
TRANSIENT_DELIVERY_MODE = 1
PERSISTENT_DELIVERY_MODE = 2
DELIVERY_MODES = {'transient': TRANSIENT_DELIVERY_MODE,
'persistent': PERSISTENT_DELIVERY_MODE}
__all__ = ['Exchange', 'Queue', 'binding']
def pretty_bindings(bindings):
return '[%s]' % (', '.join(map(str, bindings)))
class Exchange(MaybeChannelBound):
"""An Exchange declaration.
:keyword name: See :attr:`name`.
:keyword type: See :attr:`type`.
:keyword channel: See :attr:`channel`.
:keyword durable: See :attr:`durable`.
:keyword auto_delete: See :attr:`auto_delete`.
:keyword delivery_mode: See :attr:`delivery_mode`.
:keyword arguments: See :attr:`arguments`.
.. attribute:: name
Name of the exchange. Default is no name (the default exchange).
.. attribute:: type
*This description of AMQP exchange types was shamelessly stolen
from the blog post `AMQP in 10 minutes: Part 4`_ by
Rajith Attapattu. Reading this article is recommended if you're
new to amqp.*
"AMQP defines four default exchange types (routing algorithms) that
covers most of the common messaging use cases. An AMQP broker can
also define additional exchange types, so see your broker
manual for more information about available exchange types.
* `direct` (*default*)
Direct match between the routing key in the message, and the
routing criteria used when a queue is bound to this exchange.
* `topic`
Wildcard match between the routing key and the routing pattern
specified in the exchange/queue binding. The routing key is
treated as zero or more words delimited by `"."` and
supports special wildcard characters. `"*"` matches a
single word and `"#"` matches zero or more words.
* `fanout`
Queues are bound to this exchange with no arguments. Hence any
message sent to this exchange will be forwarded to all queues
bound to this exchange.
* `headers`
Queues are bound to this exchange with a table of arguments
containing headers and values (optional). A special argument
named "x-match" determines the matching algorithm, where
`"all"` implies an `AND` (all pairs must match) and
`"any"` implies `OR` (at least one pair must match).
:attr:`arguments` is used to specify the arguments.
.. _`AMQP in 10 minutes: Part 4`:
http://bit.ly/amqp-exchange-types
.. attribute:: channel
The channel the exchange is bound to (if bound).
.. attribute:: durable
Durable exchanges remain active when a server restarts. Non-durable
exchanges (transient exchanges) are purged when a server restarts.
Default is :const:`True`.
.. attribute:: auto_delete
If set, the exchange is deleted when all queues have finished
using it. Default is :const:`False`.
.. attribute:: delivery_mode
The default delivery mode used for messages. The value is an integer,
or alias string.
* 1 or `"transient"`
The message is transient. Which means it is stored in
memory only, and is lost if the server dies or restarts.
* 2 or "persistent" (*default*)
The message is persistent. Which means the message is
stored both in-memory, and on disk, and therefore
preserved if the server dies or restarts.
The default value is 2 (persistent).
.. attribute:: arguments
Additional arguments to specify when the exchange is declared.
"""
TRANSIENT_DELIVERY_MODE = TRANSIENT_DELIVERY_MODE
PERSISTENT_DELIVERY_MODE = PERSISTENT_DELIVERY_MODE
name = ''
type = 'direct'
durable = True
auto_delete = False
passive = False
delivery_mode = PERSISTENT_DELIVERY_MODE
attrs = (
('name', None),
('type', None),
('arguments', None),
('durable', bool),
('passive', bool),
('auto_delete', bool),
('delivery_mode', lambda m: DELIVERY_MODES.get(m) or m),
)
def __init__(self, name='', type='', channel=None, **kwargs):
super(Exchange, self).__init__(**kwargs)
self.name = name or self.name
self.type = type or self.type
self.maybe_bind(channel)
def __hash__(self):
return hash('E|%s' % (self.name, ))
def declare(self, nowait=False, passive=None):
"""Declare the exchange.
Creates the exchange on the broker.
:keyword nowait: If set the server will not respond, and a
response will not be waited for. Default is :const:`False`.
"""
passive = self.passive if passive is None else passive
if self.name:
return self.channel.exchange_declare(
exchange=self.name, type=self.type, durable=self.durable,
auto_delete=self.auto_delete, arguments=self.arguments,
nowait=nowait, passive=passive,
)
def bind_to(self, exchange='', routing_key='',
arguments=None, nowait=False, **kwargs):
"""Binds the exchange to another exchange.
:keyword nowait: If set the server will not respond, and the call
will not block waiting for a response. Default is :const:`False`.
"""
if isinstance(exchange, Exchange):
exchange = exchange.name
return self.channel.exchange_bind(destination=self.name,
source=exchange,
routing_key=routing_key,
nowait=nowait,
arguments=arguments)
def unbind_from(self, source='', routing_key='',
nowait=False, arguments=None):
"""Delete previously created exchange binding from the server."""
if isinstance(source, Exchange):
source = source.name
return self.channel.exchange_unbind(destination=self.name,
source=source,
routing_key=routing_key,
nowait=nowait,
arguments=arguments)
def Message(self, body, delivery_mode=None, priority=None,
content_type=None, content_encoding=None,
properties=None, headers=None):
"""Create message instance to be sent with :meth:`publish`.
:param body: Message body.
:keyword delivery_mode: Set custom delivery mode. Defaults
to :attr:`delivery_mode`.
:keyword priority: Message priority, 0 to 9. (currently not
supported by RabbitMQ).
:keyword content_type: The messages content_type. If content_type
is set, no serialization occurs as it is assumed this is either
a binary object, or you've done your own serialization.
Leave blank if using built-in serialization as our library
properly sets content_type.
:keyword content_encoding: The character set in which this object
is encoded. Use "binary" if sending in raw binary objects.
Leave blank if using built-in serialization as our library
properly sets content_encoding.
:keyword properties: Message properties.
:keyword headers: Message headers.
"""
properties = {} if properties is None else properties
dm = delivery_mode or self.delivery_mode
properties['delivery_mode'] = \
DELIVERY_MODES[dm] if (dm != 2 and dm != 1) else dm
return self.channel.prepare_message(body,
properties=properties,
priority=priority,
content_type=content_type,
content_encoding=content_encoding,
headers=headers)
def publish(self, message, routing_key=None, mandatory=False,
immediate=False, exchange=None):
"""Publish message.
:param message: :meth:`Message` instance to publish.
:param routing_key: Routing key.
:param mandatory: Currently not supported.
:param immediate: Currently not supported.
"""
exchange = exchange or self.name
return self.channel.basic_publish(message,
exchange=exchange,
routing_key=routing_key,
mandatory=mandatory,
immediate=immediate)
def delete(self, if_unused=False, nowait=False):
"""Delete the exchange declaration on server.
:keyword if_unused: Delete only if the exchange has no bindings.
Default is :const:`False`.
:keyword nowait: If set the server will not respond, and a
response will not be waited for. Default is :const:`False`.
"""
return self.channel.exchange_delete(exchange=self.name,
if_unused=if_unused,
nowait=nowait)
def binding(self, routing_key='', arguments=None, unbind_arguments=None):
return binding(self, routing_key, arguments, unbind_arguments)
def __eq__(self, other):
if isinstance(other, Exchange):
return (self.name == other.name and
self.type == other.type and
self.arguments == other.arguments and
self.durable == other.durable and
self.auto_delete == other.auto_delete and
self.delivery_mode == other.delivery_mode)
return NotImplemented
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return super(Exchange, self).__repr__(str(self))
def __str__(self):
return 'Exchange %s(%s)' % (self.name or repr(''), self.type)
@property
def can_cache_declaration(self):
return self.durable and not self.auto_delete
class binding(object):
"""Represents a queue or exchange binding.
:keyword exchange: Exchange to bind to.
:keyword routing_key: Routing key used as binding key.
:keyword arguments: Arguments for bind operation.
:keyword unbind_arguments: Arguments for unbind operation.
"""
def __init__(self, exchange=None, routing_key='',
arguments=None, unbind_arguments=None):
self.exchange = exchange
self.routing_key = routing_key
self.arguments = arguments
self.unbind_arguments = unbind_arguments
def declare(self, channel, nowait=False):
"""Declare destination exchange."""
if self.exchange and self.exchange.name:
ex = self.exchange(channel)
ex.declare(nowait=nowait)
def bind(self, entity, nowait=False):
"""Bind entity to this binding."""
entity.bind_to(exchange=self.exchange,
routing_key=self.routing_key,
arguments=self.arguments,
nowait=nowait)
def unbind(self, entity, nowait=False):
"""Unbind entity from this binding."""
entity.unbind_from(self.exchange,
routing_key=self.routing_key,
arguments=self.unbind_arguments,
nowait=nowait)
def __repr__(self):
return '<binding: %s>' % (self, )
def __str__(self):
return '%s->%s' % (self.exchange.name, self.routing_key)
class Queue(MaybeChannelBound):
"""A Queue declaration.
:keyword name: See :attr:`name`.
:keyword exchange: See :attr:`exchange`.
:keyword routing_key: See :attr:`routing_key`.
:keyword channel: See :attr:`channel`.
:keyword durable: See :attr:`durable`.
:keyword exclusive: See :attr:`exclusive`.
:keyword auto_delete: See :attr:`auto_delete`.
:keyword queue_arguments: See :attr:`queue_arguments`.
:keyword binding_arguments: See :attr:`binding_arguments`.
:keyword on_declared: See :attr:`on_declared`
.. attribute:: name
Name of the queue. Default is no name (default queue destination).
.. attribute:: exchange
The :class:`Exchange` the queue binds to.
.. attribute:: routing_key
The routing key (if any), also called *binding key*.
The interpretation of the routing key depends on
the :attr:`Exchange.type`.
* direct exchange
Matches if the routing key property of the message and
the :attr:`routing_key` attribute are identical.
* fanout exchange
Always matches, even if the binding does not have a key.
* topic exchange
Matches the routing key property of the message by a primitive
pattern matching scheme. The message routing key then consists
of words separated by dots (`"."`, like domain names), and
two special characters are available; star (`"*"`) and hash
(`"#"`). The star matches any word, and the hash matches
zero or more words. For example `"*.stock.#"` matches the
routing keys `"usd.stock"` and `"eur.stock.db"` but not
`"stock.nasdaq"`.
.. attribute:: channel
The channel the Queue is bound to (if bound).
.. attribute:: durable
Durable queues remain active when a server restarts.
Non-durable queues (transient queues) are purged if/when
a server restarts.
Note that durable queues do not necessarily hold persistent
messages, although it does not make sense to send
persistent messages to a transient queue.
Default is :const:`True`.
.. attribute:: exclusive
Exclusive queues may only be consumed from by the
current connection. Setting the 'exclusive' flag
always implies 'auto-delete'.
Default is :const:`False`.
.. attribute:: auto_delete
If set, the queue is deleted when all consumers have
finished using it. Last consumer can be cancelled
either explicitly or because its channel is closed. If
there was no consumer ever on the queue, it won't be
deleted.
.. attribute:: queue_arguments
Additional arguments used when declaring the queue.
.. attribute:: binding_arguments
Additional arguments used when binding the queue.
.. attribute:: alias
Unused in Kombu, but applications can take advantage of this.
For example to give alternate names to queues with automatically
generated queue names.
.. attribute:: on_declared
Optional callback to be applied when the queue has been
declared (the ``queue_declare`` operation is complete).
This must be a function with a signature that accepts at least 3
positional arguments: ``(name, messages, consumers)``.
"""
ContentDisallowed = ContentDisallowed
name = ''
exchange = Exchange('')
routing_key = ''
durable = True
exclusive = False
auto_delete = False
no_ack = False
attrs = (
('name', None),
('exchange', None),
('routing_key', None),
('queue_arguments', None),
('binding_arguments', None),
('durable', bool),
('exclusive', bool),
('auto_delete', bool),
('no_ack', None),
('alias', None),
('bindings', list),
)
def __init__(self, name='', exchange=None, routing_key='',
channel=None, bindings=None, on_declared=None,
**kwargs):
super(Queue, self).__init__(**kwargs)
self.name = name or self.name
self.exchange = exchange or self.exchange
self.routing_key = routing_key or self.routing_key
self.bindings = set(bindings or [])
self.on_declared = on_declared
# allows Queue('name', [binding(...), binding(...), ...])
if isinstance(exchange, (list, tuple, set)):
self.bindings |= set(exchange)
if self.bindings:
self.exchange = None
# exclusive implies auto-delete.
if self.exclusive:
self.auto_delete = True
self.maybe_bind(channel)
def bind(self, channel):
on_declared = self.on_declared
bound = super(Queue, self).bind(channel)
bound.on_declared = on_declared
return bound
def __hash__(self):
return hash('Q|%s' % (self.name, ))
def when_bound(self):
if self.exchange:
self.exchange = self.exchange(self.channel)
def declare(self, nowait=False):
"""Declares the queue, the exchange and binds the queue to
the exchange."""
# - declare main binding.
if self.exchange:
self.exchange.declare(nowait)
self.queue_declare(nowait, passive=False)
if self.exchange and self.exchange.name:
self.queue_bind(nowait)
# - declare extra/multi-bindings.
for B in self.bindings:
B.declare(self.channel)
B.bind(self, nowait=nowait)
return self.name
def queue_declare(self, nowait=False, passive=False):
"""Declare queue on the server.
:keyword nowait: Do not wait for a reply.
:keyword passive: If set, the server will not create the queue.
The client can use this to check whether a queue exists
without modifying the server state.
"""
ret = self.channel.queue_declare(queue=self.name,
passive=passive,
durable=self.durable,
exclusive=self.exclusive,
auto_delete=self.auto_delete,
arguments=self.queue_arguments,
nowait=nowait)
if not self.name:
self.name = ret[0]
if self.on_declared:
self.on_declared(*ret)
return ret
def queue_bind(self, nowait=False):
"""Create the queue binding on the server."""
return self.bind_to(self.exchange, self.routing_key,
self.binding_arguments, nowait=nowait)
def bind_to(self, exchange='', routing_key='',
arguments=None, nowait=False):
if isinstance(exchange, Exchange):
exchange = exchange.name
return self.channel.queue_bind(queue=self.name,
exchange=exchange,
routing_key=routing_key,
arguments=arguments,
nowait=nowait)
def get(self, no_ack=None, accept=None):
"""Poll the server for a new message.
Must return the message if a message was available,
or :const:`None` otherwise.
:keyword no_ack: If enabled the broker will automatically
ack messages.
:keyword accept: Custom list of accepted content types.
This method provides direct access to the messages in a
queue using a synchronous dialogue, designed for
specific types of applications where synchronous functionality
is more important than performance.
"""
no_ack = self.no_ack if no_ack is None else no_ack
message = self.channel.basic_get(queue=self.name, no_ack=no_ack)
if message is not None:
m2p = getattr(self.channel, 'message_to_python', None)
if m2p:
message = m2p(message)
message.accept = prepare_accept_content(accept)
return message
def purge(self, nowait=False):
"""Remove all ready messages from the queue."""
return self.channel.queue_purge(queue=self.name,
nowait=nowait) or 0
def consume(self, consumer_tag='', callback=None,
no_ack=None, nowait=False):
"""Start a queue consumer.
Consumers last as long as the channel they were created on, or
until the client cancels them.
:keyword consumer_tag: Unique identifier for the consumer. The
consumer tag is local to a connection, so two clients
can use the same consumer tags. If this field is empty
the server will generate a unique tag.
:keyword no_ack: If enabled the broker will automatically ack
messages.
:keyword nowait: Do not wait for a reply.
:keyword callback: callback called for each delivered message
"""
if no_ack is None:
no_ack = self.no_ack
return self.channel.basic_consume(queue=self.name,
no_ack=no_ack,
consumer_tag=consumer_tag or '',
callback=callback,
nowait=nowait)
def cancel(self, consumer_tag):
"""Cancel a consumer by consumer tag."""
return self.channel.basic_cancel(consumer_tag)
def delete(self, if_unused=False, if_empty=False, nowait=False):
"""Delete the queue.
:keyword if_unused: If set, the server will only delete the queue
if it has no consumers. A channel error will be raised
if the queue has consumers.
:keyword if_empty: If set, the server will only delete the queue
if it is empty. If it is not empty a channel error will be raised.
:keyword nowait: Do not wait for a reply.
"""
return self.channel.queue_delete(queue=self.name,
if_unused=if_unused,
if_empty=if_empty,
nowait=nowait)
def queue_unbind(self, arguments=None, nowait=False):
return self.unbind_from(self.exchange, self.routing_key,
arguments, nowait)
def unbind_from(self, exchange='', routing_key='',
arguments=None, nowait=False):
"""Unbind queue by deleting the binding from the server."""
return self.channel.queue_unbind(queue=self.name,
exchange=exchange.name,
routing_key=routing_key,
arguments=arguments,
nowait=nowait)
def __eq__(self, other):
if isinstance(other, Queue):
return (self.name == other.name and
self.exchange == other.exchange and
self.routing_key == other.routing_key and
self.queue_arguments == other.queue_arguments and
self.binding_arguments == other.binding_arguments and
self.durable == other.durable and
self.exclusive == other.exclusive and
self.auto_delete == other.auto_delete)
return NotImplemented
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
s = super(Queue, self).__repr__
if self.bindings:
return s('Queue {0.name} -> {bindings}'.format(
self, bindings=pretty_bindings(self.bindings),
))
return s(
'Queue {0.name} -> {0.exchange!r} -> {0.routing_key}'.format(
self))
@property
def can_cache_declaration(self):
return self.durable and not self.auto_delete
@classmethod
def from_dict(cls, queue, **options):
binding_key = options.get('binding_key') or options.get('routing_key')
e_durable = options.get('exchange_durable')
if e_durable is None:
e_durable = options.get('durable')
e_auto_delete = options.get('exchange_auto_delete')
if e_auto_delete is None:
e_auto_delete = options.get('auto_delete')
q_durable = options.get('queue_durable')
if q_durable is None:
q_durable = options.get('durable')
q_auto_delete = options.get('queue_auto_delete')
if q_auto_delete is None:
q_auto_delete = options.get('auto_delete')
e_arguments = options.get('exchange_arguments')
q_arguments = options.get('queue_arguments')
b_arguments = options.get('binding_arguments')
bindings = options.get('bindings')
exchange = Exchange(options.get('exchange'),
type=options.get('exchange_type'),
delivery_mode=options.get('delivery_mode'),
routing_key=options.get('routing_key'),
durable=e_durable,
auto_delete=e_auto_delete,
arguments=e_arguments)
return Queue(queue,
exchange=exchange,
routing_key=binding_key,
durable=q_durable,
exclusive=options.get('exclusive'),
auto_delete=q_auto_delete,
no_ack=options.get('no_ack'),
queue_arguments=q_arguments,
binding_arguments=b_arguments,
bindings=bindings)
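# A minimal usage sketch (not part of kombu itself; the queue, exchange and
# routing-key names below are made up for illustration): it shows how the
# ``from_dict`` options above are turned into an Exchange plus a bound Queue,
# and exercises the ``__repr__`` defined earlier. No broker connection is
# needed just to build the unbound entities.
if __name__ == '__main__':  # pragma: no cover
    demo_queue = Queue.from_dict(
        'video_tasks',            # queue name
        exchange='media',         # wrapped in an Exchange(...) internally
        exchange_type='direct',
        routing_key='video',      # also used as the binding key
        durable=True,             # applied to both exchange and queue
        auto_delete=False)
    print(demo_queue)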
| {
"content_hash": "7c90a183878c24f39815c17b2404b698",
"timestamp": "",
"source": "github",
"line_count": 716,
"max_line_length": 78,
"avg_line_length": 36.710893854748605,
"alnum_prop": 0.5677002092448165,
"repo_name": "1stvamp/kombu",
"id": "5bf1470b9c8cf139ea2b455d2e934b1be7083aea",
"size": "26285",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "kombu/entity.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from django.test import TestCase, Client
from .models import BoardGame, EditorGame
class BoardGameTest(TestCase):
def test_create_a_board_game(self):
editor = EditorGame(name="Iello")
boardgame = BoardGame(name="Le village de tiercelieux", min_player=1,
max_player=18,
description="Lorem ipsum dolor sit amet",
editor=editor)
self.assertEqual(boardgame.name, "Le village de tiercelieux")
self.assertEqual(boardgame.min_player, 1)
self.assertEqual(boardgame.max_player, 18)
self.assertEqual(boardgame.description, "Lorem ipsum dolor sit amet")
class BoardGameView(TestCase):
def setUp(self):
self.client = Client()
def test_list_board_game(self):
response = self.client.get('/boardgames/')
self.assertEqual(response.status_code, 200)
class EditorGameTest(TestCase):
def test_create_a_editor_game(self):
editor = EditorGame(name="Iello")
self.assertEqual(editor.name, 'Iello')
| {
"content_hash": "3b9cc5eb0a1bb9669cd2c61d8bc3579a",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 77,
"avg_line_length": 35.86666666666667,
"alnum_prop": 0.6394052044609665,
"repo_name": "fbentz/vedasboardgame",
"id": "ac19e586136c01323d4e1a97b7bf8198a2a7b63a",
"size": "1076",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vedasboardgame/boardgame/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10771"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('status', '0010_auto_20150216_1614'),
]
operations = [
migrations.AlterModelOptions(
name='incidentupdate',
options={'ordering': ['created'], 'get_latest_by': 'created', 'verbose_name': 'Incident Update', 'verbose_name_plural': 'Incident Updates'},
),
]
| {
"content_hash": "599539242e82c72edd6d4e79dea7f3aa",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 152,
"avg_line_length": 27.11764705882353,
"alnum_prop": 0.6247288503253796,
"repo_name": "darkpixel/statuspage",
"id": "5fae2ee5045dad4ddc48e27050dd71d0506a030d",
"size": "485",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "status/migrations/0011_auto_20150217_1933.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "17084"
},
{
"name": "Python",
"bytes": "32227"
}
],
"symlink_target": ""
} |
"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class PaymentLimits(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'average_amount': (int,), # noqa: E501
'maximum_amount': (int,), # noqa: E501
'monthly_amount': (int,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'average_amount': 'average_amount', # noqa: E501
'maximum_amount': 'maximum_amount', # noqa: E501
'monthly_amount': 'monthly_amount', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, average_amount, maximum_amount, monthly_amount, *args, **kwargs): # noqa: E501
"""PaymentLimits - a model defined in OpenAPI
Args:
average_amount (int): Average payment amount, in dollars.
maximum_amount (int): Maximum payment amount, in dollars.
monthly_amount (int): Monthly payment amount, in dollars.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.average_amount = average_amount
self.maximum_amount = maximum_amount
self.monthly_amount = monthly_amount
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| {
"content_hash": "bbe13ed143fe688578fae8a83a303747",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 110,
"avg_line_length": 40.26519337016575,
"alnum_prop": 0.5617453347969265,
"repo_name": "plaid/plaid-python",
"id": "c2ebfea54e04a8324ceb9049c4a2e8e8e7c724aa",
"size": "7288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plaid/model/payment_limits.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "323"
},
{
"name": "Makefile",
"bytes": "622"
},
{
"name": "Mustache",
"bytes": "125163"
},
{
"name": "Python",
"bytes": "9342874"
}
],
"symlink_target": ""
} |
"""Add Skipped enum value
Revision ID: 8b536bc5d716
Revises: 40c86deefd01
Create Date: 2020-04-27 12:12:47.075110
"""
# revision identifiers, used by Alembic.
revision = '8b536bc5d716'
down_revision = '40c86deefd01'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils.types import TSVectorType
from sqlalchemy_searchable import make_searchable
import sqlalchemy_utils
# Patch in knowledge of the citext type, so it reflects properly.
from sqlalchemy.dialects.postgresql.base import ischema_names
import citext
import queue
import datetime
from sqlalchemy.dialects.postgresql import ENUM
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.dialects.postgresql import TSVECTOR
ischema_names['citext'] = citext.CIText
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.execute("COMMIT")
op.execute("ALTER TYPE dlstate_enum ADD VALUE 'skipped';")
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
### end Alembic commands ###
pass | {
"content_hash": "463f741d5f8f1310a6574e9eb8724f8f",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 65,
"avg_line_length": 25.88888888888889,
"alnum_prop": 0.7648068669527897,
"repo_name": "fake-name/ReadableWebProxy",
"id": "e6a21730a790b34aefe97bd8a1ada0ec691a6f72",
"size": "1165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alembic/versions/2020-04-27_8b536bc5d716_add_skipped_enum_value.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
from django.core.urlresolvers import reverse
from nose.tools import eq_
from pyquery import PyQuery as pq
import amo
import amo.tests
from amo.tests import app_factory
from mkt.developers.models import PreloadTestPlan
from mkt.operators.views import preloads
from mkt.site.fixtures import fixture
from mkt.users.models import UserProfile
class TestPreloadCandidates(amo.tests.TestCase):
fixtures = fixture('user_operator')
def setUp(self):
self.create_switch('preload-apps')
self.url = reverse('operators.preloads')
self.user = UserProfile.objects.get()
self.app = app_factory()
def _preload_factory(self):
return PreloadTestPlan.objects.create(addon=app_factory(),
filename='tstpn')
def test_preloads(self):
plan = self._preload_factory()
req = amo.tests.req_factory_factory(self.url, user=self.user)
res = preloads(req)
eq_(res.status_code, 200)
doc = pq(res.content)
eq_(doc('tbody tr').length, 1)
eq_(doc('td:last-child a').attr('href'),
plan.preload_test_plan_url)
| {
"content_hash": "9104b2e242c764efbf11d5f1ca015a1a",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 69,
"avg_line_length": 31.027027027027028,
"alnum_prop": 0.6567944250871081,
"repo_name": "andymckay/zamboni",
"id": "a3475beba89eaffbd659f5eff64cbd8afd542052",
"size": "1148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mkt/operators/tests/test_views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "357533"
},
{
"name": "JavaScript",
"bytes": "524153"
},
{
"name": "Python",
"bytes": "3863676"
},
{
"name": "Shell",
"bytes": "14980"
}
],
"symlink_target": ""
} |
"""Multiprocessing Pool test double"""
class MockTub(object):
"""Class to mock multiprocessing.Pool
"""
def __init__(self, main_worker, args):
"""Create a new instance"""
main_worker(*args)
def imap_unordered(self, func, iterable):
"""Map the func over the iterable"""
for item in iterable:
yield func(item)
def close(self):
"""The pool is closed"""
pass
def join(self):
"""Everybody out of the pool"""
pass
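# Minimal usage sketch (illustrative only, not part of the original module):
# MockTub stands in for multiprocessing.Pool in tests, running the "main
# worker" inline at construction time and mapping synchronously.
if __name__ == '__main__':
    collected = []

    def fake_main_worker(tag):
        # Runs immediately inside MockTub.__init__, no subprocess involved.
        collected.append(tag)

    tub = MockTub(fake_main_worker, ('setup',))
    squares = list(tub.imap_unordered(lambda n: n * n, [1, 2, 3]))
    tub.close()
    tub.join()
    assert collected == ['setup'] and squares == [1, 4, 9]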
| {
"content_hash": "6562276a853a835beb0d7f5d423d4702",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 45,
"avg_line_length": 22.347826086956523,
"alnum_prop": 0.5603112840466926,
"repo_name": "mapbox/rio-mucho",
"id": "e1d06d93e30d254d25fa11ed639529da50f5bcb1",
"size": "514",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "riomucho/single_process_pool.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15888"
}
],
"symlink_target": ""
} |
__doc__ = """Code by Benjamin S. Murphy
[email protected]
Dependencies:
numpy
Callable Methods:
write_asc_grid(X, Y, Z, filename='output.asc', style=1): Writes an MxN data grid
to an ASCII grid file (.*asc).
Inputs:
X (array-like, dim Nx1): X-coordinates of grid points at center
of cells.
Y (array-like, dim Mx1): Y-coordinates of grid points at center
of cells.
Z (array-like, dim MxN): Gridded data values. May be a masked array.
filename (string, optional): Name of output *.asc file.
style (int, optional): Determines how to write the *.asc file
header. Specifying 1 writes out DX, DY, XLLCENTER, YLLCENTER.
Specifying 2 writes out CELLSIZE (note DX must be the same
as DY), XLLCORNER, YLLCORNER. Default is 1.
read_asc_grid(filename, footer=0): Reads ASCII grid file (*.asc).
Inputs:
filename (string): Name of *.asc file.
footer (int, optional): Number of lines at bottom of *.asc file to skip.
Outputs:
grid_array (numpy array): MxN array of grid values,
where M is number of Y-coordinates and N is number
of X-coordinates. The array entry corresponding to
the lower-left coordinates is at index [M, 0], so that
the array is oriented as it would be in X-Y space.
x (numpy array): 1D array of N X-coordinates.
y (numpy array): 1D array of M Y-coordinates.
CELLSIZE (tuple or float): Either a two-tuple of (x-cell size,
y-cell size), or a float that specifies the uniform cell size.
NODATA (float): Value that specifies which entries are not
actual data.
Copyright (c) 2015 Benjamin S. Murphy
"""
import numpy as np
def write_asc_grid(x, y, z, filename='output.asc', style=1):
"""Writes gridded data to ASCII grid file (*.asc)"""
if np.ma.is_masked(z):
z = np.array(z.tolist(-999.))
x = np.squeeze(np.array(x))
y = np.squeeze(np.array(y))
z = np.squeeze(np.array(z))
nrows = z.shape[0]
ncols = z.shape[1]
if z.ndim != 2:
raise ValueError("Two-dimensional grid is required to write *.asc grid.")
if x.ndim > 1 or y.ndim > 1:
raise ValueError("Dimensions of X and/or Y coordinate arrays are not as "
"expected. Could not write *.asc grid.")
if z.shape != (y.size, x.size):
print "WARNING: Grid dimensions are not as expected. " \
"Incorrect *.asc file generation may result."
if np.amin(x) != x[0] or np.amin(y) != y[0]:
print "WARNING: Order of X or Y coordinates is not as expected. " \
"Incorrect *.asc file generation may result."
dx = abs(x[1] - x[0])
dy = abs(y[1] - y[0])
if abs((x[-1] - x[0])/(x.shape[0] - 1)) != dx or \
abs((y[-1] - y[0])/(y.shape[0] - 1)) != dy:
raise ValueError("X or Y spacing is not constant; *.asc grid cannot "
"be written.")
cellsize = -1
if style == 2:
if dx != dy:
raise ValueError("X and Y spacing is not the same. Cannot write "
"*.asc file in the specified format.")
cellsize = dx
xllcenter = x[0]
yllcenter = y[0]
xllcorner = -1 # Note that these values are flagged as -1. If there is a problem in trying
yllcorner = -1 # to write out style 2, the -1 value will appear in the output file.
if style == 2:
xllcorner = xllcenter - dx/2.0
yllcorner = yllcenter - dy/2.0
no_data = -999.
with open(filename, 'w') as f:
if style == 1:
f.write("NCOLS " + '{:<10n}'.format(ncols) + '\n')
f.write("NROWS " + '{:<10n}'.format(nrows) + '\n')
f.write("XLLCENTER " + '{:<10.2f}'.format(xllcenter) + '\n')
f.write("YLLCENTER " + '{:<10.2f}'.format(yllcenter) + '\n')
f.write("DX " + '{:<10.2f}'.format(dx) + '\n')
f.write("DY " + '{:<10.2f}'.format(dy) + '\n')
f.write("NODATA_VALUE " + '{:<10.2f}'.format(no_data) + '\n')
elif style == 2:
f.write("NCOLS " + '{:<10n}'.format(ncols) + '\n')
f.write("NROWS " + '{:<10n}'.format(nrows) + '\n')
f.write("XLLCORNER " + '{:<10.2f}'.format(xllcorner) + '\n')
f.write("YLLCORNER " + '{:<10.2f}'.format(yllcorner) + '\n')
f.write("CELLSIZE " + '{:<10.2f}'.format(cellsize) + '\n')
f.write("NODATA_VALUE " + '{:<10.2f}'.format(no_data) + '\n')
else:
raise ValueError("style kwarg must be either 1 or 2.")
for m in range(z.shape[0] - 1, -1, -1):
for n in range(z.shape[1]):
f.write('{:<16.2f}'.format(z[m, n]))
if m != 0:
f.write('\n')
def read_asc_grid(filename, footer=0):
"""Reads ASCII grid file (*.asc).
footer kwarg specifies how many lines at end of *.asc file to skip.
Returns a NumPy array of the values (dim MxN, where M is
the number of Y-coordinates and N is the number of
X-coordinates); a NumPy array of the X-coordinates (dim N);
a NumPy array of the Y-coordinates (dim M); either a tuple
of the grid cell size in the x direction and the grid cell
size in the y direction (DX, DY) or the uniform grid cell size;
and the NO_DATA value.
"""
ncols = None
nrows = None
xllcorner = None
xllcenter = None
yllcorner = None
yllcenter = None
cellsize = None
dx = None
dy = None
no_data = None
header_lines = 0
with open(filename, 'rU') as f:
while True:
string, value = f.readline().split()
header_lines += 1
if string.lower() == 'ncols':
ncols = int(value)
elif string.lower() == 'nrows':
nrows = int(value)
elif string.lower() == 'xllcorner':
xllcorner = float(value)
elif string.lower() == 'xllcenter':
xllcenter = float(value)
elif string.lower() == 'yllcorner':
yllcorner = float(value)
elif string.lower() == 'yllcenter':
yllcenter = float(value)
elif string.lower() == 'cellsize':
cellsize = float(value)
elif string.lower() == 'cell_size':
cellsize = float(value)
elif string.lower() == 'dx':
dx = float(value)
elif string.lower() == 'dy':
dy = float(value)
elif string.lower() == 'nodata_value':
no_data = float(value)
elif string.lower() == 'nodatavalue':
no_data = float(value)
else:
raise IOError("could not read *.asc file. Error in header.")
if (ncols is not None) and (nrows is not None) and \
(((xllcorner is not None) and (yllcorner is not None)) or
((xllcenter is not None) and (yllcenter is not None))) and \
((cellsize is not None) or ((dx is not None) and (dy is not None))) and \
(no_data is not None):
break
raw_grid_array = np.genfromtxt(filename, skip_header=header_lines, skip_footer=footer)
grid_array = np.flipud(raw_grid_array)
if nrows != grid_array.shape[0] or ncols != grid_array.shape[1]:
raise IOError("Error reading *.asc file. Encountered problem "
"with header: NCOLS and/or NROWS does not match "
"number of columns/rows in data file body.")
if xllcorner is not None and yllcorner is not None:
if dx is not None and dy is not None:
xllcenter = xllcorner + dx/2.0
yllcenter = yllcorner + dy/2.0
else:
xllcenter = xllcorner + cellsize/2.0
yllcenter = yllcorner + cellsize/2.0
if dx is not None and dy is not None:
x = np.arange(xllcenter, xllcenter + ncols*dx, dx)
y = np.arange(yllcenter, yllcenter + nrows*dy, dy)
else:
x = np.arange(xllcenter, xllcenter + ncols*cellsize, cellsize)
y = np.arange(yllcenter, yllcenter + nrows*cellsize, cellsize)
# Sometimes x and y can each be an entry too long due to imprecision in calculating
# the upper cutoff for np.arange(); this bit takes care of that potential problem.
if x.size == ncols + 1:
x = x[:-1]
if y.size == nrows + 1:
y = y[:-1]
if cellsize is None:
cellsize = (dx, dy)
return grid_array, x, y, cellsize, no_data
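# Round-trip sketch (illustrative only; the file name and grid values are made
# up): write a small grid with write_asc_grid, read it back with read_asc_grid,
# and check the recovered shape, cell size and NODATA value.
if __name__ == '__main__':
    gx = np.arange(0.0, 5.0, 1.0)
    gy = np.arange(0.0, 3.0, 1.0)
    gz = np.arange(15.0).reshape((3, 5))
    write_asc_grid(gx, gy, gz, filename='demo_grid.asc', style=2)
    grid, rx, ry, cell, nodata = read_asc_grid('demo_grid.asc')
    assert grid.shape == (3, 5) and cell == 1.0 and nodata == -999.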
| {
"content_hash": "c36248b79eb1960406d8d6721364b59b",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 96,
"avg_line_length": 42.394366197183096,
"alnum_prop": 0.5287929125138427,
"repo_name": "yejingxin/PyKrige",
"id": "acad63d548372b5a42eb24daccb31775ec789a12",
"size": "9030",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pykrige/kriging_tools.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "288427"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/vehicle/component/shared_dx_disruptor_array.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "73e1ce02229b5277a92b9dabd10a5a27",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 91,
"avg_line_length": 24.615384615384617,
"alnum_prop": 0.703125,
"repo_name": "anhstudios/swganh",
"id": "10abf685175c4c3bc01218fa4610df9038008443",
"size": "465",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/draft_schematic/vehicle/component/shared_dx_disruptor_array.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.hdinsight import HDInsightManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-hdinsight
# USAGE
python create_linux_hadoop_secure_hadoop.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = HDInsightManagementClient(
credential=DefaultAzureCredential(),
subscription_id="subid",
)
response = client.clusters.begin_create(
resource_group_name="rg1",
cluster_name="cluster1",
parameters={
"properties": {
"clusterDefinition": {
"configurations": {
"gateway": {
"restAuthCredential.isEnabled": True,
"restAuthCredential.password": "**********",
"restAuthCredential.username": "admin",
}
},
"kind": "Hadoop",
},
"clusterVersion": "3.5",
"computeProfile": {
"roles": [
{
"hardwareProfile": {"vmSize": "Standard_D3_V2"},
"minInstanceCount": 1,
"name": "headnode",
"osProfile": {
"linuxOperatingSystemProfile": {
"password": "**********",
"sshProfile": {"publicKeys": [{"certificateData": "**********"}]},
"username": "sshuser",
}
},
"scriptActions": [],
"targetInstanceCount": 2,
"virtualNetworkProfile": {
"id": "/subscriptions/subId/resourceGroups/rg/providers/Microsoft.Network/virtualNetworks/vnetname",
"subnet": "/subscriptions/subId/resourceGroups/rg/providers/Microsoft.Network/virtualNetworks/vnetname/subnets/vnetsubnet",
},
},
{
"hardwareProfile": {"vmSize": "Standard_D3_V2"},
"minInstanceCount": 1,
"name": "workernode",
"osProfile": {
"linuxOperatingSystemProfile": {
"password": "**********",
"sshProfile": {"publicKeys": [{"certificateData": "**********"}]},
"username": "sshuser",
}
},
"scriptActions": [],
"targetInstanceCount": 4,
"virtualNetworkProfile": {
"id": "/subscriptions/subId/resourceGroups/rg/providers/Microsoft.Network/virtualNetworks/vnetname",
"subnet": "/subscriptions/subId/resourceGroups/rg/providers/Microsoft.Network/virtualNetworks/vnetname/subnets/vnetsubnet",
},
},
{
"hardwareProfile": {"vmSize": "Small"},
"minInstanceCount": 1,
"name": "zookeepernode",
"osProfile": {
"linuxOperatingSystemProfile": {
"password": "**********",
"sshProfile": {"publicKeys": [{"certificateData": "**********"}]},
"username": "sshuser",
}
},
"scriptActions": [],
"targetInstanceCount": 3,
"virtualNetworkProfile": {
"id": "/subscriptions/subId/resourceGroups/rg/providers/Microsoft.Network/virtualNetworks/vnetname",
"subnet": "/subscriptions/subId/resourceGroups/rg/providers/Microsoft.Network/virtualNetworks/vnetname/subnets/vnetsubnet",
},
},
]
},
"osType": "Linux",
"securityProfile": {
"clusterUsersGroupDNs": ["hdiusers"],
"directoryType": "ActiveDirectory",
"domain": "DomainName",
"domainUserPassword": "**********",
"domainUsername": "DomainUsername",
"ldapsUrls": ["ldaps://10.10.0.4:636"],
"organizationalUnitDN": "OU=Hadoop,DC=hdinsight,DC=test",
},
"storageProfile": {
"storageaccounts": [
{
"container": "containername",
"isDefault": True,
"key": "storage account key",
"name": "mystorage.blob.core.windows.net",
}
]
},
"tier": "Premium",
},
"tags": {"key1": "val1"},
},
).result()
print(response)
# x-ms-original-file: specification/hdinsight/resource-manager/Microsoft.HDInsight/stable/2021-06-01/examples/CreateLinuxHadoopSecureHadoop.json
if __name__ == "__main__":
main()
| {
"content_hash": "d04463d0767d7a9fb14c622047f1920c",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 155,
"avg_line_length": 47.1171875,
"alnum_prop": 0.42099154369093017,
"repo_name": "Azure/azure-sdk-for-python",
"id": "02b7bdf5b50558837aa084a1ac8737a7fd6e0118",
"size": "6499",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/hdinsight/azure-mgmt-hdinsight/generated_samples/create_linux_hadoop_secure_hadoop.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import cv2
import numpy as np
import math
#Takes an image and returns an inverted blured (noise removed) binary image in its place
def biModalInvBlur(image):
#turns all pixels that are darker than 60 -> 255, all others 0
image1 = cv2.threshold(image, 60, 255, cv2.THRESH_BINARY_INV)
height, width = image.shape
#add border and blur to remove noise from image
tempImg = cv2.copyMakeBorder(image1[1],5,5,5,5,cv2.BORDER_CONSTANT,value=(0,0,0,0))
tempImg = cv2.medianBlur(tempImg,7)
#return bimodal image
image = tempImg[5:5+height,5:width+5]
return image
#Takes an image and returns the center of mass of the white central contour
#for this function to work the image must be binary, and the noise must be removed
#such that there is only one contour in the image
#centroid[0] is the x coordinate
#centroid[1] is the y coordinate
def centerMass(image):
#find the moments of the image
moments = cv2.moments(image,True)
centroid = ( moments['m10']/moments['m00'],moments['m01']/moments['m00'] )
return centroid
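# Small sanity sketch (illustrative only, not used by the pipeline below): for
# a binary image containing a single white block, the moments-based centroid
# computed above is simply the block's centre.
def _centroid_demo():
    demo = np.zeros((9, 9), np.uint8)
    demo[3:6, 2:5] = 255        # 3x3 white block centred at x=3, y=4
    return centerMass(demo)     # expected to be approximately (3.0, 4.0)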
def undistortImg(image, intrinsicMatrix, distortionCoeffs, refinedCameraMatrix, ROI):
# undistort
undistortedImage = cv2.undistort(image, intrinsicMatrix, distortionCoeffs, None, refinedCameraMatrix)
# crop the image
x,y,w,h = ROI
undistortedImage = undistortedImage[y:y+h, x:x+w]
return undistortedImage
#convert2Alpha takes in the number of pixels from the left edge of an undistorted image and returns how many radians that is from camA
#this works for LeftCam
def convert2Alpha(pixels):
#each pixel is N deg in FOV
#multiply the number of pixels by that conversion factor to get degrees from the left
#convert to radians
degPerPixel = 0.574712
#X is the angle of the left edge of the picture
#X = 59.65
X = 120.34
#alpha = 20 / 33.9333333*pixels*-1
alpha = degPerPixel*pixels
#the left camera is mounted pivoted in X deg
alpha = X - alpha
return math.radians(alpha)
#this works for RightCam
def convert2Beta(pixels):
degPerPixel = 0.515464
#X is the angle of the left edge of the picture
X = 25
beta = degPerPixel*pixels
#the right camera is mounted pivoted in X deg
beta = X + beta
return math.radians(beta)
def calculateAngleAndDistance(leftImage, rightImage, leftCamera, rightCamera):
#returned values are:
# the distance from the center point of the robot to the object
# the angle for that same center point (negative is left)
#Outline of steps
#Load Intrisic Matricies
#Load Distortion Coeficcients
#Load Left Image
##Make Image Bimodal
##Undistort Image
##Find center of mass
#Load Right Image
##Make Image Bimodal
##Undistort Image
##Find center of mass
#Use law of cosines to calculate distance
bwImageLeft = biModalInvBlur(leftImage)
uImageLeft = undistortImg(bwImageLeft, leftCamera['intrinsicMatrix'], leftCamera['distortionCoeffs'], leftCamera['refinedCameraMatrix'], leftCamera['roi'])
centerLeft = centerMass(uImageLeft)
bwImageRight = biModalInvBlur(rightImage)
uImageRight = undistortImg(bwImageRight, rightCamera['intrinsicMatrix'], rightCamera['distortionCoeffs'], rightCamera['refinedCameraMatrix'], rightCamera['roi'])
centerRight = centerMass(uImageRight)
#these are radians from left and right
alpha = convert2Alpha(centerLeft[0])
beta = convert2Beta(centerRight[0])
returnObj = privateDistanceFcnLeft(alpha, beta)
return returnObj
def privateDistanceFcnLeft(alpha, beta):
S = (177.8 * math.sin(beta)) / (math.sin(math.pi - beta - alpha))
objDistance = math.sqrt(7903.21 + (S*S) - (177.8 * S * math.cos(alpha)))
tmpVar = S*math.sin(alpha)/objDistance
angle1 = math.asin(88.9*math.sin(alpha)/objDistance)
omega = math.pi - angle1 - alpha
theta = (math.pi/2) - omega
theta = math.degrees(theta)
return (objDistance, theta)
def privateDistanceFcnRight(alpha, beta):
S = (177.8 * math.sin(beta)) / (math.sin(math.pi - beta-alpha))
objDistance = math.sqrt(7903.21 + (S*S) - (177.8 * S * math.cos(beta)))
angle1 = math.asin(88.9*math.sin(beta)/objDistance)
omega = math.pi - angle1 - beta
theta = (math.pi/2) - omega
theta = math.degrees(theta)
return [objDistance, theta]
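# Worked numeric sketch (the bearing angles below are assumptions chosen for
# illustration, not calibration data): feed one bearing per camera through the
# law-of-cosines solver above to obtain a range (in mm) and a heading (in
# degrees) for the robot's centre point, using the 177.8 mm baseline
# hard-coded in the functions above.
def _triangulation_demo():
    alpha = math.radians(70.0)   # object bearing seen by the left camera
    beta = math.radians(65.0)    # object bearing seen by the right camera
    return privateDistanceFcnLeft(alpha, beta)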
| {
"content_hash": "314c9de97ba7ad266157b54138ad5ee3",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 162,
"avg_line_length": 29.82857142857143,
"alnum_prop": 0.733955938697318,
"repo_name": "jodosh/MP4GS",
"id": "f63bcb0f2c6f3d16b62f63bbb004fd9d9021d3ca",
"size": "4176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/FinalDemo/user.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Arduino",
"bytes": "1972"
},
{
"name": "C",
"bytes": "3664"
},
{
"name": "C++",
"bytes": "91061"
},
{
"name": "Eagle",
"bytes": "109350"
},
{
"name": "Processing",
"bytes": "63185"
},
{
"name": "Python",
"bytes": "39004"
}
],
"symlink_target": ""
} |
from pathlib import Path
from textwrap import dedent
import pytest
from _pytest.config import UsageError
from _pytest.config.findpaths import get_common_ancestor
from _pytest.config.findpaths import get_dirs_from_args
from _pytest.config.findpaths import load_config_dict_from_file
class TestLoadConfigDictFromFile:
def test_empty_pytest_ini(self, tmp_path: Path) -> None:
"""pytest.ini files are always considered for configuration, even if empty"""
fn = tmp_path / "pytest.ini"
fn.write_text("", encoding="utf-8")
assert load_config_dict_from_file(fn) == {}
def test_pytest_ini(self, tmp_path: Path) -> None:
"""[pytest] section in pytest.ini files is read correctly"""
fn = tmp_path / "pytest.ini"
fn.write_text("[pytest]\nx=1", encoding="utf-8")
assert load_config_dict_from_file(fn) == {"x": "1"}
def test_custom_ini(self, tmp_path: Path) -> None:
"""[pytest] section in any .ini file is read correctly"""
fn = tmp_path / "custom.ini"
fn.write_text("[pytest]\nx=1", encoding="utf-8")
assert load_config_dict_from_file(fn) == {"x": "1"}
def test_custom_ini_without_section(self, tmp_path: Path) -> None:
"""Custom .ini files without [pytest] section are not considered for configuration"""
fn = tmp_path / "custom.ini"
fn.write_text("[custom]", encoding="utf-8")
assert load_config_dict_from_file(fn) is None
def test_custom_cfg_file(self, tmp_path: Path) -> None:
"""Custom .cfg files without [tool:pytest] section are not considered for configuration"""
fn = tmp_path / "custom.cfg"
fn.write_text("[custom]", encoding="utf-8")
assert load_config_dict_from_file(fn) is None
def test_valid_cfg_file(self, tmp_path: Path) -> None:
"""Custom .cfg files with [tool:pytest] section are read correctly"""
fn = tmp_path / "custom.cfg"
fn.write_text("[tool:pytest]\nx=1", encoding="utf-8")
assert load_config_dict_from_file(fn) == {"x": "1"}
def test_unsupported_pytest_section_in_cfg_file(self, tmp_path: Path) -> None:
""".cfg files with [pytest] section are no longer supported and should fail to alert users"""
fn = tmp_path / "custom.cfg"
fn.write_text("[pytest]", encoding="utf-8")
with pytest.raises(pytest.fail.Exception):
load_config_dict_from_file(fn)
def test_invalid_toml_file(self, tmp_path: Path) -> None:
"""Invalid .toml files should raise `UsageError`."""
fn = tmp_path / "myconfig.toml"
fn.write_text("]invalid toml[", encoding="utf-8")
with pytest.raises(UsageError):
load_config_dict_from_file(fn)
def test_custom_toml_file(self, tmp_path: Path) -> None:
""".toml files without [tool.pytest.ini_options] are not considered for configuration."""
fn = tmp_path / "myconfig.toml"
fn.write_text(
dedent(
"""
[build_system]
x = 1
"""
),
encoding="utf-8",
)
assert load_config_dict_from_file(fn) is None
def test_valid_toml_file(self, tmp_path: Path) -> None:
""".toml files with [tool.pytest.ini_options] are read correctly, including changing
data types to str/list for compatibility with other configuration options."""
fn = tmp_path / "myconfig.toml"
fn.write_text(
dedent(
"""
[tool.pytest.ini_options]
x = 1
y = 20.0
values = ["tests", "integration"]
name = "foo"
heterogeneous_array = [1, "str"]
"""
),
encoding="utf-8",
)
assert load_config_dict_from_file(fn) == {
"x": "1",
"y": "20.0",
"values": ["tests", "integration"],
"name": "foo",
"heterogeneous_array": [1, "str"],
}
class TestCommonAncestor:
def test_has_ancestor(self, tmp_path: Path) -> None:
fn1 = tmp_path / "foo" / "bar" / "test_1.py"
fn1.parent.mkdir(parents=True)
fn1.touch()
fn2 = tmp_path / "foo" / "zaz" / "test_2.py"
fn2.parent.mkdir(parents=True)
fn2.touch()
assert get_common_ancestor([fn1, fn2]) == tmp_path / "foo"
assert get_common_ancestor([fn1.parent, fn2]) == tmp_path / "foo"
assert get_common_ancestor([fn1.parent, fn2.parent]) == tmp_path / "foo"
assert get_common_ancestor([fn1, fn2.parent]) == tmp_path / "foo"
def test_single_dir(self, tmp_path: Path) -> None:
assert get_common_ancestor([tmp_path]) == tmp_path
def test_single_file(self, tmp_path: Path) -> None:
fn = tmp_path / "foo.py"
fn.touch()
assert get_common_ancestor([fn]) == tmp_path
def test_get_dirs_from_args(tmp_path):
"""get_dirs_from_args() skips over non-existing directories and files"""
fn = tmp_path / "foo.py"
fn.touch()
d = tmp_path / "tests"
d.mkdir()
option = "--foobar=/foo.txt"
# xdist uses options in this format for its rsync feature (#7638)
xdist_rsync_option = "popen=c:/dest"
assert get_dirs_from_args(
[str(fn), str(tmp_path / "does_not_exist"), str(d), option, xdist_rsync_option]
) == [fn.parent, d]
| {
"content_hash": "00f63a7756f9d7d1d69ffb7e7bcb9408",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 101,
"avg_line_length": 39.84444444444444,
"alnum_prop": 0.5839375348577802,
"repo_name": "markshao/pytest",
"id": "3a2917261a255063b1c306e3b1a8c566e5b42325",
"size": "5379",
"binary": false,
"copies": "9",
"ref": "refs/heads/main",
"path": "testing/test_findpaths.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "192"
},
{
"name": "Python",
"bytes": "2619027"
}
],
"symlink_target": ""
} |
import sys
import os
import platform
import numpy as np
# -- add development version of flopy to system path
flopypath = os.path.join('..', '..')
if flopypath not in sys.path:
print 'Adding to sys.path: ', flopypath
sys.path.append(flopypath)
import flopy
import flopy.utils as fputl
spth = os.getcwd()
mname = 'twrip.nam'
#mname = 'Oahu_01.nam'
model_ws = os.path.join('..', 'data', 'parameters')
omodel_ws = os.path.join('..', 'basic', 'data')
#mname = 'freyberg'
#bpth = os.path.join('/Users/jdhughes/Documents/Training/GW1774Materials/GitRepository/GW1774/ClassMaterials/Exercises/Data/FreybergModel')
#os.chdir(bpth)
#model_ws = os.path.join('.')
#omodel_ws = os.path.join('..', '17_streamcapture')
exe_name = 'mf2005'
version = 'mf2005'
# -- load the model
ml = flopy.modflow.Modflow.load(mname, version=version, exe_name=exe_name,
verbose=False, model_ws=model_ws)
# -- change model workspace
ml.change_model_ws(new_pth=omodel_ws)
# -- add pcg package
if mname == 'twrip.nam':
ml.remove_package('SIP')
pcg = flopy.modflow.ModflowPcg(ml)
# wel = ml.get_package('WEL')
# wd = wel.stress_period_data[0]
# wel.stress_period_data[0] = [[0, 8, 7, -5.],
# [0, 8, 9, -5.],
# [0, 8, 11, -5.]]
# -- save the model
ml.write_input()
os.chdir(spth)
print 'finished...' | {
"content_hash": "98712c2b29bb59b0838c9ad93fc149eb",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 139,
"avg_line_length": 23.166666666666668,
"alnum_prop": 0.6237410071942446,
"repo_name": "bdestombe/flopy-1",
"id": "6d2cb90414a39f0978c45784a71ccf4601a8648a",
"size": "1390",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "examples/Testing/flopy3_load_parameters.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "71"
},
{
"name": "Python",
"bytes": "2372593"
}
],
"symlink_target": ""
} |
import argparse
import openstack
import logging
import datetime
import time
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
PAUSE_SECONDS = 5
log = logging.getLogger('openstack-cleanup')
parser = argparse.ArgumentParser(description='Cleanup OpenStack resources')
parser.add_argument('-v', '--verbose', action='store_true',
help='Increase verbosity')
parser.add_argument('--hours', type=int, default=4,
help='Age (in hours) of VMs to cleanup (default: 4h)')
parser.add_argument('--dry-run', action='store_true',
help='Do not delete anything')
args = parser.parse_args()
oldest_allowed = datetime.datetime.now() - datetime.timedelta(hours=args.hours)
def main():
if args.dry_run:
print('Running in dry-run mode')
else:
print('This will delete resources... (ctrl+c to cancel)')
time.sleep(PAUSE_SECONDS)
conn = openstack.connect()
print('Servers...')
map_if_old(conn.compute.delete_server,
conn.compute.servers())
print('Security groups...')
map_if_old(conn.network.delete_security_group,
conn.network.security_groups())
print('Ports...')
map_if_old(conn.network.delete_port,
conn.network.ports())
print('Subnets...')
map_if_old(conn.network.delete_subnet,
conn.network.subnets())
print('Networks...')
for n in conn.network.networks():
if not n.is_router_external:
fn_if_old(conn.network.delete_network, n)
# applies the given fn to every element of the iterable that is older than allowed
def map_if_old(fn, items):
for item in items:
fn_if_old(fn, item)
# run the given fn only if the passed item is older than allowed
def fn_if_old(fn, item):
created_at = datetime.datetime.strptime(item.created_at, DATE_FORMAT)
if item.name == "default": # skip default security group
return
if created_at < oldest_allowed:
print('Will delete %(name)s (%(id)s)' % item)
if not args.dry_run:
fn(item)
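# Small illustrative helper (not called by main(); it just restates the age
# check above in isolation): an item is eligible for deletion when its
# created_at, parsed with DATE_FORMAT, falls before oldest_allowed.
def _is_old_enough(created_at_str):
    created_at = datetime.datetime.strptime(created_at_str, DATE_FORMAT)
    return created_at < oldest_allowed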
if __name__ == '__main__':
# execute only if run as a script
main()
| {
"content_hash": "4ca59c06d1006a9274eea7a4f515e52f",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 79,
"avg_line_length": 28.18421052631579,
"alnum_prop": 0.6265172735760971,
"repo_name": "kubernetes-incubator/kargo",
"id": "9bc24d3f3796432fbf2fb8e164be2a634407fdc1",
"size": "2164",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/openstack-cleanup/main.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "3088"
},
{
"name": "HCL",
"bytes": "25635"
},
{
"name": "Python",
"bytes": "1572832"
},
{
"name": "Shell",
"bytes": "51574"
},
{
"name": "Smarty",
"bytes": "328"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from sqlalchemy.orm import subqueryload
class ConnectorRegistry(object):
""" Central Registry for all available datasource engines"""
sources = {}
@classmethod
def register_sources(cls, datasource_config):
for module_name, class_names in datasource_config.items():
class_names = [str(s) for s in class_names]
module_obj = __import__(module_name, fromlist=class_names)
for class_name in class_names:
source_class = getattr(module_obj, class_name)
cls.sources[source_class.type] = source_class
@classmethod
def get_datasource(cls, datasource_type, datasource_id, session):
return (
session.query(cls.sources[datasource_type])
.filter_by(id=datasource_id)
.first()
)
@classmethod
def get_all_datasources(cls, session):
datasources = []
for source_type in ConnectorRegistry.sources:
source_class = ConnectorRegistry.sources[source_type]
qry = session.query(source_class)
qry = source_class.default_query(qry)
datasources.extend(qry.all())
return datasources
@classmethod
def get_datasource_by_name(cls, session, datasource_type, datasource_name,
schema, database_name):
datasource_class = ConnectorRegistry.sources[datasource_type]
datasources = session.query(datasource_class).all()
# Filter out datasources that don't have a database.
db_ds = [d for d in datasources if d.database and
d.database.name == database_name and
d.name == datasource_name and d.schema == schema]
return db_ds[0]
@classmethod
def query_datasources_by_permissions(cls, session, database, permissions):
datasource_class = ConnectorRegistry.sources[database.type]
return (
session.query(datasource_class)
.filter_by(database_id=database.id)
.filter(datasource_class.perm.in_(permissions))
.all()
)
@classmethod
def get_eager_datasource(cls, session, datasource_type, datasource_id):
"""Returns datasource with columns and metrics."""
datasource_class = ConnectorRegistry.sources[datasource_type]
return (
session.query(datasource_class)
.options(
subqueryload(datasource_class.columns),
subqueryload(datasource_class.metrics),
)
.filter_by(id=datasource_id)
.one()
)
@classmethod
def query_datasources_by_name(
cls, session, database, datasource_name, schema=None):
datasource_class = ConnectorRegistry.sources[database.type]
return datasource_class.query_datasources_by_name(
session, database, datasource_name, schema=schema)
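# Self-contained sketch (FakeSource is made up purely for illustration): the
# registry simply keys datasource classes by their ``type`` attribute, which
# is what register_sources does after importing each configured class.
if __name__ == '__main__':
    class FakeSource(object):
        type = 'fake'

    # Bypass the import machinery and register the class directly, the same
    # way register_sources would after resolving the module path.
    ConnectorRegistry.sources[FakeSource.type] = FakeSource
    print(ConnectorRegistry.sources)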
| {
"content_hash": "317e6977273989b4627db598ccf72661",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 78,
"avg_line_length": 37.23170731707317,
"alnum_prop": 0.6265967900425811,
"repo_name": "dmigo/incubator-superset",
"id": "efafcb441739cb8c006b236cc01e6637ea27932e",
"size": "3101",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "superset/connectors/connector_registry.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "99154"
},
{
"name": "HTML",
"bytes": "100560"
},
{
"name": "JavaScript",
"bytes": "1557840"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1075386"
},
{
"name": "Shell",
"bytes": "1557"
},
{
"name": "Smarty",
"bytes": "1048"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('approvals', '0006_auto_20170612_1526'),
]
operations = [
migrations.AlterField(
model_name='approval',
name='app_type',
field=models.IntegerField(choices=[(1, 'Permit'), (2, 'Licence/permit'), (3, 'Part 5 - New Application'), (4, 'Emergency works'), (5, 'Part 5 - Amendment Request'), (6, 'Part 5 - Amendment Application'), (7, 'Test - Application'), (8, 'Amend Permit'), (9, 'Amend Licence'), (10, 'Renew Permit'), (11, 'Renew Licence')]),
),
]
| {
"content_hash": "80af64d448580e86b8c08c8851f3658e",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 332,
"avg_line_length": 37,
"alnum_prop": 0.5990990990990991,
"repo_name": "ropable/statdev",
"id": "c7258e0dc31bd3620013fe8c844b2f4c5e20f762",
"size": "739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "approvals/migrations/0007_auto_20170616_1635.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "540"
},
{
"name": "HTML",
"bytes": "368977"
},
{
"name": "Python",
"bytes": "588671"
}
],
"symlink_target": ""
} |
"""Generic interface for least-square minimization."""
from __future__ import division, print_function, absolute_import
from warnings import warn
import numpy as np
from numpy.linalg import norm
from scipy.sparse import issparse, csr_matrix
from scipy.sparse.linalg import LinearOperator
from scipy.optimize import _minpack, OptimizeResult
from scipy.optimize._numdiff import approx_derivative, group_columns
from scipy._lib.six import string_types
from .trf import trf
from .dogbox import dogbox
from .common import EPS, in_bounds, make_strictly_feasible
TERMINATION_MESSAGES = {
-1: "Improper input parameters status returned from `leastsq`",
0: "The maximum number of function evaluations is exceeded.",
1: "`gtol` termination condition is satisfied.",
2: "`ftol` termination condition is satisfied.",
3: "`xtol` termination condition is satisfied.",
4: "Both `ftol` and `xtol` termination conditions are satisfied."
}
FROM_MINPACK_TO_COMMON = {
0: -1, # Improper input parameters from MINPACK.
1: 2,
2: 3,
3: 4,
4: 1,
5: 0
# There are 6, 7, 8 for too small tolerance parameters,
# but we guard against it by checking ftol, xtol, gtol beforehand.
}
def call_minpack(fun, x0, jac, ftol, xtol, gtol, max_nfev, x_scale, diff_step):
n = x0.size
if diff_step is None:
epsfcn = EPS
else:
epsfcn = diff_step**2
# Compute MINPACK's `diag`, which is the inverse of our `x_scale` and
# ``x_scale='jac'`` corresponds to ``diag=None``.
if isinstance(x_scale, string_types) and x_scale == 'jac':
diag = None
else:
diag = 1 / x_scale
full_output = True
col_deriv = False
factor = 100.0
if jac is None:
if max_nfev is None:
# n squared to account for Jacobian evaluations.
max_nfev = 100 * n * (n + 1)
x, info, status = _minpack._lmdif(
fun, x0, (), full_output, ftol, xtol, gtol,
max_nfev, epsfcn, factor, diag)
else:
if max_nfev is None:
max_nfev = 100 * n
x, info, status = _minpack._lmder(
fun, jac, x0, (), full_output, col_deriv,
ftol, xtol, gtol, max_nfev, factor, diag)
f = info['fvec']
if callable(jac):
J = jac(x)
else:
J = np.atleast_2d(approx_derivative(fun, x))
cost = 0.5 * np.dot(f, f)
g = J.T.dot(f)
g_norm = norm(g, ord=np.inf)
nfev = info['nfev']
njev = info.get('njev', None)
status = FROM_MINPACK_TO_COMMON[status]
active_mask = np.zeros_like(x0, dtype=int)
return OptimizeResult(
x=x, cost=cost, fun=f, jac=J, grad=g, optimality=g_norm,
active_mask=active_mask, nfev=nfev, njev=njev, status=status)
def prepare_bounds(bounds, n):
lb, ub = [np.asarray(b, dtype=float) for b in bounds]
if lb.ndim == 0:
lb = np.resize(lb, n)
if ub.ndim == 0:
ub = np.resize(ub, n)
return lb, ub
def check_tolerance(ftol, xtol, gtol):
message = "{} is too low, setting to machine epsilon {}."
if ftol < EPS:
warn(message.format("`ftol`", EPS))
ftol = EPS
if xtol < EPS:
warn(message.format("`xtol`", EPS))
xtol = EPS
if gtol < EPS:
warn(message.format("`gtol`", EPS))
gtol = EPS
return ftol, xtol, gtol
def check_x_scale(x_scale, x0):
if isinstance(x_scale, string_types) and x_scale == 'jac':
return x_scale
try:
x_scale = np.asarray(x_scale, dtype=float)
valid = np.all(np.isfinite(x_scale)) and np.all(x_scale > 0)
except (ValueError, TypeError):
valid = False
if not valid:
raise ValueError("`x_scale` must be 'jac' or array_like with "
"positive numbers.")
if x_scale.ndim == 0:
x_scale = np.resize(x_scale, x0.shape)
if x_scale.shape != x0.shape:
raise ValueError("Inconsistent shapes between `x_scale` and `x0`.")
return x_scale
def check_jac_sparsity(jac_sparsity, m, n):
if jac_sparsity is None:
return None
if not issparse(jac_sparsity):
jac_sparsity = np.atleast_2d(jac_sparsity)
if jac_sparsity.shape != (m, n):
raise ValueError("`jac_sparsity` has wrong shape.")
return jac_sparsity, group_columns(jac_sparsity)
# Loss functions.
def huber(z, rho, cost_only):
mask = z <= 1
rho[0, mask] = z[mask]
rho[0, ~mask] = 2 * z[~mask]**0.5 - 1
if cost_only:
return
rho[1, mask] = 1
rho[1, ~mask] = z[~mask]**-0.5
rho[2, mask] = 0
rho[2, ~mask] = -0.5 * z[~mask]**-1.5
def soft_l1(z, rho, cost_only):
t = 1 + z
rho[0] = 2 * (t**0.5 - 1)
if cost_only:
return
rho[1] = t**-0.5
rho[2] = -0.5 * t**-1.5
def cauchy(z, rho, cost_only):
rho[0] = np.log1p(z)
if cost_only:
return
t = 1 + z
rho[1] = 1 / t
rho[2] = -1 / t**2
def arctan(z, rho, cost_only):
rho[0] = np.arctan(z)
if cost_only:
return
t = 1 + z**2
rho[1] = 1 / t
rho[2] = -2 * z / t**2
IMPLEMENTED_LOSSES = dict(linear=None, huber=huber, soft_l1=soft_l1,
cauchy=cauchy, arctan=arctan)
def construct_loss_function(m, loss, f_scale):
if loss == 'linear':
return None
if not callable(loss):
loss = IMPLEMENTED_LOSSES[loss]
rho = np.empty((3, m))
def loss_function(f, cost_only=False):
z = (f / f_scale) ** 2
loss(z, rho, cost_only=cost_only)
if cost_only:
return 0.5 * f_scale ** 2 * np.sum(rho[0])
rho[0] *= f_scale ** 2
rho[2] /= f_scale ** 2
return rho
else:
def loss_function(f, cost_only=False):
z = (f / f_scale) ** 2
rho = loss(z)
if cost_only:
return 0.5 * f_scale ** 2 * np.sum(rho[0])
rho[0] *= f_scale ** 2
rho[2] /= f_scale ** 2
return rho
return loss_function
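# Illustrative sketch (not part of SciPy itself): a user-supplied ``loss``
# callable must map z = f**2 to an array of shape (3, m) holding rho(z),
# rho'(z) and rho''(z); this example mirrors the built-in 'soft_l1' loss in
# that calling convention and could be passed as
# ``least_squares(fun, x0, loss=_example_custom_loss)``.
def _example_custom_loss(z):
    rho = np.empty((3,) + z.shape)
    t = 1 + z
    rho[0] = 2 * (t**0.5 - 1)    # rho(z)
    rho[1] = t**-0.5             # d rho / d z
    rho[2] = -0.5 * t**-1.5      # d^2 rho / d z^2
    return rho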
def least_squares(
fun, x0, jac='2-point', bounds=(-np.inf, np.inf), method='trf',
ftol=1e-8, xtol=1e-8, gtol=1e-8, x_scale=1.0, loss='linear',
f_scale=1.0, diff_step=None, tr_solver=None, tr_options={},
jac_sparsity=None, max_nfev=None, verbose=0, args=(), kwargs={}):
"""Solve a nonlinear least-squares problem with bounds on the variables.
Given the residuals f(x) (an m-dimensional function of n variables) and
the loss function rho(s) (a scalar function), `least_squares` finds a
local minimum of the cost function F(x)::
minimize F(x) = 0.5 * sum(rho(f_i(x)**2), i = 0, ..., m - 1)
subject to lb <= x <= ub
The purpose of the loss function rho(s) is to reduce the influence of
outliers on the solution.
Parameters
----------
fun : callable
Function which computes the vector of residuals, with the signature
``fun(x, *args, **kwargs)``, i.e., the minimization proceeds with
respect to its first argument. The argument ``x`` passed to this
function is an ndarray of shape (n,) (never a scalar, even for n=1).
It must return a 1-d array_like of shape (m,) or a scalar.
x0 : array_like with shape (n,) or float
Initial guess on independent variables. If float, it will be treated
as a 1-d array with one element.
jac : {'2-point', '3-point', 'cs', callable}, optional
Method of computing the Jacobian matrix (an m-by-n matrix, where
element (i, j) is the partial derivative of f[i] with respect to
x[j]). The keywords select a finite difference scheme for numerical
estimation. The scheme '3-point' is more accurate, but requires
twice as many operations as '2-point' (default). The
scheme 'cs' uses complex steps, and while potentially the most
accurate, it is applicable only when `fun` correctly handles
complex inputs and can be analytically continued to the complex
plane. Method 'lm' always uses the '2-point' scheme. If callable,
it is used as ``jac(x, *args, **kwargs)`` and should return a
good approximation (or the exact value) for the Jacobian as an
array_like (np.atleast_2d is applied), a sparse matrix or a
`scipy.sparse.linalg.LinearOperator`.
bounds : 2-tuple of array_like, optional
Lower and upper bounds on independent variables. Defaults to no bounds.
Each array must match the size of `x0` or be a scalar, in the latter
case a bound will be the same for all variables. Use ``np.inf`` with
an appropriate sign to disable bounds on all or some variables.
method : {'trf', 'dogbox', 'lm'}, optional
Algorithm to perform minimization.
* 'trf' : Trust Region Reflective algorithm, particularly suitable
for large sparse problems with bounds. Generally robust method.
* 'dogbox' : dogleg algorithm with rectangular trust regions,
typical use case is small problems with bounds. Not recommended
for problems with rank-deficient Jacobian.
* 'lm' : Levenberg-Marquardt algorithm as implemented in MINPACK.
Doesn't handle bounds and sparse Jacobians. Usually the most
efficient method for small unconstrained problems.
Default is 'trf'. See Notes for more information.
ftol : float, optional
Tolerance for termination by the change of the cost function. Default
is 1e-8. The optimization process is stopped when ``dF < ftol * F``,
and there was an adequate agreement between a local quadratic model and
the true model in the last step.
xtol : float, optional
Tolerance for termination by the change of the independent variables.
Default is 1e-8. The exact condition depends on the `method` used:
* For 'trf' and 'dogbox' : ``norm(dx) < xtol * (xtol + norm(x))``
* For 'lm' : ``Delta < xtol * norm(xs)``, where ``Delta`` is
a trust-region radius and ``xs`` is the value of ``x``
scaled according to `x_scale` parameter (see below).
gtol : float, optional
Tolerance for termination by the norm of the gradient. Default is 1e-8.
The exact condition depends on a `method` used:
* For 'trf' : ``norm(g_scaled, ord=np.inf) < gtol``, where
``g_scaled`` is the value of the gradient scaled to account for
the presence of the bounds [STIR]_.
* For 'dogbox' : ``norm(g_free, ord=np.inf) < gtol``, where
``g_free`` is the gradient with respect to the variables which
are not in the optimal state on the boundary.
* For 'lm' : the maximum absolute value of the cosine of angles
between columns of the Jacobian and the residual vector is less
than `gtol`, or the residual vector is zero.
x_scale : array_like or 'jac', optional
Characteristic scale of each variable. Setting `x_scale` is equivalent
to reformulating the problem in scaled variables ``xs = x / x_scale``.
An alternative view is that the size of a trust region along j-th
dimension is proportional to ``x_scale[j]``. Improved convergence may
be achieved by setting `x_scale` such that a step of a given size
along any of the scaled variables has a similar effect on the cost
function. If set to 'jac', the scale is iteratively updated using the
inverse norms of the columns of the Jacobian matrix (as described in
[JJMore]_).
loss : str or callable, optional
Determines the loss function. The following keyword values are allowed:
* 'linear' (default) : ``rho(z) = z``. Gives a standard
least-squares problem.
* 'soft_l1' : ``rho(z) = 2 * ((1 + z)**0.5 - 1)``. The smooth
approximation of l1 (absolute value) loss. Usually a good
choice for robust least squares.
* 'huber' : ``rho(z) = z if z <= 1 else 2*z**0.5 - 1``. Works
similarly to 'soft_l1'.
* 'cauchy' : ``rho(z) = ln(1 + z)``. Severely weakens outliers
influence, but may cause difficulties in optimization process.
* 'arctan' : ``rho(z) = arctan(z)``. Limits a maximum loss on
a single residual, has properties similar to 'cauchy'.
If callable, it must take a 1-d ndarray ``z=f**2`` and return an
array_like with shape (3, m) where row 0 contains function values,
row 1 contains first derivatives and row 2 contains second
derivatives. Method 'lm' supports only 'linear' loss.
f_scale : float, optional
Value of soft margin between inlier and outlier residuals, default
is 1.0. The loss function is evaluated as follows
``rho_(f**2) = C**2 * rho(f**2 / C**2)``, where ``C`` is `f_scale`,
and ``rho`` is determined by `loss` parameter. This parameter has
no effect with ``loss='linear'``, but for other `loss` values it is
of crucial importance.
max_nfev : None or int, optional
Maximum number of function evaluations before the termination.
If None (default), the value is chosen automatically:
* For 'trf' and 'dogbox' : 100 * n.
* For 'lm' : 100 * n if `jac` is callable and 100 * n * (n + 1)
otherwise (because 'lm' counts function calls in Jacobian
estimation).
diff_step : None or array_like, optional
Determines the relative step size for the finite difference
approximation of the Jacobian. The actual step is computed as
``x * diff_step``. If None (default), then `diff_step` is taken to be
a conventional "optimal" power of machine epsilon for the finite
difference scheme used [NR]_.
tr_solver : {None, 'exact', 'lsmr'}, optional
Method for solving trust-region subproblems, relevant only for 'trf'
and 'dogbox' methods.
* 'exact' is suitable for not very large problems with dense
Jacobian matrices. The computational complexity per iteration is
comparable to a singular value decomposition of the Jacobian
matrix.
* 'lsmr' is suitable for problems with sparse and large Jacobian
matrices. It uses the iterative procedure
`scipy.sparse.linalg.lsmr` for finding a solution of a linear
least-squares problem and only requires matrix-vector product
evaluations.
If None (default) the solver is chosen based on the type of Jacobian
returned on the first iteration.
tr_options : dict, optional
Keyword options passed to trust-region solver.
* ``tr_solver='exact'``: `tr_options` are ignored.
* ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`.
Additionally ``method='trf'`` supports 'regularize' option
(bool, default is True) which adds a regularization term to the
normal equation, which improves convergence if the Jacobian is
rank-deficient [Byrd]_ (eq. 3.4).
jac_sparsity : {None, array_like, sparse matrix}, optional
Defines the sparsity structure of the Jacobian matrix for finite
difference estimation, its shape must be (m, n). If the Jacobian has
only a few non-zero elements in *each* row, providing the sparsity
structure will greatly speed up the computations [Curtis]_. A zero
entry means that a corresponding element in the Jacobian is identically
zero. If provided, forces the use of 'lsmr' trust-region solver.
If None (default) then dense differencing will be used. Has no effect
for 'lm' method.
verbose : {0, 1, 2}, optional
Level of algorithm's verbosity:
* 0 (default) : work silently.
* 1 : display a termination report.
* 2 : display progress during iterations (not supported by 'lm'
method).
args, kwargs : tuple and dict, optional
Additional arguments passed to `fun` and `jac`. Both empty by default.
The calling signature is ``fun(x, *args, **kwargs)`` and the same for
`jac`.
Returns
-------
`OptimizeResult` with the following fields defined:
x : ndarray, shape (n,)
Solution found.
cost : float
Value of the cost function at the solution.
fun : ndarray, shape (m,)
Vector of residuals at the solution.
jac : ndarray, sparse matrix or LinearOperator, shape (m, n)
Modified Jacobian matrix at the solution, in the sense that J^T J
is a Gauss-Newton approximation of the Hessian of the cost function.
The type is the same as the one used by the algorithm.
grad : ndarray, shape (m,)
Gradient of the cost function at the solution.
optimality : float
First-order optimality measure. In unconstrained problems, it is always
the uniform norm of the gradient. In constrained problems, it is the
quantity which was compared with `gtol` during iterations.
active_mask : ndarray of int, shape (n,)
Each component shows whether a corresponding constraint is active
(that is, whether a variable is at the bound):
* 0 : a constraint is not active.
* -1 : a lower bound is active.
* 1 : an upper bound is active.
Might be somewhat arbitrary for 'trf' method as it generates a sequence
of strictly feasible iterates and `active_mask` is determined within a
tolerance threshold.
nfev : int
Number of function evaluations done. Methods 'trf' and 'dogbox' do not
count function calls for numerical Jacobian approximation, as opposed
to 'lm' method.
njev : int or None
Number of Jacobian evaluations done. If numerical Jacobian
approximation is used in 'lm' method, it is set to None.
status : int
The reason for algorithm termination:
* -1 : improper input parameters status returned from MINPACK.
* 0 : the maximum number of function evaluations is exceeded.
* 1 : `gtol` termination condition is satisfied.
* 2 : `ftol` termination condition is satisfied.
* 3 : `xtol` termination condition is satisfied.
* 4 : Both `ftol` and `xtol` termination conditions are satisfied.
message : str
Verbal description of the termination reason.
success : bool
True if one of the convergence criteria is satisfied (`status` > 0).
See Also
--------
leastsq : A legacy wrapper for the MINPACK implementation of the
Levenberg-Marquardt algorithm.
curve_fit : Least-squares minimization applied to a curve fitting problem.
Notes
-----
Method 'lm' (Levenberg-Marquardt) calls a wrapper over least-squares
algorithms implemented in MINPACK (lmder, lmdif). It runs the
Levenberg-Marquardt algorithm formulated as a trust-region type algorithm.
The implementation is based on paper [JJMore]_, it is very robust and
efficient with a lot of smart tricks. It should be your first choice
for unconstrained problems. Note that it doesn't support bounds. Also
it doesn't work when m < n.
Method 'trf' (Trust Region Reflective) is motivated by the process of
solving a system of equations, which constitute the first-order optimality
condition for a bound-constrained minimization problem as formulated in
[STIR]_. The algorithm iteratively solves trust-region subproblems
augmented by a special diagonal quadratic term and with trust-region shape
determined by the distance from the bounds and the direction of the
gradient. These enhancements help to avoid making steps directly into bounds
and efficiently explore the whole space of variables. To further improve
convergence, the algorithm considers search directions reflected from the
bounds. To obey theoretical requirements, the algorithm keeps iterates
strictly feasible. With dense Jacobians trust-region subproblems are
solved by an exact method very similar to the one described in [JJMore]_
(and implemented in MINPACK). The difference from the MINPACK
implementation is that a singular value decomposition of a Jacobian
matrix is done once per iteration, instead of a QR decomposition and series
of Givens rotation eliminations. For large sparse Jacobians a 2-d subspace
approach of solving trust-region subproblems is used [STIR]_, [Byrd]_.
The subspace is spanned by a scaled gradient and an approximate
Gauss-Newton solution delivered by `scipy.sparse.linalg.lsmr`. When no
constraints are imposed the algorithm is very similar to MINPACK and has
generally comparable performance. The algorithm works quite robustly in
unbounded and bounded problems, thus it is chosen as the default algorithm.
Method 'dogbox' operates in a trust-region framework, but considers
rectangular trust regions as opposed to conventional ellipsoids [Voglis]_.
The intersection of a current trust region and initial bounds is again
rectangular, so on each iteration a quadratic minimization problem subject
to bound constraints is solved approximately by Powell's dogleg method
[NumOpt]_. The required Gauss-Newton step can be computed exactly for
dense Jacobians or approximately by `scipy.sparse.linalg.lsmr` for large
sparse Jacobians. The algorithm is likely to exhibit slow convergence when
the rank of Jacobian is less than the number of variables. The algorithm
often outperforms 'trf' in bounded problems with a small number of
variables.
Robust loss functions are implemented as described in [BA]_. The idea
is to modify a residual vector and a Jacobian matrix on each iteration
such that computed gradient and Gauss-Newton Hessian approximation match
the true gradient and Hessian approximation of the cost function. Then
the algorithm proceeds in a normal way, i.e. robust loss functions are
implemented as a simple wrapper over standard least-squares algorithms.
.. versionadded:: 0.17.0
References
----------
.. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior,
and Conjugate Gradient Method for Large-Scale Bound-Constrained
Minimization Problems," SIAM Journal on Scientific Computing,
Vol. 21, Number 1, pp 1-23, 1999.
.. [NR] William H. Press et al., "Numerical Recipes. The Art of Scientific
Computing. 3rd edition", Sec. 5.7.
.. [Byrd] R. H. Byrd, R. B. Schnabel and G. A. Shultz, "Approximate
solution of the trust region problem by minimization over
two-dimensional subspaces", Math. Programming, 40, pp. 247-263,
1988.
.. [Curtis] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of
Mathematics and its Applications, 13, pp. 117-120, 1974.
.. [JJMore] J. J. More, "The Levenberg-Marquardt Algorithm: Implementation
and Theory," Numerical Analysis, ed. G. A. Watson, Lecture
Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
.. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region
Dogleg Approach for Unconstrained and Bound Constrained
Nonlinear Optimization", WSEAS International Conference on
Applied Mathematics, Corfu, Greece, 2004.
.. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization,
2nd edition", Chapter 4.
.. [BA] B. Triggs et al., "Bundle Adjustment - A Modern Synthesis",
Proceedings of the International Workshop on Vision Algorithms:
Theory and Practice, pp. 298-372, 1999.
Examples
--------
In this example we find a minimum of the Rosenbrock function without bounds
on independent variables.
>>> def fun_rosenbrock(x):
... return np.array([10 * (x[1] - x[0]**2), (1 - x[0])])
Notice that we only provide the vector of the residuals. The algorithm
constructs the cost function as a sum of squares of the residuals, which
gives the Rosenbrock function. The exact minimum is at ``x = [1.0, 1.0]``.
>>> from scipy.optimize import least_squares
>>> x0_rosenbrock = np.array([2, 2])
>>> res_1 = least_squares(fun_rosenbrock, x0_rosenbrock)
>>> res_1.x
array([ 1., 1.])
>>> res_1.cost
9.8669242910846867e-30
>>> res_1.optimality
8.8928864934219529e-14
We now constrain the variables, in such a way that the previous solution
becomes infeasible. Specifically, we require that ``x[1] >= 1.5`` and leave
``x[0]`` unconstrained. To this end, we specify the `bounds` parameter
to `least_squares` in the form ``bounds=([-np.inf, 1.5], np.inf)``.
We also provide the analytic Jacobian:
>>> def jac_rosenbrock(x):
... return np.array([
... [-20 * x[0], 10],
... [-1, 0]])
Putting this all together, we see that the new solution lies on the bound:
>>> res_2 = least_squares(fun_rosenbrock, x0_rosenbrock, jac_rosenbrock,
... bounds=([-np.inf, 1.5], np.inf))
>>> res_2.x
array([ 1.22437075, 1.5 ])
>>> res_2.cost
0.025213093946805685
>>> res_2.optimality
1.5885401433157753e-07
Now we solve a system of equations (i.e., the cost function should be zero
at a minimum) for a Broyden tridiagonal vector-valued function of 100000
variables:
>>> def fun_broyden(x):
... f = (3 - x) * x + 1
... f[1:] -= x[:-1]
... f[:-1] -= 2 * x[1:]
... return f
The corresponding Jacobian matrix is sparse. We tell the algorithm to
estimate it by finite differences and provide the sparsity structure of
the Jacobian to significantly speed up this process.
>>> from scipy.sparse import lil_matrix
>>> def sparsity_broyden(n):
... sparsity = lil_matrix((n, n), dtype=int)
... i = np.arange(n)
... sparsity[i, i] = 1
... i = np.arange(1, n)
... sparsity[i, i - 1] = 1
... i = np.arange(n - 1)
... sparsity[i, i + 1] = 1
... return sparsity
...
>>> n = 100000
>>> x0_broyden = -np.ones(n)
...
>>> res_3 = least_squares(fun_broyden, x0_broyden,
... jac_sparsity=sparsity_broyden(n))
>>> res_3.cost
4.5687069299604613e-23
>>> res_3.optimality
1.1650454296851518e-11
Let's also solve a curve fitting problem using a robust loss function to
take care of outliers in the data. Define the model function as
``y = a + b * exp(c * t)``, where t is a predictor variable, y is an
observation and a, b, c are parameters to estimate.
First, define the function which generates the data with noise and
outliers, define the model parameters, and generate data:
>>> def gen_data(t, a, b, c, noise=0, n_outliers=0, random_state=0):
... y = a + b * np.exp(t * c)
...
... rnd = np.random.RandomState(random_state)
... error = noise * rnd.randn(t.size)
... outliers = rnd.randint(0, t.size, n_outliers)
... error[outliers] *= 10
...
... return y + error
...
>>> a = 0.5
>>> b = 2.0
>>> c = -1
>>> t_min = 0
>>> t_max = 10
>>> n_points = 15
...
>>> t_train = np.linspace(t_min, t_max, n_points)
>>> y_train = gen_data(t_train, a, b, c, noise=0.1, n_outliers=3)
Define the function for computing residuals and the initial estimate of the
parameters.
>>> def fun(x, t, y):
... return x[0] + x[1] * np.exp(x[2] * t) - y
...
>>> x0 = np.array([1.0, 1.0, 0.0])
Compute a standard least-squares solution:
>>> res_lsq = least_squares(fun, x0, args=(t_train, y_train))
Now compute two solutions with two different robust loss functions. The
parameter `f_scale` is set to 0.1, meaning that inlier residuals should
not significantly exceed 0.1 (the noise level used).
>>> res_soft_l1 = least_squares(fun, x0, loss='soft_l1', f_scale=0.1,
... args=(t_train, y_train))
>>> res_log = least_squares(fun, x0, loss='cauchy', f_scale=0.1,
... args=(t_train, y_train))
And finally plot all the curves. We see that by selecting an appropriate
`loss` we can get estimates close to optimal even in the presence of
strong outliers. But keep in mind that generally it is recommended to try
'soft_l1' or 'huber' losses first (if at all necessary) as the other two
options may cause difficulties in the optimization process.
>>> t_test = np.linspace(t_min, t_max, n_points * 10)
>>> y_true = gen_data(t_test, a, b, c)
>>> y_lsq = gen_data(t_test, *res_lsq.x)
>>> y_soft_l1 = gen_data(t_test, *res_soft_l1.x)
>>> y_log = gen_data(t_test, *res_log.x)
...
>>> import matplotlib.pyplot as plt
>>> plt.plot(t_train, y_train, 'o')
>>> plt.plot(t_test, y_true, 'k', linewidth=2, label='true')
>>> plt.plot(t_test, y_lsq, label='linear loss')
>>> plt.plot(t_test, y_soft_l1, label='soft_l1 loss')
>>> plt.plot(t_test, y_log, label='cauchy loss')
>>> plt.xlabel("t")
>>> plt.ylabel("y")
>>> plt.legend()
>>> plt.show()
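For illustration only (a sketch, not part of the official example set), `loss`
may also be a callable returning ``rho(z)`` and its first two derivatives for
``z = f**2`` as an array of shape (3, m); the following re-implements the
built-in 'soft_l1' loss:
>>> def soft_l1(z):
...     t = 1 + z
...     rho = np.empty((3, z.size))
...     rho[0] = 2 * (t**0.5 - 1)  # rho(z)
...     rho[1] = t**-0.5  # rho'(z)
...     rho[2] = -0.5 * t**-1.5  # rho''(z)
...     return rho
>>> res_custom = least_squares(fun, x0, loss=soft_l1, f_scale=0.1,
...                            args=(t_train, y_train))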
"""
if method not in ['trf', 'dogbox', 'lm']:
raise ValueError("`method` must be 'trf', 'dogbox' or 'lm'.")
if jac not in ['2-point', '3-point', 'cs'] and not callable(jac):
raise ValueError("`jac` must be '2-point', '3-point', 'cs' or "
"callable.")
if tr_solver not in [None, 'exact', 'lsmr']:
raise ValueError("`tr_solver` must be None, 'exact' or 'lsmr'.")
if loss not in IMPLEMENTED_LOSSES and not callable(loss):
raise ValueError("`loss` must be one of {0} or a callable."
.format(IMPLEMENTED_LOSSES.keys()))
if method == 'lm' and loss != 'linear':
raise ValueError("method='lm' supports only 'linear' loss function.")
if verbose not in [0, 1, 2]:
raise ValueError("`verbose` must be in [0, 1, 2].")
if len(bounds) != 2:
raise ValueError("`bounds` must contain 2 elements.")
if max_nfev is not None and max_nfev <= 0:
raise ValueError("`max_nfev` must be None or positive integer.")
x0 = np.atleast_1d(x0).astype(float)
if x0.ndim > 1:
raise ValueError("`x0` must have at most 1 dimension.")
lb, ub = prepare_bounds(bounds, x0.shape[0])
if method == 'lm' and not np.all((lb == -np.inf) & (ub == np.inf)):
raise ValueError("Method 'lm' doesn't support bounds.")
if lb.shape != x0.shape or ub.shape != x0.shape:
raise ValueError("Inconsistent shapes between bounds and `x0`.")
if np.any(lb >= ub):
raise ValueError("Each lower bound must be strictly less than each "
"upper bound.")
if not in_bounds(x0, lb, ub):
raise ValueError("`x0` is infeasible.")
x_scale = check_x_scale(x_scale, x0)
ftol, xtol, gtol = check_tolerance(ftol, xtol, gtol)
def fun_wrapped(x):
return np.atleast_1d(fun(x, *args, **kwargs))
if method == 'trf':
x0 = make_strictly_feasible(x0, lb, ub)
f0 = fun_wrapped(x0)
if f0.ndim != 1:
raise ValueError("`fun` must return at most 1-d array_like.")
if not np.all(np.isfinite(f0)):
raise ValueError("Residuals are not finite in the initial point.")
n = x0.size
m = f0.size
if method == 'lm' and m < n:
raise ValueError("Method 'lm' doesn't work when the number of "
"residuals is less than the number of variables.")
loss_function = construct_loss_function(m, loss, f_scale)
if callable(loss):
rho = loss_function(f0)
if rho.shape != (3, m):
raise ValueError("The return value of `loss` callable has wrong "
"shape.")
initial_cost = 0.5 * np.sum(rho[0])
elif loss_function is not None:
initial_cost = loss_function(f0, cost_only=True)
else:
initial_cost = 0.5 * np.dot(f0, f0)
if callable(jac):
J0 = jac(x0, *args, **kwargs)
if issparse(J0):
J0 = csr_matrix(J0)
def jac_wrapped(x, _=None):
return csr_matrix(jac(x, *args, **kwargs))
elif isinstance(J0, LinearOperator):
def jac_wrapped(x, _=None):
return jac(x, *args, **kwargs)
else:
J0 = np.atleast_2d(J0)
def jac_wrapped(x, _=None):
return np.atleast_2d(jac(x, *args, **kwargs))
else: # Estimate Jacobian by finite differences.
if method == 'lm':
if jac_sparsity is not None:
raise ValueError("method='lm' does not support "
"`jac_sparsity`.")
if jac != '2-point':
warn("jac='{0}' works equivalently to '2-point' "
"for method='lm'.".format(jac))
J0 = jac_wrapped = None
else:
if jac_sparsity is not None and tr_solver == 'exact':
raise ValueError("tr_solver='exact' is incompatible "
"with `jac_sparsity`.")
jac_sparsity = check_jac_sparsity(jac_sparsity, m, n)
def jac_wrapped(x, f):
J = approx_derivative(fun, x, rel_step=diff_step, method=jac,
f0=f, bounds=bounds, args=args,
kwargs=kwargs, sparsity=jac_sparsity)
if J.ndim != 2: # J is guaranteed not sparse.
J = np.atleast_2d(J)
return J
J0 = jac_wrapped(x0, f0)
if J0 is not None:
if J0.shape != (m, n):
raise ValueError(
"The return value of `jac` has wrong shape: expected {0}, "
"actual {1}.".format((m, n), J0.shape))
if not isinstance(J0, np.ndarray):
if method == 'lm':
raise ValueError("method='lm' works only with dense "
"Jacobian matrices.")
if tr_solver == 'exact':
raise ValueError(
"tr_solver='exact' works only with dense "
"Jacobian matrices.")
jac_scale = isinstance(x_scale, string_types) and x_scale == 'jac'
if isinstance(J0, LinearOperator) and jac_scale:
raise ValueError("x_scale='jac' can't be used when `jac` "
"returns LinearOperator.")
if tr_solver is None:
if isinstance(J0, np.ndarray):
tr_solver = 'exact'
else:
tr_solver = 'lsmr'
if method == 'lm':
result = call_minpack(fun_wrapped, x0, jac_wrapped, ftol, xtol, gtol,
max_nfev, x_scale, diff_step)
elif method == 'trf':
result = trf(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol,
gtol, max_nfev, x_scale, loss_function, tr_solver,
tr_options.copy(), verbose)
elif method == 'dogbox':
if tr_solver == 'lsmr' and 'regularize' in tr_options:
warn("The keyword 'regularize' in `tr_options` is not relevant "
"for 'dogbox' method.")
tr_options = tr_options.copy()
del tr_options['regularize']
result = dogbox(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol,
xtol, gtol, max_nfev, x_scale, loss_function,
tr_solver, tr_options, verbose)
result.message = TERMINATION_MESSAGES[result.status]
result.success = result.status > 0
if verbose >= 1:
print(result.message)
print("Function evaluations {0}, initial cost {1:.4e}, final cost "
"{2:.4e}, first-order optimality {3:.2e}."
.format(result.nfev, initial_cost, result.cost,
result.optimality))
return result
| {
"content_hash": "0ce2b5f82bc1cee5b87cdbf89f3d3d02",
"timestamp": "",
"source": "github",
"line_count": 901,
"max_line_length": 79,
"avg_line_length": 40.55049944506104,
"alnum_prop": 0.606880884606963,
"repo_name": "nonhermitian/scipy",
"id": "2c04e7532a6a4bff3ae261003540b669612fc416",
"size": "36536",
"binary": false,
"copies": "22",
"ref": "refs/heads/master",
"path": "scipy/optimize/_lsq/least_squares.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4118724"
},
{
"name": "C++",
"bytes": "491714"
},
{
"name": "Fortran",
"bytes": "5574493"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "10851621"
},
{
"name": "Shell",
"bytes": "2218"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
import unittest
from conans.test.tools import TestClient, TestServer
class UserTest(unittest.TestCase):
def test_command_user(self):
""" Test that the user can be shown and changed, and it is reflected in the
user cache localdb
"""
client = TestClient()
client.run('user')
self.assertIn("ERROR: No remotes defined", client.user_io.out)
def test_with_remote_no_connect(self):
test_server = TestServer()
client = TestClient(servers={"default": test_server})
client.run('user')
self.assertIn("Current 'default' user: None (anonymous)", client.user_io.out)
client.run('user john')
self.assertIn("Change 'default' user from None (anonymous) to john", client.user_io.out)
self.assertEqual(('john', None), client.localdb.get_login(test_server.fake_url))
client.run('user will')
self.assertIn("Change 'default' user from john to will", client.user_io.out)
self.assertEqual(('will', None), client.localdb.get_login(test_server.fake_url))
client.run('user None')
self.assertIn("Change 'default' user from will to None (anonymous)", client.user_io.out)
self.assertEqual((None, None), client.localdb.get_login(test_server.fake_url))
client.run('user')
self.assertIn("Current 'default' user: None (anonymous)", client.user_io.out)
def test_command_user_with_password(self):
""" Checks the -p option, that obtains a token from the password.
Useful for integrations as travis, that interactive password is not
possible
"""
test_server = TestServer()
servers = {"default": test_server}
conan = TestClient(servers=servers, users={"default": [("lasote", "mypass")]})
conan.run('user dummy -p ping_pong2', ignore_error=True)
self.assertIn("ERROR: Wrong user or password", conan.user_io.out)
conan.run('user lasote -p mypass')
self.assertNotIn("ERROR: Wrong user or password", conan.user_io.out)
self.assertIn("Change 'default' user from None (anonymous) to lasote", conan.user_io.out)
conan.run('user none')
self.assertIn("Change 'default' user from lasote to None (anonymous)", conan.user_io.out)
self.assertEqual((None, None), conan.localdb.get_login(test_server.fake_url))
conan.run('user')
self.assertIn("Current 'default' user: None (anonymous)", conan.user_io.out)
def test_command_user_with_password_spaces(self):
""" Checks the -p option, that obtains a token from the password.
Useful for integrations as travis, that interactive password is not
possible
"""
test_server = TestServer(users={"lasote": 'my "password'})
servers = {"default": test_server}
conan = TestClient(servers=servers, users={"default": [("lasote", "mypass")]})
conan.run(r'user lasote -p="my \"password"')
self.assertNotIn("ERROR: Wrong user or password", conan.user_io.out)
self.assertIn("Change 'default' user from None (anonymous) to lasote", conan.user_io.out)
conan.run('user none')
conan.run(r'user lasote -p "my \"password"')
self.assertNotIn("ERROR: Wrong user or password", conan.user_io.out)
self.assertIn("Change 'default' user from None (anonymous) to lasote", conan.user_io.out)
def test_clean(self):
test_server = TestServer()
servers = {"default": test_server}
client = TestClient(servers=servers, users={"default": [("lasote", "mypass")]})
base = '''
from conans import ConanFile
class ConanLib(ConanFile):
name = "lib"
version = "0.1"
'''
files = {"conanfile.py": base}
client.save(files)
client.run("export lasote/stable")
client.run("upload lib/0.1@lasote/stable")
client.run("user")
self.assertIn("Current 'default' user: lasote", client.user_io.out)
client.run("user --clean")
client.run("user")
self.assertNotIn("lasote", client.user_io.out)
self.assertEqual("Current 'default' user: None (anonymous)\n", client.user_io.out)
client.run("upload lib/0.1@lasote/stable")
client.run("user")
self.assertIn("Current 'default' user: lasote", client.user_io.out)
| {
"content_hash": "c4eb80d5683dedadd3da872bfa0da378",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 97,
"avg_line_length": 45.589473684210525,
"alnum_prop": 0.638420688062803,
"repo_name": "Xaltotun/conan",
"id": "a03e10696238433474a52bfdcb41c0cfad6e0a3a",
"size": "4331",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "conans/test/command/user_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1565663"
},
{
"name": "Shell",
"bytes": "1148"
}
],
"symlink_target": ""
} |
"""The superclass of all handlers."""
import base64
import cgi
import datetime
import json
import logging
import os
import re
import sys
import traceback
import urllib.parse
from flask import redirect as flask_redirect
from flask import request
from flask import Response
from flask.views import MethodView
from google.cloud import ndb
import jinja2
import jira
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import db_config
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.google_cloud_utils import storage
from clusterfuzz._internal.system import environment
from libs import auth
from libs import form
from libs import helpers
# Pattern from
# https://github.com/google/closure-library/blob/
# 3037e09cc471bfe99cb8f0ee22d9366583a20c28/closure/goog/html/safeurl.js
_SAFE_URL_PATTERN = re.compile(
r'^(?:(?:https?|mailto|ftp):|[^:/?#]*(?:[/?#]|$))', flags=re.IGNORECASE)
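# Illustrative matches: 'https://example.com/x' and '/relative/path' are
# accepted, while 'javascript:alert(1)' is rejected because a colon may only
# follow the https?/mailto/ftp schemes.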
def add_jinja2_filter(name, fn):
_JINJA_ENVIRONMENT.filters[name] = fn
class JsonEncoder(json.JSONEncoder):
"""Json encoder."""
_EPOCH = datetime.datetime.utcfromtimestamp(0)
def default(self, o): # pylint: disable=arguments-differ,method-hidden
if isinstance(o, ndb.Model):
dict_obj = o.to_dict()
dict_obj['id'] = o.key.id()
return dict_obj
if isinstance(o, datetime.datetime):
return int((o - self._EPOCH).total_seconds())
if hasattr(o, 'to_dict'):
return o.to_dict()
if isinstance(o, cgi.FieldStorage):
return str(o)
if isinstance(o, bytes):
return o.decode('utf-8')
if isinstance(o, jira.resources.Resource):
if o.raw:
return o.raw
return json.JSONEncoder.default(self, o)
def format_time(dt):
"""Format datetime object for display."""
return '{t.day} {t:%b} {t:%y} {t:%X} PDT'.format(t=dt)
def splitlines(text):
"""Split text into lines."""
return text.splitlines()
def split_br(text):
return re.split(r'\s*<br */>\s*', text, flags=re.IGNORECASE)
def encode_json(value):
"""Dump base64-encoded JSON string (to avoid XSS)."""
return base64.b64encode(json.dumps(
value, cls=JsonEncoder).encode('utf-8')).decode('utf-8')
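# Base64 output contains only [A-Za-z0-9+/=], so the encoded JSON can be
# embedded in HTML templates and decoded client-side without risking
# markup or script injection.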
_JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(
os.path.join(os.path.dirname(__file__), '..', 'templates')),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
_MENU_ITEMS = []
add_jinja2_filter('json', encode_json)
add_jinja2_filter('format_time', format_time)
add_jinja2_filter('splitlines', splitlines)
add_jinja2_filter('split_br', split_br)
add_jinja2_filter('polymer_tag', lambda v: '{{%s}}' % v)
def add_menu(name, href):
"""Add menu item to the main navigation."""
_MENU_ITEMS.append(_MenuItem(name, href))
def make_login_url(dest_url):
"""Make the switch account url."""
return '/login?' + urllib.parse.urlencode({'dest': dest_url})
def make_logout_url(dest_url):
"""Make the switch account url."""
return '/logout?' + urllib.parse.urlencode({
'csrf_token': form.generate_csrf_token(),
'dest': dest_url,
})
def check_redirect_url(url):
"""Check redirect URL is safe."""
if not _SAFE_URL_PATTERN.match(url):
raise helpers.EarlyExitException('Invalid redirect.', 403)
class _MenuItem(object):
"""A menu item used for rendering an item in the main navigation."""
def __init__(self, name, href):
self.name = name
self.href = href
class Handler(MethodView):
"""A superclass for all handlers. It contains many convenient methods."""
def is_cron(self):
"""Return true if the request is from a cron job."""
return bool(request.headers.get('X-Appengine-Cron'))
def should_render_json(self):
return (self.is_json or
'application/json' in request.headers.get('accept', ''))
def render_forbidden(self, message):
"""Write HTML response for 403."""
login_url = make_login_url(dest_url=request.url)
user_email = helpers.get_user_email()
if not user_email:
return self.redirect(login_url)
contact_string = db_config.get_value('contact_string')
template_values = {
'message': message,
'user_email': helpers.get_user_email(),
'login_url': login_url,
'switch_account_url': login_url,
'logout_url': make_logout_url(dest_url=request.url),
'contact_string': contact_string,
}
return self.render('error-403.html', template_values, 403)
def _add_security_response_headers(self, response):
"""Add security-related headers to response."""
response.headers['Strict-Transport-Security'] = (
'max-age=2592000; includeSubdomains')
response.headers['X-Content-Type-Options'] = 'nosniff'
response.headers['X-Frame-Options'] = 'deny'
return response
def render(self, path, values=None, status=200):
"""Write HTML response."""
if values is None:
values = {}
values['menu_items'] = _MENU_ITEMS
values['is_oss_fuzz'] = utils.is_oss_fuzz()
values['is_development'] = (
environment.is_running_on_app_engine_development())
values['is_logged_in'] = bool(helpers.get_user_email())
# Only track analytics for non-admin users.
values['ga_tracking_id'] = (
local_config.GAEConfig().get('ga_tracking_id')
if not auth.is_current_user_admin() else None)
if values['is_logged_in']:
values['switch_account_url'] = make_login_url(request.url)
values['logout_url'] = make_logout_url(dest_url=request.url)
template = _JINJA_ENVIRONMENT.get_template(path)
response = Response()
response = self._add_security_response_headers(response)
response.headers['Content-Type'] = 'text/html'
response.data = template.render(values)
response.status_code = status
return response
# pylint: disable=unused-argument
def before_render_json(self, values, status):
"""A hook for modifying values before render_json."""
def render_json(self, values, status=200):
"""Write JSON response."""
response = Response()
response = self._add_security_response_headers(response)
response.headers['Content-Type'] = 'application/json'
self.before_render_json(values, status)
response.data = json.dumps(values, cls=JsonEncoder)
response.status_code = status
return response
def handle_exception(self, exception):
"""Catch exception and format it properly."""
try:
status = 500
values = {
'message': str(exception),
'email': helpers.get_user_email(),
'traceDump': traceback.format_exc(),
'status': status,
'type': exception.__class__.__name__
}
if isinstance(exception, helpers.EarlyExitException):
status = exception.status
values = exception.to_dict()
# 4XX is not our fault. Therefore, we hide the trace dump and log on
# the INFO level.
if 400 <= status <= 499:
logging.info(json.dumps(values, cls=JsonEncoder))
del values['traceDump']
else: # Other error codes should be logged with the EXCEPTION level.
logging.exception(exception)
if self.should_render_json():
return self.render_json(values, status)
if status in (403, 401):
return self.render_forbidden(str(exception))
return self.render('error.html', values, status)
except Exception:
self.handle_exception_exception()
return None
def handle_exception_exception(self):
"""Catch exception in handle_exception and format it properly."""
exception = sys.exc_info()[1]
values = {'message': str(exception), 'traceDump': traceback.format_exc()}
logging.exception(exception)
if self.should_render_json():
return self.render_json(values, 500)
return self.render('error.html', values, 500)
def redirect(self, url, **kwargs):
"""Check vaid url and redirect to it, if valid."""
url = str(url)
check_redirect_url(url)
return flask_redirect(url, **kwargs)
def dispatch_request(self, *args, **kwargs):
"""Dispatch a request and postprocess."""
self.is_json = False
try:
return super(Handler, self).dispatch_request(*args, **kwargs)
except Exception as exception:
return self.handle_exception(exception)
class GcsUploadHandler(Handler):
"""A handler which uploads files to GCS."""
def dispatch_request(self, *args, **kwargs):
"""Dispatch a request and postprocess."""
self.upload = None
return super().dispatch_request(*args, **kwargs)
def get_upload(self):
"""Get uploads."""
if self.upload:
return self.upload
upload_key = request.get('upload_key')
if not upload_key:
return None
blob_info = storage.GcsBlobInfo.from_key(upload_key)
if not blob_info:
raise helpers.EarlyExitException('Failed to upload.', 500)
self.upload = blob_info
return self.upload
| {
"content_hash": "ae9776e6458d9a31031de468b66f25a7",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 77,
"avg_line_length": 30.584192439862544,
"alnum_prop": 0.6683146067415731,
"repo_name": "google/clusterfuzz",
"id": "1f1e4daf9432f33e7df018c6c0c52bb068b8acfb",
"size": "9475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/appengine/handlers/base_handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "21721"
},
{
"name": "C",
"bytes": "3485"
},
{
"name": "C++",
"bytes": "16326"
},
{
"name": "CSS",
"bytes": "16789"
},
{
"name": "Dockerfile",
"bytes": "25218"
},
{
"name": "Go",
"bytes": "16253"
},
{
"name": "HTML",
"bytes": "503044"
},
{
"name": "JavaScript",
"bytes": "9433"
},
{
"name": "Jinja",
"bytes": "3308"
},
{
"name": "PowerShell",
"bytes": "17307"
},
{
"name": "Python",
"bytes": "5085058"
},
{
"name": "Ruby",
"bytes": "93"
},
{
"name": "Shell",
"bytes": "80910"
},
{
"name": "Starlark",
"bytes": "1951"
}
],
"symlink_target": ""
} |
"""Nova common internal object model"""
import collections
import copy
import functools
import netaddr
from oslo import messaging
import six
from nova import context
from nova import exception
from nova.objects import fields
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import versionutils
LOG = logging.getLogger('object')
class NotSpecifiedSentinel:
pass
def get_attrname(name):
"""Return the mangled name of the attribute's underlying storage."""
return '_%s' % name
def make_class_properties(cls):
# NOTE(danms/comstud): Inherit fields from super classes.
# mro() returns the current class first and returns 'object' last, so
# those can be skipped. Also be careful to not overwrite any fields
# that already exist. And make sure each cls has its own copy of
# fields and that it is not sharing the dict with a super class.
cls.fields = dict(cls.fields)
for supercls in cls.mro()[1:-1]:
if not hasattr(supercls, 'fields'):
continue
for name, field in supercls.fields.items():
if name not in cls.fields:
cls.fields[name] = field
for name, field in cls.fields.iteritems():
def getter(self, name=name):
attrname = get_attrname(name)
if not hasattr(self, attrname):
self.obj_load_attr(name)
return getattr(self, attrname)
def setter(self, value, name=name, field=field):
self._changed_fields.add(name)
try:
return setattr(self, get_attrname(name),
field.coerce(self, name, value))
except Exception:
attr = "%s.%s" % (self.obj_name(), name)
LOG.exception(_('Error setting %(attr)s') %
{'attr': attr})
raise
setattr(cls, name, property(getter, setter))
class NovaObjectMetaclass(type):
"""Metaclass that allows tracking of object classes."""
# NOTE(danms): This is what controls whether object operations are
# remoted. If this is not None, use it to remote things over RPC.
indirection_api = None
def __init__(cls, names, bases, dict_):
if not hasattr(cls, '_obj_classes'):
# This will be set in the 'NovaObject' class.
cls._obj_classes = collections.defaultdict(list)
else:
# Add the subclass to NovaObject._obj_classes
make_class_properties(cls)
cls._obj_classes[cls.obj_name()].append(cls)
# These are decorators that mark an object's method as remotable.
# If the metaclass is configured to forward object methods to an
# indirection service, these will result in making an RPC call
# instead of directly calling the implementation in the object. Instead,
# the object implementation on the remote end will perform the
# requested action and the result will be returned here.
def remotable_classmethod(fn):
"""Decorator for remotable classmethods."""
@functools.wraps(fn)
def wrapper(cls, context, *args, **kwargs):
if NovaObject.indirection_api:
result = NovaObject.indirection_api.object_class_action(
context, cls.obj_name(), fn.__name__, cls.VERSION,
args, kwargs)
else:
result = fn(cls, context, *args, **kwargs)
if isinstance(result, NovaObject):
result._context = context
return result
return classmethod(wrapper)
# See comment above for remotable_classmethod()
#
# Note that this will use either the provided context, or the one
# stashed in the object. If neither are present, the object is
# "orphaned" and remotable methods cannot be called.
def remotable(fn):
"""Decorator for remotable object methods."""
@functools.wraps(fn)
def wrapper(self, *args, **kwargs):
ctxt = self._context
try:
if isinstance(args[0], (context.RequestContext)):
ctxt = args[0]
args = args[1:]
except IndexError:
pass
if ctxt is None:
raise exception.OrphanedObjectError(method=fn.__name__,
objtype=self.obj_name())
# Force this to be set if it wasn't before.
self._context = ctxt
if NovaObject.indirection_api:
updates, result = NovaObject.indirection_api.object_action(
ctxt, self, fn.__name__, args, kwargs)
for key, value in updates.iteritems():
if key in self.fields:
field = self.fields[key]
self[key] = field.from_primitive(self, key, value)
self.obj_reset_changes()
self._changed_fields = set(updates.get('obj_what_changed', []))
return result
else:
return fn(self, ctxt, *args, **kwargs)
return wrapper
@six.add_metaclass(NovaObjectMetaclass)
class NovaObject(object):
"""Base class and object factory.
This forms the base of all objects that can be remoted or instantiated
via RPC. Simply defining a class that inherits from this base class
will make it remotely instantiatable. Objects should implement the
necessary "get" classmethod routines as well as "save" object methods
as appropriate.
"""
# Object versioning rules
#
# Each service has its set of objects, each with a version attached. When
# a client attempts to call an object method, the server checks to see if
# the version of that object matches (in a compatible way) its object
# implementation. If so, cool, and if not, fail.
VERSION = '1.0'
# The fields present in this object as key:field pairs. For example:
#
# fields = { 'foo': fields.IntegerField(),
# 'bar': fields.StringField(),
# }
fields = {}
obj_extra_fields = []
def __init__(self, context=None, **kwargs):
self._changed_fields = set()
self._context = context
for key in kwargs.keys():
self[key] = kwargs[key]
@classmethod
def obj_name(cls):
"""Return a canonical name for this object which will be used over
the wire for remote hydration.
"""
return cls.__name__
@classmethod
def obj_class_from_name(cls, objname, objver):
"""Returns a class from the registry based on a name and version."""
if objname not in cls._obj_classes:
LOG.error(_('Unable to instantiate unregistered object type '
'%(objtype)s') % dict(objtype=objname))
raise exception.UnsupportedObjectError(objtype=objname)
latest = None
compatible_match = None
for objclass in cls._obj_classes[objname]:
if objclass.VERSION == objver:
return objclass
version_bits = tuple([int(x) for x in objclass.VERSION.split(".")])
if latest is None:
latest = version_bits
elif latest < version_bits:
latest = version_bits
if versionutils.is_compatible(objver, objclass.VERSION):
compatible_match = objclass
if compatible_match:
return compatible_match
latest_ver = '%i.%i' % latest
raise exception.IncompatibleObjectVersion(objname=objname,
objver=objver,
supported=latest_ver)
@classmethod
def _obj_from_primitive(cls, context, objver, primitive):
self = cls()
self._context = context
self.VERSION = objver
objdata = primitive['nova_object.data']
changes = primitive.get('nova_object.changes', [])
for name, field in self.fields.items():
if name in objdata:
setattr(self, name, field.from_primitive(self, name,
objdata[name]))
self._changed_fields = set([x for x in changes if x in self.fields])
return self
@classmethod
def obj_from_primitive(cls, primitive, context=None):
"""Object field-by-field hydration."""
if primitive['nova_object.namespace'] != 'nova':
# NOTE(danms): We don't do anything with this now, but it's
# there for "the future"
raise exception.UnsupportedObjectError(
objtype='%s.%s' % (primitive['nova_object.namespace'],
primitive['nova_object.name']))
objname = primitive['nova_object.name']
objver = primitive['nova_object.version']
objclass = cls.obj_class_from_name(objname, objver)
return objclass._obj_from_primitive(context, objver, primitive)
def __deepcopy__(self, memo):
"""Efficiently make a deep copy of this object."""
# NOTE(danms): A naive deepcopy would copy more than we need,
# and since we have knowledge of the volatile bits of the
# object, we can be smarter here. Also, nested entities within
# some objects may be uncopyable, so we can avoid those sorts
# of issues by copying only our field data.
nobj = self.__class__()
nobj._context = self._context
for name in self.fields:
if self.obj_attr_is_set(name):
nval = copy.deepcopy(getattr(self, name), memo)
setattr(nobj, name, nval)
nobj._changed_fields = set(self._changed_fields)
return nobj
def obj_clone(self):
"""Create a copy."""
return copy.deepcopy(self)
def obj_make_compatible(self, primitive, target_version):
"""Make an object representation compatible with a target version.
This is responsible for taking the primitive representation of
an object and making it suitable for the given target_version.
This may mean converting the format of object attributes, removing
attributes that have been added since the target version, etc.
:param:primitive: The result of self.obj_to_primitive()
:param:target_version: The version string requested by the recipient
of the object.
:param:raises: nova.exception.UnsupportedObjectError if conversion
is not possible for some reason.
"""
pass
def obj_to_primitive(self, target_version=None):
"""Simple base-case dehydration.
This calls to_primitive() for each item in fields.
"""
primitive = dict()
for name, field in self.fields.items():
if self.obj_attr_is_set(name):
primitive[name] = field.to_primitive(self, name,
getattr(self, name))
if target_version:
self.obj_make_compatible(primitive, target_version)
obj = {'nova_object.name': self.obj_name(),
'nova_object.namespace': 'nova',
'nova_object.version': target_version or self.VERSION,
'nova_object.data': primitive}
if self.obj_what_changed():
obj['nova_object.changes'] = list(self.obj_what_changed())
return obj
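# The resulting primitive looks roughly like (illustrative values):
# {'nova_object.name': 'Instance', 'nova_object.namespace': 'nova',
#  'nova_object.version': '1.0', 'nova_object.data': {...},
#  'nova_object.changes': [...]}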
def obj_load_attr(self, attrname):
"""Load an additional attribute from the real object.
This should use self._conductor, and cache any data that might
be useful for future load operations.
"""
raise NotImplementedError(
_("Cannot load '%s' in the base class") % attrname)
def save(self, context):
"""Save the changed fields back to the store.
This is optional for subclasses, but is presented here in the base
class for consistency among those that do.
"""
raise NotImplementedError('Cannot save anything in the base class')
def obj_what_changed(self):
"""Returns a set of fields that have been modified."""
changes = set(self._changed_fields)
for field in self.fields:
if (self.obj_attr_is_set(field) and
isinstance(self[field], NovaObject) and
self[field].obj_what_changed()):
changes.add(field)
return changes
def obj_get_changes(self):
"""Returns a dict of changed fields and their new values."""
changes = {}
for key in self.obj_what_changed():
changes[key] = self[key]
return changes
def obj_reset_changes(self, fields=None):
"""Reset the list of fields that have been changed.
Note that this is NOT "revert to previous values"
"""
if fields:
self._changed_fields -= set(fields)
else:
self._changed_fields.clear()
def obj_attr_is_set(self, attrname):
"""Test object to see if attrname is present.
Returns True if the named attribute has a value set, or
False if not. Raises AttributeError if attrname is not
a valid attribute for this object.
"""
if attrname not in self.obj_fields:
raise AttributeError(
_("%(objname)s object has no attribute '%(attrname)s'") %
{'objname': self.obj_name(), 'attrname': attrname})
return hasattr(self, get_attrname(attrname))
@property
def obj_fields(self):
return self.fields.keys() + self.obj_extra_fields
# dictish syntactic sugar
def iteritems(self):
"""For backwards-compatibility with dict-based objects.
NOTE(danms): May be removed in the future.
"""
for name in self.obj_fields:
if (self.obj_attr_is_set(name) or
name in self.obj_extra_fields):
yield name, getattr(self, name)
items = lambda self: list(self.iteritems())
def __getitem__(self, name):
"""For backwards-compatibility with dict-based objects.
NOTE(danms): May be removed in the future.
"""
return getattr(self, name)
def __setitem__(self, name, value):
"""For backwards-compatibility with dict-based objects.
NOTE(danms): May be removed in the future.
"""
setattr(self, name, value)
def __contains__(self, name):
"""For backwards-compatibility with dict-based objects.
NOTE(danms): May be removed in the future.
"""
try:
return self.obj_attr_is_set(name)
except AttributeError:
return False
def get(self, key, value=NotSpecifiedSentinel):
"""For backwards-compatibility with dict-based objects.
NOTE(danms): May be removed in the future.
"""
if key not in self.obj_fields:
raise AttributeError("'%s' object has no attribute '%s'" % (
self.__class__, key))
if value != NotSpecifiedSentinel and not self.obj_attr_is_set(key):
return value
else:
return self[key]
def update(self, updates):
"""For backwards-compatibility with dict-base objects.
NOTE(danms): May be removed in the future.
"""
for key, value in updates.items():
self[key] = value
class NovaPersistentObject(object):
"""Mixin class for Persistent objects.
This adds the fields that we use in common for all persistent objects.
"""
fields = {
'created_at': fields.DateTimeField(nullable=True),
'updated_at': fields.DateTimeField(nullable=True),
'deleted_at': fields.DateTimeField(nullable=True),
'deleted': fields.BooleanField(default=False),
}
class ObjectListBase(object):
"""Mixin class for lists of objects.
This mixin class can be added as a base class for an object that
is implementing a list of objects. It adds a single field of 'objects',
which is the list store, and behaves like a list itself. It supports
serialization of the list of objects automatically.
"""
fields = {
'objects': fields.ListOfObjectsField('NovaObject'),
}
# This is a dictionary of my_version:child_version mappings so that
# we can support backleveling our contents based on the version
# requested of the list object.
child_versions = {}
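# Illustrative mapping: child_versions = {'1.1': '1.2'} means that when this
# list is backlevelled to version '1.1', each contained object is serialized
# at its version '1.2'.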
def __iter__(self):
"""List iterator interface."""
return iter(self.objects)
def __len__(self):
"""List length."""
return len(self.objects)
def __getitem__(self, index):
"""List index access."""
if isinstance(index, slice):
new_obj = self.__class__()
new_obj.objects = self.objects[index]
# NOTE(danms): We must be mixed in with a NovaObject!
new_obj.obj_reset_changes()
new_obj._context = self._context
return new_obj
return self.objects[index]
def __contains__(self, value):
"""List membership test."""
return value in self.objects
def count(self, value):
"""List count of value occurrences."""
return self.objects.count(value)
def index(self, value):
"""List index of value."""
return self.objects.index(value)
def sort(self, cmp=None, key=None, reverse=False):
self.objects.sort(cmp=cmp, key=key, reverse=reverse)
def _attr_objects_to_primitive(self):
"""Serialization of object list."""
return [x.obj_to_primitive() for x in self.objects]
def _attr_objects_from_primitive(self, value):
"""Deserialization of object list."""
objects = []
for entity in value:
obj = NovaObject.obj_from_primitive(entity, context=self._context)
objects.append(obj)
return objects
def obj_make_compatible(self, primitive, target_version):
primitives = primitive['objects']
child_target_version = self.child_versions.get(target_version, '1.0')
for index, item in enumerate(self.objects):
self.objects[index].obj_make_compatible(
primitives[index]['nova_object.data'],
child_target_version)
primitives[index]['nova_object.version'] = child_target_version
def obj_what_changed(self):
changes = set(self._changed_fields)
for child in self.objects:
if child.obj_what_changed():
changes.add('objects')
return changes
class NovaObjectSerializer(messaging.NoOpSerializer):
"""A NovaObject-aware Serializer.
This implements the Oslo Serializer interface and provides the
ability to serialize and deserialize NovaObject entities. Any service
that needs to accept or return NovaObjects as arguments or result values
should pass this to its RPCClient and RPCServer objects.
"""
@property
def conductor(self):
if not hasattr(self, '_conductor'):
from nova import conductor
self._conductor = conductor.API()
return self._conductor
def _process_object(self, context, objprim):
try:
objinst = NovaObject.obj_from_primitive(objprim, context=context)
except exception.IncompatibleObjectVersion as e:
objinst = self.conductor.object_backport(context, objprim,
e.kwargs['supported'])
return objinst
def _process_iterable(self, context, action_fn, values):
"""Process an iterable, taking an action on each value.
:param:context: Request context
:param:action_fn: Action to take on each item in values
:param:values: Iterable container of things to take action on
:returns: A new container of the same type (except set) with
items from values having had action applied.
"""
iterable = values.__class__
if iterable == set:
# NOTE(danms): A set can't have an unhashable value inside, such as
# a dict. Convert sets to tuples, which is fine, since we can't
# send them over RPC anyway.
iterable = tuple
return iterable([action_fn(context, value) for value in values])
def serialize_entity(self, context, entity):
if isinstance(entity, (tuple, list, set)):
entity = self._process_iterable(context, self.serialize_entity,
entity)
elif (hasattr(entity, 'obj_to_primitive') and
callable(entity.obj_to_primitive)):
entity = entity.obj_to_primitive()
return entity
def deserialize_entity(self, context, entity):
if isinstance(entity, dict) and 'nova_object.name' in entity:
entity = self._process_object(context, entity)
elif isinstance(entity, (tuple, list, set)):
entity = self._process_iterable(context, self.deserialize_entity,
entity)
return entity
def obj_to_primitive(obj):
"""Recursively turn an object into a python primitive.
A NovaObject becomes a dict, and anything that implements ObjectListBase
becomes a list.
"""
if isinstance(obj, ObjectListBase):
return [obj_to_primitive(x) for x in obj]
elif isinstance(obj, NovaObject):
result = {}
for key, value in obj.iteritems():
result[key] = obj_to_primitive(value)
return result
elif isinstance(obj, netaddr.IPAddress):
return str(obj)
elif isinstance(obj, netaddr.IPNetwork):
return str(obj)
else:
return obj
def obj_make_list(context, list_obj, item_cls, db_list, **extra_args):
"""Construct an object list from a list of primitives.
This calls item_cls._from_db_object() on each item of db_list, and
adds the resulting object to list_obj.
:param:context: Request context
:param:list_obj: An ObjectListBase object
:param:item_cls: The NovaObject class of the objects within the list
:param:db_list: The list of primitives to convert to objects
:param:extra_args: Extra arguments to pass to _from_db_object()
:returns: list_obj
"""
list_obj.objects = []
for db_item in db_list:
item = item_cls._from_db_object(context, item_cls(), db_item,
**extra_args)
list_obj.objects.append(item)
list_obj._context = context
list_obj.obj_reset_changes()
return list_obj
| {
"content_hash": "e773af6d4003922224befd695a700cb9",
"timestamp": "",
"source": "github",
"line_count": 609,
"max_line_length": 79,
"avg_line_length": 37.1559934318555,
"alnum_prop": 0.6057981262153085,
"repo_name": "nkrinner/nova",
"id": "c453fa011b7cf4e6276a4ee078d7e16aa12e7115",
"size": "23233",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/objects/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import numpy as np
import sys
def build_parameter_grid( param_grid ) :
print "Building the parameter grid..."
import ast
return [ {k: ast.literal_eval(v) for k,v in param_grid.iteritems()} ]
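# Illustrative (hypothetical) input: {'clf__C': '[0.1, 1, 10]', 'clf__gamma': '[0.01, 0.1]'}
# is parsed with ast.literal_eval into [{'clf__C': [0.1, 1, 10], 'clf__gamma': [0.01, 0.1]}],
# the list-of-dicts form GridSearchCV expects for param_grid.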
def grid_search( pipeline, param_grid, X_train, y_train, X_test, y_test ) :
'''
|
| Execute a grid search over the parameter grid for a
| given pipeline, and print results to stdout
|
'''
from sklearn.grid_search import GridSearchCV
from allpipeline.utils.model_info import print_model_scores
import time
gs = GridSearchCV(pipeline, param_grid, n_jobs = 1)
# Train the model on the training set
print "Running grid search..."
start_time = time.time()
gs.fit(X_train, y_train)
time_taken = time.time() - start_time
print "Finished grid search. Took", time_taken, "seconds"
print
print "Best score:", gs.best_score_
print "Best parameters set:",
best_parameters = gs.best_estimator_.get_params()
param_names = reduce(lambda x, y : x | y,
(p_grid.viewkeys()
for p_grid in param_grid))
for param_name in sorted(param_names):
print param_name, ":", best_parameters[param_name]
print
print "Scores for each parameter combination:"
# GridSearchCV cross-validates each parameter combination (3 folds by
# default) and selects the setting with the best mean score across folds.
# http://scikit-learn.org/stable/modules/cross_validation.html
for grid_score in gs.grid_scores_:
print grid_score
print
print_model_scores(gs, X_train, y_train, X_test, y_test)
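# Illustrative usage sketch (pipeline and estimator names are hypothetical):
# pipeline = Pipeline([('scaler', StandardScaler()), ('clf', SVC())])
# param_grid = build_parameter_grid({'clf__C': '[0.1, 1, 10]'})
# grid_search(pipeline, param_grid, X_train, y_train, X_test, y_test)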
| {
"content_hash": "bc1cbf6fbdf2fbe96dd2e50654527277",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 75,
"avg_line_length": 33,
"alnum_prop": 0.6406060606060606,
"repo_name": "cavestruz/StrongCNN",
"id": "c77aadcd5970c79e4a37e3c1499d22b6d725eb16",
"size": "1650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "allpipeline/grid_searches/_tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "7822"
},
{
"name": "Python",
"bytes": "94637"
},
{
"name": "Shell",
"bytes": "4089"
}
],
"symlink_target": ""
} |
"""
LRU Cache for Image Data
"""
import hashlib
from oslo.config import cfg
from glance.common import exception
from glance.common import utils
from glance.openstack.common import importutils
import glance.openstack.common.log as logging
LOG = logging.getLogger(__name__)
image_cache_opts = [
cfg.StrOpt('image_cache_driver', default='sqlite',
help=_('The driver to use for image cache management.')),
cfg.IntOpt('image_cache_max_size', default=10 * (1024 ** 3), # 10 GB
help=_('The maximum size in bytes that the cache can use.')),
cfg.IntOpt('image_cache_stall_time', default=86400, # 24 hours
help=_('The amount of time to let an image remain in the '
'cache without being accessed')),
cfg.StrOpt('image_cache_dir',
help=_('Base directory that the Image Cache uses.')),
]
CONF = cfg.CONF
CONF.register_opts(image_cache_opts)
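# Illustrative glance-api.conf snippet exercising these options (values are
# examples only):
# [DEFAULT]
# image_cache_driver = sqlite
# image_cache_max_size = 10737418240
# image_cache_stall_time = 86400
# image_cache_dir = /var/lib/glance/image-cache/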
class ImageCache(object):
"""Provides an LRU cache for image data."""
def __init__(self):
self.init_driver()
def init_driver(self):
"""
Create the driver for the cache
"""
driver_name = CONF.image_cache_driver
driver_module = (__name__ + '.drivers.' + driver_name + '.Driver')
try:
self.driver_class = importutils.import_class(driver_module)
LOG.info(_("Image cache loaded driver '%s'.") %
driver_name)
except ImportError as import_err:
LOG.warn(_("Image cache driver "
"'%(driver_name)s' failed to load. "
"Got error: '%(import_err)s.") % locals())
driver_module = __name__ + '.drivers.sqlite.Driver'
LOG.info(_("Defaulting to SQLite driver."))
self.driver_class = importutils.import_class(driver_module)
self.configure_driver()
def configure_driver(self):
"""
Configure the driver for the cache and, if it fails to configure,
fall back to using the SQLite driver which has no odd dependencies
"""
try:
self.driver = self.driver_class()
self.driver.configure()
except exception.BadDriverConfiguration as config_err:
driver_module = self.driver_class.__module__
LOG.warn(_("Image cache driver "
"'%(driver_module)s' failed to configure. "
"Got error: '%(config_err)s") % locals())
LOG.info(_("Defaulting to SQLite driver."))
default_module = __name__ + '.drivers.sqlite.Driver'
self.driver_class = importutils.import_class(default_module)
self.driver = self.driver_class()
self.driver.configure()
def is_cached(self, image_id):
"""
Returns True if the image with the supplied ID has its image
file cached.
:param image_id: Image ID
"""
return self.driver.is_cached(image_id)
def is_queued(self, image_id):
"""
Returns True if the image identifier is in our cache queue.
:param image_id: Image ID
"""
return self.driver.is_queued(image_id)
def get_cache_size(self):
"""
Returns the total size in bytes of the image cache.
"""
return self.driver.get_cache_size()
def get_hit_count(self, image_id):
"""
Return the number of hits that an image has
:param image_id: Opaque image identifier
"""
return self.driver.get_hit_count(image_id)
def get_cached_images(self):
"""
Returns a list of records about cached images.
"""
return self.driver.get_cached_images()
def delete_all_cached_images(self):
"""
Removes all cached image files and any attributes about the images
and returns the number of cached image files that were deleted.
"""
return self.driver.delete_all_cached_images()
def delete_cached_image(self, image_id):
"""
Removes a specific cached image file and any attributes about the image
:param image_id: Image ID
"""
self.driver.delete_cached_image(image_id)
def delete_all_queued_images(self):
"""
Removes all queued image files and any attributes about the images
and returns the number of queued image files that were deleted.
"""
return self.driver.delete_all_queued_images()
def delete_queued_image(self, image_id):
"""
Removes a specific queued image file and any attributes about the image
:param image_id: Image ID
"""
self.driver.delete_queued_image(image_id)
def prune(self):
"""
Removes all cached image files above the cache's maximum
size. Returns a tuple containing the total number of cached
files removed and the total size of all pruned image files.
"""
max_size = CONF.image_cache_max_size
current_size = self.driver.get_cache_size()
if max_size > current_size:
LOG.debug(_("Image cache has free space, skipping prune..."))
return (0, 0)
overage = current_size - max_size
LOG.debug(_("Image cache currently %(overage)d bytes over max "
"size. Starting prune to max size of %(max_size)d ") %
locals())
total_bytes_pruned = 0
total_files_pruned = 0
entry = self.driver.get_least_recently_accessed()
while entry and current_size > max_size:
image_id, size = entry
LOG.debug(_("Pruning '%(image_id)s' to free %(size)d bytes"),
{'image_id': image_id, 'size': size})
self.driver.delete_cached_image(image_id)
total_bytes_pruned = total_bytes_pruned + size
total_files_pruned = total_files_pruned + 1
current_size = current_size - size
entry = self.driver.get_least_recently_accessed()
LOG.debug(_("Pruning finished pruning. "
"Pruned %(total_files_pruned)d and "
"%(total_bytes_pruned)d.") % locals())
return total_files_pruned, total_bytes_pruned
def clean(self, stall_time=None):
"""
Cleans up any invalid or incomplete cached images. The cache driver
decides what that means...
"""
self.driver.clean(stall_time)
def queue_image(self, image_id):
"""
This adds an image to the queue to be cached.
If the image already exists in the queue or has already been
cached, we return False; otherwise we return True.
:param image_id: Image ID
"""
return self.driver.queue_image(image_id)
def get_caching_iter(self, image_id, image_checksum, image_iter):
"""
Returns an iterator that caches the contents of an image
while the image contents are read through the supplied
iterator.
:param image_id: Image ID
:param image_checksum: checksum expected to be generated while
iterating over image data
:param image_iter: Iterator that will read image contents
"""
if not self.driver.is_cacheable(image_id):
return image_iter
LOG.debug(_("Tee'ing image '%s' into cache"), image_id)
return self.cache_tee_iter(image_id, image_iter, image_checksum)
def cache_tee_iter(self, image_id, image_iter, image_checksum):
try:
current_checksum = hashlib.md5()
with self.driver.open_for_write(image_id) as cache_file:
for chunk in image_iter:
try:
cache_file.write(chunk)
finally:
current_checksum.update(chunk)
yield chunk
cache_file.flush()
if (image_checksum and
image_checksum != current_checksum.hexdigest()):
msg = _("Checksum verification failed. Aborted "
"caching of image '%s'.") % image_id
raise exception.GlanceException(msg)
except exception.GlanceException as e:
# image_iter has given us bad data (size_checked_iter has found a
# bad length) or corrupt data (checksum is wrong).
LOG.exception(e)
raise
except Exception as e:
LOG.exception(_("Exception encountered while tee'ing "
"image '%s' into cache: %s. Continuing "
"with response.") % (image_id, e))
# If no checksum provided continue responding even if
# caching failed.
for chunk in image_iter:
yield chunk
def cache_image_iter(self, image_id, image_iter, image_checksum=None):
"""
Cache an image with supplied iterator.
:param image_id: Image ID
:param image_file: Iterator retrieving image chunks
:param image_checksum: Checksum of image
:retval True if image file was cached, False otherwise
"""
if not self.driver.is_cacheable(image_id):
return False
for chunk in self.get_caching_iter(image_id, image_checksum,
image_iter):
pass
return True
def cache_image_file(self, image_id, image_file):
"""
Cache an image file.
:param image_id: Image ID
:param image_file: Image file to cache
:retval True if image file was cached, False otherwise
"""
CHUNKSIZE = 64 * 1024 * 1024
return self.cache_image_iter(image_id,
utils.chunkiter(image_file, CHUNKSIZE))
def open_for_read(self, image_id):
"""
Open and yield file for reading the image file for an image
with supplied identifier.
:note Upon successful reading of the image file, the image's
hit count will be incremented.
:param image_id: Image ID
"""
return self.driver.open_for_read(image_id)
def get_image_size(self, image_id):
"""
Return the size of the image file for an image with supplied
identifier.
:param image_id: Image ID
"""
return self.driver.get_image_size(image_id)
def get_queued_images(self):
"""
Returns a list of image IDs that are in the queue. The
list should be sorted by the time the image ID was inserted
into the queue.
"""
return self.driver.get_queued_images()
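# --- Illustrative usage sketch (not part of the original module) ---
# A hedged outline of how the cache is typically driven: queue an image,
# tee its bytes into the cache while streaming them out, then serve it
# from the cache on a later request.  ``image_id``, ``checksum`` and
# ``data_iter`` are hypothetical values supplied by the caller.
#
#   cache = ImageCache()
#   if not cache.is_cached(image_id):
#       cache.queue_image(image_id)
#       for chunk in cache.get_caching_iter(image_id, checksum, data_iter):
#           pass  # forward each chunk to the requester while caching it
#   else:
#       with cache.open_for_read(image_id) as cached_file:
#           contents = cached_file.read()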
| {
"content_hash": "1d0368bb5b92141c547934f783e9ad4a",
"timestamp": "",
"source": "github",
"line_count": 309,
"max_line_length": 79,
"avg_line_length": 34.86407766990291,
"alnum_prop": 0.5748630836350135,
"repo_name": "ntt-sic/glance",
"id": "4f6188242e2d8b41e4915abf11b7621ac5039f30",
"size": "11448",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glance/image_cache/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2469537"
},
{
"name": "Shell",
"bytes": "3488"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: cp_mgmt_time_facts
short_description: Get time objects facts on Check Point over Web Services API
description:
- Get time objects facts on Check Point devices.
- All operations are performed over Web Services API.
- This module handles both operations, get a specific object and get several objects,
For getting a specific object use the parameter 'name'.
version_added: "2.9"
author: "Or Soffer (@chkp-orso)"
options:
name:
description:
- Object name.
This parameter is relevant only for getting a specific object.
type: str
details_level:
description:
- The level of detail for some of the fields in the response can vary from showing only the UID value of the object to a fully detailed
representation of the object.
type: str
choices: ['uid', 'standard', 'full']
limit:
description:
- No more than that many results will be returned.
This parameter is relevant only for getting few objects.
type: int
offset:
description:
- Skip that many results before beginning to return them.
This parameter is relevant only for getting few objects.
type: int
order:
description:
- Sorts results by the given field. By default the results are sorted in the ascending order by name.
This parameter is relevant only for getting few objects.
type: list
suboptions:
ASC:
description:
- Sorts results by the given field in ascending order.
type: str
choices: ['name']
DESC:
description:
- Sorts results by the given field in descending order.
type: str
choices: ['name']
extends_documentation_fragment: checkpoint_facts
"""
EXAMPLES = """
- name: show-time
cp_mgmt_time_facts:
name: timeObject1
- name: show-times
cp_mgmt_time_facts:
details_level: standard
limit: 50
offset: 0
"""
RETURN = """
ansible_facts:
description: The checkpoint object facts.
returned: always.
type: dict
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.checkpoint.checkpoint import checkpoint_argument_spec_for_facts, api_call_facts
def main():
argument_spec = dict(
name=dict(type='str'),
details_level=dict(type='str', choices=['uid', 'standard', 'full']),
limit=dict(type='int'),
offset=dict(type='int'),
order=dict(type='list', options=dict(
ASC=dict(type='str', choices=['name']),
DESC=dict(type='str', choices=['name'])
))
)
argument_spec.update(checkpoint_argument_spec_for_facts)
module = AnsibleModule(argument_spec=argument_spec)
api_call_object = "time"
api_call_object_plural_version = "times"
result = api_call_facts(module, api_call_object, api_call_object_plural_version)
module.exit_json(ansible_facts=result)
if __name__ == '__main__':
main()
| {
"content_hash": "fa32789d20f26cf394728593dd2015db",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 141,
"avg_line_length": 29.813084112149532,
"alnum_prop": 0.6579937304075235,
"repo_name": "thaim/ansible",
"id": "119da09e583cbb400407147f8000489fa3a22fff",
"size": "3915",
"binary": false,
"copies": "19",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/network/check_point/cp_mgmt_time_facts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
"""Runs the WebDriver Java acceptance tests.
This script is called from chrome/test/chromedriver/run_all_tests.py and reports
results using the buildbot annotation scheme.
For ChromeDriver documentation, refer to http://code.google.com/p/chromedriver.
"""
import optparse
import os
import shutil
import sys
import xml.dom.minidom as minidom
_THIS_DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(1, os.path.join(_THIS_DIR, os.pardir))
import chrome_paths
import test_environment
import util
if util.IsLinux():
sys.path.insert(0, os.path.join(chrome_paths.GetSrc(), 'build', 'android'))
from pylib import constants
class TestResult(object):
"""A result for an attempted single test case."""
def __init__(self, name, time, failure):
"""Initializes a test result.
Args:
name: the full name of the test.
time: the amount of time the test ran, in seconds.
failure: the test error or failure message, or None if the test passed.
"""
self._name = name
self._time = time
self._failure = failure
def GetName(self):
"""Returns the test name."""
return self._name
def GetTime(self):
"""Returns the time it took to run the test."""
return self._time
def IsPass(self):
"""Returns whether the test passed."""
return self._failure is None
def GetFailureMessage(self):
"""Returns the test failure message, or None if the test passed."""
return self._failure
def _Run(java_tests_src_dir, test_filter,
chromedriver_path, chrome_path, log_path, android_package_key,
verbose, debug):
"""Run the WebDriver Java tests and return the test results.
Args:
java_tests_src_dir: the java test source code directory.
test_filter: the filter to use when choosing tests to run. Format is same
as Google C++ Test format.
chromedriver_path: path to ChromeDriver exe.
chrome_path: path to Chrome exe.
log_path: path to server log.
android_package_key: name of Chrome's Android package.
verbose: whether the output should be verbose.
debug: whether the tests should wait until attached by a debugger.
Returns:
A list of |TestResult|s.
"""
test_dir = util.MakeTempDir()
keystore_path = ('java', 'client', 'test', 'keystore')
required_dirs = [keystore_path[:-1],
('javascript',),
('third_party', 'closure', 'goog'),
('third_party', 'js')]
for required_dir in required_dirs:
os.makedirs(os.path.join(test_dir, *required_dir))
test_jar = 'test-standalone.jar'
class_path = test_jar
shutil.copyfile(os.path.join(java_tests_src_dir, 'keystore'),
os.path.join(test_dir, *keystore_path))
util.Unzip(os.path.join(java_tests_src_dir, 'common.zip'), test_dir)
shutil.copyfile(os.path.join(java_tests_src_dir, test_jar),
os.path.join(test_dir, test_jar))
sys_props = ['selenium.browser=chrome',
'webdriver.chrome.driver=' + os.path.abspath(chromedriver_path)]
if chrome_path:
sys_props += ['webdriver.chrome.binary=' + os.path.abspath(chrome_path)]
if log_path:
sys_props += ['webdriver.chrome.logfile=' + log_path]
if android_package_key:
android_package = constants.PACKAGE_INFO[android_package_key].package
sys_props += ['webdriver.chrome.android_package=' + android_package]
if android_package_key == 'chromedriver_webview_shell':
android_activity = constants.PACKAGE_INFO[android_package_key].activity
android_process = '%s:main' % android_package
sys_props += ['webdriver.chrome.android_activity=' + android_activity]
sys_props += ['webdriver.chrome.android_process=' + android_process]
if test_filter:
# Test jar actually takes a regex. Convert from glob.
test_filter = test_filter.replace('*', '.*')
sys_props += ['filter=' + test_filter]
jvm_args = []
if debug:
transport = 'dt_socket'
if util.IsWindows():
transport = 'dt_shmem'
jvm_args += ['-agentlib:jdwp=transport=%s,server=y,suspend=y,'
'address=33081' % transport]
# Unpack the sources into the test directory and add to the class path
# for ease of debugging, particularly with jdb.
util.Unzip(os.path.join(java_tests_src_dir, 'test-nodeps-srcs.jar'),
test_dir)
class_path += ':' + test_dir
return _RunAntTest(
test_dir, 'org.openqa.selenium.chrome.ChromeDriverTests',
class_path, sys_props, jvm_args, verbose)
def _RunAntTest(test_dir, test_class, class_path, sys_props, jvm_args, verbose):
"""Runs a single Ant JUnit test suite and returns the |TestResult|s.
Args:
test_dir: the directory to run the tests in.
test_class: the name of the JUnit test suite class to run.
class_path: the Java class path used when running the tests, colon delimited
sys_props: Java system properties to set when running the tests.
jvm_args: Java VM command line args to use.
verbose: whether the output should be verbose.
Returns:
A list of |TestResult|s.
"""
def _CreateBuildConfig(test_name, results_file, class_path, junit_props,
sys_props, jvm_args):
def _SystemPropToXml(prop):
key, value = prop.split('=')
return '<sysproperty key="%s" value="%s"/>' % (key, value)
def _JvmArgToXml(arg):
return '<jvmarg value="%s"/>' % arg
return '\n'.join([
'<project>',
' <target name="test">',
' <junit %s>' % ' '.join(junit_props),
' <formatter type="xml"/>',
' <classpath>',
' <pathelement path="%s"/>' % class_path,
' </classpath>',
' ' + '\n '.join(map(_SystemPropToXml, sys_props)),
' ' + '\n '.join(map(_JvmArgToXml, jvm_args)),
' <test name="%s" outfile="%s"/>' % (test_name, results_file),
' </junit>',
' </target>',
'</project>'])
def _ProcessResults(results_path):
doc = minidom.parse(results_path)
tests = []
for test in doc.getElementsByTagName('testcase'):
name = test.getAttribute('classname') + '.' + test.getAttribute('name')
time = test.getAttribute('time')
failure = None
error_nodes = test.getElementsByTagName('error')
failure_nodes = test.getElementsByTagName('failure')
if error_nodes:
failure = error_nodes[0].childNodes[0].nodeValue
elif failure_nodes:
failure = failure_nodes[0].childNodes[0].nodeValue
tests += [TestResult(name, time, failure)]
return tests
junit_props = ['printsummary="yes"',
'fork="yes"',
'haltonfailure="no"',
'haltonerror="no"']
if verbose:
junit_props += ['showoutput="yes"']
ant_file = open(os.path.join(test_dir, 'build.xml'), 'w')
ant_file.write(_CreateBuildConfig(
test_class, 'results', class_path, junit_props, sys_props, jvm_args))
ant_file.close()
if util.IsWindows():
ant_name = 'ant.bat'
else:
ant_name = 'ant'
code = util.RunCommand([ant_name, 'test'], cwd=test_dir)
if code != 0:
print 'FAILED to run java tests of %s through ant' % test_class
return
return _ProcessResults(os.path.join(test_dir, 'results.xml'))
def PrintTestResults(results):
"""Prints the given results in a format recognized by the buildbot."""
failures = []
failure_names = []
for result in results:
if not result.IsPass():
failures += [result]
failure_names += ['.'.join(result.GetName().split('.')[-2:])]
print 'Ran %s tests' % len(results)
print 'Failed %s:' % len(failures)
util.AddBuildStepText('failed %s/%s' % (len(failures), len(results)))
for result in failures:
print '=' * 80
print '=' * 10, result.GetName(), '(%ss)' % result.GetTime()
print result.GetFailureMessage()
if len(failures) < 10:
util.AddBuildStepText('.'.join(result.GetName().split('.')[-2:]))
print 'Rerun failing tests with filter:', ':'.join(failure_names)
return len(failures)
def main():
parser = optparse.OptionParser()
parser.add_option(
'', '--verbose', action='store_true', default=False,
help='Whether output should be verbose')
parser.add_option(
'', '--debug', action='store_true', default=False,
help='Whether to wait to be attached by a debugger')
parser.add_option(
'', '--chromedriver', type='string', default=None,
help='Path to a build of the chromedriver library(REQUIRED!)')
parser.add_option(
'', '--chrome', type='string', default=None,
help='Path to a build of the chrome binary')
parser.add_option(
'', '--log-path',
help='Output verbose server logs to this file')
parser.add_option(
'', '--chrome-version', default='HEAD',
help='Version of chrome. Default is \'HEAD\'')
parser.add_option(
'', '--android-package', help='Android package key')
parser.add_option(
'', '--filter', type='string', default=None,
help='Filter for specifying what tests to run, "*" will run all. E.g., '
'*testShouldReturnTitleOfPageIfSet')
parser.add_option(
'', '--also-run-disabled-tests', action='store_true', default=False,
help='Include disabled tests while running the tests')
parser.add_option(
'', '--isolate-tests', action='store_true', default=False,
help='Relaunch the jar test harness after each test')
options, _ = parser.parse_args()
options.chromedriver = util.GetAbsolutePathOfUserPath(options.chromedriver)
if options.chromedriver is None or not os.path.exists(options.chromedriver):
parser.error('chromedriver is required or the given path is invalid.' +
'Please run "%s --help" for help' % __file__)
if options.android_package:
if options.android_package not in constants.PACKAGE_INFO:
parser.error('Invalid --android-package')
if options.chrome_version != 'HEAD':
parser.error('Android does not support the --chrome-version argument.')
environment = test_environment.AndroidTestEnvironment(
options.android_package)
else:
environment = test_environment.DesktopTestEnvironment(
options.chrome_version)
try:
environment.GlobalSetUp()
# Run passed tests when filter is not provided.
if options.isolate_tests:
test_filters = environment.GetPassedJavaTests()
else:
if options.filter:
test_filter = options.filter
else:
test_filter = '*'
if not options.also_run_disabled_tests:
if '-' in test_filter:
test_filter += ':'
else:
test_filter += '-'
test_filter += ':'.join(environment.GetDisabledJavaTestMatchers())
test_filters = [test_filter]
java_tests_src_dir = os.path.join(chrome_paths.GetSrc(), 'chrome', 'test',
'chromedriver', 'third_party',
'java_tests')
if (not os.path.exists(java_tests_src_dir) or
not os.listdir(java_tests_src_dir)):
java_tests_url = ('https://chromium.googlesource.com/chromium/deps'
'/webdriver')
print ('"%s" is empty or it doesn\'t exist. ' % java_tests_src_dir +
'Need to map ' + java_tests_url + ' to '
'chrome/test/chromedriver/third_party/java_tests in .gclient.\n'
'Alternatively, do:\n'
' $ cd chrome/test/chromedriver/third_party\n'
' $ git clone %s java_tests' % java_tests_url)
return 1
results = []
for filter in test_filters:
results += _Run(
java_tests_src_dir=java_tests_src_dir,
test_filter=filter,
chromedriver_path=options.chromedriver,
chrome_path=util.GetAbsolutePathOfUserPath(options.chrome),
log_path=options.log_path,
android_package_key=options.android_package,
verbose=options.verbose,
debug=options.debug)
return PrintTestResults(results)
finally:
environment.GlobalTearDown()
if __name__ == '__main__':
sys.exit(main())
| {
"content_hash": "d14890bab32a8b124c53bf76f40d0d8c",
"timestamp": "",
"source": "github",
"line_count": 329,
"max_line_length": 80,
"avg_line_length": 36.83586626139817,
"alnum_prop": 0.6309101411007508,
"repo_name": "was4444/chromium.src",
"id": "1caa8213fdf5175f5a8f01b25d2cc789c0b4243c",
"size": "12304",
"binary": false,
"copies": "4",
"ref": "refs/heads/nw15",
"path": "chrome/test/chromedriver/test/run_java_tests.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from msrest.serialization import Model
class OperationResult(Model):
_required = []
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
}
def __init__(self, *args, **kwargs):
"""OperationResult
:param str status: The status of the request. Possible values for this
property include: 'Succeeded', 'Failed', 'canceled', 'Accepted',
'Creating', 'Created', 'Updating', 'Updated', 'Deleting', 'Deleted',
'OK'.
"""
self.status = None
super(OperationResult, self).__init__(*args, **kwargs)
| {
"content_hash": "6c66fc5e23b5f4ba05022e416158e50b",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 78,
"avg_line_length": 26.772727272727273,
"alnum_prop": 0.5738539898132428,
"repo_name": "vulcansteel/autorest",
"id": "501cb16b59fa064001104dac6f09a582b925ee72",
"size": "1063",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AutoRest/Generators/Python/Azure.Python.Tests/Expected/AcceptanceTests/Paging/auto_rest_paging_test_service/models/operation_result.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "819"
},
{
"name": "C#",
"bytes": "8857811"
},
{
"name": "CSS",
"bytes": "110"
},
{
"name": "HTML",
"bytes": "274"
},
{
"name": "Java",
"bytes": "3171512"
},
{
"name": "JavaScript",
"bytes": "4063363"
},
{
"name": "PowerShell",
"bytes": "8003"
},
{
"name": "Puppet",
"bytes": "145"
},
{
"name": "Python",
"bytes": "1831874"
},
{
"name": "Ruby",
"bytes": "218212"
},
{
"name": "TypeScript",
"bytes": "158339"
}
],
"symlink_target": ""
} |
"""
Script to Summarize statistics in the scan-build output.
Statistics are enabled by passing '-internal-stats' option to scan-build
(or '-analyzer-stats' to the analyzer).
"""
import sys
if __name__ == '__main__':
if len(sys.argv) < 2:
print >> sys.stderr, 'Usage: ', sys.argv[0],\
'scan_build_output_file'
sys.exit(-1)
f = open(sys.argv[1], 'r')
Time = 0.0
TotalTime = 0.0
MaxTime = 0.0
Warnings = 0
Count = 0
FunctionsAnalyzed = 0
ReachableBlocks = 0
ReachedMaxSteps = 0
NumSteps = 0
NumInlinedCallSites = 0
NumBifurcatedCallSites = 0
MaxCFGSize = 0
for line in f:
if ("Analyzer Total Time" in line):
s = line.split()
Time = Time + float(s[6])
Count = Count + 1
if (float(s[6]) > MaxTime):
MaxTime = float(s[6])
if ("warning generated." in line) or ("warnings generated" in line):
s = line.split()
Warnings = Warnings + int(s[0])
if "The # of functions analysed (as top level)" in line:
s = line.split()
FunctionsAnalyzed = FunctionsAnalyzed + int(s[0])
if "The % of reachable basic blocks" in line:
s = line.split()
ReachableBlocks = ReachableBlocks + int(s[0])
if "The # of times we reached the max number of steps" in line:
s = line.split()
ReachedMaxSteps = ReachedMaxSteps + int(s[0])
if "The maximum number of basic blocks in a function" in line:
s = line.split()
if MaxCFGSize < int(s[0]):
MaxCFGSize = int(s[0])
if "The # of steps executed" in line:
s = line.split()
NumSteps = NumSteps + int(s[0])
if "The # of times we inlined a call" in line:
s = line.split()
NumInlinedCallSites = NumInlinedCallSites + int(s[0])
if "The # of times we split the path due \
to imprecise dynamic dispatch info" in line:
s = line.split()
NumBifurcatedCallSites = NumBifurcatedCallSites + int(s[0])
if ") Total" in line:
s = line.split()
TotalTime = TotalTime + float(s[6])
print "TU Count %d" % (Count)
print "Time %f" % (Time)
print "Warnings %d" % (Warnings)
print "Functions Analyzed %d" % (FunctionsAnalyzed)
print "Reachable Blocks %d" % (ReachableBlocks)
print "Reached Max Steps %d" % (ReachedMaxSteps)
print "Number of Steps %d" % (NumSteps)
print "Number of Inlined calls %d (bifurcated %d)" % (
NumInlinedCallSites, NumBifurcatedCallSites)
print "MaxTime %f" % (MaxTime)
print "TotalTime %f" % (TotalTime)
print "Max CFG Size %d" % (MaxCFGSize)
| {
"content_hash": "12a692b9c7ea992505730ea39ad276a4",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 76,
"avg_line_length": 36.44155844155844,
"alnum_prop": 0.5602280826799715,
"repo_name": "youtube/cobalt",
"id": "50e1cb854f4eaa1a3ac14f5c524ff838282e927f",
"size": "2829",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "third_party/llvm-project/clang/utils/analyzer/SumTimerInfo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
'''Cleanup files & directories under /tmp'''
import os
import shutil
import glob
dir_root = '/tmp/'
def cleanup_work_dir(dir_root=dir_root):
'''Cleanup directories & files under the root directory
Defaults to /tmp/, which is the default working directory'''
listing = glob.glob(dir_root+'*')
for l in listing:
if os.path.isdir(l):
try:
shutil.rmtree(l)
except:
pass
else:
try:
os.remove(l)
except:
pass
| {
"content_hash": "847e550aa2d86044deeda732382494bd",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 59,
"avg_line_length": 24.727272727272727,
"alnum_prop": 0.5330882352941176,
"repo_name": "ucldc/harvester",
"id": "677fa19aa353eca89a47c4b910a52e162f8e647a",
"size": "544",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "harvester/cleanup_dir.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "787"
},
{
"name": "Python",
"bytes": "608203"
},
{
"name": "Shell",
"bytes": "13701"
}
],
"symlink_target": ""
} |
"""Tests for embedding layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import backprop
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
class EmbeddingTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes(use_gpu=False)
def test_embedding(self):
testing_utils.layer_test(
keras.layers.Embedding,
kwargs={'output_dim': 4,
'input_dim': 10,
'input_length': 2},
input_shape=(3, 2),
input_dtype='int32',
expected_output_dtype='float32')
testing_utils.layer_test(
keras.layers.Embedding,
kwargs={'output_dim': 4,
'input_dim': 10,
'mask_zero': True},
input_shape=(3, 2),
input_dtype='int32',
expected_output_dtype='float32')
testing_utils.layer_test(
keras.layers.Embedding,
kwargs={'output_dim': 4,
'input_dim': 10,
'mask_zero': True},
input_shape=(3, 4, 2),
input_dtype='int32',
expected_output_dtype='float32')
testing_utils.layer_test(
keras.layers.Embedding,
kwargs={'output_dim': 4,
'input_dim': 10,
'mask_zero': True,
'input_length': (None, 2)},
input_shape=(3, 4, 2),
input_dtype='int32',
expected_output_dtype='float32')
def test_embedding_correctness(self):
with self.cached_session():
layer = keras.layers.Embedding(output_dim=2, input_dim=2)
layer.build((None, 2))
matrix = np.array([[1, 1], [2, 2]])
layer.set_weights([matrix])
inputs = keras.backend.constant([[0, 1, 0]], dtype='int32')
outputs = keras.backend.eval(layer(inputs))
self.assertAllClose(outputs, [[[1, 1], [2, 2], [1, 1]]])
@tf_test_util.run_in_graph_and_eager_modes()
def test_eager_gpu_cpu(self):
l = keras.layers.Embedding(output_dim=2, input_dim=2)
l.build((None, 2))
inputs = keras.backend.constant([[0, 1, 0]], dtype='int32')
with backprop.GradientTape() as tape:
output = l(inputs)
gs = tape.gradient(output, l.weights)
opt = adagrad.AdagradOptimizer(0.1)
opt.apply_gradients(zip(gs, l.weights))
self.assertAllEqual(len(gs), 1)
if __name__ == '__main__':
test.main()
| {
"content_hash": "12db1a60e120d4a503dd9ef4261ad588",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 65,
"avg_line_length": 31.682926829268293,
"alnum_prop": 0.6070053887605851,
"repo_name": "girving/tensorflow",
"id": "2e42e403aa3815a8530b1755bb8b271a6fe3c96e",
"size": "3287",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/layers/embeddings_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3325"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "343258"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "50036869"
},
{
"name": "CMake",
"bytes": "196127"
},
{
"name": "Dockerfile",
"bytes": "36386"
},
{
"name": "Go",
"bytes": "1254086"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "867313"
},
{
"name": "Jupyter Notebook",
"bytes": "2604735"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "58787"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "42041620"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "477299"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
import importlib
import logging
# HACK:
# Sphinx-autoapi doesn't like imports to excluded packages in the main module.
conf = importlib.import_module('airflow.configuration').conf # type: ignore[attr-defined]
PROVIDERS_GOOGLE_VERBOSE_LOGGING: bool = conf.getboolean(
'providers_google', 'VERBOSE_LOGGING', fallback=False
)
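# --- Illustrative configuration sketch (not part of the original module) ---
# The flag read above corresponds to an ``airflow.cfg`` entry along these
# lines; the section and option names come from the getboolean() call, the
# surrounding file content is assumed.
#
#   [providers_google]
#   verbose_logging = True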
if PROVIDERS_GOOGLE_VERBOSE_LOGGING:
for logger_name in ["google_auth_httplib2", "httplib2", "googleapiclient"]:
logger = logging.getLogger(logger_name)
logger.handlers += [
handler for handler in logging.getLogger().handlers if handler.name in ["task", "console"]
]
logger.level = logging.DEBUG
logger.propagate = False
import httplib2
httplib2.debuglevel = 4
| {
"content_hash": "626c5f5702fd21bd63a12a1de1031cec",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 102,
"avg_line_length": 34.40909090909091,
"alnum_prop": 0.702774108322325,
"repo_name": "Acehaidrey/incubator-airflow",
"id": "de7c3eef9373fa50e83eb83226b2c656cdb31d97",
"size": "1542",
"binary": false,
"copies": "5",
"ref": "refs/heads/main",
"path": "airflow/providers/google/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21727510"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495253"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
} |
import StringIO
from nose.tools import *
from impactlib.refresh import extract_dependencies
def test_complex1():
fp = StringIO.StringIO("""
package RealTimeCoordinationLibrary
annotation (uses(Modelica(version="3.2"), RealTimeCoordinationLibrary(version=
"1.0.2"),
Modelica_StateGraph2(version="2.0.1")),
preferredView="info",
version="1.0.2",
versionBuild=1,
versionDate="2013-04-04",
dateModified = "2012-04-04",
revisionId="$Id:: package.mo 1 2013-04-04 16:18:47Z #$",
Documentation(info="<html>
<p><b>RealTimeCoordinationLibrary</b> is a <b>free</b> Modelica package providing components to model <b>real-time</b>, <b>reactive</b>, <b>hybrid</b> and, <b>asynchronous communicating</b> systems in a convenient way with <b>statecharts</b>.</p>
<p>For an introduction, have especially a look at: </p>
<p><ul>
<li><a href=\"modelica://RealTimeCoordinationLibrary.UsersGuide.Elements\">Elements</a> provide an overview of the library inside the User's Guide.</li>
<li><a href=\"modelica://RealTimeCoordinationLibrary.Examples\">Examples</a> provide simple introductory examples as well as involved application examples. </li>
</ul></p>
<p>For an application example have a look at: <a href=\"modelica://RealTimeCoordinationLibrary.Examples.Application.BeBotSystem\">BeBotSystem</a> </p>
<p><br/><b>Licensed under the Modelica License 2</b></p>
<p><i>This Modelica package is <u>free</u> software and the use is completely at <u>your own risk</u>; it can be redistributed and/or modified under the terms of the Modelica license 2, see the license conditions (including the disclaimer of warranty) <a href=\"modelica://RealTimeCoordinationLibrary.UsersGuide.ModelicaLicense2\">here</a> or at <a href=\"http://www.Modelica.org/licenses/ModelicaLicense2\">http://www.Modelica.org/licenses/ModelicaLicense2</a>.</i> </p>
</html>", revisions="<html>
<p>Name: RealTimeCoordinationLibrary</p>
<p>Path: RealTimeCoordinationLibrary</p>
<p>Version: 1.0.2, 2013-04-04, build 1 (2013-04-04)</p>
<p>Uses:Modelica (version="3.2"), RealTimeCoordinationLibrary (version="1.0.2"), Modelica_StateGraph2 (version="2.0.1")</p>
</html>"));
end RealTimeCoordinationLibrary;
""")
deps = extract_dependencies(fp)
print "deps = "+str(deps)
exp = [("Modelica", "3.2"),
("RealTimeCoordinationLibrary", "1.0.2"),
("Modelica_StateGraph2", "2.0.1")]
exp.sort(lambda x, y: cmp(x[0], y[0]))
deps.sort(lambda x, y: cmp(x[0], y[0]))
assert_equal(exp, deps)
| {
"content_hash": "4c201870524e1b9b2345652a62784211",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 471,
"avg_line_length": 58.81818181818182,
"alnum_prop": 0.6982225656877898,
"repo_name": "FND/impact",
"id": "0828694b4ab616476a8e17848e5cf49ffdf7e615",
"size": "2588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/TestDependencies.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33164"
},
{
"name": "TeX",
"bytes": "27392"
}
],
"symlink_target": ""
} |
from transformer import *
from standard_game import *
from standard_player import *
from player_vectorizer import *
| {
"content_hash": "f5cdb11fe81868f3e10999c5e44c23ba",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 31,
"avg_line_length": 29,
"alnum_prop": 0.8017241379310345,
"repo_name": "claymcleod/pyffl",
"id": "1252deb3c51b4ef390b61f49d606703dabc8891d",
"size": "116",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "transformers/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "2305"
},
{
"name": "Makefile",
"bytes": "136"
},
{
"name": "Python",
"bytes": "13930"
}
],
"symlink_target": ""
} |
"""
lantz.ui.layouts
~~~~~~~~~~~~~~~~
Frontends to automatically locate widgets.
:copyright: 2015 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from lantz.utils.qt import QtGui
from lantz.ui.app import Frontend
class _PanelsUi(Frontend):
"""The Loop frontend provides a GUI for the Rich Backend
"""
gui = 'placeholder.ui'
auto_connect = False
#: Tuple with the columns
#: Each element can be:
#: - Frontend class: will be connected to the default backend.
#: - Front2Back(Frontend class, backend name): will be connected to a specific backend.
#: - tuple: will be iterated to obtain the rows.
parts = ()
_inner, _outer = None, None
def _add(self, layout, parts):
"""Add widgets in parts to layout.
"""
for part_name in parts:
part = getattr(self, part_name)
if isinstance(part, Frontend):
layout.addWidget(part)
elif isinstance(part, tuple):
# A tuple found in parts is considered nesting
if isinstance(layout, self._inner):
sublayout = self._outer()
elif isinstance(layout, self._outer):
sublayout = self._inner()
else:
raise ValueError('Unknown parent layout %s' % layout)
self._add(sublayout, part)
layout.setLayout(sublayout)
else:
raise ValueError('Only Frontend or tuple are valid values '
'for parts, not %s (%s)' % (part, type(part)))
def setupUi(self):
super().setupUi()
layout = self._outer()
self._add(layout, self.parts)
self.widget.placeholder.setLayout(layout)
class VerticalUi(_PanelsUi):
"""Uses a vertical box layout to locate widgets.
"""
_inner, _outer = QtGui.QHBoxLayout, QtGui.QVBoxLayout
class HorizonalUi(_PanelsUi):
"""Uses a horizontal box layout to locate widgets.
"""
_inner, _outer = QtGui.QVBoxLayout, QtGui.QHBoxLayout
| {
"content_hash": "ad3d88e0eec347642431fb25f37a753e",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 91,
"avg_line_length": 27.974025974025974,
"alnum_prop": 0.5807799442896936,
"repo_name": "LabPy/lantz_qt",
"id": "192f199fd21880ffa9a9bc1593645f54b0ca296a",
"size": "2178",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "lantz_qt/blocks/layouts.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "13856"
},
{
"name": "JavaScript",
"bytes": "11794"
},
{
"name": "Makefile",
"bytes": "5069"
},
{
"name": "Python",
"bytes": "145910"
},
{
"name": "Shell",
"bytes": "4509"
}
],
"symlink_target": ""
} |
import os
from time import time
from flask import Flask, request
from crypto import rsa, aes
from settings import APP_STATIC
from utils import json, get_session_key, SESSION_KEY_EXPIRATION_TIME
app = Flask(__name__)
db = {
'admin': {
'password': 'secret'
},
'bob': {
'password': 'foo'
},
'alice': {
'password': 'bar'
}
}
def key_expired(created_at):
return (time() - created_at) > SESSION_KEY_EXPIRATION_TIME
@app.route('/login', methods=['POST'])
def login():
body = request.get_json()
user, password = body.get('user'), body.get('password')
if not db.get(user, {}).get('password') == password:
return json(None, 'Incorrect login or password!'), 401
try:
pub_key = int(body['key']['e']), int(body['key']['n'])
except ValueError:
return json(None, 'PublicKey(e, n) should consist of two integers!'), 400
session_key = get_session_key(aes.KEY_LENGTH)
encrypted_key = rsa.encrypt(pub_key, session_key)
if __debug__:
print('SESSION_KEY:', session_key)
db[user]['session_key'] = session_key
db[user]['created_at'] = time()
return json({'sessionKey': encrypted_key})
@app.route('/data', methods=['POST'])
def get_data():
body = request.get_json()
user = db.get(body['user'], {})
if not user.get('session_key') or key_expired(user.get('created_at')):
return json(None, 'Session key has expired!'), 401
session_key = user['session_key']
with open(os.path.join(APP_STATIC, 'data.txt'), 'rb') as f:
data = list(f.read())
encrypted_data = aes.encrypt(data, session_key)
if __debug__:
decrypted_data = aes.decrypt(encrypted_data, session_key)
print('ENCRYPTION CORRECT:', bytes(decrypted_data).startswith(bytes(data)))
return json({'encrypted': encrypted_data})
"""
Private api for sharing crypto methods with client, to avoid code duplicating
(I don't want to implement algorithms also on javascript)
"""
@app.route('/private/rsa/generate', methods=['POST'])
def get_rsa_keys():
body = request.get_json()
try:
public, private = rsa.generate_keypair(int(body['p']), int(body['q']))
except ValueError as error:
return json(None, str(error)), 400
e, n = public
d, n = private
return json({'e': e, 'd': d, 'n': n})
@app.route('/private/rsa/decrypt', methods=['POST'])
def rsa_decrypt():
body = request.get_json()
try:
priv_key = int(body['key']['d']), int(body['key']['n'])
except ValueError:
return json(None, 'PrivateKey(d, n) should consist of two integers!')
data = body['data']
return json({'decrypted': rsa.decrypt(priv_key, data)})
@app.route('/private/aes/decrypt', methods=['POST'])
def aes_decrypt():
body = request.get_json()
encrypted, session_key = body['encrypted'], body['key']
decrypted_data = bytes(aes.decrypt(encrypted, session_key)).decode('utf-8', 'ignore')
return json({'text': decrypted_data})
if __name__ == '__main__':
app.run()
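# --- Illustrative client-side sketch (not part of the original module) ---
# A hedged outline of the exchange implemented by the two public routes:
# the client sends its RSA public key with the credentials, receives the
# AES session key encrypted under that key, and then requests the data,
# which comes back encrypted with the session key.  The ``requests`` calls,
# host/port and key values below are assumptions for illustration only.
#
#   import requests
#   resp = requests.post('http://localhost:5000/login', json={
#       'user': 'bob', 'password': 'foo',
#       'key': {'e': str(e), 'n': str(n)},   # client's RSA public key
#   })
#   # resp carries the AES session key encrypted with the client's RSA key;
#   # the exact JSON envelope is produced by the project's ``json`` helper.
#   resp = requests.post('http://localhost:5000/data', json={'user': 'bob'})
#   # resp carries data.txt encrypted with the AES session key.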
| {
"content_hash": "fb86eaaf41d14ef8077b06fe03ecfd34",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 89,
"avg_line_length": 28.462962962962962,
"alnum_prop": 0.6128822381262199,
"repo_name": "Drapegnik/bsu",
"id": "e87e7a9e7590454727b6aad75477036576a60ac6",
"size": "3074",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cryptography/lab2/server/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "20778"
},
{
"name": "C",
"bytes": "7115"
},
{
"name": "C++",
"bytes": "163407"
},
{
"name": "CMake",
"bytes": "2921"
},
{
"name": "CSS",
"bytes": "1254"
},
{
"name": "HTML",
"bytes": "12563"
},
{
"name": "Java",
"bytes": "221515"
},
{
"name": "JavaScript",
"bytes": "41111"
},
{
"name": "Makefile",
"bytes": "920"
},
{
"name": "Python",
"bytes": "141657"
},
{
"name": "R",
"bytes": "7974"
},
{
"name": "Shell",
"bytes": "4159"
},
{
"name": "TeX",
"bytes": "79081"
},
{
"name": "TypeScript",
"bytes": "28274"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import unittest
import json
import os
from monty.json import MontyDecoder
from pymatgen.analysis.defects.dilute_solution_model import *
import random
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
with open(
os.path.join(test_dir, 'mp1048_defect_formation_energies.json')) as fp:
formation_energy_dict = json.load(fp, cls=MontyDecoder)
with open(os.path.join(test_dir, 'mp1048_raw_defect_energies.json')) as fp:
raw_energy_dict = json.load(fp, cls=MontyDecoder)
with open(os.path.join(test_dir, 'mp1487_raw_defect_energies.json')) as fp:
mp1487_raw_energy_dict = json.load(fp, cls=MontyDecoder)
PULL_REQ = os.environ.get("CI_PULL_REQUEST", None) or os.environ.get("TRAVIS_PULL_REQUEST", None)
# TODO (from SP): You MUST redo this entire test. The whole test is
# monstrously slow. It takes more than 10 mins to get through this test alone.
@unittest.skipIf(PULL_REQ or random.randint(0, 10) % 10 != 0,
"Pull request or random skip.")
class DiluteSolutionModelTest(unittest.TestCase):
def setUp(self):
"""
Setup mandatory inputs for dilute_solution_model
"""
self.e0 = raw_energy_dict['bulk_energy']
self.asites = raw_energy_dict['antisites']
self.vac = raw_energy_dict['vacancies']
self.struct = raw_energy_dict['structure']
self.T = 600
self.trial_mu = formation_energy_dict[str(self.T)]['chemical_potential']
def test_formation_energies_without_chem_pot(self):
"""
Should generate formation energies without input chempot
"""
energies, chem_pot = dilute_solution_model(
self.struct, self.e0, self.vac, self.asites, self.T,
generate='energy')
self.assertIsNotNone(energies)
self.assertIsNotNone(chem_pot)
def test_formation_energies_with_chem_pot(self):
energies, chem_pot = dilute_solution_model(
self.struct, self.e0, self.vac, self.asites, self.T,
trial_chem_pot=self.trial_mu, generate='energy')
self.assertIsNotNone(energies)
self.assertIsNotNone(chem_pot)
def test_plot_data_without_chem_pot(self):
conc_data, en_data, mu_data = dilute_solution_model(
self.struct, self.e0, self.vac, self.asites, self.T,
generate='plot')
self.assertIsNotNone(conc_data)
self.assertIsNotNone(en_data)
self.assertIsNotNone(mu_data)
for key, value in conc_data.items():
self.assertIsNotNone(value)
for key, value in mu_data.items():
self.assertIsNotNone(value)
for key, value in en_data.items():
self.assertIsNotNone(value)
def test_plot_data_with_chem_pot(self):
conc_data, en_data, mu_data = dilute_solution_model(
self.struct, self.e0, self.vac, self.asites, self.T,
trial_chem_pot=self.trial_mu, generate='plot')
self.assertIsNotNone(conc_data)
self.assertIsNotNone(en_data)
self.assertIsNotNone(mu_data)
for key, value in conc_data.items():
self.assertIsNotNone(value)
for key, value in mu_data.items():
self.assertIsNotNone(value)
for key, value in en_data.items():
self.assertIsNotNone(value)
# print(plot_data['y'])
@unittest.skipIf(PULL_REQ or random.randint(0, 10) % 10 != 0,
"Pull request or random skip.")
class SoluteSiteFinderTest(unittest.TestCase):
def setUp(self):
"""
Setup mandatory inputs for dilute_solution_model
"""
self.e0 = mp1487_raw_energy_dict['bulk_energy']
self.asites = mp1487_raw_energy_dict['antisites']
self.vac = mp1487_raw_energy_dict['vacancies']
self.solutes = mp1487_raw_energy_dict['solutes']
self.struct = mp1487_raw_energy_dict['structure']
self.T = 1000
def test_plot_data_without_chem_pot(self):
plot_data = solute_site_preference_finder(
self.struct, self.e0, self.T, self.vac, self.asites, self.solutes,
solute_concen=0.01)
self.assertIsNotNone(plot_data)
def still_wait_plot_data_with_chem_pot(self):
plot_data = dilute_solution_model(
self.struct, self.e0, self.vac, self.asites, self.T,
trial_chem_pot=self.trial_mu, generate='plot')
self.assertIsNotNone(plot_data)
for key, value in plot_data.items():
self.assertIsNotNone(value)
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "4f772e98a350289290542fc92c2667a8",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 97,
"avg_line_length": 38.9327731092437,
"alnum_prop": 0.6324195985322685,
"repo_name": "johnson1228/pymatgen",
"id": "aa7fefbb73ed49933a34d15ce349066dd2fba8a4",
"size": "4743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymatgen/analysis/defects/tests/test_dilute_solution_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5938"
},
{
"name": "CSS",
"bytes": "7550"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "HTML",
"bytes": "4886182"
},
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "6064350"
},
{
"name": "Roff",
"bytes": "868"
}
],
"symlink_target": ""
} |
import sys
import json
import bson
import cbor
# Get filename base (without extension) from argument list
fname = sys.argv[1]
# Open the JSON version of the file, and a new file
# to hold the BSON version.
fin = open(fname + '.json', 'r')
fout = open(fname + '.bson', 'wb')
# Parse the JSON string from the file into
# a Python dictionary object
data = json.load(fin)
# Write the object to the file in BSON
fout.write(bson.serialize_to_bytes(data))
# Close both files
fin.close()
fout.close()
# Open a new file for writing out the CBOR encoding
fout = open(fname + '.cbor', 'wb')
# Use CBOR codec to write to
cbor.dump(data, fout)
# Close the CBOR file
fout.close()
# Open the BSON version in read-only mode, and a new file
# for the roundtrip JSON output.
fin = open(fname + '.bson', 'rb')
fout = open(fname + '-roundtrip.json', 'w')
# Parse the BSON file into a Python dictionary object
data = bson.parse_stream(fin)
# Dump the dictionary object out in JSON format
json.dump(data, fout)
# Close both files
fin.close()
fout.close()
# #print('j2b.py: writing to ' + fname + '.bson')
# f.close()
# f = open(fname + '.bs')
# f2 = open(fname + '-roundtrip.bson', 'w')
# parsed_from_bson = bson.parse_stream(f2)
# | {
"content_hash": "4a008abe33dfda3a99d6ea60c58c6690",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 58,
"avg_line_length": 21.17241379310345,
"alnum_prop": 0.6864820846905537,
"repo_name": "hillbw/exi-test",
"id": "f468f3fbaf7cebcef2173ef0c2464caaf700d10a",
"size": "1228",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/b2j.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "526"
},
{
"name": "HTML",
"bytes": "7218"
},
{
"name": "Java",
"bytes": "582037"
},
{
"name": "Python",
"bytes": "99627"
},
{
"name": "R",
"bytes": "10535"
},
{
"name": "Shell",
"bytes": "35899"
},
{
"name": "XSLT",
"bytes": "19152"
}
],
"symlink_target": ""
} |
import socket
from oslo_service import loopingcall
from oslo_utils import timeutils
from cinder import context
from cinder import db
from cinder import objects
def get_test_admin_context():
return context.get_admin_context()
def create_volume(ctxt,
host='test_host',
display_name='test_volume',
display_description='this is a test volume',
status='available',
migration_status=None,
size=1,
availability_zone='fake_az',
volume_type_id=None,
replication_status='disabled',
replication_extended_status=None,
replication_driver_data=None,
consistencygroup_id=None,
previous_status=None,
**kwargs):
"""Create a volume object in the DB."""
vol = {}
vol['size'] = size
vol['host'] = host
vol['user_id'] = ctxt.user_id
vol['project_id'] = ctxt.project_id
vol['status'] = status
vol['migration_status'] = migration_status
vol['display_name'] = display_name
vol['display_description'] = display_description
vol['attach_status'] = 'detached'
vol['availability_zone'] = availability_zone
if consistencygroup_id:
vol['consistencygroup_id'] = consistencygroup_id
if volume_type_id:
vol['volume_type_id'] = volume_type_id
for key in kwargs:
vol[key] = kwargs[key]
vol['replication_status'] = replication_status
vol['replication_extended_status'] = replication_extended_status
vol['replication_driver_data'] = replication_driver_data
vol['previous_status'] = previous_status
return db.volume_create(ctxt, vol)
def attach_volume(ctxt, volume_id, instance_uuid, attached_host,
mountpoint, mode='rw'):
now = timeutils.utcnow()
values = {}
values['volume_id'] = volume_id
values['attached_host'] = attached_host
values['mountpoint'] = mountpoint
values['attach_time'] = now
attachment = db.volume_attach(ctxt, values)
return db.volume_attached(ctxt, attachment['id'], instance_uuid,
attached_host, mountpoint, mode)
def create_snapshot(ctxt,
volume_id,
display_name='test_snapshot',
display_description='this is a test snapshot',
cgsnapshot_id = None,
status='creating'):
vol = db.volume_get(ctxt, volume_id)
snap = {}
snap['volume_id'] = volume_id
snap['user_id'] = ctxt.user_id
snap['project_id'] = ctxt.project_id
snap['status'] = status
snap['volume_size'] = vol['size']
snap['display_name'] = display_name
snap['display_description'] = display_description
snap['cgsnapshot_id'] = cgsnapshot_id
return db.snapshot_create(ctxt, snap)
def create_consistencygroup(ctxt,
host='test_host@fakedrv#fakepool',
name='test_cg',
description='this is a test cg',
status='available',
availability_zone='fake_az',
volume_type_id=None,
cgsnapshot_id=None,
source_cgid=None,
**kwargs):
"""Create a consistencygroup object in the DB."""
cg = objects.ConsistencyGroup(ctxt)
cg.host = host
cg.user_id = ctxt.user_id or 'fake_user_id'
cg.project_id = ctxt.project_id or 'fake_project_id'
cg.status = status
cg.name = name
cg.description = description
cg.availability_zone = availability_zone
if volume_type_id:
cg.volume_type_id = volume_type_id
cg.cgsnapshot_id = cgsnapshot_id
cg.source_cgid = source_cgid
for key in kwargs:
setattr(cg, key, kwargs[key])
cg.create()
return cg
def create_cgsnapshot(ctxt,
name='test_cgsnap',
description='this is a test cgsnap',
status='available',
consistencygroup_id=None,
**kwargs):
"""Create a cgsnapshot object in the DB."""
cgsnap = {}
cgsnap['user_id'] = ctxt.user_id
cgsnap['project_id'] = ctxt.project_id
cgsnap['status'] = status
cgsnap['name'] = name
cgsnap['description'] = description
cgsnap['consistencygroup_id'] = consistencygroup_id
for key in kwargs:
cgsnap[key] = kwargs[key]
return db.cgsnapshot_create(ctxt, cgsnap)
def create_backup(ctxt,
volume_id,
display_name='test_backup',
display_description='This is a test backup',
status='creating',
parent_id=None,
temp_volume_id=None,
temp_snapshot_id=None):
backup = {}
backup['volume_id'] = volume_id
backup['user_id'] = ctxt.user_id
backup['project_id'] = ctxt.project_id
backup['host'] = socket.gethostname()
backup['availability_zone'] = '1'
backup['display_name'] = display_name
backup['display_description'] = display_description
backup['container'] = 'fake'
backup['status'] = status
backup['fail_reason'] = ''
backup['service'] = 'fake'
backup['parent_id'] = parent_id
backup['size'] = 5 * 1024 * 1024
backup['object_count'] = 22
backup['temp_volume_id'] = temp_volume_id
backup['temp_snapshot_id'] = temp_snapshot_id
return db.backup_create(ctxt, backup)
class ZeroIntervalLoopingCall(loopingcall.FixedIntervalLoopingCall):
def start(self, interval, **kwargs):
kwargs['initial_delay'] = 0
return super(ZeroIntervalLoopingCall, self).start(0, **kwargs)
| {
"content_hash": "f7e27385d6ad53c4e268bd6105c6bf4b",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 70,
"avg_line_length": 34.146198830409354,
"alnum_prop": 0.5762973111834219,
"repo_name": "nexusriot/cinder",
"id": "4890486ec99ae1bb21cdcb82993e2eeb50ca775b",
"size": "6457",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13069422"
},
{
"name": "Shell",
"bytes": "8222"
}
],
"symlink_target": ""
} |
import sys
from bigdl.util.common import callBigDlFunc
if sys.version >= '3':
long = int
unicode = str
class DLImageReader:
"""
Primary DataFrame-based image loading interface, defining API to read images from files
to DataFrame.
"""
@staticmethod
def readImages(path, sc=None, minParitions = 1, bigdl_type="float"):
"""
Read the directory of images into DataFrame from the local or remote source.
:param path Directory to the input data files, the path can be comma separated paths as the
list of inputs. Wildcards path are supported similarly to sc.binaryFiles(path).
:param min_partitions A suggestion value of the minimal splitting number for input data.
:return DataFrame with a single column "image"; Each record in the column represents one image
record: Row (uri, height, width, channels, CvType, bytes)
"""
df = callBigDlFunc(bigdl_type, "dlReadImage", path, sc, minParitions)
df._sc._jsc = sc._jsc
return df
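# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of loading a directory of images into a DataFrame; the
# SparkContext creation and the input path are assumptions.
#
#   from pyspark import SparkContext
#   sc = SparkContext.getOrCreate()
#   image_df = DLImageReader.readImages('hdfs://host:9000/images/*.jpg', sc)
#   image_df.select('image').show(5)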
| {
"content_hash": "975f50d043d1ce8b7248704c61fc66db",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 102,
"avg_line_length": 40.65384615384615,
"alnum_prop": 0.6688741721854304,
"repo_name": "wzhongyuan/BigDL",
"id": "96c8c9bd71b2e1b2b43a1480d1f98cfe36b85404",
"size": "1644",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pyspark/bigdl/dlframes/dl_image_reader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "5170"
},
{
"name": "Java",
"bytes": "6829"
},
{
"name": "Lua",
"bytes": "1904"
},
{
"name": "Python",
"bytes": "1081865"
},
{
"name": "RobotFramework",
"bytes": "30098"
},
{
"name": "Scala",
"bytes": "9624826"
},
{
"name": "Shell",
"bytes": "55733"
}
],
"symlink_target": ""
} |
import roslib; roslib.load_manifest('controller_manager')
import sys
import rospy
from controller_manager_msgs.srv import *
def list_controller_types():
rospy.wait_for_service('controller_manager/list_controller_types')
s = rospy.ServiceProxy('controller_manager/list_controller_types', ListControllerTypes)
resp = s.call(ListControllerTypesRequest())
for t in resp.types:
print t
def reload_libraries(force_kill, restore = False):
rospy.wait_for_service('controller_manager/reload_controller_libraries')
s = rospy.ServiceProxy('controller_manager/reload_controller_libraries', ReloadControllerLibraries)
list_srv = rospy.ServiceProxy('controller_manager/list_controllers', ListControllers)
load_srv = rospy.ServiceProxy('controller_manager/load_controller', LoadController)
switch_srv = rospy.ServiceProxy('controller_manager/switch_controller', SwitchController)
print "Restore:", restore
if restore:
originally = list_srv.call(ListControllersRequest())
resp = s.call(ReloadControllerLibrariesRequest(force_kill))
if resp.ok:
print "Successfully reloaded libraries"
result = True
else:
print "Failed to reload libraries. Do you still have controllers loaded?"
result = False
if restore:
for c in originally.controllers:
load_srv(c)
to_start = []
for c, s in zip(originally.controllers, originally.state):
if s == 'running':
to_start.append(c)
switch_srv(start_controllers = to_start,
stop_controllers = [],
strictness = SwitchControllerRequest.BEST_EFFORT)
print "Controllers restored to original state"
return result
def list_controllers():
rospy.wait_for_service('controller_manager/list_controllers')
s = rospy.ServiceProxy('controller_manager/list_controllers', ListControllers)
resp = s.call(ListControllersRequest())
if len(resp.controller) == 0:
print "No controllers are loaded in mechanism control"
else:
for c in resp.controller:
print '%s - %s ( %s )'%(c.name, c.hardware_interface, c.state)
def load_controller(name):
rospy.wait_for_service('controller_manager/load_controller')
s = rospy.ServiceProxy('controller_manager/load_controller', LoadController)
resp = s.call(LoadControllerRequest(name))
if resp.ok:
print "Loaded", name
return True
else:
print "Error when loading", name
return False
def unload_controller(name):
rospy.wait_for_service('controller_manager/unload_controller')
s = rospy.ServiceProxy('controller_manager/unload_controller', UnloadController)
resp = s.call(UnloadControllerRequest(name))
if resp.ok == 1:
print "Unloaded %s successfully" % name
return True
else:
print "Error when unloading", name
return False
def start_controller(name):
return start_stop_controllers([name], True)
def start_controllers(names):
return start_stop_controllers(names, True)
def stop_controller(name):
return start_stop_controllers([name], False)
def stop_controllers(names):
    return start_stop_controllers(names, False)
def start_stop_controllers(names, st):
rospy.wait_for_service('controller_manager/switch_controller')
s = rospy.ServiceProxy('controller_manager/switch_controller', SwitchController)
start = []
stop = []
strictness = SwitchControllerRequest.STRICT
if st:
start = names
else:
stop = names
resp = s.call(SwitchControllerRequest(start, stop, strictness))
if resp.ok == 1:
if st:
print "Started %s successfully" % names
else:
print "Stopped %s successfully" % names
return True
else:
if st:
print "Error when starting ", names
else:
print "Error when stopping ", names
return False
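# A minimal usage sketch (hypothetical; it assumes a running ROS master, a
# controller_manager node, and a controller named 'joint_state_controller'):
#
#   import rospy
#   from controller_manager.controller_manager_interface import (
#       load_controller, start_controller, list_controllers)
#
#   rospy.init_node('controller_manager_example')
#   if load_controller('joint_state_controller'):
#       start_controller('joint_state_controller')
#   list_controllers()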
| {
"content_hash": "d2fa0787e791c112c7346ab690f04abb",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 103,
"avg_line_length": 34.36206896551724,
"alnum_prop": 0.673607626693427,
"repo_name": "robotic-ultrasound-image-system/ur5",
"id": "062fba0dae271b100bc2156a70e49fd2f35f49e3",
"size": "4074",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/ur5-master/ros_control-kinetic-devel/controller_manager/src/controller_manager/controller_manager_interface.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "17111"
},
{
"name": "C++",
"bytes": "4358268"
},
{
"name": "CMake",
"bytes": "1608648"
},
{
"name": "Common Lisp",
"bytes": "443315"
},
{
"name": "JavaScript",
"bytes": "154418"
},
{
"name": "Makefile",
"bytes": "7723731"
},
{
"name": "Python",
"bytes": "1088943"
},
{
"name": "Shell",
"bytes": "19219"
}
],
"symlink_target": ""
} |
from distutils.core import setup
setup(name = "django-testauth",
version = '0.1',
description = "Django library/application for authenticating against TEST services",
author = "Andrew Williams",
author_email = "[email protected]",
url = "http://dev.pleaseignore.com/projects/django-dreddit",
keywords = "eve api eveonline",
packages = ['django-testauth',],
)
| {
"content_hash": "0320ef3d0f6c655b610502750f1be2b3",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 88,
"avg_line_length": 33.166666666666664,
"alnum_prop": 0.6909547738693468,
"repo_name": "nikdoof/django-testauth",
"id": "d780a94736af30d65063b1c60c388714149a2304",
"size": "420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "11755"
}
],
"symlink_target": ""
} |
from proteus import StepControl
from proteus.default_n import *
from proteus import (StepControl,
TimeIntegration,
NonlinearSolvers,
LinearSolvers,
LinearAlgebraTools)
import vof_p as physics
from proteus import Context
from proteus.mprans import VOS3P
ct = Context.get()
domain = ct.domain
nd = ct.domain.nd
mesh = domain.MeshOptions
# time stepping
runCFL = ct.runCFL
timeIntegration = VOS3P.RKEV  # BackwardEuler_cfl
stepController = StepControl.Min_dt_controller
# mesh options
nLevels = ct.nLevels
parallelPartitioningType = mesh.parallelPartitioningType
nLayersOfOverlapForParallel = mesh.nLayersOfOverlapForParallel
restrictFineSolutionToAllMeshes = mesh.restrictFineSolutionToAllMeshes
triangleOptions = mesh.triangleOptions
elementQuadrature = ct.elementQuadrature
elementBoundaryQuadrature = ct.elementBoundaryQuadrature
femSpaces = {0:ct.basis}
massLumping = False
numericalFluxType = VOS3P.NumericalFlux
conservativeFlux = None
subgridError = VOS3P.SubgridError(coefficients=physics.coefficients,nd=nd)
shockCapturing = VOS3P.ShockCapturing(physics.coefficients,nd,shockCapturingFactor=ct.vos_shockCapturingFactor,lag=ct.vos_lag_shockCapturing)
fullNewtonFlag = True
multilevelNonlinearSolver = NonlinearSolvers.Newton
levelNonlinearSolver = NonlinearSolvers.ExplicitLumpedMassMatrix
nonlinearSmoother = None
linearSmoother = None
matrix = LinearAlgebraTools.SparseMatrix
if ct.useOldPETSc:
multilevelLinearSolver = LinearSolvers.PETSc
levelLinearSolver = LinearSolvers.PETSc
else:
multilevelLinearSolver = LinearSolvers.KSP_petsc4py
levelLinearSolver = LinearSolvers.KSP_petsc4py
if ct.useSuperlu:
multilevelLinearSolver = LinearSolvers.LU
levelLinearSolver = LinearSolvers.LU
linear_solver_options_prefix = 'vos_'
nonlinearSolverConvergenceTest = 'rits'
levelNonlinearSolverConvergenceTest = 'rits'
linearSolverConvergenceTest = 'r-true'
tolFac = 0.0
nl_atol_res = ct.vos_nl_atol_res
linTolFac = 0.0
l_atol_res = 0.1*ct.vos_nl_atol_res
useEisenstatWalker = False
maxNonlinearIts = 50
maxLineSearches = 0
#auxiliaryVariables = ct.domain.auxiliaryVariables['vos']
auxiliaryVariables = ct.domain.auxiliaryVariables['vos']+[ct.vos_output]
| {
"content_hash": "cbed9b8a990e0a607f197a1004557ed7",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 144,
"avg_line_length": 28.753086419753085,
"alnum_prop": 0.7745813653928725,
"repo_name": "erdc-cm/air-water-vv",
"id": "261c6128c2e8f55a7e7b66d0a73c6009297fdb41",
"size": "2329",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "2d/sediment/bed_sediment/vos_n.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1128"
},
{
"name": "GLSL",
"bytes": "3787"
},
{
"name": "Jupyter Notebook",
"bytes": "8264154"
},
{
"name": "M",
"bytes": "435"
},
{
"name": "Python",
"bytes": "1992474"
},
{
"name": "Shell",
"bytes": "14414"
}
],
"symlink_target": ""
} |
from utils.views import JsonView
from utils.models import HeartBeat
from django.forms.models import model_to_dict
# Create your views here.
class HeartBeatView(JsonView):
def react(self, data):
return model_to_dict(HeartBeat.objects.latest('datetime'), exclude='datetime') | {
"content_hash": "5166ee72062c4d2176d14c9ea9aad998",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 86,
"avg_line_length": 31.77777777777778,
"alnum_prop": 0.7657342657342657,
"repo_name": "Glucksistemi/EGS-DSM",
"id": "27e1fdcfbc20cde1e229dc3681dff3b9268027d3",
"size": "286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "egsdsm_backend/statistics/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1112"
},
{
"name": "JavaScript",
"bytes": "62"
},
{
"name": "Python",
"bytes": "30770"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/components/booster/shared_bst_tiefighter_basic.iff"
result.attribute_template_id = 8
result.stfName("space/space_item","bst_tiefighter_basic_n")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "5de73e4c1ee65ff3ec1140660b58b308",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 92,
"avg_line_length": 26.23076923076923,
"alnum_prop": 0.7155425219941349,
"repo_name": "anhstudios/swganh",
"id": "674a5c1e0c3cfc5275ffe6e09a749a475b0c2dd4",
"size": "486",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/ship/components/booster/shared_bst_tiefighter_basic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
import datetime
class Console(object):
def __enter__(self):
self.logfile = open('/tmp/console.html', 'a', 0)
return self
def __exit__(self, etype, value, tb):
self.logfile.close()
def addLine(self, ln):
ts = datetime.datetime.now()
outln = '%s | %s' % (str(ts), ln)
self.logfile.write(outln)
def log(msg):
if not isinstance(msg, list):
msg = [msg]
with Console() as console:
for line in msg:
console.addLine("[Zuul] %s\n" % line)
def main():
module = AnsibleModule(
argument_spec=dict(
msg=dict(required=True, type='raw'),
)
)
p = module.params
log(p['msg'])
module.exit_json(changed=True)
from ansible.module_utils.basic import * # noqa
if __name__ == '__main__':
main()
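# A minimal usage sketch (hypothetical; outside of Ansible the logging helper
# can be exercised directly, assuming /tmp/console.html is writable):
#
#   log("building project foo")
#   log(["step one", "step two"])   # each line gets a timestamp and [Zuul] prefix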
| {
"content_hash": "eafecdf241466f86dc6ce8a896704f29",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 56,
"avg_line_length": 20.8,
"alnum_prop": 0.5444711538461539,
"repo_name": "gooddata/zuul",
"id": "4b377d9079b0cd0f337efb56ca0df96208134848",
"size": "1549",
"binary": false,
"copies": "2",
"ref": "refs/heads/gd-patches",
"path": "zuul/ansible/library/zuul_log.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "827"
},
{
"name": "HTML",
"bytes": "1245"
},
{
"name": "JavaScript",
"bytes": "39935"
},
{
"name": "Makefile",
"bytes": "1415"
},
{
"name": "Python",
"bytes": "858519"
},
{
"name": "Ruby",
"bytes": "5158"
},
{
"name": "Shell",
"bytes": "1227"
}
],
"symlink_target": ""
} |
from .dashboard import Dashboard, Link
from .module import Module, ModuleType
| {
"content_hash": "d134b1ab659d84841598fb48e27e9ab2",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 38,
"avg_line_length": 39,
"alnum_prop": 0.8205128205128205,
"repo_name": "alphagov/stagecraft",
"id": "a47141b726145bd065c1262a312312bbafb12dc2",
"size": "78",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stagecraft/apps/dashboards/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "134"
},
{
"name": "HTML",
"bytes": "855"
},
{
"name": "JavaScript",
"bytes": "3223"
},
{
"name": "Python",
"bytes": "622720"
},
{
"name": "Shell",
"bytes": "14467"
}
],
"symlink_target": ""
} |
"""The tests for the Light component."""
# pylint: disable=protected-access
import unittest
import os
from homeassistant.bootstrap import setup_component
import homeassistant.loader as loader
from homeassistant.const import (
ATTR_ENTITY_ID, STATE_ON, STATE_OFF, CONF_PLATFORM,
SERVICE_TURN_ON, SERVICE_TURN_OFF, SERVICE_TOGGLE)
import homeassistant.components.light as light
from tests.common import mock_service, get_test_home_assistant
class TestLight(unittest.TestCase):
"""Test the light module."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
# pylint: disable=invalid-name
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
user_light_file = self.hass.config.path(light.LIGHT_PROFILES_FILE)
if os.path.isfile(user_light_file):
os.remove(user_light_file)
def test_methods(self):
"""Test if methods call the services as expected."""
# Test is_on
self.hass.states.set('light.test', STATE_ON)
self.assertTrue(light.is_on(self.hass, 'light.test'))
self.hass.states.set('light.test', STATE_OFF)
self.assertFalse(light.is_on(self.hass, 'light.test'))
self.hass.states.set(light.ENTITY_ID_ALL_LIGHTS, STATE_ON)
self.assertTrue(light.is_on(self.hass))
self.hass.states.set(light.ENTITY_ID_ALL_LIGHTS, STATE_OFF)
self.assertFalse(light.is_on(self.hass))
# Test turn_on
turn_on_calls = mock_service(
self.hass, light.DOMAIN, SERVICE_TURN_ON)
light.turn_on(
self.hass,
entity_id='entity_id_val',
transition='transition_val',
brightness='brightness_val',
rgb_color='rgb_color_val',
xy_color='xy_color_val',
profile='profile_val',
color_name='color_name_val',
white_value='white_val')
self.hass.block_till_done()
self.assertEqual(1, len(turn_on_calls))
call = turn_on_calls[-1]
self.assertEqual(light.DOMAIN, call.domain)
self.assertEqual(SERVICE_TURN_ON, call.service)
self.assertEqual('entity_id_val', call.data.get(ATTR_ENTITY_ID))
self.assertEqual(
'transition_val', call.data.get(light.ATTR_TRANSITION))
self.assertEqual(
'brightness_val', call.data.get(light.ATTR_BRIGHTNESS))
self.assertEqual('rgb_color_val', call.data.get(light.ATTR_RGB_COLOR))
self.assertEqual('xy_color_val', call.data.get(light.ATTR_XY_COLOR))
self.assertEqual('profile_val', call.data.get(light.ATTR_PROFILE))
self.assertEqual(
'color_name_val', call.data.get(light.ATTR_COLOR_NAME))
self.assertEqual('white_val', call.data.get(light.ATTR_WHITE_VALUE))
# Test turn_off
turn_off_calls = mock_service(
self.hass, light.DOMAIN, SERVICE_TURN_OFF)
light.turn_off(
self.hass, entity_id='entity_id_val', transition='transition_val')
self.hass.block_till_done()
self.assertEqual(1, len(turn_off_calls))
call = turn_off_calls[-1]
self.assertEqual(light.DOMAIN, call.domain)
self.assertEqual(SERVICE_TURN_OFF, call.service)
self.assertEqual('entity_id_val', call.data[ATTR_ENTITY_ID])
self.assertEqual('transition_val', call.data[light.ATTR_TRANSITION])
# Test toggle
toggle_calls = mock_service(
self.hass, light.DOMAIN, SERVICE_TOGGLE)
light.toggle(
self.hass, entity_id='entity_id_val', transition='transition_val')
self.hass.block_till_done()
self.assertEqual(1, len(toggle_calls))
call = toggle_calls[-1]
self.assertEqual(light.DOMAIN, call.domain)
self.assertEqual(SERVICE_TOGGLE, call.service)
self.assertEqual('entity_id_val', call.data[ATTR_ENTITY_ID])
self.assertEqual('transition_val', call.data[light.ATTR_TRANSITION])
def test_services(self):
"""Test the provided services."""
platform = loader.get_component('light.test')
platform.init()
self.assertTrue(
setup_component(self.hass, light.DOMAIN,
{light.DOMAIN: {CONF_PLATFORM: 'test'}}))
dev1, dev2, dev3 = platform.DEVICES
# Test init
self.assertTrue(light.is_on(self.hass, dev1.entity_id))
self.assertFalse(light.is_on(self.hass, dev2.entity_id))
self.assertFalse(light.is_on(self.hass, dev3.entity_id))
# Test basic turn_on, turn_off, toggle services
light.turn_off(self.hass, entity_id=dev1.entity_id)
light.turn_on(self.hass, entity_id=dev2.entity_id)
self.hass.block_till_done()
self.assertFalse(light.is_on(self.hass, dev1.entity_id))
self.assertTrue(light.is_on(self.hass, dev2.entity_id))
# turn on all lights
light.turn_on(self.hass)
self.hass.block_till_done()
self.assertTrue(light.is_on(self.hass, dev1.entity_id))
self.assertTrue(light.is_on(self.hass, dev2.entity_id))
self.assertTrue(light.is_on(self.hass, dev3.entity_id))
# turn off all lights
light.turn_off(self.hass)
self.hass.block_till_done()
self.assertFalse(light.is_on(self.hass, dev1.entity_id))
self.assertFalse(light.is_on(self.hass, dev2.entity_id))
self.assertFalse(light.is_on(self.hass, dev3.entity_id))
# toggle all lights
light.toggle(self.hass)
self.hass.block_till_done()
self.assertTrue(light.is_on(self.hass, dev1.entity_id))
self.assertTrue(light.is_on(self.hass, dev2.entity_id))
self.assertTrue(light.is_on(self.hass, dev3.entity_id))
# toggle all lights
light.toggle(self.hass)
self.hass.block_till_done()
self.assertFalse(light.is_on(self.hass, dev1.entity_id))
self.assertFalse(light.is_on(self.hass, dev2.entity_id))
self.assertFalse(light.is_on(self.hass, dev3.entity_id))
# Ensure all attributes process correctly
light.turn_on(self.hass, dev1.entity_id,
transition=10, brightness=20, color_name='blue')
light.turn_on(
self.hass, dev2.entity_id, rgb_color=(255, 255, 255),
white_value=255)
light.turn_on(self.hass, dev3.entity_id, xy_color=(.4, .6))
self.hass.block_till_done()
_, data = dev1.last_call('turn_on')
self.assertEqual(
{light.ATTR_TRANSITION: 10,
light.ATTR_BRIGHTNESS: 20,
light.ATTR_RGB_COLOR: (0, 0, 255)},
data)
_, data = dev2.last_call('turn_on')
self.assertEqual(
{light.ATTR_RGB_COLOR: (255, 255, 255),
light.ATTR_WHITE_VALUE: 255},
data)
_, data = dev3.last_call('turn_on')
self.assertEqual({light.ATTR_XY_COLOR: (.4, .6)}, data)
# One of the light profiles
prof_name, prof_x, prof_y, prof_bri = 'relax', 0.5119, 0.4147, 144
# Test light profiles
light.turn_on(self.hass, dev1.entity_id, profile=prof_name)
# Specify a profile and attributes to overwrite it
light.turn_on(
self.hass, dev2.entity_id,
profile=prof_name, brightness=100, xy_color=(.4, .6))
self.hass.block_till_done()
_, data = dev1.last_call('turn_on')
self.assertEqual(
{light.ATTR_BRIGHTNESS: prof_bri,
light.ATTR_XY_COLOR: (prof_x, prof_y)},
data)
_, data = dev2.last_call('turn_on')
self.assertEqual(
{light.ATTR_BRIGHTNESS: 100,
light.ATTR_XY_COLOR: (.4, .6)},
data)
        # Test invalid / malformed data
light.turn_on(self.hass)
light.turn_on(self.hass, dev1.entity_id, profile="nonexisting")
light.turn_on(self.hass, dev2.entity_id, xy_color=["bla-di-bla", 5])
light.turn_on(self.hass, dev3.entity_id, rgb_color=[255, None, 2])
self.hass.block_till_done()
_, data = dev1.last_call('turn_on')
self.assertEqual({}, data)
_, data = dev2.last_call('turn_on')
self.assertEqual({}, data)
_, data = dev3.last_call('turn_on')
self.assertEqual({}, data)
# faulty attributes will not trigger a service call
light.turn_on(
self.hass, dev1.entity_id,
profile=prof_name, brightness='bright', rgb_color='yellowish')
light.turn_on(
self.hass, dev2.entity_id,
white_value='high')
self.hass.block_till_done()
_, data = dev1.last_call('turn_on')
self.assertEqual({}, data)
_, data = dev2.last_call('turn_on')
self.assertEqual({}, data)
def test_broken_light_profiles(self):
"""Test light profiles."""
platform = loader.get_component('light.test')
platform.init()
user_light_file = self.hass.config.path(light.LIGHT_PROFILES_FILE)
# Setup a wrong light file
with open(user_light_file, 'w') as user_file:
user_file.write('id,x,y,brightness\n')
user_file.write('I,WILL,NOT,WORK\n')
self.assertFalse(setup_component(
self.hass, light.DOMAIN, {light.DOMAIN: {CONF_PLATFORM: 'test'}}
))
def test_light_profiles(self):
"""Test light profiles."""
platform = loader.get_component('light.test')
platform.init()
user_light_file = self.hass.config.path(light.LIGHT_PROFILES_FILE)
with open(user_light_file, 'w') as user_file:
user_file.write('id,x,y,brightness\n')
user_file.write('test,.4,.6,100\n')
self.assertTrue(setup_component(
self.hass, light.DOMAIN, {light.DOMAIN: {CONF_PLATFORM: 'test'}}
))
dev1, _, _ = platform.DEVICES
light.turn_on(self.hass, dev1.entity_id, profile='test')
self.hass.block_till_done()
_, data = dev1.last_call('turn_on')
self.assertEqual(
{light.ATTR_XY_COLOR: (.4, .6), light.ATTR_BRIGHTNESS: 100},
data)
| {
"content_hash": "d16577cdcd6ce3f4e8b86eef2c2fdfe7",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 78,
"avg_line_length": 34.11920529801324,
"alnum_prop": 0.6011257763975155,
"repo_name": "philipbl/home-assistant",
"id": "60e5b4d9ec2300b079db749fa5519423cb51e92e",
"size": "10304",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/light/test_init.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1446622"
},
{
"name": "Python",
"bytes": "3985732"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "7255"
}
],
"symlink_target": ""
} |
__all__ = ['Process', 'current_process', 'active_children']
#
# Imports
#
import os
import sys
import signal
import itertools
from multiprocessing.patch import property
#
#
#
try:
ORIGINAL_DIR = os.path.abspath(os.getcwd())
except OSError:
ORIGINAL_DIR = None
# forward compatibility
bytes = str
#
# Public functions
#
def current_process():
'''
Return process object representing the current process
'''
return _current_process
def active_children():
'''
Return list of process objects corresponding to live child processes
'''
_cleanup()
return list(_current_process._children)
#
#
#
def _cleanup():
# check for processes which have finished
for p in list(_current_process._children):
if p._popen.poll() is not None:
_current_process._children.discard(p)
#
# The `Process` class
#
class Process(object):
'''
Process objects represent activity that is run in a separate process
    The class is analogous to `threading.Thread`
'''
_Popen = None
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
assert group is None, 'group argument must be None for now'
count = _current_process._counter.next()
self._identity = _current_process._identity + (count,)
self._authkey = _current_process._authkey
self._daemonic = _current_process._daemonic
self._tempdir = _current_process._tempdir
self._parent_pid = os.getpid()
self._popen = None
self._target = target
self._args = tuple(args)
self._kwargs = dict(kwargs)
self._name = name or type(self).__name__ + '-' + \
':'.join(str(i) for i in self._identity)
def run(self):
'''
Method to be run in sub-process; can be overridden in sub-class
'''
if self._target:
self._target(*self._args, **self._kwargs)
def start(self):
'''
Start child process
'''
assert self._popen is None, 'cannot start a process twice'
assert self._parent_pid == os.getpid(), \
'can only start a process object created by current process'
assert not _current_process._daemonic, \
'daemonic processes are not allowed to have children'
_cleanup()
if self._Popen is not None:
Popen = self._Popen
else:
from multiprocessing.forking import Popen
self._popen = Popen(self)
_current_process._children.add(self)
def terminate(self):
'''
Terminate process; sends SIGTERM signal or uses TerminateProcess()
'''
self._popen.terminate()
def join(self, timeout=None):
'''
Wait until child process terminates
'''
assert self._parent_pid == os.getpid(), 'can only join a child process'
assert self._popen is not None, 'can only join a started process'
res = self._popen.wait(timeout)
if res is not None:
_current_process._children.discard(self)
def is_alive(self):
'''
Return whether process is alive
'''
if self is _current_process:
return True
assert self._parent_pid == os.getpid(), 'can only test a child process'
if self._popen is None:
return False
self._popen.poll()
return self._popen.returncode is None
@property
def name(self):
return self._name
@name.setter
def name(self, name):
assert isinstance(name, str), 'name must be a string'
self._name = name
@property
def daemon(self):
'''
Return whether process is a daemon
'''
return self._daemonic
@daemon.setter
def daemon(self, daemonic):
'''
Set whether process is a daemon
'''
assert self._popen is None, 'process has already started'
self._daemonic = daemonic
@property
def authkey(self):
return self._authkey
@authkey.setter
def authkey(self, authkey):
'''
Set authorization key of process
'''
self._authkey = AuthenticationString(authkey)
@property
def exitcode(self):
'''
Return exit code of process or `None` if it has yet to stop
'''
if self._popen is None:
return self._popen
return self._popen.poll()
@property
def ident(self):
'''
        Return identifier (PID) of process or `None` if it has yet to start
'''
if self is _current_process:
return os.getpid()
else:
return self._popen and self._popen.pid
pid = ident
def __repr__(self):
if self is _current_process:
status = 'started'
elif self._parent_pid != os.getpid():
status = 'unknown'
elif self._popen is None:
status = 'initial'
else:
if self._popen.poll() is not None:
status = self.exitcode
else:
status = 'started'
if type(status) is int:
if status == 0:
status = 'stopped'
else:
status = 'stopped[%s]' % _exitcode_to_name.get(status, status)
return '<%s(%s, %s%s)>' % (type(self).__name__, self._name,
status, self._daemonic and ' daemon' or '')
##
def _bootstrap(self):
from multiprocessing import util
global _current_process
try:
self._children = set()
self._counter = itertools.count(1)
try:
sys.stdin.close()
sys.stdin = open(os.devnull)
except (OSError, ValueError):
pass
_current_process = self
util._finalizer_registry.clear()
util._run_after_forkers()
util.info('child process calling self.run()')
try:
self.run()
exitcode = 0
finally:
util._exit_function()
except SystemExit, e:
if not e.args:
exitcode = 1
elif type(e.args[0]) is int:
exitcode = e.args[0]
else:
sys.stderr.write(e.args[0] + '\n')
sys.stderr.flush()
exitcode = 1
except:
exitcode = 1
import traceback
sys.stderr.write('Process %s:\n' % self.name)
sys.stderr.flush()
traceback.print_exc()
util.info('process exiting with exitcode %d' % exitcode)
return exitcode
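    # A minimal usage sketch (hypothetical caller code mirroring the stdlib
    # multiprocessing API; not part of this module):
    #
    #   from multiprocessing import Process
    #
    #   def worker(n):
    #       print 'working on', n
    #
    #   if __name__ == '__main__':
    #       p = Process(target=worker, args=(3,))
    #       p.start()
    #       p.join()
    #       print 'exit code:', p.exitcode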
#
# We subclass bytes to avoid accidental transmission of auth keys over network
#
class AuthenticationString(bytes):
def __reduce__(self):
from multiprocessing.forking import Popen
if not Popen.thread_is_spawning():
raise TypeError(
'Pickling an AuthenticationString object is '
'disallowed for security reasons'
)
return AuthenticationString, (bytes(self),)
#
# Create object representing the main process
#
class _MainProcess(Process):
def __init__(self):
self._identity = ()
self._daemonic = False
self._name = 'MainProcess'
self._parent_pid = None
self._popen = None
self._counter = itertools.count(1)
self._children = set()
self._authkey = AuthenticationString(os.urandom(32))
self._tempdir = None
_current_process = _MainProcess()
del _MainProcess
#
# Give names to some return codes
#
_exitcode_to_name = {}
for name, signum in signal.__dict__.items():
if name[:3]=='SIG' and '_' not in name:
_exitcode_to_name[-signum] = name
| {
"content_hash": "5ee640bc221e070fd317040f7bfcb873",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 79,
"avg_line_length": 26.714285714285715,
"alnum_prop": 0.5552584670231729,
"repo_name": "miracle2k/python-multiprocessing",
"id": "946a41e20be1730163977a2ac727ee238ac2b4ea",
"size": "8022",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Lib/multiprocessing/process.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import functools
import pathlib
import sys
import traceback
import unittest
import xml.etree.ElementTree as ET
import zipfile
from inc_datasources import _XMLMethodNames, _localWorkspace, _outputDirectory, _daGPTools
sys.path.insert(0, _daGPTools)
import arcpy
import pandas as pd
import tempfile
from scripts import dla
from create import *
def clear_feature_classes(directory: str):
"""
    The dla.gdb is the test workspace in which the feature classes are created. This function clears that
    workspace so that, after a test run, the newly created feature class is the only one that exists and can be pulled unambiguously.
:param directory:
:return:
"""
arcpy.env.workspace = directory
featureclasses = arcpy.ListFeatureClasses()
if featureclasses is not None:
for featureclass in featureclasses:
arcpy.Delete_management(os.path.join(directory, featureclass))
def build_correct_fields(xml_location: str, include_globalid: bool = False):
"""
takes the xml file and creates the fields that should be in the new feature class
:param xml_location: str
:param include_globalid: bool
:return:
"""
fields = dla.getXmlElements(xml_location, "Field")
correct_fields = []
for field in fields:
if not include_globalid and str.lower(dla.getNodeValue(field, "TargetName")) != "globalid":
correct_fields.append(dla.getNodeValue(field, "TargetName"))
return correct_fields
def make_copy(directory: str, lw: dict):
"""
Copies the target feature class into the dla.gdb for comparison in the tests
:param directory: str
:param lw : dict
:return:
"""
arcpy.env.workspace = lw["Target"]
arcpy.CopyFeatures_management(lw["TargetName"], os.path.join(directory, "copy"))
def xml_compare(x1: ET, x2: ET, reporter=None):
"""
taken from:
https://bitbucket.org/ianb/formencode/src/tip/formencode/doctest_xml_compare.py?fileviewer=file-view-default#cl-70
:param x1:
:param x2:
:param reporter:
:return:
"""
if x1.tag in ['Source', 'Target'] or x2.tag in ['Source', 'Target']:
# We skip asserting the data path is correct because our xml file data paths may not match
return True
if x1.tag != x2.tag:
if reporter:
reporter('Tags do not match: %s and %s' % (x1.tag, x2.tag))
return False
for name, value in x1.attrib.items():
if x2.attrib.get(name) != value:
if reporter:
reporter('Attributes do not match: %s=%r, %s=%r'
% (name, value, name, x2.attrib.get(name)))
return False
for name in x2.attrib.keys():
if name not in x1.attrib:
if reporter:
reporter('x2 has an attribute x1 is missing: %s'
% name)
return False
if not text_compare(x1.text, x2.text):
if reporter:
reporter('text: %r != %r' % (x1.text, x2.text))
return False
if not text_compare(x1.tail, x2.tail):
if reporter:
reporter('tail: %r != %r' % (x1.tail, x2.tail))
return False
cl1 = x1.getchildren()
cl2 = x2.getchildren()
if len(cl1) != len(cl2):
if reporter:
reporter('children length differs, %i != %i'
% (len(cl1), len(cl2)))
return False
i = 0
for c1, c2 in zip(cl1, cl2):
i += 1
if not xml_compare(c1, c2, reporter=reporter):
if reporter:
reporter('children %i do not match: %s'
% (i, c1.tag))
return False
return True
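# A minimal usage sketch (hypothetical): compare two small documents and collect
# the reported differences.
#
#   a = ET.fromstring('<Fields><Field><TargetName>ID</TargetName></Field></Fields>')
#   b = ET.fromstring('<Fields><Field><TargetName>id</TargetName></Field></Fields>')
#   diffs = []
#   xml_compare(a, b, reporter=diffs.append)   # returns False; diffs says why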
def text_compare(t1: str, t2: str):
"""
taken from:
https://bitbucket.org/ianb/formencode/src/tip/formencode/doctest_xml_compare.py?fileviewer=file-view-default#cl-70
:param t1:
:param t2:
:return:
"""
if not t1 and not t2:
return True
if t1 == '*' or t2 == '*':
return True
return (t1 or '').strip() == (t2 or '').strip()
class UnitTests(unittest.TestCase):
"""
Runs the unit tests for the various functions for all test cases and data sources
"""
def __init__(self, test_object, *args, **kwargs):
super(UnitTests, self).__init__(*args, **kwargs)
self.testObject = test_object
self.local_workspace = self.testObject.local_workspace
self.localDirectory = _outputDirectory
self.sourceWorkspace = self.local_workspace["Source"]
self.targetWorkspace = self.local_workspace["Target"]
self.sourceFC = self.local_workspace["SourceName"]
self.targetFC = self.local_workspace["TargetName"]
self.localFC = list()
self.localDataPath = ""
self.localFields = tuple()
self.sourceDataPath = os.path.join(self.local_workspace["Source"], self.local_workspace["SourceName"])
self.targetDataPath = os.path.join(self.local_workspace["Target"], self.local_workspace["TargetName"])
self.sourceFields = tuple(arcpy.ListFields(self.sourceDataPath))
self.targetFields = tuple(arcpy.ListFields(self.targetDataPath))
self.methods = _XMLMethodNames
self.xmlLocation = self.local_workspace["xmlLocation"]
self.outXML = os.path.join(str(pathlib.Path(self.local_workspace["outXML"]).parent),
pathlib.Path(self.local_workspace["outXML"]).stem,
os.path.basename(self.local_workspace["outXML"]))
self.correctXML = self.local_workspace["correctXML"]
def test_create(self):
"""
Creates the feature class or xml file for testing
:return:
"""
clear_feature_classes(_outputDirectory)
self.testObject.main()
if self.testObject.title != "CreateConfig":
self.set_local_info()
def get_default_values(self):
"""
Returns a dictionary where the key is the field name and the value is that field's default value
:return: dict
"""
out_dict = dict()
for field in self.targetFields:
out_dict[field.name] = field.defaultValue
return out_dict
def set_local_info(self):
"""
Once the feature class being tested is created, sets the datapath and fields of that feature class
:return:
"""
arcpy.env.workspace = self.localDirectory
self.localFC = arcpy.ListFeatureClasses()[0]
arcpy.env.workspace = ""
self.localDataPath = os.path.join(_outputDirectory, self.localFC)
self.localFields = tuple(arcpy.ListFields(self.localDataPath))
@staticmethod
def build_data_frame(data_path: str, columns: tuple):
"""
Builds and caches a pandas DataFrame object containing the information from the specified feature class
:param data_path: str
        :param columns: tuple(str)
:return: pd.DataFrame object
"""
# creates a searchCursor for a given feature class and returns an array of that table
return pd.DataFrame(list(arcpy.da.SearchCursor(data_path, columns)), columns=columns)
@functools.lru_cache()
def get_xml_parse(self):
"""
Returns and caches a SourceTargetParser object containing information in it from the specified
SourceTarget.xml file
:return: SourceTargetParser object
"""
return SourceTargetParser(self.xmlLocation)
def test_fields(self):
"""
Compares the xml file with the mutated file to ensure that the fields were correctly transferred over
and not tampered with
:return:
"""
if self.testObject.title not in ["Preview", "Stage", "Append", "Replace"]:
return
correct_fields = build_correct_fields(self.xmlLocation, self.testObject.globalIDCheck)
if self.testObject.title in ["Append", "Replace"]:
fields = arcpy.ListFields(self.targetDataPath)
else:
fields = arcpy.ListFields(self.localDataPath)
fieldnames = []
for field in fields:
if self.testObject.globalIDCheck:
if field.name.lower() not in ["", "objectid", "shape"]:
fieldnames.append(field.name)
else:
if field.name.lower() not in ["", "objectid", "shape", "globalid"]:
fieldnames.append(field.name)
for cfield in correct_fields:
self.assertIn(cfield, fieldnames)
def test_length(self):
"""
        Ensures that the mutated file, depending on which operation produced it, has the correct number of rows
:return:
"""
if self.testObject.title not in ["Preview", "Stage", "Append", "Replace"]:
return
source_table = self.build_data_frame(self.sourceDataPath, tuple([field.name for field in self.sourceFields]))
local_table = self.build_data_frame(self.localDataPath, tuple([field.name for field in self.localFields]))
# target_table = (list(arcpy.da.SearchCursor(self.targetDataPath, "*")))
target_table = self.build_data_frame(self.targetDataPath, tuple([field.name for field in self.targetFields]))
mode = self.testObject.title # variable assignment to help with readability
if mode == "Preview":
if len(source_table) < self.testObject.RowLimit:
self.assertEqual(len(local_table), len(source_table))
else:
self.assertEqual(len(local_table), self.testObject.RowLimit)
elif mode == "Stage":
self.assertEqual(len(local_table), len(source_table))
elif mode == "Append":
self.assertEqual(len(target_table), len(local_table) + len(source_table))
elif mode == "Replace":
self.assertEqual(len(target_table), len(local_table))
else:
self.assertIn(mode, ["Preview", "Stage", "Append", "Replace"])
def test_replace_data(self):
"""
Ensures the correct rows were appended and removed and in the correct order
:return:
"""
replaced_rows_list = []
targetfields = list()
for field in self.targetFields:
if field.name.lower() not in ['globalid', 'objectid']:
targetfields.append(field.name)
localfields = list()
for field in self.localFields:
if field.name.lower() not in ['globalid', 'objectid']:
localfields.append(field.name)
copy = self.build_data_frame(self.localDataPath, tuple(localfields)).iterrows()
target = self.build_data_frame(self.targetDataPath, tuple(targetfields)).iterrows()
replace_dict = self.get_xml_parse().parse_replace()
for copy_row, targetRow in zip(copy, target): # will iterate through until all of the copy cursor is exhausted
copy_row = copy_row[1]
targetRow = targetRow[1]
while not targetRow.equals(copy_row):
replaced_rows_list.append(copy_row)
copy_row = next(copy)
copy_row = copy_row[1]
for targetRow, copy_row in zip(target, replaced_rows_list):
            # now iterate through the rows that should have been re-appended at the end of the target
targetRow = targetRow[1]
# these assertions make sure the targetRow SHOULD have been replaced
if replace_dict["Operator"] == "=":
self.assertEqual(targetRow[replace_dict["FieldName"]], replace_dict["Value"])
if replace_dict["Operator"] == "!=":
self.assertNotEqual(targetRow[replace_dict["FieldName"]], replace_dict["Value"])
if replace_dict["Operator"] == "Like":
self.assertIn(replace_dict["Value"], targetRow[replace_dict["FieldName"]])
self.assertTrue(targetRow.equals(copy_row))
            # Comparing against the copy rows in order ensures order and accuracy; here the target
            # cursor starts where the beginning of the re-appended rows should be
def test_data(self):
"""
        Ensures that the mutated file has the correct data in each row, and that the Data Assistant actions were
performed correctly
:return:
"""
source_table = self.build_data_frame(self.sourceDataPath, tuple([field.name for field in self.sourceFields]))
local_table = self.build_data_frame(self.localDataPath, tuple([field.name for field in self.localFields]))
target_table = self.build_data_frame(self.targetDataPath, tuple([field.name for field in self.targetFields]))
parse_object = self.get_xml_parse()
parse_object.data = parse_object.parse()
xml_fields = parse_object.get_pairings()
method_dict = parse_object.get_methods()
xml_data = parse_object.get_data()
default_values = self.get_default_values()
if self.testObject.title in ["Preview", "Stage"]: # needed so that we can use the same function to test append
target = local_table
else:
if 'GLOBALID' in target_table.columns:
                target_table = target_table.drop('GLOBALID', 1)  # TODO: Might need to omit other iterations of globalid
if 'GLOBALID' in local_table.columns:
                local_table = local_table.drop('GLOBALID', 1)  # TODO: Might need to omit other iterations of globalid
# self.assertTrue(local_table.equals(target_table.head(len(local_table))))
self.assertTrue((local_table == target_table.head(len(local_table))).all().all())
target = target_table.drop(range(len(local_table))) # ensures we are only comparing the newly appended data
for field in xml_fields.keys():
if method_dict[field] == self.methods["None"]:
self.none_test(target[field], default_values[field])
elif method_dict[field] == self.methods["Copy"]:
self.copy_test(source_table[xml_fields[field]], target[field])
elif method_dict[field] == self.methods["Set Value"]:
self.set_value_test(target[field], xml_data[field][self.methods["Set Value"]])
elif method_dict[field] == self.methods["Value Map"]:
self.value_map_test(source_table[xml_fields[field]], target[field],
xml_data[field][self.methods["Value Map"]], xml_data[field]["Otherwise"])
elif method_dict[field] == self.methods["Change Case"]:
self.change_case_test(source_table[xml_fields[field]], target[field],
xml_data[field][self.methods["Change Case"]])
elif method_dict[field] == self.methods["Concatenate"]:
self.concatenate_test(target[field], xml_data[field]["Separator"],
xml_data[field]["Concatenate"])
elif method_dict[field] == self.methods["Left"]:
self.left_test(source_table[xml_fields[field]], target[field], xml_data[field]["Left"])
elif method_dict[field] == self.methods["Right"]:
self.right_test(source_table[xml_fields[field]], target[field], xml_data[field]["Right"])
elif method_dict[field] == self.methods["Substring"]:
self.substring_test(source_table[xml_fields[field]], target[field], xml_data[field]["Start"],
xml_data[field]["Length"])
elif method_dict[field] == self.methods["Split"]:
self.split_test(source_table[xml_fields[field]], target[field], xml_data[field]["SplitAt"],
xml_data[field]["Part"])
elif method_dict[field] == self.methods["Conditional Value"]:
self.conditional_value_test(source_table[xml_fields[field]], target[field],
xml_data[field]["Oper"], xml_data[field]["If"], xml_data[field]["Then"],
xml_data[field]["Else"])
elif method_dict[field] == self.methods["Domain Map"]:
self.domain_map_test(source_table[xml_fields[field]], target[field],
xml_data[field][self.methods["Domain Map"]])
else:
self.assertIn(method_dict[field], self.methods)
def none_test(self, target: pd.Series, defaultValue):
"""
Ensures that the vector is a vector of none
:param target:
:param defaultValue:
:return:
"""
self.assertTrue(len(target.unique()) == 1 and (
target.unique()[0] is None or target.unique()[0] == 'None' or target.unique()[0] == defaultValue),
target.to_string())
def copy_test(self, source: pd.Series, target: pd.Series):
"""
Ensures that the copy source got copied to the target. In other words, ensures that the two vectors are equal.
"""
self.assertTrue((source == target.astype(source.dtype)).all(),
"Mis-match bewteen these fields: " + source.name + " " + target.name)
def set_value_test(self, target: pd.Series, value: pd.Series):
"""
Ensures that the target values are all set properly
:param target:
:param value:
:return:
"""
self.assertTrue(len(target.unique()) == 1 and target.unique() == value)
def value_map_test(self, source: pd.Series, target: pd.Series, value_dict: dict, otherwise):
"""
Ensures the values are set to what they need to be based on the preset configuration in the value map
:param source:
:param target:
:param value_dict
:param otherwise
:return:
"""
for s, t in zip(source, target):
if s in value_dict:
self.assertTrue(str(t) == str(value_dict[s]), str(t) + " != " + str(value_dict[s]))
else:
self.assertTrue(str(t) == str(otherwise))
def change_case_test(self, source: pd.Series, target: pd.Series, manipulation: str):
"""
        Ensures each row's case was changed correctly
:param source:
:param target:
:param manipulation: str
:return:
"""
if manipulation == "Uppercase":
self.assertTrue((source.str.upper() == target).all())
elif manipulation == "Lowercase":
self.assertTrue((source.str.lower() == target).all())
elif manipulation == "Capitalize":
self.assertTrue((source.str.capitalize() == target).all())
elif manipulation == "Title":
self.assertTrue((source.str.title() == target).all())
else:
self.assertIn(manipulation, ["Uppercase", "Lowercase", "Capitalize", "Title"])
    def concatenate_test(self, target: pd.Series, separator: str,
                         cfields: list):
        """
        Ensures the row concatenates the correct field values
        :param target:
        :param separator:
        :param cfields:
        :return:
        """
        source_table = self.build_data_frame(self.sourceDataPath, tuple([field.name for field in self.sourceFields]))
        if separator == "(space)":
            separator = " "
        compare_column = source_table[cfields.pop(0)]
        for cfield in cfields:
            right = source_table[cfield].replace("NaN", "").astype(str)
            compare_column = compare_column.astype(str).str.cat(right, sep=separator)
        self.assertTrue((target == compare_column).all())
def left_test(self, source: pd.Series, target: pd.Series, number: int):
"""
        Ensures the correct number of characters from the left were mapped
:param source:
:param target
:param number: int
:return:
"""
self.assertTrue((source.astype(str).apply(lambda f: f[:number]) == target.astype(str)).all())
def right_test(self, source: pd.Series, target: pd.Series, number: int):
"""
Ensures the correct number of characters from the right were mapped
:param source:
:param target:
:param number:
:return:
"""
        self.assertTrue((source.astype(str).apply(lambda f: f[-number:]) == target.astype(str)).all())
def substring_test(self, source: pd.Series, target: pd.Series, start: int, length: int):
"""
Ensures the correct substring was pulled from each row
:param source:
:param target:
:param start:
:param length:
:return:
"""
self.assertTrue((source.astype(str).apply(lambda f: f[start:length + start]) == target.astype(str)).all())
def split_test(self, source: pd.Series, target: pd.Series, split_point: str, part: int):
"""
Ensures the correct split was made and the resulting data is correct
:param source:
:param target:
:param split_point:
:param part:
:return:
"""
for sfield, tfield in zip(source, target):
self.assertTrue(sfield.split(split_point)[part] == tfield)
def conditional_value_test(self, source: pd.Series, target: pd.Series, oper: str, if_value,
then_value, else_value):
"""
Ensures that the conditional value evaluates correctly in each row of the column
:param source:
:param target:
:param oper:
:param if_value:
:param then_value:
:param else_value:
:return:
"""
for sfield, tfield in zip(source, target):
if oper == "==":
if sfield == if_value:
self.assertEqual(then_value, tfield)
else:
self.assertEqual(else_value, tfield)
elif oper == "!'":
if sfield != if_value:
self.assertEqual(then_value, tfield)
else:
self.assertEqual(else_value, tfield)
elif oper == "<":
if sfield < if_value:
self.assertEqual(then_value, tfield)
else:
self.assertEqual(else_value, tfield)
elif oper == ">":
if sfield > if_value:
self.assertEqual(then_value, tfield)
else:
self.assertEqual(else_value, tfield)
else:
self.assertIn(oper, ["==", "!=", "<", ">"])
def domain_map_test(self, source: pd.Series, target: pd.Series, mappings: dict):
"""
Ensures the domain map pairings are correctly mapped in the target column
:param self:
:param source:
:param target:
:param mappings:
:return:
"""
for s, t in zip(source, target):
if s in mappings:
                if mappings[s] == "(None)":
                    # In the event that a value is loaded in the xml but not mapped to any target domain, we want
                    # to make sure that the source and target values are the same
                    self.assertEqual(s, t)
                else:
                    self.assertEqual(mappings[s], t)
def test_xml(self):
"""
Tests to see that the newly created xml file is equal to a pre-determined correct file
:return:
"""
if self.testObject.title != "CreateConfig":
return
out_xml = ET.parse(self.outXML).getroot()
correct_xml = ET.parse(self.correctXML).getroot()
self.assertTrue(xml_compare(out_xml, correct_xml))
def destage(self):
"""
        After staging is done, the xml records that there is a staged feature class that append can use to append to the source.
        This function deletes that entry from the xml so the xml can be used again, or so append can recreate the mapping.
:return:
"""
xml = ET.parse(self.xmlLocation)
root = xml.getroot()
datasets = root.getchildren()[0]
staged = datasets.getchildren()[len(datasets.getchildren()) - 1]
if staged.tag == "Staged":
datasets.remove(staged)
xml.write(self.xmlLocation)
def main(self):
"""
Runs all of the tests
:return:
"""
if self.testObject.title == "CreateConfig":
self.test_create()
self.test_xml()
return
else:
self.test_create()
self.test_length()
self.test_fields()
if self.testObject.title == 'Replace':
self.test_replace_data()
else:
self.test_data()
class SourceTargetParser(object):
"""
    Class designed to store the essential parts of the xml file in readable python data structures
"""
def __init__(self, xml_file: str):
self.xmlLocation = xml_file
self.xml = ET.parse(self.xmlLocation).getroot()
self.targetFields = []
self.methods = _XMLMethodNames # not actually the methods in this file, just the naming syntax for the xml
self.data = dict()
@functools.lru_cache()
def get_sourcefields(self):
"""
Returns and caches the source names as specified in the xml. Some might be None if there is no mapping to the
corresponding target field.
:return:
"""
sourcefields = []
fields = self.xml.find('Fields').getchildren()
for field in fields:
sourceName = field.find('SourceName').text
sourcefields.append(sourceName)
return sourcefields
def get_data(self):
"""
Returns the xml data
:return: dict
"""
return self.data
@functools.lru_cache()
def get_targetfields(self):
"""
Returns and caches the target field names as specified in the xml.
:return:
"""
targetfields = []
fields = self.xml.find('Fields').getchildren()
for field in fields:
targetName = field.find('TargetName').text
targetfields.append(targetName)
return targetfields
@functools.lru_cache()
def get_pairings(self) -> dict:
"""
Returns a dictionary where key is TargetName and value is SourceName for each field
:return: dict
"""
pairings = dict()
fields = self.xml.find('Fields').getchildren()
for field in fields:
sourcename = field.find('SourceName').text
targetname = field.find('TargetName').text
pairings[targetname] = sourcename
return pairings
@functools.lru_cache()
def get_methods(self) -> dict:
"""
        Returns and caches the methods in order of appearance in the xml file.
:return:
"""
method_dict = dict()
fields = self.xml.find('Fields').getchildren()
for field in fields:
targetname = field.find('TargetName').text
method = field.find('Method').text
method_dict[targetname] = method
return method_dict
@functools.lru_cache()
def parse_replace(self) -> dict:
"""
Returns a dictionary with the information used by Replace By Field Value
:return: dict
"""
datasets = self.xml.find('Datasets')
replace_by = datasets.find('ReplaceBy')
if len(replace_by.getchildren()) == 0:
raise (AssertionError("ReplaceBy is empty in the XML"))
outdict = dict()
outdict["FieldName"] = replace_by.find('FieldName').text
outdict['Operator'] = replace_by.find('Operator').text
outdict['Value'] = replace_by.find('Value').text
return outdict
def parse(self):
"""
Interprets the xml file and stores the information in appropriate places
:return:
"""
data = dict()
fields = self.xml.find('Fields').getchildren()
for field in fields:
target_name = field.find('TargetName').text
method = field.find('Method').text # added for visibility
if method == self.methods["Set Value"]:
data[target_name] = dict()
data[target_name][self.methods["Set Value"]] = field.find(self.methods["Set Value"]).text
elif method == self.methods["Domain Map"]:
domain_map = field.find(self.methods["Domain Map"]).getchildren()
data[target_name] = dict()
data[target_name][self.methods["Domain Map"]] = dict()
for tag in domain_map:
if tag.tag == "sValue":
svalue = tag.text
if tag.tag == "tValue":
data[target_name][self.methods["Domain Map"]][svalue] = tag.text
svalue = ""
elif method == self.methods["Value Map"]:
value_map = field.find(self.methods["Value Map"]).getchildren()
data[target_name] = dict()
data[target_name][self.methods["Value Map"]] = dict()
for tag in value_map:
if tag.tag == "sValue":
svalue = tag.text
elif tag.tag == "tValue":
data[target_name][self.methods["Value Map"]][svalue] = tag.text
svalue = ""
elif tag.tag == "Otherwise":
data[target_name]["Otherwise"] = tag.text
elif method == self.methods["Change Case"]:
data[target_name] = dict()
data[target_name][self.methods["Change Case"]] = field.find(self.methods["Change Case"]).text
elif method == self.methods["Concatenate"]:
data[target_name] = dict()
data[target_name][self.methods["Concatenate"]] = list()
data[target_name]["Separator"] = field.find("Separator").text
cfields = field.find("cFields").getchildren()
for cfield in cfields:
data[target_name][self.methods["Concatenate"]].append(cfield.find('Name').text)
elif method == self.methods["Left"]:
data[target_name] = dict()
data[target_name][self.methods["Left"]] = int(field.find(self.methods["Left"]).text)
elif method == self.methods["Right"]:
data[target_name] = dict()
data[target_name][self.methods["Right"]] = int(field.find(self.methods["Right"]).text)
elif method == self.methods["Substring"]:
data[target_name] = dict()
data[target_name]["Start"] = int(field.find('Start').text)
data[target_name]["Length"] = int(field.find('Length').text)
elif method == self.methods["Split"]:
data[target_name] = dict()
data[target_name]["SplitAt"] = field.find("SplitAt").text
data[target_name]["Part"] = int(field.find("Part").text)
elif method == self.methods["Conditional Value"]:
data[target_name] = dict()
data[target_name]["Oper"] = field.find("Oper").text.strip("\'").strip("\"")
data[target_name]["If"] = field.find("If").text.strip("\'").strip("\"")
data[target_name]["Then"] = field.find("Then").text.strip("\'").strip("\"")
data[target_name]["Else"] = field.find("Else").text.strip("\'").strip("\"")
else:
assert method in self.methods.values()
return data
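    # A rough sketch of the XML shape this parser walks (hypothetical field
    # entries and tag values; real files are produced by the Data Assistant
    # configuration tools):
    #
    #   <Fields>
    #     <Field>
    #       <SourceName>SRC_NAME</SourceName>
    #       <TargetName>TGT_NAME</TargetName>
    #       <Method>Copy</Method>
    #     </Field>
    #     <Field>
    #       <SourceName>(None)</SourceName>
    #       <TargetName>TGT_STATUS</TargetName>
    #       <Method>SetValue</Method>
    #       <SetValue>ACTIVE</SetValue>
    #     </Field>
    #   </Fields>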
def make_temp_file() -> tempfile.TemporaryDirectory:
"""
Returns a temporary directory that is used to store the local data for the tests
:return:
"""
    localfolder = str(pathlib.Path(r".\localData").absolute())
return tempfile.TemporaryDirectory(dir=localfolder)
def change_workspace(lw: list, tmp_name: str) -> list:
"""
Changes the data paths to reflect the new temporary file made
:param lw: list
:param tmp_name: str
:return:
"""
out_workspace = lw.copy()
for workspace in out_workspace:
the_path = ""
for part in pathlib.Path(workspace["Source"]).parts:
the_path = os.path.join(the_path, part)
if part == 'localData':
the_path = os.path.join(the_path, tmp_name)
workspace["Source"] = the_path
the_path = ""
for part in pathlib.Path(workspace["Target"]).parts:
the_path = os.path.join(the_path, part)
if part == 'localData':
the_path = os.path.join(the_path, tmp_name)
workspace["Target"] = the_path
return out_workspace
def set_up_data(tmpdir: str):
"""
Unzips all data into local directory
:param tmpdir:
:return:
"""
    workspace = str(pathlib.Path(r".\localData").absolute())
for file in os.listdir(workspace):
if ".zip" in file:
with zipfile.ZipFile(os.path.join(workspace, file)) as unzipper:
unzipper.extractall(tmpdir)
def change_xml_path(t_workspace: list):
"""
Changes the source and target path in the xml files for testing
:param t_workspace:
:return:
"""
for workspace in t_workspace:
xml = ET.parse(workspace["xmlLocation"])
root = xml.getroot()
datasets = root.find('Datasets').getchildren()
for field in datasets:
if field.tag == "Source":
field.text = os.path.join(workspace["Source"], workspace["SourceName"])
if field.tag == "Target":
field.text = os.path.join(workspace["Target"], workspace["TargetName"])
xml.write(workspace["xmlLocation"])
if __name__ == '__main__':
tmp = make_temp_file()
temp_workspace = change_workspace(_localWorkspace, pathlib.Path(tmp.name).stem)
set_up_data(tmp.name)
change_xml_path(temp_workspace)
try:
for local_workspace in temp_workspace:
UnitTests(Append(local_workspace)).main()
UnitTests(Replace(local_workspace)).main()
except:
traceback.print_exc()
sys.exit(-1)
finally:
try:
tmp.cleanup()
except PermissionError:
print("Unable to delete temporary folder: Permission Error")
pass
| {
"content_hash": "a73fed97d49953e56a05edbbf100c89c",
"timestamp": "",
"source": "github",
"line_count": 828,
"max_line_length": 120,
"avg_line_length": 41.52536231884058,
"alnum_prop": 0.5794142454119768,
"repo_name": "JRosenfeldIntern/data-assistant",
"id": "35795f5fdf3ca5dfa6bc0d47edaab4467d57dc7d",
"size": "34383",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "UnitTests/test_append.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "177"
},
{
"name": "C#",
"bytes": "169021"
},
{
"name": "Groovy",
"bytes": "361"
},
{
"name": "Python",
"bytes": "501364"
},
{
"name": "XSLT",
"bytes": "21618"
}
],
"symlink_target": ""
} |
"""Fake RPC implementation which calls proxy methods directly with no
queues. Casts will block, but this is very useful for tests.
"""
import inspect
# NOTE(russellb): We specifically want to use json, not our own jsonutils.
# jsonutils has some extra logic to automatically convert objects to primitive
# types so that they can be serialized. We want to catch all cases where
# non-primitive types make it into this code and treat it as an error.
import json
import time
import eventlet
from ceilometer.openstack.common.rpc import common as rpc_common
CONSUMERS = {}
class RpcContext(rpc_common.CommonRpcContext):
def __init__(self, **kwargs):
super(RpcContext, self).__init__(**kwargs)
self._response = []
self._done = False
def deepcopy(self):
values = self.to_dict()
new_inst = self.__class__(**values)
new_inst._response = self._response
new_inst._done = self._done
return new_inst
def reply(self, reply=None, failure=None, ending=False):
if ending:
self._done = True
if not self._done:
self._response.append((reply, failure))
class Consumer(object):
def __init__(self, topic, proxy):
self.topic = topic
self.proxy = proxy
def call(self, context, version, method, namespace, args, timeout):
done = eventlet.event.Event()
def _inner():
ctxt = RpcContext.from_dict(context.to_dict())
try:
rval = self.proxy.dispatch(context, version, method,
namespace, **args)
res = []
# Caller might have called ctxt.reply() manually
for (reply, failure) in ctxt._response:
if failure:
raise failure[0], failure[1], failure[2]
res.append(reply)
# if ending not 'sent'...we might have more data to
# return from the function itself
if not ctxt._done:
if inspect.isgenerator(rval):
for val in rval:
res.append(val)
else:
res.append(rval)
done.send(res)
except rpc_common.ClientException as e:
done.send_exception(e._exc_info[1])
except Exception as e:
done.send_exception(e)
thread = eventlet.greenthread.spawn(_inner)
if timeout:
start_time = time.time()
while not done.ready():
eventlet.greenthread.sleep(1)
cur_time = time.time()
if (cur_time - start_time) > timeout:
thread.kill()
raise rpc_common.Timeout()
return done.wait()
class Connection(object):
"""Connection object."""
def __init__(self):
self.consumers = []
def create_consumer(self, topic, proxy, fanout=False):
consumer = Consumer(topic, proxy)
self.consumers.append(consumer)
if topic not in CONSUMERS:
CONSUMERS[topic] = []
CONSUMERS[topic].append(consumer)
def close(self):
for consumer in self.consumers:
CONSUMERS[consumer.topic].remove(consumer)
self.consumers = []
def consume_in_thread(self):
pass
def create_connection(conf, new=True):
"""Create a connection."""
return Connection()
def check_serialize(msg):
"""Make sure a message intended for rpc can be serialized."""
json.dumps(msg)
def multicall(conf, context, topic, msg, timeout=None):
"""Make a call that returns multiple times."""
check_serialize(msg)
method = msg.get('method')
if not method:
return
args = msg.get('args', {})
version = msg.get('version', None)
namespace = msg.get('namespace', None)
try:
consumer = CONSUMERS[topic][0]
except (KeyError, IndexError):
raise rpc_common.Timeout("No consumers available")
else:
return consumer.call(context, version, method, namespace, args,
timeout)
def call(conf, context, topic, msg, timeout=None):
"""Sends a message on a topic and wait for a response."""
rv = multicall(conf, context, topic, msg, timeout)
# NOTE(vish): return the last result from the multicall
rv = list(rv)
if not rv:
return
return rv[-1]
def cast(conf, context, topic, msg):
check_serialize(msg)
try:
call(conf, context, topic, msg)
except Exception:
pass
def notify(conf, context, topic, msg, envelope):
check_serialize(msg)
def cleanup():
pass
def fanout_cast(conf, context, topic, msg):
"""Cast to all consumers of a topic."""
check_serialize(msg)
method = msg.get('method')
if not method:
return
args = msg.get('args', {})
version = msg.get('version', None)
namespace = msg.get('namespace', None)
for consumer in CONSUMERS.get(topic, []):
try:
consumer.call(context, version, method, namespace, args, None)
except Exception:
pass
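# ---------------------------------------------------------------------------
# Illustrative usage sketch -- not part of the original module.  It shows how
# the fake driver wires a consumer to a topic and dispatches a blocking call.
# The EchoProxy class and the 'demo' topic are made-up stand-ins; real
# services pass an RPC dispatcher object as the proxy instead.
if __name__ == '__main__':
    class EchoProxy(object):
        # Minimal stand-in for the dispatcher: only dispatch() is required
        # by Consumer.call() above.
        def dispatch(self, ctxt, version, method, namespace, **kwargs):
            return kwargs['value']
    connection = create_connection(None)
    connection.create_consumer('demo', EchoProxy())
    demo_context = RpcContext.from_dict({})
    # call() routes through multicall() to the single registered consumer
    # and returns the last reply.
    print(call(None, demo_context, 'demo',
               {'method': 'echo', 'args': {'value': 42}}))
    connection.close()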
| {
"content_hash": "99e56469d03e155b7cb7ff819d71a9c6",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 78,
"avg_line_length": 28.977777777777778,
"alnum_prop": 0.5774539877300614,
"repo_name": "rickerc/ceilometer_audit",
"id": "16caadc45d97701f221470a581e1d15262a0e097",
"size": "5876",
"binary": false,
"copies": "3",
"ref": "refs/heads/cis-havana-staging",
"path": "ceilometer/openstack/common/rpc/impl_fake.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6284"
},
{
"name": "JavaScript",
"bytes": "64962"
},
{
"name": "Python",
"bytes": "1810243"
},
{
"name": "Shell",
"bytes": "1322"
}
],
"symlink_target": ""
} |
"""Support for Envisalink devices."""
import asyncio
import logging
import voluptuous as vol
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.const import EVENT_HOMEASSISTANT_STOP, CONF_TIMEOUT, \
CONF_HOST
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.dispatcher import async_dispatcher_send
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'envisalink'
DATA_EVL = 'envisalink'
CONF_CODE = 'code'
CONF_EVL_KEEPALIVE = 'keepalive_interval'
CONF_EVL_PORT = 'port'
CONF_EVL_VERSION = 'evl_version'
CONF_PANEL_TYPE = 'panel_type'
CONF_PANIC = 'panic_type'
CONF_PARTITIONNAME = 'name'
CONF_PARTITIONS = 'partitions'
CONF_PASS = 'password'
CONF_USERNAME = 'user_name'
CONF_ZONEDUMP_INTERVAL = 'zonedump_interval'
CONF_ZONENAME = 'name'
CONF_ZONES = 'zones'
CONF_ZONETYPE = 'type'
DEFAULT_PORT = 4025
DEFAULT_EVL_VERSION = 3
DEFAULT_KEEPALIVE = 60
DEFAULT_ZONEDUMP_INTERVAL = 30
DEFAULT_ZONETYPE = 'opening'
DEFAULT_PANIC = 'Police'
DEFAULT_TIMEOUT = 10
SIGNAL_ZONE_UPDATE = 'envisalink.zones_updated'
SIGNAL_PARTITION_UPDATE = 'envisalink.partition_updated'
SIGNAL_KEYPAD_UPDATE = 'envisalink.keypad_updated'
ZONE_SCHEMA = vol.Schema({
vol.Required(CONF_ZONENAME): cv.string,
vol.Optional(CONF_ZONETYPE, default=DEFAULT_ZONETYPE): cv.string})
PARTITION_SCHEMA = vol.Schema({
vol.Required(CONF_PARTITIONNAME): cv.string})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PANEL_TYPE):
vol.All(cv.string, vol.In(['HONEYWELL', 'DSC'])),
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASS): cv.string,
vol.Optional(CONF_CODE): cv.string,
vol.Optional(CONF_PANIC, default=DEFAULT_PANIC): cv.string,
vol.Optional(CONF_ZONES): {vol.Coerce(int): ZONE_SCHEMA},
vol.Optional(CONF_PARTITIONS): {vol.Coerce(int): PARTITION_SCHEMA},
vol.Optional(CONF_EVL_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_EVL_VERSION, default=DEFAULT_EVL_VERSION):
vol.All(vol.Coerce(int), vol.Range(min=3, max=4)),
vol.Optional(CONF_EVL_KEEPALIVE, default=DEFAULT_KEEPALIVE):
vol.All(vol.Coerce(int), vol.Range(min=15)),
vol.Optional(
CONF_ZONEDUMP_INTERVAL,
default=DEFAULT_ZONEDUMP_INTERVAL): vol.Coerce(int),
vol.Optional(
CONF_TIMEOUT,
default=DEFAULT_TIMEOUT): vol.Coerce(int),
}),
}, extra=vol.ALLOW_EXTRA)
SERVICE_CUSTOM_FUNCTION = 'invoke_custom_function'
ATTR_CUSTOM_FUNCTION = 'pgm'
ATTR_PARTITION = 'partition'
SERVICE_SCHEMA = vol.Schema({
vol.Required(ATTR_CUSTOM_FUNCTION): cv.string,
vol.Required(ATTR_PARTITION): cv.string,
})
async def async_setup(hass, config):
"""Set up for Envisalink devices."""
from pyenvisalink import EnvisalinkAlarmPanel
conf = config.get(DOMAIN)
host = conf.get(CONF_HOST)
port = conf.get(CONF_EVL_PORT)
code = conf.get(CONF_CODE)
panel_type = conf.get(CONF_PANEL_TYPE)
panic_type = conf.get(CONF_PANIC)
version = conf.get(CONF_EVL_VERSION)
user = conf.get(CONF_USERNAME)
password = conf.get(CONF_PASS)
keep_alive = conf.get(CONF_EVL_KEEPALIVE)
zone_dump = conf.get(CONF_ZONEDUMP_INTERVAL)
zones = conf.get(CONF_ZONES)
partitions = conf.get(CONF_PARTITIONS)
connection_timeout = conf.get(CONF_TIMEOUT)
sync_connect = asyncio.Future(loop=hass.loop)
controller = EnvisalinkAlarmPanel(
host, port, panel_type, version, user, password, zone_dump,
keep_alive, hass.loop, connection_timeout)
hass.data[DATA_EVL] = controller
@callback
def login_fail_callback(data):
"""Handle when the evl rejects our login."""
_LOGGER.error("The Envisalink rejected your credentials")
if not sync_connect.done():
sync_connect.set_result(False)
@callback
def connection_fail_callback(data):
"""Network failure callback."""
_LOGGER.error("Could not establish a connection with the Envisalink")
if not sync_connect.done():
sync_connect.set_result(False)
@callback
def connection_success_callback(data):
"""Handle a successful connection."""
_LOGGER.info("Established a connection with the Envisalink")
if not sync_connect.done():
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP,
stop_envisalink)
sync_connect.set_result(True)
@callback
def zones_updated_callback(data):
"""Handle zone timer updates."""
_LOGGER.debug("Envisalink sent a zone update event. Updating zones...")
async_dispatcher_send(hass, SIGNAL_ZONE_UPDATE, data)
@callback
def alarm_data_updated_callback(data):
"""Handle non-alarm based info updates."""
_LOGGER.debug("Envisalink sent new alarm info. Updating alarms...")
async_dispatcher_send(hass, SIGNAL_KEYPAD_UPDATE, data)
@callback
def partition_updated_callback(data):
"""Handle partition changes thrown by evl (including alarms)."""
_LOGGER.debug("The envisalink sent a partition update event")
async_dispatcher_send(hass, SIGNAL_PARTITION_UPDATE, data)
@callback
def stop_envisalink(event):
"""Shutdown envisalink connection and thread on exit."""
_LOGGER.info("Shutting down Envisalink")
controller.stop()
async def handle_custom_function(call):
"""Handle custom/PGM service."""
custom_function = call.data.get(ATTR_CUSTOM_FUNCTION)
partition = call.data.get(ATTR_PARTITION)
controller.command_output(code, partition, custom_function)
controller.callback_zone_timer_dump = zones_updated_callback
controller.callback_zone_state_change = zones_updated_callback
controller.callback_partition_state_change = partition_updated_callback
controller.callback_keypad_update = alarm_data_updated_callback
controller.callback_login_failure = login_fail_callback
controller.callback_login_timeout = connection_fail_callback
controller.callback_login_success = connection_success_callback
_LOGGER.info("Start envisalink.")
controller.start()
result = await sync_connect
if not result:
return False
# Load sub-components for Envisalink
if partitions:
hass.async_create_task(async_load_platform(
hass, 'alarm_control_panel', 'envisalink', {
CONF_PARTITIONS: partitions,
CONF_CODE: code,
CONF_PANIC: panic_type
}, config
))
hass.async_create_task(async_load_platform(
hass, 'sensor', 'envisalink', {
CONF_PARTITIONS: partitions,
CONF_CODE: code
}, config
))
if zones:
hass.async_create_task(async_load_platform(
hass, 'binary_sensor', 'envisalink', {
CONF_ZONES: zones
}, config
))
hass.services.async_register(DOMAIN,
SERVICE_CUSTOM_FUNCTION,
handle_custom_function,
schema=SERVICE_SCHEMA)
return True
class EnvisalinkDevice(Entity):
"""Representation of an Envisalink device."""
def __init__(self, name, info, controller):
"""Initialize the device."""
self._controller = controller
self._info = info
self._name = name
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def should_poll(self):
"""No polling needed."""
return False
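# ---------------------------------------------------------------------------
# Illustrative sketch -- not part of the original component.  It runs a
# minimal configuration dict through CONFIG_SCHEMA; the host, credentials,
# zone and partition values below are made-up placeholders.
if __name__ == '__main__':
    _example_conf = CONFIG_SCHEMA({
        DOMAIN: {
            CONF_HOST: '192.0.2.10',
            CONF_PANEL_TYPE: 'DSC',
            CONF_USERNAME: 'evl_user',
            CONF_PASS: 'evl_password',
            CONF_ZONES: {1: {CONF_ZONENAME: 'Front door'}},
            CONF_PARTITIONS: {1: {CONF_PARTITIONNAME: 'House'}},
        },
    })
    # Optional keys such as the port and EVL version are filled in with the
    # defaults declared above (4025 and 3 respectively).
    print(_example_conf[DOMAIN][CONF_EVL_PORT],
          _example_conf[DOMAIN][CONF_EVL_VERSION])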
| {
"content_hash": "c0c434718dcabc10951e7809e9019c60",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 79,
"avg_line_length": 34.06060606060606,
"alnum_prop": 0.6562023385866802,
"repo_name": "jnewland/home-assistant",
"id": "d7a015e8e4571077d623b0a354cfb3b39db10bbb",
"size": "7868",
"binary": false,
"copies": "5",
"ref": "refs/heads/ci",
"path": "homeassistant/components/envisalink/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "15240512"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17862"
}
],
"symlink_target": ""
} |
import sys
from copy import deepcopy
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, SimpleTestCase, override_settings
from mock import patch
from django_auth_adfs.config import django_settings
from django_auth_adfs.config import Settings
from .custom_config import Settings as CustomSettings
class SettingsTests(TestCase):
def test_no_settings(self):
settings = deepcopy(django_settings)
del settings.AUTH_ADFS
with patch("django_auth_adfs.config.django_settings", settings):
with self.assertRaises(ImproperlyConfigured):
Settings()
def test_claim_mapping_overlapping_username_field(self):
settings = deepcopy(django_settings)
settings.AUTH_ADFS["CLAIM_MAPPING"] = {"username": "samaccountname"}
with patch("django_auth_adfs.config.django_settings", settings):
with self.assertRaises(ImproperlyConfigured):
Settings()
def test_tenant_and_server(self):
settings = deepcopy(django_settings)
settings.AUTH_ADFS["TENANT_ID"] = "abc"
settings.AUTH_ADFS["SERVER"] = "abc"
with patch("django_auth_adfs.config.django_settings", settings):
with self.assertRaises(ImproperlyConfigured):
Settings()
def test_no_tenant_but_block_guest(self):
settings = deepcopy(django_settings)
settings.AUTH_ADFS["SERVER"] = "abc"
settings.AUTH_ADFS["BLOCK_GUEST_USERS"] = True
with patch("django_auth_adfs.config.django_settings", settings):
with self.assertRaises(ImproperlyConfigured):
Settings()
def test_tenant_with_block_users(self):
settings = deepcopy(django_settings)
del settings.AUTH_ADFS["SERVER"]
settings.AUTH_ADFS["TENANT_ID"] = "abc"
settings.AUTH_ADFS["BLOCK_GUEST_USERS"] = True
with patch("django_auth_adfs.config.django_settings", settings):
current_settings = Settings()
self.assertTrue(current_settings.BLOCK_GUEST_USERS)
def test_unknown_setting(self):
settings = deepcopy(django_settings)
settings.AUTH_ADFS["dummy"] = "abc"
with patch("django_auth_adfs.config.django_settings", settings):
with self.assertRaises(ImproperlyConfigured):
Settings()
def test_required_setting(self):
settings = deepcopy(django_settings)
del settings.AUTH_ADFS["AUDIENCE"]
with patch("django_auth_adfs.config.django_settings", settings):
with self.assertRaises(ImproperlyConfigured):
Settings()
def test_default_failed_response_setting(self):
settings = deepcopy(django_settings)
with patch("django_auth_adfs.config.django_settings", settings):
s = Settings()
self.assertTrue(callable(s.CUSTOM_FAILED_RESPONSE_VIEW))
def test_dotted_path_failed_response_setting(self):
settings = deepcopy(django_settings)
settings.AUTH_ADFS["CUSTOM_FAILED_RESPONSE_VIEW"] = 'tests.views.test_failed_response'
with patch("django_auth_adfs.config.django_settings", settings):
s = Settings()
self.assertTrue(callable(s.CUSTOM_FAILED_RESPONSE_VIEW))
def test_settings_version(self):
settings = deepcopy(django_settings)
current_settings = Settings()
self.assertEqual(current_settings.VERSION, "v1.0")
settings.AUTH_ADFS["TENANT_ID"] = "abc"
del settings.AUTH_ADFS["SERVER"]
settings.AUTH_ADFS["VERSION"] = "v2.0"
with patch("django_auth_adfs.config.django_settings", settings):
current_settings = Settings()
self.assertEqual(current_settings.VERSION, "v2.0")
def test_not_azure_but_version_is_set(self):
settings = deepcopy(django_settings)
settings.AUTH_ADFS["SERVER"] = "abc"
settings.AUTH_ADFS["VERSION"] = "v2.0"
with patch("django_auth_adfs.config.django_settings", settings):
with self.assertRaises(ImproperlyConfigured):
Settings()
class CustomSettingsTests(SimpleTestCase):
def setUp(self):
sys.modules.pop('django_auth_adfs.config', None)
def tearDown(self):
sys.modules.pop('django_auth_adfs.config', None)
def test_dotted_path(self):
auth_adfs = deepcopy(django_settings).AUTH_ADFS
auth_adfs['SETTINGS_CLASS'] = 'tests.custom_config.Settings'
with override_settings(AUTH_ADFS=auth_adfs):
from django_auth_adfs.config import settings
self.assertIsInstance(settings, CustomSettings)
| {
"content_hash": "1684a60d8e6f645811d91df212a0b9af",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 94,
"avg_line_length": 41.50892857142857,
"alnum_prop": 0.6601419660141966,
"repo_name": "jobec/django-auth-adfs",
"id": "96a8b98eb4c3e19b663a3447ba4fe7d65735f4af",
"size": "4649",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_settings.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "652"
},
{
"name": "PowerShell",
"bytes": "35585"
},
{
"name": "Python",
"bytes": "63405"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dictionary', '0008_auto_20160410_2229'),
]
operations = [
migrations.RemoveField(
model_name='wordcontent',
name='references',
),
migrations.AddField(
model_name='wordcontent',
name='references',
field=models.ManyToManyField(null=True, related_name='words_referenced', to='dictionary.WordContentReference'),
),
]
| {
"content_hash": "89cd3170a7086b0b5d62330e93052ba1",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 123,
"avg_line_length": 26.045454545454547,
"alnum_prop": 0.6090750436300174,
"repo_name": "nirvaris/nirvaris-dictionary",
"id": "12161282f00bf925135425e32f4e1581147a1bd1",
"size": "643",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dictionary/migrations/0009_auto_20160410_2232.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4056"
},
{
"name": "HTML",
"bytes": "19724"
},
{
"name": "JavaScript",
"bytes": "882"
},
{
"name": "Python",
"bytes": "63545"
}
],
"symlink_target": ""
} |
import wevote_functions.admin
logger = wevote_functions.admin.get_logger(__name__)
# NOTE: @login_required() throws an error. Needs to be figured out if we ever want to secure this page.
def export_reaction_like_data_view():
pass
| {
"content_hash": "6e41f54c1884766578c7c04d9d3379f7",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 103,
"avg_line_length": 29.625,
"alnum_prop": 0.7383966244725738,
"repo_name": "wevote/WeVoteServer",
"id": "0471da6d0f75dcdbd9f9ac460ab4458a7d0310f8",
"size": "361",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "reaction/views_admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3612"
},
{
"name": "HTML",
"bytes": "1559624"
},
{
"name": "JavaScript",
"bytes": "26822"
},
{
"name": "Procfile",
"bytes": "51"
},
{
"name": "Python",
"bytes": "11943600"
},
{
"name": "Shell",
"bytes": "587"
}
],
"symlink_target": ""
} |
from google.cloud import gke_multicloud_v1
async def sample_update_aws_node_pool():
# Create a client
client = gke_multicloud_v1.AwsClustersAsyncClient()
# Initialize request argument(s)
aws_node_pool = gke_multicloud_v1.AwsNodePool()
aws_node_pool.version = "version_value"
aws_node_pool.config.iam_instance_profile = "iam_instance_profile_value"
aws_node_pool.config.config_encryption.kms_key_arn = "kms_key_arn_value"
aws_node_pool.autoscaling.min_node_count = 1489
aws_node_pool.autoscaling.max_node_count = 1491
aws_node_pool.subnet_id = "subnet_id_value"
aws_node_pool.max_pods_constraint.max_pods_per_node = 1798
request = gke_multicloud_v1.UpdateAwsNodePoolRequest(
aws_node_pool=aws_node_pool,
)
# Make the request
operation = client.update_aws_node_pool(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
# [END gkemulticloud_v1_generated_AwsClusters_UpdateAwsNodePool_async]
| {
"content_hash": "9dec4bafcfb39a74211ad98691edfad5",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 76,
"avg_line_length": 33.28125,
"alnum_prop": 0.7173708920187793,
"repo_name": "googleapis/python-gke-multicloud",
"id": "94817ec2ee0a0309f3eac3994bac5aeffe36810c",
"size": "2470",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/gkemulticloud_v1_generated_aws_clusters_update_aws_node_pool_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1629065"
},
{
"name": "Shell",
"bytes": "30684"
}
],
"symlink_target": ""
} |
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
ext_modules = [
Extension(
"enet",
extra_compile_args=["-O3"],
sources=["enet.pyx"],
libraries=["enet"])]
setup(
name = 'enet',
cmdclass = {'build_ext': build_ext},
ext_modules = ext_modules
)
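# ---------------------------------------------------------------------------
# Illustrative note -- not part of the original file.  With Cython and the
# enet library/headers installed, the extension is typically compiled in
# place with:
#
#     python setup.py build_ext --inplace
#
# after which ``import enet`` loads the compiled module.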
| {
"content_hash": "11dbabc9b8b5dabd0c42e75c20db6436",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 41,
"avg_line_length": 21.9375,
"alnum_prop": 0.6353276353276354,
"repo_name": "agrif/pyenet",
"id": "33e418422a778d9cc943749d65e0b6f8cd15e350",
"size": "351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "40495"
}
],
"symlink_target": ""
} |
import os
import sys
from accounts.models import User
from django.conf import settings
from django.contrib.auth.models import Group
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.test.utils import override_settings
from pyvirtualdisplay import Display
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, \
WebDriverException
from unittest import skipUnless
import pytest
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from accounts.authentication import WocatCMSAuthenticationBackend
from accounts.client import WocatWebsiteUserClient
from qcat.tests import TEST_CACHES
from unittest.mock import patch
from accounts.tests.test_models import create_new_user
from sample.tests.test_views import route_questionnaire_details as \
route_questionnaire_details_sample
loginRouteName = 'login'
def check_firefox_path():
"""
Check if a path for Firefox to be used by Selenium is specified in
the (local) settings.
Returns:
``bool``. Returns ``True`` if the setting
``TESTING_FIREFOX_PATH`` is set. Returns ``False`` if the
setting is not present or empty.
"""
try:
if settings.TESTING_FIREFOX_PATH != '':
return True
except:
pass
return False
@skipUnless(check_firefox_path(), "Firefox path not specified")
@override_settings(
CACHES=TEST_CACHES,
DEBUG=True,
LANGUAGES=(('en', 'English'), ('es', 'Spanish'), ('fr', 'French')),
)
@pytest.mark.functional
class FunctionalTest(StaticLiveServerTestCase):
def setUp(self):
"""
        Use Chrome as the browser for functional tests.
Create a virtual display, so the browser doesn't keep popping up.
"""
if '-pop' not in sys.argv[1:] and settings.TESTING_POP_BROWSER is False:
self.display = Display(visible=0, size=(1600, 900))
self.display.start()
self.browser = webdriver.Chrome(
executable_path=settings.TESTING_CHROMEDRIVER_PATH)
self.browser.implicitly_wait(3)
def tearDown(self):
# self.save_failed_screenshots()
self.browser.quit()
if '-pop' not in sys.argv[1:] and settings.TESTING_POP_BROWSER is False:
self.display.stop()
def save_failed_screenshots(self):
if self._outcome.errors:
path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'failed_screenshots')
if not os.path.exists(path):
os.makedirs(path)
file = os.path.join(path, f'failed_{self.id()}.png')
self.browser.save_screenshot(file)
def create_new_user(
self, email: str='[email protected]', last_name: str='foo',
first_name: str='bar', groups: list=None) -> User:
defaults = {
'firstname': first_name,
'lastname': last_name
}
user, __ = User.objects.get_or_create(email=email, defaults=defaults)
for group in groups or []:
# 1. Translators
# 2. Administrators
# 3. Reviewers
# 4. Publishers
# 5. WOCAT Secretariat
user.groups.add(Group.objects.get(name=group))
return user
def findByNot(self, by, el):
try:
self.browser.implicitly_wait(0)
if by == 'class_name':
self.browser.find_element_by_class_name(el)
elif by == 'link_text':
self.browser.find_element_by_link_text(el)
elif by == 'name':
self.browser.find_element_by_name(el)
elif by == 'xpath':
self.browser.find_element_by_xpath(el)
elif by == 'id':
self.browser.find_element_by_id(el)
else:
self.fail('Argument "by" = "%s" is not valid.' % by)
self.fail('Element %s was found when it should not be' % el)
except NoSuchElementException:
pass
def findBy(self, by, el, base=None, wait=False):
if base is None:
base = self.browser
if wait is True:
self.wait_for(by, el)
f = None
try:
if by == 'class_name':
f = base.find_element_by_class_name(el)
elif by == 'link_text':
f = base.find_element_by_link_text(el)
elif by == 'name':
f = base.find_element_by_name(el)
elif by == 'xpath':
f = base.find_element_by_xpath(el)
elif by == 'id':
f = base.find_element_by_id(el)
else:
self.fail('Argument "by" = "%s" is not valid.' % by)
except NoSuchElementException:
self.fail('Element %s was not found by %s' % (el, by))
return f
def findManyBy(self, by, el, base=None):
if base is None:
base = self.browser
f = None
try:
if by == 'class_name':
f = base.find_elements_by_class_name(el)
elif by == 'link_text':
f = base.find_elements_by_link_text(el)
elif by == 'name':
f = base.find_elements_by_name(el)
elif by == 'xpath':
f = base.find_elements_by_xpath(el)
elif by == 'id':
f = base.find_elements_by_id(el)
else:
self.fail('Argument "by" = "%s" is not valid.' % by)
except NoSuchElementException:
self.fail('Elements %s were not found by %s' % (el, by))
return f
def wait_for(self, by, el, visibility=True):
if by == 'class_name':
locator = By.CLASS_NAME
elif by == 'xpath':
locator = By.XPATH
elif by == 'id':
locator = By.ID
elif by == 'name':
locator = By.NAME
else:
self.fail('Argument "by" = "%s" is not valid.' % by)
if visibility is True:
condition = EC.visibility_of_element_located((locator, el))
else:
condition = EC.invisibility_of_element_located((locator, el))
WebDriverWait(self.browser, 10).until(condition)
def changeHiddenInput(self, el, val):
self.browser.execute_script('''
var elem = arguments[0];
var value = arguments[1];
elem.value = value;
''', el, val)
def rearrangeFormHeader(self):
"""
Use this function to rearrange the fixed header of the form if it is
blocking certain elements, namely when using headless browser for
testing. Sets the header to "position: relative".
"""
form_header = self.findBy(
'xpath', '//header[contains(@class, "wizard-header")]')
self.browser.execute_script(
'arguments[0].style.position = "relative";', form_header)
def rearrangeStickyMenu(self):
"""
Use this function to rearrange the fixed sticky menu if it is blocking
certain elements, namely when using headless browser for testing. Sets
it to "position: relative".
"""
pass
# sticky = self.findBy('class_name', 'sticky-menu-outer')
# self.browser.execute_script(
# 'arguments[0].style.position = "absolute";', sticky)
def rearrange_notifications(self):
notifications = self.findBy('class_name', 'notification-group')
self.browser.execute_script(
'arguments[0].style.position = "relative";', notifications)
def screenshot(self, filename='screenshot.png'):
self.browser.save_screenshot(filename)
def form_click_add_more(self, questiongroup_keyword):
self.findBy(
'xpath',
'//a[@data-add-item and @data-questiongroup-keyword="{}"]'.format(
questiongroup_keyword)).click()
def review_action(
self, action, exists_only=False, exists_not=False,
expected_msg_class='success'):
"""
Handle review actions which trigger a modal.
Args:
action: One of
- 'edit'
- 'view'
- 'submit'
                - 'review'
- 'publish'
- 'reject'
- 'flag-unccd'
- 'unflag-unccd'
exists_only: Only check that the modal is opened without triggering
the action.
expected_msg_class: str.
Returns:
"""
if action == 'view':
btn = self.findBy(
'xpath', '//form[@id="review_form"]//a[text()="View"]')
if exists_only is True:
return btn
btn.click()
return
if exists_not is True:
self.findByNot(
'xpath', '//a[@data-reveal-id="confirm-{}"]'.format(action))
return
self.findBy(
'xpath', '//a[@data-reveal-id="confirm-{}"]'.format(action)).click()
btn_xpath = '//button[@name="{}"]'.format(action)
if action == 'edit':
# No button for "edit"
btn_xpath = '//a[@type="submit"]'
WebDriverWait(self.browser, 10).until(
EC.visibility_of_element_located(
(By.XPATH, btn_xpath)))
if action == 'reject':
self.findBy('name', 'reject-message').send_keys("spam")
if exists_only is True:
self.findBy(
'xpath', '//div[contains(@class, "reveal-modal") and contains('
'@class, "open")]//a[contains(@class, '
'"close-reveal-modal")]', wait=True).click()
import time; time.sleep(1)
return
self.wait_for('xpath', btn_xpath)
# Sometimes, the click on the button in the modal happens too fast
# (modal not yet correctly displayed), which results in an error saying
# another (underlying) element would receive the click. In this case,
# simply try again (hopefully modal showing by now)
try:
self.findBy('xpath', btn_xpath).click()
except WebDriverException:
self.findBy('xpath', btn_xpath).click()
self.findBy(
'xpath', '//div[contains(@class, "{}")]'.format(expected_msg_class))
if action not in ['reject', 'delete']:
self.toggle_all_sections()
def submit_form_step(self):
self.findBy('id', 'button-submit').click()
self.findBy('xpath', '//div[contains(@class, "success")]')
self.toggle_all_sections()
def click_edit_section(
self, section_identifier, return_button=False, exists_not=False):
btn_xpath = '//a[contains(@href, "/edit/") and contains(@href, "{}")]'.\
format(section_identifier)
if exists_not is True:
self.findByNot('xpath', btn_xpath)
return
self.wait_for('xpath', btn_xpath)
btn = self.findBy('xpath', btn_xpath)
if return_button is True:
return btn
        # Clicking does not work reliably. Instead, open the URL manually.
self.browser.get(btn.get_attribute('href'))
self.rearrangeFormHeader()
def toggle_all_sections(self):
self.wait_for('class_name', 'js-expand-all-sections')
# Remove all notifications so the buttons to expand the sections are
# clickable
self.hide_notifications()
links = self.findManyBy('class_name', 'js-expand-all-sections')
for link in reversed(links):
link.click()
def open_questionnaire_details(self, configuration, identifier=None):
route = route_questionnaire_details_sample
self.browser.get(self.live_server_url + reverse(
route, kwargs={'identifier': identifier}))
self.toggle_all_sections()
def toggle_selected_advanced_filters(self, display: bool=True) -> None:
"""Toggle the panel with selected advanced filters"""
filter_panel_xpath = '//div[contains(@class, "selected-advanced-filters")]'
filter_panel = self.findBy('xpath', filter_panel_xpath)
if filter_panel.is_displayed() != display:
self.findBy('xpath',
'//a[@data-toggle="js-selected-advanced-filters"]').click()
self.wait_for('xpath', filter_panel_xpath)
def open_advanced_filter(self, configuration: str) -> None:
"""
Assuming that you are on search page, click the link to open the
advanced filter of a given configuration
"""
self.findBy('xpath',
f'//a[contains(@class, "js-filter-advanced-type") and '
f'@data-type="{configuration}"]').click()
def add_advanced_filter(self, key: str, value: str) -> None:
"""Add a new advanced filter"""
# Toggle the filter panel if it is not open yet
self.toggle_selected_advanced_filters(display=True)
# Select the last <select> available
filter_row_xpath = '(//div[contains(@class, "selected-advanced-filters")]/div[contains(@class, "js-filter-item")])[last()]'
filter_row = self.findBy('xpath', filter_row_xpath)
filter_select_xpath = f'//select[contains(@class, "filter-key-select")]'
select = Select(self.findBy('xpath', filter_select_xpath, base=filter_row))
# If it already has a key selected, click "add filter" to add a new row
# and select the <select> again
if select.first_selected_option.text != '---':
self.findBy('id', 'filter-add-new').click()
filter_row = self.findBy('xpath', filter_row_xpath)
select = Select(
self.findBy('xpath', filter_select_xpath, base=filter_row))
# Select the key, wait for the values to be loaded and select one
select.select_by_value(key)
self.wait_for('xpath', filter_row_xpath + '//div[contains(@class, "loading-indicator-filter-key")]', visibility=False)
self.findBy('xpath', f'//div[contains(@class, "filter-value-column")]//input[@value="{value}"]', base=filter_row).click()
self.apply_filter()
def remove_filter(self, index):
"""
Remove the filter at a given (0-based) index. If index is None, all
filters are removed!
"""
curr_index = index
if curr_index is None:
curr_index = 0
self.findBy(
'xpath',
f'(//ul[@class="filter-list"]/li/span/a)[{curr_index + 1}]/'
f'*[contains(@class, "icon")]').click()
self.wait_for('class_name', 'loading-indicator', visibility=False)
if index is None:
try:
self.remove_filter(index=None)
except AssertionError:
pass
def get_active_filters(self, has_any=None) -> list:
"""
Return a list of all active filters. If has_any is a boolean, it is
checked whether there are any active filters or not.
"""
active_filters = self.findManyBy(
'xpath', '//div[@id="active-filters"]//li')
if has_any is not None:
active_filter_panel = self.findBy(
'xpath', '//div[@id="active-filters"]/div')
self.assertEqual(has_any, active_filter_panel.is_displayed())
if has_any is False:
self.assertEqual(len(active_filters), 0)
else:
self.assertNotEqual(len(active_filters), 0)
return active_filters
def apply_filter(self):
self.findBy(
'xpath', '//input[contains(@class, "search-submit")]').click()
self.wait_for('class_name', 'loading-indicator', visibility=False)
def check_list_results(self, expected: list, count: bool=True):
"""
Args:
expected: list of dicts. Can contain
- title
- description
- translations (list)
"""
if count is True:
list_entries = self.findManyBy(
'xpath', '//article[contains(@class, "tech-item")]')
self.assertEqual(len(list_entries), len(expected))
for i, e in enumerate(expected):
i_xpath = i + 1
if e.get('title') is not None:
title = e['title']
self.findBy(
'xpath',
f'(//article[contains(@class, "tech-item")])[{i_xpath}]//'
f'a[contains(text(), "{title}")]')
if e.get('description'):
description = e['description']
self.findBy(
'xpath',
f'(//article[contains(@class, "tech-item")])[{i_xpath}]//'
f'p[contains(text(), "{description}")]')
if e.get('status'):
status = e['status']
xpath = f'(//article[contains(@class, "tech-item")])[{i_xpath}]' \
f'//span[contains(@class, "tech-status") and ' \
f'contains(@class, "is-{status}")]'
if status == 'public':
self.findByNot('xpath', xpath)
else:
self.findBy('xpath', xpath)
for lang in e.get('translations', []):
self.findBy(
'xpath',
f'(//article[contains(@class, "tech-item")])[{i_xpath}]//'
f'a[contains(text(), "{lang}")]')
def get_compiler(self) -> str:
"""From the details view, return the name of the compiler"""
return self.findBy(
'xpath',
'//ul[@class="tech-infos"]/li/span[text()="Compiler:"]/../a'
).text
def get_editors(self) -> list:
"""From the details view, return the names of the editors"""
editors = []
for el in self.findManyBy(
'xpath',
'//ul[@class="tech-infos"]/li/span[text()="Editors:"]/../a'):
editors.append(el.text)
return editors
def checkOnPage(self, text):
xpath = '//*[text()[contains(.,"{}")]]'.format(text)
WebDriverWait(self.browser, 10).until(
EC.visibility_of_element_located(
(By.XPATH, xpath)))
def scroll_to_element(self, el):
self.browser.execute_script("return arguments[0].scrollIntoView();", el)
def set_input_value(self, element, value):
if not isinstance(element, WebElement):
element = self.findBy('id', element)
self.browser.execute_script("""
var element = arguments[0];
element.setAttribute('value', '{}')
""".format(value), element)
def get_text_excluding_children(self, element):
return self.browser.execute_script("""
return jQuery(arguments[0]).contents().filter(function() {
return this.nodeType == Node.TEXT_NODE;
}).text();
""", element)
def hide_notifications(self):
for el in self.findManyBy(
'xpath', '//div[contains(@class, "notification alert-box")]'):
self.browser.execute_script("""
var element = arguments[0];
element.parentNode.removeChild(element);
""", el)
def select_chosen_element(self, chosen_id: str, chosen_value: str):
chosen_el = self.findBy('xpath', '//div[@id="{}"]'.format(chosen_id))
self.scroll_to_element(chosen_el)
chosen_el.click()
self.findBy(
'xpath', '//div[@id="{}"]//ul[@class="chosen-results"]/li[text()='
'"{}"]'.format(chosen_id, chosen_value)).click()
def clickUserMenu(self, user):
self.findBy(
'xpath', '//li[contains(@class, "has-dropdown")]/a[contains(text(),'
' "{}")]'.format(user)).click()
def changeLanguage(self, locale):
self.findBy(
'xpath', '//li[contains(@class, "has-dropdown") and contains('
'@class, "top-bar-lang")]/a').click()
self.findBy('xpath', '//a[@data-language="{}"]'.format(locale)).click()
def doLogin(self, user=None):
"""
A user is required for the login, this is a convenience wrapper to
login a non-specified user.
"""
self.doLogout()
self._doLogin(user or create_new_user())
@patch.object(WocatCMSAuthenticationBackend, 'authenticate')
@patch.object(WocatWebsiteUserClient, 'get_and_update_django_user')
@patch('django.contrib.auth.authenticate')
def _doLogin(self, user, mock_authenticate,
mock_cms_get_and_update_django_user, mock_cms_authenticate):
"""
Mock the authentication to return the given user and put it to the
session - django.contrib.auth.login handles this.
Set the cookie so the custom middleware doesn't force-validate the login
against the login API.
"""
auth_user = user
auth_user.backend = 'accounts.authentication.WocatCMSAuthenticationBackend'
mock_authenticate.return_value = user
mock_authenticate.__name__ = ''
mock_cms_authenticate.return_value = user
mock_cms_get_and_update_django_user.return_value = user
self.client.login(username='spam', password='eggs')
# note the difference: self.client != self.browser, copy the cookie.
self.browser.add_cookie({
'name': 'sessionid',
'value': self.client.cookies['sessionid'].value
})
self.browser.get(self.live_server_url + reverse(loginRouteName))
def doLogout(self):
try:
self.browser.find_element_by_xpath(
'//li[contains(@class, "user-menu")]/a').click()
self.browser.find_element_by_xpath(
'//ul[@class="dropdown"]/li/a[contains(@href, "/accounts/logout/")]').click()
except NoSuchElementException:
pass
self.browser.delete_cookie('fe_typo_user')
self.browser.get(self.live_server_url + '/404_no_such_url/')
def get_mock_remote_user_client_search(self, name):
"""
Simulate a response from the remote auth provider when searching for a
user. Searches in the local database instead and returns all users
        matching the query (case insensitive search by first or last name).
Usually, you will want to use this in combination with
get_mock_remote_user_client_user_information (see below) to also
correctly update the user in the database (e.g. when changing compiler
or adding reviewer)
Usage in tests:
Patch the remote client's search_users function in the view and set this
function as side_effect. Similar with remote client's
get_user_information.
Example:
@patch('questionnaire.utils.remote_user_client.get_user_information')
@patch('accounts.views.remote_user_client.search_users')
def test_something(self, mock_search_users, mock_user_information):
mock_search_users.side_effect = self.get_mock_remote_user_client_search
mock_user_information.side_effect = self.get_mock_remote_user_client_user_information
# ...
"""
users = User.objects.filter(
Q(firstname__icontains=name) | Q(lastname__icontains=name))
return {
'success': True,
'users': [
self._mock_remote_client_user_details(user)
for user in users
]
}
def get_mock_remote_user_client_user_information(self, user_id):
"""
Simulate a response from the remote auth provider when retrieving the
user details needed to update the user. Searches in the local database
instead (lookup by ID).
Usually, you will want to use this in combination with
get_mock_remote_user_client_search (see above).
Usage in tests:
Patch the remote client's get_user_information function where it is used
(e.g. in questionnaire.utils when changing users of a questionnaire) and
set this function as side_effect. Similar with remote client's
search_users.
Example:
@patch('questionnaire.utils.remote_user_client.get_user_information')
@patch('accounts.views.remote_user_client.search_users')
def test_something(self, mock_search_users, mock_user_information):
mock_search_users.side_effect = self.get_mock_remote_user_client_search
mock_user_information.side_effect = self.get_mock_remote_user_client_user_information
# ...
"""
user = User.objects.get(pk=user_id)
return self._mock_remote_client_user_details(user)
def _mock_remote_client_user_details(self, user):
# Helper to format the user details similar to the remote client's
        # response.
return {
'uid': user.pk,
'username': user.email,
'email': user.email,
'first_name': user.firstname,
'last_name': user.lastname,
}
def dropImage(self, dropzone_id):
self.browser.execute_script(
"function base64toBlob(b64Data, contentType, sliceSize) "
"{contentType = contentType || '';sliceSize = sliceSize || 512;"
"var byteCharacters = atob(b64Data);var byteArrays = [];for (var "
"offset = 0; offset < byteCharacters.length; offset += sliceSize) "
"{var slice = byteCharacters.slice(offset, offset + sliceSize);"
"var byteNumbers = new Array(slice.length);for (var i = 0; i < "
"slice.length; i++) {byteNumbers[i] = slice.charCodeAt(i);}var "
"byteArray = new Uint8Array(byteNumbers);byteArrays.push("
"byteArray);}var blob = new Blob(byteArrays, {type: "
"contentType});return blob;}var base64Image = "
"'R0lGODlhPQBEAPeoAJosM//AwO/AwHVYZ/z595kzAP/s7P+goOXMv8+fhw/v739/"
"f+8PD98fH/8mJl+fn/9ZWb8/PzWlwv///6wWGbImAPgTEMImIN9gUFCEm/"
"gDALULDN8PAD6atYdCTX9gUNKlj8wZAKUsAOzZz+UMAOsJAP/Z2ccMDA8PD/95eX5"
"NWvsJCOVNQPtfX/8zM8+QePLl38MGBr8JCP+zs9myn/8GBqwpAP/GxgwJCPny78lz"
"YLgjAJ8vAP9fX/+MjMUcAN8zM/9wcM8ZGcATEL+QePdZWf/29uc/P9cmJu9MTDImI"
"N+/r7+/vz8/P8VNQGNugV8AAF9fX8swMNgTAFlDOICAgPNSUnNWSMQ5MBAQEJE3QP"
"IGAM9AQMqGcG9vb6MhJsEdGM8vLx8fH98AANIWAMuQeL8fABkTEPPQ0OM5OSYdGFl"
"5jo+Pj/+pqcsTE78wMFNGQLYmID4dGPvd3UBAQJmTkP+8vH9QUK+vr8ZWSHpzcJMm"
"ILdwcLOGcHRQUHxwcK9PT9DQ0O/v70w5MLypoG8wKOuwsP/g4P/Q0IcwKEswKMl8a"
"J9fX2xjdOtGRs/Pz+Dg4GImIP8gIH0sKEAwKKmTiKZ8aB/f39Wsl+LFt8dgUE9PT5"
"x5aHBwcP+AgP+WltdgYMyZfyywz78AAAAAAAD///8AAP9mZv///wAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAACH5BAEAAKgALAAAAAA9AEQAAAj/AFEJHEiwoMGDCBMqXMiwocAbBww4n"
"EhxoYkUpzJGrMixogkfGUNqlNixJEIDB0SqHGmyJSojM1bKZOmyop0gM3Oe2liTIS"
"KMOoPy7GnwY9CjIYcSRYm0aVKSLmE6nfq05QycVLPuhDrxBlCtYJUqNAq2bNWEBj6"
"ZXRuyxZyDRtqwnXvkhACDV+euTeJm1Ki7A73qNWtFiF+/gA95Gly2CJLDhwEHMOUA"
"AuOpLYDEgBxZ4GRTlC1fDnpkM+fOqD6DDj1aZpITp0dtGCDhr+fVuCu3zlg49ijao"
"kTZTo27uG7Gjn2P+hI8+PDPERoUB318bWbfAJ5sUNFcuGRTYUqV/3ogfXp1rWlMc6"
"awJjiAAd2fm4ogXjz56aypOoIde4OE5u/F9x199dlXnnGiHZWEYbGpsAEA3QXYnHw"
"EFliKAgswgJ8LPeiUXGwedCAKABACCN+EA1pYIIYaFlcDhytd51sGAJbo3onOpaji"
"ihlO92KHGaUXGwWjUBChjSPiWJuOO/LYIm4v1tXfE6J4gCSJEZ7YgRYUNrkji9P55"
"sF/ogxw5ZkSqIDaZBV6aSGYq/lGZplndkckZ98xoICbTcIJGQAZcNmdmUc210hs35"
"nCyJ58fgmIKX5RQGOZowxaZwYA+JaoKQwswGijBV4C6SiTUmpphMspJx9unX4Kaim"
"jDv9aaXOEBteBqmuuxgEHoLX6Kqx+yXqqBANsgCtit4FWQAEkrNbpq7HSOmtwag5w"
"57GrmlJBASEU18ADjUYb3ADTinIttsgSB1oJFfA63bduimuqKB1keqwUhoCSK374w"
"bujvOSu4QG6UvxBRydcpKsav++Ca6G8A6Pr1x2kVMyHwsVxUALDq/krnrhPSOzXG1"
"lUTIoffqGR7Goi2MAxbv6O2kEG56I7CSlRsEFKFVyovDJoIRTg7sugNRDGqCJzJgc"
"KE0ywc0ELm6KBCCJo8DIPFeCWNGcyqNFE06ToAfV0HBRgxsvLThHn1oddQMrXj5Dy"
"AQgjEHSAJMWZwS3HPxT/QMbabI/iBCliMLEJKX2EEkomBAUCxRi42VDADxyTYDVog"
"V+wSChqmKxEKCDAYFDFj4OmwbY7bDGdBhtrnTQYOigeChUmc1K3QTnAUfEgGFgAWt"
"88hKA6aCRIXhxnQ1yg3BCayK44EWdkUQcBByEQChFXfCB776aQsG0BIlQgQgE8qO2"
"6X1h8cEUep8ngRBnOy74E9QgRgEAC8SvOfQkh7FDBDmS43PmGoIiKUUEGkMEC/PJH"
"gxw0xH74yx/3XnaYRJgMB8obxQW6kL9QYEJ0FIFgByfIL7/IQAlvQwEpnAC7DtLNJ"
"CKUoO/w45c44GwCXiAFB/OXAATQryUxdN4LfFiwgjCNYg+kYMIEFkCKDs6PKAIJou"
"yGWMS1FSKJOMRB/BoIxYJIUXFUxNwoIkEKPAgCBZSQHQ1A2EWDfDEUVLyADj5AChS"
"IQW6gu10bE/JG2VnCZGfo4R4d0sdQoBAHhPjhIB94v/wRoRKQWGRHgrhGSQJxCS+0"
"pCZbEhAAOw==';var dz = Dropzone.forElement('#%s'); dz.addFile("
"base64toBlob(base64Image, 'image/gif'));" % dropzone_id)
# Wait for preview image
self.wait_for(
'xpath',
f'//div[@id="preview-{dropzone_id}"]/div[@class="image-preview"]/img')
| {
"content_hash": "513b1ccc395d853bfdf9c4847c1df24f",
"timestamp": "",
"source": "github",
"line_count": 703,
"max_line_length": 131,
"avg_line_length": 42.502133712660026,
"alnum_prop": 0.5932594799022725,
"repo_name": "CDE-UNIBE/qcat",
"id": "3446ce0bd7781f249500790d349012606de46b68",
"size": "29881",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "functional_tests/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1098"
},
{
"name": "HTML",
"bytes": "823938"
},
{
"name": "Handlebars",
"bytes": "224139"
},
{
"name": "JavaScript",
"bytes": "153067"
},
{
"name": "Python",
"bytes": "3515948"
},
{
"name": "SCSS",
"bytes": "165400"
},
{
"name": "Shell",
"bytes": "1943"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from sys import version_info
import flask_login
from flask_login import login_required, current_user, logout_user
from flask import flash
from wtforms import (
Form, PasswordField, StringField)
from wtforms.validators import InputRequired
from flask import url_for, redirect
from flask_bcrypt import generate_password_hash, check_password_hash
from sqlalchemy import (
Column, String, DateTime)
from sqlalchemy.ext.hybrid import hybrid_property
from airflow import settings
from airflow import models
from airflow import configuration
import logging
login_manager = flask_login.LoginManager()
login_manager.login_view = 'airflow.login'  # Calls login() below
login_manager.login_message = None
LOG = logging.getLogger(__name__)
PY3 = version_info[0] == 3
class AuthenticationError(Exception):
pass
class PasswordUser(models.User):
_password = Column('password', String(255))
def __init__(self, user):
self.user = user
@hybrid_property
def password(self):
return self._password
@password.setter
def _set_password(self, plaintext):
self._password = generate_password_hash(plaintext, 12)
if PY3:
self._password = str(self._password, 'utf-8')
def authenticate(self, plaintext):
return check_password_hash(self._password, plaintext)
def is_active(self):
'''Required by flask_login'''
return True
def is_authenticated(self):
'''Required by flask_login'''
return True
def is_anonymous(self):
'''Required by flask_login'''
return False
def get_id(self):
'''Returns the current user id as required by flask_login'''
return str(self.id)
def data_profiling(self):
'''Provides access to data profiling tools'''
return True
def is_superuser(self):
'''Access all the things'''
return True
@login_manager.user_loader
def load_user(userid):
LOG.debug("Loading user %s", userid)
if not userid or userid == 'None':
return None
session = settings.Session()
user = session.query(models.User).filter(models.User.id == int(userid)).first()
session.expunge_all()
session.commit()
session.close()
return PasswordUser(user)
def login(self, request):
if current_user.is_authenticated():
flash("You are already logged in")
return redirect(url_for('admin.index'))
username = None
password = None
form = LoginForm(request.form)
if request.method == 'POST' and form.validate():
username = request.form.get("username")
password = request.form.get("password")
if not username or not password:
return self.render('airflow/login.html',
title="Airflow - Login",
form=form)
try:
session = settings.Session()
user = session.query(PasswordUser).filter(
PasswordUser.username == username).first()
if not user:
session.close()
raise AuthenticationError()
if not user.authenticate(password):
session.close()
raise AuthenticationError()
LOG.info("User %s successfully authenticated", username)
flask_login.login_user(user)
session.commit()
session.close()
return redirect(request.args.get("next") or url_for("admin.index"))
except AuthenticationError:
flash("Incorrect login details")
return self.render('airflow/login.html',
title="Airflow - Login",
form=form)
class LoginForm(Form):
username = StringField('Username', [InputRequired()])
password = PasswordField('Password', [InputRequired()])
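# ---------------------------------------------------------------------------
# Illustrative sketch -- not part of the original module.  Creating a user
# for this backend, e.g. from a Python shell; the credentials below are
# placeholders and a configured Airflow metadata database is assumed.
def _example_create_password_user():
    session = settings.Session()
    user = PasswordUser(models.User())
    user.username = 'example_admin'
    user.email = 'admin@example.com'
    user.password = 'change-me'  # hashed by the password setter above
    session.add(user)
    session.commit()
    session.close()
    return user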
| {
"content_hash": "924567c1d797b31db4948904060da3ca",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 83,
"avg_line_length": 27.085106382978722,
"alnum_prop": 0.6423147420790782,
"repo_name": "yiqingj/airflow",
"id": "66b1df1d86c6e5397e9a7dba2ad2b513af9943ea",
"size": "4386",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "airflow/contrib/auth/backends/password_auth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "56952"
},
{
"name": "HTML",
"bytes": "129811"
},
{
"name": "JavaScript",
"bytes": "1370838"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "1219864"
},
{
"name": "Shell",
"bytes": "17782"
}
],
"symlink_target": ""
} |
"""Starter script for the Solum Worker service."""
import os
import shlex
import sys
from oslo_config import cfg
from oslo_log import log as logging
from oslo_privsep import priv_context
import solum
from solum.common.rpc import service as rpc_service
from solum.common import service
from solum.common import trace_data
from solum.common import utils
from solum.i18n import _
from solum.worker.handlers import default as default_handler
from solum.worker.handlers import noop as noop_handler
from solum.worker.handlers import shell as shell_handler
LOG = logging.getLogger(__name__)
cli_opts = [
cfg.IntOpt('run-container-cmd-as', metavar='UID', default=65533,
help='Run commands in containers as the user assigned '
'with the UID, which can be used to constrain resource, '
'e.g. disk usage, on a worker host.'),
]
def main():
priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
cfg.CONF.register_cli_opts(cli_opts)
service.prepare_service(sys.argv)
solum.TLS.trace = trace_data.TraceData()
LOG.info(_('Starting server in PID %s') % os.getpid())
LOG.debug("Configuration:")
logging.setup(cfg.CONF, 'solum')
cfg.CONF.import_opt('topic', 'solum.worker.config', group='worker')
cfg.CONF.import_opt('host', 'solum.worker.config', group='worker')
cfg.CONF.import_opt('handler', 'solum.worker.config', group='worker')
handlers = {
'noop': noop_handler.Handler,
'default': default_handler.Handler,
'shell': shell_handler.Handler,
}
endpoints = [
handlers[cfg.CONF.worker.handler](),
]
server = rpc_service.Service(cfg.CONF.worker.topic,
cfg.CONF.worker.host, endpoints)
server.serve()
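# ---------------------------------------------------------------------------
# Illustrative note -- not part of the original module.  The handler is
# selected through oslo.config; a configuration file along these lines (the
# values are examples only) makes main() register the shell handler as the
# RPC endpoint:
#
#     [worker]
#     topic = solum-worker
#     handler = shell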
| {
"content_hash": "5c65ab15dc532cba766aec88b91b7bdc",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 77,
"avg_line_length": 30.982758620689655,
"alnum_prop": 0.6755703951029494,
"repo_name": "stackforge/solum",
"id": "d2f7c9ddd9ea8df6218a7c6902107ef877e1da77",
"size": "2408",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "solum/cmd/worker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "958"
},
{
"name": "Python",
"bytes": "1243294"
},
{
"name": "Shell",
"bytes": "80784"
}
],
"symlink_target": ""
} |
__all__ = ["Dtool_ObjectToDict", "Dtool_funcToMethod", "Dtool_PreloadDLL"]
import imp, sys, os
# The following code exists to work around a problem that exists
# with Python 2.5 or greater.
# Specifically, Python 2.5 is designed to import files named *.pyd
# only; it will not import files named *.dll (or *.so). We work
# around this problem by explicitly preloading all of the dll's we
# expect to need.
dll_suffix = ''
if sys.platform == "win32":
# On Windows, dynamic libraries end in ".dll".
dll_ext = '.dll'
module_ext = '.pyd'
# We allow the caller to preload dll_suffix into the sys module.
dll_suffix = getattr(sys, 'dll_suffix', None)
if dll_suffix is None:
# Otherwise, we try to determine it from the executable name:
# python_d.exe implies _d across the board.
dll_suffix = ''
if sys.executable.endswith('_d.exe'):
dll_suffix = '_d'
elif sys.platform == "darwin":
# On OSX, the dynamic libraries usually end in .dylib, but
# sometimes we need .so.
try:
from direct.extensions_native.extensions_darwin import dll_ext
except ImportError:
dll_ext = '.dylib'
module_ext = '.so'
else:
# On most other UNIX systems (including linux), .so is used.
dll_ext = '.so'
module_ext = '.so'
if sys.platform == "win32":
# On Windows, we must furthermore ensure that the PATH is modified
# to locate all of the DLL files.
# First, search for the directory that contains all of our compiled
# modules.
target = None
filename = "libpandaexpress%s%s" % (dll_suffix, dll_ext)
for dir in sys.path + [sys.prefix]:
lib = os.path.join(dir, filename)
if (os.path.exists(lib)):
target = dir
    if target is None:
message = "Cannot find %s" % (filename)
raise ImportError(message)
# And add that directory to the system path.
path = os.environ["PATH"]
if not path.startswith(target + ";"):
os.environ["PATH"] = target + ";" + path
def Dtool_FindModule(module):
# Finds a .pyd module on the Python path.
filename = module.replace('.', os.path.sep) + module_ext
for dir in sys.path:
lib = os.path.join(dir, filename)
if (os.path.exists(lib)):
return lib
return None
def Dtool_PreloadDLL(module):
if module in sys.modules:
return
# First find it as a .pyd module on the Python path.
if Dtool_FindModule(module):
# OK, we should have no problem importing it as is.
return
# Nope, we'll need to search for a dynamic lib and preload it.
# Search for the appropriate directory.
target = None
filename = module.replace('.', os.path.sep) + dll_suffix + dll_ext
for dir in sys.path + [sys.prefix]:
lib = os.path.join(dir, filename)
if (os.path.exists(lib)):
target = dir
break
if target is None:
message = "DLL loader cannot find %s." % (module)
raise ImportError(message)
# Now import the file explicitly.
pathname = os.path.join(target, filename)
imp.load_dynamic(module, pathname)
# Nowadays, we can compile libpandaexpress with libpanda into a
# .pyd file called panda3d/core.pyd which can be imported without
# any difficulty. Let's see if this is the case.
# In order to support things like py2exe that play games with the
# physical python files on disk, we can't entirely rely on
# Dtool_FindModule to find our panda3d.core module. However, we
# should be able to import it. To differentiate the old-style Panda
# build (with .dll's) from the new-style Panda build (with .pyd's), we
# first try to import panda3d.core directly; if it succeeds we're in a
# new-style build, and if it fails we must be in an old-style build.
try:
from panda3d.core import *
except ImportError:
Dtool_PreloadDLL("libpandaexpress")
from libpandaexpress import *
def Dtool_ObjectToDict(cls, name, obj):
cls.DtoolClassDict[name] = obj;
def Dtool_funcToMethod(func, cls, method_name=None):
"""Adds func to class so it is an accessible method; use method_name to specify the name to be used for calling the method.
The new method is accessible to any instance immediately."""
if sys.version_info < (3, 0):
func.im_class = cls
func.im_func = func
func.im_self = None
if not method_name:
method_name = func.__name__
cls.DtoolClassDict[method_name] = func;
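# ---------------------------------------------------------------------------
# Illustrative sketch -- not part of the original module.  This is the
# pattern the extension files use to graft a plain Python function onto a
# C++-wrapped class.  ``Filename`` (assumed to be provided by the import
# above) is used purely as an example target; the helper is hypothetical.
def _exampleBasename(self):
    # Hypothetical helper: return the base name of this Filename as a str.
    return self.getBasename()
Dtool_funcToMethod(_exampleBasename, Filename, 'exampleBasename')
del _exampleBasename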
| {
"content_hash": "323ecfe07f0856eb097f0a8027f1f570",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 127,
"avg_line_length": 34.83720930232558,
"alnum_prop": 0.6515353805073432,
"repo_name": "ee08b397/panda3d",
"id": "9424b640ce1fefd77cf53676d219acad43fe4801",
"size": "4505",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "direct/src/extensions_native/extension_native_helpers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4044"
},
{
"name": "C",
"bytes": "6621685"
},
{
"name": "C++",
"bytes": "31403451"
},
{
"name": "Emacs Lisp",
"bytes": "166274"
},
{
"name": "Groff",
"bytes": "8017"
},
{
"name": "HTML",
"bytes": "8081"
},
{
"name": "Java",
"bytes": "3777"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Logos",
"bytes": "5504"
},
{
"name": "NSIS",
"bytes": "91955"
},
{
"name": "Nemerle",
"bytes": "1461"
},
{
"name": "Objective-C",
"bytes": "15068"
},
{
"name": "Objective-C++",
"bytes": "298229"
},
{
"name": "Pascal",
"bytes": "467818"
},
{
"name": "Perl",
"bytes": "206982"
},
{
"name": "Perl6",
"bytes": "30636"
},
{
"name": "Puppet",
"bytes": "337716"
},
{
"name": "Python",
"bytes": "5837581"
},
{
"name": "Rebol",
"bytes": "421"
},
{
"name": "Shell",
"bytes": "55940"
},
{
"name": "Visual Basic",
"bytes": "136"
}
],
"symlink_target": ""
} |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilentBase8340 import *
class agilent8341A(agilentBase8340):
"Agilent 8341A IVI RF sweep generator driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'HP8341A')
super(agilent8341A, self).__init__(*args, **kwargs)
self._frequency_low = 10e6
self._frequency_high = 26.5e9
| {
"content_hash": "06f9ce522b020d6fa6f1dec91c063fe4",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 77,
"avg_line_length": 36.09756097560975,
"alnum_prop": 0.7614864864864865,
"repo_name": "margguo/python-ivi",
"id": "9a22eb7b33b44b60c71513b1e56fd858ddd18fec",
"size": "1480",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "ivi/agilent/agilent8341A.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1738999"
}
],
"symlink_target": ""
} |
"""Tests for advanced user interactions."""
import os
import time
import unittest
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.remote.webdriver import WebDriver
class AdvancedUserInteractionTest(unittest.TestCase):
def performDragAndDropWithMouse(self):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
self._loadPage("draggableLists")
dragReporter = self.driver.find_element_by_id("dragging_reports")
toDrag = self.driver.find_element_by_id("rightitem-3")
dragInto = self.driver.find_element_by_id("sortable1")
holdItem = ActionChains(self.driver).click_and_hold(toDrag)
moveToSpecificItem = ActionChains(self.driver) \
.move_to_element(self.driver.find_element_by_id("leftitem-4"))
moveToOtherList = ActionChains(self.driver).move_to_element(dragInto)
drop = ActionChains(self.driver).release(dragInto)
self.assertEqual("Nothing happened.", dragReporter.text)
holdItem.perform()
moveToSpecificItem.perform()
moveToOtherList.perform()
self.assertEqual("Nothing happened. DragOut", dragReporter.text)
drop.perform()
def testDraggingElementWithMouseMovesItToAnotherList(self):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
self.performDragAndDropWithMouse()
dragInto = self.driver.find_element_by_id("sortable1")
self.assertEqual(6, len(dragInto.find_elements_by_tag_name("li")))
def _testDraggingElementWithMouseFiresEvents(self):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface.
Disabled since this test doesn't work with HTMLUNIT.
"""
self.performDragAndDropWithMouse()
dragReporter = self.driver.find_element_by_id("dragging_reports")
self.assertEqual("Nothing happened. DragOut DropIn RightItem 3", dragReporter.text)
def _isElementAvailable(self, id):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
try:
self.driver.find_element_by_id(id)
return True
except:
return False
def testDragAndDrop(self):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
self._loadPage("droppableItems")
waitEndTime = time.time() + 15
while (not self._isElementAvailable("draggable") and
time.time() < waitEndTime):
time.sleep(0.2)
if not self._isElementAvailable("draggable"):
            raise RuntimeError("Could not find draggable element after 15 seconds.")
toDrag = self.driver.find_element_by_id("draggable")
dropInto = self.driver.find_element_by_id("droppable")
holdDrag = ActionChains(self.driver) \
.click_and_hold(toDrag)
move = ActionChains(self.driver) \
.move_to_element(dropInto)
drop = ActionChains(self.driver).release(dropInto)
holdDrag.perform()
move.perform()
drop.perform()
dropInto = self.driver.find_element_by_id("droppable")
text = dropInto.find_element_by_tag_name("p").text
self.assertEqual("Dropped!", text)
def testDoubleClick(self):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
self._loadPage("javascriptPage")
toDoubleClick = self.driver.find_element_by_id("doubleClickField")
dblClick = ActionChains(self.driver) \
.double_click(toDoubleClick)
dblClick.perform()
self.assertEqual("DoubleClicked", toDoubleClick.get_attribute('value'))
def testContextClick(self):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
self._loadPage("javascriptPage")
toContextClick = self.driver.find_element_by_id("doubleClickField")
contextClick = ActionChains(self.driver) \
.context_click(toContextClick)
contextClick.perform()
self.assertEqual("ContextClicked",
toContextClick.get_attribute('value'))
def testMoveAndClick(self):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
self._loadPage("javascriptPage")
toClick = self.driver.find_element_by_id("clickField")
click = ActionChains(self.driver) \
.move_to_element(toClick) \
.click()
click.perform()
self.assertEqual("Clicked", toClick.get_attribute('value'))
def testCannotMoveToANullLocator(self):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
self._loadPage("javascriptPage")
try:
move = ActionChains(self.driver) \
.move_to_element(None)
move.perform()
self.fail("Shouldn't be allowed to click on null element.")
except AttributeError:
pass # Expected.
try:
ActionChains(self.driver).click().perform()
self.fail("Shouldn't be allowed to click without a context.")
except WebDriverException:
pass # Expected.
def _testClickingOnFormElements(self):
"""Copied from org.openqa.selenium.interactions.CombinedInputActionsTest.
Disabled since this test doesn't work with HTMLUNIT.
"""
self._loadPage("formSelectionPage")
options = self.driver.find_elements_by_tag_name("option")
selectThreeOptions = ActionChains(self.driver) \
.click(options[1]) \
.key_down(Keys.SHIFT) \
.click(options[2]) \
.click(options[3]) \
.key_up(Keys.SHIFT)
selectThreeOptions.perform()
showButton = self.driver.find_element_by_name("showselected")
showButton.click()
resultElement = self.driver.find_element_by_id("result")
self.assertEqual("roquefort parmigiano cheddar", resultElement.text)
def testSelectingMultipleItems(self):
"""Copied from org.openqa.selenium.interactions.CombinedInputActionsTest."""
self._loadPage("selectableItems")
reportingElement = self.driver.find_element_by_id("infodiv")
self.assertEqual("no info", reportingElement.text)
listItems = self.driver.find_elements_by_tag_name("li")
selectThreeItems = ActionChains(self.driver) \
.key_down(Keys.CONTROL) \
.click(listItems[1]) \
.click(listItems[3]) \
.click(listItems[5]) \
.key_up(Keys.CONTROL)
selectThreeItems.perform()
self.assertEqual("#item2 #item4 #item6", reportingElement.text)
# Now click on another element, make sure that's the only one selected.
actionsBuilder = ActionChains(self.driver)
actionsBuilder.click(listItems[6]).perform()
self.assertEqual("#item7", reportingElement.text)
def _pageURL(self, name):
return "http://localhost:%d/%s.html" % (self.webserver.port, name)
def _loadSimplePage(self):
self._loadPage("simpleTest")
def _loadPage(self, name):
self.driver.get(self._pageURL(name))
| {
"content_hash": "bd563938b2305759112f5b5201034482",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 91,
"avg_line_length": 39.63636363636363,
"alnum_prop": 0.6604155423637345,
"repo_name": "akiellor/selenium",
"id": "4a082a67256a138d953833524f9d6dd8cf7a7e8b",
"size": "8045",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/test/selenium/webdriver/common/interactions_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "22777"
},
{
"name": "C",
"bytes": "13787069"
},
{
"name": "C#",
"bytes": "1592944"
},
{
"name": "C++",
"bytes": "39839762"
},
{
"name": "Java",
"bytes": "5948691"
},
{
"name": "JavaScript",
"bytes": "15038006"
},
{
"name": "Objective-C",
"bytes": "331601"
},
{
"name": "Python",
"bytes": "544265"
},
{
"name": "Ruby",
"bytes": "557579"
},
{
"name": "Shell",
"bytes": "21701"
}
],
"symlink_target": ""
} |
"""
Tests for levelpy's LevelDB wrapper, using the leveldb package as the backend.
This suite is skipped if leveldb is not installed.
"""
import pytest
from levelpy.leveldb import LevelDB
from fixtures import leveldir # noqa
@pytest.fixture(scope='module')
def backend_class_str():
return "leveldb.LevelDB"
@pytest.fixture(scope='module')
def backend_package(backend_class_str):
pkg_name = ".".join(backend_class_str.split(".")[:-1])
return pytest.importorskip(pkg_name)
@pytest.fixture(scope='module')
def backend_class(backend_package, backend_class_str):
classname = backend_class_str.split(".")[-1]
return getattr(backend_package, classname)
@pytest.fixture
def backend(backend_class, leveldir):
return backend_class(leveldir, create_if_missing=True)
def test_class(backend_class):
assert backend_class is not None
def test_constructor(leveldir, backend_class_str, backend_class):
lvl = LevelDB(leveldir, backend_class_str, create_if_missing=True)
assert isinstance(lvl, LevelDB)
assert isinstance(lvl._db, backend_class)
def test_constructor_with_class_obj(leveldir, backend_class):
lvl = LevelDB(leveldir, backend_class, create_if_missing=True)
assert isinstance(lvl, LevelDB)
assert isinstance(lvl._db, backend_class)
def test_constructor_with_premade_backend(backend):
lvl = LevelDB(backend)
assert lvl.Put == backend.Put
assert lvl.Get == backend.Get
assert lvl.Delete == backend.Delete
assert lvl.Write == backend.Write
assert lvl.RangeIter == backend.RangeIter
assert lvl.GetStats == backend.GetStats
assert lvl.CreateSnapshot == backend.CreateSnapshot
assert lvl.path is None
def test_backend_package(backend_package):
assert backend_package is not None
| {
"content_hash": "254ec116c5644eefe27c51a03beb5c86",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 79,
"avg_line_length": 27.90625,
"alnum_prop": 0.713885778275476,
"repo_name": "akubera/levelpy",
"id": "3e753493c1006f912f0af831eeb09bd5d4d6292c",
"size": "1843",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_with_leveldb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66520"
}
],
"symlink_target": ""
} |
"""Keras string lookup preprocessing layer."""
# pylint: disable=g-classes-have-attributes
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.keras.engine import base_preprocessing_layer
from tensorflow.python.keras.layers.preprocessing import index_lookup
from tensorflow.python.keras.layers.preprocessing import table_utils
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.experimental.preprocessing.StringLookup", v1=[])
class StringLookup(index_lookup.IndexLookup):
"""Maps strings from a vocabulary to integer indices.
This layer translates a set of arbitrary strings into an integer output via a
table-based vocabulary lookup.
The vocabulary for the layer can be supplied on construction or learned via
`adapt()`. During `adapt()`, the layer will analyze a data set, determine the
frequency of individual strings tokens, and create a vocabulary from them. If
the vocabulary is capped in size, the most frequent tokens will be used to
create the vocabulary and all others will be treated as out-of-vocabulary
(OOV).
There are two possible output modes for the layer.
When `output_mode` is `"int"`,
input strings are converted to their index in the vocabulary (an integer).
When `output_mode` is `"multi_hot"`, `"count"`, or `"tf_idf"`, input strings
are encoded into an array where each dimension corresponds to an element in
the vocabulary.
The vocabulary can optionally contain a mask token as well as an OOV token
(which can optionally occupy multiple indices in the vocabulary, as set
by `num_oov_indices`).
The position of these tokens in the vocabulary is fixed. When `output_mode` is
`"int"`, the vocabulary will begin with the mask token (if set), followed by
OOV indices, followed by the rest of the vocabulary. When `output_mode` is
`"multi_hot"`, `"count"`, or `"tf_idf"` the vocabulary will begin with OOV
indices and instances of the mask token will be dropped.
Args:
max_tokens: The maximum size of the vocabulary for this layer. If None,
there is no cap on the size of the vocabulary. Note that this size
      includes the OOV and mask tokens. Defaults to None.
num_oov_indices: The number of out-of-vocabulary tokens to use. If this
value is more than 1, OOV inputs are hashed to determine their OOV value.
If this value is 0, OOV inputs will cause an error when calling the layer.
Defaults to 1.
mask_token: A token that represents masked inputs. When `output_mode` is
`"int"`, the token is included in vocabulary and mapped to index 0. In
other output modes, the token will not appear in the vocabulary and
instances of the mask token in the input will be dropped. If set to None,
no mask term will be added. Defaults to `None`.
oov_token: Only used when `invert` is True. The token to return for OOV
indices. Defaults to `"[UNK]"`.
vocabulary: An optional list of tokens, or a path to a text file containing
a vocabulary to load into this layer. The file should contain one token
per line. If the list or file contains the same token multiple times, an
error will be thrown.
invert: Only valid when `output_mode` is `"int"`. If True, this layer will
map indices to vocabulary items instead of mapping vocabulary items to
      indices. Defaults to False.
output_mode: Specification for the output of the layer. Defaults to `"int"`.
Values can be `"int"`, `"one_hot"`, `"multi_hot"`, `"count"`, or
`"tf_idf"` configuring the layer as follows:
- `"int"`: Return the raw integer indices of the input tokens.
- `"one_hot"`: Encodes each individual element in the input into an
array the same size as the vocabulary, containing a 1 at the element
index. If the last dimension is size 1, will encode on that dimension.
If the last dimension is not size 1, will append a new dimension for
the encoded output.
- `"multi_hot"`: Encodes each sample in the input into a single array
the same size as the vocabulary, containing a 1 for each vocabulary
term present in the sample. Treats the last dimension as the sample
dimension, if input shape is (..., sample_length), output shape will
be (..., num_tokens).
- `"count"`: As `"multi_hot"`, but the int array contains a count of the
number of times the token at that index appeared in the sample.
- `"tf_idf"`: As `"multi_hot"`, but the TF-IDF algorithm is applied to
find the value in each token slot.
pad_to_max_tokens: Only applicable when `output_mode` is `"multi_hot"`,
`"count"`, or `"tf_idf"`. If True, the output will have its feature axis
padded to `max_tokens` even if the number of unique tokens in the
vocabulary is less than max_tokens, resulting in a tensor of shape
[batch_size, max_tokens] regardless of vocabulary size. Defaults to False.
sparse: Boolean. Only applicable when `output_mode` is `"multi_hot"`,
`"count"`, or `"tf_idf"`. If True, returns a `SparseTensor` instead of a
dense `Tensor`. Defaults to False.
Examples:
**Creating a lookup layer with a known vocabulary**
This example creates a lookup layer with a pre-existing vocabulary.
>>> vocab = ["a", "b", "c", "d"]
>>> data = tf.constant([["a", "c", "d"], ["d", "z", "b"]])
>>> layer = StringLookup(vocabulary=vocab)
>>> layer(data)
<tf.Tensor: shape=(2, 3), dtype=int64, numpy=
array([[1, 3, 4],
[4, 0, 2]])>
**Creating a lookup layer with an adapted vocabulary**
This example creates a lookup layer and generates the vocabulary by analyzing
the dataset.
>>> data = tf.constant([["a", "c", "d"], ["d", "z", "b"]])
>>> layer = StringLookup()
>>> layer.adapt(data)
>>> layer.get_vocabulary()
['[UNK]', 'd', 'z', 'c', 'b', 'a']
Note that the OOV token [UNK] has been added to the vocabulary. The remaining
tokens are sorted by frequency ('d', which has 2 occurrences, is first) then
by inverse sort order.
>>> data = tf.constant([["a", "c", "d"], ["d", "z", "b"]])
>>> layer = StringLookup()
>>> layer.adapt(data)
>>> layer(data)
<tf.Tensor: shape=(2, 3), dtype=int64, numpy=
array([[5, 3, 1],
[1, 2, 4]])>
**Lookups with multiple OOV indices**
This example demonstrates how to use a lookup layer with multiple OOV indices.
When a layer is created with more than one OOV index, any OOV values are
hashed into the number of OOV buckets, distributing OOV values in a
deterministic fashion across the set.
>>> vocab = ["a", "b", "c", "d"]
>>> data = tf.constant([["a", "c", "d"], ["m", "z", "b"]])
>>> layer = StringLookup(vocabulary=vocab, num_oov_indices=2)
>>> layer(data)
<tf.Tensor: shape=(2, 3), dtype=int64, numpy=
array([[2, 4, 5],
[0, 1, 3]])>
Note that the output for OOV value 'm' is 0, while the output for OOV value
'z' is 1. The in-vocab terms have their output index increased by 1 from
earlier examples (a maps to 2, etc) in order to make space for the extra OOV
value.
**One-hot output**
Configure the layer with `output_mode='one_hot'`. Note that the first
  `num_oov_indices` dimensions in the one_hot encoding represent OOV values.
>>> vocab = ["a", "b", "c", "d"]
>>> data = tf.constant(["a", "b", "c", "d", "z"])
>>> layer = StringLookup(vocabulary=vocab, output_mode='one_hot')
>>> layer(data)
<tf.Tensor: shape=(5, 5), dtype=float32, numpy=
array([[0., 1., 0., 0., 0.],
[0., 0., 1., 0., 0.],
[0., 0., 0., 1., 0.],
[0., 0., 0., 0., 1.],
[1., 0., 0., 0., 0.]], dtype=float32)>
**Multi-hot output**
Configure the layer with `output_mode='multi_hot'`. Note that the first
`num_oov_indices` dimensions in the multi_hot encoding represent OOV values.
>>> vocab = ["a", "b", "c", "d"]
>>> data = tf.constant([["a", "c", "d", "d"], ["d", "z", "b", "z"]])
>>> layer = StringLookup(vocabulary=vocab, output_mode='multi_hot')
>>> layer(data)
<tf.Tensor: shape=(2, 5), dtype=float32, numpy=
array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]], dtype=float32)>
**Token count output**
Configure the layer with `output_mode='count'`. As with multi_hot output, the
first `num_oov_indices` dimensions in the output represent OOV values.
>>> vocab = ["a", "b", "c", "d"]
>>> data = tf.constant([["a", "c", "d", "d"], ["d", "z", "b", "z"]])
>>> layer = StringLookup(vocabulary=vocab, output_mode='count')
>>> layer(data)
<tf.Tensor: shape=(2, 5), dtype=float32, numpy=
array([[0., 1., 0., 1., 2.],
[2., 0., 1., 0., 1.]], dtype=float32)>
**TF-IDF output**
Configure the layer with `output_mode='tf_idf'`. As with multi_hot output, the
first `num_oov_indices` dimensions in the output represent OOV values.
Each token bin will output `token_count * idf_weight`, where the idf weights
are the inverse document frequency weights per token. These should be provided
along with the vocabulary. Note that the `idf_weight` for OOV values will
default to the average of all idf weights passed in.
>>> vocab = ["a", "b", "c", "d"]
>>> idf_weights = [0.25, 0.75, 0.6, 0.4]
>>> data = tf.constant([["a", "c", "d", "d"], ["d", "z", "b", "z"]])
>>> layer = StringLookup(output_mode='tf_idf')
>>> layer.set_vocabulary(vocab, idf_weights=idf_weights)
>>> layer(data)
<tf.Tensor: shape=(2, 5), dtype=float32, numpy=
array([[0. , 0.25, 0. , 0.6 , 0.8 ],
[1.0 , 0. , 0.75, 0. , 0.4 ]], dtype=float32)>
To specify the idf weights for oov values, you will need to pass the entire
  vocabulary including the leading OOV token.
>>> vocab = ["[UNK]", "a", "b", "c", "d"]
>>> idf_weights = [0.9, 0.25, 0.75, 0.6, 0.4]
>>> data = tf.constant([["a", "c", "d", "d"], ["d", "z", "b", "z"]])
>>> layer = StringLookup(output_mode='tf_idf')
>>> layer.set_vocabulary(vocab, idf_weights=idf_weights)
>>> layer(data)
<tf.Tensor: shape=(2, 5), dtype=float32, numpy=
array([[0. , 0.25, 0. , 0.6 , 0.8 ],
[1.8 , 0. , 0.75, 0. , 0.4 ]], dtype=float32)>
When adapting the layer in tf_idf mode, each input sample will be considered a
document, and idf weight per token will be calculated as
`log(1 + num_documents / (1 + token_document_count))`.
**Inverse lookup**
This example demonstrates how to map indices to strings using this layer. (You
can also use adapt() with inverse=True, but for simplicity we'll pass the
vocab in this example.)
>>> vocab = ["a", "b", "c", "d"]
>>> data = tf.constant([[1, 3, 4], [4, 0, 2]])
>>> layer = StringLookup(vocabulary=vocab, invert=True)
>>> layer(data)
<tf.Tensor: shape=(2, 3), dtype=string, numpy=
array([[b'a', b'c', b'd'],
[b'd', b'[UNK]', b'b']], dtype=object)>
  Note that the first index corresponds to the OOV token by default.
**Forward and inverse lookup pairs**
This example demonstrates how to use the vocabulary of a standard lookup
layer to create an inverse lookup layer.
>>> vocab = ["a", "b", "c", "d"]
>>> data = tf.constant([["a", "c", "d"], ["d", "z", "b"]])
>>> layer = StringLookup(vocabulary=vocab)
>>> i_layer = StringLookup(vocabulary=vocab, invert=True)
>>> int_data = layer(data)
>>> i_layer(int_data)
<tf.Tensor: shape=(2, 3), dtype=string, numpy=
array([[b'a', b'c', b'd'],
[b'd', b'[UNK]', b'b']], dtype=object)>
  In this example, the input value 'z' resulted in an output of '[UNK]', since
  'z' was not in the vocabulary - it got represented as an OOV, and all OOV
  values are returned as '[UNK]' in the inverse layer. Also, note that for the
inverse to work, you must have already set the forward layer vocabulary
either directly or via adapt() before calling get_vocabulary().
"""
def __init__(self,
max_tokens=None,
num_oov_indices=1,
mask_token=None,
oov_token="[UNK]",
vocabulary=None,
encoding=None,
invert=False,
output_mode=index_lookup.INT,
sparse=False,
pad_to_max_tokens=False,
**kwargs):
allowed_dtypes = [dtypes.string]
if "dtype" in kwargs and kwargs["dtype"] not in allowed_dtypes:
raise ValueError("The value of the dtype argument for StringLookup may "
"only be one of %s." % (allowed_dtypes,))
if "dtype" not in kwargs:
kwargs["dtype"] = dtypes.string
if encoding is None:
encoding = "utf-8"
self.encoding = encoding
super(StringLookup, self).__init__(
max_tokens=max_tokens,
num_oov_indices=num_oov_indices,
mask_token=mask_token,
oov_token=oov_token,
vocabulary=vocabulary,
invert=invert,
output_mode=output_mode,
sparse=sparse,
pad_to_max_tokens=pad_to_max_tokens,
**kwargs)
base_preprocessing_layer.keras_kpl_gauge.get_cell("StringLookup").set(True)
def get_config(self):
config = {"encoding": self.encoding}
base_config = super(StringLookup, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def set_vocabulary(self, vocabulary, idf_weights=None):
if isinstance(vocabulary, str):
if self.output_mode == index_lookup.TF_IDF:
raise RuntimeError("Setting vocabulary directly from a file is not "
"supported in TF-IDF mode, since this layer cannot "
"read files containing TF-IDF weight data. Please "
"read the file using Python and set the vocabulary "
"and weights by passing lists or arrays to the "
"set_vocabulary function's `vocabulary` and "
"`idf_weights` args.")
vocabulary = table_utils.get_vocabulary_from_file(vocabulary,
self.encoding)
super().set_vocabulary(vocabulary, idf_weights=idf_weights)
  # Overridden methods from IndexLookup.
def _tensor_vocab_to_numpy(self, vocabulary):
vocabulary = vocabulary.numpy()
return np.array([compat.as_text(x, self.encoding) for x in vocabulary])
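def _load_tf_idf_vocabulary_sketch(path):
  """Hedged sketch (illustration only, not part of the Keras API): reads a
  vocabulary file with idf weights and applies it in TF-IDF mode, since
  `set_vocabulary` cannot read weight files itself. The one token<TAB>weight
  pair per line format and this helper's name are assumptions for the example.
  """
  vocab = []
  idf_weights = []
  with open(path, encoding="utf-8") as f:
    for line in f:
      token, weight = line.rstrip("\n").split("\t")
      vocab.append(token)
      idf_weights.append(float(weight))
  layer = StringLookup(output_mode="tf_idf")
  layer.set_vocabulary(vocab, idf_weights=idf_weights)
  return layer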
| {
"content_hash": "5598fc02efbf2b0e5d09ea564818ed35",
"timestamp": "",
"source": "github",
"line_count": 329,
"max_line_length": 80,
"avg_line_length": 43.848024316109424,
"alnum_prop": 0.6335089421877166,
"repo_name": "sarvex/tensorflow",
"id": "1a0bd1894e36718e8972dd6c444e6b6045a16237",
"size": "15115",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tensorflow/python/keras/layers/preprocessing/string_lookup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "148184"
},
{
"name": "C++",
"bytes": "6224499"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "650478"
},
{
"name": "Java",
"bytes": "53519"
},
{
"name": "JavaScript",
"bytes": "6659"
},
{
"name": "Jupyter Notebook",
"bytes": "777935"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "61743"
},
{
"name": "Python",
"bytes": "3474762"
},
{
"name": "Shell",
"bytes": "45640"
},
{
"name": "TypeScript",
"bytes": "283668"
}
],
"symlink_target": ""
} |
COMPLETED = "response_completed"
DISQUALIFIED = "response_disqualified"
UPDATED = "response_updated"
# Object Types
SURVEY = "survey"
COLLECTOR = "collector"
| {
"content_hash": "85c4fd7c078b7773c07b6d34cefd4e0e",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 38,
"avg_line_length": 22.714285714285715,
"alnum_prop": 0.7610062893081762,
"repo_name": "Administrate/surveymonkey",
"id": "fd0436cb97b7962bbf8cb03297c5db633653c4c8",
"size": "198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "surveymonkey/webhooks/constants.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2284"
},
{
"name": "Python",
"bytes": "111039"
}
],
"symlink_target": ""
} |
import sys
import cv2.cv as cv
import urllib2
wndname = "Distance transform"
tbarname = "Threshold"
# The output images
dist = 0
dist8u1 = 0
dist8u2 = 0
dist8u = 0
dist32s = 0
gray = 0
edge = 0
# define a trackbar callback
def on_trackbar(edge_thresh):
    # Threshold the grayscale input to get a binary edge mask
    cv.Threshold(gray, edge, float(edge_thresh), float(edge_thresh), cv.CV_THRESH_BINARY)
    # Distance transform (L2 metric, 5x5 mask)
    cv.DistTransform(edge, dist, cv.CV_DIST_L2, cv.CV_DIST_MASK_5)
    # Scale and take the square root so the distances are visible on screen
    cv.ConvertScale(dist, dist, 5000.0, 0)
    cv.Pow(dist, dist, 0.5)
    # Build two complementary 8-bit channels and merge them into a false-colour image
    cv.ConvertScale(dist, dist32s, 1.0, 0.5)
    cv.AndS(dist32s, cv.ScalarAll(255), dist32s, None)
    cv.ConvertScale(dist32s, dist8u1, 1, 0)
    cv.ConvertScale(dist32s, dist32s, -1, 0)
    cv.AddS(dist32s, cv.ScalarAll(255), dist32s, None)
    cv.ConvertScale(dist32s, dist8u2, 1, 0)
    cv.Merge(dist8u1, dist8u2, dist8u2, None, dist8u)
cv.ShowImage(wndname, dist8u)
if __name__ == "__main__":
edge_thresh = 100
if len(sys.argv) > 1:
gray = cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_GRAYSCALE)
else:
url = 'https://raw.github.com/Itseez/opencv/master/samples/c/stuff.jpg'
filedata = urllib2.urlopen(url).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))
gray = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_GRAYSCALE)
# Create the output image
dist = cv.CreateImage((gray.width, gray.height), cv.IPL_DEPTH_32F, 1)
dist8u1 = cv.CloneImage(gray)
dist8u2 = cv.CloneImage(gray)
dist8u = cv.CreateImage((gray.width, gray.height), cv.IPL_DEPTH_8U, 3)
dist32s = cv.CreateImage((gray.width, gray.height), cv.IPL_DEPTH_32S, 1)
# Convert to grayscale
edge = cv.CloneImage(gray)
# Create a window
cv.NamedWindow(wndname, 1)
# create a toolbar
cv.CreateTrackbar(tbarname, wndname, edge_thresh, 255, on_trackbar)
# Show the image
on_trackbar(edge_thresh)
# Wait for a key stroke; the same function arranges events processing
cv.WaitKey(0)
cv.DestroyAllWindows()
| {
"content_hash": "9bf76096701d932b6121039801dc98cf",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 89,
"avg_line_length": 29.323943661971832,
"alnum_prop": 0.6729106628242075,
"repo_name": "grace-/opencv-3.0.0-cvpr",
"id": "c7a61c6dbb190a29fbe069734c6ca5d2169bbc20",
"size": "2100",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "opencv/samples/python/distrans.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "7270"
},
{
"name": "C",
"bytes": "11151404"
},
{
"name": "C++",
"bytes": "26086147"
},
{
"name": "Clojure",
"bytes": "1479"
},
{
"name": "Cuda",
"bytes": "1621124"
},
{
"name": "Java",
"bytes": "797237"
},
{
"name": "JavaScript",
"bytes": "350"
},
{
"name": "Objective-C",
"bytes": "170045"
},
{
"name": "Objective-C++",
"bytes": "161912"
},
{
"name": "Python",
"bytes": "1076646"
},
{
"name": "Scala",
"bytes": "5632"
},
{
"name": "Shell",
"bytes": "10357"
},
{
"name": "TeX",
"bytes": "48565"
}
],
"symlink_target": ""
} |
"""
Adds the concept of partner lists.
"""
from lino import ad, _
class Plugin(ad.Plugin):
"See :class:`lino.core.Plugin`."
verbose_name = _("Lists")
partner_model = 'contacts.Partner'
menu_group = 'contacts'
def on_site_startup(self, site):
self.partner_model = site.models.resolve(self.partner_model)
super(Plugin, self).on_site_startup(site)
def setup_main_menu(self, site, user_type, m):
# mg = site.plugins.contacts
mg = self.get_menu_group()
m = m.add_menu(mg.app_label, mg.verbose_name)
m.add_action('lists.Lists')
def setup_config_menu(self, site, user_type, m):
mg = self.get_menu_group()
# mg = site.plugins.contacts
m = m.add_menu(mg.app_label, mg.verbose_name)
m.add_action('lists.ListTypes')
def setup_explorer_menu(self, site, user_type, m):
mg = self.get_menu_group()
# mg = site.plugins.contacts
m = m.add_menu(mg.app_label, mg.verbose_name)
m.add_action('lists.AllMembers')
| {
"content_hash": "c251289cfc8c69232b2e20643aa70924",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 68,
"avg_line_length": 30.647058823529413,
"alnum_prop": 0.6113243761996161,
"repo_name": "lino-framework/xl",
"id": "b079f2e931cf93043bf6ef0d4dfdf624c6a88f0e",
"size": "1159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lino_xl/lib/lists/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "186625"
},
{
"name": "HTML",
"bytes": "1417287"
},
{
"name": "JavaScript",
"bytes": "1630929"
},
{
"name": "PHP",
"bytes": "40437"
},
{
"name": "Python",
"bytes": "2395471"
}
],
"symlink_target": ""
} |
from datetime import datetime
from sqlalchemy import Column, Integer, String, DateTime
from sqlalchemy.ext.mutable import MutableDict
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy_utils import JSONType
from sqlalchemy.orm.exc import NoResultFound
from flask_dance.utils import FakeCache, first, getattrd
from flask_dance.consumer.backend import BaseBackend
try:
from flask_login import AnonymousUserMixin
except ImportError:
AnonymousUserMixin = None
class OAuthConsumerMixin(object):
"""
A :ref:`SQLAlchemy declarative mixin <sqlalchemy:declarative_mixins>` with
some suggested columns for a model to store OAuth tokens:
``id``
an integer primary key
``provider``
a short name to indicate which OAuth provider issued
this token
``created_at``
an automatically generated datetime that indicates when
the OAuth provider issued this token
``token``
a :class:`JSON <sqlalchemy_utils.types.json.JSONType>` field to store
the actual token received from the OAuth provider
"""
@declared_attr
def __tablename__(cls):
return "flask_dance_{}".format(cls.__name__.lower())
id = Column(Integer, primary_key=True)
provider = Column(String(50))
created_at = Column(DateTime, default=datetime.utcnow)
token = Column(MutableDict.as_mutable(JSONType))
def __repr__(self):
parts = []
parts.append(self.__class__.__name__)
if self.id:
parts.append("id={}".format(self.id))
if self.provider:
parts.append('provider="{}"'.format(self.provider))
return "<{}>".format(" ".join(parts))
class SQLAlchemyBackend(BaseBackend):
"""
Stores and retrieves OAuth tokens using a relational database through
the `SQLAlchemy`_ ORM.
.. _SQLAlchemy: http://www.sqlalchemy.org/
"""
def __init__(self, model, session,
user=None, user_id=None, anon_user=None, cache=None):
"""
Args:
model: The SQLAlchemy model class that represents the OAuth token
table in the database. At a minimum, it must have a
``provider`` column and a ``token`` column. If tokens are to be
associated with individual users in the application, it must
also have a ``user`` relationship to your User model.
It is recommended, though not required, that your model class
inherit from
:class:`~flask_dance.consumer.storage.sqla.OAuthConsumerMixin`.
session:
The :class:`SQLAlchemy session <sqlalchemy.orm.session.Session>`
for the database. If you're using `Flask-SQLAlchemy`_, this is
``db.session``.
user:
If you want OAuth tokens to be associated with individual users
in your application, this is a reference to the user that you
want to use for the current request. It can be an actual User
object, a function that returns a User object, or a proxy to the
User object. If you're using `Flask-Login`_, this is
:attr:`~flask.ext.login.current_user`.
user_id:
If you want to pass an identifier for a user instead of an actual
User object, use this argument instead. Sometimes it can save
a database query or two. If both ``user`` and ``user_id`` are
                provided, ``user_id`` will take precedence.
anon_user:
If anonymous users are represented by a class in your application,
provide that class here. If you are using `Flask-Login`_,
anonymous users are represented by the
:class:`flask_login.AnonymousUserMixin` class, but you don't have
to provide that -- Flask-Dance treats it as the default.
cache:
An instance of `Flask-Cache`_. Providing a caching system is
highly recommended, but not required.
.. _Flask-SQLAlchemy: http://pythonhosted.org/Flask-SQLAlchemy/
.. _Flask-Login: https://flask-login.readthedocs.org/
.. _Flask-Cache: http://pythonhosted.org/Flask-Cache/
"""
self.model = model
self.session = session
self.user = user
self.user_id = user_id
self.anon_user = anon_user or AnonymousUserMixin
self.cache = cache or FakeCache()
def make_cache_key(self, blueprint, user=None, user_id=None):
uid = first([user_id, self.user_id, blueprint.config.get("user_id")])
if not uid:
u = first(_get_real_user(ref, self.anon_user)
for ref in (user, self.user, blueprint.config.get("user")))
uid = getattr(u, "id", u)
return "flask_dance_token|{name}|{user_id}".format(
name=blueprint.name, user_id=uid,
)
def get(self, blueprint, user=None, user_id=None):
# check cache
cache_key = self.make_cache_key(blueprint=blueprint, user=user, user_id=user_id)
token = self.cache.get(cache_key)
if token:
return token
# if not cached, make database queries
query = (
self.session.query(self.model)
.filter_by(provider=blueprint.name)
)
uid = first([user_id, self.user_id, blueprint.config.get("user_id")])
u = first(_get_real_user(ref, self.anon_user)
for ref in (user, self.user, blueprint.config.get("user")))
# check for user ID
if hasattr(self.model, "user_id") and uid:
query = query.filter_by(user_id=uid)
# check for user (relationship property)
elif hasattr(self.model, "user") and u:
query = query.filter_by(user=u)
# if we have the property, but not value, filter by None
elif hasattr(self.model, "user_id"):
query = query.filter_by(user_id=None)
# run query
try:
token = query.one().token
except NoResultFound:
token = None
# cache the result
self.cache.set(cache_key, token)
return token
def set(self, blueprint, token, user=None, user_id=None):
# if there was an existing model, delete it
existing_query = (
self.session.query(self.model)
.filter_by(provider=blueprint.name)
)
# check for user ID
has_user_id = hasattr(self.model, "user_id")
if has_user_id:
uid = first([user_id, self.user_id, blueprint.config.get("user_id")])
if uid:
existing_query = existing_query.filter_by(user_id=uid)
# check for user (relationship property)
has_user = hasattr(self.model, "user")
if has_user:
u = first(_get_real_user(ref, self.anon_user)
for ref in (user, self.user, blueprint.config.get("user")))
if u:
existing_query = existing_query.filter_by(user=u)
# queue up delete query -- won't be run until commit()
existing_query.delete()
# create a new model for this token
kwargs = {
"provider": blueprint.name,
"token": token,
}
if has_user_id and uid:
kwargs["user_id"] = uid
if has_user and u:
kwargs["user"] = u
self.session.add(self.model(**kwargs))
# commit to delete and add simultaneously
self.session.commit()
# invalidate cache
self.cache.delete(self.make_cache_key(
blueprint=blueprint, user=user, user_id=user_id
))
def delete(self, blueprint, user=None, user_id=None):
query = (
self.session.query(self.model)
.filter_by(provider=blueprint.name)
)
uid = first([user_id, self.user_id, blueprint.config.get("user_id")])
u = first(_get_real_user(ref, self.anon_user)
for ref in (user, self.user, blueprint.config.get("user")))
# check for user ID
if hasattr(self.model, "user_id") and uid:
query = query.filter_by(user_id=uid)
# check for user (relationship property)
elif hasattr(self.model, "user") and u:
query = query.filter_by(user=u)
# if we have the property, but not value, filter by None
elif hasattr(self.model, "user_id"):
query = query.filter_by(user_id=None)
# run query
query.delete()
self.session.commit()
# invalidate cache
self.cache.delete(self.make_cache_key(
blueprint=blueprint, user=user, user_id=user_id,
))
def _get_real_user(user, anon_user=None):
"""
Given a "user" that could be:
* a real user object
* a function that returns a real user object
* a LocalProxy to a real user object (like Flask-Login's ``current_user``)
This function returns the real user object, regardless of which we have.
"""
if hasattr(user, "_get_current_object"):
# this is a proxy
user = user._get_current_object()
if callable(user):
# this is a function
user = user()
if anon_user and isinstance(user, anon_user):
return None
return user
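# Hedged wiring sketch (illustration only, not part of the upstream module).
# It shows one way to combine OAuthConsumerMixin with SQLAlchemyBackend under
# Flask-SQLAlchemy; the model names, the GitHub provider, and the placeholder
# credentials are assumptions made purely for this example.
if __name__ == "__main__":
    from flask import Flask
    from flask_sqlalchemy import SQLAlchemy
    from flask_dance.contrib.github import make_github_blueprint
    app = Flask(__name__)
    app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///example.db"
    db = SQLAlchemy(app)
    class User(db.Model):
        id = Column(Integer, primary_key=True)
        email = Column(String(256), unique=True)
    class OAuth(OAuthConsumerMixin, db.Model):
        # link each stored token row to the local user it belongs to
        user_id = Column(Integer, db.ForeignKey(User.id))
        user = db.relationship(User)
    github_bp = make_github_blueprint(
        client_id="placeholder-id",
        client_secret="placeholder-secret",
        backend=SQLAlchemyBackend(OAuth, db.session),
    )
    app.register_blueprint(github_bp, url_prefix="/login")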
| {
"content_hash": "712a1d6d86501dee9cc3276bd9d3d2fa",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 88,
"avg_line_length": 40.03404255319149,
"alnum_prop": 0.594281462585034,
"repo_name": "nickdirienzo/flask-dance",
"id": "34b1c0873ea78070eccad0f680732905a11cdb80",
"size": "9408",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "flask_dance/consumer/backend/sqla.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "181560"
}
],
"symlink_target": ""
} |
from collections import namedtuple
from datetime import datetime, timedelta
from django.shortcuts import get_object_or_404
from django.utils.timezone import now
import django_filters
from rest_framework.views import APIView
from rest_framework.response import Response
from remo.api.views import BaseReadOnlyModelViewset
from remo.api.serializers import BaseKPISerializer
from remo.base.utils import get_quarter
from remo.reports.api.serializers import (ActivitiesDetailedSerializer,
ActivitiesSerializer)
from remo.reports.models import NGReport
KPI_WEEKS = 12
class ActivitiesFilter(django_filters.FilterSet):
user = django_filters.CharFilter(name='user__userprofile')
activity = django_filters.CharFilter(name='activity__name')
initiative = django_filters.CharFilter(name='campaign__name')
mentor = django_filters.CharFilter(name='mentor__userprofile')
category = django_filters.CharFilter(name='functional_areas__name')
class Meta:
model = NGReport
fields = ('is_passive', 'event', 'activity_description', 'report_date',
'location', 'longitude', 'latitude', 'link',
'link_description')
class ActivitiesViewSet(BaseReadOnlyModelViewset):
"""Return a list of activities."""
serializer_class = ActivitiesSerializer
model = NGReport
queryset = NGReport.objects.all()
filter_class = ActivitiesFilter
def retrieve(self, request, pk):
report = get_object_or_404(self.get_queryset(), pk=pk)
serializer = ActivitiesDetailedSerializer(report,
context={'request': request})
return Response(serializer.data)
def get_queryset(self):
orderby = self.request.query_params.get('orderby', 'DESC')
if orderby == 'ASC':
self.queryset = self.queryset.order_by('report_date')
return self.queryset
class ActivitiesKPIFilter(django_filters.FilterSet):
"""Filter for activities KPI endpoint."""
category = django_filters.CharFilter(name='functional_areas__name')
initiative = django_filters.CharFilter(name='campaign__name')
class Meta:
model = NGReport
fields = ['country', 'category', 'initiative']
class ActivitiesKPIView(APIView):
def get(self, request):
"""Returns serialized data for Activities KPI"""
qs = NGReport.objects.filter(report_date__lte=now())
activities = ActivitiesKPIFilter(request.query_params, queryset=qs)
weeks = int(request.query_params.get('weeks', KPI_WEEKS))
# Total number of activities to day
total = activities.qs.count()
# Quarter calculations
current_quarter_start = get_quarter()[1]
# Total number of activities for current quarter
quarter_total = activities.qs.filter(
report_date__gte=current_quarter_start).count()
# Total number of activities for the previous quarter
previous_quarter_end = current_quarter_start - timedelta(days=1)
previous_quarter_start = get_quarter(previous_quarter_end)[1]
previous_quarter_total = activities.qs.filter(
report_date__range=[previous_quarter_start,
previous_quarter_end]).count()
diff = quarter_total - previous_quarter_total
try:
# Percentage change of activities since start of quarter
percent_quarter = diff / float(previous_quarter_total)
except ZeroDivisionError:
if diff > 0:
percent_quarter = 100
else:
percent_quarter = 0
# Week calculations
today = datetime.combine(now().date(), datetime.min.time())
current_week_start = today - timedelta(days=now().weekday())
prev_week_start = current_week_start - timedelta(weeks=1)
# Total number of activities this week
week_total = activities.qs.filter(
report_date__gte=current_week_start).count()
query_range = [prev_week_start, current_week_start]
# Total number of activities for previous week
prev_week_total = activities.qs.filter(
report_date__range=query_range).count()
diff = week_total - prev_week_total
try:
# Percentage change of activities compared with previous week
percent_week = diff / float(prev_week_total)
except ZeroDivisionError:
if diff > 0:
percent_week = 100
else:
percent_week = 0
weekly_count = []
for i in range(weeks):
start = current_week_start - timedelta(weeks=i)
end = start + timedelta(weeks=1)
# Total number of activities (per week) for previous weeks
count = activities.qs.filter(
report_date__range=[start, end]).count()
weekly_count.append({'week': weeks - i, 'activities': count})
kwargs = {
'total': total,
'quarter_total': quarter_total,
'quarter_growth_percentage': percent_quarter * 100,
'week_total': week_total,
'week_growth_percentage': percent_week * 100,
'total_per_week': weekly_count
}
kpi = namedtuple('ActivitiesKPI', kwargs.keys())(*kwargs.values())
serializer = BaseKPISerializer(kpi)
return Response(serializer.data)
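def _growth_ratio(current, previous):
    """Hedged sketch (illustration only, not used by the view above): the
    quarter/week growth calculation factored into a helper, making the
    ZeroDivisionError handling explicit. The helper name is an assumption."""
    diff = current - previous
    try:
        return diff / float(previous)
    except ZeroDivisionError:
        # Mirror the view's fallback: 100 when growing from zero, otherwise 0.
        return 100 if diff > 0 else 0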
| {
"content_hash": "87523e5793ee83020b523304ffc2d4df",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 79,
"avg_line_length": 36.76510067114094,
"alnum_prop": 0.6341730558598029,
"repo_name": "akatsoulas/remo",
"id": "19a0f59168fbc4a50c8fe88d567d691ea05c10f5",
"size": "5478",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "remo/reports/api/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "235676"
},
{
"name": "HTML",
"bytes": "340694"
},
{
"name": "JavaScript",
"bytes": "288997"
},
{
"name": "Python",
"bytes": "763700"
},
{
"name": "Shell",
"bytes": "648"
},
{
"name": "Smarty",
"bytes": "215"
}
],
"symlink_target": ""
} |
"""
Ganglia module for getting latest instance count
"""
import os
import time
import threading
import traceback
import sys
from nova import flags
from nova import db
from nova import context
from nova import log as logging
from nova import utils
from nova import version
from nova.compute import manager as compute_manager
__worker__ = None
__lock__ = threading.Lock()
FLAGS = flags.FLAGS
args = ['compute-metrics']
utils.default_flagfile(args=args)
print args
flags.FLAGS(args)
print FLAGS.sql_connection
class UpdateComputeNodeStatusThread(threading.Thread):
"""Updates compute node status."""
def __init__(self, params):
print 'starting init'
threading.Thread.__init__(self)
self.manager = compute_manager.ComputeManager()
self.running = False
self.shuttingdown = False
self.refresh_rate = int(params['refresh_rate'])
self.status = {}
self._update_hypervisor()
print 'finished init'
def shutdown(self):
self.shuttingdown = True
if not self.running:
return
self.join()
def run(self):
self.running = True
while not self.shuttingdown:
__lock__.acquire()
self.update_status()
__lock__.release()
time.sleep(self.refresh_rate)
self.running = False
def update_status(self):
print 'starting update'
for updater in (self._update_count, self._update_status):
try:
print 'updating using %s' % updater
updater()
except:
traceback.print_exc()
print 'end update: %s' % self.status
def status_of(self, name):
val = None
if name in self.status:
__lock__.acquire()
val = self.status[name]
__lock__.release()
return val
def _update_count(self):
print 'updating instances'
self.status['nova_compute_instance_count'] = \
len(self.manager.driver.list_instances())
def _update_status(self):
ctxt = context.get_admin_context()
services = db.service_get_all_by_host(ctxt, FLAGS.host)
up_count = 0
compute_alive = False
for svc in services:
now = utils.utcnow()
delta = now - (svc['updated_at'] or svc['created_at'])
alive = (delta.seconds <= 15)
compute_alive = compute_alive or svc['topic'] == 'compute'
up_count += alive
self.status['nova_registered_services'] = len(services)
self.status['nova_compute_is_running'] = compute_alive and 'OK' or 'NO'
self.status['nova_running_services'] = up_count
def _update_hypervisor(self):
status = type(self.manager.driver).__name__
try:
hyperv = self.manager.driver.get_hypervisor_type()
status += ' with %s' % (hyperv)
except:
pass
self.status['nova_compute_driver'] = status
def version_handler(name):
return version.canonical_version_string()
def hypervisor_getter(worker):
global _hypervisor_name
return _hypervisor_name
def metric_init(params):
global __worker__
if not 'refresh_rate' in params:
params['refresh_rate'] = 60
__worker__ = UpdateComputeNodeStatusThread(params)
__worker__.start()
status_of = __worker__.status_of
instances = {'name': 'nova_compute_instance_count',
'call_back': status_of,
'time_max': 90,
'value_type': 'uint',
'units': '',
'slope': 'both',
'format': '%d',
'description': 'Openstack Instance Count',
'groups': 'openstack-compute'}
version = {'name': 'openstack_version',
'call_back': version_handler,
'time_max': 90,
'value_type': 'string',
'units': '',
'slope': 'zero',
'format': '%s',
'description': 'Openstack Version',
'groups': 'openstack-compute'}
compute = {'name': 'nova_compute_is_running',
'call_back': status_of,
'time_max': 90,
'value_type': 'string',
'units': '',
'slope': 'zero',
'format': '%s',
'description': 'Openstack Nova compute is running',
'groups': 'openstack-compute'}
hypervisor = {'name': 'nova_compute_driver',
'call_back': status_of,
'time_max': 90,
'value_type': 'string',
'units': '',
'slope': 'zero',
'format': '%s',
'description': 'Openstack Nova compute driver',
'groups': 'openstack-compute'}
run_services = {'name': 'nova_running_services',
'call_back': status_of,
'time_max': 90,
'value_type': 'uint',
'units': '',
'slope': 'both',
'format': '%d',
'description': 'Openstack Nova running services',
'groups': 'openstack-compute'}
reg_services = {'name': 'nova_registered_services',
'call_back': status_of,
'time_max': 90,
'value_type': 'uint',
'units': '',
'slope': 'both',
'format': '%d',
                    'description': 'Openstack Nova registered services',
'groups': 'openstack-compute'}
return [instances, version, compute, hypervisor,
run_services, reg_services]
def metric_cleanup():
"""Clean up the metric module."""
__worker__.shutdown()
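def _gmond_protocol_sketch():
    """Hedged sketch (illustration only): roughly how gmond drives a Python
    metric module such as this one. gmond calls metric_init(params) once to
    obtain the metric descriptors, periodically invokes each descriptor's
    call_back with the metric name, and calls metric_cleanup() on shutdown.
    This local loop merely imitates that sequence for testing."""
    descriptors = metric_init({'refresh_rate': 60})
    for d in descriptors:
        value = d['call_back'](d['name'])
        if value is not None:
            print '%s = %s' % (d['name'], d['format'] % value)
    metric_cleanup()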
if __name__ == '__main__':
try:
metric_init({})
        k = 'nova_compute_instance_count'
        v = __worker__.status_of(k)
print 'value for %s is %u' % (k, v)
except KeyboardInterrupt:
time.sleep(0.2)
os._exit(1)
finally:
metric_cleanup()
| {
"content_hash": "8370fce4502cb279585c31f7bc97c778",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 79,
"avg_line_length": 29.41346153846154,
"alnum_prop": 0.5232101994115724,
"repo_name": "Mitali-Sodhi/CodeLingo",
"id": "2f20a18e4ebaf0322f77dafd1cef96c7ec584530",
"size": "6924",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Dataset/python/compute-metrics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9681846"
},
{
"name": "C#",
"bytes": "1741915"
},
{
"name": "C++",
"bytes": "5686017"
},
{
"name": "HTML",
"bytes": "11812193"
},
{
"name": "Java",
"bytes": "11198971"
},
{
"name": "JavaScript",
"bytes": "21693468"
},
{
"name": "M",
"bytes": "61627"
},
{
"name": "Objective-C",
"bytes": "4085820"
},
{
"name": "Perl",
"bytes": "193472"
},
{
"name": "Perl6",
"bytes": "176248"
},
{
"name": "Python",
"bytes": "10296284"
},
{
"name": "Ruby",
"bytes": "1050136"
}
],
"symlink_target": ""
} |
''' Base class for preprocessing of RC files.
'''
from __future__ import print_function
class PreProcessor(object):
''' Base class for preprocessing of the RC file data before being
output through the RC2GRD tool. You should implement this class if
you have specific constructs in your RC files that GRIT cannot handle.'''
def Process(self, rctext, rcpath):
''' Processes the data in rctext.
Args:
rctext: string containing the contents of the RC file being processed
rcpath: the path used to access the file.
Return:
The processed text.
'''
raise NotImplementedError()
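class StripCustomMacroPreProcessor(PreProcessor):
  ''' Hedged example (illustration only, not part of GRIT): a minimal subclass
  showing how Process() can be implemented. The MY_MACRO(...) wrapper it strips
  is a made-up construct used purely for demonstration.
  '''
  def Process(self, rctext, rcpath):
    import re
    # Unwrap occurrences of MY_MACRO(x) down to x before GRIT parses the text.
    return re.sub(r'MY_MACRO\(([^)]*)\)', r'\1', rctext)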
| {
"content_hash": "43ad55b4bf44d22e6f760a9289fe4f21",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 75,
"avg_line_length": 29.571428571428573,
"alnum_prop": 0.7085346215780999,
"repo_name": "nwjs/chromium.src",
"id": "494fe7921f561dedd0f6c3b8fdcb659b17f36042",
"size": "762",
"binary": false,
"copies": "8",
"ref": "refs/heads/nw70",
"path": "tools/grit/grit/tool/preprocess_interface.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |