repo_name stringlengths 5-100 | path stringlengths 4-299 | copies stringclasses 990 values | size stringlengths 4-7 | content stringlengths 666-1.03M | license stringclasses 15 values | hash int64 -9,223,351,895,964,839,000 to 9,223,297,778B | line_mean float64 3.17-100 | line_max int64 7-1k | alpha_frac float64 0.25-0.98 | autogenerated bool 1 class |
---|---|---|---|---|---|---|---|---|---|---|
aptivate/econsensus | deploy/bootstrap.py | 5 | 2581 | #!/usr/bin/env python
"""
bootstrap.py will set up a virtualenv for you and update it as required.
Usage:
bootstrap.py # update virtualenv
bootstrap.py fake # just update the virtualenv timestamps
bootstrap.py clean # delete the virtualenv
bootstrap.py -h | --help # print this message and exit
Options for the plain command:
-f, --force # do the virtualenv update even if it is up to date
-r, --full-rebuild # delete the virtualenv before rebuilding
-q, --quiet # don't ask for user input
"""
# a script to set up the virtualenv so we can use fabric and tasks
import sys
import getopt
import ve_mgr
def print_help_text():
print __doc__
def print_error_msg(error_msg):
print error_msg
print_help_text()
return 2
def main(argv):
# check python version is high enough
ve_mgr.check_python_version(2, 6, __file__)
force_update = False
full_rebuild = False
fake_update = False
clean_ve = False
if argv:
try:
opts, args = getopt.getopt(argv[1:], 'hfqr',
['help', 'force', 'quiet', 'full-rebuild'])
except getopt.error, msg:
return print_error_msg('Bad options: %s' % msg)
# process options
for o, a in opts:
if o in ("-h", "--help"):
print_help_text()
return 0
if o in ("-f", "--force"):
force_update = True
if o in ("-r", "--full-rebuild"):
full_rebuild = True
if len(args) > 1:
return print_error_msg(
"Can only have one argument - you had %s" % (' '.join(args)))
if len(args) == 1:
if args[0] == 'fake':
fake_update = True
elif args[0] == 'clean':
clean_ve = True
# check for incompatible flags
if force_update and fake_update:
return print_error_msg("Cannot use --force with fake")
if full_rebuild and fake_update:
return print_error_msg("Cannot use --full-rebuild with fake")
if full_rebuild and clean_ve:
return print_error_msg("Cannot use --full-rebuild with clean")
updater = ve_mgr.UpdateVE()
if fake_update:
return updater.update_ve_timestamp()
elif clean_ve:
return updater.delete_virtualenv()
else:
updater.update_git_submodule()
return updater.update_ve(full_rebuild, force_update)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| gpl-3.0 | 2,062,151,340,668,008,200 | 30.096386 | 81 | 0.563735 | false |
tomspur/shedskin | examples/sat.py | 6 | 16409 | """
Internal implementation of a SAT solver, used by L{solver.SATSolver}.
This is not part of the public API.
"""
from __future__ import print_function
# Copyright (C) 2010, Thomas Leonard
# See the README file for details, or visit http://0install.net.
# The design of this solver is very heavily based on the one described in
# the MiniSat paper "An Extensible SAT-solver [extended version 1.2]"
# http://minisat.se/Papers.html
#
# The main differences are:
#
# - We care about which solution we find (not just "satisfiable" or "not").
# - We take care to be deterministic (always select the same versions given
# the same input). We do not do random restarts, etc.
# - We add an AtMostOneClause (the paper suggests this in the Exercises, and
# it's very useful for our purposes).
#
# modified by [email protected] to work with shedskin
TRUE, FALSE, NONE = 1, 0, -1
DEBUG=False
def debug(msg):
print("SAT:", msg)
# variables are numbered from 0
# literals have the same number as the corresponding variable,
# except that for negatives they are (-1-v):
#
# Variable Literal not(Literal)
# 0 0 -1
# 1 1 -2
def neg(lit):
return -1 - lit
def watch_index(lit):
if lit >= 0:
return lit * 2
return neg(lit) * 2 + 1
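# Illustrative sketch (not part of the original module): a few concrete values
# for the variable/literal encoding described above.  Never called; the helper
# name is made up for this example.
def _demo_literal_encoding():
    assert neg(0) == -1 and neg(1) == -2            # not(v) == -1 - v
    assert all(neg(neg(v)) == v for v in range(5))  # negation is an involution
    # watch lists are indexed 2v for "v became True", 2v + 1 for "v became False"
    assert [watch_index(l) for l in (0, -1, 1, -2)] == [0, 1, 2, 3]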
class UnionClause:
def __init__(self, lits, solver):
self.lits = lits
self.solver = solver
# Try to infer new facts.
# We can do this only when all of our literals are False except one,
# which is undecided. That is,
# False... or X or False... = True => X = True
#
# To get notified when this happens, we tell the solver to
# watch two of our undecided literals. Watching two undecided
# literals is sufficient. When one changes we check the state
# again. If we still have two or more undecided then we switch
# to watching them, otherwise we propagate.
#
# Returns False on conflict.
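    # Concrete instance of the rule above (illustrative): for the clause
    # (a or b or c), once b and c are both False the only way to satisfy the
    # clause is a = True, so propagate() enqueues a with this clause as reason.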
def propagate(self, lit):
# value[get(lit)] has just become False
#debug("%s: noticed %s has become False" % (self, solver.name_lit(neg(lit))))
# For simplicity, only handle the case where self.lits[1]
# is the one that just got set to False, so that:
# - value[lits[0]] = None | True
# - value[lits[1]] = False
# If it's the other way around, just swap them before we start.
if self.lits[0] == neg(lit):
self.lits[0], self.lits[1] = self.lits[1], self.lits[0]
if self.solver.lit_value(self.lits[0]) == TRUE:
# We're already satisfied. Do nothing.
self.solver.watch_lit(lit, self)
return True
assert self.solver.lit_value(self.lits[1]) == FALSE
# Find a new literal to watch now that lits[1] is resolved,
# swap it with lits[1], and start watching it.
for i in range(2, len(self.lits)):
value = self.solver.lit_value(self.lits[i])
if value != FALSE:
# Could be None or True. If it's True then we've already done our job,
# so this means we don't get notified unless we backtrack, which is fine.
self.lits[1], self.lits[i] = self.lits[i], self.lits[1]
self.solver.watch_lit(neg(self.lits[1]), self)
return True
        # Only lits[0] is now undefined.
self.solver.watch_lit(lit, self)
return self.solver.enqueue(self.lits[0], self)
def undo(self, lit): pass
# Why is lit True?
# Or, why are we causing a conflict (if lit is None)?
def cacl_reason(self, lit):
assert lit is self.lits[0]
# The cause is everything except lit.
return [neg(l) for l in self.lits if l is not lit]
def cacl_reason2(self):
return [neg(l) for l in self.lits]
def __repr__(self):
return "<some: %s>" % (', '.join(self.solver.name_lits(self.lits)))
# Using an array of VarInfo objects is less efficient than using multiple arrays, but
# easier for me to understand.
class VarInfo(object):
__slots__ = ['value', 'reason', 'reason_txt', 'level', 'undo', 'obj']
def __init__(self, obj):
self.value = NONE # True/False/None
self.reason = None # The constraint that implied our value, if True or False
self.reason_txt = None # The constraint that implied our value, if True or False
self.level = -1 # The decision level at which we got a value (when not None)
self.undo = [] # Constraints to update if we become unbound (by backtracking)
self.obj = obj # The object this corresponds to (for our caller and for debugging)
def __repr__(self):
return '%s=%s' % (self.name, {NONE: 'None', TRUE: 'True', FALSE: 'False'}[self.value])
@property
def name(self):
return str(self.obj)
class SATProblem(object):
def __init__(self):
# Propagation
self.watches = [] # watches[2i,2i+1] = constraints to check when literal[i] becomes True/False
self.propQ = [] # propagation queue
# Assignments
self.assigns = [] # [VarInfo]
self.trail = [] # order of assignments
self.trail_lim = [] # decision levels
self.toplevel_conflict = False
def get_decision_level(self):
return len(self.trail_lim)
def add_variable(self, obj):
if DEBUG: debug("add_variable('%s')" % obj)
index = len(self.assigns)
self.watches += [[], []] # Add watch lists for X and not(X)
self.assigns.append(VarInfo(obj))
return index
# lit is now True
# reason is the clause that is asserting this
# Returns False if this immediately causes a conflict.
def enqueue(self, lit, reason=None, reason_txt=None):
if reason:
if DEBUG: debug("%s => %s" % (reason, self.name_lit(lit)))
else:
if DEBUG: debug("%s => %s" % (reason_txt, self.name_lit(lit)))
old_value = self.lit_value(lit)
if old_value != NONE:
if old_value == FALSE:
# Conflict
return False
else:
# Already set (shouldn't happen)
return True
if lit < 0:
var_info = self.assigns[neg(lit)]
var_info.value = FALSE
else:
var_info = self.assigns[lit]
var_info.value = TRUE
var_info.level = self.get_decision_level()
var_info.reason = reason
var_info.reason_txt = reason_txt
self.trail.append(lit)
self.propQ.append(lit)
return True
# Pop most recent assignment from self.trail
def undo_one(self):
lit = self.trail[-1]
if DEBUG: debug("(pop %s)" % self.name_lit(lit))
var_info = self.get_varinfo_for_lit(lit)
var_info.value = NONE
var_info.reason = None
var_info.reason_txt = None
var_info.level = -1
self.trail.pop()
# while var_info.undo:
# var_info.undo.pop().undo(lit)
def cancel(self):
n_this_level = len(self.trail) - self.trail_lim[-1]
if DEBUG: debug("backtracking from level %d (%d assignments)" %
(self.get_decision_level(), n_this_level))
while n_this_level != 0:
self.undo_one()
n_this_level -= 1
self.trail_lim.pop()
def cancel_until(self, level):
while self.get_decision_level() > level:
self.cancel()
# Process the propQ.
# Returns None when done, or the clause that caused a conflict.
def propagate(self):
#debug("propagate: queue length = %d", len(self.propQ))
while self.propQ:
lit = self.propQ[0]
del self.propQ[0]
wi = watch_index(lit)
watches = self.watches[wi]
self.watches[wi] = []
if DEBUG: debug("%s -> True : watches: %s" % (self.name_lit(lit), watches))
            # Notify all watchers
for i in range(len(watches)):
clause = watches[i]
if not clause.propagate(lit):
# Conflict
# Re-add remaining watches
self.watches[wi] += watches[i+1:]
# No point processing the rest of the queue as
# we'll have to backtrack now.
self.propQ = []
return clause
return None
def impossible(self):
self.toplevel_conflict = True
def get_varinfo_for_lit(self, lit):
if lit >= 0:
return self.assigns[lit]
else:
return self.assigns[neg(lit)]
def lit_value(self, lit):
if lit >= 0:
value = self.assigns[lit].value
return value
else:
v = -1 - lit
value = self.assigns[v].value
if value == TRUE:
return FALSE
elif value == FALSE:
return TRUE
else:
return NONE
# Call cb when lit becomes True
def watch_lit(self, lit, cb):
#debug("%s is watching for %s to become True" % (cb, self.name_lit(lit)))
self.watches[watch_index(lit)].append(cb)
# Returns the new clause if one was added, True if none was added
# because this clause is trivially True, or False if the clause is
# False.
def _add_clause(self, lits, learnt):
clause = UnionClause(lits, self)
if learnt:
# lits[0] is None because we just backtracked.
# Start watching the next literal that we will
# backtrack over.
best_level = -1
best_i = 1
for i in range(1, len(lits)):
level = self.get_varinfo_for_lit(lits[i]).level
if level > best_level:
best_level = level
best_i = i
lits[1], lits[best_i] = lits[best_i], lits[1]
# Watch the first two literals in the clause (both must be
# undefined at this point).
for lit in lits[:2]:
self.watch_lit(neg(lit), clause)
return clause
def name_lits(self, lst):
return [self.name_lit(l) for l in lst]
# For nicer debug messages
def name_lit(self, lit):
if lit >= 0:
return self.assigns[lit].name
return "not(%s)" % self.assigns[neg(lit)].name
def add_clause(self, lits):
# Public interface. Only used before the solve starts.
assert lits
if DEBUG: debug("add_clause([%s])" % ', '.join(self.name_lits(lits)))
self._add_clause(lits, learnt = False)
def analyse(self, cause):
# After trying some assignments, we've discovered a conflict.
# e.g.
# - we selected A then B then C
# - from A, B, C we got X, Y
# - we have a rule: not(A) or not(X) or not(Y)
#
# The simplest thing to do would be:
# 1. add the rule "not(A) or not(B) or not(C)"
# 2. unassign C
#
        # Then we'd deduce not(C) and we could try something else.
# However, that would be inefficient. We want to learn a more
# general rule that will help us with the rest of the problem.
#
# We take the clause that caused the conflict ("cause") and
# ask it for its cause. In this case:
#
# A and X and Y => conflict
#
# Since X and Y followed logically from A, B, C there's no
# point learning this rule; we need to know to avoid A, B, C
# *before* choosing C. We ask the two variables deduced at the
# current level (X and Y) what caused them, and work backwards.
# e.g.
#
# X: A and C => X
# Y: C => Y
#
# Combining these, we get the cause of the conflict in terms of
# things we knew before the current decision level:
#
# A and X and Y => conflict
# A and (A and C) and (C) => conflict
# A and C => conflict
#
# We can then learn (record) the more general rule:
#
# not(A) or not(C)
#
# Then, in future, whenever A is selected we can remove C and
# everything that depends on it from consideration.
learnt = [0] # The general rule we're learning
btlevel = 0 # The deepest decision in learnt
p = 0 # The literal we want to expand now
first = True
seen = set() # The variables involved in the conflict
counter = 0
while True:
# cause is the reason why p is True (i.e. it enqueued it).
# The first time, p is None, which requests the reason
# why it is conflicting.
if first:
if DEBUG: debug("Why did %s make us fail?" % cause)
p_reason = cause.cacl_reason2()
if DEBUG: debug("Because: %s => conflict" % (' and '.join(self.name_lits(p_reason))))
else:
if DEBUG: debug("Why did %s lead to %s?" % (cause, self.name_lit(p)))
p_reason = cause.cacl_reason(p)
if DEBUG: debug("Because: %s => %s" % (' and '.join(self.name_lits(p_reason)), self.name_lit(p)))
# p_reason is in the form (A and B and ...)
# p_reason => p
# Check each of the variables in p_reason that we haven't
# already considered:
# - if the variable was assigned at the current level,
# mark it for expansion
# - otherwise, add it to learnt
for lit in p_reason:
var_info = self.get_varinfo_for_lit(lit)
if var_info not in seen:
seen.add(var_info)
if var_info.level == self.get_decision_level():
# We deduced this var since the last decision.
# It must be in self.trail, so we'll get to it
# soon. Remember not to stop until we've processed it.
counter += 1
elif var_info.level > 0:
# We won't expand lit, just remember it.
# (we could expand it if it's not a decision, but
# apparently not doing so is useful)
learnt.append(neg(lit))
btlevel = max(btlevel, var_info.level)
# else we already considered the cause of this assignment
# At this point, counter is the number of assigned
# variables in self.trail at the current decision level that
# we've seen. That is, the number left to process. Pop
# the next one off self.trail (as well as any unrelated
# variables before it; everything up to the previous
# decision has to go anyway).
# On the first time round the loop, we must find the
# conflict depends on at least one assignment at the
# current level. Otherwise, simply setting the decision
# variable caused a clause to conflict, in which case
# the clause should have asserted not(decision-variable)
# before we ever made the decision.
# On later times round the loop, counter was already >
# 0 before we started iterating over p_reason.
assert counter > 0
while True:
p = self.trail[-1]
first = False
var_info = self.get_varinfo_for_lit(p)
cause = var_info.reason
self.undo_one()
if var_info in seen:
break
if DEBUG: debug("(irrelevant)")
counter -= 1
if counter <= 0:
assert counter == 0
# If counter = 0 then we still have one more
# literal (p) at the current level that we
# could expand. However, apparently it's best
# to leave this unprocessed (says the minisat
# paper).
break
# p is the literal we decided to stop processing on. It's either
# a derived variable at the current level, or the decision that
# led to this level. Since we're not going to expand it, add it
# directly to the learnt clause.
learnt[0] = neg(p)
if DEBUG: debug("Learnt: %s" % (' or '.join(self.name_lits(learnt))))
return learnt, btlevel
def solve(self):
# Check whether we detected a trivial problem
# during setup.
if self.toplevel_conflict:
if DEBUG: debug("FAIL: toplevel_conflict before starting solve!")
return False
while True:
# Use logical deduction to simplify the clauses
# and assign literals where there is only one possibility.
conflicting_clause = self.propagate()
if not conflicting_clause:
if DEBUG: debug("new state: %s" % self.assigns)
if all(info.value != NONE for info in self.assigns):
# Everything is assigned without conflicts
if DEBUG: debug("SUCCESS!")
return True
else:
# Pick a variable and try assigning it one way.
# If it leads to a conflict, we'll backtrack and
# try it the other way.
for lit, assign in enumerate(self.assigns):
if assign.value == NONE:
break
#print "TRYING:", self.name_lit(lit)
assert self.lit_value(lit) == NONE
self.trail_lim.append(len(self.trail))
r = self.enqueue(lit, reason_txt = "considering")
assert r is True
else:
if self.get_decision_level() == 0:
if DEBUG: debug("FAIL: conflict found at top level")
return False
else:
# Figure out the root cause of this failure.
learnt, backtrack_level = self.analyse(conflicting_clause)
self.cancel_until(backtrack_level)
if len(learnt) == 1:
# A clause with only a single literal is represented
# as an assignment rather than as a clause.
reason = "learnt"
self.enqueue(learnt[0], reason_txt=reason)
else:
c = self._add_clause(learnt, learnt = True)
# Everything except the first literal in learnt is known to
# be False, so the first must be True.
e = self.enqueue(learnt[0], c)
assert e is True
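# Minimal end-to-end sketch (illustrative only, not part of the original
# module): a two-variable instance that exercises the propagation, conflict
# analysis and clause learning described above.  The helper name and the
# expected model are specific to this example; it is never invoked here.
def _demo_tiny_instance():
    p = SATProblem()
    a = p.add_variable("a")
    b = p.add_variable("b")
    p.add_clause([a, b])            # a or b
    p.add_clause([neg(a), b])       # not(a) or b
    p.add_clause([neg(a), neg(b)])  # not(a) or not(b)
    assert p.solve()
    # The solver first tries a = True, hits a conflict, learns not(a), and
    # unit propagation then forces b = True.
    return p.assigns[a].value == FALSE and p.assigns[b].value == TRUE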
def main():
cnf = [l.strip().split() for l in file('testdata/hole007.cnf') if l[0] not in 'c%0\n']
clauses = [[int(x) for x in m[:-1]] for m in cnf if m[0] != 'p']
nrofvars = [int(n[2]) for n in cnf if n[0] == 'p'][0]
p = SATProblem()
for i in range(nrofvars):
p.add_variable(i)
for cl in clauses:
p.add_clause([i-1 if i > 0 else neg(-i-1) for i in cl])
print(p.solve())
if __name__ == '__main__':
main()
| gpl-3.0 | -1,293,952,344,537,491,000 | 30.555769 | 101 | 0.647389 | false |
rafaelvieiras/PseudoTV_Live | plugin.video.pseudotv.live/resources/lib/overlay.py | 1 | 5261 | # Copyright (C) 2020 Lunatixz
# This file is part of PseudoTV Live.
# PseudoTV Live is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# PseudoTV Live is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with PseudoTV Live. If not, see <http://www.gnu.org/licenses/>.
# -*- coding: utf-8 -*-
from resources.lib.globals import *
class GUI(xbmcgui.WindowXML):
def __init__(self, *args, **kwargs):
xbmcgui.WindowXML.__init__(self, *args, **kwargs)
self.myPlayer = MY_PLAYER
self.pvritem = {}
self.listitems = []
self.maxDays = getSettingInt('Max_Days')
self.bugToggleThread = threading.Timer(CHANNELBUG_CHECK_TIME, self.bugToggle)
# self.onNextThread = threading.Timer(NOTIFICATION_CHECK_TIME, self.checkOnNext)
def log(self, msg, level=xbmc.LOGDEBUG):
return log('%s: %s'%(self.__class__.__name__,msg),level)
def onInit(self):
self.log('onInit')
self.listitems = []
self.nowitem = []
self.nextitems = []
self.pvritem = getCurrentChannelItem()
self.container = self.getControl(40000)
self.container.reset()
self.channelbug = self.getControl(41004)
self.channelbug.setImage(self.pvritem.get('icon',LOGO))
if self.load():
self.bugToggle()
# self.checkOnNext()
else: self.closeOverlay()
def reset(self):
self.log('reset')
self.onInit()
def load(self):
self.log('load')
try:
if not self.pvritem or not isPseudoTV(): return False
ruleslist = []#check overlay channel rules.
self.nowitem = self.pvritem.get('broadcastnow',{}) # current item
self.nextitems = self.pvritem.get('broadcastnext',[])[slice(0, PAGE_LIMIT)] # list of upcoming items, truncate for speed.
self.listitems.append(buildItemListItem(loadJSON(self.nowitem.get('writer',{}))))
self.listitems.extend([buildItemListItem(loadJSON(nextitem.get('writer',''))) for nextitem in self.nextitems])
self.container.addItems(self.listitems)
return True
except Exception as e:
self.log("load, Failed! " + str(e), xbmc.LOGERROR)
return False
def bugToggle(self, state=True):
self.log('bugToggle, state = %s'%(state))
try:
if self.bugToggleThread.isAlive():
self.bugToggleThread.cancel()
self.channelbug.setVisible(state)
wait = {True:float(random.randint(10,30)),False:float(random.randint(900,1800))}[state]
nstate = not bool(state)
self.bugToggleThread = threading.Timer(wait, self.bugToggle, [nstate])
self.bugToggleThread.name = "bugToggleThread"
self.bugToggleThread.start()
except Exception as e: self.log("bugToggle, Failed! " + str(e), xbmc.LOGERROR)
# def getCurrentPosition(self):
# self.log('getCurrentPosition')
# for idx, listitem in enumerate(self.nextitems): #todo rework to include start times?
# if listitem.getVideoInfoTag().getTitle() == self.myPlayer.getVideoInfoTag().getTitle():
# return idx
def checkOnNext(self):
self.log('checkOnNext')
# if self.onNextThread.isAlive():
# self.onNextThread.cancel()
# pos = self.getCurrentPosition()
# print(self.pvritem)
# print(pos,self.listitems[pos])
# print(self.listitems[pos].getPath())
# print(self.listitems[pos].getVideoInfoTag().getDuration())
# print(self.listitems[pos].getVideoInfoTag().getTitle())
# print(self.listitems[pos].getProperty('duration'))
# self.onNextThread = threading.Timer(NOTIFICATION_CHECK_TIME, self.checkOnNext)
# self.onNextThread.name = "onNextThread"
# self.onNextThread.start()
# timedif = self.listitems[self.getCurrentPosition()].getProperty('runtime') - self.myPlayer.getTime()
# def onNextToggle(self, state=True):
# self.log('onNextToggle, state = %s'%(state))
# # if self.notificationShowedNotif == False and timedif < NOTIFICATION_TIME_BEFORE_END and timedif > NOTIFICATION_DISPLAY_TIME:
# # nextshow =
def closeOverlay(self):
self.log('closeOverlay')
if self.bugToggleThread.isAlive():
self.bugToggleThread.cancel()
# if self.onNextThread.isAlive():
# self.onNextThread.cancel()
self.close()
def onAction(self, act):
self.closeOverlay()
def onClick(self, controlId):
self.closeOverlay() | gpl-3.0 | 8,106,963,545,393,538,000 | 37.408759 | 136 | 0.611101 | false |
OpenPaymentPlatform/python | opp/utils.py | 1 | 14760 | # coding=utf-8
__author__ = 'PAY.ON'
# Successful result codes
SUCCESSFUL = ("000.000.000",
"000.100.110",
"000.100.111",
"000.100.112",
"000.300.000",
"000.600.000",
"000.400.000",
"000.400.010",
"000.400.020",
"000.400.030",
"000.400.040",
"000.400.050",
"000.400.060",
"000.400.070",
"000.400.080",
"000.400.090",
"000.400.100",
"000.200.000",
"000.100.200",
"000.100.201",
"000.100.202",
"000.100.203",
"000.100.204",
"000.100.205",
"000.100.206",
"000.100.207",
"000.100.208",
"000.100.209",
"000.100.210",
"000.100.220",
"000.100.221",
"000.100.222",
"000.100.223",
"000.100.224",
"000.100.225",
"000.100.226",
"000.100.227",
"000.100.228",
"000.100.229",
"000.100.230",
"000.100.299")
# Rejected result codes
REJECTED = ("000.400.101",
"000.400.102",
"000.400.103",
"000.400.104",
"000.400.105",
"000.400.107",
"000.400.108",
"000.400.106",
"000.400.200")
# External Bank result codes
EXTERNAL_BANK = ("800.400.500",
"800.100.100",
"800.100.150",
"800.100.151",
"800.100.152",
"800.100.153",
"800.100.154",
"800.100.155",
"800.100.156",
"800.100.157",
"800.100.158",
"800.100.159",
"800.100.160",
"800.100.161",
"800.100.162",
"800.100.163",
"800.100.164",
"800.100.165",
"800.100.166",
"800.100.167",
"800.100.168",
"800.100.169",
"800.100.170",
"800.100.171",
"800.100.172",
"800.100.173",
"800.100.174",
"800.100.175",
"800.100.176",
"800.100.177",
"800.100.190",
"800.100.191",
"800.100.192",
"800.100.195",
"800.100.196",
"800.100.197",
"800.100.198",
"800.100.500",
"800.100.501",
"800.700.101",
"800.700.201",
"800.700.500",
"800.800.102",
"800.800.202",
"800.800.302",
"800.900.101",
"800.900.200",
"800.900.401",
"800.900.450",
"800.100.402",
"800.500.110",
"900.100.100",
"900.100.200",
"900.100.201",
"900.100.202",
"900.100.300",
"900.100.400",
"900.100.500",
"900.200.100",
"900.300.600",
"900.400.100",
"900.100.600",
"999.999.999",
"800.500.100",
"800.600.100",
"800.700.100",
"600.100.100",
"800.800.800",
"800.800.801",
"800.900.100",
"100.397.101",
"100.396.101",
"100.396.102",
"100.396.103",
"100.396.104",
"100.396.106",
"100.396.201",
"100.395.501",
"100.395.502",
"100.395.101",
"100.395.102")
# Risk result codes
RISK = ("100.400.500",
"100.400.080",
"100.400.081",
"100.400.083",
"100.400.084",
"100.400.085",
"100.400.086",
"100.400.087",
"100.400.091",
"100.400.100",
"100.400.300",
"100.400.301",
"100.400.302",
"100.400.303",
"100.400.304",
"100.400.305",
"100.400.306",
"100.400.307",
"100.400.308",
"100.400.309",
"100.400.310",
"800.400.100",
"800.400.101",
"800.400.102",
"800.400.103",
"800.400.104",
"800.400.105",
"800.400.110",
"800.400.200",
"100.370.100",
"100.370.110",
"100.370.111",
"100.380.100",
"100.380.110",
"100.400.000",
"100.400.001",
"100.400.002",
"100.400.005",
"100.400.007",
"100.400.020",
"100.400.021",
"100.400.030",
"100.400.039",
"100.400.040",
"100.400.041",
"100.400.042",
"100.400.043",
"100.400.044",
"100.400.045",
"100.400.051",
"100.400.060",
"100.400.061",
"100.400.063",
"100.400.064",
"100.400.065",
"100.400.071",
"100.400.141",
"100.400.142",
"100.400.143",
"100.400.144",
"100.400.145",
"100.400.146",
"100.400.147",
"100.400.148",
"100.400.149",
"100.400.150",
"100.400.151",
"100.400.241",
"100.400.242",
"100.400.243",
"100.400.120",
"100.400.121",
"100.400.122",
"100.400.123",
"100.400.130",
"100.400.139",
"100.400.140",
"100.400.260",
"100,400,319",
"100.380.501",
"100.380.401",
"100.390.101",
"100.390.102",
"100.390.103",
"100.390.104",
"100.390.105",
"100.390.106",
"100.390.107",
"100.390.108",
"100.390.109",
"100.390.110",
"100.390.111",
"100.390.112",
"100.390.113",
"100.100.701",
"800.300.101",
"800.300.102",
"800.300.200",
"800.300.301",
"800.300.302",
"800.300.401",
"800.300.500",
"800.300.501",
"800.200.159",
"800.200.160",
"800.200.165",
"800.200.202",
"800.200.208",
"800.200.220",
"800.110.100",
"800.120.100",
"800.120.101",
"800.120.102",
"800.120.103",
"800.120.200",
"800.120.201",
"800.120.202",
"800.120.203",
"800.120.300",
"800.120.401",
"800.160.100",
"800.160.110",
"800.160.120",
"800.160.130",
"800.130.100",
"800.140.100",
"800.140.101",
"800.140.110",
"800.140.111",
"800.140.112",
"800.140.113",
"800.150.100",
"100.550.310",
"100.550.311",
"100.550.312",
"800.400.150",
"800.400.151")
# Validation result codes
VALIDATION = ("600.200.100",
"600.200.200",
"600.200.201",
"600.200.202",
"600.200.300",
"600.200.310",
"600.200.400",
"600.200.500",
"600.200.600",
"600.200.700",
"600.200.800",
"600.200.810",
"500.100.201",
"500.100.202",
"500.100.203",
"500.100.301",
"500.100.302",
"500.100.303",
"500.100.304",
"500.100.401",
"500.100.402",
"500.100.403",
"500.200.101",
"800.121.100",
"100.150.100",
"100.150.101",
"100.150.200",
"100.150.201",
"100.150.202",
"100.150.203",
"100.150.204",
"100.150.205",
"100.150.300",
"100.200.200",
"100.350.100",
"100.350.101",
"100.350.200",
"100.350.201",
"100.350.301",
"100.350.302",
"100.350.303",
"100.350.310",
"100.350.311",
"100.350.312",
"100.350.313",
"100.350.314",
"100.350.315",
"100.350.400",
"100.350.500",
"100.350.600",
"100.350.601",
"100.350.610",
"100.250.100",
"100.250.105",
"100.250.106",
"100.250.107",
"100.250.110",
"100.250.111",
"100.250.120",
"100.250.121",
"100.250.122",
"100.250.123",
"100.250.124",
"100.250.125",
"100.250.250",
"100.360.201",
"100.360.300",
"100.360.303",
"100.360.400",
"100.212.103",
"700.100.100",
"700.100.200",
"700.100.300",
"700.100.400",
"700.100.500",
"700.100.600",
"700.100.700",
"700.100.701",
"700.100.710",
"700.400.700",
"700.300.100",
"700.300.200",
"700.300.300",
"700.300.400",
"700.300.500",
"700.300.600",
"700.300.700",
"700.400.000",
"700.400.100",
"700.400.101",
"700.400.200",
"700.400.300",
"700.400.400",
"700.400.402",
"700.400.410",
"700.400.420",
"700.400.510",
"700.400.520",
"700.400.530",
"700.400.540",
"700.400.550",
"700.400.560",
"700.400.561",
"700.400.562",
"700.400.570",
"700.450.001",
"700.450.001",
"200.100.201",
"200.100.300",
"200.100.301",
"200.100.302",
"200.100.401",
"200.100.402",
"200.100.403",
"200.100.501",
"200.100.502",
"200.100.503",
"200.100.504",
"200.100.101",
"200.100.102",
"200.100.103",
"200.200.106",
"100.500.101",
"100.500.201",
"100.500.301",
"100.500.302",
"100.900.500",
"800.900.201",
"800.900.300",
"800.900.301",
"100.300.101",
"100.300.200",
"100.300.300",
"100.300.400",
"100.300.401",
"100.300.402",
"100.300.501",
"100.300.600",
"100.300.601",
"100.300.700",
"100.300.701",
"100.370.101",
"100.370.102",
"100.370.121",
"100.370.122",
"100.370.123",
"100.370.124",
"100.370.125",
"100.370.131",
"100.370.132",
"100.600.500",
"200.100.150",
"200.100.151",
"200.100.199",
"200.300.403",
"200.300.404",
"200.300.405",
"200.300.406",
"200.300.407",
"800.900.302",
"800.900.303",
"100.800.100",
"100.800.101",
"100.800.102",
"100.800.200",
"100.800.201",
"100.800.202",
"100.800.300",
"100.800.301",
"100.800.302",
"100.800.400",
"100.800.401",
"100.800.500",
"100.800.501",
"100.900.100",
"100.900.101",
"100.900.105",
"100.900.200",
"100.900.300",
"100.900.301",
"100.900.400",
"100.900.401",
"100.900.450",
"100.700.100",
"100.700.101",
"100.700.200",
"100.700.201",
"100.700.300",
"100.700.400",
"100.700.500",
"100.700.800",
"100.700.801",
"100.700.802",
"100.700.810",
"100.100.400",
"100.100.401",
"100.100.402",
"100.100.100",
"100.100.101",
"100.100.200",
"100.100.201",
"100.100.300",
"100.100.301",
"100.100.303",
"100.100.304",
"100.100.500",
"100.100.501",
"100.100.600",
"100.100.601",
"100.100.650",
"100.100.651",
"100.100.700",
"100.200.100",
"100.200.103",
"100.200.104",
"100.210.101",
"100.210.102",
"100.211.101",
"100.211.102",
"100.211.103",
"100.211.104",
"100.211.105",
"100.211.106",
"100.212.101",
"100.212.102",
"100.550.300",
"100.550.301",
"100.550.303",
"100.550.400",
"100.550.401",
"100.550.601",
"100.550.603",
"100.550.605",
"100.380.101",
"100.380.201",
"100.380.305",
"100.380.306")
CODE_SUCCESSFUL = 'SUCCESSFUL'
CODE_REJECTED = 'REJECTED'
CODE_EXTERNAL_BANK = 'EXTERNAL_BANK'
CODE_RISK = 'RISK'
CODE_VALIDATION = 'VALIDATION'
RESULT_CODES_GROUPS = {
SUCCESSFUL: CODE_SUCCESSFUL,
REJECTED: CODE_REJECTED,
EXTERNAL_BANK: CODE_EXTERNAL_BANK,
RISK: CODE_RISK,
VALIDATION: CODE_VALIDATION
}
# check to which code group input belongs
def check_result_code(result_code):
for group in RESULT_CODES_GROUPS:
if result_code in group:
return RESULT_CODES_GROUPS[group]
return "UNKNOWN"
| mit | -6,080,162,814,292,131,000 | 26.384045 | 45 | 0.344851 | false |
Wikia/sparrow | tests/mocks/chrome.py | 2 | 1123 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
import mock
class ChromeMock(object):
@staticmethod
def create(*args, **kwargs):
chrome_mock = mock.MagicMock()
chrome_mock.execute_script.side_effect = ChromeMock.__navigation_timing_api_side_effect
return chrome_mock
@staticmethod
def __navigation_timing_api_side_effect(*args, **kwargs):
if args[0] == 'return new Date().getTime()':
return 0
if args[0] == 'return document.readyState':
return 'complete'
if args[0] == 'return window.performance.timing.loadEventEnd':
return 7
elif args[0] == 'return window.performance.timing':
return dict(
navigationStart=1,
responseStart=2,
responseEnd=3,
domInteractive=4,
domComplete=5,
domContentLoadedEventEnd=6,
loadEventEnd=7
)
else:
raise Exception('Invalid __navigation_timing_api_side_effect invocation') | mit | -388,273,292,583,746,300 | 32.058824 | 95 | 0.579697 | false |
zdohnal/system-config-printer | statereason.py | 2 | 6994 | #!/usr/bin/python3
## Copyright (C) 2007, 2008, 2009, 2010, 2012, 2013, 2014 Red Hat, Inc.
## Authors:
## Tim Waugh <[email protected]>
## Jiri Popelka <[email protected]>
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import cups
import os
import config
import gettext
gettext.install(domain=config.PACKAGE, localedir=config.localedir)
class StateReason:
REPORT=1
WARNING=2
ERROR=3
LEVEL_ICON={
REPORT: "dialog-information",
WARNING: "dialog-warning",
ERROR: "dialog-error"
}
def __init__(self, printer, reason, ppdcache=None):
self.printer = printer
self.reason = reason
self.level = None
self.canonical_reason = None
self._ppd = None
if ppdcache:
ppdcache.fetch_ppd (printer, self._got_ppd)
def _got_ppd (self, name, result, exc):
self._ppd = result
def get_printer (self):
return self.printer
def get_level (self):
if self.level is not None:
return self.level
if (self.reason.endswith ("-report") or
self.reason in ["connecting-to-device",
"cups-ipp-missing-cancel-job",
"cups-ipp-missing-get-job-attributes",
"cups-ipp-missing-get-printer-attributes",
"cups-ipp-missing-job-history",
"cups-ipp-missing-job-id",
"cups-ipp-missing-job-state",
"cups-ipp-missing-operations-supported",
"cups-ipp-missing-print-job",
"cups-ipp-missing-printer-is-accepting-jobs",
"cups-ipp-missing-printer-state-reasons",
"cups-ipp-missing-send-document",
"cups-ipp-missing-validate-job",
"cups-ipp-wrong-http-version"]):
self.level = self.REPORT
elif self.reason.endswith ("-warning"):
self.level = self.WARNING
else:
self.level = self.ERROR
return self.level
def get_reason (self):
if self.canonical_reason:
return self.canonical_reason
level = self.get_level ()
reason = self.reason
if level == self.WARNING and reason.endswith ("-warning"):
reason = reason[:-8]
elif level == self.ERROR and reason.endswith ("-error"):
reason = reason[:-6]
self.canonical_reason = reason
return self.canonical_reason
def __repr__ (self):
self.get_level()
if self.level == self.REPORT:
level = "REPORT"
elif self.level == self.WARNING:
level = "WARNING"
else:
level = "ERROR"
return "<statereason.StateReason (%s,%s,%s)>" % (level,
self.get_printer (),
self.get_reason ())
def get_description (self):
messages = {
'toner-low': (_("Toner low"),
_("Printer '%s' is low on toner.")),
'toner-empty': (_("Toner empty"),
_("Printer '%s' has no toner left.")),
'cover-open': (_("Cover open"),
_("The cover is open on printer '%s'.")),
'door-open': (_("Door open"),
_("The door is open on printer '%s'.")),
'media-low': (_("Paper low"),
_("Printer '%s' is low on paper.")),
'media-empty': (_("Out of paper"),
_("Printer '%s' is out of paper.")),
'marker-supply-low': (_("Ink low"),
_("Printer '%s' is low on ink.")),
'marker-supply-empty': (_("Ink empty"),
_("Printer '%s' has no ink left.")),
'offline': (_("Printer off-line"),
_("Printer '%s' is currently off-line.")),
'connecting-to-device': (_("Not connected?"),
_("Printer '%s' may not be connected.")),
'other': (_("Printer error"),
_("There is a problem on printer '%s'.")),
'cups-missing-filter': (_("Printer configuration error"),
_("There is a missing print filter for "
"printer '%s'.")),
}
try:
(title, text) = messages[self.get_reason ()]
try:
text = text % self.get_printer ()
except TypeError:
# Probably an incorrect translation, missing a '%s'.
pass
except KeyError:
if self.get_level () == self.REPORT:
title = _("Printer report")
elif self.get_level () == self.WARNING:
title = _("Printer warning")
elif self.get_level () == self.ERROR:
title = _("Printer error")
reason = self.get_reason ()
if self._ppd:
try:
schemes = ["text", "http", "help", "file"]
localized_reason = ""
for scheme in schemes:
lreason = self._ppd.localizeIPPReason(self.reason,
scheme)
if lreason is not None:
localized_reason = localized_reason + lreason + ", "
if localized_reason != "":
reason = localized_reason[:-2]
except RuntimeError:
pass
text = (_("Printer '%s': '%s'.") % (self.get_printer (), reason))
return (title, text)
def get_tuple (self):
return (self.get_level (), self.get_printer (), self.get_reason ())
def __eq__(self, other):
if type (other) != type (self):
return False
return self.get_level () == other.get_level ()
def __lt__(self, other):
if type (other) != type (self):
return False
return self.get_level () < other.get_level ()
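# Minimal usage sketch (illustrative only, not part of the original module).
# "laserjet" and the reason keyword are made-up example values; the returned
# text assumes an untranslated (English) locale.  Never invoked here.
def _demo_statereason():
    r = StateReason("laserjet", "toner-low-warning")
    assert r.get_level() == StateReason.WARNING
    assert r.get_reason() == "toner-low"
    return r.get_description()  # ("Toner low", "Printer 'laserjet' is low on toner.")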
| gpl-2.0 | -1,565,716,293,651,410,400 | 37.855556 | 82 | 0.489277 | false |
jlecount-sungevity/holmium.core | tests/iframe_tests.py | 3 | 2664 | import unittest
import mock
from holmium.core import Page, Element, Elements, Section, Locators
from tests.utils import get_driver, make_temp_page
class BasicSectionIframe(Section):
element = Element( Locators.CLASS_NAME, "frame_el")
class BasicPageIframe(Page):
element = Element( Locators.CLASS_NAME, "frame_el" )
elements = Elements( Locators.CLASS_NAME, "frame_el" )
frame_1 = BasicSectionIframe(Locators.CLASS_NAME, "section", "frame_1")
frame_2 = BasicSectionIframe(Locators.CLASS_NAME, "section", "frame_2")
class BasicPage(Page):
element = Element( Locators.ID, "test_id" )
class IFrameTest(unittest.TestCase):
def setUp(self):
self.driver = get_driver()
def test_basic_po_with_frame(self):
frame1 = "<html><body><div class='section'><div class='frame_el'>frame 1 el</div></div></body></html>"
frame2 = "<html><body><div class='section'><div class='frame_el'>frame 2 el</div></div></body></html>"
uri_frame_1 = make_temp_page(frame1)
uri_frame_2 = make_temp_page(frame2)
p1 = '<html><body><iframe id="frame_1" src="%s"/></body></html>' % uri_frame_1
p2 = '<html><body><iframe id="frame_1" src="%s"></iframe><iframe id="frame_2" src="%s"></iframe></body></html>' % ( uri_frame_1, uri_frame_2)
driver = get_driver()
uri = make_temp_page(p1)
with mock.patch("holmium.core.pageobject.log") as log:
p = BasicPageIframe(driver, uri, iframe="frame_1")
self.assertEqual(p.element.text, "frame 1 el")
self.assertEqual(p.frame_1.element.text, "frame 1 el")
self.assertTrue(p.frame_2.element == None)
uri = make_temp_page(p2)
driver.get(uri)
self.assertTrue(p.frame_2.element is not None)
self.assertEqual(p.frame_2.element.text, "frame 2 el")
self.assertEqual(p.elements[0].text, "frame 1 el")
self.assertEqual(log.error.call_count, 1)
def test_mocked_basic_po_with_frame(self):
with mock.patch('selenium.webdriver.Firefox') as driver:
with mock.patch('selenium.webdriver.remote.webelement.WebElement') as element:
element.tag_name = "div"
element.text = "test_text"
driver.find_element.return_value = element
po = BasicPage( driver , iframe='frame')
self.assertEqual( "test_text", po.element.text)
driver.switch_to.frame.assert_called_with("frame")
self.assertEqual(driver.switch_to.frame.call_count, 1)
self.assertEqual(driver.switch_to.default_content.call_count, 1)
| mit | -736,457,207,736,824,200 | 45.736842 | 149 | 0.623498 | false |
colloquium/spacewalk | contrib/swapi.py | 1 | 20969 | #! /usr/bin/python
#
# Like utils/spacewalk-api, call Spacewalk/RHN RPC API from command line.
#
# Copyright (C) 2010 Satoru SATOH <[email protected]>
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
#
# [Features]
#
# * Can call every Spacewalk/RHN RPC APIs with or without arguments from
# command line.
#
# * If an API needs arguments, you can pass them as comma-separated strings or
# JSON data.
#
# * API call results are output in JSON by default to enable post-processing of
# that data by this script itself or another program.
#
# * Result outputs are easily customizable in python string format expression
# as needed.
#
# * Utilize config file to save authentication parameters to cut out the need
# of typing these parameters every time.
#
# * API call results are cached by default and it will drastically reduce the
#   time to get the same results next time.
#
# * Can call an API with multiple different arguments sets at once.
#
import ConfigParser as configparser
import cPickle as pickle
import commands
import datetime
import getpass
import logging
import optparse
import os
import random
import re
import simplejson
import sys
import time
import unittest
import xmlrpclib
try:
import hashlib # python 2.5+
def hexdigest(s):
return hashlib.md5(s).hexdigest()
except ImportError:
import md5
def hexdigest(s):
return md5.md5(s).hexdigest()
"""
Examples:
$ ./swapi.py --args=10821 packages.listDependencies
[
{
"dependency": "/usr/bin/perl",
"dependency_modifier": " ",
"dependency_type": "requires"
},
... (snip) ...
{
"dependency": "cvsmapfs",
"dependency_modifier": "= 1.3-7",
"dependency_type": "provides"
}
]
$ ./swapi.py --list-args="10821,10822,10823" packages.getDetails
[
{
"package_size": "15653",
"package_arch_label": "noarch",
"package_cookie": "porkchop.redhat.com 964488467",
"package_md5sum": "44971f49f5a521464c70038bd9641a8b",
"package_summary": "Extension for CVS to handle links\n",
"package_name": "cvsmapfs",
"package_epoch": "",
"package_checksums": {
"md5": "44971f49f5a521464c70038bd9641a8b"
},
... (snip) ...
{
"package_size": "3110234",
"package_arch_label": "i386",
"package_cookie": "porkchop.redhat.com 964465421",
"package_md5sum": "1919a8e06ee5c0916685cd04dff20776",
"package_summary": "SNNS documents\n",
"package_name": "SNNS-doc",
"package_epoch": "",
"package_checksums": {
"md5": "1919a8e06ee5c0916685cd04dff20776"
},
"package_payload_size": "5475688",
"package_version": "4.2",
"package_license": "Free Software",
"package_vendor": "Red Hat, Inc.",
"package_release": "7",
"package_last_modified_date": "2006-08-22 21:56:01.0",
"package_description": "This package includes the documents in html and postscript for SNNS.\n",
"package_id": 10823,
"providing_channels": [
"redhat-powertools-i386-7.0",
"redhat-powertools-i386-7.1"
],
"package_build_host": "porky.devel.redhat.com",
"package_build_date": "2000-07-24 19:07:23.0",
"package_file": "SNNS-doc-4.2-7.i386.rpm"
}
]
$ ./swapi.py -vv --args=10821 \
> -F "%(dependency)s:%(dependency_type)s" packages.listDependencies
DEBUG:root: config_file = /home/ssato/.swapi/config
DEBUG:root: profile = 'None'
DEBUG:root: Call: api=packages.listDependencies, args=(10821,)
DEBUG:root: Loading cache: method=packages.listDependencies, args=(10821,)
DEBUG:root: Found query result cache
/usr/bin/perl:requires
cvs:requires
perl:requires
rpmlib(CompressedFileNames):requires
rpmlib(PayloadFilesHavePrefix):requires
cvsmapfs:provides
$
$ ./swapi.py -A '["rhel-i386-server-5","2010-04-01 08:00:00"]' \
> --format "%(package_name)s" channel.software.listAllPackages
kdebase
kdebase-devel
kexec-tools
krb5-devel
krb5-libs
krb5-server
krb5-workstation
lvm2
nss_db
sudo
wireshark
wireshark-gnome
$
$ ./swapi.py -A 10170***** -I 0 system.getDetails
[{"building": "", "city": "", "location_aware_download": "true", "base_entitlement": "enterprise_entitled", "description": "Initial Registration Parameters:\nOS: redhat-release\nRelease: 5Server\nCPU Arch: i686-redhat-linux", "address1": "", "address2": "", "auto_errata_update": "false", "state": "", "profile_name": "rhel-5-3-guest-1.net-1.local", "country": "", "rack": "", "room": ""}]
$ ./swapi.py -A '[10170*****,{"city": "tokyo", "rack": "ep7"}]' system.setDetails
[
1
]
$ ./swapi.py -A 10170***** -I 0 system.getDetails
[{"building": "", "city": "", "location_aware_download": "true", "base_entitlement": "enterprise_entitled", "description": "Initial Registration Parameters:\nOS: redhat-release\nRelease: 5Server\nCPU Arch: i686-redhat-linux", "address1": "", "address2": "", "auto_errata_update": "false", "state": "", "profile_name": "rhel-5-3-guest-1.net-1.local", "country": "", "rack": "", "room": ""}]
$ ./swapi.py -A 10170***** -I 0 --no-cache system.getDetails
[{"building": "", "city": "tokyo", "location_aware_download": "true", "base_entitlement": "enterprise_entitled", "description": "Initial Registration Parameters:\nOS: redhat-release\nRelease: 5Server\nCPU Arch: i686-redhat-linux", "address1": "", "address2": "", "auto_errata_update": "false", "state": "", "profile_name": "rhel-5-3-guest-1.net-1.local", "country": "", "rack": "ep7", "room": ""}]
$
"""
PROTO = 'https'
TIMEOUT = 900
CONFIG_DIR = os.path.join(os.environ.get('HOME', '.'), '.swapi')
CONFIG = os.path.join(CONFIG_DIR, 'config')
CACHE_DIR = os.path.join(CONFIG_DIR, 'cache')
CACHE_EXPIRING_DATES = 1 # [days]
def str_to_id(s):
return hexdigest(s)
def object_to_id(obj):
"""Object -> id.
NOTE: Object must be able to convert to str (i.e. implements __str__).
>>> object_to_id("test")
'098f6bcd4621d373cade4e832627b4f6'
>>> object_to_id({'a':"test"})
'c5b846ec3b2f1a5b7c44c91678a61f47'
>>> object_to_id(['a','b','c'])
'eea457285a61f212e4bbaaf890263ab4'
"""
return str_to_id(str(obj))
def run(cmd_str):
return commands.getstatusoutput(cmd_str)
class Cache(object):
"""Pickle module based data caching backend.
"""
def __init__(self, domain, expire=CACHE_EXPIRING_DATES, cache_topdir=CACHE_DIR):
"""Initialize domain-local caching parameters.
@domain a str represents target domain
@expire time period to expire cache in date (>= 0).
0 indicates disabling cache.
@cache_topdir topdir to save cache files
"""
self.domain = domain
self.domain_id = str_to_id(domain)
self.cache_dir = os.path.join(cache_topdir, self.domain_id)
self.expire_dates = self.set_expire(expire)
def set_expire(self, dates):
return (dates > 0 and dates or 0)
def dir(self, obj):
"""Resolve the dir in which cache file of the object is saved.
"""
return os.path.join(self.cache_dir, object_to_id(obj))
def path(self, obj):
"""Resolve path to cache file of the object.
"""
return os.path.join(self.dir(obj), 'cache.pkl')
def load(self, obj):
try:
return pickle.load(open(self.path(obj), 'rb'))
except:
return None
def save(self, obj, data, protocol=pickle.HIGHEST_PROTOCOL):
"""
@obj object of which obj_id will be used as key of the cached data
@data data to saved in cache
"""
cache_dir = self.dir(obj)
if not os.path.isdir(cache_dir):
os.makedirs(cache_dir, mode=0700)
cache_path = self.path(obj)
try:
# TODO: How to detect errors during/after pickle.dump.
pickle.dump(data, open(cache_path, 'wb'), protocol)
return True
except:
return False
def needs_update(self, obj):
if self.expire_dates == 0:
return True
try:
mtime = os.stat(self.path(obj)).st_mtime
except OSError: # It indicates that the cache file cannot be updated.
return True # FIXME: How to handle the above case?
cur_time = datetime.datetime.now()
cache_mtime = datetime.datetime.fromtimestamp(mtime)
delta = cur_time - cache_mtime # TODO: How to do if it's negative value?
return (delta >= datetime.timedelta(self.expire_dates))
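# Minimal usage sketch for the cache backend above (illustrative only; the
# domain string and key are made up, and actually calling this would write
# under the real CACHE_DIR).  Never invoked here.
def _demo_cache():
    c = Cache("https://rhn.example.com/rpc/api:apiuser", expire=1)
    key = ("channel.listSoftwareChannels", ())
    if c.needs_update(key):
        result = ["channel-a", "channel-b"]  # stand-in for a real API call
        c.save(key, result)
    return c.load(key)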
class RpcApi(object):
"""Spacewalk / RHN XML-RPC API server object.
"""
def __init__(self, conn_params, enable_cache=True, expire=1):
"""
@conn_params Connection parameters: server, userid, password, timeout, protocol.
@enable_cache Whether to enable query result cache or not.
@expire Cache expiration date
"""
self.url = "%(protocol)s://%(server)s/rpc/api" % conn_params
self.userid = conn_params.get('userid')
self.passwd = conn_params.get('password')
self.timeout = conn_params.get('timeout')
self.sid = False
self.cache = (enable_cache and Cache("%s:%s" % (self.url, self.userid), expire) or False)
def __del__(self):
self.logout()
def login(self):
self.server = xmlrpclib.ServerProxy(self.url)
self.sid = self.server.auth.login(self.userid, self.passwd, self.timeout)
def logout(self):
if self.sid:
self.server.auth.logout(self.sid)
def call(self, method_name, *args):
logging.debug(" Call: api=%s, args=%s" % (method_name, str(args)))
try:
if self.cache:
key = (method_name, args)
if not self.cache.needs_update(key):
ret = self.cache.load(key)
logging.debug(" Loading cache: method=%s, args=%s" % (method_name, str(args)))
if ret is not None:
logging.debug(" Found query result cache")
return ret
logging.debug(" No query result cache found.")
if not self.sid:
self.login()
method = getattr(self.server, method_name)
# wait a little to avoid DoS attack to the server if called
# multiple times.
time.sleep(random.random())
# Special cases which do not need session_id parameter:
# api.{getVersion, systemVersion} and auth.login.
if re.match(r'^(api.|proxy.|auth.login)', method_name):
ret = method(*args)
else:
ret = method(self.sid, *args)
if self.cache:
self.cache.save(key, ret)
return ret
except xmlrpclib.Fault, m:
raise RuntimeError("rpc: method '%s', args '%s'\nError message: %s" % (method_name, str(args), m))
def multicall(self, method_name, argsets):
"""Quick hack to implement XML-RPC's multicall like function.
@see xmlrpclib.MultiCall
"""
return [self.call(method_name, arg) for arg in argsets]
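# Illustrative sketch of driving RpcApi directly (hostname and credentials are
# placeholders; normally configure() and main() below build conn_params from
# the config file and command-line options).  Never invoked here.
def _demo_rpcapi():
    conn = {'server': 'spacewalk.example.com', 'userid': 'apiuser',
            'password': 'secret', 'timeout': TIMEOUT, 'protocol': PROTO}
    rapi = RpcApi(conn, enable_cache=False)
    rapi.login()
    return rapi.call('api.getVersion')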
def __parse(arg):
"""
>>> __parse('1234567')
1234567
>>> __parse('abcXYZ012')
'abcXYZ012'
>>> __parse('{"channelLabel": "foo-i386-5"}')
{'channelLabel': 'foo-i386-5'}
"""
try:
if re.match(r'[1-9]\d*', arg):
return int(arg)
elif re.match(r'{.*}', arg):
return simplejson.loads(arg) # retry with simplejson
else:
return str(arg)
except ValueError:
return str(arg)
def parse_api_args(args, arg_sep=','):
"""
Simple JSON-like expression parser.
@args options.args :: string
@return rpc arg objects, [arg] :: [string]
>>> parse_api_args('')
[]
>>> parse_api_args('1234567')
[1234567]
>>> parse_api_args('abcXYZ012')
['abcXYZ012']
>>> parse_api_args('{"channelLabel": "foo-i386-5"}')
[{'channelLabel': 'foo-i386-5'}]
>>> parse_api_args('1234567,abcXYZ012,{"channelLabel": "foo-i386-5"}')
[1234567, 'abcXYZ012', {'channelLabel': 'foo-i386-5'}]
>>> parse_api_args('[1234567,"abcXYZ012",{"channelLabel": "foo-i386-5"}]')
[1234567, 'abcXYZ012', {'channelLabel': 'foo-i386-5'}]
"""
if not args:
return []
try:
x = simplejson.loads(args)
if isinstance(x, list):
ret = x
else:
ret = [x]
except ValueError:
ret = [__parse(a) for a in args.split(arg_sep)]
return ret
def results_to_json_str(results, indent=2):
"""
>>> results_to_json_str([123, 'abc', {'x':'yz'}], 0)
'[123, "abc", {"x": "yz"}]'
>>> results_to_json_str([123, 'abc', {'x':'yz'}])
'[\\n 123, \\n "abc", \\n {\\n "x": "yz"\\n }\\n]'
"""
return simplejson.dumps(results, ensure_ascii=False, indent=indent)
def configure_with_configfile(config_file, profile=""):
"""
@config_file Configuration file path, ex. '~/.swapi/config'.
"""
(server,userid,password,timeout,protocol) = ('', '', '', TIMEOUT, PROTO)
# expand '~/'
if '~' in config_file:
config_file = os.path.expanduser(config_file)
logging.debug(" config_file = %s" % config_file)
cp = configparser.SafeConfigParser()
try:
cp.read(config_file)
if profile and cp.has_section(profile):
sect = profile
else:
sect = 'DEFAULT'
logging.debug(" profile = '%s'" % profile)
server = cp.get(sect, 'server')
userid = cp.get(sect, 'userid')
password = cp.get(sect, 'password')
timeout = int(cp.get(sect, 'timeout'))
protocol = cp.get(sect, 'protocol')
except configparser.NoOptionError:
pass
return {
'server': server,
'userid': userid,
'password': password,
'timeout': timeout,
'protocol': protocol
}
def configure_with_options(config, options):
"""
@config config parameters dict: {'server':, 'userid':, ...}
@options optparse.Options
"""
server = config.get('server') or (options.server or raw_input('Enter server name > '))
userid = config.get('userid') or (options.userid or raw_input('Enter user ID > '))
password = config.get('password') or (options.password or getpass.getpass('Enter your password > '))
timeout = config.get('timeout') or ((options.timeout and options.timeout != TIMEOUT) and options.timeout or TIMEOUT)
protocol = config.get('protocol') or ((options.protocol and options.protocol != PROTO) and options.protocol or PROTO)
return {'server':server, 'userid':userid, 'password':password, 'timeout':timeout, 'protocol':protocol}
def configure(options):
conf = configure_with_configfile(options.config, options.profile)
conf = configure_with_options(conf, options)
return conf
def option_parser(cmd=sys.argv[0]):
p = optparse.OptionParser("""%(cmd)s [OPTION ...] RPC_API_STRING
Examples:
%(cmd)s --args=10821 packages.listDependencies
%(cmd)s --list-args="10821,10822,10823" packages.getDetails
%(cmd)s -vv --args=10821 packages.listDependencies
%(cmd)s -P MySpacewalkProfile --args=rhel-x86_64-server-vt-5 channel.software.getDetails
%(cmd)s -C /tmp/s.cfg -A rhel-x86_64-server-vt-5,guest channel.software.isUserSubscribable
%(cmd)s -A "rhel-i386-server-5","2010-04-01 08:00:00" channel.software.listAllPackages
%(cmd)s -A '["rhel-i386-server-5","2010-04-01 08:00:00"]' channel.software.listAllPackages
%(cmd)s --format "%%(label)s" channel.listSoftwareChannels
%(cmd)s -A 100010021 --no-cache -F "%%(hostname)s %%(description)s" system.getDetails
%(cmd)s -A '[1017068053,{"city": "tokyo", "rack": "rack-A-1"}]' system.setDetails
Config file example (%(config)s):
--------------------------------------------------------------
[DEFAULT]
server = rhn.redhat.com
userid = xyz********
password = # it will ask you if password is not set.
timeout = 900
protocol = https
[MySpacewalkProfile]
server = my-spacewalk.example.com
userid = rpcusr
password = secretpasswd
--------------------------------------------------------------
""" % {'cmd': cmd, 'config': CONFIG}
)
p.add_option('-C', '--config', help='Config file path [%default]', default=CONFIG)
p.add_option('-P', '--profile', help='Select profile (section) in config file')
p.add_option('-v', '--verbose', help='verbose mode', default=0, action="count")
p.add_option('-T', '--test', help='Test mode', default=False, action="store_true")
cog = optparse.OptionGroup(p, "Connect options")
cog.add_option('-s', '--server', help='Spacewalk/RHN server hostname.')
cog.add_option('-u', '--userid', help='Spacewalk/RHN login user id')
cog.add_option('-p', '--password', help='Spacewalk/RHN Login password')
cog.add_option('-t', '--timeout', help='Session timeout in sec [%default]', default=TIMEOUT)
cog.add_option('', '--protocol', help='Spacewalk/RHN server protocol.', default=PROTO)
p.add_option_group(cog)
caog = optparse.OptionGroup(p, "Cache options")
caog.add_option('', '--no-cache', help='Do not use query result cache', action="store_true", default=False)
caog.add_option('', '--expire', help='Expiration dates. 0 means refresh cache [%default]', default=1, type="int")
p.add_option_group(caog)
oog = optparse.OptionGroup(p, "Output options")
oog.add_option('-o', '--output', help="Output file [default: stdout]")
oog.add_option('-F', '--format', help="Output format (non-json)", default=False)
oog.add_option('-I', '--indent', help="Indent for JSON output. 0 means no indent. [%default]", type="int", default=2)
p.add_option_group(oog)
aog = optparse.OptionGroup(p, "API argument options")
aog.add_option('-A', '--args', default='',
help='Api args other than session id in comma separated strings or JSON expression [empty]')
aog.add_option('', '--list-args', help='Specify List of API args')
p.add_option_group(aog)
return p
def main(argv):
loglevel = logging.WARN
out = sys.stdout
enable_cache = True
parser = option_parser()
(options, args) = parser.parse_args(argv[1:])
if options.verbose > 0:
loglevel = logging.INFO
if options.verbose > 1:
loglevel = logging.DEBUG
logging.basicConfig(level=loglevel)
if options.test:
test()
if len(args) == 0:
parser.print_usage()
return 0
if options.no_cache:
enable_cache = False
if options.output:
out = open(options.output, 'w')
api = args[0]
conn_params = configure(options)
rapi = RpcApi(conn_params, enable_cache, options.expire)
rapi.login()
if options.list_args:
list_args = parse_api_args(options.list_args)
res = rapi.multicall(api, list_args)
else:
args = (options.args and parse_api_args(options.args) or [])
res = rapi.call(api, *args)
if not isinstance(res, list):
res = [res]
if options.format:
print >> out, '\n'.join((options.format % r for r in res))
else:
print >> out, results_to_json_str(res, options.indent)
return 0
class TestScript(unittest.TestCase):
"""TODO: More test cases.
"""
def setUp(self):
self.cmd = sys.argv[0]
def __helper(self, cfmt):
cs = cfmt % self.cmd
(status, _output) = run(cs)
assert status == 0, "cmdline=%s" % cs
def test_api_wo_arg_and_sid(self):
self.__helper("%s api.getVersion")
def test_api_wo_arg(self):
self.__helper("%s channel.listSoftwareChannels")
def test_api_w_arg(self):
self.__helper("%s --args=rhel-i386-server-5 channel.software.getDetails")
def test_api_w_arg_and_format_option(self):
self.__helper("%s -A rhel-i386-server-5 --format '%%(channel_description)s' channel.software.getDetails")
def test_api_w_arg_multicall(self):
self.__helper("%s --list-args='rhel-i386-server-5,rhel-x86_64-server-5' channel.software.getDetails")
def test_api_w_args(self):
self.__helper("%s -A 'rhel-i386-server-5,2010-04-01 08:00:00' channel.software.listAllPackages")
def test_api_w_args_as_list(self):
self.__helper("%s -A '[\"rhel-i386-server-5\",\"2010-04-01 08:00:00\"]' channel.software.listAllPackages")
def unittests():
suite = unittest.TestLoader().loadTestsFromTestCase(TestScript)
unittest.TextTestRunner(verbosity=2).run(suite)
def test():
import doctest
doctest.testmod(verbose=True)
unittests()
sys.exit(0)
if __name__ == '__main__':
sys.exit(main(sys.argv))
# vim: set sw=4 ts=4 expandtab:
| gpl-2.0 | -5,014,865,186,729,920,000 | 30.437781 | 397 | 0.616958 | false |
reidlindsay/wins | sandbox/experiments/dsr/traffic/parse-delay.py | 1 | 7772 | #! /usr/bin/env python
"""
Parse latency (vs. traffic rate) for DSR simulation.
Revision Info
=============
* $LastChangedBy: mandke $
* $LastChangedDate: 2011-12-09 12:55:44 -0600 (Fri, 09 Dec 2011) $
* $LastChangedRevision: 5364 $
:author: Ketan Mandke <[email protected]>
:copyright:
Copyright 2009-2011 The University of Texas at Austin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__docformat__ = "restructuredtext en"
from wins import *
from wins.ieee80211 import *
from optparse import OptionParser
import sys
import numpy as np
from config_parse import FILTER_DISCONNECTED
def read_trace(options, tracefile):
# load trace from file
tr = Trace()
tr.read(tracefile)
# return trace
return tr
def hasfield(event, *args):
"""Check if event has all the specified arguments."""
ok = all([(a in event) for a in args])
return ok
def parse_info(options, trace):
# (data, param) variables
data = []
param = {'plen': None}
rate = None
envspeed = None
# event parameters
macargs = ('mac-txts', )
agtargs = ('net-root', )
    # parameters for logging data
sendbuffer = {} # log sent packets using AGT name for index
sendnodes = {} # nodes that sent data : number delivered
# default arguments for buffer entry
defaultpkt = {'txts':None, 'rxts':None, 'hopcount':None, 'sender':None}
for e in trace.events:
# check timestamp - [tmin, tmax]
ts = float(e['ts'])
if (ts<options.tmin): continue
elif (ts>options.tmax): break
# get event/object/packet parameters
obj, evt = e['obj'], e['event']
pname, pid = e['packet'], e['pid']
rname, rid = e['root'], e['rid']
nname, nid = e['node'], e['nid']
pkt = "%s(%s)"%(pname, pid)
root = "%s(%s)"%(rname, rid)
node = "%s(%s)"%(nname, nid)
# check for AGT SND
if (obj=="AGT") and (evt=="SND"):
assert (pkt not in sendbuffer)
sendbuffer[pkt] = defaultpkt.copy()
sendbuffer[pkt]['txts'] = float(ts)
sendbuffer[pkt]['sender'] = node
if node not in sendnodes: sendnodes[node] = 0
# check for AGT RCV
if (obj=="AGT") and (evt=="RCV"):
assert (pkt in sendbuffer)
sendbuffer[pkt]['rxts'] = float(ts)
sender = sendbuffer[pkt]['sender']
assert (sender in sendnodes)
sendnodes[sender] += 1
# check for DSR SND
hasagt = hasfield(e, 'agt-root')
if (obj=="DSR") and (evt=="SND") and hasagt:
agt = e['agt-root']
assert (agt in sendbuffer)
nhops = None
if (rname=="DSR|SRCROUTE"):
assert hasfield(e, 'srcroute.addresses')
srcroute = eval(e['srcroute.addresses'])
nhops = len(srcroute) + 1
elif (rname=="DSR"):
nhops = 1
assert (nhops is not None)
sendbuffer[agt]['hopcount'] = nhops
# check for other parameters
if (obj=="AGT") and (evt=="SND"):
assert hasfield(e, 'plen')
plen = int(e['plen'])
if param['plen'] is None: param['plen'] = plen
else: assert (plen==param['plen'])
if (obj=="MON") and (evt=="RATE"):
assert hasfield(e, 'rate')
rate = float(e['rate'])
if (obj=="MON") and (evt=="MODEL"):
assert hasfield(e, 'environmentspeed')
envspeed = float(e['environmentspeed'])
# skip data from disconnected topologies
if FILTER_DISCONNECTED:
isdisconnected = False
for s, v in sendnodes.items():
if (v==0): isdisconnected = True
if isdisconnected:
sys.stderr.write("WARNING: Skipping data for disconnected topology!\n")
return data, param
# calculate delay from sendbuffer
assert (rate is not None)
for p,d in sendbuffer.items():
plen = param['plen']
assert (plen is not None)
if (d['rxts'] is None):
# packet was not delivered -> ignore
x, y = rate, 0
else:
# packet was delivered
x, latency = rate, d['rxts']-d['txts']
L = 8*plen # length in bits
tput = (1e-6*L/latency) # throughput in Mbps
nhops = 1
if options.perhop:
assert (d['hopcount'] is not None)
nhops = d['hopcount']
y = 1.0*latency/nhops
dp = {'x': x, 'y': y, 'ndata': 1}
data.append(dp)
# set remaining parameters
param['use-yerror'] = True
return data, param
def parse_delay():
usage = "%prog [OPTIONS] TRACEFILE1 [TRACEFILE2 ...]\n" + \
" Writes parsed data to standard output."
parser = OptionParser(usage=usage)
parser.add_option("", "--tmin", dest="tmin", \
type="float", default=-np.inf, \
help="Specify simulation time to start from [default=%default]")
parser.add_option("", "--tmax", dest="tmax", \
type="float", default=np.inf, \
help="Specify simulation time to stop at [default=%default]")
parser.add_option("-f", "--fmt", dest="fmt", \
default="bo", help="Format of data parsed from tracefiles [default=%default]")
parser.add_option("", "--label", dest="label", \
default="Hydra PHY", help="Specify label for newly parsed data [default=%default]")
parser.add_option("", "--per-hop", dest="perhop", action="store_true", \
default=False, help="Normalize delay by the number of hops in the route.")
(options, args) = parser.parse_args()
if len(args)<1:
print "Insufficient number of arguments."
parser.print_help()
raise SystemExit
# get trace files
tracefile = args[0:]
numtraces = len(tracefile)
# set default parameters
defaultparam = {'xlabel': "Traffic Rate (packets/sec)", \
'ylabel': "Delay (sec)", \
'title': "End-to-End Delay vs. Traffic Rate", \
'label': None, \
'source': None, \
'format': None}
# define formats
formats = [(options.fmt, options.fmt[0]+"x:")] # use same format for all
#formats = [('ro','rx:'), ('bo', 'bx:'), ('go', 'gx:')]
# parse data from each data set
for k in range(numtraces):
# get tracefile
tfile = tracefile[k]
trace = read_trace(options, tfile) # treat as normal wins trace file
if not trace: continue
# get other parameters
fmt = formats[k%len(formats)]
# parse trace to get (data, param)
sys.stderr.write("Parsing trace from %s ... \n"%(tfile))
data, dparam = parse_info(options, trace)
if data:
param = defaultparam.copy()
param.update(dparam)
param['format'] = fmt[0]
param['source'] = tfile
param['label'] = options.label
pdata = {'data': data, 'parameters':param}
sys.stdout.write("%s\n"%(pdata))
if __name__ == '__main__':
parse_delay()
| apache-2.0 | -1,905,062,652,462,929,400 | 35.660377 | 95 | 0.558415 | false |
rtrwalker/geotecha | examples/speccon/example_1d_vr_001_schiffmanandstein1970.py | 1 | 2686 | # Example input file for speccon1d_vr. Use speccon1d_vr.exe to run.
# Vertical consolidation of four soil layers
# Figure 2 from:
# Schiffman, R. L, and J. R Stein. (1970) 'One-Dimensional Consolidation of
# Layered Systems'. Journal of the Soil Mechanics and Foundations
# Division 96, no. 4 (1970): 1499-1504.
# Parameters from Schiffman and Stein 1970
h = np.array([10, 20, 30, 20]) # feet
cv = np.array([0.0411, 0.1918, 0.0548, 0.0686]) # square feet per day
mv = np.array([3.07e-3, 1.95e-3, 9.74e-4, 1.95e-3]) # square feet per kip
#kv = np.array([7.89e-6, 2.34e-5, 3.33e-6, 8.35e-6]) # feet per day
kv = cv*mv # assume kv values are actually kv/gamw
# speccon1d_vr parameters
drn = 0
neig = 60
H = np.sum(h)
z2 = np.cumsum(h) / H # Normalized Z at bottom of each layer
z1 = (np.cumsum(h) - h) / H # Normalized Z at top of each layer
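# Worked check (illustrative): with h = [10, 20, 30, 20] ft and H = 80 ft the
# normalized layer boundaries evaluate to
#   z1 = [0.0, 0.125, 0.375, 0.75]
#   z2 = [0.125, 0.375, 0.75, 1.0]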
mvref = mv[0] # Choosing 1st layer as reference value
kvref = kv[0] # Choosing 1st layer as reference value
dTv = 1 / H**2 * kvref / mvref
mv = PolyLine(z1, z2, mv/mvref, mv/mvref)
kv = PolyLine(z1, z2, kv/kvref, kv/kvref)
surcharge_vs_time = PolyLine([0,0,30000], [0,100,100])
surcharge_vs_depth = PolyLine([0,1], [1,1]) # Load is uniform with depth
ppress_z = np.array(
[ 0. , 1. , 2. , 3. ,
4. , 5. , 6. , 7. ,
8. , 9. , 10. , 12. ,
14. , 16. , 18. , 20. ,
22. , 24. , 26. , 28. ,
30. , 33. , 36. , 39. ,
42. , 45. , 48. , 51. ,
54. , 57. , 60. , 62.22222222,
64.44444444, 66.66666667, 68.88888889, 71.11111111,
73.33333333, 75.55555556, 77.77777778, 80. ])/H
tvals=np.array(
[1.21957046e+02, 1.61026203e+02, 2.12611233e+02,
2.80721620e+02, 3.70651291e+02, 4.89390092e+02,
740.0, 8.53167852e+02, 1.12648169e+03,
1.48735211e+03, 1.96382800e+03, 2930.0,
3.42359796e+03, 4.52035366e+03, 5.96845700e+03,
7195.0, 1.04049831e+04, 1.37382380e+04,
1.81393069e+04, 2.39502662e+04, 3.16227766e+04])
ppress_z_tval_indexes=[6, 11, 15]
avg_ppress_z_pairs = [[0,1]]
settlement_z_pairs = [[0,1]]
implementation='vectorized' #['scalar', 'vectorized','fortran']
# Output options
save_data_to_file = True
save_figures_to_file = True
#show_figures = True
#directory
overwrite = True
prefix = "example_1d_vr_001_schiffmanandstein1970_"
create_directory=True
#data_ext
#input_ext
figure_ext = '.png'
title = "Speccon1d_vr example Schiffman and Stein 1970"
author = "Dr. Rohan Walker" | gpl-3.0 | -6,498,293,306,053,173,000 | 33.448718 | 75 | 0.575949 | false |
upsuper/servo | tests/wpt/web-platform-tests/tools/third_party/pytest/tasks/generate.py | 15 | 5863 | import os
from pathlib import Path
from subprocess import check_output, check_call
import invoke
@invoke.task(help={
'version': 'version being released',
})
def announce(ctx, version):
"""Generates a new release announcement entry in the docs."""
# Get our list of authors
stdout = check_output(["git", "describe", "--abbrev=0", '--tags'])
stdout = stdout.decode('utf-8')
last_version = stdout.strip()
stdout = check_output(["git", "log", "{}..HEAD".format(last_version), "--format=%aN"])
stdout = stdout.decode('utf-8')
contributors = set(stdout.splitlines())
template_name = 'release.minor.rst' if version.endswith('.0') else 'release.patch.rst'
template_text = Path(__file__).parent.joinpath(template_name).read_text(encoding='UTF-8')
contributors_text = '\n'.join('* {}'.format(name) for name in sorted(contributors)) + '\n'
text = template_text.format(version=version, contributors=contributors_text)
target = Path(__file__).parent.joinpath('../doc/en/announce/release-{}.rst'.format(version))
target.write_text(text, encoding='UTF-8')
print("[generate.announce] Generated {}".format(target.name))
# Update index with the new release entry
index_path = Path(__file__).parent.joinpath('../doc/en/announce/index.rst')
lines = index_path.read_text(encoding='UTF-8').splitlines()
indent = ' '
for index, line in enumerate(lines):
if line.startswith('{}release-'.format(indent)):
new_line = indent + target.stem
if line != new_line:
lines.insert(index, new_line)
index_path.write_text('\n'.join(lines) + '\n', encoding='UTF-8')
print("[generate.announce] Updated {}".format(index_path.name))
else:
print("[generate.announce] Skip {} (already contains release)".format(index_path.name))
break
check_call(['git', 'add', str(target)])
@invoke.task()
def regen(ctx):
"""Call regendoc tool to update examples and pytest output in the docs."""
print("[generate.regen] Updating docs")
check_call(['tox', '-e', 'regen'])
@invoke.task()
def make_tag(ctx, version):
"""Create a new (local) tag for the release, only if the repository is clean."""
from git import Repo
repo = Repo('.')
if repo.is_dirty():
print('Current repository is dirty. Please commit any changes and try again.')
raise invoke.Exit(code=2)
tag_names = [x.name for x in repo.tags]
if version in tag_names:
print("[generate.make_tag] Delete existing tag {}".format(version))
repo.delete_tag(version)
print("[generate.make_tag] Create tag {}".format(version))
repo.create_tag(version)
@invoke.task()
def devpi_upload(ctx, version, user, password=None):
"""Creates and uploads a package to devpi for testing."""
if password:
print("[generate.devpi_upload] devpi login {}".format(user))
check_call(['devpi', 'login', user, '--password', password])
check_call(['devpi', 'use', 'https://devpi.net/{}/dev'.format(user)])
env = os.environ.copy()
env['SETUPTOOLS_SCM_PRETEND_VERSION'] = version
check_call(['devpi', 'upload', '--formats', 'sdist,bdist_wheel'], env=env)
print("[generate.devpi_upload] package uploaded")
@invoke.task(help={
'version': 'version being released',
'user': 'name of the user on devpi to stage the generated package',
'password': 'user password on devpi to stage the generated package '
'(if not given assumed logged in)',
})
def pre_release(ctx, version, user, password=None):
"""Generates new docs, release announcements and uploads a new release to devpi for testing."""
announce(ctx, version)
regen(ctx)
changelog(ctx, version, write_out=True)
msg = 'Preparing release version {}'.format(version)
check_call(['git', 'commit', '-a', '-m', msg])
make_tag(ctx, version)
devpi_upload(ctx, version=version, user=user, password=password)
print()
print('[generate.pre_release] Please push your branch and open a PR.')
@invoke.task(help={
'version': 'version being released',
'user': 'name of the user on devpi to stage the generated package',
'pypi_name': 'name of the pypi configuration section in your ~/.pypirc',
})
def publish_release(ctx, version, user, pypi_name):
"""Publishes a package previously created by the 'pre_release' command."""
from git import Repo
repo = Repo('.')
tag_names = [x.name for x in repo.tags]
if version not in tag_names:
print('Could not find tag for version {}, exiting...'.format(version))
raise invoke.Exit(code=2)
check_call(['devpi', 'use', 'https://devpi.net/{}/dev'.format(user)])
check_call(['devpi', 'push', 'pytest=={}'.format(version), 'pypi:{}'.format(pypi_name)])
check_call(['git', 'push', '[email protected]:pytest-dev/pytest.git', version])
emails = [
'[email protected]',
'[email protected]'
]
if version.endswith('.0'):
emails.append('[email protected]')
print('Version {} has been published to PyPI!'.format(version))
print()
print('Please send an email announcement with the contents from:')
print()
print(' doc/en/announce/release-{}.rst'.format(version))
print()
print('To the following mail lists:')
print()
print(' ', ','.join(emails))
print()
print('And announce it on twitter adding the #pytest hash tag.')
@invoke.task(help={
'version': 'version being released',
    'write_out': 'write changes to the actual changelog'
})
def changelog(ctx, version, write_out=False):
if write_out:
addopts = []
else:
addopts = ['--draft']
check_call(['towncrier', '--version', version] + addopts)
| mpl-2.0 | 5,769,617,999,951,429,000 | 35.191358 | 103 | 0.636023 | false |
oceanobservatories/mi-dataset | mi/dataset/parser/dosta_abcdjm_ctdbp.py | 3 | 6675 | #!/usr/bin/env python
"""
@package mi.dataset.parser.dosta_abcdjm_ctdbp
@file mi-dataset/mi/dataset/parser/dosta_abcdjm_ctdbp.py
@author Jeff Roy
@brief Parser for the dosta_abcdjm_ctdbp dataset driver
This file contains code for the dosta_abcdjm_ctdbp parser and code to produce data
particles. This parser is for recovered data only - it produces a single
particle for the data recovered from the instrument.
The input file is ASCII. There are two sections of data contained in the
input file. The first is a set of header information, and the second is a set
of hex ascii data with one data sample per line in the file. Each line in the
header section starts with a '*'. The header lines are simply ignored.
Each line of sample data produces a single data particle.
Malformed sensor data records and all header records produce no particles.
Release notes:
This parser was broken out of the previous dosta_abcdjm_ctdbp_ce
Initial Release
"""
__author__ = 'Jeff Roy'
__license__ = 'Apache 2.0'
import calendar
import re
from mi.core.log import get_logger
log = get_logger()
from mi.core.common import BaseEnum
from mi.core.exceptions import UnexpectedDataException
from mi.core.instrument.data_particle import \
DataParticle, \
DataParticleKey, \
DataParticleValue
from mi.dataset.dataset_parser import SimpleParser
from mi.dataset.parser.common_regexes import \
END_OF_LINE_REGEX, ANY_CHARS_REGEX, ASCII_HEX_CHAR_REGEX
# Basic patterns
# Time tuple corresponding to January 1st, 2000
JAN_1_2000 = (2000, 1, 1, 0, 0, 0, 0, 0, 0)
# regex for identifying start of a header line
START_HEADER = r'\*'
# Header data:
HEADER_PATTERN = START_HEADER # Header data starts with '*'
HEADER_PATTERN += ANY_CHARS_REGEX # followed by text
HEADER_PATTERN += END_OF_LINE_REGEX # followed by newline
HEADER_MATCHER = re.compile(HEADER_PATTERN)
# this list of strings corresponds to the particle parameter names
# it is used by build_parsed_values. Named groups in regex must match.
DATA_PARTICLE_MAP = [
'oxygen',
'ctd_time'
]
# Regex for data from the Endurance array
# Each data record is in the following format:
# ttttttccccccppppppvvvvoooooossssssss
# where each character indicates one hex ascii character.
# First 6 chars: tttttt = Temperature A/D counts (CTDBP data omitted from output)
# Next 6 chars: cccccc = Conductivity A/D counts (CTDBP data omitted from output)
# Next 6 chars: pppppp = pressure A/D counts (CTDBP data omitted from output)
# Next 4 chars: vvvv = temperature compensation A/D counts (CTDBP data omitted from output)
# Next 6 chars: oooooo = Dissolved Oxygen in counts
# Last 8 chars: ssssssss = seconds since January 1, 2000
# Total of 36 hex characters and line terminator
ENDURANCE_DATA_REGEX = r'(?:' + ASCII_HEX_CHAR_REGEX + '{6})'   # temperature counts (skipped)
ENDURANCE_DATA_REGEX += r'(?:' + ASCII_HEX_CHAR_REGEX + '{6})'  # conductivity counts (skipped)
ENDURANCE_DATA_REGEX += r'(?:' + ASCII_HEX_CHAR_REGEX + '{6})'  # pressure counts (skipped)
ENDURANCE_DATA_REGEX += r'(?:' + ASCII_HEX_CHAR_REGEX + '{4})'  # temperature compensation counts (skipped)
ENDURANCE_DATA_REGEX += r'(?P<oxygen>' + ASCII_HEX_CHAR_REGEX + '{6})'
ENDURANCE_DATA_REGEX += r'(?P<ctd_time>' + ASCII_HEX_CHAR_REGEX + '{8})' + END_OF_LINE_REGEX
ENDURANCE_DATA_MATCHER = re.compile(ENDURANCE_DATA_REGEX, re.VERBOSE)
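# Illustrative helper (an assumption, not part of the original driver): show how
# one matched data line converts its hex fields to integers and how the ctd_time
# field becomes a unix timestamp (epoch offset plus seconds since 2000-01-01).
def _example_parse_endurance_line(line):
    """Return the named hex fields of one data line as integers, or None."""
    match = ENDURANCE_DATA_MATCHER.match(line)
    if match is None:
        return None
    fields = dict((name, int(match.group(name), 16)) for name in DATA_PARTICLE_MAP)
    fields['unix_time'] = calendar.timegm(JAN_1_2000) + fields['ctd_time']
    return fields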
class DataParticleType(BaseEnum):
"""
Class that defines the data particle generated from the dosta_abcdjm_ctdbp recovered data
"""
SAMPLE = 'dosta_abcdjm_ctdbp_instrument_recovered'
class DostaAbcdjmCtdbpInstrumentDataParticle(DataParticle):
"""
Class for generating the dosta_abcdjm_ctdbp_instrument_recovered data particle.
"""
_data_particle_type = DataParticleType.SAMPLE
def __init__(self, raw_data,
port_timestamp=None,
internal_timestamp=None,
preferred_timestamp=DataParticleKey.PORT_TIMESTAMP,
quality_flag=DataParticleValue.OK,
new_sequence=None):
super(DostaAbcdjmCtdbpInstrumentDataParticle, self).__init__(raw_data,
port_timestamp,
internal_timestamp,
preferred_timestamp,
quality_flag,
new_sequence)
# the data contains seconds since Jan 1, 2000. Need the number of seconds before that
seconds_till_jan_1_2000 = calendar.timegm(JAN_1_2000)
# calculate the internal timestamp
ctd_time = int(self.raw_data.group('ctd_time'), 16)
elapsed_seconds = seconds_till_jan_1_2000 + ctd_time
self.set_internal_timestamp(unix_time=elapsed_seconds)
def _build_parsed_values(self):
"""
Take recovered Hex raw data and extract different fields, converting Hex to Integer values.
@throws SampleException If there is a problem with sample creation
"""
return [self._encode_value(name, self.raw_data.group(name), lambda x: int(x, 16))
for name in DATA_PARTICLE_MAP]
class DostaAbcdjmCtdbpParser(SimpleParser):
"""
Parser for dosta_abcdjm_ctdbp data.
"""
def __init__(self, stream_handle, exception_callback):
super(DostaAbcdjmCtdbpParser, self).__init__({},
stream_handle,
exception_callback)
def parse_file(self):
for line in self._stream_handle:
# If this is a valid sensor data record,
# use the extracted fields to generate a particle.
# check for match from Endurance
match = ENDURANCE_DATA_MATCHER.match(line)
if match is not None:
particle = self._extract_sample(DostaAbcdjmCtdbpInstrumentDataParticle, None, match, None)
if particle is not None:
self._record_buffer.append(particle)
# It's not a sensor data record, see if it's a header record.
else:
# If it's a valid header record, ignore it.
# Otherwise generate warning for unknown data.
header_match = HEADER_MATCHER.match(line)
log.debug('Header match: %s', str(header_match))
if header_match is None:
warning_message = 'Unknown data found in chunk %s' % line
log.warn(warning_message)
self._exception_callback(UnexpectedDataException(warning_message))
| bsd-2-clause | -5,615,347,329,787,152,000 | 37.583815 | 106 | 0.6403 | false |
bearstech/modoboa | modoboa/limits/tests/test_api.py | 1 | 13603 | # coding: utf-8
"""Test cases for the limits extension."""
from __future__ import unicode_literals
from testfixtures import compare
from django.core.urlresolvers import reverse
from rest_framework.authtoken.models import Token
from modoboa.admin.factories import populate_database
from modoboa.admin.models import Domain
from modoboa.core import factories as core_factories
from modoboa.core.models import User
from modoboa.lib import permissions
from modoboa.lib import tests as lib_tests
from .. import utils
class APIAdminLimitsTestCase(lib_tests.ModoAPITestCase):
"""Check that limits are used also by the API."""
@classmethod
def setUpTestData(cls):
"""Create test data."""
super(APIAdminLimitsTestCase, cls).setUpTestData()
for name, tpl in utils.get_user_limit_templates():
cls.localconfig.parameters.set_value(
"deflt_user_{0}_limit".format(name), 2)
cls.localconfig.save()
populate_database()
cls.user = User.objects.get(username="[email protected]")
cls.da_token = Token.objects.create(user=cls.user)
cls.reseller = core_factories.UserFactory(
username="reseller", groups=("Resellers", ),
)
cls.r_token = Token.objects.create(user=cls.reseller)
def test_domadmins_limit(self):
"""Check domain admins limit."""
self.client.credentials(
HTTP_AUTHORIZATION='Token ' + self.r_token.key)
limit = self.reseller.userobjectlimit_set.get(name="domain_admins")
url = reverse("api:account-list")
data = {
"username": "[email protected]",
"role": "DomainAdmins",
"password": "Toto1234",
}
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, 201)
self.assertFalse(limit.is_exceeded())
data["username"] = "[email protected]"
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, 201)
self.assertTrue(limit.is_exceeded())
data["username"] = "[email protected]"
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, 400)
user = User.objects.get(username="[email protected]")
domain = Domain.objects.get(name="test.com")
domain.add_admin(self.reseller)
url = reverse("api:account-detail", args=[user.pk])
data = {
"username": user.username,
"role": "DomainAdmins",
"password": "Toto1234",
"mailbox": {
"full_address": user.username,
"quota": user.mailbox.quota
}
}
response = self.client.put(url, data, format="json")
self.assertEqual(response.status_code, 400)
def test_domains_limit(self):
"""Check domains limit."""
self.client.credentials(
HTTP_AUTHORIZATION='Token ' + self.r_token.key)
limit = self.reseller.userobjectlimit_set.get(name="domains")
quota = self.reseller.userobjectlimit_set.get(name="quota")
quota.max_value = 3
quota.save(update_fields=["max_value"])
url = reverse("api:domain-list")
data = {"name": "test3.com", "quota": 1}
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, 201)
self.assertFalse(limit.is_exceeded())
self.assertFalse(quota.is_exceeded())
data["name"] = "test4.com"
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, 201)
self.assertTrue(limit.is_exceeded())
self.assertFalse(quota.is_exceeded())
data["name"] = "test5.com"
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, 403)
self.assertEqual(
response.content.decode('utf-8'), '"Domains: limit reached"'
)
self.client.delete(
reverse("api:domain-detail",
args=[Domain.objects.get(name="test4.com").pk]))
data["quota"] = 0
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.content.decode(),
'"You\'re not allowed to define unlimited values"')
def test_domain_aliases_limit(self):
"""Check domain aliases limit."""
self.client.credentials(
HTTP_AUTHORIZATION='Token ' + self.r_token.key)
domain = Domain.objects.get(name="test.com")
domain.add_admin(self.reseller)
limit = self.reseller.userobjectlimit_set.get(name="domain_aliases")
url = reverse("api:domain_alias-list")
data = {"name": "dalias1.com", "target": domain.pk}
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, 201)
self.assertFalse(limit.is_exceeded())
data["name"] = "dalias2.com"
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, 201)
self.assertTrue(limit.is_exceeded())
data["username"] = "dalias3.com"
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, 400)
def test_mailboxes_limit(self):
"""Check mailboxes limit."""
self.client.credentials(
HTTP_AUTHORIZATION='Token ' + self.da_token.key)
limit = self.user.userobjectlimit_set.get(name="mailboxes")
url = reverse("api:account-list")
data = {
"username": "[email protected]",
"role": "SimpleUsers",
"password": "Toto1234",
"mailbox": {
"full_address": "[email protected]",
"quota": 10
}
}
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, 201)
self.assertFalse(limit.is_exceeded())
data["username"] = "[email protected]"
data["mailbox"]["full_address"] = "[email protected]"
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, 201)
self.assertTrue(limit.is_exceeded())
data["username"] = "[email protected]"
data["mailbox"]["full_address"] = "[email protected]"
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, 400)
def test_aliases_limit(self):
"""Check mailbox aliases limit."""
self.client.credentials(
HTTP_AUTHORIZATION='Token ' + self.da_token.key)
limit = self.user.userobjectlimit_set.get(name="mailbox_aliases")
url = reverse("api:alias-list")
data = {
"address": "[email protected]",
"recipients": [
"[email protected]", "[email protected]", "user_éé@nonlocal.com"
]
}
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, 201)
self.assertFalse(limit.is_exceeded())
data["address"] = "[email protected]"
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, 201)
self.assertTrue(limit.is_exceeded())
data["address"] = "[email protected]"
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, 400)
class APIDomainLimitsTestCase(lib_tests.ModoAPITestCase):
"""Check that limits are used also by the API."""
@classmethod
def setUpTestData(cls):
"""Create test data."""
super(APIDomainLimitsTestCase, cls).setUpTestData()
cls.localconfig.parameters.set_value(
"enable_domain_limits", True)
for name, tpl in utils.get_domain_limit_templates():
cls.localconfig.parameters.set_value(
"deflt_domain_{0}_limit".format(name), 2)
cls.localconfig.save()
populate_database()
def test_mailboxes_limit(self):
"""Check mailboxes limit."""
domain = Domain.objects.get(name="test.com")
limit = domain.domainobjectlimit_set.get(name="mailboxes")
self.assertTrue(limit.is_exceeded())
url = reverse("api:account-list")
data = {
"username": "[email protected]",
"role": "SimpleUsers",
"password": "Toto1234",
"mailbox": {
"full_address": "[email protected]",
"quota": 10
}
}
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, 400)
def test_domain_aliases_limit(self):
"""Check domain_aliases limit."""
domain = Domain.objects.get(name="test.com")
limit = domain.domainobjectlimit_set.get(name="domain_aliases")
url = reverse("api:domain_alias-list")
data = {"name": "dalias1.com", "target": domain.pk}
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, 201)
data["name"] = "dalias2.com"
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, 201)
self.assertTrue(limit.is_exceeded())
data["name"] = "dalias3.com"
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, 400)
def test_mailbox_aliases_limit(self):
"""Check mailbox_aliases limit."""
domain = Domain.objects.get(name="test.com")
limit = domain.domainobjectlimit_set.get(name="mailbox_aliases")
self.assertTrue(limit.is_exceeded())
url = reverse("api:alias-list")
data = {
"address": "[email protected]",
"recipients": [
"[email protected]", "[email protected]", "user_éé@nonlocal.com"
]
}
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, 400)
class ResourcesAPITestCase(lib_tests.ModoAPITestCase):
"""Check resources API."""
@classmethod
def setUpTestData(cls):
"""Create test data."""
super(ResourcesAPITestCase, cls).setUpTestData()
for name, tpl in utils.get_user_limit_templates():
cls.localconfig.parameters.set_value(
"deflt_user_{0}_limit".format(name), 2)
cls.localconfig.save()
populate_database()
cls.user = User.objects.get(username="[email protected]")
cls.da_token = Token.objects.create(user=cls.user)
cls.reseller = core_factories.UserFactory(
username="reseller", groups=("Resellers", ),
)
cls.r_token = Token.objects.create(user=cls.reseller)
def test_get_admin_resources(self):
"""Retrieve admin resources."""
url = reverse("api:resources-detail", args=[self.user.pk])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
expected = {
"quota": 2,
"mailboxes": 2,
"domain_admins": 2,
"domain_aliases": 2,
"domains": 2,
"mailbox_aliases": 2
}
compare(expected, response.data)
# As reseller => fails
self.client.credentials(
HTTP_AUTHORIZATION="Token {}".format(self.r_token.key))
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
# As domain admin => fails
self.client.credentials(
HTTP_AUTHORIZATION="Token {}".format(self.da_token.key))
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_update_resources(self):
"""Update resources."""
url = reverse("api:resources-detail", args=[self.reseller.pk])
response = self.client.get(url)
resources = response.data
resources.update({"domains": 1000, "mailboxes": 1000})
response = self.client.put(url, resources)
self.assertEqual(response.status_code, 200)
self.assertEqual(
self.reseller.userobjectlimit_set.get(name="domains").max_value,
1000)
# As domain admin => fails
self.client.credentials(
HTTP_AUTHORIZATION="Token {}".format(self.da_token.key))
resources.update({"domains": 2, "mailboxes": 2})
url = reverse("api:resources-detail", args=[self.user.pk])
response = self.client.put(url, resources)
self.assertEqual(response.status_code, 404)
# As reseller => ok
permissions.grant_access_to_object(self.reseller, self.user, True)
self.client.credentials(
HTTP_AUTHORIZATION="Token {}".format(self.r_token.key))
resources.update({"domains": 500, "mailboxes": 500})
url = reverse("api:resources-detail", args=[self.user.pk])
response = self.client.put(url, resources)
self.assertEqual(response.status_code, 200)
self.assertEqual(
self.user.userobjectlimit_set.get(name="domains").max_value,
500)
self.assertEqual(
self.reseller.userobjectlimit_set.get(name="domains").max_value,
502)
resources.update({"domains": 1003})
response = self.client.put(url, resources)
self.assertEqual(response.status_code, 424)
| isc | 261,014,767,997,290,180 | 38.190202 | 78 | 0.604971 | false |
MiuLab/TC-Bot | src/deep_dialog/usersims/usersim_rule.py | 1 | 22386 | """
Created on May 14, 2016
a rule-based user simulator
-- user_goals_first_turn_template.revised.v1.p: all goals
-- user_goals_first_turn_template.part.movie.v1.p: moviename in goal.inform_slots
-- user_goals_first_turn_template.part.nomovie.v1.p: no moviename in goal.inform_slots
@author: xiul, t-zalipt
"""
from .usersim import UserSimulator
import argparse, json, random, copy
from deep_dialog import dialog_config
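# Illustrative only (an assumption, not loaded from the real goal pickle files
# named in the module docstring): the expected shape of a single user goal.
EXAMPLE_USER_GOAL = {
    'inform_slots': {'moviename': 'zoology', 'numberofpeople': '2'},
    'request_slots': {'ticket': 'UNK', 'theater': 'UNK', 'starttime': 'UNK'}
}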
class RuleSimulator(UserSimulator):
""" A rule-based user simulator for testing dialog policy """
def __init__(self, movie_dict=None, act_set=None, slot_set=None, start_set=None, params=None):
""" Constructor shared by all user simulators """
self.movie_dict = movie_dict
self.act_set = act_set
self.slot_set = slot_set
self.start_set = start_set
self.max_turn = params['max_turn']
self.slot_err_probability = params['slot_err_probability']
self.slot_err_mode = params['slot_err_mode']
self.intent_err_probability = params['intent_err_probability']
self.simulator_run_mode = params['simulator_run_mode']
self.simulator_act_level = params['simulator_act_level']
self.learning_phase = params['learning_phase']
def initialize_episode(self):
""" Initialize a new episode (dialog)
state['history_slots']: keeps all the informed_slots
        state['rest_slots']: keeps all the slots that still remain to be informed or requested
"""
self.state = {}
self.state['history_slots'] = {}
self.state['inform_slots'] = {}
self.state['request_slots'] = {}
self.state['rest_slots'] = []
self.state['turn'] = 0
self.episode_over = False
self.dialog_status = dialog_config.NO_OUTCOME_YET
#self.goal = random.choice(self.start_set)
self.goal = self._sample_goal(self.start_set)
self.goal['request_slots']['ticket'] = 'UNK'
self.constraint_check = dialog_config.CONSTRAINT_CHECK_FAILURE
""" Debug: build a fake goal mannually """
#self.debug_falk_goal()
# sample first action
user_action = self._sample_action()
assert (self.episode_over != 1),' but we just started'
return user_action
def _sample_action(self):
""" randomly sample a start action based on user goal """
self.state['diaact'] = random.choice(dialog_config.start_dia_acts.keys())
# "sample" informed slots
if len(self.goal['inform_slots']) > 0:
known_slot = random.choice(self.goal['inform_slots'].keys())
self.state['inform_slots'][known_slot] = self.goal['inform_slots'][known_slot]
if 'moviename' in self.goal['inform_slots'].keys(): # 'moviename' must appear in the first user turn
self.state['inform_slots']['moviename'] = self.goal['inform_slots']['moviename']
for slot in self.goal['inform_slots'].keys():
if known_slot == slot or slot == 'moviename': continue
self.state['rest_slots'].append(slot)
self.state['rest_slots'].extend(self.goal['request_slots'].keys())
# "sample" a requested slot
request_slot_set = list(self.goal['request_slots'].keys())
request_slot_set.remove('ticket')
if len(request_slot_set) > 0:
request_slot = random.choice(request_slot_set)
else:
request_slot = 'ticket'
self.state['request_slots'][request_slot] = 'UNK'
if len(self.state['request_slots']) == 0:
self.state['diaact'] = 'inform'
if (self.state['diaact'] in ['thanks','closing']): self.episode_over = True #episode_over = True
else: self.episode_over = False #episode_over = False
sample_action = {}
sample_action['diaact'] = self.state['diaact']
sample_action['inform_slots'] = self.state['inform_slots']
sample_action['request_slots'] = self.state['request_slots']
sample_action['turn'] = self.state['turn']
self.add_nl_to_action(sample_action)
return sample_action
def _sample_goal(self, goal_set):
""" sample a user goal """
sample_goal = random.choice(self.start_set[self.learning_phase])
return sample_goal
def corrupt(self, user_action):
""" Randomly corrupt an action with error probs (slot_err_probability and slot_err_mode) on Slot and Intent (intent_err_probability). """
for slot in user_action['inform_slots'].keys():
slot_err_prob_sample = random.random()
if slot_err_prob_sample < self.slot_err_probability: # add noise for slot level
if self.slot_err_mode == 0: # replace the slot_value only
if slot in self.movie_dict.keys(): user_action['inform_slots'][slot] = random.choice(self.movie_dict[slot])
elif self.slot_err_mode == 1: # combined
slot_err_random = random.random()
if slot_err_random <= 0.33:
if slot in self.movie_dict.keys(): user_action['inform_slots'][slot] = random.choice(self.movie_dict[slot])
elif slot_err_random > 0.33 and slot_err_random <= 0.66:
del user_action['inform_slots'][slot]
random_slot = random.choice(self.movie_dict.keys())
user_action[random_slot] = random.choice(self.movie_dict[random_slot])
else:
del user_action['inform_slots'][slot]
elif self.slot_err_mode == 2: #replace slot and its values
del user_action['inform_slots'][slot]
random_slot = random.choice(self.movie_dict.keys())
user_action[random_slot] = random.choice(self.movie_dict[random_slot])
elif self.slot_err_mode == 3: # delete the slot
del user_action['inform_slots'][slot]
intent_err_sample = random.random()
if intent_err_sample < self.intent_err_probability: # add noise for intent level
user_action['diaact'] = random.choice(self.act_set.keys())
def debug_falk_goal(self):
""" Debug function: build a fake goal mannually (Can be moved in future) """
self.goal['inform_slots'].clear()
#self.goal['inform_slots']['city'] = 'seattle'
self.goal['inform_slots']['numberofpeople'] = '2'
#self.goal['inform_slots']['theater'] = 'amc pacific place 11 theater'
#self.goal['inform_slots']['starttime'] = '10:00 pm'
#self.goal['inform_slots']['date'] = 'tomorrow'
self.goal['inform_slots']['moviename'] = 'zoology'
self.goal['inform_slots']['distanceconstraints'] = 'close to 95833'
self.goal['request_slots'].clear()
self.goal['request_slots']['ticket'] = 'UNK'
self.goal['request_slots']['theater'] = 'UNK'
self.goal['request_slots']['starttime'] = 'UNK'
self.goal['request_slots']['date'] = 'UNK'
def next(self, system_action):
""" Generate next User Action based on last System Action """
self.state['turn'] += 2
self.episode_over = False
self.dialog_status = dialog_config.NO_OUTCOME_YET
sys_act = system_action['diaact']
if (self.max_turn > 0 and self.state['turn'] > self.max_turn):
self.dialog_status = dialog_config.FAILED_DIALOG
self.episode_over = True
self.state['diaact'] = "closing"
else:
self.state['history_slots'].update(self.state['inform_slots'])
self.state['inform_slots'].clear()
if sys_act == "inform":
self.response_inform(system_action)
elif sys_act == "multiple_choice":
self.response_multiple_choice(system_action)
elif sys_act == "request":
self.response_request(system_action)
elif sys_act == "thanks":
self.response_thanks(system_action)
elif sys_act == "confirm_answer":
self.response_confirm_answer(system_action)
elif sys_act == "closing":
self.episode_over = True
self.state['diaact'] = "thanks"
self.corrupt(self.state)
response_action = {}
response_action['diaact'] = self.state['diaact']
response_action['inform_slots'] = self.state['inform_slots']
response_action['request_slots'] = self.state['request_slots']
response_action['turn'] = self.state['turn']
response_action['nl'] = ""
# add NL to dia_act
self.add_nl_to_action(response_action)
return response_action, self.episode_over, self.dialog_status
def response_confirm_answer(self, system_action):
""" Response for Confirm_Answer (System Action) """
if len(self.state['rest_slots']) > 0:
request_slot = random.choice(self.state['rest_slots'])
if request_slot in self.goal['request_slots'].keys():
self.state['diaact'] = "request"
self.state['request_slots'][request_slot] = "UNK"
elif request_slot in self.goal['inform_slots'].keys():
self.state['diaact'] = "inform"
self.state['inform_slots'][request_slot] = self.goal['inform_slots'][request_slot]
if request_slot in self.state['rest_slots']:
self.state['rest_slots'].remove(request_slot)
else:
self.state['diaact'] = "thanks"
def response_thanks(self, system_action):
""" Response for Thanks (System Action) """
self.episode_over = True
self.dialog_status = dialog_config.SUCCESS_DIALOG
request_slot_set = copy.deepcopy(self.state['request_slots'].keys())
if 'ticket' in request_slot_set:
request_slot_set.remove('ticket')
rest_slot_set = copy.deepcopy(self.state['rest_slots'])
if 'ticket' in rest_slot_set:
rest_slot_set.remove('ticket')
if len(request_slot_set) > 0 or len(rest_slot_set) > 0:
self.dialog_status = dialog_config.FAILED_DIALOG
for info_slot in self.state['history_slots'].keys():
if self.state['history_slots'][info_slot] == dialog_config.NO_VALUE_MATCH:
self.dialog_status = dialog_config.FAILED_DIALOG
if info_slot in self.goal['inform_slots'].keys():
if self.state['history_slots'][info_slot] != self.goal['inform_slots'][info_slot]:
self.dialog_status = dialog_config.FAILED_DIALOG
if 'ticket' in system_action['inform_slots'].keys():
if system_action['inform_slots']['ticket'] == dialog_config.NO_VALUE_MATCH:
self.dialog_status = dialog_config.FAILED_DIALOG
if self.constraint_check == dialog_config.CONSTRAINT_CHECK_FAILURE:
self.dialog_status = dialog_config.FAILED_DIALOG
def response_request(self, system_action):
""" Response for Request (System Action) """
if len(system_action['request_slots'].keys()) > 0:
slot = system_action['request_slots'].keys()[0] # only one slot
if slot in self.goal['inform_slots'].keys(): # request slot in user's constraints #and slot not in self.state['request_slots'].keys():
self.state['inform_slots'][slot] = self.goal['inform_slots'][slot]
self.state['diaact'] = "inform"
if slot in self.state['rest_slots']: self.state['rest_slots'].remove(slot)
if slot in self.state['request_slots'].keys(): del self.state['request_slots'][slot]
self.state['request_slots'].clear()
elif slot in self.goal['request_slots'].keys() and slot not in self.state['rest_slots'] and slot in self.state['history_slots'].keys(): # the requested slot has been answered
self.state['inform_slots'][slot] = self.state['history_slots'][slot]
self.state['request_slots'].clear()
self.state['diaact'] = "inform"
elif slot in self.goal['request_slots'].keys() and slot in self.state['rest_slots']: # request slot in user's goal's request slots, and not answered yet
self.state['diaact'] = "request" # "confirm_question"
self.state['request_slots'][slot] = "UNK"
########################################################################
# Inform the rest of informable slots
########################################################################
for info_slot in self.state['rest_slots']:
if info_slot in self.goal['inform_slots'].keys():
self.state['inform_slots'][info_slot] = self.goal['inform_slots'][info_slot]
for info_slot in self.state['inform_slots'].keys():
if info_slot in self.state['rest_slots']:
self.state['rest_slots'].remove(info_slot)
else:
if len(self.state['request_slots']) == 0 and len(self.state['rest_slots']) == 0:
self.state['diaact'] = "thanks"
else:
self.state['diaact'] = "inform"
self.state['inform_slots'][slot] = dialog_config.I_DO_NOT_CARE
else: # this case should not appear
if len(self.state['rest_slots']) > 0:
random_slot = random.choice(self.state['rest_slots'])
if random_slot in self.goal['inform_slots'].keys():
self.state['inform_slots'][random_slot] = self.goal['inform_slots'][random_slot]
self.state['rest_slots'].remove(random_slot)
self.state['diaact'] = "inform"
elif random_slot in self.goal['request_slots'].keys():
self.state['request_slots'][random_slot] = self.goal['request_slots'][random_slot]
self.state['diaact'] = "request"
def response_multiple_choice(self, system_action):
""" Response for Multiple_Choice (System Action) """
slot = system_action['inform_slots'].keys()[0]
if slot in self.goal['inform_slots'].keys():
self.state['inform_slots'][slot] = self.goal['inform_slots'][slot]
elif slot in self.goal['request_slots'].keys():
self.state['inform_slots'][slot] = random.choice(system_action['inform_slots'][slot])
self.state['diaact'] = "inform"
if slot in self.state['rest_slots']: self.state['rest_slots'].remove(slot)
if slot in self.state['request_slots'].keys(): del self.state['request_slots'][slot]
def response_inform(self, system_action):
""" Response for Inform (System Action) """
if 'taskcomplete' in system_action['inform_slots'].keys(): # check all the constraints from agents with user goal
self.state['diaact'] = "thanks"
#if 'ticket' in self.state['rest_slots']: self.state['request_slots']['ticket'] = 'UNK'
self.constraint_check = dialog_config.CONSTRAINT_CHECK_SUCCESS
if system_action['inform_slots']['taskcomplete'] == dialog_config.NO_VALUE_MATCH:
self.state['history_slots']['ticket'] = dialog_config.NO_VALUE_MATCH
if 'ticket' in self.state['rest_slots']: self.state['rest_slots'].remove('ticket')
if 'ticket' in self.state['request_slots'].keys(): del self.state['request_slots']['ticket']
for slot in self.goal['inform_slots'].keys():
# Deny, if the answers from agent can not meet the constraints of user
if slot not in system_action['inform_slots'].keys() or (self.goal['inform_slots'][slot].lower() != system_action['inform_slots'][slot].lower()):
self.state['diaact'] = "deny"
self.state['request_slots'].clear()
self.state['inform_slots'].clear()
self.constraint_check = dialog_config.CONSTRAINT_CHECK_FAILURE
break
else:
for slot in system_action['inform_slots'].keys():
self.state['history_slots'][slot] = system_action['inform_slots'][slot]
if slot in self.goal['inform_slots'].keys():
if system_action['inform_slots'][slot] == self.goal['inform_slots'][slot]:
if slot in self.state['rest_slots']: self.state['rest_slots'].remove(slot)
if len(self.state['request_slots']) > 0:
self.state['diaact'] = "request"
elif len(self.state['rest_slots']) > 0:
rest_slot_set = copy.deepcopy(self.state['rest_slots'])
if 'ticket' in rest_slot_set:
rest_slot_set.remove('ticket')
if len(rest_slot_set) > 0:
inform_slot = random.choice(rest_slot_set) # self.state['rest_slots']
if inform_slot in self.goal['inform_slots'].keys():
self.state['inform_slots'][inform_slot] = self.goal['inform_slots'][inform_slot]
self.state['diaact'] = "inform"
self.state['rest_slots'].remove(inform_slot)
elif inform_slot in self.goal['request_slots'].keys():
self.state['request_slots'][inform_slot] = 'UNK'
self.state['diaact'] = "request"
else:
self.state['request_slots']['ticket'] = 'UNK'
self.state['diaact'] = "request"
else: # how to reply here?
self.state['diaact'] = "thanks" # replies "closing"? or replies "confirm_answer"
else: # != value Should we deny here or ?
########################################################################
                        # TODO: when the agent informs(slot=value) with a value different from the constraint in the user goal, should we deny or just inform the correct value?
########################################################################
self.state['diaact'] = "inform"
self.state['inform_slots'][slot] = self.goal['inform_slots'][slot]
if slot in self.state['rest_slots']: self.state['rest_slots'].remove(slot)
else:
if slot in self.state['rest_slots']:
self.state['rest_slots'].remove(slot)
if slot in self.state['request_slots'].keys():
del self.state['request_slots'][slot]
if len(self.state['request_slots']) > 0:
request_set = list(self.state['request_slots'].keys())
if 'ticket' in request_set:
request_set.remove('ticket')
if len(request_set) > 0:
request_slot = random.choice(request_set)
else:
request_slot = 'ticket'
self.state['request_slots'][request_slot] = "UNK"
self.state['diaact'] = "request"
elif len(self.state['rest_slots']) > 0:
rest_slot_set = copy.deepcopy(self.state['rest_slots'])
if 'ticket' in rest_slot_set:
rest_slot_set.remove('ticket')
if len(rest_slot_set) > 0:
inform_slot = random.choice(rest_slot_set) #self.state['rest_slots']
if inform_slot in self.goal['inform_slots'].keys():
self.state['inform_slots'][inform_slot] = self.goal['inform_slots'][inform_slot]
self.state['diaact'] = "inform"
self.state['rest_slots'].remove(inform_slot)
if 'ticket' in self.state['rest_slots']:
self.state['request_slots']['ticket'] = 'UNK'
self.state['diaact'] = "request"
elif inform_slot in self.goal['request_slots'].keys():
self.state['request_slots'][inform_slot] = self.goal['request_slots'][inform_slot]
self.state['diaact'] = "request"
else:
self.state['request_slots']['ticket'] = 'UNK'
self.state['diaact'] = "request"
else:
self.state['diaact'] = "thanks" # or replies "confirm_answer"
def main(params):
user_sim = RuleSimulator()
user_sim.initialize_episode()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
args = parser.parse_args()
params = vars(args)
print ("User Simulator Parameters:")
print (json.dumps(params, indent=2))
main(params)
| mit | -4,018,702,831,594,770,400 | 49.580645 | 186 | 0.522737 | false |
jbloomlab/phydms | tests/test_alignmentSimulationRandomSeed.py | 1 | 4797 | """Tests random number seeding in aligment simulation.
Makes sure the random numbering seeding gives reproducible results.
Written by Sarah Hilton and Jesse Bloom.
"""
import os
import numpy
import unittest
import random
import phydmslib.models
import phydmslib.treelikelihood
import phydmslib.simulate
from phydmslib.constants import N_NT, N_AA, AA_TO_INDEX
import Bio.SeqIO
import Bio.Phylo
import glob
class test_simulateRandomSeed_ExpCM(unittest.TestCase):
"""Tests `simulate.simulateAlignment` module with different seeds."""
# use approach here to run multiple tests:
# http://stackoverflow.com/questions/17260469/instantiate-python-unittest-testcase-with-arguments
MODEL = phydmslib.models.ExpCM_empirical_phi
def test_simulateAlignmentRandomSeed(self):
"""Simulate evolution, ensure scaled branches match number of subs."""
numpy.random.seed(1)
random.seed(1)
# define model
nsites = 200
prefs = []
minpref = 0.01
for _r in range(nsites):
rprefs = numpy.random.dirichlet([1] * N_AA)
rprefs[rprefs < minpref] = minpref
rprefs /= rprefs.sum()
prefs.append(dict(zip(sorted(AA_TO_INDEX.keys()), rprefs)))
kappa = 4.2
omega = 0.4
beta = 1.5
mu = 0.3
if self.MODEL == phydmslib.models.ExpCM:
phi = numpy.random.dirichlet([7] * N_NT)
model = phydmslib.models.ExpCM(prefs, kappa=kappa, omega=omega,
beta=beta, mu=mu, phi=phi,
freeparams=['mu'])
elif self.MODEL == phydmslib.models.ExpCM_empirical_phi:
g = numpy.random.dirichlet([7] * N_NT)
model = phydmslib.models.ExpCM_empirical_phi(prefs, g, kappa=kappa,
omega=omega,
beta=beta, mu=mu,
freeparams=['mu'])
elif self.MODEL == phydmslib.models.YNGKP_M0:
e_pw = numpy.asarray([numpy.random.dirichlet([7] * N_NT) for i
in range(3)])
model = phydmslib.models.YNGKP_M0(e_pw, nsites)
else:
raise ValueError("Invalid MODEL: {0}".format(type(self.MODEL)))
# make a test tree
# tree is two sequences separated by a single branch
t = 0.04 / model.branchScale
newicktree = '(tip1:{0},tip2:{0});'.format(t / 2.0)
temptree = '_temp.tree'
with open(temptree, 'w') as f:
f.write(newicktree)
counter = 0
seed = 1
alignments = [{}, {}, {}]
# alignments with the same seed number should be the same
# make two alignments with the same seed number
for counter in range(2):
alignmentPrefix = "test_counter{0}_seed{1}".format(counter, seed)
phydmslib.simulate.simulateAlignment(model, temptree,
alignmentPrefix, seed)
for s in Bio.SeqIO.parse("test_counter{0}_seed{1}_simulated"
"alignment.fasta".format(counter, seed),
"fasta"):
alignments[counter][s.id] = str(s.seq)
        # check they are the same
        for key in alignments[counter].keys():
            self.assertTrue(alignments[counter][key] ==
                            alignments[counter - 1][key])
# alignments with different seed numbers should be different
# make an alignment with a different seed number
seed += 1
counter += 1
alignmentPrefix = "test_counter{0}_seed{1}".format(counter, seed)
phydmslib.simulate.simulateAlignment(model, temptree,
alignmentPrefix, seed)
for s in Bio.SeqIO.parse("test_counter{0}_seed{1}_simulatedalignment."
"fasta".format(counter, seed), "fasta"):
alignments[counter][s.id] = str(s.seq)
# check they are different
for key in alignments[counter].keys():
self.assertFalse(alignments[counter][key] ==
alignments[counter - 1][key])
# general clean-up
os.remove(temptree)
for fasta in glob.glob("test*simulatedalignment.fasta"):
if os.path.isfile(fasta):
os.remove(fasta)
class test_simulateRandomSeed_YNGKP_M0(test_simulateRandomSeed_ExpCM):
"""Tests `simulateAlignment` of `YNGKP_M0` model."""
MODEL = phydmslib.models.YNGKP_M0
if __name__ == '__main__':
runner = unittest.TextTestRunner()
unittest.main(testRunner=runner)
| gpl-3.0 | 4,682,527,580,663,885,000 | 38.975 | 101 | 0.563269 | false |
xkollar/spacewalk | backend/server/handlers/xmlrpc/proxy.py | 6 | 10920 | #
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# system module import
import time
# common module imports
from rhn.UserDictCase import UserDictCase
from spacewalk.common import rhnFlags
from spacewalk.common.rhnLog import log_debug, log_error
from spacewalk.common.rhnConfig import CFG
from spacewalk.common.rhnException import rhnFault
from spacewalk.common.rhnLib import rfc822time, timestamp
from spacewalk.common.rhnTranslate import _
# local module imports
from spacewalk.server.rhnLib import computeSignature
from spacewalk.server.rhnHandler import rhnHandler
from spacewalk.server import rhnServer, rhnSQL, apacheAuth, rhnPackage, rhnChannel
# a class that provides additional authentication support for the
# proxy functions
class rhnProxyHandler(rhnHandler):
def __init__(self):
rhnHandler.__init__(self)
def auth_system(self, system_id):
""" System authentication. We override the standard function because
we need to check additionally if this system_id is entitled for
proxy functionality.
"""
log_debug(3)
server = rhnHandler.auth_system(self, system_id)
# if it did not blow up, we have a valid server. Check proxy
# entitlement.
# XXX: this needs to be moved out of the rhnServer module,
# possibly in here
h = rhnSQL.prepare("""
select 1
from rhnProxyInfo pi
where pi.server_id = :server_id
""")
h.execute(server_id=self.server_id)
row = h.fetchone_dict()
if not row:
# we require entitlement for this functionality
log_error("Server not entitled for Proxy", self.server_id)
raise rhnFault(1002, _(
'Spacewalk Proxy service not enabled for server profile: "%s"')
% server.server["name"])
# we're fine...
return server
def auth_client(self, token):
""" Authenticate a system based on the same authentication tokens
the client is sending for GET requests
"""
log_debug(3)
# Build a UserDictCase out of the token
dict = UserDictCase(token)
# Set rhnFlags so that we can piggyback on apacheAuth's auth_client
rhnFlags.set('AUTH_SESSION_TOKEN', dict)
# XXX To clean up apacheAuth.auth_client's logging, this is not about
# GET requests
result = apacheAuth.auth_client()
if not result:
raise rhnFault(33, _("Invalid session key"))
log_debug(4, "Client auth OK")
# We checked it already, so we're sure it's there
client_id = dict['X-RHN-Server-Id']
server = rhnServer.search(client_id)
if not server:
raise rhnFault(8, _("This server ID no longer exists"))
# XXX: should we check if the username still has access to it?
# probably not, because there is no known good way we can
# update the server system_id on the client side when
# permissions change... Damn it. --gafton
self.server = server
self.server_id = client_id
self.user = dict['X-RHN-Auth-User-Id']
return server
class Proxy(rhnProxyHandler):
""" this is the XML-RPC receiver for proxy calls """
def __init__(self):
log_debug(3)
rhnProxyHandler.__init__(self)
self.functions.append('package_source_in_channel')
self.functions.append('login')
self.functions.append('listAllPackagesKickstart')
self.functions.append('getKickstartChannel')
self.functions.append('getKickstartOrgChannel')
self.functions.append('getKickstartSessionChannel')
self.functions.append('getKickstartChildChannel')
self.functions.append('getTinyUrlChannel')
self.functions.append('checkTokenValidity')
# Method to force a check of the client's auth token.
# Proxy may call this if it does not recognize the token, which may
# happen if the proxy is load-balanced.
def checkTokenValidity(self, token, systemid):
log_debug(5, token, systemid)
# authenticate that this request is initiated from a proxy
try:
self.auth_system(systemid)
server = self.auth_client(token) # sets self.server_id
except rhnFault:
# A Fault means that something did not auth. Either the caller
# is not a proxy or the token is not valid, return false.
return False
# Proxy has to calculate new proxy-clock-skew, and needs channel info
ret = {}
ret['X-RHN-Auth-Server-Time'] = str(time.time())
channels = rhnChannel.getSubscribedChannels(self.server_id)
ret['X-RHN-Auth-Channels'] = channels
return ret
def package_source_in_channel(self, package, channel, auth_token):
""" Validates the client request for a source package download """
log_debug(3, package, channel)
server = self.auth_client(auth_token)
return rhnPackage.package_source_in_channel(self.server_id,
package, channel)
def login(self, system_id):
""" Login routine for the proxy
            Return a formatted string of session token information as regards
            a Spacewalk Proxy. Also sets this information in the headers.
            NOTE: the design description for the auth token format and how it
            is used is well documented in the proxy/broker/rhnProxyAuth.py
code.
"""
log_debug(5, system_id)
# Authenticate. We need the user record to be able to generate
# auth tokens
self.load_user = 1
self.auth_system(system_id)
# log the entry
log_debug(1, self.server_id)
rhnServerTime = str(time.time())
expireOffset = str(CFG.PROXY_AUTH_TIMEOUT)
signature = computeSignature(CFG.SECRET_KEY, self.server_id, self.user,
rhnServerTime, expireOffset)
token = '%s:%s:%s:%s:%s' % (self.server_id, self.user, rhnServerTime,
expireOffset, signature)
# NOTE: for RHN Proxies of version 3.1+ tokens are passed up in a
# multi-valued header with HOSTNAME tagged onto the end of the
# token, so, it looks something like this:
# x-rhn-proxy-auth: 'TOKEN1:HOSTNAME1,TOKEN2:HOSTNAME2'
# This note is only that -- a "heads up" -- in case anyone gets
# confused.
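        #       Illustration only (not used by this handler): a consumer of
        #       that header could recover the (token, hostname) pairs with
        #       something like
        #           [v.rsplit(':', 1) for v in header.split(',')]
        #       since the hostname is always the last colon-separated field.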
# Push this value into the headers so that the proxy can
# intercept and cache it without parsing the xmlrpc.
transport = rhnFlags.get('outputTransportOptions')
transport['X-RHN-Action'] = 'login'
transport['X-RHN-Proxy-Auth'] = token
return token
def listAllPackagesKickstart(self, channel, system_id):
""" Creates and/or serves up a cached copy of all the packages for
this channel, including checksum information.
"""
log_debug(5, channel)
# authenticate that this request is initiated from a proxy
self.auth_system(system_id)
packages = rhnChannel.list_all_packages_checksum(channel)
# transport options...
rhnFlags.set("compress_response", 1)
return packages
def getKickstartChannel(self, kickstart, system_id):
""" Gets channel information for this kickstart tree"""
log_debug(5, kickstart)
# authenticate that this request is initiated from a proxy
self.auth_system(system_id)
return self.__getKickstartChannel(kickstart)
def getKickstartOrgChannel(self, kickstart, org_id, system_id):
""" Gets channel information for this kickstart tree"""
log_debug(5, kickstart, org_id)
# authenticate that this request is initiated from a proxy
self.auth_system(system_id)
ret = rhnChannel.getChannelInfoForKickstartOrg(kickstart, org_id)
return self.__getKickstart(kickstart, ret)
def getKickstartSessionChannel(self, kickstart, session, system_id):
""" Gets channel information for this kickstart tree"""
log_debug(5, kickstart, session)
# authenticate that this request is initiated from a proxy
self.auth_system(system_id)
return self.__getKickstartSessionChannel(kickstart, session)
def getKickstartChildChannel(self, kickstart, child, system_id):
""" Gets channel information for this kickstart tree"""
log_debug(5, kickstart, child)
# authenticate that this request is initiated from a proxy
self.auth_system(system_id)
if (hasattr(CFG, 'KS_RESTRICT_CHILD_CHANNELS') and
CFG.KS_RESTRICT_CHILD_CHANNELS):
            return self.__getKickstartChannel(kickstart)
ret = rhnChannel.getChildChannelInfoForKickstart(kickstart, child)
return self.__getKickstart(kickstart, ret)
def getTinyUrlChannel(self, tinyurl, system_id):
""" Gets channel information for this tinyurl"""
log_debug(5, tinyurl)
# authenticate that this request is initiated from a proxy
self.auth_system(system_id)
ret = rhnChannel.getChannelInfoForTinyUrl(tinyurl)
        if not ret or 'url' not in ret or len(ret['url'].split('/')) != 6:
raise rhnFault(40,
"could not find any data on tiny url '%s'" % tinyurl)
# tiny urls are always for kickstart sessions
args = ret['url'].split('/')
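        # The last two path components map onto the (kickstart, session)
        # parameters of __getKickstartSessionChannel below: args[-1] is the
        # kickstart and args[-2] is the session.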
return self.__getKickstartSessionChannel(args[-1], args[-2])
#-----------------------------------------------------------------------------
def __getKickstartChannel(self, kickstart):
ret = rhnChannel.getChannelInfoForKickstart(kickstart)
return self.__getKickstart(kickstart, ret)
def __getKickstartSessionChannel(self, kickstart, session):
ret = rhnChannel.getChannelInfoForKickstartSession(session)
if not ret:
return self.__getKickstartChannel(kickstart)
return self.__getKickstart(kickstart, ret)
def __getKickstart(self, kickstart, ret):
if not ret:
raise rhnFault(40,
"could not find any data on kickstart '%s'" % kickstart)
return ret
| gpl-2.0 | -7,280,104,270,236,944,000 | 40.052632 | 83 | 0.642125 | false |
klmitch/keystone | keystone/tests/unit/tests/test_utils.py | 26 | 1279 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from testtools import matchers
from testtools import testcase
from keystone.tests.unit import utils
class TestWipDecorator(testcase.TestCase):
def test_raises_SkipError_when_broken_test_fails(self):
@utils.wip('waiting on bug #000000')
def test():
raise Exception('i expected a failure - this is a WIP')
e = self.assertRaises(testcase.TestSkipped, test)
self.assertThat(str(e), matchers.Contains('#000000'))
def test_raises_AssertionError_when_test_passes(self):
@utils.wip('waiting on bug #000000')
def test():
pass # literally
e = self.assertRaises(AssertionError, test)
self.assertThat(str(e), matchers.Contains('#000000'))
| apache-2.0 | 5,781,815,078,932,110,000 | 33.567568 | 75 | 0.706802 | false |
cybercercher/crits | crits/core/bucket.py | 10 | 1701 | import logging
from mongoengine import Document
from mongoengine import StringField, IntField
from django.conf import settings
from crits.core.migrate import migrate_bucket
from crits.core.crits_mongoengine import CritsDocument, CritsSchemaDocument
logger = logging.getLogger(__name__)
class Bucket(CritsDocument, CritsSchemaDocument, Document):
"""
CRITs Bucket Class
"""
meta = {
"collection": settings.COL_BUCKET_LISTS,
"crits_type": 'Bucketlist',
"latest_schema_version": 2,
"schema_doc": {
'name': 'Bucketlist name',
'Actor': 'Integer',
'Backdoor': 'Integer',
'Campaign': 'Integer',
'Certificate': 'Integer',
'Domain': 'Integer',
'Email': 'Integer',
'Target': 'Integer',
'Event': 'Integer',
'Exploit': 'Integer',
'IP': 'Integer',
'Indicator': 'Integer',
'PCAP': 'Integer',
'RawData': 'Integer',
'Sample': 'Integer'
},
}
name = StringField(required=True)
Actor = IntField(default=0)
Backdoor = IntField(default=0)
Campaign = IntField(default=0)
Certificate = IntField(default=0)
Domain = IntField(default=0)
Email = IntField(default=0)
Event = IntField(default=0)
Exploit = IntField(default=0)
Indicator = IntField(default=0)
IP = IntField(default=0)
PCAP = IntField(default=0)
RawData = IntField(default=0)
Sample = IntField(default=0)
Target = IntField(default=0)
def migrate(self):
"""
Migrate to the latest schema version.
"""
migrate_bucket(self)
| mit | -1,423,401,079,896,914,200 | 26.435484 | 75 | 0.587889 | false |
optimamodel/Optima | tests/testpeople.py | 1 | 1810 | """
Test to see if the calculation of people has changed.
Version: 2016
"""
from numpy import shape, array
import optima as op
import os
refresh = 1 # Creates the reference file rather than compares against it
eps = 1e-3 # Don't expect a totally exact match
filename = '2016nov21.npy'
P = op.defaultproject('best')
P.results = op.odict() # Clear
P.runsim()
newraw = P.results[0].raw[0]
if refresh or not(os.path.exists(filename)):
op.saveobj(filename, newraw)
print('Created new "%s".' % filename)
else:
oldraw = op.loadobj(filename)
for key in ['people'] + oldraw.keys(): # Do all keys, but make sure people is first
if type(oldraw[key])==type(array([])):
diffraw = abs(newraw[key]-oldraw[key])
            if (diffraw>eps).any(): # If any element differs by more than eps, raise an error
if key!='people':
errormsg = 'WARNING, key "%s" does not match! Total mismatch: %s' % (key, sum(abs(diffraw)))
raise Exception(errormsg)
if key=='people':
                for t in range(shape(diffraw)[2]): # Loop over all time points
                    for errstate in range(shape(diffraw)[0]): # Loop over all health states
for errpop in range(shape(diffraw)[1]): # Loop over all populations
if abs(diffraw[errstate,errpop,t])>eps:
errormsg = 'WARNING, people do not match!\npeople[%i, %i, %i] = %f vs. %f' % (errstate, errpop, t, oldraw['people'][errstate,errpop,t], newraw['people'][errstate,errpop,t])
raise Exception(errormsg)
else:
print('People are the same, yay! (max diff in people: %s)' % abs(newraw['people']-oldraw['people']).max())
| lgpl-3.0 | -5,742,642,224,696,130,000 | 39.222222 | 208 | 0.575691 | false |
kevinwu06/google-python-exercises | basic/string1.py | 1 | 3636 | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic string exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in string2.py.
# A. donuts
# Given an int count of a number of donuts, return a string
# of the form 'Number of donuts: <count>', where <count> is the number
# passed in. However, if the count is 10 or more, then use the word 'many'
# instead of the actual count.
# So donuts(5) returns 'Number of donuts: 5'
# and donuts(23) returns 'Number of donuts: many'
def donuts(count):
# +++your code here+++
if count < 10:
return 'Number of donuts: %s' % count
else:
return 'Number of donuts: many'
# B. both_ends
# Given a string s, return a string made of the first 2
# and the last 2 chars of the original string,
# so 'spring' yields 'spng'. However, if the string length
# is less than 2, return instead the empty string.
def both_ends(s):
# +++your code here+++
if len(s) < 2:
return ''
else:
return s[0:2]+s[-2:]
# C. fix_start
# Given a string s, return a string
# where all occurences of its first char have
# been changed to '*', except do not change
# the first char itself.
# e.g. 'babble' yields 'ba**le'
# Assume that the string is length 1 or more.
# Hint: s.replace(stra, strb) returns a version of string s
# where all instances of stra have been replaced by strb.
def fix_start(s):
# +++your code here+++
t = s.replace(s[0],'*')
return s[0] + t[1:]
# D. MixUp
# Given strings a and b, return a single string with a and b separated
# by a space '<a> <b>', except swap the first 2 chars of each string.
# e.g.
# 'mix', pod' -> 'pox mid'
# 'dog', 'dinner' -> 'dig donner'
# Assume a and b are length 2 or more.
def mix_up(a, b):
# +++your code here+++
aX = b[0:2]+a[2:]
bX = a[0:2]+b[2:]
return aX+" "+bX
# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Provided main() calls the above functions with interesting inputs,
# using test() to check if each result is correct or not.
def main():
print 'donuts'
# Each line calls donuts, compares its result to the expected for that call.
test(donuts(4), 'Number of donuts: 4')
test(donuts(9), 'Number of donuts: 9')
test(donuts(10), 'Number of donuts: many')
test(donuts(99), 'Number of donuts: many')
print
print 'both_ends'
test(both_ends('spring'), 'spng')
test(both_ends('Hello'), 'Helo')
test(both_ends('a'), '')
test(both_ends('xyz'), 'xyyz')
print
print 'fix_start'
test(fix_start('babble'), 'ba**le')
test(fix_start('aardvark'), 'a*rdv*rk')
test(fix_start('google'), 'goo*le')
test(fix_start('donut'), 'donut')
print
print 'mix_up'
test(mix_up('mix', 'pod'), 'pox mid')
test(mix_up('dog', 'dinner'), 'dig donner')
test(mix_up('gnash', 'sport'), 'spash gnort')
test(mix_up('pezzy', 'firm'), 'fizzy perm')
# Standard boilerplate to call the main() function.
if __name__ == '__main__':
main()
| apache-2.0 | -4,303,612,328,014,750,000 | 29.049587 | 78 | 0.659791 | false |
kinoc/opencog | opencog/python/blending/src/chooser/choose_all.py | 22 | 2586 | from opencog.type_constructors import types
from blending.src.chooser.base_chooser import BaseChooser
from blending.util.blending_config import BlendConfig
from blending.util.blending_error import blending_status
__author__ = 'DongMin Kim'
class ChooseAll(BaseChooser):
"""Atoms chooser that choosing all atoms.
This chooser will choose every atom that has given type, and check the
number of atoms.
"""
def __init__(self, a):
super(self.__class__, self).__init__(a)
def make_default_config(self):
"""Initialize a default config for this class."""
super(self.__class__, self).make_default_config()
def __get_atoms_all(self, focus_atoms, atom_type, least_count):
"""Actual algorithm for choosing atoms.
Args:
focus_atoms: The atoms to blend.
atom_type: The types to limit the result of chosen.
least_count: Threshold value for minimum count of chosen atoms.
:param focus_atoms: list[Atom]
:param atom_type: int
:param least_count: int
"""
# Filter the atoms with specified type.
self.ret = filter(
lambda atom:
atom.is_a(atom_type),
focus_atoms
)
if len(self.ret) < least_count:
self.last_status = blending_status.NOT_ENOUGH_ATOMS
return
def atom_choose_impl(self, focus_atoms, config_base):
"""Implemented factory method to choosing atoms.
Args:
focus_atoms: The atoms to blend.
config_base: A Node to save custom config.
:param focus_atoms: list[Atom]
:param config_base: Atom
"""
# Choose all atoms in AtomSpace if focus_atoms was not given.
focus_atoms = self.a.get_atoms_by_type(types.Atom) \
            if len(focus_atoms) == 0 \
else focus_atoms
atom_type = BlendConfig().get_str(
self.a, "choose-atom-type", config_base
)
least_count = BlendConfig().get_int(
self.a, "choose-least-count", config_base
)
# Check if given atom_type is valid or not.
try:
atom_type = types.__dict__[atom_type]
except KeyError:
atom_type = types.Node
# Check if given least_count is valid or not.
if least_count < 0:
self.last_status = blending_status.NOT_ENOUGH_ATOMS
return
# Call the actual choosing algorithm method.
self.__get_atoms_all(focus_atoms, atom_type, least_count)
| agpl-3.0 | -4,356,747,578,176,253,000 | 31.734177 | 75 | 0.595514 | false |
leiferikb/bitpop | src/tools/swarming_client/utils/tools.py | 1 | 6398 | # Copyright 2013 The Swarming Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0 that
# can be found in the LICENSE file.
"""Various utility functions and classes not specific to any single area."""
import cStringIO
import json
import logging
import logging.handlers
import optparse
import os
import re
import sys
import time
import traceback
class OptionParserWithLogging(optparse.OptionParser):
"""Adds --verbose option."""
# Set to True to enable --log-file options.
enable_log_file = True
def __init__(self, verbose=0, log_file=None, **kwargs):
kwargs.setdefault('description', sys.modules['__main__'].__doc__)
optparse.OptionParser.__init__(self, **kwargs)
self.group_logging = optparse.OptionGroup(self, 'Logging')
self.group_logging.add_option(
'-v', '--verbose',
action='count',
default=verbose,
help='Use multiple times to increase verbosity')
if self.enable_log_file:
self.group_logging.add_option(
'-l', '--log-file',
default=log_file,
help='The name of the file to store rotating log details')
self.group_logging.add_option(
'--no-log', action='store_const', const='', dest='log_file',
help='Disable log file')
def parse_args(self, *args, **kwargs):
# Make sure this group is always the last one.
self.add_option_group(self.group_logging)
options, args = optparse.OptionParser.parse_args(self, *args, **kwargs)
levels = [logging.ERROR, logging.INFO, logging.DEBUG]
level = levels[min(len(levels) - 1, options.verbose)]
logging_console = logging.StreamHandler()
logging_console.setFormatter(logging.Formatter(
'%(levelname)5s %(module)15s(%(lineno)3d): %(message)s'))
logging_console.setLevel(level)
logging.getLogger().setLevel(level)
logging.getLogger().addHandler(logging_console)
if self.enable_log_file and options.log_file:
# This is necessary otherwise attached handler will miss the messages.
logging.getLogger().setLevel(logging.DEBUG)
logging_rotating_file = logging.handlers.RotatingFileHandler(
options.log_file,
maxBytes=10 * 1024 * 1024,
backupCount=5,
encoding='utf-8')
# log files are always at DEBUG level.
logging_rotating_file.setLevel(logging.DEBUG)
logging_rotating_file.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)-8s %(module)15s(%(lineno)3d): %(message)s'))
logging.getLogger().addHandler(logging_rotating_file)
return options, args
class Profiler(object):
"""Context manager that records time spend inside its body."""
def __init__(self, name):
self.name = name
self.start_time = None
def __enter__(self):
self.start_time = time.time()
return self
def __exit__(self, _exc_type, _exec_value, _traceback):
time_taken = time.time() - self.start_time
logging.info('Profiling: Section %s took %3.3f seconds',
self.name, time_taken)
class Unbuffered(object):
"""Disable buffering on a file object."""
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
if '\n' in data:
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
def disable_buffering():
"""Makes this process and child processes stdout unbuffered."""
if not os.environ.get('PYTHONUNBUFFERED'):
# Since sys.stdout is a C++ object, it's impossible to do
# sys.stdout.write = lambda...
sys.stdout = Unbuffered(sys.stdout)
os.environ['PYTHONUNBUFFERED'] = 'x'
def fix_python_path(cmd):
"""Returns the fixed command line to call the right python executable."""
out = cmd[:]
if out[0] == 'python':
out[0] = sys.executable
elif out[0].endswith('.py'):
out.insert(0, sys.executable)
return out
def read_json(filepath):
with open(filepath, 'r') as f:
return json.load(f)
def write_json(filepath_or_handle, data, dense):
"""Writes data into filepath or file handle encoded as json.
If dense is True, the json is packed. Otherwise, it is human readable.
"""
if dense:
kwargs = {'sort_keys': True, 'separators': (',',':')}
else:
kwargs = {'sort_keys': True, 'indent': 2}
if hasattr(filepath_or_handle, 'write'):
json.dump(data, filepath_or_handle, **kwargs)
else:
with open(filepath_or_handle, 'wb') as f:
json.dump(data, f, **kwargs)
def format_json(data, dense):
"""Returns a string with json encoded data.
If dense is True, the json is packed. Otherwise, it is human readable.
"""
buf = cStringIO.StringIO()
write_json(buf, data, dense)
return buf.getvalue()
def report_error(error):
"""Prints a error to stderr, wrapping it into header and footer.
That way errors can be reliably extracted from logs. It's indented to be used
only for non recoverable unexpected errors. Is should NOT be used for input
validation, command line argument errors, etc.
Arguments:
error: error message string (possibly multiple lines) or an instance of
Exception subclass. In the later case a traceback will also be
reported. It's assumed that |report_error| is called in an except
block where |error| was caught.
"""
print >> sys.stderr, '[------ Swarming Error ------]'
print >> sys.stderr, str(error)
if isinstance(error, Exception):
print >> sys.stderr, traceback.format_exc(),
print >> sys.stderr, '[----------------------------]'
def gen_blacklist(regexes):
"""Returns a lambda to be used as a blacklist."""
compiled = [re.compile(i) for i in regexes]
return lambda f: any(j.match(f) for j in compiled)
def get_bool_env_var(name):
"""Return True if integer environment variable |name| value is non zero.
If environment variable is missing or is set to '0', returns False.
"""
return bool(int(os.environ.get(name, '0')))
def is_headless():
"""True if running in non-interactive mode on some bot machine.
Examines os.environ for presence of SWARMING_HEADLESS var.
"""
headless_env_keys = (
# This is Chromium specific. Set when running under buildbot slave.
'CHROME_HEADLESS',
# Set when running under swarm bot.
'SWARMING_HEADLESS',
)
return any(get_bool_env_var(key) for key in headless_env_keys)
| gpl-3.0 | -452,192,694,238,004,200 | 30.673267 | 80 | 0.666927 | false |
ruiaylin/percona-xtrabackup | storage/innobase/xtrabackup/test/python/testtools/tests/test_testtools.py | 42 | 42838 | # Copyright (c) 2008-2010 testtools developers. See LICENSE for details.
"""Tests for extensions to the base test library."""
from pprint import pformat
import os
import sys
import tempfile
import unittest
from testtools import (
ErrorHolder,
MultipleExceptions,
PlaceHolder,
TestCase,
clone_test_with_new_id,
content,
skip,
skipIf,
skipUnless,
testcase,
)
from testtools.matchers import (
Equals,
MatchesException,
Raises,
)
from testtools.tests.helpers import (
an_exc_info,
LoggingResult,
Python26TestResult,
Python27TestResult,
ExtendedTestResult,
)
try:
exec('from __future__ import with_statement')
except SyntaxError:
pass
else:
from test_with_with import *
class TestPlaceHolder(TestCase):
def makePlaceHolder(self, test_id="foo", short_description=None):
return PlaceHolder(test_id, short_description)
def test_id_comes_from_constructor(self):
# The id() of a PlaceHolder is whatever you pass into the constructor.
test = PlaceHolder("test id")
self.assertEqual("test id", test.id())
def test_shortDescription_is_id(self):
# The shortDescription() of a PlaceHolder is the id, by default.
test = PlaceHolder("test id")
self.assertEqual(test.id(), test.shortDescription())
def test_shortDescription_specified(self):
# If a shortDescription is provided to the constructor, then
# shortDescription() returns that instead.
test = PlaceHolder("test id", "description")
self.assertEqual("description", test.shortDescription())
def test_repr_just_id(self):
# repr(placeholder) shows you how the object was constructed.
test = PlaceHolder("test id")
self.assertEqual(
"<testtools.testcase.PlaceHolder(%s)>" % repr(test.id()),
repr(test))
def test_repr_with_description(self):
# repr(placeholder) shows you how the object was constructed.
test = PlaceHolder("test id", "description")
self.assertEqual(
"<testtools.testcase.PlaceHolder(%r, %r)>" % (
test.id(), test.shortDescription()),
repr(test))
def test_counts_as_one_test(self):
# A placeholder test counts as one test.
test = self.makePlaceHolder()
self.assertEqual(1, test.countTestCases())
def test_str_is_id(self):
# str(placeholder) is always the id(). We are not barbarians.
test = self.makePlaceHolder()
self.assertEqual(test.id(), str(test))
def test_runs_as_success(self):
# When run, a PlaceHolder test records a success.
test = self.makePlaceHolder()
log = []
test.run(LoggingResult(log))
self.assertEqual(
[('startTest', test), ('addSuccess', test), ('stopTest', test)],
log)
def test_call_is_run(self):
# A PlaceHolder can be called, in which case it behaves like run.
test = self.makePlaceHolder()
run_log = []
test.run(LoggingResult(run_log))
call_log = []
test(LoggingResult(call_log))
self.assertEqual(run_log, call_log)
def test_runs_without_result(self):
# A PlaceHolder can be run without a result, in which case there's no
# way to actually get at the result.
self.makePlaceHolder().run()
def test_debug(self):
# A PlaceHolder can be debugged.
self.makePlaceHolder().debug()
class TestErrorHolder(TestCase):
def makeException(self):
try:
raise RuntimeError("danger danger")
except:
return sys.exc_info()
def makePlaceHolder(self, test_id="foo", error=None,
short_description=None):
if error is None:
error = self.makeException()
return ErrorHolder(test_id, error, short_description)
def test_id_comes_from_constructor(self):
# The id() of a PlaceHolder is whatever you pass into the constructor.
test = ErrorHolder("test id", self.makeException())
self.assertEqual("test id", test.id())
def test_shortDescription_is_id(self):
# The shortDescription() of a PlaceHolder is the id, by default.
test = ErrorHolder("test id", self.makeException())
self.assertEqual(test.id(), test.shortDescription())
def test_shortDescription_specified(self):
# If a shortDescription is provided to the constructor, then
# shortDescription() returns that instead.
test = ErrorHolder("test id", self.makeException(), "description")
self.assertEqual("description", test.shortDescription())
def test_repr_just_id(self):
# repr(placeholder) shows you how the object was constructed.
error = self.makeException()
test = ErrorHolder("test id", error)
self.assertEqual(
"<testtools.testcase.ErrorHolder(%r, %r)>" % (test.id(), error),
repr(test))
def test_repr_with_description(self):
# repr(placeholder) shows you how the object was constructed.
error = self.makeException()
test = ErrorHolder("test id", error, "description")
self.assertEqual(
"<testtools.testcase.ErrorHolder(%r, %r, %r)>" % (
test.id(), error, test.shortDescription()),
repr(test))
def test_counts_as_one_test(self):
# A placeholder test counts as one test.
test = self.makePlaceHolder()
self.assertEqual(1, test.countTestCases())
def test_str_is_id(self):
# str(placeholder) is always the id(). We are not barbarians.
test = self.makePlaceHolder()
self.assertEqual(test.id(), str(test))
def test_runs_as_error(self):
# When run, a PlaceHolder test records a success.
error = self.makeException()
test = self.makePlaceHolder(error=error)
log = []
test.run(LoggingResult(log))
self.assertEqual(
[('startTest', test),
('addError', test, error),
('stopTest', test)], log)
def test_call_is_run(self):
# A PlaceHolder can be called, in which case it behaves like run.
test = self.makePlaceHolder()
run_log = []
test.run(LoggingResult(run_log))
call_log = []
test(LoggingResult(call_log))
self.assertEqual(run_log, call_log)
def test_runs_without_result(self):
# A PlaceHolder can be run without a result, in which case there's no
# way to actually get at the result.
self.makePlaceHolder().run()
def test_debug(self):
# A PlaceHolder can be debugged.
self.makePlaceHolder().debug()
class TestEquality(TestCase):
"""Test ``TestCase``'s equality implementation."""
def test_identicalIsEqual(self):
# TestCase's are equal if they are identical.
self.assertEqual(self, self)
def test_nonIdenticalInUnequal(self):
# TestCase's are not equal if they are not identical.
self.assertNotEqual(TestCase(methodName='run'),
TestCase(methodName='skip'))
class TestAssertions(TestCase):
"""Test assertions in TestCase."""
def raiseError(self, exceptionFactory, *args, **kwargs):
raise exceptionFactory(*args, **kwargs)
def test_formatTypes_single(self):
# Given a single class, _formatTypes returns the name.
class Foo(object):
pass
self.assertEqual('Foo', self._formatTypes(Foo))
def test_formatTypes_multiple(self):
# Given multiple types, _formatTypes returns the names joined by
# commas.
class Foo(object):
pass
class Bar(object):
pass
self.assertEqual('Foo, Bar', self._formatTypes([Foo, Bar]))
def test_assertRaises(self):
# assertRaises asserts that a callable raises a particular exception.
self.assertRaises(RuntimeError, self.raiseError, RuntimeError)
def test_assertRaises_fails_when_no_error_raised(self):
# assertRaises raises self.failureException when it's passed a
# callable that raises no error.
ret = ('orange', 42)
try:
self.assertRaises(RuntimeError, lambda: ret)
except self.failureException:
# We expected assertRaises to raise this exception.
e = sys.exc_info()[1]
self.assertEqual(
'%s not raised, %r returned instead.'
% (self._formatTypes(RuntimeError), ret), str(e))
else:
self.fail('Expected assertRaises to fail, but it did not.')
def test_assertRaises_fails_when_different_error_raised(self):
# assertRaises re-raises an exception that it didn't expect.
self.assertThat(lambda: self.assertRaises(RuntimeError,
self.raiseError, ZeroDivisionError),
Raises(MatchesException(ZeroDivisionError)))
def test_assertRaises_returns_the_raised_exception(self):
# assertRaises returns the exception object that was raised. This is
# useful for testing that exceptions have the right message.
# This contraption stores the raised exception, so we can compare it
# to the return value of assertRaises.
raisedExceptions = []
def raiseError():
try:
raise RuntimeError('Deliberate error')
except RuntimeError:
raisedExceptions.append(sys.exc_info()[1])
raise
exception = self.assertRaises(RuntimeError, raiseError)
self.assertEqual(1, len(raisedExceptions))
self.assertTrue(
exception is raisedExceptions[0],
"%r is not %r" % (exception, raisedExceptions[0]))
def test_assertRaises_with_multiple_exceptions(self):
# assertRaises((ExceptionOne, ExceptionTwo), function) asserts that
# function raises one of ExceptionTwo or ExceptionOne.
expectedExceptions = (RuntimeError, ZeroDivisionError)
self.assertRaises(
expectedExceptions, self.raiseError, expectedExceptions[0])
self.assertRaises(
expectedExceptions, self.raiseError, expectedExceptions[1])
def test_assertRaises_with_multiple_exceptions_failure_mode(self):
# If assertRaises is called expecting one of a group of exceptions and
# a callable that doesn't raise an exception, then fail with an
# appropriate error message.
expectedExceptions = (RuntimeError, ZeroDivisionError)
failure = self.assertRaises(
self.failureException,
self.assertRaises, expectedExceptions, lambda: None)
self.assertEqual(
'%s not raised, None returned instead.'
% self._formatTypes(expectedExceptions), str(failure))
def assertFails(self, message, function, *args, **kwargs):
"""Assert that function raises a failure with the given message."""
failure = self.assertRaises(
self.failureException, function, *args, **kwargs)
self.assertEqual(message, str(failure))
def test_assertIn_success(self):
# assertIn(needle, haystack) asserts that 'needle' is in 'haystack'.
self.assertIn(3, range(10))
self.assertIn('foo', 'foo bar baz')
self.assertIn('foo', 'foo bar baz'.split())
def test_assertIn_failure(self):
# assertIn(needle, haystack) fails the test when 'needle' is not in
# 'haystack'.
self.assertFails('3 not in [0, 1, 2]', self.assertIn, 3, [0, 1, 2])
self.assertFails(
'%r not in %r' % ('qux', 'foo bar baz'),
self.assertIn, 'qux', 'foo bar baz')
def test_assertNotIn_success(self):
# assertNotIn(needle, haystack) asserts that 'needle' is not in
# 'haystack'.
self.assertNotIn(3, [0, 1, 2])
self.assertNotIn('qux', 'foo bar baz')
def test_assertNotIn_failure(self):
# assertNotIn(needle, haystack) fails the test when 'needle' is in
# 'haystack'.
self.assertFails('3 in [1, 2, 3]', self.assertNotIn, 3, [1, 2, 3])
self.assertFails(
'%r in %r' % ('foo', 'foo bar baz'),
self.assertNotIn, 'foo', 'foo bar baz')
def test_assertIsInstance(self):
# assertIsInstance asserts that an object is an instance of a class.
class Foo(object):
"""Simple class for testing assertIsInstance."""
foo = Foo()
self.assertIsInstance(foo, Foo)
def test_assertIsInstance_multiple_classes(self):
# assertIsInstance asserts that an object is an instance of one of a
# group of classes.
class Foo(object):
"""Simple class for testing assertIsInstance."""
class Bar(object):
"""Another simple class for testing assertIsInstance."""
foo = Foo()
self.assertIsInstance(foo, (Foo, Bar))
self.assertIsInstance(Bar(), (Foo, Bar))
def test_assertIsInstance_failure(self):
# assertIsInstance(obj, klass) fails the test when obj is not an
# instance of klass.
class Foo(object):
"""Simple class for testing assertIsInstance."""
self.assertFails(
'42 is not an instance of %s' % self._formatTypes(Foo),
self.assertIsInstance, 42, Foo)
def test_assertIsInstance_failure_multiple_classes(self):
# assertIsInstance(obj, (klass1, klass2)) fails the test when obj is
# not an instance of klass1 or klass2.
class Foo(object):
"""Simple class for testing assertIsInstance."""
class Bar(object):
"""Another simple class for testing assertIsInstance."""
self.assertFails(
'42 is not an instance of %s' % self._formatTypes([Foo, Bar]),
self.assertIsInstance, 42, (Foo, Bar))
def test_assertIsInstance_overridden_message(self):
# assertIsInstance(obj, klass, msg) permits a custom message.
self.assertFails("foo", self.assertIsInstance, 42, str, "foo")
def test_assertIs(self):
# assertIs asserts that an object is identical to another object.
self.assertIs(None, None)
some_list = [42]
self.assertIs(some_list, some_list)
some_object = object()
self.assertIs(some_object, some_object)
def test_assertIs_fails(self):
# assertIs raises assertion errors if one object is not identical to
# another.
self.assertFails('None is not 42', self.assertIs, None, 42)
self.assertFails('[42] is not [42]', self.assertIs, [42], [42])
def test_assertIs_fails_with_message(self):
# assertIs raises assertion errors if one object is not identical to
# another, and includes a user-supplied message, if it's provided.
self.assertFails(
'None is not 42: foo bar', self.assertIs, None, 42, 'foo bar')
def test_assertIsNot(self):
# assertIsNot asserts that an object is not identical to another
# object.
self.assertIsNot(None, 42)
self.assertIsNot([42], [42])
self.assertIsNot(object(), object())
def test_assertIsNot_fails(self):
# assertIsNot raises assertion errors if one object is identical to
# another.
self.assertFails('None is None', self.assertIsNot, None, None)
some_list = [42]
self.assertFails(
'[42] is [42]', self.assertIsNot, some_list, some_list)
def test_assertIsNot_fails_with_message(self):
# assertIsNot raises assertion errors if one object is identical to
# another, and includes a user-supplied message if it's provided.
self.assertFails(
'None is None: foo bar', self.assertIsNot, None, None, "foo bar")
def test_assertThat_matches_clean(self):
class Matcher(object):
def match(self, foo):
return None
self.assertThat("foo", Matcher())
def test_assertThat_mismatch_raises_description(self):
calls = []
class Mismatch(object):
def __init__(self, thing):
self.thing = thing
def describe(self):
calls.append(('describe_diff', self.thing))
return "object is not a thing"
def get_details(self):
return {}
class Matcher(object):
def match(self, thing):
calls.append(('match', thing))
return Mismatch(thing)
def __str__(self):
calls.append(('__str__',))
return "a description"
class Test(TestCase):
def test(self):
self.assertThat("foo", Matcher())
result = Test("test").run()
self.assertEqual([
('match', "foo"),
('describe_diff', "foo"),
('__str__',),
], calls)
self.assertFalse(result.wasSuccessful())
def test_assertEqual_nice_formatting(self):
message = "These things ought not be equal."
a = ['apple', 'banana', 'cherry']
b = {'Thatcher': 'One who mends roofs of straw',
'Major': 'A military officer, ranked below colonel',
'Blair': 'To shout loudly',
'Brown': 'The colour of healthy human faeces'}
expected_error = '\n'.join(
[message,
'not equal:',
'a = %s' % pformat(a),
'b = %s' % pformat(b),
''])
expected_error = '\n'.join([
'Match failed. Matchee: "%r"' % b,
'Matcher: Annotate(%r, Equals(%r))' % (message, a),
'Difference: !=:',
'reference = %s' % pformat(a),
'actual = %s' % pformat(b),
': ' + message,
''
])
self.assertFails(expected_error, self.assertEqual, a, b, message)
self.assertFails(expected_error, self.assertEquals, a, b, message)
self.assertFails(expected_error, self.failUnlessEqual, a, b, message)
def test_assertEqual_formatting_no_message(self):
a = "cat"
b = "dog"
expected_error = '\n'.join([
'Match failed. Matchee: "dog"',
'Matcher: Equals(\'cat\')',
'Difference: \'cat\' != \'dog\'',
''
])
self.assertFails(expected_error, self.assertEqual, a, b)
self.assertFails(expected_error, self.assertEquals, a, b)
self.assertFails(expected_error, self.failUnlessEqual, a, b)
class TestAddCleanup(TestCase):
"""Tests for TestCase.addCleanup."""
class LoggingTest(TestCase):
"""A test that logs calls to setUp, runTest and tearDown."""
def setUp(self):
TestCase.setUp(self)
self._calls = ['setUp']
def brokenSetUp(self):
# A tearDown that deliberately fails.
self._calls = ['brokenSetUp']
raise RuntimeError('Deliberate Failure')
def runTest(self):
self._calls.append('runTest')
def brokenTest(self):
raise RuntimeError('Deliberate broken test')
def tearDown(self):
self._calls.append('tearDown')
TestCase.tearDown(self)
def setUp(self):
TestCase.setUp(self)
self._result_calls = []
self.test = TestAddCleanup.LoggingTest('runTest')
self.logging_result = LoggingResult(self._result_calls)
def assertErrorLogEqual(self, messages):
self.assertEqual(messages, [call[0] for call in self._result_calls])
def assertTestLogEqual(self, messages):
"""Assert that the call log equals 'messages'."""
case = self._result_calls[0][1]
self.assertEqual(messages, case._calls)
def logAppender(self, message):
"""A cleanup that appends 'message' to the tests log.
Cleanups are callables that are added to a test by addCleanup. To
verify that our cleanups run in the right order, we add strings to a
list that acts as a log. This method returns a cleanup that will add
the given message to that log when run.
"""
self.test._calls.append(message)
def test_fixture(self):
# A normal run of self.test logs 'setUp', 'runTest' and 'tearDown'.
# This test doesn't test addCleanup itself, it just sanity checks the
# fixture.
self.test.run(self.logging_result)
self.assertTestLogEqual(['setUp', 'runTest', 'tearDown'])
def test_cleanup_run_before_tearDown(self):
# Cleanup functions added with 'addCleanup' are called before tearDown
# runs.
self.test.addCleanup(self.logAppender, 'cleanup')
self.test.run(self.logging_result)
self.assertTestLogEqual(['setUp', 'runTest', 'tearDown', 'cleanup'])
def test_add_cleanup_called_if_setUp_fails(self):
# Cleanup functions added with 'addCleanup' are called even if setUp
# fails. Note that tearDown has a different behavior: it is only
# called when setUp succeeds.
self.test.setUp = self.test.brokenSetUp
self.test.addCleanup(self.logAppender, 'cleanup')
self.test.run(self.logging_result)
self.assertTestLogEqual(['brokenSetUp', 'cleanup'])
def test_addCleanup_called_in_reverse_order(self):
# Cleanup functions added with 'addCleanup' are called in reverse
# order.
#
# One of the main uses of addCleanup is to dynamically create
# resources that need some sort of explicit tearDown. Often one
# resource will be created in terms of another, e.g.,
# self.first = self.makeFirst()
# self.second = self.makeSecond(self.first)
#
# When this happens, we generally want to clean up the second resource
# before the first one, since the second depends on the first.
self.test.addCleanup(self.logAppender, 'first')
self.test.addCleanup(self.logAppender, 'second')
self.test.run(self.logging_result)
self.assertTestLogEqual(
['setUp', 'runTest', 'tearDown', 'second', 'first'])
def test_tearDown_runs_after_cleanup_failure(self):
# tearDown runs even if a cleanup function fails.
self.test.addCleanup(lambda: 1/0)
self.test.run(self.logging_result)
self.assertTestLogEqual(['setUp', 'runTest', 'tearDown'])
def test_cleanups_continue_running_after_error(self):
# All cleanups are always run, even if one or two of them fail.
self.test.addCleanup(self.logAppender, 'first')
self.test.addCleanup(lambda: 1/0)
self.test.addCleanup(self.logAppender, 'second')
self.test.run(self.logging_result)
self.assertTestLogEqual(
['setUp', 'runTest', 'tearDown', 'second', 'first'])
def test_error_in_cleanups_are_captured(self):
        # If a cleanup raises an error, we want to record it and fail the
# test, even though we go on to run other cleanups.
self.test.addCleanup(lambda: 1/0)
self.test.run(self.logging_result)
self.assertErrorLogEqual(['startTest', 'addError', 'stopTest'])
def test_keyboard_interrupt_not_caught(self):
# If a cleanup raises KeyboardInterrupt, it gets reraised.
def raiseKeyboardInterrupt():
raise KeyboardInterrupt()
self.test.addCleanup(raiseKeyboardInterrupt)
self.assertThat(lambda:self.test.run(self.logging_result),
Raises(MatchesException(KeyboardInterrupt)))
def test_all_errors_from_MultipleExceptions_reported(self):
# When a MultipleExceptions exception is caught, all the errors are
# reported.
def raiseMany():
try:
1/0
except Exception:
exc_info1 = sys.exc_info()
try:
1/0
except Exception:
exc_info2 = sys.exc_info()
raise MultipleExceptions(exc_info1, exc_info2)
self.test.addCleanup(raiseMany)
self.logging_result = ExtendedTestResult()
self.test.run(self.logging_result)
self.assertEqual(['startTest', 'addError', 'stopTest'],
[event[0] for event in self.logging_result._events])
self.assertEqual(set(['traceback', 'traceback-1']),
set(self.logging_result._events[1][2].keys()))
def test_multipleCleanupErrorsReported(self):
# Errors from all failing cleanups are reported as separate backtraces.
self.test.addCleanup(lambda: 1/0)
self.test.addCleanup(lambda: 1/0)
self.logging_result = ExtendedTestResult()
self.test.run(self.logging_result)
self.assertEqual(['startTest', 'addError', 'stopTest'],
[event[0] for event in self.logging_result._events])
self.assertEqual(set(['traceback', 'traceback-1']),
set(self.logging_result._events[1][2].keys()))
def test_multipleErrorsCoreAndCleanupReported(self):
# Errors from all failing cleanups are reported, with stopTest,
# startTest inserted.
self.test = TestAddCleanup.LoggingTest('brokenTest')
self.test.addCleanup(lambda: 1/0)
self.test.addCleanup(lambda: 1/0)
self.logging_result = ExtendedTestResult()
self.test.run(self.logging_result)
self.assertEqual(['startTest', 'addError', 'stopTest'],
[event[0] for event in self.logging_result._events])
self.assertEqual(set(['traceback', 'traceback-1', 'traceback-2']),
set(self.logging_result._events[1][2].keys()))
class TestWithDetails(TestCase):
def assertDetailsProvided(self, case, expected_outcome, expected_keys):
"""Assert that when case is run, details are provided to the result.
:param case: A TestCase to run.
:param expected_outcome: The call that should be made.
:param expected_keys: The keys to look for.
"""
result = ExtendedTestResult()
case.run(result)
case = result._events[0][1]
expected = [
('startTest', case),
(expected_outcome, case),
('stopTest', case),
]
self.assertEqual(3, len(result._events))
self.assertEqual(expected[0], result._events[0])
self.assertEqual(expected[1], result._events[1][0:2])
# Checking the TB is right is rather tricky. doctest line matching
# would help, but 'meh'.
self.assertEqual(sorted(expected_keys),
sorted(result._events[1][2].keys()))
self.assertEqual(expected[-1], result._events[-1])
def get_content(self):
return content.Content(
content.ContentType("text", "foo"), lambda: ['foo'])
class TestExpectedFailure(TestWithDetails):
"""Tests for expected failures and unexpected successess."""
def make_unexpected_case(self):
class Case(TestCase):
def test(self):
raise testcase._UnexpectedSuccess
case = Case('test')
return case
def test_raising__UnexpectedSuccess_py27(self):
case = self.make_unexpected_case()
result = Python27TestResult()
case.run(result)
case = result._events[0][1]
self.assertEqual([
('startTest', case),
('addUnexpectedSuccess', case),
('stopTest', case),
], result._events)
def test_raising__UnexpectedSuccess_extended(self):
case = self.make_unexpected_case()
result = ExtendedTestResult()
case.run(result)
case = result._events[0][1]
self.assertEqual([
('startTest', case),
('addUnexpectedSuccess', case, {}),
('stopTest', case),
], result._events)
def make_xfail_case_xfails(self):
content = self.get_content()
class Case(TestCase):
def test(self):
self.addDetail("foo", content)
self.expectFailure("we are sad", self.assertEqual,
1, 0)
case = Case('test')
return case
def make_xfail_case_succeeds(self):
content = self.get_content()
class Case(TestCase):
def test(self):
self.addDetail("foo", content)
self.expectFailure("we are sad", self.assertEqual,
1, 1)
case = Case('test')
return case
def test_expectFailure_KnownFailure_extended(self):
case = self.make_xfail_case_xfails()
self.assertDetailsProvided(case, "addExpectedFailure",
["foo", "traceback", "reason"])
def test_expectFailure_KnownFailure_unexpected_success(self):
case = self.make_xfail_case_succeeds()
self.assertDetailsProvided(case, "addUnexpectedSuccess",
["foo", "reason"])
class TestUniqueFactories(TestCase):
"""Tests for getUniqueString and getUniqueInteger."""
def test_getUniqueInteger(self):
# getUniqueInteger returns an integer that increments each time you
# call it.
one = self.getUniqueInteger()
self.assertEqual(1, one)
two = self.getUniqueInteger()
self.assertEqual(2, two)
def test_getUniqueString(self):
# getUniqueString returns the current test id followed by a unique
# integer.
name_one = self.getUniqueString()
self.assertEqual('%s-%d' % (self.id(), 1), name_one)
name_two = self.getUniqueString()
self.assertEqual('%s-%d' % (self.id(), 2), name_two)
def test_getUniqueString_prefix(self):
# If getUniqueString is given an argument, it uses that argument as
# the prefix of the unique string, rather than the test id.
name_one = self.getUniqueString('foo')
self.assertThat(name_one, Equals('foo-1'))
name_two = self.getUniqueString('bar')
self.assertThat(name_two, Equals('bar-2'))
class TestCloneTestWithNewId(TestCase):
"""Tests for clone_test_with_new_id."""
def test_clone_test_with_new_id(self):
class FooTestCase(TestCase):
def test_foo(self):
pass
test = FooTestCase('test_foo')
oldName = test.id()
newName = self.getUniqueString()
newTest = clone_test_with_new_id(test, newName)
self.assertEqual(newName, newTest.id())
self.assertEqual(oldName, test.id(),
"the original test instance should be unchanged.")
def test_cloned_testcase_does_not_share_details(self):
"""A cloned TestCase does not share the details dict."""
class Test(TestCase):
def test_foo(self):
self.addDetail(
'foo', content.Content('text/plain', lambda: 'foo'))
orig_test = Test('test_foo')
cloned_test = clone_test_with_new_id(orig_test, self.getUniqueString())
orig_test.run(unittest.TestResult())
self.assertEqual('foo', orig_test.getDetails()['foo'].iter_bytes())
self.assertEqual(None, cloned_test.getDetails().get('foo'))
class TestDetailsProvided(TestWithDetails):
def test_addDetail(self):
mycontent = self.get_content()
self.addDetail("foo", mycontent)
details = self.getDetails()
self.assertEqual({"foo": mycontent}, details)
def test_addError(self):
class Case(TestCase):
def test(this):
this.addDetail("foo", self.get_content())
1/0
self.assertDetailsProvided(Case("test"), "addError",
["foo", "traceback"])
def test_addFailure(self):
class Case(TestCase):
def test(this):
this.addDetail("foo", self.get_content())
self.fail('yo')
self.assertDetailsProvided(Case("test"), "addFailure",
["foo", "traceback"])
def test_addSkip(self):
class Case(TestCase):
def test(this):
this.addDetail("foo", self.get_content())
self.skip('yo')
self.assertDetailsProvided(Case("test"), "addSkip",
["foo", "reason"])
    def test_addSuccess(self):
class Case(TestCase):
def test(this):
this.addDetail("foo", self.get_content())
self.assertDetailsProvided(Case("test"), "addSuccess",
["foo"])
def test_addUnexpectedSuccess(self):
class Case(TestCase):
def test(this):
this.addDetail("foo", self.get_content())
raise testcase._UnexpectedSuccess()
self.assertDetailsProvided(Case("test"), "addUnexpectedSuccess",
["foo"])
def test_addDetails_from_Mismatch(self):
content = self.get_content()
class Mismatch(object):
def describe(self):
return "Mismatch"
def get_details(self):
return {"foo": content}
class Matcher(object):
def match(self, thing):
return Mismatch()
def __str__(self):
return "a description"
class Case(TestCase):
def test(self):
self.assertThat("foo", Matcher())
self.assertDetailsProvided(Case("test"), "addFailure",
["foo", "traceback"])
def test_multiple_addDetails_from_Mismatch(self):
content = self.get_content()
class Mismatch(object):
def describe(self):
return "Mismatch"
def get_details(self):
return {"foo": content, "bar": content}
class Matcher(object):
def match(self, thing):
return Mismatch()
def __str__(self):
return "a description"
class Case(TestCase):
def test(self):
self.assertThat("foo", Matcher())
self.assertDetailsProvided(Case("test"), "addFailure",
["bar", "foo", "traceback"])
def test_addDetails_with_same_name_as_key_from_get_details(self):
content = self.get_content()
class Mismatch(object):
def describe(self):
return "Mismatch"
def get_details(self):
return {"foo": content}
class Matcher(object):
def match(self, thing):
return Mismatch()
def __str__(self):
return "a description"
class Case(TestCase):
def test(self):
self.addDetail("foo", content)
self.assertThat("foo", Matcher())
self.assertDetailsProvided(Case("test"), "addFailure",
["foo", "foo-1", "traceback"])
class TestSetupTearDown(TestCase):
def test_setUpNotCalled(self):
class DoesnotcallsetUp(TestCase):
def setUp(self):
pass
def test_method(self):
pass
result = unittest.TestResult()
DoesnotcallsetUp('test_method').run(result)
self.assertEqual(1, len(result.errors))
def test_tearDownNotCalled(self):
class DoesnotcalltearDown(TestCase):
def test_method(self):
pass
def tearDown(self):
pass
result = unittest.TestResult()
DoesnotcalltearDown('test_method').run(result)
self.assertEqual(1, len(result.errors))
class TestSkipping(TestCase):
"""Tests for skipping of tests functionality."""
def test_skip_causes_skipException(self):
self.assertThat(lambda:self.skip("Skip this test"),
Raises(MatchesException(self.skipException)))
def test_can_use_skipTest(self):
self.assertThat(lambda:self.skipTest("Skip this test"),
Raises(MatchesException(self.skipException)))
def test_skip_without_reason_works(self):
class Test(TestCase):
def test(self):
raise self.skipException()
case = Test("test")
result = ExtendedTestResult()
case.run(result)
self.assertEqual('addSkip', result._events[1][0])
self.assertEqual('no reason given.',
''.join(result._events[1][2]['reason'].iter_text()))
def test_skipException_in_setup_calls_result_addSkip(self):
class TestThatRaisesInSetUp(TestCase):
def setUp(self):
TestCase.setUp(self)
self.skip("skipping this test")
def test_that_passes(self):
pass
calls = []
result = LoggingResult(calls)
test = TestThatRaisesInSetUp("test_that_passes")
test.run(result)
case = result._events[0][1]
self.assertEqual([('startTest', case),
('addSkip', case, "skipping this test"), ('stopTest', case)],
calls)
def test_skipException_in_test_method_calls_result_addSkip(self):
class SkippingTest(TestCase):
def test_that_raises_skipException(self):
self.skip("skipping this test")
result = Python27TestResult()
test = SkippingTest("test_that_raises_skipException")
test.run(result)
case = result._events[0][1]
self.assertEqual([('startTest', case),
('addSkip', case, "skipping this test"), ('stopTest', case)],
result._events)
def test_skip__in_setup_with_old_result_object_calls_addSuccess(self):
class SkippingTest(TestCase):
def setUp(self):
TestCase.setUp(self)
raise self.skipException("skipping this test")
def test_that_raises_skipException(self):
pass
result = Python26TestResult()
test = SkippingTest("test_that_raises_skipException")
test.run(result)
self.assertEqual('addSuccess', result._events[1][0])
def test_skip_with_old_result_object_calls_addError(self):
class SkippingTest(TestCase):
def test_that_raises_skipException(self):
raise self.skipException("skipping this test")
result = Python26TestResult()
test = SkippingTest("test_that_raises_skipException")
test.run(result)
self.assertEqual('addSuccess', result._events[1][0])
def test_skip_decorator(self):
class SkippingTest(TestCase):
@skip("skipping this test")
def test_that_is_decorated_with_skip(self):
self.fail()
result = Python26TestResult()
test = SkippingTest("test_that_is_decorated_with_skip")
test.run(result)
self.assertEqual('addSuccess', result._events[1][0])
def test_skipIf_decorator(self):
class SkippingTest(TestCase):
@skipIf(True, "skipping this test")
def test_that_is_decorated_with_skipIf(self):
self.fail()
result = Python26TestResult()
test = SkippingTest("test_that_is_decorated_with_skipIf")
test.run(result)
self.assertEqual('addSuccess', result._events[1][0])
def test_skipUnless_decorator(self):
class SkippingTest(TestCase):
@skipUnless(False, "skipping this test")
def test_that_is_decorated_with_skipUnless(self):
self.fail()
result = Python26TestResult()
test = SkippingTest("test_that_is_decorated_with_skipUnless")
test.run(result)
self.assertEqual('addSuccess', result._events[1][0])
class TestOnException(TestCase):
def test_default_works(self):
events = []
class Case(TestCase):
def method(self):
self.onException(an_exc_info)
events.append(True)
case = Case("method")
case.run()
self.assertThat(events, Equals([True]))
def test_added_handler_works(self):
events = []
class Case(TestCase):
def method(self):
self.addOnException(events.append)
self.onException(an_exc_info)
case = Case("method")
case.run()
self.assertThat(events, Equals([an_exc_info]))
def test_handler_that_raises_is_not_caught(self):
events = []
class Case(TestCase):
def method(self):
self.addOnException(events.index)
self.assertThat(lambda: self.onException(an_exc_info),
Raises(MatchesException(ValueError)))
case = Case("method")
case.run()
self.assertThat(events, Equals([]))
class TestPatchSupport(TestCase):
class Case(TestCase):
def test(self):
pass
def test_patch(self):
# TestCase.patch masks obj.attribute with the new value.
self.foo = 'original'
test = self.Case('test')
test.patch(self, 'foo', 'patched')
self.assertEqual('patched', self.foo)
def test_patch_restored_after_run(self):
# TestCase.patch masks obj.attribute with the new value, but restores
# the original value after the test is finished.
self.foo = 'original'
test = self.Case('test')
test.patch(self, 'foo', 'patched')
test.run()
self.assertEqual('original', self.foo)
def test_successive_patches_apply(self):
# TestCase.patch can be called multiple times per test. Each time you
# call it, it overrides the original value.
self.foo = 'original'
test = self.Case('test')
test.patch(self, 'foo', 'patched')
test.patch(self, 'foo', 'second')
self.assertEqual('second', self.foo)
def test_successive_patches_restored_after_run(self):
# TestCase.patch restores the original value, no matter how many times
# it was called.
self.foo = 'original'
test = self.Case('test')
test.patch(self, 'foo', 'patched')
test.patch(self, 'foo', 'second')
test.run()
self.assertEqual('original', self.foo)
def test_patch_nonexistent_attribute(self):
# TestCase.patch can be used to patch a non-existent attribute.
test = self.Case('test')
test.patch(self, 'doesntexist', 'patched')
self.assertEqual('patched', self.doesntexist)
def test_restore_nonexistent_attribute(self):
# TestCase.patch can be used to patch a non-existent attribute, after
# the test run, the attribute is then removed from the object.
test = self.Case('test')
test.patch(self, 'doesntexist', 'patched')
test.run()
marker = object()
value = getattr(self, 'doesntexist', marker)
self.assertIs(marker, value)
def test_suite():
from unittest import TestLoader
return TestLoader().loadTestsFromName(__name__)
| gpl-2.0 | 2,444,451,243,105,727,500 | 36.478565 | 79 | 0.602736 | false |
ThiefMaster/indico | indico/modules/categories/controllers/display.py | 3 | 26725 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from datetime import date, datetime, time, timedelta
from functools import partial
from io import BytesIO
from itertools import chain, groupby
from operator import attrgetter, itemgetter
from time import mktime
import dateutil
from dateutil.relativedelta import relativedelta
from flask import flash, jsonify, redirect, request, session
from pytz import utc
from sqlalchemy.orm import joinedload, load_only, subqueryload, undefer, undefer_group
from werkzeug.exceptions import BadRequest, NotFound
from indico.core.db import db
from indico.core.db.sqlalchemy.colors import ColorTuple
from indico.core.db.sqlalchemy.util.queries import get_n_matching
from indico.modules.categories.controllers.base import RHDisplayCategoryBase
from indico.modules.categories.controllers.util import (get_category_view_params, group_by_month,
make_format_event_date_func, make_happening_now_func,
make_is_recent_func)
from indico.modules.categories.models.categories import Category
from indico.modules.categories.serialize import (serialize_categories_ical, serialize_category, serialize_category_atom,
serialize_category_chain)
from indico.modules.categories.util import get_category_stats, get_upcoming_events
from indico.modules.categories.views import WPCategory, WPCategoryCalendar, WPCategoryStatistics
from indico.modules.events.models.events import Event
from indico.modules.events.timetable.util import get_category_timetable
from indico.modules.news.util import get_recent_news
from indico.modules.users import User
from indico.modules.users.models.favorites import favorite_category_table
from indico.util.date_time import format_date, format_number, now_utc
from indico.util.decorators import classproperty
from indico.util.fs import secure_filename
from indico.util.i18n import _
from indico.web.flask.templating import get_template_module
from indico.web.flask.util import send_file, url_for
from indico.web.rh import RH, allow_signed_url
from indico.web.util import jsonify_data
CALENDAR_COLOR_PALETTE = [
ColorTuple('#1F1100', '#ECC495'),
ColorTuple('#0F0202', '#B9CBCA'),
ColorTuple('#0D1E1F', '#C2ECEF'),
ColorTuple('#000000', '#D0C296'),
ColorTuple('#202020', '#EFEBC2')
]
def _flat_map(func, list_):
return chain.from_iterable(map(func, list_))
class RHCategoryIcon(RHDisplayCategoryBase):
_category_query_options = undefer('icon'),
def _check_access(self):
# Category icons are always public
pass
def _process(self):
if not self.category.has_icon:
raise NotFound
metadata = self.category.icon_metadata
return send_file(metadata['filename'], BytesIO(self.category.icon), mimetype=metadata['content_type'],
conditional=True)
class RHCategoryLogo(RHDisplayCategoryBase):
_category_query_options = undefer('logo'),
def _process(self):
if not self.category.has_logo:
raise NotFound
metadata = self.category.logo_metadata
return send_file(metadata['filename'], BytesIO(self.category.logo), mimetype=metadata['content_type'],
conditional=True)
class RHCategoryStatisticsJSON(RHDisplayCategoryBase):
def _process(self):
stats = get_category_stats(self.category.id)
if 'min_year' not in stats:
# in case the instance was freshly updated and still has data
# cached we need to invalidate it to avoid breaking the page
# TODO: remove this in 3.0; by then people had enough time to update to 2.3...
get_category_stats.clear_cached(self.category.id)
stats = get_category_stats(self.category.id)
data = {
'events': stats['events_by_year'],
'contributions': stats['contribs_by_year'],
'files': stats['attachments'],
'updated': stats['updated'].isoformat(),
'total_events': sum(stats['events_by_year'].values()),
'total_contributions': sum(stats['contribs_by_year'].values()),
'min_year': stats['min_year'],
'max_year': date.today().year,
}
if self.category.is_root:
data['users'] = User.query.filter_by(is_deleted=False, is_pending=False).count()
return jsonify(data)
class RHCategoryStatistics(RHDisplayCategoryBase):
def _process(self):
if request.accept_mimetypes.best_match(('application/json', 'text/html')) == 'application/json':
return redirect(url_for('categories.statistics_json', category_id=self.category.id))
return WPCategoryStatistics.render_template('category_statistics.html', self.category)
class RHCategoryInfo(RHDisplayCategoryBase):
@classproperty
@classmethod
def _category_query_options(cls):
children_strategy = subqueryload('children')
children_strategy.load_only('id', 'parent_id', 'title', 'protection_mode', 'event_creation_restricted')
children_strategy.subqueryload('acl_entries')
children_strategy.undefer('deep_children_count')
children_strategy.undefer('deep_events_count')
children_strategy.undefer('has_events')
return (children_strategy,
load_only('id', 'parent_id', 'title', 'protection_mode'),
subqueryload('acl_entries'),
undefer('deep_children_count'),
undefer('deep_events_count'),
undefer('has_events'),
undefer('chain'))
def _process(self):
return jsonify_data(flash=False,
**serialize_category_chain(self.category, include_children=True, include_parents=True))
class RHReachableCategoriesInfo(RH):
def _get_reachable_categories(self, id_, excluded_ids):
cat = Category.query.filter_by(id=id_).options(joinedload('children').load_only('id')).one()
ids = ({c.id for c in cat.children} | {c.id for c in cat.parent_chain_query}) - excluded_ids
if not ids:
return []
return (Category.query
.filter(Category.id.in_(ids))
.options(*RHCategoryInfo._category_query_options)
.all())
def _process(self):
excluded_ids = set(request.json.get('exclude', set())) if request.json else set()
categories = self._get_reachable_categories(request.view_args['category_id'], excluded_ids=excluded_ids)
return jsonify_data(categories=[serialize_category_chain(c, include_children=True) for c in categories],
flash=False)
class RHCategorySearch(RH):
def _process(self):
q = request.args['q'].lower()
query = (Category.query
.filter(Category.title_matches(q))
.options(undefer('deep_children_count'), undefer('deep_events_count'), undefer('has_events'),
joinedload('acl_entries')))
if session.user:
# Prefer favorite categories
query = query.order_by(Category.favorite_of.any(favorite_category_table.c.user_id == session.user.id)
.desc())
# Prefer exact matches and matches at the beginning, then order by category title and if
# those are identical by the chain titles
query = (query
.order_by((db.func.lower(Category.title) == q).desc(),
db.func.lower(Category.title).startswith(q).desc(),
db.func.lower(Category.title),
Category.chain_titles))
total_count = query.count()
query = query.limit(10)
return jsonify_data(categories=[serialize_category(c, with_favorite=True, with_path=True) for c in query],
total_count=total_count, flash=False)
class RHSubcatInfo(RHDisplayCategoryBase):
"""Get basic information about subcategories.
This is intended to return information shown on a category display
page that is not needed immediately and is somewhat expensive to
retrieve.
"""
@classproperty
@classmethod
def _category_query_options(cls):
children_strategy = joinedload('children')
children_strategy.load_only('id')
children_strategy.undefer('deep_events_count')
return children_strategy, load_only('id', 'parent_id', 'protection_mode')
def _process(self):
event_counts = {c.id: {'value': c.deep_events_count, 'pretty': format_number(c.deep_events_count)}
for c in self.category.children}
return jsonify_data(flash=False, event_counts=event_counts)
class RHDisplayCategoryEventsBase(RHDisplayCategoryBase):
"""Base class for display pages displaying an event list."""
_category_query_options = (joinedload('children').load_only('id', 'title', 'protection_mode'),
undefer('attachment_count'), undefer('has_events'))
_event_query_options = (joinedload('person_links'), joinedload('series'), undefer_group('series'),
joinedload('label'),
load_only('id', 'category_id', 'created_dt', 'start_dt', 'end_dt', 'timezone',
'protection_mode', 'title', 'type_', 'series_pos', 'series_count',
'own_address', 'own_venue_id', 'own_venue_name', 'label_id', 'label_message',
'visibility'))
def _process_args(self):
RHDisplayCategoryBase._process_args(self)
self.now = now_utc(exact=False).astimezone(self.category.display_tzinfo)
class RHDisplayCategory(RHDisplayCategoryEventsBase):
"""Show the contents of a category (events/subcategories)"""
def _process(self):
params = get_category_view_params(self.category, self.now)
if not self.category.is_root:
return WPCategory.render_template('display/category.html', self.category, **params)
news = get_recent_news()
upcoming_events = get_upcoming_events()
return WPCategory.render_template('display/root_category.html', self.category, news=news,
upcoming_events=upcoming_events, **params)
class RHEventList(RHDisplayCategoryEventsBase):
"""Return the HTML for the event list before/after a specific month."""
def _parse_year_month(self, string):
try:
dt = datetime.strptime(string, '%Y-%m')
except (TypeError, ValueError):
return None
return self.category.display_tzinfo.localize(dt)
def _process_args(self):
RHDisplayCategoryEventsBase._process_args(self)
before = self._parse_year_month(request.args.get('before'))
after = self._parse_year_month(request.args.get('after'))
if before is None and after is None:
raise BadRequest('"before" or "after" parameter must be specified')
hidden_event_ids = {e.id for e in self.category.get_hidden_events(user=session.user)}
event_query = (Event.query.with_parent(self.category)
.options(*self._event_query_options)
.filter(Event.id.notin_(hidden_event_ids))
.order_by(Event.start_dt.desc(), Event.id.desc()))
if before:
event_query = event_query.filter(Event.start_dt < before)
if after:
event_query = event_query.filter(Event.start_dt >= after)
self.events = event_query.all()
def _process(self):
tpl = get_template_module('categories/display/event_list.html')
html = tpl.event_list_block(events_by_month=group_by_month(self.events, self.now, self.category.tzinfo),
format_event_date=make_format_event_date_func(self.category),
is_recent=make_is_recent_func(self.now),
happening_now=make_happening_now_func(self.now))
return jsonify_data(flash=False, html=html)
class RHShowEventsInCategoryBase(RHDisplayCategoryBase):
"""
Set whether the events in a category are automatically displayed or not.
"""
session_field = ''
def _show_events(self, show_events):
category_ids = session.setdefault(self.session_field, set())
if show_events:
category_ids.add(self.category.id)
else:
category_ids.discard(self.category.id)
session.modified = True
def _process_DELETE(self):
self._show_events(False)
def _process_PUT(self):
self._show_events(True)
class RHShowFutureEventsInCategory(RHShowEventsInCategoryBase):
"""
    Set whether the future events in a category are automatically displayed or not.
"""
session_field = 'fetch_future_events_in'
class RHShowPastEventsInCategory(RHShowEventsInCategoryBase):
"""
Set whether the past events in a category are automatically displayed or not.
"""
session_field = 'fetch_past_events_in'
@allow_signed_url
class RHExportCategoryICAL(RHDisplayCategoryBase):
def _process(self):
filename = f'{secure_filename(self.category.title, str(self.category.id))}-category.ics'
buf = serialize_categories_ical([self.category.id], session.user,
Event.end_dt >= (now_utc() - timedelta(weeks=4)))
return send_file(filename, buf, 'text/calendar')
class RHExportCategoryAtom(RHDisplayCategoryBase):
def _process(self):
filename = f'{secure_filename(self.category.title, str(self.category.id))}-category.atom'
buf = serialize_category_atom(self.category,
url_for(request.endpoint, self.category, _external=True),
session.user,
Event.end_dt >= now_utc())
return send_file(filename, buf, 'application/atom+xml')
class RHCategoryOverview(RHDisplayCategoryBase):
"""Display the events for a particular day, week or month."""
def _get_timetable(self):
return get_category_timetable([self.category.id], self.start_dt, self.end_dt,
detail_level=self.detail, tz=self.category.display_tzinfo,
from_categ=self.category, grouped=False)
def _process_args(self):
RHDisplayCategoryBase._process_args(self)
self.detail = request.args.get('detail', 'event')
if self.detail not in ('event', 'session', 'contribution'):
raise BadRequest('Invalid detail argument')
self.period = request.args.get('period', 'day')
if self.period not in ('day', 'month', 'week'):
raise BadRequest('Invalid period argument')
if 'date' in request.args:
try:
date = datetime.strptime(request.args['date'], '%Y-%m-%d')
except ValueError:
raise BadRequest('Invalid date argument')
else:
date = datetime.now()
date = self.category.display_tzinfo.localize(date)
date = date.replace(hour=0, minute=0, second=0, microsecond=0)
if self.period == 'day':
self.start_dt = date
self.end_dt = self.start_dt + relativedelta(days=1)
elif self.period == 'week':
self.start_dt = date - relativedelta(days=date.weekday())
self.end_dt = self.start_dt + relativedelta(days=7)
elif self.period == 'month':
self.start_dt = date + relativedelta(day=1)
self.end_dt = self.start_dt + relativedelta(months=1)
def _process(self):
info = self._get_timetable()
events = info['events']
# Only categories with icons are listed in the sidebar
subcategory_ids = {event.category.effective_icon_data['source_id']
for event in events if event.category.has_effective_icon}
subcategories = Category.query.filter(Category.id.in_(subcategory_ids)).all()
# Events spanning multiple days must appear on all days
events = _flat_map(partial(self._process_multiday_events, info), events)
def _event_sort_key(event):
# Ongoing events are shown after all other events on the same day and are sorted by start_date
ongoing = getattr(event, 'ongoing', False)
return (event.start_dt.date(), ongoing,
-mktime(event.first_occurence_start_dt.timetuple()) if ongoing else event.start_dt.time())
events = sorted(events, key=_event_sort_key)
params = {
'detail': self.detail,
'period': self.period,
'subcategories': subcategories,
'start_dt': self.start_dt,
'end_dt': self.end_dt - relativedelta(days=1), # Display a close-ended interval
'previous_day_url': self._other_day_url(self.start_dt - relativedelta(days=1)),
'next_day_url': self._other_day_url(self.start_dt + relativedelta(days=1)),
'previous_month_url': self._other_day_url(self.start_dt - relativedelta(months=1)),
'next_month_url': self._other_day_url(self.start_dt + relativedelta(months=1)),
'previous_year_url': self._other_day_url(self.start_dt - relativedelta(years=1)),
'next_year_url': self._other_day_url(self.start_dt + relativedelta(years=1)),
'mathjax': True
}
if self.detail != 'event':
cte = self.category.get_protection_parent_cte()
params['accessible_categories'] = {cat_id
for cat_id, prot_parent_id in db.session.query(cte)
if prot_parent_id == self.category.id}
if self.period == 'day':
return WPCategory.render_template('display/overview/day.html', self.category, events=events, **params)
elif self.period == 'week':
days = self._get_week_days()
template = 'display/overview/week.html'
params['previous_week_url'] = self._other_day_url(self.start_dt - relativedelta(days=7))
params['next_week_url'] = self._other_day_url(self.start_dt + relativedelta(days=7))
elif self.period == 'month':
days = self._get_calendar_days()
template = 'display/overview/month.html'
events_by_day = []
for day in days:
events_by_day.append((day, self._pop_head_while(lambda x: x.start_dt.date() <= day.date(), events)))
# Check whether all weekends are empty
hide_weekend = (not any(map(itemgetter(1), events_by_day[5::7])) and
not any(map(itemgetter(1), events_by_day[6::7])))
if hide_weekend:
events_by_day = [x for x in events_by_day if x[0].weekday() not in (5, 6)]
return WPCategory.render_template(template, self.category, events_by_day=events_by_day,
hide_weekend=hide_weekend, **params)
def _get_week_days(self):
# Return the days shown in the weekly overview
return self._get_days(self.start_dt, self.end_dt)
def _get_calendar_days(self):
# Return the days shown in the monthly overview
start_dt = self.start_dt - relativedelta(days=self.start_dt.weekday())
end_dt = self.end_dt + relativedelta(days=(7 - self.end_dt.weekday()) % 7)
return self._get_days(start_dt, end_dt)
@staticmethod
def _get_days(start_dt, end_dt):
# Return all days in the open-ended interval
current_dt = start_dt
tz = current_dt.tzinfo
next_day = current_dt.date() + timedelta(1)
beginning_of_next_day = tz.localize(datetime.combine(next_day, time()))
while current_dt < end_dt:
yield current_dt
current_dt = beginning_of_next_day
beginning_of_next_day = current_dt + relativedelta(days=1)
@staticmethod
def _pop_head_while(predicate, list_):
# Pop the head of the list while the predicate is true and return the popped elements
res = []
while len(list_) and predicate(list_[0]):
res.append(list_[0])
list_.pop(0)
return res
def _other_day_url(self, date):
return url_for('.overview', self.category, detail=self.detail, period=self.period,
date=format_date(date, 'yyyy-MM-dd'))
def _process_multiday_events(self, info, event):
# Add "fake" proxy events for events spanning multiple days such that there is one event per day
# Function type: Event -> List[Event]
tzinfo = self.category.display_tzinfo
# Breaks, contributions and sessions grouped by start_dt. Each EventProxy will return the relevant ones only
timetable_objects = sorted(chain(*list(info[event.id].values())), key=attrgetter('timetable_entry.start_dt'))
timetable_objects_by_date = {x[0]: list(x[1]) for x
in groupby(timetable_objects, key=lambda x: x.start_dt.astimezone(tzinfo).date())}
# All the days of the event shown in the overview
event_days = self._get_days(max(self.start_dt, event.start_dt.astimezone(tzinfo)),
min(self.end_dt, event.end_dt.astimezone(tzinfo)))
# Generate a proxy object with adjusted start_dt and timetable_objects for each day
return [_EventProxy(event, day, tzinfo, timetable_objects_by_date.get(day.date(), [])) for day in event_days]
class _EventProxy:
def __init__(self, event, date, tzinfo, timetable_objects):
start_dt = datetime.combine(date, event.start_dt.astimezone(tzinfo).timetz())
assert date >= event.start_dt
assert date <= event.end_dt
object.__setattr__(self, '_start_dt', start_dt)
object.__setattr__(self, '_real_event', event)
object.__setattr__(self, '_event_tz_start_date', event.start_dt.astimezone(tzinfo).date())
object.__setattr__(self, '_timetable_objects', timetable_objects)
def __getattribute__(self, name):
if name == 'start_dt':
return object.__getattribute__(self, '_start_dt')
event = object.__getattribute__(self, '_real_event')
if name == 'timetable_objects':
return object.__getattribute__(self, '_timetable_objects')
if name == 'ongoing':
# the event is "ongoing" if the dates (in the tz of the category)
# of the event and the proxy (calendar entry) don't match
event_start_date = object.__getattribute__(self, '_event_tz_start_date')
return event_start_date != self.start_dt.date()
if name == 'first_occurence_start_dt':
return event.start_dt
return getattr(event, name)
def __setattr__(self, name, value):
raise AttributeError('This instance is read-only')
def __repr__(self):
return '<_EventProxy({}, {})>'.format(self.start_dt, object.__getattribute__(self, '_real_event'))
class RHCategoryCalendarView(RHDisplayCategoryBase):
def _process(self):
if not request.is_xhr:
return WPCategoryCalendar.render_template('display/calendar.html', self.category,
start_dt=request.args.get('start_dt'))
tz = self.category.display_tzinfo
start = tz.localize(dateutil.parser.parse(request.args['start'])).astimezone(utc)
end = tz.localize(dateutil.parser.parse(request.args['end'])).astimezone(utc)
query = (Event.query
.filter(Event.starts_between(start, end),
Event.is_visible_in(self.category.id),
~Event.is_deleted)
.options(load_only('id', 'title', 'start_dt', 'end_dt', 'category_id')))
events = self._get_event_data(query)
ongoing_events = (Event.query
.filter(Event.is_visible_in(self.category.id),
~Event.is_deleted,
Event.start_dt < start,
Event.end_dt > end)
.options(load_only('id', 'title', 'start_dt', 'end_dt', 'timezone'))
.order_by(Event.title)
.all())
return jsonify_data(flash=False, events=events, ongoing_event_count=len(ongoing_events),
ongoing_events_html=self._render_ongoing_events(ongoing_events))
def _get_event_data(self, event_query):
data = []
tz = self.category.display_tzinfo
for event in event_query:
category_id = event.category_id
event_data = {'title': event.title,
'start': event.start_dt.astimezone(tz).replace(tzinfo=None).isoformat(),
'end': event.end_dt.astimezone(tz).replace(tzinfo=None).isoformat(),
'url': event.url}
colors = CALENDAR_COLOR_PALETTE[category_id % len(CALENDAR_COLOR_PALETTE)]
event_data.update({'textColor': '#' + colors.text, 'color': '#' + colors.background})
data.append(event_data)
return data
def _render_ongoing_events(self, ongoing_events):
template = get_template_module('categories/display/_calendar_ongoing_events.html')
return template.render_ongoing_events(ongoing_events, self.category.display_tzinfo)
class RHCategoryUpcomingEvent(RHDisplayCategoryBase):
"""Redirect to the upcoming event of a category."""
def _process(self):
event = self._get_upcoming_event()
if event:
return redirect(event.url)
else:
flash(_('There are no upcoming events for this category'))
return redirect(self.category.url)
def _get_upcoming_event(self):
query = (Event.query
.filter(Event.is_visible_in(self.category.id),
Event.start_dt > now_utc(),
~Event.is_deleted)
.options(subqueryload('acl_entries'))
.order_by(Event.start_dt, Event.id))
res = get_n_matching(query, 1, lambda event: event.can_access(session.user))
if res:
return res[0]
| mit | 5,454,829,898,145,752,000 | 45.237024 | 120 | 0.614406 | false |
romain-dartigues/ansible | lib/ansible/modules/source_control/git_config.py | 7 | 6952 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Marius Gedminas <[email protected]>
# (c) 2016, Matthew Gamble <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: git_config
author:
- Matthew Gamble (@djmattyg007)
- Marius Gedminas (@mgedmin)
version_added: 2.1
requirements: ['git']
short_description: Read and write git configuration
description:
- The C(git_config) module changes git configuration by invoking 'git config'.
This is needed if you don't want to use M(template) for the entire git
config file (e.g. because you need to change just C(user.email) in
/etc/.git/config). Solutions involving M(command) are cumbersome or
don't work correctly in check mode.
options:
list_all:
description:
- List all settings (optionally limited to a given I(scope))
type: bool
default: 'no'
name:
description:
- The name of the setting. If no value is supplied, the value will
be read from the config if it has been set.
repo:
description:
- Path to a git repository for reading and writing values from a
specific repo.
scope:
description:
- Specify which scope to read/set values from. This is required
when setting config values. If this is set to local, you must
also specify the repo parameter. It defaults to system only when
not using I(list_all)=yes.
choices: [ "local", "global", "system" ]
value:
description:
- When specifying the name of a single setting, supply a value to
set that setting to the given value.
'''
EXAMPLES = '''
# Set some settings in ~/.gitconfig
- git_config:
name: alias.ci
scope: global
value: commit
- git_config:
name: alias.st
scope: global
value: status
# Or system-wide:
- git_config:
name: alias.remotev
scope: system
value: remote -v
- git_config:
name: core.editor
scope: global
value: vim
# scope=system is the default
- git_config:
name: alias.diffc
value: diff --cached
- git_config:
name: color.ui
value: auto
# Make etckeeper not complain when invoked by cron
- git_config:
name: user.email
repo: /etc
scope: local
value: 'root@{{ ansible_fqdn }}'
# Read individual values from git config
- git_config:
name: alias.ci
scope: global
# scope: system is also assumed when reading values, unless list_all=yes
- git_config:
name: alias.diffc
# Read all values from git config
- git_config:
list_all: yes
scope: global
# When list_all=yes and no scope is specified, you get configuration from all scopes
- git_config:
list_all: yes
# Specify a repository to include local settings
- git_config:
list_all: yes
repo: /path/to/repo.git
'''
RETURN = '''
---
config_value:
description: When list_all=no and value is not set, a string containing the value of the setting in name
returned: success
type: string
sample: "vim"
config_values:
description: When list_all=yes, a dict containing key/value pairs of multiple configuration settings
returned: success
type: dictionary
sample:
core.editor: "vim"
color.ui: "auto"
alias.diffc: "diff --cached"
alias.remotev: "remote -v"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import shlex_quote
def main():
module = AnsibleModule(
argument_spec=dict(
list_all=dict(required=False, type='bool', default=False),
name=dict(type='str'),
repo=dict(type='path'),
scope=dict(required=False, type='str', choices=['local', 'global', 'system']),
value=dict(required=False)
),
mutually_exclusive=[['list_all', 'name'], ['list_all', 'value']],
required_if=[('scope', 'local', ['repo'])],
required_one_of=[['list_all', 'name']],
supports_check_mode=True,
)
git_path = module.get_bin_path('git', True)
params = module.params
# We check error message for a pattern, so we need to make sure the messages appear in the form we're expecting.
# Set the locale to C to ensure consistent messages.
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
if params['name']:
name = params['name']
else:
name = None
if params['scope']:
scope = params['scope']
elif params['list_all']:
scope = None
else:
scope = 'system'
if params['value']:
new_value = params['value']
else:
new_value = None
args = [git_path, "config", "--includes"]
if params['list_all']:
args.append('-l')
if scope:
args.append("--" + scope)
if name:
args.append(name)
if scope == 'local':
dir = params['repo']
elif params['list_all'] and params['repo']:
# Include local settings from a specific repo when listing all available settings
dir = params['repo']
else:
# Run from root directory to avoid accidentally picking up any local config settings
dir = "/"
(rc, out, err) = module.run_command(' '.join(args), cwd=dir)
if params['list_all'] and scope and rc == 128 and 'unable to read config file' in err:
# This just means nothing has been set at the given scope
module.exit_json(changed=False, msg='', config_values={})
elif rc >= 2:
# If the return code is 1, it just means the option hasn't been set yet, which is fine.
module.fail_json(rc=rc, msg=err, cmd=' '.join(args))
if params['list_all']:
values = out.rstrip().splitlines()
config_values = {}
for value in values:
k, v = value.split('=', 1)
config_values[k] = v
module.exit_json(changed=False, msg='', config_values=config_values)
elif not new_value:
module.exit_json(changed=False, msg='', config_value=out.rstrip())
else:
old_value = out.rstrip()
if old_value == new_value:
module.exit_json(changed=False, msg="")
if not module.check_mode:
new_value_quoted = shlex_quote(new_value)
cmd = ' '.join(args + [new_value_quoted])
(rc, out, err) = module.run_command(cmd, cwd=dir)
if err:
module.fail_json(rc=rc, msg=err, cmd=cmd)
module.exit_json(
msg='setting changed',
diff=dict(
before_header=' '.join(args),
before=old_value + "\n",
after_header=' '.join(args),
after=new_value + "\n"
),
changed=True
)
if __name__ == '__main__':
main()
| gpl-3.0 | -8,077,004,745,419,205,000 | 27.727273 | 116 | 0.619534 | false |
mendax-grip/cfdemUtilities | viscosymetri/plotViscCouette.py | 4 | 2758 | # This program is a Torque monitor for a log file that is currently being created in OpenFOAM
# The code dynamically re-reads the logfile and updates the graphic
# It is impossible to close the graphic until the complete refresh is over (this can be modified)
# USAGE : python ./plotViscCouette.py LOGFILE1 [LOGFILE2 ...]
# Author : Bruno Blais
# Last modified : 23-01-2014
#Python imports
#----------------
import os
import sys
import numpy
import time
import matplotlib.pyplot as plt
import re # Ouhh regular expressions :)
#----------------
#********************************
# OPTIONS AND USER PARAMETERS
#********************************
#Physical parameter
R = 0.0238
L=0.01
k=0.0138/0.0238
mu=0.1
phiVar=17500.*20;
epsMax=0.65
nBar=2.
factor = 1./(4.*numpy.pi*(1*2.*numpy.pi)*L*R*R * (k*k)/(1.-k*k)) / mu /2. / numpy.pi
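# Editorial note (added for clarity; not part of the original script): the
# prefactor `factor` above and the normalisation inside readf() below appear to
# assume the inner-cylinder Couette relation
#   M = 4*pi*mu*Omega*L*R^2 * k^2/(1-k^2),   with Omega = 1 rev/s = 2*pi rad/s,
# so dividing a measured torque M by that prefactor and by the suspending-fluid
# viscosity `mu` gives a relative viscosity; `factor` divides by Omega once more
# so it can start from a measured power P = M*Omega instead. A minimal sketch of
# the torque-based conversion (helper name is an assumption, not in the original):
def torque_to_relative_viscosity(M, omega=2.*numpy.pi):
    # Relative viscosity implied by torque M on the inner cylinder.
    return abs(M) / (4.*numpy.pi*omega*L*R*R*(k*k)/(1.-k*k)) / mu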
#=============================
# READER OF LOG FILE
#=============================
# This function reads the log file and extracts the torque
def readf(fname):
t=[]
moment=[]
infile = open(fname,'r')
if (infile!=0):
print "Log file opened"
for l in infile:
l_str = l.split(")")
if (len(l_str)>3):
l2_str = l_str[5].split()
if (len(l_str)>2):
l2_num = float(l2_str[2])
moment.extend([l2_num])
l2_str = l_str[0].split()
l2_num = float(l2_str[0])
t.extend([l2_num])
else:
print "File %s could not be opened" %fname
for i in range(0,len(moment)):
moment[i] = abs(moment[i]/(4.*numpy.pi*(1*2.*numpy.pi)*L*R*R * (k*k)/(1.-k*k))) / mu
    infile.close()
    return t, moment
#======================
# MAIN
#======================
# Get name from terminal
ax = plt.figure("Torque") #Create window
#Labeling
plt.ylabel('Dynamic viscosity [Pa*s]')
plt.xlabel('Time [s]')
plt.title('Dynamic evolution of the viscosity')
visc=[]
phi=[]
viscP = []
for i in range(1,len(sys.argv)):
fname = sys.argv[i]
[t,moment] = readf(fname)
phi.extend([float(int(fname))/phiVar])
visc.extend([numpy.average(moment[-100:-1])])
plt.plot(t,moment,'-')
#get the power viscosity
fnamePower="p"+fname
t, p = numpy.loadtxt(fnamePower, unpack=True)
# convert power to viscosity
viscP.extend([p[-1]*factor])
plt.show()
#Second plot of evolution of viscosity vs phi
ax = plt.figure("Viscosity") #Create window
plt.ylabel('Dynamic viscosity [Pa*s]')
plt.xlabel('Fraction of solid')
plt.title('Viscosity vs fraction of solid')
viscAnalytical=[]
for i in phi:
viscAnalytical.extend([(1-i/epsMax)**(-nBar*epsMax)])
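# Editorial note: this reference curve has the Krieger-Dougherty/Maron-Pierce
# form mu_r = (1 - phi/phi_max)**(-n*phi_max), with phi_max = epsMax = 0.65 and
# n = nBar = 2.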
plt.plot(phi,visc,'-x', label='Simulation results Torque')
plt.plot(phi,viscP,'-x', label='Simulation results Power')
plt.plot(phi, viscAnalytical,'-o',label='Analytical model')
plt.legend(loc=2)
#plt.yscale('log')
plt.show()
| lgpl-3.0 | 7,178,959,562,948,134,000 | 21.064 | 97 | 0.607687 | false |
google/pigweed | pw_hdlc/py/pw_hdlc/rpc_console.py | 1 | 7524 | # Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Console for interacting with pw_rpc over HDLC.
To start the console, provide a serial port as the --device argument and paths
or globs for .proto files that define the RPC services to support:
python -m pw_hdlc.rpc_console --device /dev/ttyUSB0 sample.proto
This starts an IPython console for communicating with the connected device. A
few variables are predefined in the interactive console. These include:
rpcs - used to invoke RPCs
device - the serial device used for communication
client - the pw_rpc.Client
protos - protocol buffer messages indexed by proto package
An example echo RPC command:
rpcs.pw.rpc.EchoService.Echo(msg="hello!")
"""
import argparse
import glob
from inspect import cleandoc
import logging
from pathlib import Path
import sys
from typing import Any, Collection, Iterable, Iterator, BinaryIO
import socket
import serial # type: ignore
import pw_cli.log
from pw_console.console_app import embed
from pw_console.__main__ import create_temp_log_file
from pw_tokenizer import tokens
from pw_tokenizer.database import LoadTokenDatabases
from pw_tokenizer.detokenize import Detokenizer, detokenize_base64
from pw_hdlc.rpc import HdlcRpcClient, default_channels
_LOG = logging.getLogger(__name__)
_DEVICE_LOG = logging.getLogger('rpc_device')
PW_RPC_MAX_PACKET_SIZE = 256
SOCKET_SERVER = 'localhost'
SOCKET_PORT = 33000
MKFIFO_MODE = 0o666
def _parse_args():
"""Parses and returns the command line arguments."""
parser = argparse.ArgumentParser(description=__doc__)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-d', '--device', help='the serial port to use')
parser.add_argument('-b',
'--baudrate',
type=int,
default=115200,
help='the baud rate to use')
parser.add_argument(
'-o',
'--output',
type=argparse.FileType('wb'),
default=sys.stdout.buffer,
help=('The file to which to write device output (HDLC channel 1); '
'provide - or omit for stdout.'))
parser.add_argument('--logfile', help='Console debug log file.')
group.add_argument('-s',
'--socket-addr',
type=str,
                       help='use socket to connect to server, type default for '
                            'localhost:33000, or manually input the server address:port')
parser.add_argument("--token-databases",
metavar='elf_or_token_database',
nargs="+",
action=LoadTokenDatabases,
help="Path to tokenizer database csv file(s).")
parser.add_argument('--proto-globs',
nargs='+',
help='glob pattern for .proto files')
return parser.parse_args()
def _expand_globs(globs: Iterable[str]) -> Iterator[Path]:
for pattern in globs:
for file in glob.glob(pattern, recursive=True):
yield Path(file)
def _start_ipython_terminal(client: HdlcRpcClient) -> None:
"""Starts an interactive IPython terminal with preset variables."""
local_variables = dict(
client=client,
channel_client=client.client.channel(1),
rpcs=client.client.channel(1).rpcs,
protos=client.protos.packages,
# Include the active pane logger for creating logs in the repl.
LOG=_DEVICE_LOG,
)
welcome_message = cleandoc("""
Welcome to the Pigweed Console!
Press F1 for help.
Example commands:
rpcs.pw.rpc.EchoService.Echo(msg='hello!')
LOG.warning('Message appears console log window.')
""")
embed(global_vars=local_variables,
local_vars=None,
loggers=[_DEVICE_LOG],
repl_startup_message=welcome_message,
help_text=__doc__)
class SocketClientImpl:
def __init__(self, config: str):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_server = ''
socket_port = 0
if config == 'default':
socket_server = SOCKET_SERVER
socket_port = SOCKET_PORT
else:
socket_server, socket_port_str = config.split(':')
socket_port = int(socket_port_str)
self.socket.connect((socket_server, socket_port))
def write(self, data: bytes):
self.socket.sendall(data)
def read(self, num_bytes: int = PW_RPC_MAX_PACKET_SIZE):
return self.socket.recv(num_bytes)
def console(device: str, baudrate: int, proto_globs: Collection[str],
token_databases: Collection[tokens.Database], socket_addr: str,
logfile: str, output: Any) -> int:
"""Starts an interactive RPC console for HDLC."""
# argparse.FileType doesn't correctly handle '-' for binary files.
if output is sys.stdout:
output = sys.stdout.buffer
if not logfile:
# Create a temp logfile to prevent logs from appearing over stdout. This
# would corrupt the prompt toolkit UI.
logfile = create_temp_log_file()
pw_cli.log.install(logging.INFO, True, False, logfile)
    detokenizer = None
    if token_databases:
        detokenizer = Detokenizer(tokens.Database.merged(*token_databases),
                                  show_errors=False)
if not proto_globs:
proto_globs = ['**/*.proto']
protos = list(_expand_globs(proto_globs))
if not protos:
_LOG.critical('No .proto files were found with %s',
', '.join(proto_globs))
_LOG.critical('At least one .proto file is required')
return 1
_LOG.debug('Found %d .proto files found with %s', len(protos),
', '.join(proto_globs))
if socket_addr is None:
serial_device = serial.Serial(device, baudrate, timeout=1)
read = lambda: serial_device.read(8192)
write = serial_device.write
else:
try:
socket_device = SocketClientImpl(socket_addr)
read = socket_device.read
write = socket_device.write
except ValueError:
_LOG.exception('Failed to initialize socket at %s', socket_addr)
return 1
_start_ipython_terminal(
HdlcRpcClient(
read, protos, default_channels(write),
lambda data: detokenize_and_write_to_output(
data, output, detokenizer)))
return 0
def detokenize_and_write_to_output(data: bytes,
unused_output: BinaryIO = sys.stdout.buffer,
detokenizer=None):
log_line = data
if detokenizer:
log_line = detokenize_base64(detokenizer, data)
for line in log_line.decode(errors="surrogateescape").splitlines():
_DEVICE_LOG.info(line)
def main() -> int:
return console(**vars(_parse_args()))
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 | -6,425,777,960,323,988,000 | 33.045249 | 80 | 0.629718 | false |
lmco/laikaboss | laikaboss/modules/explode_zip.py | 20 | 6315 | # Copyright 2015 Lockheed Martin Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cStringIO
import logging
from laikaboss.objectmodel import ExternalVars, ModuleObject#, QuitScanException
from laikaboss.si_module import SI_MODULE
from laikaboss.util import get_option
import zipfile
class EXPLODE_ZIP(SI_MODULE):
'''Laika module for exploding buffers out of zipped files.'''
def __init__(self,):
'''Main constructor'''
self.module_name = "EXPLODE_ZIP"
def _run(self, scanObject, result, depth, args):
'''Laika framework module logic execution'''
moduleResult = []
# Determine file limit from arguments
file_limit = int(get_option(args, 'filelimit', 'zipfilelimit', 0))
byte_limit = int(get_option(args, 'bytelimit', 'zipbytelimit', 0))
zippw = get_option(args, 'password', 'zippassword', '')
# write temporary file so we can open it with zipfile
file = cStringIO.StringIO()
file.write(scanObject.buffer)
try:
logging.debug("first attempt at unzipping..")
self._unzip_file(self, moduleResult, file, scanObject, result, zippw, file_limit, byte_limit)
except zipfile.BadZipfile:
try:
# try to repair the zip file (known python limitation)
logging.debug("error extracting zip, trying to fix it")
self._fix_bad_zip(file, scanObject.buffer)
self._unzip_file(self, moduleResult, file, scanObject, result, zippw, file_limit, byte_limit)
#except QuitScanException:
# raise
except:
# add a flag to the object to indicate it couldn't be extracted
logging.debug("couldn't fix zip, marking it as corrupt")
scanObject.addFlag("CORRUPT_ZIP")
# error logging handled by SI_MODULE wrapper
raise
finally:
scanObject.addMetadata(self.module_name, "Unzipped", len(moduleResult))
file.close()
return moduleResult
# These private methods are set to static to ensure immutability since
# they may be called more than once in the lifetime of the class
@staticmethod
def _unzip_file(self, moduleResult, file, scanObject, result, password, file_limit, byte_limit):
'''
Attempts to unzip the file, looping through the namelist and adding each
object to the ModuleResult. We add the filename from the archive to the
external variables so it is available during recursive scanning.
If the file is encrypted (determined by an exception), add the flag and return
Arguments:
moduleResult -- an instance of the ModuleResult class created above
file -- a file object created using the buffer passed into this module
scanObject -- an instance of the ScanObject class, created by the dispatcher
result -- an instance of the ScanResult class, created by the caller
password -- the password for the zipfile, if any
file_limit -- the maximum number of files to explode, adds flag if exceeded
byte_limit -- the maximum size in bytes for an exploded buffer, adds flag if exceeded
Returns:
        Nothing, modifications are made directly to moduleResult.
'''
try:
zf = zipfile.ZipFile(file)
if password:
zf.setpassword(password)
file_count = 0
#dir_depth_max = 0
#dir_count = 0
namelist = zf.namelist()
scanObject.addMetadata(self.module_name, "Total_Files", len(namelist))
exceeded_byte_limit = False
for name in namelist:
if byte_limit:
info = zf.getinfo(name)
if info.file_size > byte_limit:
logging.debug("EXPLODE_ZIP: skipping file due to byte limit")
exceeded_byte_limit = True
continue
childBuffer = zf.read(name)
if byte_limit and len(childBuffer) > byte_limit:
logging.debug("EXPLODE_ZIP: skipping file due to byte limit")
exceeded_byte_limit = True
continue
moduleResult.append(ModuleObject(buffer=childBuffer,
externalVars=ExternalVars(filename='e_zip_%s' % name)))
file_count += 1
if file_limit and file_count >= file_limit:
scanObject.addFlag("zip:err:LIMIT_EXCEEDED")
logging.debug("EXPLODE_ZIP: breaking due to file limit")
break
if exceeded_byte_limit:
scanObject.addFlag("zip:err:BYTE_LIMIT_EXCEEDED")
except RuntimeError as rte:
if "encrypted" in rte.args[0]:
scanObject.addFlag("ENCRYPTED_ZIP")
else:
raise
@staticmethod
def _fix_bad_zip(file, buffer):
'''
Python's zipfile module does not tolerate extra data after the central directory
signature in a zip archive. This function truncates the file so that the python
zipfile module can properly extract the file.
Arguments:
file -- a python file object containing the bad zip file
buffer -- a raw buffer of the bad zip file
Returns:
Nothing, modification made directly to the file object.
'''
pos = buffer.find('\x50\x4b\x05\x06') # End of central directory signature
if (pos > 0):
logging.debug("Truncating file at location %s", str(pos + 22))
file.seek(pos + 22) # size of 'ZIP end of central directory record'
file.truncate()
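# Illustrative sketch (added for exposition; not part of the original module):
# the same end-of-central-directory truncation performed by _fix_bad_zip, shown
# in isolation on a synthetic archive with junk appended after the EOCD record.
# The payload and file name below are assumptions for demonstration only.
def _demo_fix_bad_zip():
    buf = cStringIO.StringIO()
    zf = zipfile.ZipFile(buf, 'w')
    zf.writestr('hello.txt', 'hello world')
    zf.close()
    raw = buf.getvalue() + '\x00' * 16              # junk after the archive
    pos = raw.find('\x50\x4b\x05\x06')              # End of central directory signature
    repaired = cStringIO.StringIO(raw[:pos + 22])   # keep the 22-byte EOCD record, drop the rest
    return zipfile.ZipFile(repaired).namelist()     # ['hello.txt']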
| apache-2.0 | 8,532,966,706,493,343,000 | 42.854167 | 109 | 0.617577 | false |
BruceDLong/CodeDog | Scons/scons-local-4.1.0.post1/SCons/Tool/midl.py | 4 | 3012 | """SCons.Tool.midl
Tool-specific initialization for midl (Microsoft IDL compiler).
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons.Action
import SCons.Builder
import SCons.Defaults
import SCons.Scanner.IDL
import SCons.Util
from .MSCommon import msvc_exists
def midl_emitter(target, source, env):
"""Produces a list of outputs from the MIDL compiler"""
base, _ = SCons.Util.splitext(str(target[0]))
tlb = target[0]
incl = base + '.h'
interface = base + '_i.c'
targets = [tlb, incl, interface]
midlcom = env['MIDLCOM']
if midlcom.find('/proxy') != -1:
proxy = base + '_p.c'
targets.append(proxy)
if midlcom.find('/dlldata') != -1:
dlldata = base + '_data.c'
targets.append(dlldata)
return (targets, source)
idl_scanner = SCons.Scanner.IDL.IDLScan()
midl_action = SCons.Action.Action('$MIDLCOM', '$MIDLCOMSTR')
midl_builder = SCons.Builder.Builder(action = midl_action,
src_suffix = '.idl',
suffix='.tlb',
emitter = midl_emitter,
source_scanner = idl_scanner)
def generate(env):
"""Add Builders and construction variables for midl to an Environment."""
env['MIDL'] = 'MIDL.EXE'
env['MIDLFLAGS'] = SCons.Util.CLVar('/nologo')
env['MIDLCOM'] = '$MIDL $MIDLFLAGS /tlb ${TARGETS[0]} /h ${TARGETS[1]} /iid ${TARGETS[2]} /proxy ${TARGETS[3]} /dlldata ${TARGETS[4]} $SOURCE 2> NUL'
env['BUILDERS']['TypeLibrary'] = midl_builder
def exists(env):
return msvc_exists(env)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 | 9,175,001,298,632,724,000 | 33.227273 | 159 | 0.666999 | false |
danielyule/hearthbreaker | tests/power_tests.py | 7 | 7968 | import random
import unittest
from hearthbreaker.agents.basic_agents import PredictableAgent, DoNothingAgent
from hearthbreaker.cards.minions.hunter import SteamwheedleSniper
from hearthbreaker.cards.minions.neutral import StonetuskBoar
from hearthbreaker.cards.minions.priest import ProphetVelen
from hearthbreaker.cards.minions.warlock import DreadInfernal
from hearthbreaker.cards.spells.mage import Pyroblast
from tests.agents.testing_agents import CardTestingAgent
from hearthbreaker.cards import HuntersMark, MogushanWarden, AvengingWrath, CircleOfHealing, AlAkirTheWindlord, \
Shadowform, DefiasRingleader, Doomguard, ArcaneIntellect, Swipe, ArathiWeaponsmith, MassDispel
from hearthbreaker.powers import MindSpike, MindShatter
from tests.testing_utils import generate_game_for
class TestPowers(unittest.TestCase):
def setUp(self):
random.seed(1857)
def test_DruidPower(self):
game = generate_game_for(Swipe, MogushanWarden, PredictableAgent, DoNothingAgent)
for turn in range(0, 3):
game.play_single_turn()
self.assertEqual(1, game.players[0].hero.armor)
self.assertEqual(29, game.players[1].hero.health)
def test_HunterPower(self):
game = generate_game_for(HuntersMark, MogushanWarden, PredictableAgent, DoNothingAgent)
for turn in range(0, 3):
game.play_single_turn()
self.assertEqual(28, game.other_player.hero.health)
def test_MagePower(self):
game = generate_game_for(ArcaneIntellect, MogushanWarden, PredictableAgent, DoNothingAgent)
for turn in range(0, 3):
game.play_single_turn()
self.assertEqual(29, game.players[1].hero.health)
def test_PaladinPower(self):
game = generate_game_for(AvengingWrath, MogushanWarden, PredictableAgent, DoNothingAgent)
for turn in range(0, 3):
game.play_single_turn()
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual(1, game.current_player.minions[0].calculate_attack())
self.assertEqual(1, game.current_player.minions[0].health)
self.assertEqual("Silver Hand Recruit", game.current_player.minions[0].card.name)
def test_PriestPower(self):
game = generate_game_for(CircleOfHealing, MogushanWarden, PredictableAgent, DoNothingAgent)
game.players[1].hero.health = 20
for turn in range(0, 3):
game.play_single_turn()
self.assertEqual(22, game.players[1].hero.health)
def test_MindSpike(self):
game = generate_game_for(Shadowform, MogushanWarden, PredictableAgent, DoNothingAgent)
game.players[0].hero.power = MindSpike()
game.players[0].hero.power.hero = game.players[0].hero
for turn in range(0, 3):
game.play_single_turn()
self.assertEqual(28, game.players[1].hero.health)
def test_MindShatter(self):
game = generate_game_for(Shadowform, Shadowform, PredictableAgent, DoNothingAgent)
game.players[0].hero.power = MindShatter()
game.players[0].hero.power.hero = game.players[0].hero
for turn in range(0, 3):
game.play_single_turn()
self.assertEqual(27, game.players[1].hero.health)
def test_RoguePower(self):
game = generate_game_for(DefiasRingleader, MogushanWarden, PredictableAgent, DoNothingAgent)
for turn in range(0, 3):
game.play_single_turn()
self.assertEqual(1, game.players[0].weapon.base_attack)
self.assertEqual(1, game.players[0].weapon.durability)
self.assertEqual(29, game.players[1].hero.health)
def test_ShamanPower(self):
game = generate_game_for(AlAkirTheWindlord, MassDispel, PredictableAgent, CardTestingAgent)
for turn in range(0, 3):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Stoneclaw Totem", game.players[0].minions[0].card.name)
self.assertTrue(game.players[0].minions[0].taunt)
game.play_single_turn()
game.play_single_turn()
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual("Healing Totem", game.players[0].minions[1].card.name)
game.play_single_turn()
game.play_single_turn()
self.assertEqual(3, len(game.players[0].minions))
self.assertEqual("Searing Totem", game.players[0].minions[2].card.name)
game.play_single_turn()
game.play_single_turn()
self.assertEqual(4, len(game.players[0].minions))
self.assertEqual("Wrath of Air Totem", game.players[0].minions[3].card.name)
self.assertEqual(1, game.players[0].spell_damage)
# All Totems are out, nothing should be summoned
game.play_single_turn()
game.play_single_turn()
self.assertEqual(4, len(game.players[0].minions))
def test_WarlockPower(self):
game = generate_game_for(Doomguard, MogushanWarden, PredictableAgent, DoNothingAgent)
for turn in range(0, 3):
game.play_single_turn()
self.assertEqual(28, game.players[0].hero.health)
self.assertEqual(6, len(game.players[0].hand))
def test_WarriorPower(self):
game = generate_game_for(ArathiWeaponsmith, MogushanWarden, PredictableAgent, DoNothingAgent)
for turn in range(0, 3):
game.play_single_turn()
self.assertEqual(2, game.players[0].hero.armor)
def test_double_power_use(self):
testing_env = self
class PowerTestingAgent(DoNothingAgent):
def __init__(self):
super().__init__()
self.turn = 0
def do_turn(self, player):
self.turn += 1
if self.turn is 4:
player.hero.power.use()
testing_env.assertFalse(player.hero.power.can_use())
elif self.turn is 7:
player.hero.power.use()
player.game.play_card(player.hand[0])
testing_env.assertTrue(player.hero.power.can_use())
game = generate_game_for(Shadowform, MogushanWarden, PowerTestingAgent, DoNothingAgent)
for turn in range(0, 13):
game.play_single_turn()
def test_Velen_and_Hunter(self):
game = generate_game_for(HuntersMark, StonetuskBoar, PredictableAgent, DoNothingAgent)
ProphetVelen().summon(game.players[0], game, 0)
for turn in range(3):
game.play_single_turn()
# Velen attacks once for 7 damage, and the hero power attacks once for 4 damage
self.assertEqual(19, game.other_player.hero.health)
def test_Velen_SteamwheedleSniper_and_Hunter(self):
game = generate_game_for(SteamwheedleSniper, StonetuskBoar, PredictableAgent, DoNothingAgent)
for turn in range(8):
game.play_single_turn()
ProphetVelen().summon(game.players[0], game, 0)
game.play_single_turn()
self.assertEqual(22, game.other_player.hero.health)
self.assertEqual(3, game.current_player.minions[1].health)
self.assertEqual("Prophet Velen", game.current_player.minions[1].card.name)
def test_Velen_and_Warlock(self):
game = generate_game_for(DreadInfernal, StonetuskBoar, PredictableAgent, DoNothingAgent)
ProphetVelen().summon(game.players[0], game, 0)
for turn in range(3):
game.play_single_turn()
# The player's hero is damaged for 4 rather than 2 because of Velen
self.assertEqual(26, game.current_player.hero.health)
def test_Velen_and_Mage(self):
game = generate_game_for(Pyroblast, StonetuskBoar, PredictableAgent, DoNothingAgent)
ProphetVelen().summon(game.players[0], game, 0)
for turn in range(3):
game.play_single_turn()
# Velen is Hero powered for two damage
self.assertEqual(5, game.current_player.minions[0].health)
| mit | 8,241,900,139,780,689,000 | 37.679612 | 113 | 0.664659 | false |
ViDA-NYU/data-polygamy | sigmod16/performance-evaluation/nyc-open/running-time-relationship.py | 1 | 5894 | # Copyright (C) 2016 New York University
# This file is part of Data Polygamy which is released under the Revised BSD License
# See file LICENSE for full license details.
import os
import sys
import math
import matplotlib
matplotlib.use('Agg')
matplotlib.rc('font', family='sans-serif')
import matplotlib.pyplot as plt
import matplotlib.font_manager as font
import matplotlib.ticker as ticker
import locale
locale.setlocale(locale.LC_ALL, 'en_US.utf8')
temp = {"hour": ["hour","day","week","month"],
"day": ["day","week","month"],
"week": ["week","month"],
"month": ["month"]}
spatial = {"zip": ["zip","city"],
"city": ["city"]}
if sys.argv[1] == "help":
print "[metadata dir] [file] [use y label] [metadata file]"
sys.exit(0)
phases = ["aggregates", "index", "relationship-restricted"]
legends = {"aggregates": "Scalar Function Computation",
"index": "Event Computation",
"relationship-restricted": "Relationship Computation"}
phases_color = {"aggregates": "#003399",
"index": "#0099CC",
"relationship-restricted": "#003399"}
metadata_dir = sys.argv[1]
use_y_label = eval(sys.argv[3])
metadata_file = sys.argv[4]
datasets = []
f = open(metadata_file)
line = f.readline()
while line != "":
l = line.split(",")
datasets.append(l[0])
line = f.readline()
data = {"aggregates": [],
"index": [],
"relationship-restricted": []}
f = open(sys.argv[2])
line = f.readline()
current_n_datasets = 0
max_n_datasets = -1
min_n_datasets = sys.maxint
current_time = 0
current_phase = ""
while line != "":
if line.endswith("datasets\n"):
if current_phase != "":
data[current_phase].append([current_n_datasets, current_time])
current_time = 0
current_n_datasets = int(line.split(" ")[0])
max_n_datasets = max(max_n_datasets, current_n_datasets)
min_n_datasets = min(min_n_datasets, current_n_datasets)
elif line.startswith("No "):
pass
elif line.startswith("Deleted "):
pass
elif line.strip() == "\n":
pass
elif line.startswith("Moved:"):
pass
else:
l = line.split("\t")
current_time += int(l[1])
current_phase = l[0]
line = f.readline()
data[l[0]].append([current_n_datasets, current_time])
current_time = 0
print data
def get_number_relationships(data_dir, datasets, n):
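    """Estimate how many relationships the first n data sets generate: data sets
    are paired on every temporal-spatial resolution they share, and each such
    pairing contributes the product of the two data sets' attribute counts
    (read from their .aggregates files in data_dir)."""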
n_relationships = 0
n_datasets = datasets[:n]
dataset_att = {}
dataset_res = {}
# searching for files
files = os.listdir(data_dir)
for file in files:
for dataset in n_datasets:
data = dataset + "-"
if data in file and not file.startswith("."):
l = file.replace(".aggregates","").split("-")
spatial_res = l[-1]
temp_res = l[-2]
dataset_res[dataset] = []
for t in temp[temp_res]:
for s in spatial[spatial_res]:
dataset_res[dataset].append(t + "-" + s)
f = open(os.path.join(data_dir,file))
line = f.readline()
dataset_att[dataset] = int(f.readline().strip())
f.close()
for i in range(len(n_datasets)):
ds1 = n_datasets[i]
if not dataset_res.has_key(ds1):
continue
for j in range(1, len(n_datasets)):
ds2 = n_datasets[j]
if not dataset_res.has_key(ds2):
continue
for res in dataset_res[ds1]:
if (res in dataset_res[ds2]):
n_relationships += dataset_att[ds1]*dataset_att[ds2]
return n_relationships
# plots
xlabel = "Number of Data Sets"
ylabel = "Evaluation Rate (rel. / min)"
plt.figure(figsize=(8, 6), dpi=80)
f, ax = plt.subplots()
ax.set_axis_bgcolor("#E0E0E0")
output = ""
datasets_to_show = [2]+range(30,301,30)
n_relationships = []
n_datasets = []
for i in datasets_to_show:
n_datasets.append(i)
n_relationships.append(get_number_relationships(metadata_dir, datasets, i))
phase = "relationship-restricted"
x_data = []
y_data = []
for i in range(len(data[phase])):
if (n_datasets[i] < 30):
continue
x_data.append(n_datasets[i])
    y_data.append(n_relationships[i]/(float(data[phase][i][1])/60000))  # runtime is in ms; yields relationships per minute
line, = ax.plot(x_data, y_data, linewidth=2.0, color=phases_color[phase], linestyle='-')
output += str([x for x in n_relationships]) + ": " + str([x[1] for x in data[phase]]) + "\n"
ax.tick_params(axis='y', labelsize=22)
ax.tick_params(axis='x', labelsize=18)
ax.set_ylim(0,20000)
def my_formatter_fun_y(x, p):
if (x/1000) >= 1:
return locale.format('%d', x/1000, grouping=True) + "K"
else:
return locale.format('%d', x, grouping=True)
ax.get_yaxis().set_major_formatter(ticker.FuncFormatter(my_formatter_fun_y))
def my_formatter_fun_x(x, p):
if (x/1000000) >= 1:
return locale.format('%d', x/1000000, grouping=True) + "M"
elif (x/1000) >= 1:
return locale.format('%d', x/1000, grouping=True) + "K"
else:
return locale.format('%d', x, grouping=True)
ax.get_xaxis().set_major_formatter(ticker.FuncFormatter(my_formatter_fun_x))
ax.set_xticks(x_data)
plt.minorticks_off()
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.set_xlabel(xlabel,fontproperties=font.FontProperties(size=22,weight='bold'))
if (use_y_label):
ax.set_ylabel(ylabel,fontproperties=font.FontProperties(size=22,weight='bold'))
ax.grid(b=True, axis='both', color='w', linestyle='-', linewidth=0.7)
ax.set_axisbelow(True)
filename = "running-time-relationship"
plt.savefig(filename + ".png", bbox_inches='tight', pad_inches=0.05)
f = open(filename + ".out", "w")
f.write(output)
f.close()
plt.clf()
| bsd-3-clause | -1,238,665,130,260,776,000 | 28.918782 | 92 | 0.602647 | false |
drimer/NetControl | tests/test_devices_view_selenium.py | 1 | 1480 | from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.core.urlresolvers import reverse
import mock
from selenium.webdriver.common.by import By
from selenium.webdriver.phantomjs.webdriver import WebDriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from netcontrol.network import Device
WEBDRIVER_MAX_TIMEOUT = 15
MOCK_DEVICES = [
Device(ip_address='192.168.0.34', mac_address='AA:DD:EE:FF:BB:CC'),
Device(ip_address='192.168.0.156', mac_address='AA:EE:DD:FF:AA:CC'),
]
def _get_devices_mock():
return MOCK_DEVICES
def _get_ifaces_mock():
return ['eth0']
class HomeViewSeleniumTest(StaticLiveServerTestCase):
@mock.patch('webapp.views.get_devices', _get_devices_mock)
def test_that_home_view_shows_devices_grid(self):
url = '%s%s' % (self.live_server_url, reverse('devices'))
driver = WebDriver()
driver.get(url)
waiter = WebDriverWait(driver, WEBDRIVER_MAX_TIMEOUT)
waiter.until(EC.presence_of_all_elements_located((By.CLASS_NAME, 'device')))
device_grid = driver.find_element_by_id('device-grid')
devices = device_grid.find_elements_by_class_name('device')
self.assertEqual(len(MOCK_DEVICES), len(devices))
for device in MOCK_DEVICES:
self.assertIn(device.ip_address, device_grid.text)
self.assertIn(device.mac_address, device_grid.text)
| gpl-2.0 | -5,619,255,978,063,821,000 | 33.418605 | 84 | 0.714865 | false |
Disiok/poetry-seq2seq | main.py | 1 | 1842 | #! /usr/bin/env python
# -*- coding:utf-8 -*-
from plan import Planner
from predict import Seq2SeqPredictor
import sys
import tensorflow as tf
tf.app.flags.DEFINE_boolean('cangtou', False, 'Generate Acrostic Poem')
reload(sys)
sys.setdefaultencoding('utf8')
def get_cangtou_keywords(input):
assert(len(input) == 4)
return [c for c in input]
def main(cangtou=False):
planner = Planner()
with Seq2SeqPredictor() as predictor:
# Run loop
terminate = False
while not terminate:
try:
input = raw_input('Input Text:\n').decode('utf-8').strip()
if not input:
print 'Input cannot be empty!'
elif input.lower() in ['quit', 'exit']:
terminate = True
else:
if cangtou:
keywords = get_cangtou_keywords(input)
else:
# Generate keywords
keywords = planner.plan(input)
# Generate poem
lines = predictor.predict(keywords)
# Print keywords and poem
print 'Keyword:\t\tPoem:'
for line_number in xrange(4):
punctuation = u',' if line_number % 2 == 0 else u'。'
print u'{keyword}\t\t{line}{punctuation}'.format(
keyword=keywords[line_number],
line=lines[line_number],
punctuation=punctuation
)
except EOFError:
terminate = True
except KeyboardInterrupt:
terminate = True
print '\nTerminated.'
if __name__ == '__main__':
main(cangtou=tf.app.flags.FLAGS.cangtou) | mit | 5,761,037,331,231,479,000 | 30.169492 | 76 | 0.496736 | false |
temnoregg/django-helpdesk | helpdesk/admin.py | 3 | 2365 | from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin
from helpdesk.models import Queue, Ticket, FollowUp, PreSetReply, KBCategory
from helpdesk.models import EscalationExclusion, EmailTemplate, KBItem
from helpdesk.models import TicketChange, Attachment, IgnoreEmail
from helpdesk.models import CustomField
from helpdesk.models import QueueMembership
from helpdesk import settings as helpdesk_settings
class QueueAdmin(admin.ModelAdmin):
list_display = ('title', 'slug', 'email_address', 'locale')
class TicketAdmin(admin.ModelAdmin):
list_display = ('title', 'status', 'assigned_to', 'submitter_email',)
date_hierarchy = 'created'
list_filter = ('assigned_to', 'status', )
class TicketChangeInline(admin.StackedInline):
model = TicketChange
class AttachmentInline(admin.StackedInline):
model = Attachment
class FollowUpAdmin(admin.ModelAdmin):
inlines = [TicketChangeInline, AttachmentInline]
class KBItemAdmin(admin.ModelAdmin):
list_display = ('category', 'title', 'last_updated',)
list_display_links = ('title',)
class CustomFieldAdmin(admin.ModelAdmin):
list_display = ('name', 'label', 'data_type')
class EmailTemplateAdmin(admin.ModelAdmin):
list_display = ('template_name', 'heading', 'locale')
list_filter = ('locale', )
class QueueMembershipInline(admin.StackedInline):
model = QueueMembership
class UserAdminWithQueueMemberships(UserAdmin):
def change_view(self, request, object_id, form_url='', extra_context=None):
self.inlines = (QueueMembershipInline,)
return super(UserAdminWithQueueMemberships, self).change_view(
request, object_id, form_url=form_url, extra_context=extra_context)
admin.site.register(Ticket, TicketAdmin)
admin.site.register(Queue, QueueAdmin)
admin.site.register(FollowUp, FollowUpAdmin)
admin.site.register(PreSetReply)
admin.site.register(EscalationExclusion)
admin.site.register(EmailTemplate, EmailTemplateAdmin)
admin.site.register(KBCategory)
admin.site.register(KBItem, KBItemAdmin)
admin.site.register(IgnoreEmail)
admin.site.register(CustomField, CustomFieldAdmin)
if helpdesk_settings.HELPDESK_ENABLE_PER_QUEUE_STAFF_MEMBERSHIP:
admin.site.unregister(get_user_model())
admin.site.register(get_user_model(), UserAdminWithQueueMemberships)
| bsd-3-clause | -5,345,724,065,037,979,000 | 36.539683 | 79 | 0.769979 | false |
ahmedalsudani/mitro | browser-ext/third_party/firefox-addon-sdk/python-lib/cuddlefish/preflight.py | 30 | 3489 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os, sys
import base64
import simplejson as json
def create_jid():
"""Return 'jid1-XYZ', where 'XYZ' is a randomly-generated string. (in the
previous jid0- series, the string securely identified a specific public
key). To get a suitable add-on ID, append '@jetpack' to this string.
"""
# per https://developer.mozilla.org/en/Install_Manifests#id all XPI id
# values must either be in the form of a 128-bit GUID (crazy braces
# and all) or in the form of an email address (crazy @ and all).
# Firefox will refuse to install an add-on with an id that doesn't
# match one of these forms. The actual regexp is at:
# http://mxr.mozilla.org/mozilla-central/source/toolkit/mozapps/extensions/XPIProvider.jsm#130
# So the JID needs an @-suffix, and the only legal punctuation is
# "-._". So we start with a base64 encoding, and replace the
# punctuation (+/) with letters (AB), losing a few bits of integrity.
# even better: windows has a maximum path length limitation of 256
# characters:
# http://msdn.microsoft.com/en-us/library/aa365247%28VS.85%29.aspx
# (unless all paths are prefixed with "\\?\", I kid you not). The
# typical install will put add-on code in a directory like:
# C:\Documents and Settings\<username>\Application Data\Mozilla\Firefox\Profiles\232353483.default\extensions\$JID\...
# (which is 108 chars long without the $JID).
# Then the unpacked XPI contains packaged resources like:
# resources/$JID-api-utils-lib/main.js (35 chars plus the $JID)
#
# We create a random 80 bit string, base64 encode that (with
# AB instead of +/ to be path-safe), then bundle it into
# "jid1-XYZ@jetpack". This gives us 27 characters. The resulting
# main.js will have a path length of 211 characters, leaving us 45
# characters of margin.
#
# 80 bits is enough to generate one billion JIDs and still maintain lower
# than a one-in-a-million chance of accidental collision. (1e9 JIDs is 30
# bits, square for the "birthday-paradox" to get 60 bits, add 20 bits for
# the one-in-a-million margin to get 80 bits)
# if length were no issue, we'd prefer to use this:
h = os.urandom(80/8)
s = base64.b64encode(h, "AB").strip("=")
jid = "jid1-" + s
return jid
def preflight_config(target_cfg, filename, stderr=sys.stderr):
modified = False
config = json.load(open(filename, 'r'))
if "id" not in config:
print >>stderr, ("No 'id' in package.json: creating a new ID for you.")
jid = create_jid()
config["id"] = jid
modified = True
if modified:
i = 0
backup = filename + ".backup"
while os.path.exists(backup):
if i > 1000:
raise ValueError("I'm having problems finding a good name"
" for the backup file. Please move %s out"
" of the way and try again."
% (filename + ".backup"))
backup = filename + ".backup-%d" % i
i += 1
os.rename(filename, backup)
new_json = json.dumps(config, indent=4)
open(filename, 'w').write(new_json+"\n")
return False, True
return True, False
| gpl-3.0 | -5,706,584,263,681,276,000 | 44.311688 | 122 | 0.638865 | false |
serzans/wagtail | wagtail/wagtailimages/tests/test_image_operations.py | 15 | 11668 | import unittest
from wagtail.wagtailimages import image_operations
from wagtail.wagtailimages.exceptions import InvalidFilterSpecError
from wagtail.wagtailimages.models import Image, Filter
class WillowOperationRecorder(object):
"""
This class pretends to be a Willow image but instead, it records
the operations that have been performed on the image for testing
"""
def __init__(self, start_size):
self.ran_operations = []
self.start_size = start_size
def __getattr__(self, attr):
def operation(*args, **kwargs):
self.ran_operations.append((attr, args, kwargs))
return operation
def get_size(self):
size = self.start_size
for operation in self.ran_operations:
if operation[0] == 'resize':
size = operation[1][0]
elif operation[0] == 'crop':
crop = operation[1][0]
size = crop[2] - crop[0], crop[3] - crop[1]
return size
class ImageOperationTestCase(unittest.TestCase):
operation_class = None
filter_spec_tests = []
filter_spec_error_tests = []
run_tests = []
@classmethod
def make_filter_spec_test(cls, filter_spec, expected_output):
def test_filter_spec(self):
operation = self.operation_class(*filter_spec.split('-'))
# Check the attributes are set correctly
for attr, value in expected_output.items():
self.assertEqual(getattr(operation, attr), value)
test_name = 'test_filter_%s' % filter_spec
test_filter_spec.__name__ = test_name
return test_filter_spec
@classmethod
def make_filter_spec_error_test(cls, filter_spec):
def test_filter_spec_error(self):
self.assertRaises(InvalidFilterSpecError, self.operation_class, *filter_spec.split('-'))
test_name = 'test_filter_%s_raises_%s' % (filter_spec, InvalidFilterSpecError.__name__)
test_filter_spec_error.__name__ = test_name
return test_filter_spec_error
@classmethod
def make_run_test(cls, filter_spec, image, expected_output):
def test_run(self):
# Make operation
operation = self.operation_class(*filter_spec.split('-'))
# Make operation recorder
operation_recorder = WillowOperationRecorder((image.width, image.height))
# Run
operation.run(operation_recorder, image)
# Check
self.assertEqual(operation_recorder.ran_operations, expected_output)
test_name = 'test_run_%s' % filter_spec
test_run.__name__ = test_name
return test_run
@classmethod
def setup_test_methods(cls):
if cls.operation_class is None:
return
# Filter spec tests
for args in cls.filter_spec_tests:
filter_spec_test = cls.make_filter_spec_test(*args)
setattr(cls, filter_spec_test.__name__, filter_spec_test)
# Filter spec error tests
for filter_spec in cls.filter_spec_error_tests:
filter_spec_error_test = cls.make_filter_spec_error_test(filter_spec)
setattr(cls, filter_spec_error_test.__name__, filter_spec_error_test)
# Running tests
for args in cls.run_tests:
run_test = cls.make_run_test(*args)
setattr(cls, run_test.__name__, run_test)
class TestDoNothingOperation(ImageOperationTestCase):
operation_class = image_operations.DoNothingOperation
filter_spec_tests = [
('original', dict()),
('blahblahblah', dict()),
('123456', dict()),
]
filter_spec_error_tests = [
'cannot-take-multiple-parameters',
]
run_tests = [
('original', Image(width=1000, height=1000), []),
]
TestDoNothingOperation.setup_test_methods()
class TestFillOperation(ImageOperationTestCase):
operation_class = image_operations.FillOperation
filter_spec_tests = [
('fill-800x600', dict(width=800, height=600, crop_closeness=0)),
('hello-800x600', dict(width=800, height=600, crop_closeness=0)),
('fill-800x600-c0', dict(width=800, height=600, crop_closeness=0)),
('fill-800x600-c100', dict(width=800, height=600, crop_closeness=1)),
('fill-800x600-c50', dict(width=800, height=600, crop_closeness=0.5)),
('fill-800x600-c1000', dict(width=800, height=600, crop_closeness=1)),
('fill-800000x100', dict(width=800000, height=100, crop_closeness=0)),
]
filter_spec_error_tests = [
'fill',
'fill-800',
'fill-abc',
'fill-800xabc',
'fill-800x600-',
'fill-800x600x10',
'fill-800x600-d100',
]
run_tests = [
# Basic usage
('fill-800x600', Image(width=1000, height=1000), [
('crop', ((0, 125, 1000, 875), ), {}),
('resize', ((800, 600), ), {}),
]),
# Basic usage with an oddly-sized original image
# This checks for a rounding precision issue (#968)
('fill-200x200', Image(width=539, height=720), [
('crop', ((0, 90, 539, 630), ), {}),
('resize', ((200, 200), ), {}),
]),
# Closeness shouldn't have any effect when used without a focal point
('fill-800x600-c100', Image(width=1000, height=1000), [
('crop', ((0, 125, 1000, 875), ), {}),
('resize', ((800, 600), ), {}),
]),
# Should always crop towards focal point. Even if no closeness is set
('fill-80x60', Image(
width=1000,
height=1000,
focal_point_x=1000,
focal_point_y=500,
focal_point_width=0,
focal_point_height=0,
), [
# Crop the largest possible crop box towards the focal point
('crop', ((0, 125, 1000, 875), ), {}),
# Resize it down to final size
('resize', ((80, 60), ), {}),
]),
# Should crop as close as possible without upscaling
('fill-80x60-c100', Image(
width=1000,
height=1000,
focal_point_x=1000,
focal_point_y=500,
focal_point_width=0,
focal_point_height=0,
), [
# Crop as close as possible to the focal point
('crop', ((920, 470, 1000, 530), ), {}),
# No need to resize, crop should've created an 80x60 image
]),
# Ditto with a wide image
# Using a different filter so method name doesn't clash
('fill-100x60-c100', Image(
width=2000,
height=1000,
focal_point_x=2000,
focal_point_y=500,
focal_point_width=0,
focal_point_height=0,
), [
# Crop to the right hand side
('crop', ((1900, 470, 2000, 530), ), {}),
]),
# Make sure that the crop box never enters the focal point
('fill-50x50-c100', Image(
width=2000,
height=1000,
focal_point_x=1000,
focal_point_y=500,
focal_point_width=100,
focal_point_height=20,
), [
# Crop a 100x100 box around the entire focal point
('crop', ((950, 450, 1050, 550), ), {}),
# Resize it down to 50x50
('resize', ((50, 50), ), {}),
]),
# Test that the image is never upscaled
('fill-1000x800', Image(width=100, height=100), [
('crop', ((0, 10, 100, 90), ), {}),
]),
# Test that the crop closeness gets capped to prevent upscaling
('fill-1000x800-c100', Image(
width=1500,
height=1000,
focal_point_x=750,
focal_point_y=500,
focal_point_width=0,
focal_point_height=0,
), [
# Crop a 1000x800 square out of the image as close to the
# focal point as possible. Will not zoom too far in to
# prevent upscaling
('crop', ((250, 100, 1250, 900), ), {}),
]),
# Test for an issue where a ZeroDivisionError would occur when the
# focal point size, image size and filter size match
# See: #797
('fill-1500x1500-c100', Image(
width=1500,
height=1500,
focal_point_x=750,
focal_point_y=750,
focal_point_width=1500,
focal_point_height=1500,
), [
# This operation could probably be optimised out
('crop', ((0, 0, 1500, 1500), ), {}),
]),
# A few tests for single pixel images
('fill-100x100', Image(
width=1,
height=1,
), [
('crop', ((0, 0, 1, 1), ), {}),
]),
# This one once gave a ZeroDivisionError
('fill-100x150', Image(
width=1,
height=1,
), [
('crop', ((0, 0, 1, 1), ), {}),
]),
('fill-150x100', Image(
width=1,
height=1,
), [
('crop', ((0, 0, 1, 1), ), {}),
]),
]
TestFillOperation.setup_test_methods()
class TestMinMaxOperation(ImageOperationTestCase):
operation_class = image_operations.MinMaxOperation
filter_spec_tests = [
('min-800x600', dict(method='min', width=800, height=600)),
('max-800x600', dict(method='max', width=800, height=600)),
]
filter_spec_error_tests = [
'min',
'min-800',
'min-abc',
'min-800xabc',
'min-800x600-',
'min-800x600-c100',
'min-800x600x10',
]
run_tests = [
# Basic usage of min
('min-800x600', Image(width=1000, height=1000), [
('resize', ((800, 800), ), {}),
]),
# Basic usage of max
('max-800x600', Image(width=1000, height=1000), [
('resize', ((600, 600), ), {}),
]),
]
TestMinMaxOperation.setup_test_methods()
class TestWidthHeightOperation(ImageOperationTestCase):
operation_class = image_operations.WidthHeightOperation
filter_spec_tests = [
('width-800', dict(method='width', size=800)),
('height-600', dict(method='height', size=600)),
]
filter_spec_error_tests = [
'width',
'width-800x600',
'width-abc',
'width-800-c100',
]
run_tests = [
# Basic usage of width
('width-400', Image(width=1000, height=500), [
('resize', ((400, 200), ), {}),
]),
# Basic usage of height
('height-400', Image(width=1000, height=500), [
('resize', ((800, 400), ), {}),
]),
]
TestWidthHeightOperation.setup_test_methods()
class TestCacheKey(unittest.TestCase):
def test_cache_key(self):
image = Image(width=1000, height=1000)
fil = Filter(spec='max-100x100')
cache_key = fil.get_cache_key(image)
self.assertEqual(cache_key, '')
def test_cache_key_fill_filter(self):
image = Image(width=1000, height=1000)
fil = Filter(spec='fill-100x100')
cache_key = fil.get_cache_key(image)
self.assertEqual(cache_key, '2e16d0ba')
def test_cache_key_fill_filter_with_focal_point(self):
image = Image(
width=1000,
height=1000,
focal_point_width=100,
focal_point_height=100,
focal_point_x=500,
focal_point_y=500,
)
fil = Filter(spec='fill-100x100')
cache_key = fil.get_cache_key(image)
self.assertEqual(cache_key, '0bbe3b2f')
| bsd-3-clause | -3,539,725,549,017,486,300 | 29.705263 | 100 | 0.542852 | false |
vjdorazio/TwoRavens | fabfile.py | 1 | 7730 | import os
import random
import string
#from os.path import abspath, dirname, join
import signal
import sys
from fabric.api import local
import django
from django.conf import settings
import subprocess
import re
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
FAB_BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(FAB_BASE_DIR)
if FAB_BASE_DIR == '/srv/webapps/TwoRavens':
os.environ.setdefault('DJANGO_SETTINGS_MODULE',
'tworavensproject.settings.dev_container')
else:
os.environ.setdefault('DJANGO_SETTINGS_MODULE',
'tworavensproject.settings.local_settings')
try:
django.setup()
except Exception as e:
print("WARNING: Can't configure Django. %s" % e)
def stop():
"""Kill any python/npm processes"""
try:
local("killall npm")
except:
pass
try:
local("killall python")
except:
pass
def restart():
"""Kill any python/npm processes and then run"""
stop()
run()
def make_d3m_config():
"""Make a D3M config based on local files in the /data directory"""
from tworaven_apps.configurations.util_config_maker import TestConfigMaker
TestConfigMaker.make_configs()
def load_d3m_config(config_file):
"""Load D3M config file, saving it as D3MConfiguration object. Pass the config file path: fab load_d3m_config:(path to config file)"""
from django.core import management
try:
management.call_command('load_config', config_file)
except management.base.CommandError as err_obj:
print('> Failed to load D3M config.\n%s' % err_obj)
def load_docker_ui_config():
"""Load config pk=3, name 'Docker Default configuration'"""
check_config()
from tworaven_apps.configurations.models import AppConfiguration
le_config = AppConfiguration.objects.get(pk=3)
le_config.is_active = True
le_config.save()
print('new config activated: ')
for k, val in le_config.__dict__.items():
if not k.startswith('_'):
print(' > %s: %s' % (k, val))
def check_config():
"""If there aren't any db configurations, then load the fixtures"""
from tworaven_apps.configurations.models import AppConfiguration
config_cnt = AppConfiguration.objects.count()
if config_cnt == 0:
local(('python manage.py loaddata'
' tworaven_apps/configurations/fixtures/initial_configs.json'))
else:
print('Configs exist in the db: %d' % config_cnt)
def run_with_rook():
"""In addition to the django dev server and webpack, run rook via the Terminal"""
run(with_rook=True)
def run(with_rook=False):
"""Run the django dev server and webpack--webpack watches the assets directory and rebuilds when appTwoRavens changes
with_rook=True - runs rook in "nonstop" mode
"""
clear_js() # clear any dev css/js files
init_db()
check_config() # make sure the db has something
commands = [
# start webpack
#'./node_modules/.bin/webpack --watch'
'npm start',
#'python manage.py runserver 8080'
#'celery -A firmament worker --loglevel=info -B'
]
if with_rook:
rook_run_cmd = 'cd rook; Rscript rook_nonstop.R'
commands.append(rook_run_cmd)
proc_list = [subprocess.Popen(command, shell=True, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr) for command in commands]
try:
local("python manage.py runserver 127.0.0.1:8080")
finally:
for proc in proc_list:
os.kill(proc.pid, signal.SIGKILL)
def webpack_prod():
"""Generate the webpack dist files for prod"""
#cmd_webpack = './node_modules/.bin/webpack --config webpack.config-prod.js --watch'
cmd_webpack = './node_modules/.bin/webpack --config webpack.prod.config.js'
local(cmd_webpack)
def clear_js():
"""Delete old webpack dev. build files"""
print(clear_js.__doc__)
# webpack build directory
webpack_build_dir = os.path.join(FAB_BASE_DIR, 'assets', 'build')
# find files
pat1 = r'^tworavens_(app|styles)\-(\w|-){20,50}\.(js|css)$'
build_file_names = [x for x in os.listdir(webpack_build_dir)
if re.match(pat1, x) is not None]
#if (x.startswith('log_') and x.endswith('.txt'))\
# or (x.startswith('output') and x.endswith('.png'))]
if not build_file_names:
print('No files found')
return
print('Deleting %s file(s)' % len(build_file_names))
print('-' * 40)
for fname in [os.path.join(webpack_build_dir, x) for x in build_file_names]:
print('removing... %s' % fname)
os.remove(fname)
print('-' * 40)
print('Deleted %s file(s)' % len(build_file_names))
def clear_logs():
"""Delete log files, image files, and preprocess files from rook"""
print(clear_logs.__doc__)
# rook directory
rook_log_dir = os.path.join(FAB_BASE_DIR, 'rook')
# find files
pat1 = r'^log_(\w|-){25,50}\.txt$'
pat2 = r'^output(\w|-){2,10}\.png$'
log_files_names = [x for x in os.listdir(rook_log_dir)
if re.match(pat1, x) is not None or
re.match(pat2, x) is not None]
if log_files_names:
print('Deleting %s log file(s)' % len(log_files_names))
print('-' * 40)
for fname in [os.path.join(rook_log_dir, x) for x in log_files_names]:
print('removing... %s' % fname)
os.remove(fname)
print('-' * 40)
print('Deleted %s log file(s)' % len(log_files_names))
# data directory
rook_data_dir = os.path.join(FAB_BASE_DIR, 'data')
pat3 = r'^preprocessSubset_(\w|-){15,50}\.txt$'
data_file_names = [x for x in os.listdir(rook_data_dir)
if re.match(pat3, x) is not None]
if data_file_names:
print('Deleting %s data file(s)' % len(data_file_names))
print('-' * 40)
for fname in [os.path.join(rook_data_dir, x) for x in data_file_names]:
print('removing... %s' % fname)
os.remove(fname)
print('-' * 40)
print('Deleted %s log file(s)' % len(data_file_names))
def create_django_superuser():
"""(Test only) Create superuser with username: dev_admin. Password is printed to the console."""
from django.contrib.auth.models import User
dev_admin_username = 'dev_admin'
#User.objects.filter(username=dev_admin_username).delete()
if User.objects.filter(username=dev_admin_username).count() > 0:
print('A "%s" superuser already exists' % dev_admin_username)
return
admin_pw = 'admin'
#''.join(random.choice(string.ascii_lowercase + string.digits)
# for _ in range(7))
new_user = User(username=dev_admin_username,
first_name='Dev',
last_name='Administrator',
is_staff=True,
is_active=True,
is_superuser=True)
new_user.set_password(admin_pw)
new_user.save()
print('superuser created: "%s"' % dev_admin_username)
print('password: "%s"' % admin_pw)
def init_db():
"""Run django check and migrate"""
local("python manage.py check")
local("python manage.py migrate")
create_django_superuser()
#local("python manage.py loaddata fixtures/users.json")
#Series(name_abbreviation="Mass.").save()
def test_front_matter():
pass
#from firmament.models import Volume
#Volume.objects.first().generate_front_matter()
def ubuntu_help():
"""Set up directories for ubuntu 16.04 (in progress)"""
from setup.ubuntu_setup import TwoRavensSetup
trs = TwoRavensSetup()
| bsd-3-clause | -4,884,388,285,163,601,000 | 30.169355 | 139 | 0.618499 | false |
xzturn/tensorflow | tensorflow/python/data/experimental/kernel_tests/map_defun_op_test.py | 4 | 13894 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MapDefunOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl.testing import parameterized
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import map_defun
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.eager import function
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
# TODO(b/123903858): Add eager and V2 test coverage
def _test_combinations():
return combinations.combine(tf_api_version=[1], mode=["graph"])
class MapDefunTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(_test_combinations())
def testNoIntraOpLimit(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def simple_fn(x):
return x * 2 + 3
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(
simple_fn, [elems], [dtypes.int32], [(2,)],
max_intra_op_parallelism=0)[0]
expected = elems * 2 + 3
self.assertAllEqual(self.evaluate(r), self.evaluate(expected))
@combinations.generate(_test_combinations())
def testMapDefunSimple(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def simple_fn(x):
return x * 2 + 3
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(simple_fn, [elems], [dtypes.int32], [(2,)])[0]
expected = elems * 2 + 3
self.assertAllEqual(self.evaluate(r), self.evaluate(expected))
@combinations.generate(_test_combinations())
def testMapDefunMismatchedTypes(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def fn(x):
return math_ops.cast(x, dtypes.float64)
nums = [1, 2, 3, 4, 5, 6]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(fn, [elems], [dtypes.int32], [()])[0]
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(r)
@combinations.generate(_test_combinations())
def testMapDefunReduceDim(self):
# Tests where the output has a different rank from the input
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def fn(x):
return array_ops.gather(x, 0)
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(fn, [elems], [dtypes.int32], [()])[0]
expected = constant_op.constant([1, 3, 5])
self.assertAllEqual(self.evaluate(r), self.evaluate(expected))
@combinations.generate(_test_combinations())
def testMapDefunMultipleOutputs(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def fn(x):
return (x, math_ops.cast(x * 2 + 3, dtypes.float64))
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(fn, [elems], [dtypes.int32, dtypes.float64], [(2,),
(2,)])
expected = [elems, elems * 2 + 3]
self.assertAllEqual(self.evaluate(r), self.evaluate(expected))
@combinations.generate(_test_combinations())
def testMapDefunShapeInference(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def fn(x):
return x
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
result = map_defun.map_defun(fn, [elems], [dtypes.int32], [(2,)])[0]
self.assertEqual(result.get_shape(), (3, 2))
@combinations.generate(_test_combinations())
def testMapDefunPartialShapeInference(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def fn(x):
return x
elems = array_ops.placeholder(dtypes.int64, (None, 2))
result = map_defun.map_defun(fn, [elems], [dtypes.int32], [(2,)])
self.assertEqual(result[0].get_shape().as_list(), [None, 2])
@combinations.generate(_test_combinations())
def testMapDefunRaisesErrorOnRuntimeShapeMismatch(self):
@function.defun(input_signature=[
tensor_spec.TensorSpec(None, dtypes.int32),
tensor_spec.TensorSpec(None, dtypes.int32)
])
def fn(x, y):
return x, y
elems1 = array_ops.placeholder(dtypes.int32)
elems2 = array_ops.placeholder(dtypes.int32)
result = map_defun.map_defun(fn, [elems1, elems2],
[dtypes.int32, dtypes.int32], [(), ()])
with self.cached_session() as sess:
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
"All inputs must have the same dimension 0."):
sess.run(result, feed_dict={elems1: [1, 2, 3, 4, 5], elems2: [1, 2, 3]})
@combinations.generate(_test_combinations())
def testMapDefunRaisesDefunError(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def fn(x):
with ops.control_dependencies([check_ops.assert_equal(x, 0)]):
return array_ops.identity(x)
elems = constant_op.constant([0, 0, 0, 37, 0])
result = map_defun.map_defun(fn, [elems], [dtypes.int32], [()])
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(result)
@combinations.generate(_test_combinations())
def testMapDefunCancelledCorrectly(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([5], dtypes.int64)])
def defun(x):
# x has leading dimension 5, this will raise an error
return array_ops.gather(x, 10)
c = array_ops.tile(
array_ops.expand_dims(
constant_op.constant([1, 2, 3, 4, 5], dtype=dtypes.int64), 0),
[100, 1])
map_defun_op = map_defun.map_defun(defun, [c], [dtypes.int64], [()])[0]
with self.assertRaisesRegexp(errors.InvalidArgumentError,
r"indices = 10 is not in \[0, 5\)"):
self.evaluate(map_defun_op)
@combinations.generate(_test_combinations())
def testMapDefunWithUnspecifiedOutputShape(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def simple_fn(x):
res = x * 2 + 3
return (res, res + 1, res + 2)
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(simple_fn, [elems],
[dtypes.int32, dtypes.int32, dtypes.int32],
[None, (None,), (2,)])
expected = elems * 2 + 3
self.assertAllEqual(self.evaluate(r[0]), self.evaluate(expected))
self.assertAllEqual(self.evaluate(r[1]), self.evaluate(expected + 1))
self.assertAllEqual(self.evaluate(r[2]), self.evaluate(expected + 2))
@combinations.generate(_test_combinations())
def testMapDefunWithDifferentOutputShapeEachRun(self):
@function.defun(
input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)])
def simple_fn(x):
return x * 2 + 3
elems = array_ops.placeholder(dtypes.int32, name="data")
r = map_defun.map_defun(simple_fn, [elems], [dtypes.int32], [None])[0]
with session.Session() as sess:
self.assertAllEqual(sess.run(r, feed_dict={elems: [0]}), [3])
self.assertAllEqual(
sess.run(r, feed_dict={elems: [[0], [1]]}), [[3], [5]])
@combinations.generate(_test_combinations())
def testMapDefunWithWrongOutputShape(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def simple_fn(x):
return x * 2 + 3
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(simple_fn, [elems], [dtypes.int32], [(1,)])[0]
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(r)
@combinations.generate(_test_combinations())
def testMapDefunWithInvalidInput(self):
@function.defun(
input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)])
def simple_fn(x):
return x * 2
c = constant_op.constant(2)
with self.assertRaises(ValueError):
# Fails at graph construction time for inputs with known shapes.
r = map_defun.map_defun(simple_fn, [c], [dtypes.int32], [None])[0]
p = array_ops.placeholder(dtypes.int32)
r = map_defun.map_defun(simple_fn, [p], [dtypes.int32], [None])[0]
with session.Session() as sess:
with self.assertRaises(errors.InvalidArgumentError):
sess.run(r, feed_dict={p: 0})
@combinations.generate(_test_combinations())
def testMapDefunWithParentCancellation(self):
# Checks that a cancellation of the parent graph is threaded through to
# MapDefunOp correctly.
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def simple_fn(x):
del x
queue = data_flow_ops.FIFOQueue(10, dtypes.int32, ())
# Blocking
return queue.dequeue_many(5)
c = constant_op.constant([1, 2, 3, 4, 5])
map_defun_op = map_defun.map_defun(simple_fn, [c], [dtypes.int32], [()])[0]
with self.cached_session() as sess:
thread = self.checkedThread(
self.assert_op_cancelled, args=(map_defun_op,))
thread.start()
time.sleep(0.2)
sess.close()
thread.join()
@combinations.generate(_test_combinations())
def testMapDefunWithCapturedInputs(self):
c = constant_op.constant(2)
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def fn(x):
return x + c
x = constant_op.constant([1, 2, 3, 4])
map_defun_op = map_defun.map_defun(fn, [x], [dtypes.int32], [()])[0]
expected = x + c
self.assertAllEqual(self.evaluate(expected), self.evaluate(map_defun_op))
@combinations.generate(_test_combinations())
def testMapDefunWithVariantTensor(self):
@function.defun(
input_signature=[tensor_spec.TensorSpec([], dtypes.variant)])
def fn(x):
return x
st = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
serialized = sparse_ops.serialize_sparse_v2(st, out_type=dtypes.variant)
serialized = array_ops.stack([serialized, serialized])
map_defun_op = map_defun.map_defun(fn, [serialized], [dtypes.variant],
[None])[0]
deserialized = sparse_ops.deserialize_sparse(map_defun_op, dtypes.int32)
expected = sparse_tensor.SparseTensorValue(
indices=[[0, 0, 0], [0, 1, 2], [1, 0, 0], [1, 1, 2]],
values=[1, 2, 1, 2],
dense_shape=[2, 3, 4])
actual = self.evaluate(deserialized)
self.assertValuesEqual(expected, actual)
@combinations.generate(_test_combinations())
def testMapDefunWithVariantTensorAsCaptured(self):
st = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
serialized = sparse_ops.serialize_sparse_v2(st, out_type=dtypes.variant)
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def fn(x):
del x
return serialized
x = constant_op.constant([0, 0])
map_defun_op = map_defun.map_defun(fn, [x], [dtypes.variant], [None])[0]
deserialized = sparse_ops.deserialize_sparse(map_defun_op, dtypes.int32)
expected = sparse_tensor.SparseTensorValue(
indices=[[0, 0, 0], [0, 1, 2], [1, 0, 0], [1, 1, 2]],
values=[1, 2, 1, 2],
dense_shape=[2, 3, 4])
actual = self.evaluate(deserialized)
self.assertValuesEqual(expected, actual)
@combinations.generate(_test_combinations())
def testMapDefunWithStrTensor(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.string)])
def fn(x):
return x
st = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
serialized = sparse_ops.serialize_sparse_v2(st, out_type=dtypes.string)
serialized = array_ops.stack([serialized, serialized])
map_defun_op = map_defun.map_defun(fn, [serialized], [dtypes.string],
[None])[0]
deserialized = sparse_ops.deserialize_sparse(map_defun_op, dtypes.int32)
expected = sparse_tensor.SparseTensorValue(
indices=[[0, 0, 0], [0, 1, 2], [1, 0, 0], [1, 1, 2]],
values=[1, 2, 1, 2],
dense_shape=[2, 3, 4])
actual = self.evaluate(deserialized)
self.assertValuesEqual(expected, actual)
if __name__ == "__main__":
test.main()
| apache-2.0 | 2,806,408,529,497,152,500 | 37.594444 | 80 | 0.653376 | false |
spywhere/Javatar | QuickMenu/QuickMenu_main.py | 1 | 3882 | # QuickMenu Example
# Type 'QuickMenu' in command pallete to see some possible commands
#
# QuickMenu Example by: spywhere
# Please give credit to me!
from QuickMenu.QuickMenu import *
import sublime_plugin
# Using it within WindowCommand or any command type you want
class QuickMenuCommand(sublime_plugin.WindowCommand):
# A variable to store a QuickMenu instance
qm = None
# An example menu
menu = {
# Startup menu
"main": {
# Its items
"items": [["Dialogs...", "All dialog items"], ["Items...", "Do action on item"], ["Commands...", "Run command"]],
# Item's actions
"actions": [
{
# Redirect to "dialogs" submenu
"name": "dialogs"
}, {
# Redirect to "items" submenu
"name": "items"
}, {
# Redirect to "commands" submenu
"name": "commands"
}
]
},
# Custom menu named "dialogs"
"dialogs": {
# Selected second item as default
"selected_index": 2,
"items": [["Back", "Back to previous menu"], ["Message Dialog", "Hello, World on Message Dialog"], ["Error Dialog", "Hello, World on Error Dialog"]],
"actions": [
{
"name": "main",
}, {
# This will select "Message Dialog command" on "commands" menu
"name": "commands",
"item": 2
}, {
"name": "commands",
"item": 3
}
]
},
"items": {
"selected_index": 2,
"items": [["Back", "Back to previous menu"], ["Item 2 on Dialogs", "Select item 2 in Dialogs"], ["Item 3 on Dialogs", "Select item 3 in Dialogs"], ["Item 4 on Commands", "Select item 4 in Commands"]],
"actions": [
{
"name": "main",
}, {
"name": "dialogs",
"item": 2
}, {
"name": "dialogs",
"item": 3
}, {
"name": "commands",
"item": 4
}
]
},
"commands": {
"selected_index": 2,
"items": [["Back", "Back to previous menu"], ["Message Dialog command", "Hello, World on Message Dialog"], ["Error Dialog command", "Hello, World on Error Dialog"], ["Custom command", "Open User's settings file"]],
"actions": [
{
"name": "main",
}, {
# Show a message dialog
"command": "message_dialog",
"args": "Message: Hello, World"
}, {
# Show an error dialog
"command": "error_dialog",
"args": "Error: Hello, World"
}, {
# Run custom command
"command": "open_file",
"args": {
"file": "${packages}/User/Preferences.sublime-settings"
}
}
]
}
}
# This method receive a passing menu and action which can be use like this in keymap or other part of your package
#
# "command": "quick_menu",
# "args": {
# "action": {
# "name": "main"
# }
# }
#
# or custom menu on the go!
#
# "command": "quick_menu",
# "args": {
# "menu": {
# "main": {
# //Blah Blah
# }
# }
# }
#
def run(self, menu=None, action=None):
# If QuickMenu is not instantiated yet
if self.qm is None:
# If passing menu is not None
if menu is not None:
# Set it
self.menu = menu
# Instantiate QuickMenu with menu from self.menu
self.qm = QuickMenu(self.menu)
# Show the menu on self.window and pass on_done to self.select with passing menu and action
# More API documentation on README file
self.qm.show(self.window, self.select, menu, action)
def select(self, info):
# if selected item's index is less than 0 (cancel menu selection)
if info["index"] < 0:
# Open console to see these messages (View > Show Console)
print("Exit menu level " + str(info["level"]) + " and is from sublime: " + str(info["from_sublime"]))
else:
# items = menu's items <list>
# index = item's index <int>
# level = menu level (this is used to prevent self recursion menu) <int>
# from_sublime = is selected item comes from menu opened by sublime? <bool>
print("Select item \"" + str(info["items"][info["index"]]) + "\" at menu level " + str(info["level"]) + " and is from sublime: " + str(info["from_sublime"])) | mit | -4,143,994,274,775,219,700 | 26.935252 | 217 | 0.598403 | false |
plepe/pgmapcss | pgmapcss/eval/eval_ge.py | 1 | 1195 | class config_eval_ge(config_base):
math_level = 7
op = '>='
def mutable(self, param_values, stat):
import pgmapcss.eval
config_metric = pgmapcss.eval.eval_functions.list()['metric']
ret = [ config_metric.mutable([p], stat) for p in param_values ]
return min(ret)
def possible_values(self, param_values, prop, stat):
if len(param_values) < 2:
return ( '', self.mutable(param_values, stat) )
if True in param_values:
return ( { 'true', 'false' }, 3 )
else:
return config_base.possible_values(self, param_values, prop, stat)
def eval_ge(param):
if len(param) < 2:
return ''
a = eval_metric(param[0:1])
a = float(a) if a != '' else 0.0
for p in param[1:]:
b = eval_metric([p])
b = float(b) if b != '' else 0.0
if not a >= b:
return 'false'
a = b
return 'true'
# TESTS
# IN ['1', '2']
# OUT 'false'
# IN ['2', '1']
# OUT 'true'
# IN ['1', '2', '3']
# OUT 'false'
# IN ['3', '2', '1']
# OUT 'true'
# IN ['1', '2', '2.0', '3']
# OUT 'false'
# IN ['3', '2', '2.0', '1']
# OUT 'true'
# IN ['1.0', '1']
# OUT 'true'
| agpl-3.0 | 279,862,619,292,182,940 | 22.431373 | 78 | 0.501255 | false |
Katello/katello-cli | test/katello/tests/core/repo/repo_status_test.py | 1 | 3038 | import unittest
from mock import Mock
import os
from katello.tests.core.action_test_utils import CLIOptionTestCase, CLIActionTestCase
from katello.tests.core.repo import repo_data
import katello.client.core.repo
from katello.client.core.repo import Status
from katello.client.api.utils import ApiDataError
class RequiredCLIOptionsTests(CLIOptionTestCase):
#repo is defined by either (org, product, repo_name, env name) or repo_id
action = Status()
disallowed_options = [
('--name=repo1', '--product=product1'),
('--org=ACME', '--name=repo1'),
('--org=ACME', '--product=product1'),
(),
]
allowed_options = [
('--org=ACME', '--name=repo1', '--product=product1'),
('--id=repo_id1', ),
]
class RepoStatusTest(CLIActionTestCase):
ORG_NAME = "org_1"
PROD_NAME = "product_1"
REPO = repo_data.REPOS[0]
ENV_NAME = "env_1"
OPTIONS_WITH_ID = {
'id': REPO['id'],
}
OPTIONS_WITH_NAME = {
'name': REPO['name'],
'product': PROD_NAME,
'org': ORG_NAME,
'environment': ENV_NAME,
}
repo = None
def setUp(self):
self.set_action(Status())
self.set_module(katello.client.core.repo)
self.mock_printer()
self.mock_options(self.OPTIONS_WITH_NAME)
self.mock(self.action.api, 'repo', self.REPO)
self.mock(self.action.api, 'last_sync_status', repo_data.SYNC_RESULT_WITHOUT_ERROR)
self.repo = self.mock(self.module, 'get_repo', self.REPO).return_value
def tearDown(self):
self.restore_mocks()
def test_finds_repo_by_id(self):
self.mock_options(self.OPTIONS_WITH_ID)
self.run_action()
self.action.api.repo.assert_called_once_with(self.REPO['id'])
def test_finds_repo_by_name(self):
self.mock_options(self.OPTIONS_WITH_NAME)
self.run_action()
self.module.get_repo.assert_called_once_with(self.ORG_NAME,
self.REPO['name'],
self.PROD_NAME, None,
None, self.ENV_NAME,
False, None, None, None)
def test_returns_with_error_when_no_repo_found(self):
self.mock_options(self.OPTIONS_WITH_NAME)
self.mock(self.module, 'get_repo').side_effect = ApiDataError()
self.run_action(os.EX_DATAERR)
def test_it_calls_last_sync_status_api(self):
self.run_action()
self.action.api.last_sync_status.assert_called_once_with(self.REPO['id'])
def test_it_does_not_set_progress_for_not_running_sync(self):
self.run_action()
self.assertRaises(KeyError, lambda: self.repo['progress'] )
def test_it_sets_progress_for_running_sync(self):
self.mock(self.action.api, 'last_sync_status', repo_data.SYNC_RUNNING_RESULT)
self.run_action()
self.assertTrue(isinstance(self.repo['progress'], str))
| gpl-2.0 | -3,292,979,221,825,884,700 | 30.645833 | 91 | 0.592824 | false |
BGluth/Pi-Pedestrian-Counter | source/tests/Test_UbiConnect.py | 1 | 5289 | import mock
import unittest
from source import UbiConnect
from source.tests.custom_mocks.fake_ubivariable import Fake_UbiVariable
const_fakeUbiAccountKey = '12345'
const_fakeVariableName = 'MyFakeVariable'
const_fakeServerValueToChangeTo = 42
class WhenCreatingAnUbiConnectObjectWhenCanNotConnect(unittest.TestCase):
def given_a_connected_ubiconnect_object(self):
_createUbiConnectionObject(self)
def test_then_the_connection_object_itself_should_be_false(self):
self.assertFalse(self.ubiConnect)
def setUp(self):
self.given_a_connected_ubiconnect_object()
class WhenCreatingAnUbiConnectObjectWhenCanConnect(unittest.TestCase):
def given_a_unconnected_ubiconnect_object(self):
_createUbiConnectionObject(self)
def test_then_the_connection_object_itself_should_be_not_false(self):
self.assertTrue(self.ubiConnect is not False)
def clean_up_mocks(self):
_cleanUpApiClientMock(self)
def setUp(self):
_setUpApiClientMock(self)
self.given_a_unconnected_ubiconnect_object()
def tearDown(self):
self.clean_up_mocks()
class WhenUsingAnUbiConnectObjectWithAUnsuccessfulConnection(unittest.TestCase):
def given_a_unconnected_ubiconnect_object(self):
_createUbiConnectionObject(self)
def test_then_reading_a_non_existing_variable_should_throw_exception(self):
with self.assertRaises(Exception):
self.ubiConnect.tryReadVariableFromServer(const_fakeUbiVariableHandle)
def test_then_writting_to_a_non_existing_variable_should_throw_exception(self):
with self.assertRaises(Exception):
self.ubiConnect.tryWriteVariableToServer(const_fakeUbiVariableHandle, const_fakeServerValueToChangeTo)
def setUp(self):
_setUpApiClientMock(self)
self.given_a_unconnected_ubiconnect_object()
class WhenUsingAnUbiConnectObjectWithASuccessfulConnection(unittest.TestCase):
def given_a_connected_ubiconnect_object(self):
_createUbiConnectionObject(self)
def test_then_isConnected_returns_true(self):
result = self.ubiConnect.isConnected()
self.assertTrue(result)
def clean_up_mocks(self):
_cleanUpApiClientMock(self)
def setUp(self):
_setUpApiClientMock(self)
self.given_a_connected_ubiconnect_object()
def tearDown(self):
self.clean_up_mocks()
class WhenAddingAnUbiVariableWithAConnection(unittest.TestCase):
def given_a_connected_ubiconnection_object(self):
_createUbiConnectionObject(self)
def after_adding_a_ubivariable_successfully(self):
_addVariable(self)
def test_then_i_can_read_the_variables_value(self):
_assertUbiVariableValueIsEqualToValue(self, Fake_UbiVariable.const_valueToInitializeTo)
def clean_up_mocks(self):
_cleanUpApiClientMock(self)
def setUp(self):
_setUpApiClientAndVariableMock(self)
self.given_a_connected_ubiconnection_object()
self.after_adding_a_ubivariable_successfully()
def tearDown(self):
self.clean_up_mocks()
class WhenWritingToAVariableWithAConection(unittest.TestCase):
def given_a_connected_ubiconnection_object(self):
_createUbiConnectionObject(self)
def after_writing_to_an_ubivariable(self):
_writeToVariable(self)
def test_then_i_should_be_able_to_read_the_newly_changed_value(self):
_assertUbiVariableValueIsEqualToValue(self, const_fakeServerValueToChangeTo)
def clean_up_mocks(self):
_cleanUpApiClientMock(self)
def setUp(self):
_setUpApiClientAndVariableMock(self)
self.given_a_connected_ubiconnection_object()
self.after_writing_to_an_ubivariable()
def tearDown(self):
self.clean_up_mocks()
def _createUbiConnectionObject(objectToAddToo):
objectToAddToo.ubiConnect = UbiConnect.tryConnectToUbidotsAccount(const_fakeUbiAccountKey)
def _writeToVariable(testObject):
_addVariable(testObject)
testObject.ubiConnect.tryWriteVariableToServer(testObject.variableHandle, const_fakeServerValueToChangeTo)
def _addVariable(testObject):
testObject.variableHandle = testObject.ubiConnect.addNewVariableAndReturnHandle(const_fakeVariableName)
def _assertUbiVariableValueIsEqualToValue(testObject, valueToEqual):
readValue = testObject.ubiConnect.tryReadVariableFromServer(testObject.variableHandle)
testObject.assertEquals(readValue, valueToEqual)
def _setUpApiClientAndVariableMock(testObject):
_setUpApiClientMock(testObject)
_setUpUbiVariableFakeOnTestObject(testObject)
def _setUpApiClientMock(testObject):
testObject.ApiClientPatcher = mock.patch('ubidots.apiclient.ApiClient', autospec = True)
testObject.apiClientMock = testObject.ApiClientPatcher.start();
def _setUpUbiVariableFakeOnTestObject(testObject):
testObject.FakeUbiVariablePatcher = mock.patch('ubidots.apiclient.Variable', new = Fake_UbiVariable)
testObject.FakeUbiVariablePatcher.start()
apiClientInstance = testObject.apiClientMock.return_value
apiClientInstance.get_variable.return_value = Fake_UbiVariable()
def _cleanUpApiClientMock(testObject):
testObject.ApiClientPatcher.stop()
if __name__ == '__main__':
unittest.main()
| mit | -3,045,458,600,944,205,000 | 31.635802 | 114 | 0.749196 | false |
BlackHole/enigma2-obh10 | lib/python/Components/Lcd.py | 1 | 21994 | from __future__ import print_function
from boxbranding import getBoxType, getDisplayType
from sys import maxint
from twisted.internet import threads
from enigma import eDBoxLCD, eTimer, eActionMap
from config import config, ConfigSubsection, ConfigSelection, ConfigSlider, ConfigYesNo, ConfigNothing
from Components.SystemInfo import SystemInfo
from Tools.Directories import fileExists
from Screens.InfoBar import InfoBar
from Screens.Screen import Screen
import Screens.Standby
import usb
class dummyScreen(Screen):
skin = """<screen position="0,0" size="0,0" transparent="1">
<widget source="session.VideoPicture" render="Pig" position="0,0" size="0,0" backgroundColor="transparent" zPosition="1"/>
</screen>"""
def __init__(self, session, args=None):
Screen.__init__(self, session)
self.close()
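# Starts the front panel symbol poller if the display driver exposes
# /proc/stb/lcd/symbol_network or /proc/stb/lcd/symbol_usb.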
def IconCheck(session=None, **kwargs):
if fileExists("/proc/stb/lcd/symbol_network") or fileExists("/proc/stb/lcd/symbol_usb"):
global networklinkpoller
networklinkpoller = IconCheckPoller()
networklinkpoller.start()
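# Polls the network link and USB state every 30 seconds and mirrors them onto the
# front panel symbols.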
class IconCheckPoller:
def __init__(self):
self.timer = eTimer()
def start(self):
if self.iconcheck not in self.timer.callback:
self.timer.callback.append(self.iconcheck)
self.timer.startLongTimer(0)
def stop(self):
if self.iconcheck in self.timer.callback:
self.timer.callback.remove(self.iconcheck)
self.timer.stop()
def iconcheck(self):
threads.deferToThread(self.JobTask)
self.timer.startLongTimer(30)
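	# Runs in a reactor worker thread: reads the link/USB state and updates
	# /proc/stb/lcd/symbol_network and symbol_usb (symbols are cleared when icon mode is off).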
def JobTask(self):
LinkState = 0
		# Strip the trailing newline so the 'down' comparison can match; otherwise the
		# carrier file may be read while the interface is down, which raises an IOError.
		if fileExists('/sys/class/net/wlan0/operstate'):
			LinkState = open('/sys/class/net/wlan0/operstate').read().strip()
			if LinkState != 'down':
				LinkState = open('/sys/class/net/wlan0/operstate').read()
		elif fileExists('/sys/class/net/eth0/operstate'):
			LinkState = open('/sys/class/net/eth0/operstate').read().strip()
			if LinkState != 'down':
				LinkState = open('/sys/class/net/eth0/carrier').read()
		LinkState = str(LinkState)[:1]  # first character only; str() also covers the case where neither interface exists
if fileExists("/proc/stb/lcd/symbol_network") and config.lcd.mode.value == '1':
f = open("/proc/stb/lcd/symbol_network", "w")
f.write(str(LinkState))
f.close()
elif fileExists("/proc/stb/lcd/symbol_network") and config.lcd.mode.value == '0':
f = open("/proc/stb/lcd/symbol_network", "w")
f.write('0')
f.close()
USBState = 0
busses = usb.busses()
for bus in busses:
devices = bus.devices
for dev in devices:
if dev.deviceClass != 9 and dev.deviceClass != 2 and dev.idVendor > 0:
# print ' '
# print "Device:", dev.filename
# print " Number:", dev.deviceClass
# print " idVendor: %d (0x%04x)" % (dev.idVendor, dev.idVendor)
# print " idProduct: %d (0x%04x)" % (dev.idProduct, dev.idProduct)
USBState = 1
if fileExists("/proc/stb/lcd/symbol_usb") and config.lcd.mode.value == '1':
f = open("/proc/stb/lcd/symbol_usb", "w")
f.write(str(USBState))
f.close()
elif fileExists("/proc/stb/lcd/symbol_usb") and config.lcd.mode.value == '0':
f = open("/proc/stb/lcd/symbol_usb", "w")
f.write('0')
f.close()
self.timer.startLongTimer(30)
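# Thin wrapper around eDBoxLCD: brightness/contrast handling, automatic dimming on
# inactivity and the front panel LED settings.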
class LCD:
def __init__(self):
eActionMap.getInstance().bindAction('', -maxint - 1, self.DimUpEvent)
self.autoDimDownLCDTimer = eTimer()
self.autoDimDownLCDTimer.callback.append(self.autoDimDownLCD)
self.autoDimUpLCDTimer = eTimer()
self.autoDimUpLCDTimer.callback.append(self.autoDimUpLCD)
self.currBrightness = self.dimBrightness = self.Brightness = None
self.dimDelay = 0
config.misc.standbyCounter.addNotifier(self.standbyCounterChanged, initial_call=False)
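	# Entering standby: stop the dim timers and unbind the key hook; leaveStandby()
	# re-binds it once standby is closed.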
def standbyCounterChanged(self, configElement):
Screens.Standby.inStandby.onClose.append(self.leaveStandby)
self.autoDimDownLCDTimer.stop()
self.autoDimUpLCDTimer.stop()
eActionMap.getInstance().unbindAction('', self.DimUpEvent)
def leaveStandby(self):
eActionMap.getInstance().bindAction('', -maxint - 1, self.DimUpEvent)
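	# Any button/IR event cancels a dim-down in progress and schedules a ramp back up
	# to the configured brightness.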
def DimUpEvent(self, key, flag):
self.autoDimDownLCDTimer.stop()
if not Screens.Standby.inTryQuitMainloop:
if self.Brightness is not None and not self.autoDimUpLCDTimer.isActive():
self.autoDimUpLCDTimer.start(10, True)
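	# Step the brightness down by 1 every 10 ms until the configured dim level is reached.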
def autoDimDownLCD(self):
if not Screens.Standby.inTryQuitMainloop:
if self.dimBrightness is not None and self.currBrightness > self.dimBrightness:
self.currBrightness = self.currBrightness - 1
eDBoxLCD.getInstance().setLCDBrightness(self.currBrightness)
self.autoDimDownLCDTimer.start(10, True)
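	# Ramp the brightness up in steps of 5 every 10 ms; once fully up, re-arm the
	# dim-down timer after the configured delay.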
def autoDimUpLCD(self):
if not Screens.Standby.inTryQuitMainloop:
self.autoDimDownLCDTimer.stop()
if self.currBrightness < self.Brightness:
self.currBrightness = self.currBrightness + 5
if self.currBrightness >= self.Brightness:
self.currBrightness = self.Brightness
eDBoxLCD.getInstance().setLCDBrightness(self.currBrightness)
self.autoDimUpLCDTimer.start(10, True)
else:
if self.dimBrightness is not None and self.currBrightness > self.dimBrightness and self.dimDelay is not None and self.dimDelay > 0:
self.autoDimDownLCDTimer.startLongTimer(self.dimDelay)
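	# The brightness sliders are 0..10 in the config; scale to the 0..255 range used by
	# the driver before applying.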
def setBright(self, value):
value *= 255
value /= 10
if value > 255:
value = 255
self.autoDimDownLCDTimer.stop()
self.autoDimUpLCDTimer.stop()
self.currBrightness = self.Brightness = value
eDBoxLCD.getInstance().setLCDBrightness(self.currBrightness)
if self.dimBrightness is not None and self.currBrightness > self.dimBrightness:
if self.dimDelay is not None and self.dimDelay > 0:
self.autoDimDownLCDTimer.startLongTimer(self.dimDelay)
def setStandbyBright(self, value):
value *= 255
value /= 10
if value > 255:
value = 255
self.autoDimDownLCDTimer.stop()
self.autoDimUpLCDTimer.stop()
self.Brightness = value
if self.dimBrightness is None:
self.dimBrightness = value
if self.currBrightness is None:
self.currBrightness = value
eDBoxLCD.getInstance().setLCDBrightness(self.Brightness)
def setDimBright(self, value):
value *= 255
value /= 10
if value > 255:
value = 255
self.dimBrightness = value
def setDimDelay(self, value):
self.dimDelay = int(value)
def setContrast(self, value):
value *= 63
value /= 20
if value > 63:
value = 63
eDBoxLCD.getInstance().setLCDContrast(value)
def setInverted(self, value):
if value:
value = 255
eDBoxLCD.getInstance().setInverted(value)
def setFlipped(self, value):
eDBoxLCD.getInstance().setFlipped(value)
def isOled(self):
return eDBoxLCD.getInstance().isOled()
def setMode(self, value):
print("[LCD] setLCDMode", value)
f = open("/proc/stb/lcd/show_symbols", "w")
f.write(value)
f.close()
def setPower(self, value):
print("[LCD] setLCDPower", value)
f = open("/proc/stb/power/vfd", "w")
f.write(value)
f.close()
def setLEDNormalState(self, value):
eDBoxLCD.getInstance().setLED(value, 0)
def setLEDDeepStandbyState(self, value):
eDBoxLCD.getInstance().setLED(value, 1)
def setLEDBlinkingTime(self, value):
eDBoxLCD.getInstance().setLED(value, 2)
def setLCDMiniTVMode(self, value):
print("[LCD] setLCDMiniTVMode", value)
f = open('/proc/stb/lcd/mode', "w")
f.write(value)
f.close()
def setLCDMiniTVPIPMode(self, value):
print("[LCD] setLCDMiniTVPIPMode", value)
def setLCDMiniTVFPS(self, value):
print("[LCD] setLCDMiniTVFPS", value)
f = open('/proc/stb/lcd/fps', "w")
f.write("%d \n" % value)
f.close()
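# Module level standby hooks (distinct from the LCD class methods above): re-apply the
# normal brightness/LED values when leaving standby and the standby values on entering it.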
def leaveStandby():
config.lcd.bright.apply()
if SystemInfo["LEDButtons"]:
config.lcd.ledbrightness.apply()
config.lcd.ledbrightnessdeepstandby.apply()
def standbyCounterChanged(dummy):
from Screens.Standby import inStandby
inStandby.onClose.append(leaveStandby)
config.lcd.standby.apply()
if SystemInfo["LEDButtons"]:
config.lcd.ledbrightnessstandby.apply()
config.lcd.ledbrightnessdeepstandby.apply()
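# Builds the config.lcd subsection and attaches the notifiers that push each setting to
# the corresponding /proc/stb interface or eDBoxLCD call.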
def InitLcd():
	if getBoxType() in ('vusolo',):  # tuple, not a bare string, so this is an exact match rather than a substring test
detected = False
else:
detected = eDBoxLCD.getInstance().detected()
ilcd = LCD()
SystemInfo["Display"] = detected
config.lcd = ConfigSubsection()
if fileExists("/proc/stb/lcd/mode"):
f = open("/proc/stb/lcd/mode", "r")
can_lcdmodechecking = f.read().strip().split(" ")
print("[LCD] LCDMiniTV", can_lcdmodechecking)
f.close()
else:
can_lcdmodechecking = False
if SystemInfo["PowerLED"]:
def setPowerLEDstate(configElement):
print("[LCD] PowerLED = %s" % SystemInfo["PowerLED"])
f = open("/proc/stb/power/powerled", "w")
f.write(configElement.value)
f.close()
config.lcd.powerled = ConfigSelection(default="on", choices=[("off", _("Off")), ("on", _("On"))])
config.lcd.powerled.addNotifier(setPowerLEDstate)
if SystemInfo["PowerLED2"]:
def setPowerLEDstate2(configElement):
print("[LCD] PowerLED2 = %s" % SystemInfo["PowerLED2"])
f = open("/proc/stb/power/powerled2", "w")
f.write(configElement.value)
f.close()
config.lcd.powerled2 = ConfigSelection(default="on", choices=[("off", _("Off")), ("on", _("On"))])
config.lcd.powerled2.addNotifier(setPowerLEDstate2)
if SystemInfo["StandbyLED"]:
		def setPowerLEDstandbystate(configElement):
print("[LCD] StandbyLED = %s configElement = %s" % (SystemInfo["StandbyLED"], configElement.value))
f = open("/proc/stb/power/standbyled", "w")
f.write(configElement.value)
f.close()
config.lcd.standbyLED = ConfigSelection(default="on", choices=[("off", _("Off")), ("on", _("On"))])
config.lcd.standbyLED.addNotifier(setPowerLEDstanbystate)
if SystemInfo["SuspendLED"]:
def setPowerLEDdeepstanbystate(configElement):
print("[LCD] SuspendLED = %s configElement = %s" % (SystemInfo["SuspendLED"], configElement.value))
f = open("/proc/stb/power/suspendled", "w")
f.write(configElement.value)
f.close()
config.lcd.suspendLED = ConfigSelection(default="on", choices=[("off", _("Off")), ("on", _("On"))])
config.lcd.suspendLED.addNotifier(setPowerLEDdeepstanbystate)
if SystemInfo["LedPowerColor"]:
def setLedPowerColor(configElement):
f = open("/proc/stb/fp/ledpowercolor", "w")
f.write(configElement.value)
f.close()
config.lcd.ledpowercolor = ConfigSelection(default="1", choices=[("0", _("off")), ("1", _("blue")), ("2", _("red")), ("3", _("violet"))])
config.lcd.ledpowercolor.addNotifier(setLedPowerColor)
if SystemInfo["LedStandbyColor"]:
def setLedStandbyColor(configElement):
f = open("/proc/stb/fp/ledstandbycolor", "w")
f.write(configElement.value)
f.close()
config.lcd.ledstandbycolor = ConfigSelection(default="3", choices=[("0", _("off")), ("1", _("blue")), ("2", _("red")), ("3", _("violet"))])
config.lcd.ledstandbycolor.addNotifier(setLedStandbyColor)
if SystemInfo["LedSuspendColor"]:
def setLedSuspendColor(configElement):
f = open("/proc/stb/fp/ledsuspendledcolor", "w")
f.write(configElement.value)
f.close()
config.lcd.ledsuspendcolor = ConfigSelection(default="2", choices=[("0", _("off")), ("1", _("blue")), ("2", _("red")), ("3", _("violet"))])
config.lcd.ledsuspendcolor.addNotifier(setLedSuspendColor)
if SystemInfo["Power24x7On"]:
def setPower24x7On(configElement):
f = open("/proc/stb/fp/power4x7on", "w")
f.write(configElement.value)
f.close()
config.lcd.power24x7on = ConfigSelection(default="on", choices=[("off", _("Off")), ("on", _("On"))])
config.lcd.power24x7on.addNotifier(setPower24x7On)
if SystemInfo["Power24x7Standby"]:
def setPower24x7Standby(configElement):
f = open("/proc/stb/fp/power4x7standby", "w")
f.write(configElement.value)
f.close()
config.lcd.power24x7standby = ConfigSelection(default="off", choices=[("off", _("Off")), ("on", _("On"))])
config.lcd.power24x7standby.addNotifier(setPower24x7Standby)
if SystemInfo["Power24x7Suspend"]:
def setPower24x7Suspend(configElement):
f = open("/proc/stb/fp/power4x7suspend", "w")
f.write(configElement.value)
f.close()
config.lcd.power24x7suspend = ConfigSelection(default="off", choices=[("off", _("Off")), ("on", _("On"))])
config.lcd.power24x7suspend.addNotifier(setPower24x7Suspend)
if SystemInfo["LEDButtons"]:
def setLEDnormalstate(configElement):
ilcd.setLEDNormalState(configElement.value)
def setLEDdeepstandby(configElement):
ilcd.setLEDDeepStandbyState(configElement.value)
def setLEDblinkingtime(configElement):
ilcd.setLEDBlinkingTime(configElement.value)
config.lcd.ledblinkingtime = ConfigSlider(default=5, increment=1, limits=(0, 15))
config.lcd.ledblinkingtime.addNotifier(setLEDblinkingtime)
config.lcd.ledbrightnessdeepstandby = ConfigSlider(default=1, increment=1, limits=(0, 15))
config.lcd.ledbrightnessdeepstandby.addNotifier(setLEDnormalstate)
config.lcd.ledbrightnessdeepstandby.addNotifier(setLEDdeepstandby)
config.lcd.ledbrightnessdeepstandby.apply = lambda: setLEDdeepstandby(config.lcd.ledbrightnessdeepstandby)
config.lcd.ledbrightnessstandby = ConfigSlider(default=1, increment=1, limits=(0, 15))
config.lcd.ledbrightnessstandby.addNotifier(setLEDnormalstate)
config.lcd.ledbrightnessstandby.apply = lambda: setLEDnormalstate(config.lcd.ledbrightnessstandby)
config.lcd.ledbrightness = ConfigSlider(default=3, increment=1, limits=(0, 15))
config.lcd.ledbrightness.addNotifier(setLEDnormalstate)
config.lcd.ledbrightness.apply = lambda: setLEDnormalstate(config.lcd.ledbrightness)
if detected:
config.lcd.scroll_speed = ConfigSelection(default="300", choices=[
("500", _("slow")),
("300", _("normal")),
("100", _("fast"))])
config.lcd.scroll_delay = ConfigSelection(default="10000", choices=[
("10000", "10 " + _("seconds")),
("20000", "20 " + _("seconds")),
("30000", "30 " + _("seconds")),
("60000", "1 " + _("minute")),
("300000", "5 " + _("minutes")),
("noscrolling", _("off"))])
def setLCDbright(configElement):
ilcd.setBright(configElement.value)
def setLCDstandbybright(configElement):
ilcd.setStandbyBright(configElement.value)
def setLCDdimbright(configElement):
ilcd.setDimBright(configElement.value)
def setLCDdimdelay(configElement):
ilcd.setDimDelay(configElement.value)
def setLCDcontrast(configElement):
ilcd.setContrast(configElement.value)
def setLCDinverted(configElement):
ilcd.setInverted(configElement.value)
def setLCDflipped(configElement):
ilcd.setFlipped(configElement.value)
def setLCDmode(configElement):
ilcd.setMode(configElement.value)
def setLCDpower(configElement):
ilcd.setPower(configElement.value)
def setLCDminitvmode(configElement):
ilcd.setLCDMiniTVMode(configElement.value)
def setLCDminitvpipmode(configElement):
ilcd.setLCDMiniTVPIPMode(configElement.value)
def setLCDminitvfps(configElement):
ilcd.setLCDMiniTVFPS(configElement.value)
standby_default = 0
if not ilcd.isOled():
config.lcd.contrast = ConfigSlider(default=5, limits=(0, 20))
config.lcd.contrast.addNotifier(setLCDcontrast)
else:
config.lcd.contrast = ConfigNothing()
standby_default = 1
config.lcd.standby = ConfigSlider(default=standby_default, limits=(0, 10))
config.lcd.standby.addNotifier(setLCDbright)
config.lcd.standby.apply = lambda: setLCDbright(config.lcd.standby)
config.lcd.bright = ConfigSlider(default=5, limits=(0, 10))
config.lcd.bright.addNotifier(setLCDbright)
config.lcd.bright.apply = lambda: setLCDbright(config.lcd.bright)
config.lcd.dimbright = ConfigSlider(default=standby_default, limits=(0, 10))
config.lcd.dimbright.addNotifier(setLCDdimbright)
config.lcd.dimbright.apply = lambda: setLCDdimbright(config.lcd.dimbright)
config.lcd.dimdelay = ConfigSelection(default="0", choices=[
("5", "5 " + _("seconds")),
("10", "10 " + _("seconds")),
("15", "15 " + _("seconds")),
("20", "20 " + _("seconds")),
("30", "30 " + _("seconds")),
("60", "1 " + _("minute")),
("120", "2 " + _("minutes")),
("300", "5 " + _("minutes")),
("0", _("off"))])
config.lcd.dimdelay.addNotifier(setLCDdimdelay)
config.lcd.invert = ConfigYesNo(default=False)
config.lcd.invert.addNotifier(setLCDinverted)
def PiconPackChanged(configElement):
configElement.save()
config.lcd.picon_pack = ConfigYesNo(default=False)
config.lcd.picon_pack.addNotifier(PiconPackChanged)
config.lcd.flip = ConfigYesNo(default=False)
config.lcd.flip.addNotifier(setLCDflipped)
if SystemInfo["LcdPowerOn"]:
config.lcd.power = ConfigSelection([("0", _("Off")), ("1", _("On"))], "1")
config.lcd.power.addNotifier(setLCDpower)
else:
config.lcd.power = ConfigNothing()
if SystemInfo["LcdLiveTV"]:
def lcdLiveTvChanged(configElement):
setLCDLiveTv(configElement.value)
configElement.save()
config.lcd.showTv = ConfigYesNo(default=False)
config.lcd.showTv.addNotifier(lcdLiveTvChanged)
if "live_enable" in SystemInfo["LcdLiveTV"]:
config.misc.standbyCounter.addNotifier(standbyCounterChangedLCDLiveTV, initial_call=False)
if SystemInfo["LCDMiniTV"] and getBoxType() not in ('gbquad4k', 'gbue4k'):
config.lcd.minitvmode = ConfigSelection([("0", _("normal")), ("1", _("MiniTV")), ("2", _("OSD")), ("3", _("MiniTV with OSD"))], "0")
config.lcd.minitvmode.addNotifier(setLCDminitvmode)
config.lcd.minitvpipmode = ConfigSelection([("0", _("off")), ("5", _("PIP")), ("7", _("PIP with OSD"))], "0")
config.lcd.minitvpipmode.addNotifier(setLCDminitvpipmode)
config.lcd.minitvfps = ConfigSlider(default=30, limits=(0, 30))
config.lcd.minitvfps.addNotifier(setLCDminitvfps)
elif can_lcdmodechecking and getBoxType() in ('gbquad4k', 'gbue4k'):
# (0:normal, 1:video0, 2:fb, 3:vide0+fb, 4:video1, 5:vide0+video1, 6:video1+fb, 7:video0+video1+fb)
config.lcd.minitvmode = ConfigSelection(default="0", choices=[
("0", _("normal")),
("1", _("MiniTV") + _(" - video0")),
("3", _("MiniTV with OSD") + _(" - video0")),
("2", _("OSD")),
("4", _("MiniTV") + _(" - video1")),
("6", _("MiniTV with OSD") + _(" - video1")),
("5", _("MiniTV") + _(" - video0+video1")),
("7", _("MiniTV with OSD") + _(" - video0+video1"))])
config.lcd.minitvmode.addNotifier(setLCDminitvmode)
config.lcd.minitvpipmode = ConfigSelection(default="0", choices=[
("0", _("off")),
("4", _("PIP")),
("6", _("PIP with OSD"))])
config.lcd.minitvpipmode.addNotifier(setLCDminitvpipmode)
config.lcd.minitvfps = ConfigSlider(default=30, limits=(0, 30))
config.lcd.minitvfps.addNotifier(setLCDminitvfps)
else:
config.lcd.minitvmode = ConfigNothing()
config.lcd.minitvpipmode = ConfigNothing()
config.lcd.minitvfps = ConfigNothing()
if SystemInfo["VFD_scroll_repeats"] and getDisplayType() not in ('7segment'):
def scroll_repeats(el):
open(SystemInfo["VFD_scroll_repeats"], "w").write(el.value)
choicelist = [("0", _("None")), ("1", _("1X")), ("2", _("2X")), ("3", _("3X")), ("4", _("4X")), ("500", _("Continues"))]
config.usage.vfd_scroll_repeats = ConfigSelection(default="3", choices=choicelist)
config.usage.vfd_scroll_repeats.addNotifier(scroll_repeats, immediate_feedback=False)
if SystemInfo["VFD_scroll_delay"] and getDisplayType() not in ('7segment'):
def scroll_delay(el):
open(SystemInfo["VFD_scroll_delay"], "w").write(str(el.value))
config.usage.vfd_scroll_delay = ConfigSlider(default=150, increment=10, limits=(0, 500))
config.usage.vfd_scroll_delay.addNotifier(scroll_delay, immediate_feedback=False)
if SystemInfo["VFD_initial_scroll_delay"] and getDisplayType() not in ('7segment'):
def initial_scroll_delay(el):
open(SystemInfo["VFD_initial_scroll_delay"], "w").write(el.value)
choicelist = [
("10000", "10 " + _("seconds")),
("20000", "20 " + _("seconds")),
("30000", "30 " + _("seconds")),
("0", _("no delay"))]
config.usage.vfd_initial_scroll_delay = ConfigSelection(default="1000", choices=choicelist)
config.usage.vfd_initial_scroll_delay.addNotifier(initial_scroll_delay, immediate_feedback=False)
if SystemInfo["VFD_final_scroll_delay"] and getDisplayType() not in ('7segment'):
def final_scroll_delay(el):
open(SystemInfo["VFD_final_scroll_delay"], "w").write(el.value)
choicelist = [
("10000", "10 " + _("seconds")),
("20000", "20 " + _("seconds")),
("30000", "30 " + _("seconds")),
("0", _("no delay"))]
config.usage.vfd_final_scroll_delay = ConfigSelection(default="1000", choices=choicelist)
config.usage.vfd_final_scroll_delay.addNotifier(final_scroll_delay, immediate_feedback=False)
if fileExists("/proc/stb/lcd/show_symbols"):
config.lcd.mode = ConfigSelection([("0", _("no")), ("1", _("yes"))], "1")
config.lcd.mode.addNotifier(setLCDmode)
else:
config.lcd.mode = ConfigNothing()
else:
def doNothing():
pass
config.lcd.contrast = ConfigNothing()
config.lcd.bright = ConfigNothing()
config.lcd.standby = ConfigNothing()
config.lcd.bright.apply = lambda: doNothing()
config.lcd.standby.apply = lambda: doNothing()
config.lcd.mode = ConfigNothing()
config.lcd.power = ConfigNothing()
config.lcd.ledbrightness = ConfigNothing()
config.lcd.ledbrightness.apply = lambda: doNothing()
config.lcd.ledbrightnessstandby = ConfigNothing()
config.lcd.ledbrightnessstandby.apply = lambda: doNothing()
config.lcd.ledbrightnessdeepstandby = ConfigNothing()
config.lcd.ledbrightnessdeepstandby.apply = lambda: doNothing()
config.lcd.ledblinkingtime = ConfigNothing()
config.lcd.picon_pack = ConfigNothing()
config.misc.standbyCounter.addNotifier(standbyCounterChanged, initial_call=False)
def setLCDLiveTv(value):
if "live_enable" in SystemInfo["LcdLiveTV"]:
open(SystemInfo["LcdLiveTV"], "w").write(value and "enable" or "disable")
else:
open(SystemInfo["LcdLiveTV"], "w").write(value and "0" or "1")
try:
InfoBarInstance = InfoBar.instance
InfoBarInstance and InfoBarInstance.session.open(dummyScreen)
except:
pass
def leaveStandbyLCDLiveTV():
if config.lcd.showTv.value:
setLCDLiveTv(True)
def standbyCounterChangedLCDLiveTV(dummy):
if config.lcd.showTv.value:
from Screens.Standby import inStandby
if leaveStandbyLCDLiveTV not in inStandby.onClose:
inStandby.onClose.append(leaveStandbyLCDLiveTV)
setLCDLiveTv(False)
| gpl-2.0 | 5,197,624,091,695,589,000 | 35.717863 | 141 | 0.705874 | false |
timkrentz/SunTracker | IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/copyright.py | 2 | 1507 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Copyright information for Twisted.
"""
from __future__ import division, absolute_import
from twisted import __version__ as version, version as longversion
longversion = str(longversion)
copyright="""\
Copyright (c) 2001-2014 Twisted Matrix Laboratories.
See LICENSE for details."""
disclaimer='''
Twisted, the Framework of Your Internet
%s
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
''' % (copyright,)
| mit | 8,516,713,586,774,794,000 | 34.756098 | 70 | 0.764433 | false |
apache/aurora | src/test/python/apache/aurora/client/cli/test_status.py | 5 | 23724 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
import json
import re
import textwrap
from mock import patch
from apache.aurora.client.cli import EXIT_INVALID_PARAMETER, EXIT_OK
from apache.aurora.client.cli.client import AuroraCommandLine
from apache.aurora.common.aurora_job_key import AuroraJobKey
from .util import AuroraClientCommandTest, FakeAuroraCommandContext
from gen.apache.aurora.api.ttypes import (
AssignedTask,
GetJobsResult,
JobConfiguration,
JobKey,
Metadata,
Resource,
ResponseCode,
Result,
ScheduledTask,
ScheduleStatus,
ScheduleStatusResult,
TaskConfig,
TaskEvent,
TaskQuery
)
class TestJobStatus(AuroraClientCommandTest):
@classmethod
def create_scheduled_tasks(cls):
tasks = AuroraClientCommandTest.create_scheduled_tasks()
instance = 0
for task in tasks:
instance += 1
task.assignedTask.instanceId = instance
task.assignedTask.task.job = JobKey(role='bozo', environment='test', name='woops')
return tasks
@classmethod
def create_inactive_tasks(cls):
instance = 0
INACTIVE_STATUSES = [ScheduleStatus.KILLED, ScheduleStatus.FINISHED, ScheduleStatus.FAILED]
tasks = cls.create_scheduled_tasks()
for task in tasks:
events = []
for i in range(3):
event = TaskEvent(
timestamp=28234726395 + (273 * i),
status=INACTIVE_STATUSES[i],
message="Hi there")
events.append(event)
task.taskEvents = events
task.status = INACTIVE_STATUSES[instance]
task.assignedTask.instanceId = instance
instance += 1
return set(tasks)
@classmethod
def create_mock_scheduled_task_no_metadata(cls):
result = cls.create_scheduled_tasks()
for job in result:
job.assignedTask.task.metadata = None
return result
@classmethod
def create_mock_scheduled_task_with_metadata(cls):
result = cls.create_scheduled_tasks()
for job in result:
job.assignedTask.task.metadata = [Metadata("meta", "data"), Metadata("data", "meta")]
return result
@classmethod
def create_getjobs_response(cls):
mock_job_one = JobConfiguration(
key=JobKey(
role='RoleA',
environment='test',
name='hithere'))
mock_job_two = JobConfiguration(
key=JobKey(
role='bozo',
environment='test',
name='hello'))
result = cls.create_simple_success_response()
result.result = Result(
getJobsResult=GetJobsResult(configs=[mock_job_one, mock_job_two]))
return result
@classmethod
def create_status_response(cls):
resp = cls.create_simple_success_response()
resp.result = Result(
scheduleStatusResult=ScheduleStatusResult(tasks=set(cls.create_scheduled_tasks())))
return resp
@classmethod
def create_status_null_metadata(cls):
resp = cls.create_simple_success_response()
resp.result = Result(
scheduleStatusResult=ScheduleStatusResult(
tasks=set(cls.create_mock_scheduled_task_no_metadata())))
return resp
@classmethod
def create_status_with_inactives(cls):
resp = cls.create_status_null_metadata()
resp.result.scheduleStatusResult.tasks |= cls.create_inactive_tasks()
return resp
@classmethod
def create_empty_status(cls):
resp = cls.create_simple_success_response()
resp.result = Result(scheduleStatusResult=ScheduleStatusResult(tasks=None))
return resp
def get_task_status_json(cls):
def create_task_events(start_time):
"""Create a list of task events, tracing the task from pending to assigned to running"""
return [
TaskEvent(timestamp=start_time, status=0, message="looking for a host"),
TaskEvent(timestamp=start_time + 10, status=9, message="found a host"),
TaskEvent(timestamp=start_time + 20, status=2, message="running")
]
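    # Note: the numeric statuses used above (0, 9, 2) correspond to the
    # PENDING, ASSIGNED and RUNNING events asserted in the expected JSON
    # later in this test.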
def create_scheduled_task(instance, start_time):
task = ScheduledTask(
assignedTask=AssignedTask(
taskId="task_%s" % instance,
slaveId="random_machine_id",
slaveHost="junk.nothing",
task=TaskConfig(
job=JobKey(role="nobody", environment="prod", name='flibber'),
isService=False,
resources=frozenset(
[Resource(numCpus=2),
Resource(ramMb=2048),
Resource(diskMb=4096)]),
priority=7,
maxTaskFailures=3,
production=False),
assignedPorts={"http": 1001},
instanceId=instance),
status=2,
failureCount=instance + 4,
taskEvents=create_task_events(start_time),
ancestorId="random_task_ancestor%s" % instance)
return task
resp = cls.create_simple_success_response()
scheduleStatus = ScheduleStatusResult()
scheduleStatus.tasks = [
create_scheduled_task(0, 123456),
create_scheduled_task(1, 234567)
]
resp.result = Result(scheduleStatusResult=scheduleStatus)
return resp
@classmethod
def create_status_with_metadata(cls):
resp = cls.create_simple_success_response()
resp.result = Result(scheduleStatusResult=ScheduleStatusResult(
tasks=set(cls.create_mock_scheduled_task_with_metadata())))
return resp
@classmethod
def create_failed_status_response(cls):
return cls.create_blank_response(ResponseCode.INVALID_REQUEST, 'No tasks found for query')
@classmethod
def create_nojobs_status_response(cls):
resp = cls.create_simple_success_response()
resp.result = Result(scheduleStatusResult=ScheduleStatusResult(tasks=set()))
return resp
def test_successful_status_shallow(self):
"""Test the status command at the shallowest level: calling status should end up invoking
the local APIs get_status method."""
mock_context = FakeAuroraCommandContext()
mock_api = mock_context.get_api('west')
mock_api.check_status.return_value = self.create_status_response()
with contextlib.nested(
patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context)):
cmd = AuroraCommandLine()
cmd.execute(['job', 'status', 'west/bozo/test/hello'])
mock_api.check_status.assert_called_with(AuroraJobKey('west', 'bozo', 'test', 'hello'))
def test_successful_status_shallow_nometadata(self):
"""Regression test: there was a crasher bug when metadata was None."""
mock_context = FakeAuroraCommandContext()
mock_api = mock_context.get_api('west')
mock_api.check_status.return_value = self.create_status_null_metadata()
with contextlib.nested(
patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context)):
cmd = AuroraCommandLine()
cmd.execute(['job', 'status', 'west/bozo/test/hello'])
mock_api.check_status.assert_called_with(AuroraJobKey('west', 'bozo', 'test', 'hello'))
def test_successful_status_deep(self):
"""Test the status command more deeply: in a request with a fully specified
job, it should end up doing a query using getTasksWithoutConfigs."""
_, mock_scheduler_proxy = self.create_mock_api()
mock_scheduler_proxy.getTasksWithoutConfigs.return_value = self.create_status_null_metadata()
with patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy):
cmd = AuroraCommandLine()
cmd.execute(['job', 'status', 'west/bozo/test/hello'])
mock_scheduler_proxy.getTasksWithoutConfigs.assert_called_with(
TaskQuery(jobKeys=[JobKey(role='bozo', environment='test', name='hello')]),
retry=True)
def test_successful_status_output_no_metadata(self):
"""Test the status command more deeply: in a request with a fully specified
job, it should end up doing a query using getTasksWithoutConfigs."""
mock_context = FakeAuroraCommandContext()
mock_context.add_expected_status_query_result(self.create_status_null_metadata())
with patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context):
cmd = AuroraCommandLine()
cmd.execute(['job', 'status', 'west/bozo/test/hello'])
actual = re.sub("\\d\\d:\\d\\d:\\d\\d", "##:##:##", '\n'.join(mock_context.get_out()))
expected = textwrap.dedent("""\
Active tasks (3):
\tTask role: bozo, env: test, name: woops, instance: 1, status: RUNNING on slavehost
\t CPU: 2 core(s), RAM: 2 MB, Disk: 2 MB
\t events:
\t 1970-11-23 ##:##:## RUNNING: Hi there
\tTask role: bozo, env: test, name: woops, instance: 2, status: RUNNING on slavehost
\t CPU: 2 core(s), RAM: 2 MB, Disk: 2 MB
\t events:
\t 1970-11-23 ##:##:## RUNNING: Hi there
\tTask role: bozo, env: test, name: woops, instance: 3, status: RUNNING on slavehost
\t CPU: 2 core(s), RAM: 2 MB, Disk: 2 MB
\t events:
\t 1970-11-23 ##:##:## RUNNING: Hi there
Inactive tasks (0):
""")
assert actual == expected
def test_successful_status_output_with_inactives(self):
"""Test the status command more deeply: in a request with a fully specified
job, it should end up doing a query using getTasksWithoutConfigs."""
mock_context = FakeAuroraCommandContext()
mock_context.add_expected_status_query_result(self.create_status_with_inactives())
with patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context):
cmd = AuroraCommandLine()
cmd.execute(['job', 'status', 'west/bozo/test/hello'])
actual = re.sub("\\d\\d:\\d\\d:\\d\\d", "##:##:##", '\n'.join(mock_context.get_out()))
print("==actual======================\n%s\n========================" % actual)
expected = textwrap.dedent("""\
Active tasks (3):
\tTask role: bozo, env: test, name: woops, instance: 1, status: RUNNING on slavehost
\t CPU: 2 core(s), RAM: 2 MB, Disk: 2 MB
\t events:
\t 1970-11-23 ##:##:## RUNNING: Hi there
\tTask role: bozo, env: test, name: woops, instance: 2, status: RUNNING on slavehost
\t CPU: 2 core(s), RAM: 2 MB, Disk: 2 MB
\t events:
\t 1970-11-23 ##:##:## RUNNING: Hi there
\tTask role: bozo, env: test, name: woops, instance: 3, status: RUNNING on slavehost
\t CPU: 2 core(s), RAM: 2 MB, Disk: 2 MB
\t events:
\t 1970-11-23 ##:##:## RUNNING: Hi there
Inactive tasks (3):
\tTask role: bozo, env: test, name: woops, instance: 0, status: KILLED on slavehost
\t CPU: 2 core(s), RAM: 2 MB, Disk: 2 MB
\t events:
\t 1970-11-23 ##:##:## KILLED: Hi there
\t 1970-11-23 ##:##:## FINISHED: Hi there
\t 1970-11-23 ##:##:## FAILED: Hi there
\tTask role: bozo, env: test, name: woops, instance: 1, status: FINISHED on slavehost
\t CPU: 2 core(s), RAM: 2 MB, Disk: 2 MB
\t events:
\t 1970-11-23 ##:##:## KILLED: Hi there
\t 1970-11-23 ##:##:## FINISHED: Hi there
\t 1970-11-23 ##:##:## FAILED: Hi there
\tTask role: bozo, env: test, name: woops, instance: 2, status: FAILED on slavehost
\t CPU: 2 core(s), RAM: 2 MB, Disk: 2 MB
\t events:
\t 1970-11-23 ##:##:## KILLED: Hi there
\t 1970-11-23 ##:##:## FINISHED: Hi there
\t 1970-11-23 ##:##:## FAILED: Hi there
""")
print("==expected======================\n%s\n========================" % expected)
assert actual == expected
def test_successful_status_output_with_metadata(self):
"""Test the status command more deeply: in a request with a fully specified
job, it should end up doing a query using getTasksWithoutConfigs."""
mock_context = FakeAuroraCommandContext()
mock_context.add_expected_status_query_result(self.create_status_with_metadata())
with patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context):
cmd = AuroraCommandLine()
cmd.execute(['job', 'status', 'west/bozo/test/hello'])
actual = re.sub("\\d\\d:\\d\\d:\\d\\d", "##:##:##", '\n'.join(mock_context.get_out()))
expected = textwrap.dedent("""\
Active tasks (3):
\tTask role: bozo, env: test, name: woops, instance: 1, status: RUNNING on slavehost
\t CPU: 2 core(s), RAM: 2 MB, Disk: 2 MB
\t events:
\t 1970-11-23 ##:##:## RUNNING: Hi there
\t metadata:
\t\t (key: 'meta', value: 'data')
\t\t (key: 'data', value: 'meta')
\tTask role: bozo, env: test, name: woops, instance: 2, status: RUNNING on slavehost
\t CPU: 2 core(s), RAM: 2 MB, Disk: 2 MB
\t events:
\t 1970-11-23 ##:##:## RUNNING: Hi there
\t metadata:
\t\t (key: 'meta', value: 'data')
\t\t (key: 'data', value: 'meta')
\tTask role: bozo, env: test, name: woops, instance: 3, status: RUNNING on slavehost
\t CPU: 2 core(s), RAM: 2 MB, Disk: 2 MB
\t events:
\t 1970-11-23 ##:##:## RUNNING: Hi there
\t metadata:
\t\t (key: 'meta', value: 'data')
\t\t (key: 'data', value: 'meta')
Inactive tasks (0):
""")
print("=======actual======\n%s\n==================" % actual)
print("==expected======================\n%s\n========================" % expected)
assert actual == expected
def test_successful_status_deep_null_metadata(self):
(mock_api, mock_scheduler_proxy) = self.create_mock_api()
mock_scheduler_proxy.getTasksWithoutConfigs.return_value = self.create_status_null_metadata()
with patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy):
cmd = AuroraCommandLine()
cmd.execute(['job', 'status', 'west/bozo/test/hello'])
mock_scheduler_proxy.getTasksWithoutConfigs.assert_called_with(
TaskQuery(jobKeys=[JobKey(role='bozo', environment='test', name='hello')]),
retry=True)
def test_status_wildcard(self):
"""Test status using a wildcard. It should first call api.get_jobs, and then do a
getTasksWithoutConfigs on each job."""
mock_context = FakeAuroraCommandContext()
mock_api = mock_context.get_api('west')
mock_api.check_status.return_value = self.create_status_response()
mock_api.get_jobs.return_value = self.create_getjobs_response()
with patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context):
cmd = AuroraCommandLine()
cmd.execute(['job', 'status', '*'])
# Wildcard should have expanded to two jobs, so there should be two calls
# to check_status.
assert mock_api.check_status.call_count == 2
assert mock_api.check_status.call_args_list[0][0][0].cluster == 'west'
assert mock_api.check_status.call_args_list[0][0][0].role == 'RoleA'
assert mock_api.check_status.call_args_list[0][0][0].env == 'test'
assert mock_api.check_status.call_args_list[0][0][0].name == 'hithere'
assert mock_api.check_status.call_args_list[1][0][0].cluster == 'west'
assert mock_api.check_status.call_args_list[1][0][0].role == 'bozo'
assert mock_api.check_status.call_args_list[1][0][0].env == 'test'
assert mock_api.check_status.call_args_list[1][0][0].name == 'hello'
def test_status_wildcard_two(self):
"""Test status using a wildcard. It should first call api.get_jobs, and then do a
getTasksWithoutConfigs on each job. This time, use a pattern that doesn't match
all of the jobs."""
mock_context = FakeAuroraCommandContext()
mock_api = mock_context.get_api('west')
mock_api.check_status.return_value = self.create_status_response()
mock_api.get_jobs.return_value = self.create_getjobs_response()
with contextlib.nested(
patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context)):
cmd = AuroraCommandLine()
cmd.execute(['job', 'status', 'example/*/*/hello'])
# Wildcard should have expanded to two jobs, but only matched one,
# so there should be one call to check_status.
assert mock_api.check_status.call_count == 1
mock_api.check_status.assert_called_with(
AuroraJobKey('example', 'bozo', 'test', 'hello'))
def test_unsuccessful_status_shallow(self):
"""Test the status command at the shallowest level: calling status should end up invoking
the local APIs get_status method."""
# Calls api.check_status, which calls scheduler_proxy.getJobs
mock_context = FakeAuroraCommandContext()
mock_api = mock_context.get_api('west')
mock_api.check_status.return_value = self.create_failed_status_response()
with contextlib.nested(
patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context)):
cmd = AuroraCommandLine()
result = cmd.execute(['job', 'status', 'west/bozo/test/hello'])
assert result == EXIT_INVALID_PARAMETER
def test_no_jobs_found_status_shallow(self):
# Calls api.check_status, which calls scheduler_proxy.getJobs
mock_context = FakeAuroraCommandContext()
mock_api = mock_context.get_api('west')
mock_api.check_status.return_value = self.create_nojobs_status_response()
with contextlib.nested(
patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context)):
cmd = AuroraCommandLine()
result = cmd.execute(['job', 'status', '--write-json', 'west/bozo/test/hello'])
assert mock_context.get_out() == [
'{"jobspec":"west/bozo/test/hello","error":"No matching jobs found"}']
assert result == EXIT_OK
def test_successful_status_json_output_no_metadata(self):
"""Test the status command more deeply: in a request with a fully specified
job, it should end up doing a query using getTasksWithoutConfigs."""
mock_context = FakeAuroraCommandContext()
mock_context.add_expected_status_query_result(self.get_task_status_json())
with patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context):
cmd = AuroraCommandLine()
cmd.execute(['job', 'status', '--write-json', 'west/bozo/test/hello'])
actual = re.sub("\\d\\d:\\d\\d:\\d\\d", "##:##:##", '\n'.join(mock_context.get_out()))
actual_sorted = json.loads(actual)
expected = [
{
"active": [
{
"status": "RUNNING",
"assignedTask": {
"task": {
"isService": False,
"container": {
"mesos": {}
},
"maxTaskFailures": 3,
"priority": 7,
"job": {
"environment": "prod",
"role": "nobody",
"name": "flibber"
},
"production": False,
"resources": [
{
"numCpus": 2
},
{
"ramMb": 2048
},
{
"diskMb": 4096
}
]
},
"taskId": "task_0",
"instanceId": 0,
"assignedPorts": {
"http": 1001
},
"slaveHost": "junk.nothing",
"slaveId": "random_machine_id"
},
"ancestorId": "random_task_ancestor0",
"taskEvents": [
{
"status": "PENDING",
"timestamp": 123456,
"message": "looking for a host"
},
{
"status": "ASSIGNED",
"timestamp": 123466,
"message": "found a host"
},
{
"status": "RUNNING",
"timestamp": 123476,
"message": "running"
}
],
"failureCount": 4
},
{
"status": "RUNNING",
"assignedTask": {
"task": {
"isService": False,
"container": {
"mesos": {}
},
"maxTaskFailures": 3,
"priority": 7,
"job": {
"environment": "prod",
"role": "nobody",
"name": "flibber"
},
"production": False,
"resources": [
{
"numCpus": 2
},
{
"ramMb": 2048
},
{
"diskMb": 4096
}
]
},
"taskId": "task_1",
"instanceId": 1,
"assignedPorts": {
"http": 1001
},
"slaveHost": "junk.nothing",
"slaveId": "random_machine_id"
},
"ancestorId": "random_task_ancestor1",
"taskEvents": [
{
"status": "PENDING",
"timestamp": 234567,
"message": "looking for a host"
},
{
"status": "ASSIGNED",
"timestamp": 234577,
"message": "found a host"
},
{
"status": "RUNNING",
"timestamp": 234587,
"message": "running"
}
],
"failureCount": 5
}
],
"job": "west/bozo/test/hello",
"inactive": []
}
]
for entry in actual_sorted[0]["active"]:
entry["assignedTask"]["task"]["resources"] = sorted(
entry["assignedTask"]["task"]["resources"], key=str)
for entry in expected[0]["active"]:
entry["assignedTask"]["task"]["resources"] = sorted(
entry["assignedTask"]["task"]["resources"], key=str)
assert actual_sorted == expected
def test_status_job_not_found(self):
"""Regression test: there was a crasher bug when metadata was None."""
mock_context = FakeAuroraCommandContext()
mock_api = mock_context.get_api('west')
mock_api.check_status.return_value = self.create_empty_status()
with contextlib.nested(
patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context)):
cmd = AuroraCommandLine()
result = cmd.execute(['job', 'status', 'west/bozo/test/hello'])
assert result == EXIT_INVALID_PARAMETER
assert mock_context.get_err() == ["Found no jobs matching west/bozo/test/hello"]
| apache-2.0 | -5,639,774,579,405,374,000 | 40.548161 | 97 | 0.579371 | false |
jcpowermac/ansible-modules-extras | cloud/cloudstack/cs_user.py | 15 | 13222 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: cs_user
short_description: Manages users on Apache CloudStack based clouds.
description:
- Create, update, disable, lock, enable and remove users.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
username:
description:
- Username of the user.
required: true
account:
description:
- Account the user will be created under.
- Required on C(state=present).
required: false
default: null
password:
description:
- Password of the user to be created.
- Required on C(state=present).
- Only considered on creation and will not be updated if user exists.
required: false
default: null
first_name:
description:
- First name of the user.
- Required on C(state=present).
required: false
default: null
last_name:
description:
- Last name of the user.
- Required on C(state=present).
required: false
default: null
email:
description:
- Email of the user.
- Required on C(state=present).
required: false
default: null
timezone:
description:
- Timezone of the user.
required: false
default: null
domain:
description:
- Domain the user is related to.
required: false
default: 'ROOT'
state:
description:
- State of the user.
- C(unlocked) is an alias for C(enabled).
required: false
default: 'present'
choices: [ 'present', 'absent', 'enabled', 'disabled', 'locked', 'unlocked' ]
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# create an user in domain 'CUSTOMERS'
local_action:
module: cs_user
account: developers
username: johndoe
password: S3Cur3
last_name: Doe
first_name: John
email: [email protected]
domain: CUSTOMERS
# Lock an existing user in domain 'CUSTOMERS'
local_action:
module: cs_user
username: johndoe
domain: CUSTOMERS
state: locked
# Disable an existing user in domain 'CUSTOMERS'
local_action:
module: cs_user
username: johndoe
domain: CUSTOMERS
state: disabled
# Enable/unlock an existing user in domain 'CUSTOMERS'
local_action:
module: cs_user
username: johndoe
domain: CUSTOMERS
state: enabled
# Remove an user in domain 'CUSTOMERS'
local_action:
module: cs_user
name: customer_xy
domain: CUSTOMERS
state: absent
'''
RETURN = '''
---
id:
description: UUID of the user.
returned: success
type: string
sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8
username:
description: Username of the user.
returned: success
type: string
sample: johndoe
first_name:
description: First name of the user.
returned: success
type: string
sample: John
last_name:
description: Last name of the user.
returned: success
type: string
sample: Doe
email:
  description: Email of the user.
returned: success
type: string
sample: [email protected]
api_key:
description: API key of the user.
returned: success
type: string
sample: JLhcg8VWi8DoFqL2sSLZMXmGojcLnFrOBTipvBHJjySODcV4mCOo29W2duzPv5cALaZnXj5QxDx3xQfaQt3DKg
api_secret:
description: API secret of the user.
returned: success
type: string
sample: FUELo3LB9fa1UopjTLPdqLv_6OXQMJZv9g9N4B_Ao3HFz8d6IGFCV9MbPFNM8mwz00wbMevja1DoUNDvI8C9-g
account:
description: Account name of the user.
returned: success
type: string
sample: developers
account_type:
description: Type of the account.
returned: success
type: string
sample: user
timezone:
description: Timezone of the user.
returned: success
type: string
  sample: America/New_York
created:
description: Date the user was created.
returned: success
type: string
  sample: "2014-12-01T14:57:57+0100"
state:
description: State of the user.
returned: success
type: string
sample: enabled
domain:
description: Domain the user is related.
returned: success
type: string
sample: ROOT
'''
try:
from cs import CloudStack, CloudStackException, read_config
has_lib_cs = True
except ImportError:
has_lib_cs = False
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackUser(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackUser, self).__init__(module)
self.returns = {
'username': 'username',
'firstname': 'first_name',
'lastname': 'last_name',
'email': 'email',
'secretkey': 'api_secret',
'apikey': 'api_key',
'timezone': 'timezone',
}
self.account_types = {
'user': 0,
'root_admin': 1,
'domain_admin': 2,
}
self.user = None
def get_account_type(self):
account_type = self.module.params.get('account_type')
return self.account_types[account_type]
def get_user(self):
if not self.user:
args = {}
args['domainid'] = self.get_domain('id')
users = self.cs.listUsers(**args)
if users:
user_name = self.module.params.get('username')
for u in users['user']:
if user_name.lower() == u['username'].lower():
self.user = u
break
return self.user
def enable_user(self):
user = self.get_user()
if not user:
user = self.present_user()
if user['state'].lower() != 'enabled':
self.result['changed'] = True
args = {}
args['id'] = user['id']
if not self.module.check_mode:
res = self.cs.enableUser(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
user = res['user']
return user
def lock_user(self):
user = self.get_user()
if not user:
user = self.present_user()
# we need to enable the user to lock it.
if user['state'].lower() == 'disabled':
user = self.enable_user()
if user['state'].lower() != 'locked':
self.result['changed'] = True
args = {}
args['id'] = user['id']
if not self.module.check_mode:
res = self.cs.lockUser(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
user = res['user']
return user
def disable_user(self):
user = self.get_user()
if not user:
user = self.present_user()
if user['state'].lower() != 'disabled':
self.result['changed'] = True
args = {}
args['id'] = user['id']
if not self.module.check_mode:
user = self.cs.disableUser(**args)
if 'errortext' in user:
self.module.fail_json(msg="Failed: '%s'" % user['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
user = self._poll_job(user, 'user')
return user
def present_user(self):
missing_params = []
for required_params in [
'account',
'email',
'password',
'first_name',
'last_name',
]:
if not self.module.params.get(required_params):
missing_params.append(required_params)
if missing_params:
self.module.fail_json(msg="missing required arguments: %s" % ','.join(missing_params))
user = self.get_user()
if user:
user = self._update_user(user)
else:
user = self._create_user(user)
return user
def _create_user(self, user):
self.result['changed'] = True
args = {}
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain('id')
args['username'] = self.module.params.get('username')
args['password'] = self.module.params.get('password')
args['firstname'] = self.module.params.get('first_name')
args['lastname'] = self.module.params.get('last_name')
args['email'] = self.module.params.get('email')
args['timezone'] = self.module.params.get('timezone')
if not self.module.check_mode:
res = self.cs.createUser(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
user = res['user']
# register user api keys
res = self.cs.registerUserKeys(id=user['id'])
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
user.update(res['userkeys'])
return user
def _update_user(self, user):
args = {}
args['id'] = user['id']
args['firstname'] = self.module.params.get('first_name')
args['lastname'] = self.module.params.get('last_name')
args['email'] = self.module.params.get('email')
args['timezone'] = self.module.params.get('timezone')
if self.has_changed(args, user):
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.updateUser(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
user = res['user']
# register user api keys
if 'apikey' not in user:
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.registerUserKeys(id=user['id'])
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
user.update(res['userkeys'])
return user
def absent_user(self):
user = self.get_user()
if user:
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.deleteUser(id=user['id'])
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
return user
def get_result(self, user):
super(AnsibleCloudStackUser, self).get_result(user)
if user:
if 'accounttype' in user:
for key,value in self.account_types.items():
if value == user['accounttype']:
self.result['account_type'] = key
break
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
username = dict(required=True),
account = dict(default=None),
state = dict(choices=['present', 'absent', 'enabled', 'disabled', 'locked', 'unlocked'], default='present'),
domain = dict(default='ROOT'),
email = dict(default=None),
first_name = dict(default=None),
last_name = dict(default=None),
password = dict(default=None),
timezone = dict(default=None),
poll_async = dict(type='bool', default=True),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
if not has_lib_cs:
module.fail_json(msg="python library cs required: pip install cs")
try:
acs_acc = AnsibleCloudStackUser(module)
state = module.params.get('state')
if state in ['absent']:
user = acs_acc.absent_user()
elif state in ['enabled', 'unlocked']:
user = acs_acc.enable_user()
elif state in ['disabled']:
user = acs_acc.disable_user()
elif state in ['locked']:
user = acs_acc.lock_user()
else:
user = acs_acc.present_user()
result = acs_acc.get_result(user)
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 | 7,736,647,982,484,695,000 | 27.73913 | 116 | 0.579803 | false |
dannyperry571/theapprentice | script.module.youtube.dl/lib/youtube_dl/extractor/tva.py | 33 | 1987 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_iso8601,
smuggle_url,
)
class TVAIE(InfoExtractor):
_VALID_URL = r'https?://videos\.tva\.ca/episode/(?P<id>\d+)'
_TEST = {
'url': 'http://videos.tva.ca/episode/85538',
'info_dict': {
'id': '85538',
'ext': 'mp4',
'title': 'Épisode du 25 janvier 2017',
'description': 'md5:e9e7fb5532ab37984d2dc87229cadf98',
'upload_date': '20170126',
'timestamp': 1485442329,
},
'params': {
# m3u8 download
'skip_download': True,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
video_data = self._download_json(
"https://d18jmrhziuoi7p.cloudfront.net/isl/api/v1/dataservice/Items('%s')" % video_id,
video_id, query={
'$expand': 'Metadata,CustomId',
'$select': 'Metadata,Id,Title,ShortDescription,LongDescription,CreatedDate,CustomId,AverageUserRating,Categories,ShowName',
'$format': 'json',
})
metadata = video_data.get('Metadata', {})
return {
'_type': 'url_transparent',
'id': video_id,
'title': video_data['Title'],
'url': smuggle_url('ooyala:' + video_data['CustomId'], {'supportedformats': 'm3u8,hds'}),
'description': video_data.get('LongDescription') or video_data.get('ShortDescription'),
'series': video_data.get('ShowName'),
'episode': metadata.get('EpisodeTitle'),
'episode_number': int_or_none(metadata.get('EpisodeNumber')),
'categories': video_data.get('Categories'),
'average_rating': video_data.get('AverageUserRating'),
'timestamp': parse_iso8601(video_data.get('CreatedDate')),
'ie_key': 'Ooyala',
}
| gpl-2.0 | 1,902,547,462,150,656,300 | 35.777778 | 139 | 0.550352 | false |
avlach/univbris-ocf | optin_manager/src/python/openflow/optin_manager/auto_approval_scripts/approve_sender_ip.py | 4 | 3540 | from openflow.optin_manager.flowspace.utils import *
def approve(request_info):
'''
The function for auto-approving all the user flowspace requests.
@param request_info: a dictionary containing the following key-value pairs:
@key req_ip_addr_src: @value: requested source IP address x.x.x.x - string
@key req_ip_addr_dst: @value: requested destination IP address x.x.x.x - string
@key req_mac_addr_src: @value: requested source MAC address xx:xx:xx:xx:xx:xx - string
@key req_mac_addr_dst: @value: requested destination MAC address xx:xx:xx:xx:xx:xx - string
@key user_last_name: @value: the last name of user - string
@key user_first_name: @value: the first name of user - string
@key user_email: @value: email address of the user - string
@key remote_addr: @value: the IP address of the user when requesting the flowpsace (x.x.x.x) - string
@return: An array of dictionaries that specifies the limitations on approved flowpsaces.
Each element in the returned array is treated as one approved flowspace.
each dictionary has the following possible key-value pairs:
@key: can be any of the following strings, as in a FlowSpace object:
"mac_src_s", "mac_src_e"
"mac_dst_s", "mac_dst_e"
"eth_type_s", "eth_type_e"
"vlan_id_s", "vlan_id_e"
"ip_src_s", "ip_src_e"
"ip_dst_s", "ip_dst_e"
"ip_proto_s", "ip_proto_e"
"tp_src_s", "tp_src_e"
"tp_dst_s", "tp_dst_e"
@value: should be an integer in the correct range for each field
    @note: The approved flowspace should be a strict subset of requested flowspace.
@note: To POSTPONE the decision for a manual consideration, return None
@note: to REJECT/Approve NOTHING return an empty array
'''
if "remote_addr" not in request_info:
return []
if ("req_ip_addr_src" in request_info and
dotted_ip_to_int(request_info["req_ip_addr_src"]) !=
dotted_ip_to_int(request_info["remote_addr"])):
return []
if ("req_ip_addr_dst" in request_info and
dotted_ip_to_int(request_info["req_ip_addr_dst"]) !=
dotted_ip_to_int(request_info["remote_addr"])):
return []
if ("req_mac_addr_src" in request_info) and ("req_ip_addr_src" not in request_info):
request_info["req_ip_addr_src"] = request_info["remote_addr"]
if ("req_mac_addr_dst" in request_info) and ("req_ip_addr_dst" not in request_info):
request_info["req_ip_addr_dst"] = request_info["remote_addr"]
return_array =[{}]
if "req_ip_addr_src" in request_info:
return_array[0]["ip_src_s"] = dotted_ip_to_int(request_info["req_ip_addr_src"])
return_array[0]["ip_src_e"] = dotted_ip_to_int(request_info["req_ip_addr_src"])
if "req_ip_addr_dst" in request_info:
return_array[0]["ip_dst_s"] = dotted_ip_to_int(request_info["req_ip_addr_dst"])
return_array[0]["ip_dst_e"] = dotted_ip_to_int(request_info["req_ip_addr_dst"])
if "req_mac_addr_src" in request_info:
return_array[0]["mac_src_s"] = mac_to_int(request_info["req_mac_addr_src"])
return_array[0]["mac_src_e"] = mac_to_int(request_info["req_mac_addr_src"])
if "req_mac_addr_dst" in request_info:
return_array[0]["mac_dst_s"] = mac_to_int(request_info["req_mac_addr_dst"])
return_array[0]["mac_dst_e"] = mac_to_int(request_info["req_mac_addr_dst"])
return return_array
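# Illustrative call (a sketch with made-up addresses, not part of the original
# script): a user at 10.0.0.5 asking only for traffic sourced from its own IP
# is approved for exactly that flowspace, e.g.
#   approve({"remote_addr": "10.0.0.5", "req_ip_addr_src": "10.0.0.5"})
# returns [{"ip_src_s": dotted_ip_to_int("10.0.0.5"),
#           "ip_src_e": dotted_ip_to_int("10.0.0.5")}]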
| bsd-3-clause | 9,173,703,786,422,742,000 | 50.304348 | 109 | 0.627119 | false |
chinmaygarde/mojo | testing/legion/rpc_server.py | 13 | 4291 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The task RPC server code.
This server is an XML-RPC server which serves code from
rpc_methods.RPCMethods.
This server will run until shutdown is called on the server object. This can
be achieved in 2 ways:
- Calling the Quit RPC method defined in RPCMethods
- Not receiving any calls within the idle_timeout_secs time.
"""
import logging
import threading
import time
import xmlrpclib
import SimpleXMLRPCServer
import SocketServer
#pylint: disable=relative-import
import common_lib
import rpc_methods
class RequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
"""Restricts access to only specified IP address.
  This class assumes the server is RPCServer.
"""
def do_POST(self):
"""Verifies the task is authorized to perform RPCs."""
if self.client_address[0] != self.server.authorized_address:
logging.error('Received unauthorized RPC request from %s',
                    self.client_address[0])
self.send_response(403)
response = 'Forbidden'
self.send_header('Content-type', 'text/plain')
self.send_header('Content-length', str(len(response)))
self.end_headers()
self.wfile.write(response)
else:
return SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.do_POST(self)
class RPCServer(SimpleXMLRPCServer.SimpleXMLRPCServer,
SocketServer.ThreadingMixIn):
"""Restricts all endpoints to only specified IP addresses."""
def __init__(self, authorized_address,
idle_timeout_secs=common_lib.DEFAULT_TIMEOUT_SECS):
SimpleXMLRPCServer.SimpleXMLRPCServer.__init__(
self, (common_lib.SERVER_ADDRESS, common_lib.SERVER_PORT),
allow_none=True, logRequests=False,
requestHandler=RequestHandler)
self.authorized_address = authorized_address
self.idle_timeout_secs = idle_timeout_secs
self.register_instance(rpc_methods.RPCMethods(self))
self._shutdown_requested_event = threading.Event()
self._rpc_received_event = threading.Event()
self._idle_thread = threading.Thread(target=self._CheckForIdleQuit)
def shutdown(self):
"""Shutdown the server.
This overloaded method sets the _shutdown_requested_event to allow the
idle timeout thread to quit.
"""
self._shutdown_requested_event.set()
SimpleXMLRPCServer.SimpleXMLRPCServer.shutdown(self)
logging.info('Server shutdown complete')
def serve_forever(self, poll_interval=0.5):
"""Serve forever.
This overloaded method starts the idle timeout thread before calling
serve_forever. This ensures the idle timer thread doesn't get started
without the server running.
Args:
poll_interval: The interval to poll for shutdown.
"""
logging.info('RPC server starting')
self._idle_thread.start()
SimpleXMLRPCServer.SimpleXMLRPCServer.serve_forever(self, poll_interval)
def _dispatch(self, method, params):
"""Dispatch the call to the correct method with the provided params.
This overloaded method adds logging to help trace connection and
call problems.
Args:
method: The method name to call.
params: A tuple of parameters to pass.
Returns:
The result of the parent class' _dispatch method.
"""
logging.debug('Calling %s%s', method, params)
self._rpc_received_event.set()
return SimpleXMLRPCServer.SimpleXMLRPCServer._dispatch(self, method, params)
def _CheckForIdleQuit(self):
"""Check for, and exit, if the server is idle for too long.
This method must be run in a separate thread to avoid a deadlock when
calling server.shutdown.
"""
timeout = time.time() + self.idle_timeout_secs
while time.time() < timeout:
if self._shutdown_requested_event.is_set():
# An external source called shutdown()
return
elif self._rpc_received_event.is_set():
logging.debug('Resetting the idle timeout')
timeout = time.time() + self.idle_timeout_secs
self._rpc_received_event.clear()
time.sleep(1)
# We timed out, kill the server
logging.warning('Shutting down the server due to the idle timeout')
self.shutdown()
| bsd-3-clause | 8,067,291,218,343,199,000 | 32.523438 | 80 | 0.710324 | false |
mickele77/FreeCAD | src/Mod/Path/PathScripts/PathPocket.py | 7 | 14268 | # -*- coding: utf-8 -*-
#***************************************************************************
#* *
#* Copyright (c) 2014 Yorik van Havre <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import FreeCAD,Path
from PySide import QtCore,QtGui
from PathScripts import PathUtils
FreeCADGui = None
if FreeCAD.GuiUp:
import FreeCADGui
"""Path Pocket object and FreeCAD command"""
# Qt tanslation handling
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def translate(context, text, disambig=None):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def translate(context, text, disambig=None):
return QtGui.QApplication.translate(context, text, disambig)
def frange(start, stop, step, finish):
x = []
curdepth = start
if step == 0:
return x
# do the base cuts until finishing round
while curdepth >= stop + step + finish:
curdepth = curdepth - step
if curdepth <= stop + finish:
curdepth = stop + finish
x.append(curdepth)
# we might have to do a last pass or else finish round might be too far away
if curdepth - stop > finish:
x.append(stop + finish)
    # do the finishing round
if curdepth >= stop:
curdepth = stop
x.append(curdepth)
# Why this?
# if start >= stop:
# start = stop
# x.append (start)
return x
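# Illustrative trace of frange (made-up depths, not from the original file):
# frange(start=0, stop=-6, step=2, finish=0.5) returns [-2, -4, -5.5, -6],
# i.e. stepped roughing cuts, a pass that leaves the 0.5 finish allowance,
# and the finishing cut at the final depth.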
class ObjectPocket:
def __init__(self,obj):
obj.addProperty("App::PropertyLinkSub","Base","Path",translate("PathProject","The base geometry of this object"))
obj.addProperty("App::PropertyIntegerConstraint","ToolNumber","Tool",
translate("PathProfile","The tool number in use"))
obj.ToolNumber = (0, 0, 1000, 0)
obj.addProperty("App::PropertyFloat", "ClearanceHeight", "Pocket", translate("PathProject","The height needed to clear clamps and obstructions"))
obj.addProperty("App::PropertyFloatConstraint", "StepDown", "Pocket", translate("PathProject","Incremental Step Down of Tool"))
obj.StepDown = (0.0, 0.0, 100.0, 1.0)
obj.addProperty("App::PropertyFloat", "StartDepth", "Pocket", translate("PathProject","Starting Depth of Tool- first cut depth in Z"))
obj.addProperty("App::PropertyBool","UseStartDepth","Pocket",translate("PathProject","make True, if manually specifying a Start Start Depth"))
obj.addProperty("App::PropertyFloat", "FinalDepth", "Pocket", translate("PathProject","Final Depth of Tool- lowest value in Z"))
obj.addProperty("App::PropertyFloat", "RetractHeight", "Pocket", translate("PathProject","The height desired to retract tool when path is finished"))
obj.addProperty("App::PropertyEnumeration", "CutMode", "Pocket",translate("PathProject", "The direction that the toolpath should go around the part ClockWise CW or CounterClockWise CCW"))
obj.CutMode = ['Climb','Conventional']
obj.addProperty("App::PropertyFloat", "MaterialAllowance", "Pocket", translate("PathProject","Amount of material to leave"))
obj.addProperty("App::PropertyFloat", "FinishDepth", "Pocket", translate("PathProject","Maximum material removed on final pass."))
obj.addProperty("App::PropertyEnumeration", "StartAt", "Pocket",translate("PathProject", "Start pocketing at center or boundary"))
obj.StartAt = ['Center', 'Edge']
obj.addProperty("App::PropertyFloatConstraint", "VertFeed", "Feed",translate("Vert Feed","Feed rate for vertical moves in Z"))
obj.VertFeed = (0.0, 0.0, 100000.0, 1.0)
obj.addProperty("App::PropertyFloatConstraint", "HorizFeed", "Feed",translate("Horiz Feed","Feed rate for horizontal moves"))
obj.HorizFeed = (0.0, 0.0, 100000.0, 1.0)
obj.addProperty("App::PropertyBool","Active","Path",translate("PathProject","Make False, to prevent operation from generating code"))
obj.addProperty("App::PropertyString","Comment","Path",translate("PathProject","An optional comment for this profile"))
obj.Proxy = self
def __getstate__(self):
return None
def __setstate__(self,state):
return None
def getStock(self,obj):
"retrieves a stock object from hosting project if any"
for o in obj.InList:
if hasattr(o,"Group"):
for g in o.Group:
if hasattr(g,"Height_Allowance"):
return o
# not found? search one level up
for o in obj.InList:
return self.getStock(o)
return None
def execute(self,obj):
if obj.Base:
tool = PathUtils.getLastTool(obj)
if tool:
radius = tool.Diameter/2
if radius < 0:# safe guard
radius -= radius
else:
# temporary value, to be taken from the properties later on
radius = 1
import Part, DraftGeomUtils
if "Face" in obj.Base[1][0]:
shape = getattr(obj.Base[0].Shape,obj.Base[1][0])
else:
edges = [getattr(obj.Base[0].Shape,sub) for sub in obj.Base[1]]
shape = Part.Wire(edges)
                # print len(edges)
# absolute coords, millimeters, cancel offsets
output = "G90\nG21\nG40\n"
# save tool
if obj.ToolNumber > 0 and tool.ToolNumber != obj.ToolNumber:
output += "M06 T" + str(tool.ToolNumber) + "\n"
# build offsets
offsets = []
nextradius = radius
result = DraftGeomUtils.pocket2d(shape,nextradius)
while result:
offsets.extend(result)
nextradius += radius
result = DraftGeomUtils.pocket2d(shape,nextradius)
# first move will be rapid, subsequent will be at feed rate
first = True
startPoint = None
fastZPos = max(obj.StartDepth + 2, obj.RetractHeight)
            # reverse the list so we start with the outer wires
if obj.StartAt != 'Edge':
offsets.reverse()
# print "startDepth: " + str(obj.StartDepth)
# print "finalDepth: " + str(obj.FinalDepth)
# print "stepDown: " + str(obj.StepDown)
# print "finishDepth" + str(obj.FinishDepth)
# print "offsets:", len(offsets)
def prnt(vlu): return str(round(vlu, 4))
for vpos in frange(obj.StartDepth, obj.FinalDepth, obj.StepDown, obj.FinishDepth):
# print "vpos: " + str(vpos)
# loop over successive wires
for currentWire in offsets:
# print "new line (offset)"
last = None
for edge in currentWire.Edges:
# print "new edge"
if not last:
# we set the base GO to our fast move to our starting pos
if first:
startPoint = edge.Vertexes[0].Point
output += "G0 X" + prnt(startPoint.x) + " Y" + prnt(startPoint.y) +\
" Z" + prnt(fastZPos) + "\n"
first = False
#then move slow down to our starting point for our profile
last = edge.Vertexes[0].Point
output += "G1 X" + prnt(last.x) + " Y" + prnt(last.y) + " Z" + prnt(vpos) + "\n"
if isinstance(edge.Curve,Part.Circle):
point = edge.Vertexes[-1].Point
if point == last: # edges can come flipped
point = edge.Vertexes[0].Point
# print "flipped"
center = edge.Curve.Center
relcenter = center.sub(last)
v1 = last.sub(center)
v2 = point.sub(center)
if v1.cross(v2).z < 0:
output += "G2"
else:
output += "G3"
output += " X" + prnt(point.x) + " Y" + prnt(point.y) + " Z" + prnt(vpos)
output += " I" + prnt(relcenter.x) + " J" +prnt(relcenter.y) + " K" + prnt(relcenter.z)
output += "\n"
last = point
else:
point = edge.Vertexes[-1].Point
if point == last: # edges can come flipped
point = edge.Vertexes[0].Point
output += "G1 X" + prnt(point.x) + " Y" + prnt(point.y) + " Z" + prnt(vpos) + "\n"
last = point
#move back up
output += "G1 Z" + prnt(fastZPos) + "\n"
# print output
# path = Path.Path(output)
# obj.Path = path
if obj.Active:
path = Path.Path(output)
obj.Path = path
obj.ViewObject.Visibility = True
else:
path = Path.Path("(inactive operation)")
obj.Path = path
obj.ViewObject.Visibility = False
class ViewProviderPocket:
def __init__(self,vobj):
vobj.Proxy = self
def attach(self,vobj):
self.Object = vobj.Object
return
def getIcon(self):
return ":/icons/Path-Pocket.svg"
def __getstate__(self):
return None
def __setstate__(self,state):
return None
class CommandPathPocket:
def GetResources(self):
return {'Pixmap' : 'Path-Pocket',
'MenuText': QtCore.QT_TRANSLATE_NOOP("PathPocket","Pocket"),
'Accel': "P, O",
'ToolTip': QtCore.QT_TRANSLATE_NOOP("PathPocket","Creates a Path Pocket object from a loop of edges or a face")}
def IsActive(self):
        return FreeCAD.ActiveDocument is not None
def Activated(self):
# check that the selection contains exactly what we want
selection = FreeCADGui.Selection.getSelectionEx()
if len(selection) != 1:
FreeCAD.Console.PrintError(translate("PathPocket","Please select an edges loop from one object, or a single face\n"))
return
if len(selection[0].SubObjects) == 0:
FreeCAD.Console.PrintError(translate("PathPocket","Please select an edges loop from one object, or a single face\n"))
return
for s in selection[0].SubObjects:
if s.ShapeType != "Edge":
if (s.ShapeType != "Face") or (len(selection[0].SubObjects) != 1):
FreeCAD.Console.PrintError(translate("PathPocket","Please select only edges or a single face\n"))
return
if selection[0].SubObjects[0].ShapeType == "Edge":
try:
import Part
w = Part.Wire(selection[0].SubObjects)
except:
FreeCAD.Console.PrintError(translate("PathPocket","The selected edges don't form a loop\n"))
return
# if everything is ok, execute and register the transaction in the undo/redo stack
FreeCAD.ActiveDocument.openTransaction(translate("PathPocket","Create Pocket"))
FreeCADGui.addModule("PathScripts.PathPocket")
FreeCADGui.doCommand('prjexists = False')
FreeCADGui.doCommand('obj = FreeCAD.ActiveDocument.addObject("Path::FeaturePython","Pocket")')
FreeCADGui.doCommand('PathScripts.PathPocket.ObjectPocket(obj)')
FreeCADGui.doCommand('PathScripts.PathPocket.ViewProviderPocket(obj.ViewObject)')
subs = "["
for s in selection[0].SubElementNames:
subs += '"' + s + '",'
subs += "]"
FreeCADGui.doCommand('obj.Base = (FreeCAD.ActiveDocument.' + selection[0].ObjectName + ',' + subs + ')')
FreeCADGui.doCommand('obj.Active = True')
snippet = '''
from PathScripts import PathUtils
PathUtils.addToProject(obj)
ZMax = obj.Base[0].Shape.BoundBox.ZMax
ZMin = obj.Base[0].Shape.BoundBox.ZMin
obj.StepDown = 1.0
obj.StartDepth = ZMax
obj.FinalDepth = ZMin
obj.ClearanceHeight = ZMax + 5.0
'''
FreeCADGui.doCommand(snippet)
FreeCAD.ActiveDocument.commitTransaction()
FreeCAD.ActiveDocument.recompute()
if FreeCAD.GuiUp:
# register the FreeCAD command
FreeCADGui.addCommand('Path_Pocket',CommandPathPocket())
FreeCAD.Console.PrintLog("Loading PathPocket... done\n")
| lgpl-2.1 | 3,329,035,247,245,390,300 | 42.367781 | 195 | 0.540861 | false |
yourlabs/django-cities-light | src/cities_light/receivers.py | 1 | 5888 | from django.db.models import signals
from .abstract_models import to_ascii, to_search
from .settings import *
from .signals import *
from .exceptions import *
def set_name_ascii(sender, instance=None, **kwargs):
"""
    Signal receiver that sets instance.name_ascii from instance.name.
Ascii versions of names are often useful for autocompletes and search.
"""
name_ascii = to_ascii(instance.name).strip()
if name_ascii and not instance.name_ascii:
instance.name_ascii = name_ascii
def set_display_name(sender, instance=None, **kwargs):
"""
    Set instance.display_name to instance.get_display_name(), so that no extra
    queries are spawned during __str__().
"""
instance.display_name = instance.get_display_name()
def city_country(sender, instance, **kwargs):
if instance.region_id and not instance.country_id:
instance.country = instance.region.country
def city_search_names(sender, instance, **kwargs):
search_names = set()
country_names = {instance.country.name, }
if instance.country.alternate_names:
for n in instance.country.alternate_names.split(';'):
country_names.add(n)
city_names = {instance.name, }
if instance.alternate_names:
for n in instance.alternate_names.split(';'):
city_names.add(n)
if instance.region_id:
region_names = {instance.region.name, }
if instance.region.alternate_names:
for n in instance.region.alternate_names.split(';'):
region_names.add(n)
else:
region_names = set()
for city_name in city_names:
for country_name in country_names:
name = to_search(city_name + country_name)
search_names.add(name)
for region_name in region_names:
name = to_search(city_name + region_name + country_name)
search_names.add(name)
instance.search_names = ' '.join(sorted(search_names))
def connect_default_signals(model_class):
"""
Use this function to connect default signals to your custom model.
    It is called automatically if the default cities_light models are used,
    i.e. the `CITIES_LIGHT_APP_NAME` setting is not changed.
"""
if 'Country' in model_class.__name__:
signals.pre_save.connect(set_name_ascii, sender=model_class)
elif 'SubRegion' in model_class.__name__:
signals.pre_save.connect(set_name_ascii, sender=model_class)
signals.pre_save.connect(set_display_name, sender=model_class)
elif 'Region' in model_class.__name__:
signals.pre_save.connect(set_name_ascii, sender=model_class)
signals.pre_save.connect(set_display_name, sender=model_class)
elif 'City' in model_class.__name__:
signals.pre_save.connect(set_name_ascii, sender=model_class)
signals.pre_save.connect(set_display_name, sender=model_class)
signals.pre_save.connect(city_country, sender=model_class)
signals.pre_save.connect(city_search_names, sender=model_class)
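# Illustrative usage sketch (the "my_geo" app and its models are hypothetical, not
# part of this module): a project that swaps in its own models via the
# CITIES_LIGHT_APP_NAME setting has to wire the default receivers up itself, e.g.
#
#   from cities_light.receivers import connect_default_signals
#   from my_geo.models import Country, Region, SubRegion, City
#
#   for model in (Country, Region, SubRegion, City):
#       connect_default_signals(model)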
def filter_non_cities(sender, items, **kwargs):
"""
    Exclude any **city** whose feature code must not be included.
By default, this receiver is connected to
:py:func:`~cities_light.signals.city_items_pre_import`, it raises
:py:class:`~cities_light.exceptions.InvalidItems` if the row feature code
is not in the :py:data:`~cities_light.settings.INCLUDE_CITY_TYPES` setting.
"""
if items[7] not in INCLUDE_CITY_TYPES:
raise InvalidItems()
city_items_pre_import.connect(filter_non_cities)
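# Illustrative sketch of a replacement filter (the function below is hypothetical,
# not shipped with this module): a project that wants different criteria can
# disconnect the default receiver and register its own, e.g. keeping only large
# cities (population is column 14 of the geonames cities export):
#
#   from cities_light.signals import city_items_pre_import
#   from cities_light.exceptions import InvalidItems
#   from cities_light.receivers import filter_non_cities
#
#   city_items_pre_import.disconnect(filter_non_cities)
#
#   def keep_only_large_cities(sender, items, **kwargs):
#       if int(items[14] or 0) < 100000:
#           raise InvalidItems()
#
#   city_items_pre_import.connect(keep_only_large_cities)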
def filter_non_included_countries_country(sender, items, **kwargs):
"""
    Exclude any **country** which must not be included.
    This slot is connected to the
:py:func:`~cities_light.signals.country_items_pre_import` signal and does
nothing by default. To enable it, set the
:py:data:`~cities_light.settings.INCLUDE_COUNTRIES` setting.
"""
if INCLUDE_COUNTRIES is None:
return
if items[0].split('.')[0] not in INCLUDE_COUNTRIES:
raise InvalidItems()
country_items_pre_import.connect(filter_non_included_countries_country)
def filter_non_included_countries_region(sender, items, **kwargs):
"""
    Exclude any **region** whose country must not be included.
    This slot is connected to the
:py:func:`~cities_light.signals.region_items_pre_import` signal and does
nothing by default. To enable it, set the
:py:data:`~cities_light.settings.INCLUDE_COUNTRIES` setting.
"""
if INCLUDE_COUNTRIES is None:
return
if items[0].split('.')[0] not in INCLUDE_COUNTRIES:
raise InvalidItems()
region_items_pre_import.connect(filter_non_included_countries_region)
def filter_non_included_countries_subregion(sender, items, **kwargs):
"""
    Exclude any **subregion** whose country must not be included.
    This slot is connected to the
:py:func:`~cities_light.signals.subregion_items_pre_import` signal and does
nothing by default. To enable it, set the
:py:data:`~cities_light.settings.INCLUDE_COUNTRIES` setting.
"""
if INCLUDE_COUNTRIES is None:
return
if items[0].split('.')[0] not in INCLUDE_COUNTRIES:
raise InvalidItems()
subregion_items_pre_import.connect(filter_non_included_countries_subregion)
def filter_non_included_countries_city(sender, items, **kwargs):
"""
    Exclude any **city** whose country must not be included.
    This slot is connected to the
:py:func:`~cities_light.signals.city_items_pre_import` signal and does
nothing by default. To enable it, set the
:py:data:`~cities_light.settings.INCLUDE_COUNTRIES` setting.
"""
if INCLUDE_COUNTRIES is None:
return
if items[8].split('.')[0] not in INCLUDE_COUNTRIES:
raise InvalidItems()
city_items_pre_import.connect(filter_non_included_countries_city)
| mit | 2,927,385,031,341,265,400 | 33.432749 | 79 | 0.678329 | false |
codekaki/odoo | addons/project_timesheet/project_timesheet.py | 13 | 15612 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import datetime
from openerp.osv import fields, osv
from openerp import pooler
from openerp import tools
from openerp.tools.translate import _
class project_project(osv.osv):
_inherit = 'project.project'
def onchange_partner_id(self, cr, uid, ids, part=False, context=None):
res = super(project_project, self).onchange_partner_id(cr, uid, ids, part, context)
if part and res and ('value' in res):
# set Invoice Task Work to 100%
data_obj = self.pool.get('ir.model.data')
data_id = data_obj._get_id(cr, uid, 'hr_timesheet_invoice', 'timesheet_invoice_factor1')
if data_id:
factor_id = data_obj.browse(cr, uid, data_id).res_id
res['value'].update({'to_invoice': factor_id})
return res
_defaults = {
'use_timesheets': True,
}
def open_timesheets(self, cr, uid, ids, context=None):
""" open Timesheets view """
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
project = self.browse(cr, uid, ids[0], context)
view_context = {
'search_default_account_id': [project.analytic_account_id.id],
'default_account_id': project.analytic_account_id.id,
}
help = _("""<p class="oe_view_nocontent_create">Record your timesheets for the project '%s'.</p>""") % (project.name,)
try:
if project.to_invoice and project.partner_id:
help+= _("""<p>Timesheets on this project may be invoiced to %s, according to the terms defined in the contract.</p>""" ) % (project.partner_id.name,)
except:
            # if the user does not have access rights on the partner
pass
res = mod_obj.get_object_reference(cr, uid, 'hr_timesheet', 'act_hr_timesheet_line_evry1_all_form')
id = res and res[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
result['name'] = _('Timesheets')
result['context'] = view_context
result['help'] = help
return result
project_project()
class project_work(osv.osv):
_inherit = "project.task.work"
def get_user_related_details(self, cr, uid, user_id):
res = {}
emp_obj = self.pool.get('hr.employee')
emp_id = emp_obj.search(cr, uid, [('user_id', '=', user_id)])
if not emp_id:
user_name = self.pool.get('res.users').read(cr, uid, [user_id], ['name'])[0]['name']
raise osv.except_osv(_('Bad Configuration!'),
_('Please define employee for user "%s". You must create one.')% (user_name,))
emp = emp_obj.browse(cr, uid, emp_id[0])
if not emp.product_id:
raise osv.except_osv(_('Bad Configuration!'),
_('Please define product and product category property account on the related employee.\nFill in the HR Settings tab of the employee form.'))
if not emp.journal_id:
raise osv.except_osv(_('Bad Configuration!'),
_('Please define journal on the related employee.\nFill in the timesheet tab of the employee form.'))
acc_id = emp.product_id.property_account_expense.id
if not acc_id:
acc_id = emp.product_id.categ_id.property_account_expense_categ.id
if not acc_id:
raise osv.except_osv(_('Bad Configuration!'),
_('Please define product and product category property account on the related employee.\nFill in the timesheet tab of the employee form.'))
res['product_id'] = emp.product_id.id
res['journal_id'] = emp.journal_id.id
res['general_account_id'] = acc_id
res['product_uom_id'] = emp.product_id.uom_id.id
return res
def _create_analytic_entries(self, cr, uid, vals, context):
"""Create the hr analytic timesheet from project task work"""
timesheet_obj = self.pool['hr.analytic.timesheet']
task_obj = self.pool['project.task']
vals_line = {}
timeline_id = False
acc_id = False
task_obj = task_obj.browse(cr, uid, vals['task_id'], context=context)
result = self.get_user_related_details(cr, uid, vals.get('user_id', uid))
vals_line['name'] = '%s: %s' % (tools.ustr(task_obj.name), tools.ustr(vals['name'] or '/'))
vals_line['user_id'] = vals['user_id']
vals_line['product_id'] = result['product_id']
if vals.get('date'):
timestamp = datetime.datetime.strptime(vals['date'], tools.DEFAULT_SERVER_DATETIME_FORMAT)
ts = fields.datetime.context_timestamp(cr, uid, timestamp, context)
vals_line['date'] = ts.strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
# Calculate quantity based on employee's product's uom
vals_line['unit_amount'] = vals['hours']
default_uom = self.pool['res.users'].browse(cr, uid, uid, context=context).company_id.project_time_mode_id.id
if result['product_uom_id'] != default_uom:
vals_line['unit_amount'] = self.pool['product.uom']._compute_qty(cr, uid, default_uom, vals['hours'], result['product_uom_id'])
acc_id = task_obj.project_id and task_obj.project_id.analytic_account_id.id or acc_id
if acc_id:
vals_line['account_id'] = acc_id
res = timesheet_obj.on_change_account_id(cr, uid, False, acc_id)
if res.get('value'):
vals_line.update(res['value'])
vals_line['general_account_id'] = result['general_account_id']
vals_line['journal_id'] = result['journal_id']
vals_line['amount'] = 0.0
vals_line['product_uom_id'] = result['product_uom_id']
amount = vals_line['unit_amount']
prod_id = vals_line['product_id']
unit = False
timeline_id = timesheet_obj.create(cr, uid, vals=vals_line, context=context)
# Compute based on pricetype
amount_unit = timesheet_obj.on_change_unit_amount(cr, uid, timeline_id,
prod_id, amount, False, unit, vals_line['journal_id'], context=context)
if amount_unit and 'amount' in amount_unit.get('value',{}):
updv = { 'amount': amount_unit['value']['amount'] }
timesheet_obj.write(cr, uid, [timeline_id], updv, context=context)
return timeline_id
def create(self, cr, uid, vals, *args, **kwargs):
context = kwargs.get('context', {})
if not context.get('no_analytic_entry',False):
vals['hr_analytic_timesheet_id'] = self._create_analytic_entries(cr, uid, vals, context=context)
return super(project_work,self).create(cr, uid, vals, *args, **kwargs)
def write(self, cr, uid, ids, vals, context=None):
"""
When a project task work gets updated, handle its hr analytic timesheet.
"""
if context is None:
context = {}
timesheet_obj = self.pool.get('hr.analytic.timesheet')
uom_obj = self.pool.get('product.uom')
result = {}
if isinstance(ids, (long, int)):
ids = [ids]
for task in self.browse(cr, uid, ids, context=context):
line_id = task.hr_analytic_timesheet_id
if not line_id:
# if a record is deleted from timesheet, the line_id will become
# null because of the foreign key on-delete=set null
continue
vals_line = {}
if 'name' in vals:
vals_line['name'] = '%s: %s' % (tools.ustr(task.task_id.name), tools.ustr(vals['name'] or '/'))
if 'user_id' in vals:
vals_line['user_id'] = vals['user_id']
if 'date' in vals:
vals_line['date'] = vals['date'][:10]
if 'hours' in vals:
vals_line['unit_amount'] = vals['hours']
prod_id = vals_line.get('product_id', line_id.product_id.id) # False may be set
# Put user related details in analytic timesheet values
details = self.get_user_related_details(cr, uid, vals.get('user_id', task.user_id.id))
for field in ('product_id', 'general_account_id', 'journal_id', 'product_uom_id'):
if details.get(field, False):
vals_line[field] = details[field]
# Check if user's default UOM differs from product's UOM
user_default_uom_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.project_time_mode_id.id
if details.get('product_uom_id', False) and details['product_uom_id'] != user_default_uom_id:
vals_line['unit_amount'] = uom_obj._compute_qty(cr, uid, user_default_uom_id, vals['hours'], details['product_uom_id'])
# Compute based on pricetype
amount_unit = timesheet_obj.on_change_unit_amount(cr, uid, line_id.id,
prod_id=prod_id, company_id=False,
unit_amount=vals_line['unit_amount'], unit=False, journal_id=vals_line['journal_id'], context=context)
if amount_unit and 'amount' in amount_unit.get('value',{}):
vals_line['amount'] = amount_unit['value']['amount']
if vals_line:
self.pool.get('hr.analytic.timesheet').write(cr, uid, [line_id.id], vals_line, context=context)
return super(project_work,self).write(cr, uid, ids, vals, context)
def unlink(self, cr, uid, ids, *args, **kwargs):
hat_obj = self.pool.get('hr.analytic.timesheet')
hat_ids = []
for task in self.browse(cr, uid, ids):
if task.hr_analytic_timesheet_id:
hat_ids.append(task.hr_analytic_timesheet_id.id)
        # Delete the timesheet entry too when deleting the task work entry.
if hat_ids:
hat_obj.unlink(cr, uid, hat_ids, *args, **kwargs)
return super(project_work,self).unlink(cr, uid, ids, *args, **kwargs)
_columns={
'hr_analytic_timesheet_id':fields.many2one('hr.analytic.timesheet','Related Timeline Id', ondelete='set null'),
}
project_work()
class task(osv.osv):
_inherit = "project.task"
def unlink(self, cr, uid, ids, *args, **kwargs):
for task_obj in self.browse(cr, uid, ids, *args, **kwargs):
if task_obj.work_ids:
work_ids = [x.id for x in task_obj.work_ids]
self.pool.get('project.task.work').unlink(cr, uid, work_ids, *args, **kwargs)
return super(task,self).unlink(cr, uid, ids, *args, **kwargs)
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
task_work_obj = self.pool['project.task.work']
acc_id = False
missing_analytic_entries = {}
if vals.get('project_id',False) or vals.get('name',False):
vals_line = {}
hr_anlytic_timesheet = self.pool.get('hr.analytic.timesheet')
if vals.get('project_id',False):
project_obj = self.pool.get('project.project').browse(cr, uid, vals['project_id'], context=context)
acc_id = project_obj.analytic_account_id.id
for task_obj in self.browse(cr, uid, ids, context=context):
if len(task_obj.work_ids):
for task_work in task_obj.work_ids:
if not task_work.hr_analytic_timesheet_id:
if acc_id :
# missing timesheet activities to generate
missing_analytic_entries[task_work.id] = {
'name' : task_work.name,
'user_id' : task_work.user_id.id,
'date' : task_work.date and task_work.date[:10] or False,
'account_id': acc_id,
'hours' : task_work.hours,
'task_id' : task_obj.id
}
continue
line_id = task_work.hr_analytic_timesheet_id.id
if vals.get('project_id',False):
vals_line['account_id'] = acc_id
if vals.get('name',False):
vals_line['name'] = '%s: %s' % (tools.ustr(vals['name']), tools.ustr(task_work.name) or '/')
hr_anlytic_timesheet.write(cr, uid, [line_id], vals_line, {})
res = super(task,self).write(cr, uid, ids, vals, context)
for task_work_id, analytic_entry in missing_analytic_entries.items():
timeline_id = task_work_obj._create_analytic_entries(cr, uid, analytic_entry, context=context)
task_work_obj.write(cr, uid, task_work_id, {'hr_analytic_timesheet_id' : timeline_id}, context=context)
return res
task()
class res_partner(osv.osv):
_inherit = 'res.partner'
def unlink(self, cursor, user, ids, context=None):
        project_ids = self.pool.get('project.project').search(cursor, user, [('partner_id', 'in', ids)])
        if project_ids:
raise osv.except_osv(_('Invalid Action!'), _('You cannot delete a partner which is assigned to project, but you can uncheck the active box.'))
return super(res_partner,self).unlink(cursor, user, ids,
context=context)
res_partner()
class account_analytic_line(osv.osv):
_inherit = "account.analytic.line"
def get_product(self, cr, uid, context=None):
emp_obj = self.pool.get('hr.employee')
emp_ids = emp_obj.search(cr, uid, [('user_id', '=', uid)], context=context)
if emp_ids:
employee = emp_obj.browse(cr, uid, emp_ids, context=context)[0]
            if employee.product_id:
                return employee.product_id.id
return False
_defaults = {'product_id': get_product,}
def on_change_account_id(self, cr, uid, ids, account_id):
res = {}
if not account_id:
return res
res.setdefault('value',{})
acc = self.pool.get('account.analytic.account').browse(cr, uid, account_id)
st = acc.to_invoice.id
res['value']['to_invoice'] = st or False
if acc.state == 'close' or acc.state == 'cancelled':
            raise osv.except_osv(_('Invalid Analytic Account!'), _('You cannot select an Analytic Account which is in the Close or Cancelled state.'))
return res
account_analytic_line()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 5,814,952,488,180,640,000 | 46.166163 | 166 | 0.572957 | false |
qilicun/python | python2/PyMOTW-1.132/PyMOTW/glob/glob_extension.py | 1 | 1230 | #!/usr/bin/env python
#
# Copyright 2007 Doug Hellmann.
#
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Doug
# Hellmann not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# DOUG HELLMANN DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL DOUG HELLMANN BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
"""Using glob to find files matching a pattern with a filename extension.
"""
__module_id__ = "$Id$"
#end_pymotw_header
import glob
for name in glob.glob('*.py'):
print name
| gpl-3.0 | -6,245,000,569,980,625,000 | 33.166667 | 73 | 0.754472 | false |
tectronics/gsiege | gsiege.py | 3 | 2769 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# This file is part of Resistencia Cadiz 1812. #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
# Copyright (C) 2010, Pablo Recio Quijano, <[email protected]> #
# Copyright (C) 2008-2009 Adam Olsen (Exaile project) #
###############################################################################
###############################################################################
# The following execution script was modified from the Exaile project, using
# it as base for the construction of this application.
# To get the original version see <http://www.exaile.org/>
###############################################################################
"""
This file includes the main execution of the application
"""
import sys
import os
import os.path
# If we are on a system with a GNU/Linux kernel
if 'linux' in sys.platform:
    # Library for working with C data types
    import ctypes
    # Load the C library
    __libc__ = ctypes.CDLL('libc.so.6')
    # prctl option 15 is PR_SET_NAME: set the process name shown by tools like ps/top
    __libc__.prctl(15, 'gsiege', 0, 0, 0)
__basedir__ = os.path.dirname(os.path.realpath(__file__))
# WTF?
if not os.path.exists(os.path.join(__basedir__, __file__)):
__cwd__ = os.getcwd()
if os.path.exists(os.path.join(__cwd__, __file__)):
__basedir__ = __cwd__
sys.path.insert(0, __basedir__)
def main_execution():
"""
    This function initializes the gsiege environment
"""
from resistencia import main
main.main()
if __name__ == "__main__":
main_execution()
# vim: et sts=4 sw=4
| gpl-3.0 | 429,964,579,170,997,700 | 39.720588 | 79 | 0.470928 | false |
jhunkeler/hstcal | tests/wfc3/test_uvis_12single.py | 1 | 1098 | import subprocess
import pytest
from ..helpers import BaseWFC3
class TestUVIS12Single(BaseWFC3):
"""
Test pos UVIS2 BIAS data
"""
detector = 'uvis'
def _single_raw_calib(self, rootname):
raw_file = '{}_raw.fits'.format(rootname)
# Prepare input file.
self.get_input_file(raw_file)
# Run CALWF3
subprocess.call(['calwf3.e', raw_file, '-vt'])
# Compare results
outputs = [('{}_flt.fits'.format(rootname),
'{}_flt_ref.fits'.format(rootname))]
self.compare_outputs(outputs)
# Ported from ``calwf3_uv_12``.
@pytest.mark.parametrize(
'rootname', ['iaao09l0q'])
# 'rootname', ['iaao09l0q',
# 'iaao09l1q',
# 'iaao11odq',
# 'iaao11oeq',
# 'ibbq01n4q',
# 'ibbq01n5q',
# 'iblk57bzq',
# 'iblk57c0q',
# 'iblk57c3q'])
def test_uvis_12single(self, rootname):
self._single_raw_calib(rootname)
| bsd-3-clause | -3,015,081,682,545,373,000 | 25.780488 | 56 | 0.495446 | false |
miminus/youtube-dl | youtube_dl/extractor/generic.py | 8 | 76028 | # encoding: utf-8
from __future__ import unicode_literals
import os
import re
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..compat import (
compat_urllib_parse_unquote,
compat_urllib_request,
compat_urlparse,
compat_xml_parse_error,
)
from ..utils import (
determine_ext,
ExtractorError,
float_or_none,
HEADRequest,
is_html,
orderedSet,
parse_xml,
smuggle_url,
unescapeHTML,
unified_strdate,
unsmuggle_url,
UnsupportedError,
url_basename,
xpath_text,
)
from .brightcove import BrightcoveIE
from .nbc import NBCSportsVPlayerIE
from .ooyala import OoyalaIE
from .rutv import RUTVIE
from .tvc import TVCIE
from .sportbox import SportBoxEmbedIE
from .smotri import SmotriIE
from .myvi import MyviIE
from .condenast import CondeNastIE
from .udn import UDNEmbedIE
from .senateisvp import SenateISVPIE
from .bliptv import BlipTVIE
from .svt import SVTIE
from .pornhub import PornHubIE
from .xhamster import XHamsterEmbedIE
from .vimeo import VimeoIE
from .dailymotion import DailymotionCloudIE
from .onionstudios import OnionStudiosIE
from .snagfilms import SnagFilmsEmbedIE
from .screenwavemedia import ScreenwaveMediaIE
class GenericIE(InfoExtractor):
IE_DESC = 'Generic downloader that works on some sites'
_VALID_URL = r'.*'
IE_NAME = 'generic'
_TESTS = [
# Direct link to a video
{
'url': 'http://media.w3.org/2010/05/sintel/trailer.mp4',
'md5': '67d406c2bcb6af27fa886f31aa934bbe',
'info_dict': {
'id': 'trailer',
'ext': 'mp4',
'title': 'trailer',
'upload_date': '20100513',
}
},
# Direct link to media delivered compressed (until Accept-Encoding is *)
{
'url': 'http://calimero.tk/muzik/FictionJunction-Parallel_Hearts.flac',
'md5': '128c42e68b13950268b648275386fc74',
'info_dict': {
'id': 'FictionJunction-Parallel_Hearts',
'ext': 'flac',
'title': 'FictionJunction-Parallel_Hearts',
'upload_date': '20140522',
},
'expected_warnings': [
'URL could be a direct video link, returning it as such.'
]
},
# Direct download with broken HEAD
{
'url': 'http://ai-radio.org:8000/radio.opus',
'info_dict': {
'id': 'radio',
'ext': 'opus',
'title': 'radio',
},
'params': {
'skip_download': True, # infinite live stream
},
'expected_warnings': [
r'501.*Not Implemented'
],
},
# Direct link with incorrect MIME type
{
'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm',
'md5': '4ccbebe5f36706d85221f204d7eb5913',
'info_dict': {
'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm',
'id': '5_Lennart_Poettering_-_Systemd',
'ext': 'webm',
'title': '5_Lennart_Poettering_-_Systemd',
'upload_date': '20141120',
},
'expected_warnings': [
'URL could be a direct video link, returning it as such.'
]
},
# RSS feed
{
'url': 'http://phihag.de/2014/youtube-dl/rss2.xml',
'info_dict': {
'id': 'http://phihag.de/2014/youtube-dl/rss2.xml',
'title': 'Zero Punctuation',
'description': 're:.*groundbreaking video review series.*'
},
'playlist_mincount': 11,
},
# RSS feed with enclosure
{
'url': 'http://podcastfeeds.nbcnews.com/audio/podcast/MSNBC-MADDOW-NETCAST-M4V.xml',
'info_dict': {
'id': 'pdv_maddow_netcast_m4v-02-27-2015-201624',
'ext': 'm4v',
'upload_date': '20150228',
'title': 'pdv_maddow_netcast_m4v-02-27-2015-201624',
}
},
# SMIL from http://videolectures.net/promogram_igor_mekjavic_eng
{
'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/video/1/smil.xml',
'info_dict': {
'id': 'smil',
'ext': 'mp4',
'title': 'Automatics, robotics and biocybernetics',
'description': 'md5:815fc1deb6b3a2bff99de2d5325be482',
'formats': 'mincount:16',
'subtitles': 'mincount:1',
},
'params': {
'force_generic_extractor': True,
'skip_download': True,
},
},
# SMIL from http://www1.wdr.de/mediathek/video/livestream/index.html
{
'url': 'http://metafilegenerator.de/WDR/WDR_FS/hds/hds.smil',
'info_dict': {
'id': 'hds',
'ext': 'flv',
'title': 'hds',
'formats': 'mincount:1',
},
'params': {
'skip_download': True,
},
},
# SMIL from https://www.restudy.dk/video/play/id/1637
{
'url': 'https://www.restudy.dk/awsmedia/SmilDirectory/video_1637.xml',
'info_dict': {
'id': 'video_1637',
'ext': 'flv',
'title': 'video_1637',
'formats': 'mincount:3',
},
'params': {
'skip_download': True,
},
},
# SMIL from http://adventure.howstuffworks.com/5266-cool-jobs-iditarod-musher-video.htm
{
'url': 'http://services.media.howstuffworks.com/videos/450221/smil-service.smil',
'info_dict': {
'id': 'smil-service',
'ext': 'flv',
'title': 'smil-service',
'formats': 'mincount:1',
},
'params': {
'skip_download': True,
},
},
# SMIL from http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370
{
'url': 'http://api.new.livestream.com/accounts/1570303/events/1585861/videos/4719370.smil',
'info_dict': {
'id': '4719370',
'ext': 'mp4',
'title': '571de1fd-47bc-48db-abf9-238872a58d1f',
'formats': 'mincount:3',
},
'params': {
'skip_download': True,
},
},
# XSPF playlist from http://www.telegraaf.nl/tv/nieuws/binnenland/24353229/__Tikibad_ontruimd_wegens_brand__.html
{
'url': 'http://www.telegraaf.nl/xml/playlist/2015/8/7/mZlp2ctYIUEB.xspf',
'info_dict': {
'id': 'mZlp2ctYIUEB',
'ext': 'mp4',
'title': 'Tikibad ontruimd wegens brand',
'description': 'md5:05ca046ff47b931f9b04855015e163a4',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 33,
},
'params': {
'skip_download': True,
},
},
# google redirect
{
'url': 'http://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&cad=rja&ved=0CCUQtwIwAA&url=http%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DcmQHVoWB5FY&ei=F-sNU-LLCaXk4QT52ICQBQ&usg=AFQjCNEw4hL29zgOohLXvpJ-Bdh2bils1Q&bvm=bv.61965928,d.bGE',
'info_dict': {
'id': 'cmQHVoWB5FY',
'ext': 'mp4',
'upload_date': '20130224',
'uploader_id': 'TheVerge',
'description': 're:^Chris Ziegler takes a look at the\.*',
'uploader': 'The Verge',
'title': 'First Firefox OS phones side-by-side',
},
'params': {
'skip_download': False,
}
},
{
'url': 'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html',
'md5': '85b90ccc9d73b4acd9138d3af4c27f89',
'info_dict': {
'id': '13601338388002',
'ext': 'mp4',
'uploader': 'www.hodiho.fr',
'title': 'R\u00e9gis plante sa Jeep',
}
},
# bandcamp page with custom domain
{
'add_ie': ['Bandcamp'],
'url': 'http://bronyrock.com/track/the-pony-mash',
'info_dict': {
'id': '3235767654',
'ext': 'mp3',
'title': 'The Pony Mash',
'uploader': 'M_Pallante',
},
'skip': 'There is a limit of 200 free downloads / month for the test song',
},
# embedded brightcove video
# it also tests brightcove videos that need to set the 'Referer' in the
# http requests
{
'add_ie': ['Brightcove'],
'url': 'http://www.bfmtv.com/video/bfmbusiness/cours-bourse/cours-bourse-l-analyse-technique-154522/',
'info_dict': {
'id': '2765128793001',
'ext': 'mp4',
'title': 'Le cours de bourse : l’analyse technique',
'description': 'md5:7e9ad046e968cb2d1114004aba466fd9',
'uploader': 'BFM BUSINESS',
},
'params': {
'skip_download': True,
},
},
{
# https://github.com/rg3/youtube-dl/issues/2253
'url': 'http://bcove.me/i6nfkrc3',
'md5': '0ba9446db037002366bab3b3eb30c88c',
'info_dict': {
'id': '3101154703001',
'ext': 'mp4',
'title': 'Still no power',
'uploader': 'thestar.com',
'description': 'Mississauga resident David Farmer is still out of power as a result of the ice storm a month ago. To keep the house warm, Farmer cuts wood from his property for a wood burning stove downstairs.',
},
'add_ie': ['Brightcove'],
},
{
'url': 'http://www.championat.com/video/football/v/87/87499.html',
'md5': 'fb973ecf6e4a78a67453647444222983',
'info_dict': {
'id': '3414141473001',
'ext': 'mp4',
'title': 'Видео. Удаление Дзагоева (ЦСКА)',
'description': 'Онлайн-трансляция матча ЦСКА - "Волга"',
'uploader': 'Championat',
},
},
{
# https://github.com/rg3/youtube-dl/issues/3541
'add_ie': ['Brightcove'],
'url': 'http://www.kijk.nl/sbs6/leermijvrouwenkennen/videos/jqMiXKAYan2S/aflevering-1',
'info_dict': {
'id': '3866516442001',
'ext': 'mp4',
'title': 'Leer mij vrouwen kennen: Aflevering 1',
'description': 'Leer mij vrouwen kennen: Aflevering 1',
'uploader': 'SBS Broadcasting',
},
'skip': 'Restricted to Netherlands',
'params': {
'skip_download': True, # m3u8 download
},
},
# ooyala video
{
'url': 'http://www.rollingstone.com/music/videos/norwegian-dj-cashmere-cat-goes-spartan-on-with-me-premiere-20131219',
'md5': '166dd577b433b4d4ebfee10b0824d8ff',
'info_dict': {
'id': 'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ',
'ext': 'mp4',
'title': '2cc213299525360.mov', # that's what we get
},
'add_ie': ['Ooyala'],
},
{
# ooyala video embedded with http://player.ooyala.com/iframe.js
'url': 'http://www.macrumors.com/2015/07/24/steve-jobs-the-man-in-the-machine-first-trailer/',
'info_dict': {
'id': 'p0MGJndjoG5SOKqO_hZJuZFPB-Tr5VgB',
'ext': 'mp4',
'title': '"Steve Jobs: Man in the Machine" trailer',
'description': 'The first trailer for the Alex Gibney documentary "Steve Jobs: Man in the Machine."',
},
'params': {
'skip_download': True,
},
},
# multiple ooyala embeds on SBN network websites
{
'url': 'http://www.sbnation.com/college-football-recruiting/2015/2/3/7970291/national-signing-day-rationalizations-itll-be-ok-itll-be-ok',
'info_dict': {
'id': 'national-signing-day-rationalizations-itll-be-ok-itll-be-ok',
'title': '25 lies you will tell yourself on National Signing Day - SBNation.com',
},
'playlist_mincount': 3,
'params': {
'skip_download': True,
},
'add_ie': ['Ooyala'],
},
# embed.ly video
{
'url': 'http://www.tested.com/science/weird/460206-tested-grinding-coffee-2000-frames-second/',
'info_dict': {
'id': '9ODmcdjQcHQ',
'ext': 'mp4',
'title': 'Tested: Grinding Coffee at 2000 Frames Per Second',
'upload_date': '20140225',
'description': 'md5:06a40fbf30b220468f1e0957c0f558ff',
'uploader': 'Tested',
'uploader_id': 'testedcom',
},
# No need to test YoutubeIE here
'params': {
'skip_download': True,
},
},
# funnyordie embed
{
'url': 'http://www.theguardian.com/world/2014/mar/11/obama-zach-galifianakis-between-two-ferns',
'info_dict': {
'id': '18e820ec3f',
'ext': 'mp4',
'title': 'Between Two Ferns with Zach Galifianakis: President Barack Obama',
'description': 'Episode 18: President Barack Obama sits down with Zach Galifianakis for his most memorable interview yet.',
},
},
# RUTV embed
{
'url': 'http://www.rg.ru/2014/03/15/reg-dfo/anklav-anons.html',
'info_dict': {
'id': '776940',
'ext': 'mp4',
'title': 'Охотское море стало целиком российским',
'description': 'md5:5ed62483b14663e2a95ebbe115eb8f43',
},
'params': {
# m3u8 download
'skip_download': True,
},
},
# TVC embed
{
'url': 'http://sch1298sz.mskobr.ru/dou_edu/karamel_ki/filial_galleries/video/iframe_src_http_tvc_ru_video_iframe_id_55304_isplay_false_acc_video_id_channel_brand_id_11_show_episodes_episode_id_32307_frameb/',
'info_dict': {
'id': '55304',
'ext': 'mp4',
'title': 'Дошкольное воспитание',
},
},
# SportBox embed
{
'url': 'http://www.vestifinance.ru/articles/25753',
'info_dict': {
'id': '25753',
'title': 'Вести Экономика ― Прямые трансляции с Форума-выставки "Госзаказ-2013"',
},
'playlist': [{
'info_dict': {
'id': '370908',
'title': 'Госзаказ. День 3',
'ext': 'mp4',
}
}, {
'info_dict': {
'id': '370905',
'title': 'Госзаказ. День 2',
'ext': 'mp4',
}
}, {
'info_dict': {
'id': '370902',
'title': 'Госзаказ. День 1',
'ext': 'mp4',
}
}],
'params': {
# m3u8 download
'skip_download': True,
},
},
# Myvi.ru embed
{
'url': 'http://www.kinomyvi.tv/news/detail/Pervij-dublirovannij-trejler--Uzhastikov-_nOw1',
'info_dict': {
'id': 'f4dafcad-ff21-423d-89b5-146cfd89fa1e',
'ext': 'mp4',
'title': 'Ужастики, русский трейлер (2015)',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 153,
}
},
# XHamster embed
{
'url': 'http://www.numisc.com/forum/showthread.php?11696-FM15-which-pumiscer-was-this-%28-vid-%29-%28-alfa-as-fuck-srx-%29&s=711f5db534502e22260dec8c5e2d66d8',
'info_dict': {
'id': 'showthread',
'title': '[NSFL] [FM15] which pumiscer was this ( vid ) ( alfa as fuck srx )',
},
'playlist_mincount': 7,
},
# Embedded TED video
{
'url': 'http://en.support.wordpress.com/videos/ted-talks/',
'md5': '65fdff94098e4a607385a60c5177c638',
'info_dict': {
'id': '1969',
'ext': 'mp4',
'title': 'Hidden miracles of the natural world',
'uploader': 'Louie Schwartzberg',
'description': 'md5:8145d19d320ff3e52f28401f4c4283b9',
}
},
        # Embedded Ustream video
{
'url': 'http://www.american.edu/spa/pti/nsa-privacy-janus-2014.cfm',
'md5': '27b99cdb639c9b12a79bca876a073417',
'info_dict': {
'id': '45734260',
'ext': 'flv',
'uploader': 'AU SPA: The NSA and Privacy',
'title': 'NSA and Privacy Forum Debate featuring General Hayden and Barton Gellman'
}
},
# nowvideo embed hidden behind percent encoding
{
'url': 'http://www.waoanime.tv/the-super-dimension-fortress-macross-episode-1/',
'md5': '2baf4ddd70f697d94b1c18cf796d5107',
'info_dict': {
'id': '06e53103ca9aa',
'ext': 'flv',
'title': 'Macross Episode 001 Watch Macross Episode 001 onl',
'description': 'No description',
},
},
# arte embed
{
'url': 'http://www.tv-replay.fr/redirection/20-03-14/x-enius-arte-10753389.html',
'md5': '7653032cbb25bf6c80d80f217055fa43',
'info_dict': {
'id': '048195-004_PLUS7-F',
'ext': 'flv',
'title': 'X:enius',
'description': 'md5:d5fdf32ef6613cdbfd516ae658abf168',
'upload_date': '20140320',
},
'params': {
'skip_download': 'Requires rtmpdump'
}
},
# francetv embed
{
'url': 'http://www.tsprod.com/replay-du-concert-alcaline-de-calogero',
'info_dict': {
'id': 'EV_30231',
'ext': 'mp4',
'title': 'Alcaline, le concert avec Calogero',
'description': 'md5:61f08036dcc8f47e9cfc33aed08ffaff',
'upload_date': '20150226',
'timestamp': 1424989860,
'duration': 5400,
},
'params': {
# m3u8 downloads
'skip_download': True,
},
'expected_warnings': [
'Forbidden'
]
},
# Condé Nast embed
{
'url': 'http://www.wired.com/2014/04/honda-asimo/',
'md5': 'ba0dfe966fa007657bd1443ee672db0f',
'info_dict': {
'id': '53501be369702d3275860000',
'ext': 'mp4',
'title': 'Honda’s New Asimo Robot Is More Human Than Ever',
}
},
# Dailymotion embed
{
'url': 'http://www.spi0n.com/zap-spi0n-com-n216/',
'md5': '441aeeb82eb72c422c7f14ec533999cd',
'info_dict': {
'id': 'k2mm4bCdJ6CQ2i7c8o2',
'ext': 'mp4',
'title': 'Le Zap de Spi0n n°216 - Zapping du Web',
'uploader': 'Spi0n',
},
'add_ie': ['Dailymotion'],
},
# YouTube embed
{
'url': 'http://www.badzine.de/ansicht/datum/2014/06/09/so-funktioniert-die-neue-englische-badminton-liga.html',
'info_dict': {
'id': 'FXRb4ykk4S0',
'ext': 'mp4',
'title': 'The NBL Auction 2014',
'uploader': 'BADMINTON England',
'uploader_id': 'BADMINTONEvents',
'upload_date': '20140603',
'description': 'md5:9ef128a69f1e262a700ed83edb163a73',
},
'add_ie': ['Youtube'],
'params': {
'skip_download': True,
}
},
        # MTVServices embed
{
'url': 'http://www.gametrailers.com/news-post/76093/north-america-europe-is-getting-that-mario-kart-8-mercedes-dlc-too',
'md5': '35727f82f58c76d996fc188f9755b0d5',
'info_dict': {
'id': '0306a69b-8adf-4fb5-aace-75f8e8cbfca9',
'ext': 'mp4',
'title': 'Review',
'description': 'Mario\'s life in the fast lane has never looked so good.',
},
},
# YouTube embed via <data-embed-url="">
{
'url': 'https://play.google.com/store/apps/details?id=com.gameloft.android.ANMP.GloftA8HM',
'info_dict': {
'id': '4vAffPZIT44',
'ext': 'mp4',
'title': 'Asphalt 8: Airborne - Update - Welcome to Dubai!',
'uploader': 'Gameloft',
'uploader_id': 'gameloft',
'upload_date': '20140828',
'description': 'md5:c80da9ed3d83ae6d1876c834de03e1c4',
},
'params': {
'skip_download': True,
}
},
# Camtasia studio
{
'url': 'http://www.ll.mit.edu/workshops/education/videocourses/antennas/lecture1/video/',
'playlist': [{
'md5': '0c5e352edabf715d762b0ad4e6d9ee67',
'info_dict': {
'id': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final - video1',
'ext': 'flv',
'duration': 2235.90,
}
}, {
'md5': '10e4bb3aaca9fd630e273ff92d9f3c63',
'info_dict': {
'id': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final_PIP',
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final - pip',
'ext': 'flv',
'duration': 2235.93,
}
}],
'info_dict': {
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
}
},
# Flowplayer
{
'url': 'http://www.handjobhub.com/video/busty-blonde-siri-tit-fuck-while-wank-6313.html',
'md5': '9d65602bf31c6e20014319c7d07fba27',
'info_dict': {
'id': '5123ea6d5e5a7',
'ext': 'mp4',
'age_limit': 18,
'uploader': 'www.handjobhub.com',
'title': 'Busty Blonde Siri Tit Fuck While Wank at HandjobHub.com',
}
},
# Multiple brightcove videos
# https://github.com/rg3/youtube-dl/issues/2283
{
'url': 'http://www.newyorker.com/online/blogs/newsdesk/2014/01/always-never-nuclear-command-and-control.html',
'info_dict': {
'id': 'always-never',
'title': 'Always / Never - The New Yorker',
},
'playlist_count': 3,
'params': {
'extract_flat': False,
'skip_download': True,
}
},
# MLB embed
{
'url': 'http://umpire-empire.com/index.php/topic/58125-laz-decides-no-thats-low/',
'md5': '96f09a37e44da40dd083e12d9a683327',
'info_dict': {
'id': '33322633',
'ext': 'mp4',
'title': 'Ump changes call to ball',
'description': 'md5:71c11215384298a172a6dcb4c2e20685',
'duration': 48,
'timestamp': 1401537900,
'upload_date': '20140531',
'thumbnail': 're:^https?://.*\.jpg$',
},
},
# Wistia embed
{
'url': 'http://education-portal.com/academy/lesson/north-american-exploration-failed-colonies-of-spain-france-england.html#lesson',
'md5': '8788b683c777a5cf25621eaf286d0c23',
'info_dict': {
'id': '1cfaf6b7ea',
'ext': 'mov',
'title': 'md5:51364a8d3d009997ba99656004b5e20d',
'duration': 643.0,
'filesize': 182808282,
'uploader': 'education-portal.com',
},
},
{
'url': 'http://thoughtworks.wistia.com/medias/uxjb0lwrcz',
'md5': 'baf49c2baa8a7de5f3fc145a8506dcd4',
'info_dict': {
'id': 'uxjb0lwrcz',
'ext': 'mp4',
'title': 'Conversation about Hexagonal Rails Part 1 - ThoughtWorks',
'duration': 1715.0,
'uploader': 'thoughtworks.wistia.com',
},
},
# Soundcloud embed
{
'url': 'http://nakedsecurity.sophos.com/2014/10/29/sscc-171-are-you-sure-that-1234-is-a-bad-password-podcast/',
'info_dict': {
'id': '174391317',
'ext': 'mp3',
'description': 'md5:ff867d6b555488ad3c52572bb33d432c',
'uploader': 'Sophos Security',
'title': 'Chet Chat 171 - Oct 29, 2014',
'upload_date': '20141029',
}
},
# Livestream embed
{
'url': 'http://www.esa.int/Our_Activities/Space_Science/Rosetta/Philae_comet_touch-down_webcast',
'info_dict': {
'id': '67864563',
'ext': 'flv',
'upload_date': '20141112',
'title': 'Rosetta #CometLanding webcast HL 10',
}
},
# LazyYT
{
'url': 'http://discourse.ubuntu.com/t/unity-8-desktop-mode-windows-on-mir/1986',
'info_dict': {
'id': '1986',
'title': 'Unity 8 desktop-mode windows on Mir! - Ubuntu Discourse',
},
'playlist_mincount': 2,
},
# Cinchcast embed
{
'url': 'http://undergroundwellness.com/podcasts/306-5-steps-to-permanent-gut-healing/',
'info_dict': {
'id': '7141703',
'ext': 'mp3',
'upload_date': '20141126',
'title': 'Jack Tips: 5 Steps to Permanent Gut Healing',
}
},
# Cinerama player
{
'url': 'http://www.abc.net.au/7.30/content/2015/s4164797.htm',
'info_dict': {
'id': '730m_DandD_1901_512k',
'ext': 'mp4',
'uploader': 'www.abc.net.au',
'title': 'Game of Thrones with dice - Dungeons and Dragons fantasy role-playing game gets new life - 19/01/2015',
}
},
# embedded viddler video
{
'url': 'http://deadspin.com/i-cant-stop-watching-john-wall-chop-the-nuggets-with-th-1681801597',
'info_dict': {
'id': '4d03aad9',
'ext': 'mp4',
'uploader': 'deadspin',
'title': 'WALL-TO-GORTAT',
'timestamp': 1422285291,
'upload_date': '20150126',
},
'add_ie': ['Viddler'],
},
# Libsyn embed
{
'url': 'http://thedailyshow.cc.com/podcast/episodetwelve',
'info_dict': {
'id': '3377616',
'ext': 'mp3',
'title': "The Daily Show Podcast without Jon Stewart - Episode 12: Bassem Youssef: Egypt's Jon Stewart",
'description': 'md5:601cb790edd05908957dae8aaa866465',
'upload_date': '20150220',
},
},
# jwplayer YouTube
{
'url': 'http://media.nationalarchives.gov.uk/index.php/webinar-using-discovery-national-archives-online-catalogue/',
'info_dict': {
'id': 'Mrj4DVp2zeA',
'ext': 'mp4',
'upload_date': '20150212',
'uploader': 'The National Archives UK',
'description': 'md5:a236581cd2449dd2df4f93412f3f01c6',
'uploader_id': 'NationalArchives08',
'title': 'Webinar: Using Discovery, The National Archives’ online catalogue',
},
},
# rtl.nl embed
{
'url': 'http://www.rtlnieuws.nl/nieuws/buitenland/aanslagen-kopenhagen',
'playlist_mincount': 5,
'info_dict': {
'id': 'aanslagen-kopenhagen',
'title': 'Aanslagen Kopenhagen | RTL Nieuws',
}
},
# Zapiks embed
{
'url': 'http://www.skipass.com/news/116090-bon-appetit-s5ep3-baqueira-mi-cor.html',
'info_dict': {
'id': '118046',
'ext': 'mp4',
'title': 'EP3S5 - Bon Appétit - Baqueira Mi Corazon !',
}
},
# Kaltura embed
{
'url': 'http://www.monumentalnetwork.com/videos/john-carlson-postgame-2-25-15',
'info_dict': {
'id': '1_eergr3h1',
'ext': 'mp4',
'upload_date': '20150226',
'uploader_id': '[email protected]',
'timestamp': int,
'title': 'John Carlson Postgame 2/25/15',
},
},
# Kaltura embed (different embed code)
{
'url': 'http://www.premierchristianradio.com/Shows/Saturday/Unbelievable/Conference-Videos/Os-Guinness-Is-It-Fools-Talk-Unbelievable-Conference-2014',
'info_dict': {
'id': '1_a52wc67y',
'ext': 'flv',
'upload_date': '20150127',
'uploader_id': 'PremierMedia',
'timestamp': int,
'title': 'Os Guinness // Is It Fools Talk? // Unbelievable? Conference 2014',
},
},
# Eagle.Platform embed (generic URL)
{
'url': 'http://lenta.ru/news/2015/03/06/navalny/',
'info_dict': {
'id': '227304',
'ext': 'mp4',
'title': 'Навальный вышел на свободу',
'description': 'md5:d97861ac9ae77377f3f20eaf9d04b4f5',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 87,
'view_count': int,
'age_limit': 0,
},
},
# ClipYou (Eagle.Platform) embed (custom URL)
{
'url': 'http://muz-tv.ru/play/7129/',
'info_dict': {
'id': '12820',
'ext': 'mp4',
'title': "'O Sole Mio",
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 216,
'view_count': int,
},
},
# Pladform embed
{
'url': 'http://muz-tv.ru/kinozal/view/7400/',
'info_dict': {
'id': '100183293',
'ext': 'mp4',
'title': 'Тайны перевала Дятлова • 1 серия 2 часть',
'description': 'Документальный сериал-расследование одной из самых жутких тайн ХХ века',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 694,
'age_limit': 0,
},
},
# Playwire embed
{
'url': 'http://www.cinemablend.com/new/First-Joe-Dirt-2-Trailer-Teaser-Stupid-Greatness-70874.html',
'info_dict': {
'id': '3519514',
'ext': 'mp4',
'title': 'Joe Dirt 2 Beautiful Loser Teaser Trailer',
'thumbnail': 're:^https?://.*\.png$',
'duration': 45.115,
},
},
# 5min embed
{
'url': 'http://techcrunch.com/video/facebook-creates-on-this-day-crunch-report/518726732/',
'md5': '4c6f127a30736b59b3e2c19234ee2bf7',
'info_dict': {
'id': '518726732',
'ext': 'mp4',
'title': 'Facebook Creates "On This Day" | Crunch Report',
},
},
# SVT embed
{
'url': 'http://www.svt.se/sport/ishockey/jagr-tacklar-giroux-under-intervjun',
'info_dict': {
'id': '2900353',
'ext': 'flv',
'title': 'Här trycker Jagr till Giroux (under SVT-intervjun)',
'duration': 27,
'age_limit': 0,
},
},
# Crooks and Liars embed
{
'url': 'http://crooksandliars.com/2015/04/fox-friends-says-protecting-atheists',
'info_dict': {
'id': '8RUoRhRi',
'ext': 'mp4',
'title': "Fox & Friends Says Protecting Atheists From Discrimination Is Anti-Christian!",
'description': 'md5:e1a46ad1650e3a5ec7196d432799127f',
'timestamp': 1428207000,
'upload_date': '20150405',
'uploader': 'Heather',
},
},
# Crooks and Liars external embed
{
'url': 'http://theothermccain.com/2010/02/02/video-proves-that-bill-kristol-has-been-watching-glenn-beck/comment-page-1/',
'info_dict': {
'id': 'MTE3MjUtMzQ2MzA',
'ext': 'mp4',
'title': 'md5:5e3662a81a4014d24c250d76d41a08d5',
'description': 'md5:9b8e9542d6c3c5de42d6451b7d780cec',
'timestamp': 1265032391,
'upload_date': '20100201',
'uploader': 'Heather',
},
},
# NBC Sports vplayer embed
{
'url': 'http://www.riderfans.com/forum/showthread.php?121827-Freeman&s=e98fa1ea6dc08e886b1678d35212494a',
'info_dict': {
'id': 'ln7x1qSThw4k',
'ext': 'flv',
'title': "PFT Live: New leader in the 'new-look' defense",
'description': 'md5:65a19b4bbfb3b0c0c5768bed1dfad74e',
},
},
# UDN embed
{
'url': 'http://www.udn.com/news/story/7314/822787',
'md5': 'fd2060e988c326991037b9aff9df21a6',
'info_dict': {
'id': '300346',
'ext': 'mp4',
'title': '中一中男師變性 全校師生力挺',
'thumbnail': 're:^https?://.*\.jpg$',
}
},
# Ooyala embed
{
'url': 'http://www.businessinsider.com/excel-index-match-vlookup-video-how-to-2015-2?IR=T',
'info_dict': {
'id': '50YnY4czr4ms1vJ7yz3xzq0excz_pUMs',
'ext': 'mp4',
'description': 'VIDEO: Index/Match versus VLOOKUP.',
'title': 'This is what separates the Excel masters from the wannabes',
},
'params': {
# m3u8 downloads
'skip_download': True,
}
},
# Contains a SMIL manifest
{
'url': 'http://www.telewebion.com/fa/1263668/%D9%82%D8%B1%D8%B9%D9%87%E2%80%8C%DA%A9%D8%B4%DB%8C-%D9%84%DB%8C%DA%AF-%D9%82%D9%87%D8%B1%D9%85%D8%A7%D9%86%D8%A7%D9%86-%D8%A7%D8%B1%D9%88%D9%BE%D8%A7/%2B-%D9%81%D9%88%D8%AA%D8%A8%D8%A7%D9%84.html',
'info_dict': {
'id': 'file',
'ext': 'flv',
'title': '+ Football: Lottery Champions League Europe',
'uploader': 'www.telewebion.com',
},
'params': {
# rtmpe downloads
'skip_download': True,
}
},
# Brightcove URL in single quotes
{
'url': 'http://www.sportsnet.ca/baseball/mlb/sn-presents-russell-martin-world-citizen/',
'md5': '4ae374f1f8b91c889c4b9203c8c752af',
'info_dict': {
'id': '4255764656001',
'ext': 'mp4',
'title': 'SN Presents: Russell Martin, World Citizen',
'description': 'To understand why he was the Toronto Blue Jays’ top off-season priority is to appreciate his background and upbringing in Montreal, where he first developed his baseball skills. Written and narrated by Stephen Brunt.',
'uploader': 'Rogers Sportsnet',
},
},
# Dailymotion Cloud video
{
'url': 'http://replay.publicsenat.fr/vod/le-debat/florent-kolandjian,dominique-cena,axel-decourtye,laurence-abeille,bruno-parmentier/175910',
'md5': '49444254273501a64675a7e68c502681',
'info_dict': {
'id': '5585de919473990de4bee11b',
'ext': 'mp4',
'title': 'Le débat',
'thumbnail': 're:^https?://.*\.jpe?g$',
}
},
# OnionStudios embed
{
'url': 'http://www.clickhole.com/video/dont-understand-bitcoin-man-will-mumble-explanatio-2537',
'info_dict': {
'id': '2855',
'ext': 'mp4',
'title': 'Don’t Understand Bitcoin? This Man Will Mumble An Explanation At You',
'thumbnail': 're:^https?://.*\.jpe?g$',
'uploader': 'ClickHole',
'uploader_id': 'clickhole',
}
},
# SnagFilms embed
{
'url': 'http://whilewewatch.blogspot.ru/2012/06/whilewewatch-whilewewatch-gripping.html',
'info_dict': {
'id': '74849a00-85a9-11e1-9660-123139220831',
'ext': 'mp4',
'title': '#whilewewatch',
}
},
# AdobeTVVideo embed
{
'url': 'https://helpx.adobe.com/acrobat/how-to/new-experience-acrobat-dc.html?set=acrobat--get-started--essential-beginners',
'md5': '43662b577c018ad707a63766462b1e87',
'info_dict': {
'id': '2456',
'ext': 'mp4',
'title': 'New experience with Acrobat DC',
'description': 'New experience with Acrobat DC',
'duration': 248.667,
},
},
# ScreenwaveMedia embed
{
'url': 'http://www.thecinemasnob.com/the-cinema-snob/a-nightmare-on-elm-street-2-freddys-revenge1',
'md5': '24ace5baba0d35d55c6810b51f34e9e0',
'info_dict': {
'id': 'cinemasnob-55d26273809dd',
'ext': 'mp4',
'title': 'cinemasnob',
},
}
]
def report_following_redirect(self, new_url):
"""Report information extraction."""
self._downloader.to_screen('[redirect] Following redirect to %s' % new_url)
def _extract_rss(self, url, video_id, doc):
playlist_title = doc.find('./channel/title').text
playlist_desc_el = doc.find('./channel/description')
playlist_desc = None if playlist_desc_el is None else playlist_desc_el.text
entries = []
for it in doc.findall('./channel/item'):
next_url = xpath_text(it, 'link', fatal=False)
if not next_url:
enclosure_nodes = it.findall('./enclosure')
for e in enclosure_nodes:
next_url = e.attrib.get('url')
if next_url:
break
if not next_url:
continue
entries.append({
'_type': 'url',
'url': next_url,
'title': it.find('title').text,
})
return {
'_type': 'playlist',
'id': url,
'title': playlist_title,
'description': playlist_desc,
'entries': entries,
}
def _extract_camtasia(self, url, video_id, webpage):
""" Returns None if no camtasia video can be found. """
camtasia_cfg = self._search_regex(
r'fo\.addVariable\(\s*"csConfigFile",\s*"([^"]+)"\s*\);',
webpage, 'camtasia configuration file', default=None)
if camtasia_cfg is None:
return None
title = self._html_search_meta('DC.title', webpage, fatal=True)
camtasia_url = compat_urlparse.urljoin(url, camtasia_cfg)
camtasia_cfg = self._download_xml(
camtasia_url, video_id,
note='Downloading camtasia configuration',
errnote='Failed to download camtasia configuration')
fileset_node = camtasia_cfg.find('./playlist/array/fileset')
entries = []
for n in fileset_node.getchildren():
url_n = n.find('./uri')
if url_n is None:
continue
entries.append({
'id': os.path.splitext(url_n.text.rpartition('/')[2])[0],
'title': '%s - %s' % (title, n.tag),
'url': compat_urlparse.urljoin(url, url_n.text),
'duration': float_or_none(n.find('./duration').text),
})
return {
'_type': 'playlist',
'entries': entries,
'title': title,
}
def _real_extract(self, url):
if url.startswith('//'):
return {
'_type': 'url',
'url': self.http_scheme() + url,
}
parsed_url = compat_urlparse.urlparse(url)
if not parsed_url.scheme:
default_search = self._downloader.params.get('default_search')
if default_search is None:
default_search = 'fixup_error'
if default_search in ('auto', 'auto_warning', 'fixup_error'):
if '/' in url:
self._downloader.report_warning('The url doesn\'t specify the protocol, trying with http')
return self.url_result('http://' + url)
elif default_search != 'fixup_error':
if default_search == 'auto_warning':
if re.match(r'^(?:url|URL)$', url):
raise ExtractorError(
'Invalid URL: %r . Call youtube-dl like this: youtube-dl -v "https://www.youtube.com/watch?v=BaW_jenozKc" ' % url,
expected=True)
else:
self._downloader.report_warning(
'Falling back to youtube search for %s . Set --default-search "auto" to suppress this warning.' % url)
return self.url_result('ytsearch:' + url)
if default_search in ('error', 'fixup_error'):
raise ExtractorError(
'%r is not a valid URL. '
'Set --default-search "ytsearch" (or run youtube-dl "ytsearch:%s" ) to search YouTube'
% (url, url), expected=True)
else:
if ':' not in default_search:
default_search += ':'
return self.url_result(default_search + url)
url, smuggled_data = unsmuggle_url(url)
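        # Smuggled data, if present, carries hints from the caller: a forced
        # video id and/or a flag that the URL was intentionally passed to the
        # generic extractor.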
force_videoid = None
is_intentional = smuggled_data and smuggled_data.get('to_generic')
if smuggled_data and 'force_videoid' in smuggled_data:
force_videoid = smuggled_data['force_videoid']
video_id = force_videoid
else:
video_id = compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
self.to_screen('%s: Requesting header' % video_id)
head_req = HEADRequest(url)
head_response = self._request_webpage(
head_req, video_id,
note=False, errnote='Could not send HEAD request to %s' % url,
fatal=False)
if head_response is not False:
# Check for redirect
new_url = head_response.geturl()
if url != new_url:
self.report_following_redirect(new_url)
if force_videoid:
new_url = smuggle_url(
new_url, {'force_videoid': force_videoid})
return self.url_result(new_url)
full_response = None
if head_response is False:
request = compat_urllib_request.Request(url)
request.add_header('Accept-Encoding', '*')
full_response = self._request_webpage(request, video_id)
head_response = full_response
# Check for direct link to a video
content_type = head_response.headers.get('Content-Type', '')
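        # audio/*, video/* and application/ogg Content-Types are treated as
        # direct links to a media file.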
m = re.match(r'^(?P<type>audio|video|application(?=/ogg$))/(?P<format_id>.+)$', content_type)
if m:
upload_date = unified_strdate(
head_response.headers.get('Last-Modified'))
return {
'id': video_id,
'title': compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0]),
'direct': True,
'formats': [{
'format_id': m.group('format_id'),
'url': url,
'vcodec': 'none' if m.group('type') == 'audio' else None
}],
'upload_date': upload_date,
}
if not self._downloader.params.get('test', False) and not is_intentional:
force = self._downloader.params.get('force_generic_extractor', False)
self._downloader.report_warning(
'%s on generic information extractor.' % ('Forcing' if force else 'Falling back'))
if not full_response:
request = compat_urllib_request.Request(url)
            # Some webservers may serve compressed content of rather big size (e.g. gzipped flac)
            # making it impossible to download only a chunk of the file (yet we need only 512kB to
            # test whether it's HTML or not). According to youtube-dl's default Accept-Encoding
            # that will always result in downloading the whole file, which is not desirable.
            # Therefore for the extraction pass we have to override Accept-Encoding to any in order
            # to accept raw bytes and be able to download only a chunk.
            # It may be better to solve this by checking Content-Type for application/octet-stream
            # after the HEAD request finishes, but it is not clear whether we can rely on this.
request.add_header('Accept-Encoding', '*')
full_response = self._request_webpage(request, video_id)
# Maybe it's a direct link to a video?
# Be careful not to download the whole thing!
first_bytes = full_response.read(512)
if not is_html(first_bytes):
self._downloader.report_warning(
'URL could be a direct video link, returning it as such.')
upload_date = unified_strdate(
head_response.headers.get('Last-Modified'))
return {
'id': video_id,
'title': compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0]),
'direct': True,
'url': url,
'upload_date': upload_date,
}
webpage = self._webpage_read_content(
full_response, url, video_id, prefix=first_bytes)
self.report_extraction(video_id)
# Is it an RSS feed, a SMIL file or a XSPF playlist?
try:
doc = parse_xml(webpage)
if doc.tag == 'rss':
return self._extract_rss(url, video_id, doc)
elif re.match(r'^(?:{[^}]+})?smil$', doc.tag):
return self._parse_smil(doc, url, video_id)
elif doc.tag == '{http://xspf.org/ns/0/}playlist':
return self.playlist_result(self._parse_xspf(doc, video_id), video_id)
except compat_xml_parse_error:
pass
# Is it a Camtasia project?
camtasia_res = self._extract_camtasia(url, video_id, webpage)
if camtasia_res is not None:
return camtasia_res
# Sometimes embedded video player is hidden behind percent encoding
# (e.g. https://github.com/rg3/youtube-dl/issues/2448)
# Unescaping the whole page allows to handle those cases in a generic way
webpage = compat_urllib_parse_unquote(webpage)
# it's tempting to parse this further, but you would
# have to take into account all the variations like
# Video Title - Site Name
# Site Name | Video Title
# Video Title - Tagline | Site Name
# and so on and so forth; it's just not practical
video_title = self._html_search_regex(
r'(?s)<title>(.*?)</title>', webpage, 'video title',
default='video')
# Try to detect age limit automatically
age_limit = self._rta_search(webpage)
# And then there are the jokers who advertise that they use RTA,
# but actually don't.
AGE_LIMIT_MARKERS = [
r'Proudly Labeled <a href="http://www.rtalabel.org/" title="Restricted to Adults">RTA</a>',
]
if any(re.search(marker, webpage) for marker in AGE_LIMIT_MARKERS):
age_limit = 18
# video uploader is domain name
video_uploader = self._search_regex(
r'^(?:https?://)?([^/]*)/.*', url, 'video uploader')
        # Helper method: wrap matched embed URLs into url_result entries and return them as a playlist
def _playlist_from_matches(matches, getter=None, ie=None):
urlrs = orderedSet(
self.url_result(self._proto_relative_url(getter(m) if getter else m), ie)
for m in matches)
return self.playlist_result(
urlrs, playlist_id=video_id, playlist_title=video_title)
# Look for BrightCove:
bc_urls = BrightcoveIE._extract_brightcove_urls(webpage)
if bc_urls:
self.to_screen('Brightcove video detected.')
entries = [{
'_type': 'url',
'url': smuggle_url(bc_url, {'Referer': url}),
'ie_key': 'Brightcove'
} for bc_url in bc_urls]
return {
'_type': 'playlist',
'title': video_title,
'id': video_id,
'entries': entries,
}
# Look for embedded rtl.nl player
matches = re.findall(
r'<iframe[^>]+?src="((?:https?:)?//(?:www\.)?rtl\.nl/system/videoplayer/[^"]+(?:video_)?embed[^"]+)"',
webpage)
if matches:
return _playlist_from_matches(matches, ie='RtlNl')
vimeo_url = VimeoIE._extract_vimeo_url(url, webpage)
if vimeo_url is not None:
return self.url_result(vimeo_url)
vid_me_embed_url = self._search_regex(
r'src=[\'"](https?://vid\.me/[^\'"]+)[\'"]',
webpage, 'vid.me embed', default=None)
if vid_me_embed_url is not None:
return self.url_result(vid_me_embed_url, 'Vidme')
# Look for embedded YouTube player
matches = re.findall(r'''(?x)
(?:
<iframe[^>]+?src=|
data-video-url=|
<embed[^>]+?src=|
embedSWF\(?:\s*|
new\s+SWFObject\(
)
(["\'])
(?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
(?:embed|v|p)/.+?)
\1''', webpage)
if matches:
return _playlist_from_matches(
matches, lambda m: unescapeHTML(m[1]))
# Look for lazyYT YouTube embed
matches = re.findall(
r'class="lazyYT" data-youtube-id="([^"]+)"', webpage)
if matches:
return _playlist_from_matches(matches, lambda m: unescapeHTML(m))
# Look for embedded Dailymotion player
matches = re.findall(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.com/embed/video/.+?)\1', webpage)
if matches:
return _playlist_from_matches(
matches, lambda m: unescapeHTML(m[1]))
# Look for embedded Dailymotion playlist player (#3822)
m = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.[a-z]{2,3}/widget/jukebox\?.+?)\1', webpage)
if m:
playlists = re.findall(
r'list\[\]=/playlist/([^/]+)/', unescapeHTML(m.group('url')))
if playlists:
return _playlist_from_matches(
playlists, lambda p: '//dailymotion.com/playlist/%s' % p)
# Look for embedded Wistia player
match = re.search(
r'<(?:meta[^>]+?content|iframe[^>]+?src)=(["\'])(?P<url>(?:https?:)?//(?:fast\.)?wistia\.net/embed/iframe/.+?)\1', webpage)
if match:
embed_url = self._proto_relative_url(
unescapeHTML(match.group('url')))
return {
'_type': 'url_transparent',
'url': embed_url,
'ie_key': 'Wistia',
'uploader': video_uploader,
'title': video_title,
'id': video_id,
}
match = re.search(r'(?:id=["\']wistia_|data-wistia-?id=["\']|Wistia\.embed\(["\'])(?P<id>[^"\']+)', webpage)
if match:
return {
'_type': 'url_transparent',
'url': 'http://fast.wistia.net/embed/iframe/{0:}'.format(match.group('id')),
'ie_key': 'Wistia',
'uploader': video_uploader,
'title': video_title,
'id': match.group('id')
}
# Look for embedded blip.tv player
bliptv_url = BlipTVIE._extract_url(webpage)
if bliptv_url:
return self.url_result(bliptv_url, 'BlipTV')
# Look for SVT player
svt_url = SVTIE._extract_url(webpage)
if svt_url:
return self.url_result(svt_url, 'SVT')
# Look for embedded condenast player
matches = re.findall(
r'<iframe\s+(?:[a-zA-Z-]+="[^"]+"\s+)*?src="(https?://player\.cnevids\.com/embed/[^"]+")',
webpage)
if matches:
return {
'_type': 'playlist',
'entries': [{
'_type': 'url',
'ie_key': 'CondeNast',
'url': ma,
} for ma in matches],
'title': video_title,
'id': video_id,
}
# Look for Bandcamp pages with custom domain
mobj = re.search(r'<meta property="og:url"[^>]*?content="(.*?bandcamp\.com.*?)"', webpage)
if mobj is not None:
burl = unescapeHTML(mobj.group(1))
# Don't set the extractor because it can be a track url or an album
return self.url_result(burl)
# Look for embedded Vevo player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:cache\.)?vevo\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for embedded Viddler player
mobj = re.search(
r'<(?:iframe[^>]+?src|param[^>]+?value)=(["\'])(?P<url>(?:https?:)?//(?:www\.)?viddler\.com/(?:embed|player)/.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for NYTimes player
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//graphics8\.nytimes\.com/bcvideo/[^/]+/iframe/embed\.html.+?)\1>',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for Libsyn player
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//html5-player\.libsyn\.com/embed/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for Ooyala videos
mobj = (re.search(r'player\.ooyala\.com/[^"?]+[?#][^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage) or
re.search(r'OO\.Player\.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage) or
re.search(r'SBN\.VideoLinkset\.ooyala\([\'"](?P<ec>.{32})[\'"]\)', webpage) or
re.search(r'data-ooyala-video-id\s*=\s*[\'"](?P<ec>.{32})[\'"]', webpage))
if mobj is not None:
return OoyalaIE._build_url_result(mobj.group('ec'))
# Look for multiple Ooyala embeds on SBN network websites
mobj = re.search(r'SBN\.VideoLinkset\.entryGroup\((\[.*?\])', webpage)
if mobj is not None:
embeds = self._parse_json(mobj.group(1), video_id, fatal=False)
if embeds:
return _playlist_from_matches(
embeds, getter=lambda v: OoyalaIE._url_for_embed_code(v['provider_video_id']), ie='Ooyala')
# Look for Aparat videos
mobj = re.search(r'<iframe .*?src="(http://www\.aparat\.com/video/[^"]+)"', webpage)
if mobj is not None:
return self.url_result(mobj.group(1), 'Aparat')
# Look for MPORA videos
mobj = re.search(r'<iframe .*?src="(http://mpora\.(?:com|de)/videos/[^"]+)"', webpage)
if mobj is not None:
return self.url_result(mobj.group(1), 'Mpora')
# Look for embedded NovaMov-based player
mobj = re.search(
r'''(?x)<(?:pagespeed_)?iframe[^>]+?src=(["\'])
(?P<url>http://(?:(?:embed|www)\.)?
(?:novamov\.com|
nowvideo\.(?:ch|sx|eu|at|ag|co)|
videoweed\.(?:es|com)|
movshare\.(?:net|sx|ag)|
divxstage\.(?:eu|net|ch|co|at|ag))
/embed\.php.+?)\1''', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for embedded Facebook player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https://www\.facebook\.com/video/embed.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Facebook')
# Look for embedded VK player
mobj = re.search(r'<iframe[^>]+?src=(["\'])(?P<url>https?://vk\.com/video_ext\.php.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'VK')
# Look for embedded ivi player
mobj = re.search(r'<embed[^>]+?src=(["\'])(?P<url>https?://(?:www\.)?ivi\.ru/video/player.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Ivi')
# Look for embedded Huffington Post player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed\.live\.huffingtonpost\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'HuffPost')
# Look for embed.ly
mobj = re.search(r'class=["\']embedly-card["\'][^>]href=["\'](?P<url>[^"\']+)', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
mobj = re.search(r'class=["\']embedly-embed["\'][^>]src=["\'][^"\']*url=(?P<url>[^&]+)', webpage)
if mobj is not None:
return self.url_result(compat_urllib_parse_unquote(mobj.group('url')))
# Look for funnyordie embed
matches = re.findall(r'<iframe[^>]+?src="(https?://(?:www\.)?funnyordie\.com/embed/[^"]+)"', webpage)
if matches:
return _playlist_from_matches(
matches, getter=unescapeHTML, ie='FunnyOrDie')
# Look for BBC iPlayer embed
matches = re.findall(r'setPlaylist\("(https?://www\.bbc\.co\.uk/iplayer/[^/]+/[\da-z]{8})"\)', webpage)
if matches:
return _playlist_from_matches(matches, ie='BBCCoUk')
# Look for embedded RUTV player
rutv_url = RUTVIE._extract_url(webpage)
if rutv_url:
return self.url_result(rutv_url, 'RUTV')
# Look for embedded TVC player
tvc_url = TVCIE._extract_url(webpage)
if tvc_url:
return self.url_result(tvc_url, 'TVC')
# Look for embedded SportBox player
sportbox_urls = SportBoxEmbedIE._extract_urls(webpage)
if sportbox_urls:
return _playlist_from_matches(sportbox_urls, ie='SportBoxEmbed')
# Look for embedded PornHub player
pornhub_url = PornHubIE._extract_url(webpage)
if pornhub_url:
return self.url_result(pornhub_url, 'PornHub')
# Look for embedded XHamster player
xhamster_urls = XHamsterEmbedIE._extract_urls(webpage)
if xhamster_urls:
return _playlist_from_matches(xhamster_urls, ie='XHamsterEmbed')
# Look for embedded Tvigle player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//cloud\.tvigle\.ru/video/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Tvigle')
# Look for embedded TED player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed(?:-ssl)?\.ted\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'TED')
# Look for embedded Ustream videos
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>http://www\.ustream\.tv/embed/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Ustream')
# Look for embedded arte.tv player
mobj = re.search(
r'<script [^>]*?src="(?P<url>http://www\.arte\.tv/playerv2/embed[^"]+)"',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'ArteTVEmbed')
# Look for embedded francetv player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?://)?embed\.francetv\.fr/\?ue=.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for embedded smotri.com player
smotri_url = SmotriIE._extract_url(webpage)
if smotri_url:
return self.url_result(smotri_url, 'Smotri')
# Look for embedded Myvi.ru player
myvi_url = MyviIE._extract_url(webpage)
if myvi_url:
return self.url_result(myvi_url)
        # Look for embedded soundcloud player
mobj = re.search(
r'<iframe\s+(?:[a-zA-Z0-9_-]+="[^"]+"\s+)*src="(?P<url>https?://(?:w\.)?soundcloud\.com/player[^"]+)"',
webpage)
if mobj is not None:
url = unescapeHTML(mobj.group('url'))
return self.url_result(url)
# Look for embedded vulture.com player
mobj = re.search(
r'<iframe src="(?P<url>https?://video\.vulture\.com/[^"]+)"',
webpage)
if mobj is not None:
url = unescapeHTML(mobj.group('url'))
return self.url_result(url, ie='Vulture')
# Look for embedded mtvservices player
mobj = re.search(
r'<iframe src="(?P<url>https?://media\.mtvnservices\.com/embed/[^"]+)"',
webpage)
if mobj is not None:
url = unescapeHTML(mobj.group('url'))
return self.url_result(url, ie='MTVServicesEmbedded')
# Look for embedded yahoo player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://(?:screen|movies)\.yahoo\.com/.+?\.html\?format=embed)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Yahoo')
# Look for embedded sbs.com.au player
mobj = re.search(
r'''(?x)
(?:
<meta\s+property="og:video"\s+content=|
<iframe[^>]+?src=
)
(["\'])(?P<url>https?://(?:www\.)?sbs\.com\.au/ondemand/video/.+?)\1''',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'SBS')
# Look for embedded Cinchcast player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://player\.cinchcast\.com/.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Cinchcast')
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://m(?:lb)?\.mlb\.com/shared/video/embed/embed\.html\?.+?)\1',
webpage)
if not mobj:
mobj = re.search(
r'data-video-link=["\'](?P<url>http://m.mlb.com/video/[^"\']+)',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'MLB')
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>%s)\1' % CondeNastIE.EMBED_URL,
webpage)
if mobj is not None:
return self.url_result(self._proto_relative_url(mobj.group('url'), scheme='http:'), 'CondeNast')
mobj = re.search(
r'<iframe[^>]+src="(?P<url>https?://new\.livestream\.com/[^"]+/player[^"]+)"',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Livestream')
# Look for Zapiks embed
mobj = re.search(
r'<iframe[^>]+src="(?P<url>https?://(?:www\.)?zapiks\.fr/index\.php\?.+?)"', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Zapiks')
# Look for Kaltura embeds
mobj = (re.search(r"(?s)kWidget\.(?:thumb)?[Ee]mbed\(\{.*?'wid'\s*:\s*'_?(?P<partner_id>[^']+)',.*?'entry_id'\s*:\s*'(?P<id>[^']+)',", webpage) or
re.search(r'(?s)(["\'])(?:https?:)?//cdnapisec\.kaltura\.com/.*?(?:p|partner_id)/(?P<partner_id>\d+).*?\1.*?entry_id\s*:\s*(["\'])(?P<id>[^\2]+?)\2', webpage))
if mobj is not None:
return self.url_result('kaltura:%(partner_id)s:%(id)s' % mobj.groupdict(), 'Kaltura')
# Look for Eagle.Platform embeds
mobj = re.search(
r'<iframe[^>]+src="(?P<url>https?://.+?\.media\.eagleplatform\.com/index/player\?.+?)"', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'EaglePlatform')
# Look for ClipYou (uses Eagle.Platform) embeds
mobj = re.search(
r'<iframe[^>]+src="https?://(?P<host>media\.clipyou\.ru)/index/player\?.*\brecord_id=(?P<id>\d+).*"', webpage)
if mobj is not None:
return self.url_result('eagleplatform:%(host)s:%(id)s' % mobj.groupdict(), 'EaglePlatform')
# Look for Pladform embeds
mobj = re.search(
r'<iframe[^>]+src="(?P<url>https?://out\.pladform\.ru/player\?.+?)"', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Pladform')
# Look for Playwire embeds
mobj = re.search(
r'<script[^>]+data-config=(["\'])(?P<url>(?:https?:)?//config\.playwire\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for 5min embeds
mobj = re.search(
r'<meta[^>]+property="og:video"[^>]+content="https?://embed\.5min\.com/(?P<id>[0-9]+)/?', webpage)
if mobj is not None:
return self.url_result('5min:%s' % mobj.group('id'), 'FiveMin')
# Look for Crooks and Liars embeds
mobj = re.search(
r'<(?:iframe[^>]+src|param[^>]+value)=(["\'])(?P<url>(?:https?:)?//embed\.crooksandliars\.com/(?:embed|v)/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for NBC Sports VPlayer embeds
nbc_sports_url = NBCSportsVPlayerIE._extract_url(webpage)
if nbc_sports_url:
return self.url_result(nbc_sports_url, 'NBCSportsVPlayer')
# Look for UDN embeds
mobj = re.search(
r'<iframe[^>]+src="(?P<url>%s)"' % UDNEmbedIE._VALID_URL, webpage)
if mobj is not None:
return self.url_result(
compat_urlparse.urljoin(url, mobj.group('url')), 'UDNEmbed')
# Look for Senate ISVP iframe
senate_isvp_url = SenateISVPIE._search_iframe_url(webpage)
if senate_isvp_url:
return self.url_result(senate_isvp_url, 'SenateISVP')
# Look for Dailymotion Cloud videos
dmcloud_url = DailymotionCloudIE._extract_dmcloud_url(webpage)
if dmcloud_url:
return self.url_result(dmcloud_url, 'DailymotionCloud')
# Look for OnionStudios embeds
onionstudios_url = OnionStudiosIE._extract_url(webpage)
if onionstudios_url:
return self.url_result(onionstudios_url)
# Look for SnagFilms embeds
snagfilms_url = SnagFilmsEmbedIE._extract_url(webpage)
if snagfilms_url:
return self.url_result(snagfilms_url)
# Look for ScreenwaveMedia embeds
mobj = re.search(ScreenwaveMediaIE.EMBED_PATTERN, webpage)
if mobj is not None:
return self.url_result(unescapeHTML(mobj.group('url')), 'ScreenwaveMedia')
# Look for AdobeTVVideo embeds
mobj = re.search(
r'<iframe[^>]+src=[\'"]((?:https?:)?//video\.tv\.adobe\.com/v/\d+[^"]+)[\'"]',
webpage)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group(1))),
'AdobeTVVideo')
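        # Generic fallback: check_video() accepts YouTube URLs outright and
        # otherwise requires a path whose extension is not an image or
        # subtitle format; filter_video() applies it to a list of candidates.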
def check_video(vurl):
if YoutubeIE.suitable(vurl):
return True
vpath = compat_urlparse.urlparse(vurl).path
vext = determine_ext(vpath)
return '.' in vpath and vext not in ('swf', 'png', 'jpg', 'srt', 'sbv', 'sub', 'vtt', 'ttml')
def filter_video(urls):
return list(filter(check_video, urls))
# Start with something easy: JW Player in SWFObject
found = filter_video(re.findall(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage))
if not found:
# Look for gorilla-vid style embedding
found = filter_video(re.findall(r'''(?sx)
(?:
jw_plugins|
JWPlayerOptions|
jwplayer\s*\(\s*["'][^'"]+["']\s*\)\s*\.setup
)
.*?
['"]?file['"]?\s*:\s*["\'](.*?)["\']''', webpage))
if not found:
# Broaden the search a little bit
found = filter_video(re.findall(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage))
if not found:
# Broaden the findall a little bit: JWPlayer JS loader
found = filter_video(re.findall(
r'[^A-Za-z0-9]?(?:file|video_url)["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage))
if not found:
# Flow player
found = filter_video(re.findall(r'''(?xs)
flowplayer\("[^"]+",\s*
\{[^}]+?\}\s*,
\s*\{[^}]+? ["']?clip["']?\s*:\s*\{\s*
["']?url["']?\s*:\s*["']([^"']+)["']
''', webpage))
if not found:
# Cinerama player
found = re.findall(
r"cinerama\.embedPlayer\(\s*\'[^']+\',\s*'([^']+)'", webpage)
if not found:
# Try to find twitter cards info
found = filter_video(re.findall(
r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage))
if not found:
# We look for Open Graph info:
            # We have to match any number of spaces between elements, as some sites try to align them (e.g.: statigr.am)
m_video_type = re.findall(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
# We only look in og:video if the MIME type is a video, don't try if it's a Flash player:
if m_video_type is not None:
found = filter_video(re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage))
if not found:
# HTML5 video
found = re.findall(r'(?s)<video[^<]*(?:>.*?<source[^>]*)?\s+src=["\'](.*?)["\']', webpage)
if not found:
REDIRECT_REGEX = r'[0-9]{,2};\s*(?:URL|url)=\'?([^\'"]+)'
found = re.search(
r'(?i)<meta\s+(?=(?:[a-z-]+="[^"]+"\s+)*http-equiv="refresh")'
r'(?:[a-z-]+="[^"]+"\s+)*?content="%s' % REDIRECT_REGEX,
webpage)
if not found:
# Look also in Refresh HTTP header
refresh_header = head_response.headers.get('Refresh')
if refresh_header:
found = re.search(REDIRECT_REGEX, refresh_header)
if found:
new_url = compat_urlparse.urljoin(url, unescapeHTML(found.group(1)))
self.report_following_redirect(new_url)
return {
'_type': 'url',
'url': new_url,
}
if not found:
raise UnsupportedError(url)
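        # Build one entry per media URL found; SMIL manifests are expanded
        # into formats, an XSPF playlist is returned as a playlist result and
        # everything else is kept as a direct URL.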
entries = []
for video_url in found:
video_url = compat_urlparse.urljoin(url, video_url)
video_id = compat_urllib_parse_unquote(os.path.basename(video_url))
# Sometimes, jwplayer extraction will result in a YouTube URL
if YoutubeIE.suitable(video_url):
entries.append(self.url_result(video_url, 'Youtube'))
continue
# here's a fun little line of code for you:
video_id = os.path.splitext(video_id)[0]
ext = determine_ext(video_url)
if ext == 'smil':
entries.append({
'id': video_id,
'formats': self._extract_smil_formats(video_url, video_id),
'uploader': video_uploader,
'title': video_title,
'age_limit': age_limit,
})
elif ext == 'xspf':
return self.playlist_result(self._extract_xspf_playlist(video_url, video_id), video_id)
else:
entries.append({
'id': video_id,
'url': video_url,
'uploader': video_uploader,
'title': video_title,
'age_limit': age_limit,
})
if len(entries) == 1:
return entries[0]
else:
for num, e in enumerate(entries, start=1):
# 'url' results don't have a title
if e.get('title') is not None:
e['title'] = '%s (%d)' % (e['title'], num)
return {
'_type': 'playlist',
'entries': entries,
}
| unlicense | 6,896,504,099,982,509,000 | 39.562466 | 255 | 0.491798 | false |
vperron/sentry | src/sentry/api/client.py | 11 | 2049 | from __future__ import absolute_import
__all__ = ('ApiClient',)
from django.core.urlresolvers import resolve
from rest_framework.test import APIRequestFactory, force_authenticate
from sentry.utils import json
class ApiError(Exception):
def __init__(self, status_code, body):
self.status_code = status_code
self.body = body
def __unicode__(self):
return 'status=%s body=%s' % (self.status_code, self.body)
class ApiClient(object):
prefix = '/api/0'
ApiError = ApiError
def request(self, method, path, user, auth=None, params=None, data=None,
is_sudo=False):
full_path = self.prefix + path
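        # Resolve the path to the corresponding API view and call it directly
        # with a mocked request, so no real HTTP round-trip is made.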
resolver_match = resolve(full_path)
callback, callback_args, callback_kwargs = resolver_match
if data:
# we encode to ensure compatibility
data = json.loads(json.dumps(data))
rf = APIRequestFactory()
mock_request = getattr(rf, method.lower())(full_path, data)
mock_request.auth = auth
mock_request.user = user
mock_request.is_sudo = lambda: is_sudo
force_authenticate(mock_request, user, auth)
if params:
mock_request.GET._mutable = True
mock_request.GET.update(params)
mock_request.GET._mutable = False
if data:
mock_request.POST._mutable = True
mock_request.POST.update(data)
mock_request.POST._mutable = False
response = callback(mock_request, *callback_args, **callback_kwargs)
if 200 <= response.status_code < 400:
return response
raise self.ApiError(response.status_code, response.data)
def get(self, *args, **kwargs):
return self.request('GET', *args, **kwargs)
def post(self, *args, **kwargs):
return self.request('POST', *args, **kwargs)
def put(self, *args, **kwargs):
return self.request('PUT', *args, **kwargs)
def delete(self, *args, **kwargs):
return self.request('DELETE', *args, **kwargs)
| bsd-3-clause | 515,049,718,499,158,200 | 29.132353 | 76 | 0.612494 | false |
waterponey/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 10 | 26399 | """
Todo: cross-check the F-values with statsmodels
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from numpy.testing import run_module_suite
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (
chi2, f_classif, f_oneway, f_regression, mutual_info_classif,
mutual_info_regression, SelectPercentile, SelectKBest, SelectFpr,
SelectFdr, SelectFwe, GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
    # Smoke test f_oneway on integers: check that it does not raise casting
    # errors with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
    # test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
    # We use two centered variates so we have a simple relationship between
    # the F-score with variate centering and the F-score without it.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
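# Helper: check that a selector kept exactly the highest-scoring features
# according to its scores_ attribute.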
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_boundary_case_ch2():
# Test boundary case, and always aim to select 1 feature.
X = np.array([[10, 20], [20, 20], [20, 30]])
y = np.array([[1], [0], [0]])
scores, pvalues = chi2(X, y)
assert_array_almost_equal(scores, np.array([4., 0.71428571]))
assert_array_almost_equal(pvalues, np.array([0.04550026, 0.39802472]))
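    # Each selector below is expected to keep only the first feature, the
    # only one with a significant chi2 p-value.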
filter_fdr = SelectFdr(chi2, alpha=0.1)
filter_fdr.fit(X, y)
support_fdr = filter_fdr.get_support()
assert_array_equal(support_fdr, np.array([True, False]))
filter_kbest = SelectKBest(chi2, k=1)
filter_kbest.fit(X, y)
support_kbest = filter_kbest.get_support()
assert_array_equal(support_kbest, np.array([True, False]))
filter_percentile = SelectPercentile(chi2, percentile=50)
filter_percentile.fit(X, y)
support_percentile = filter_percentile.get_support()
assert_array_equal(support_percentile, np.array([True, False]))
filter_fpr = SelectFpr(chi2, alpha=0.1)
filter_fpr.fit(X, y)
support_fpr = filter_fpr.get_support()
assert_array_equal(support_fpr, np.array([True, False]))
filter_fwe = SelectFwe(chi2, alpha=0.1)
filter_fwe.fit(X, y)
support_fwe = filter_fwe.get_support()
assert_array_equal(support_fwe, np.array([True, False]))
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(100)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_scorefunc_multilabel():
    # Test whether k-best and percentiles work with multilabels with chi2.
X = np.array([[10000, 9999, 0], [100, 9999, 0], [1000, 99, 0]])
y = [[1, 1], [0, 1], [1, 0]]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (3, 2))
assert_not_in(0, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (3, 2))
assert_not_in(0, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([[0, 1, 2]])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
    # reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
def test_mutual_info_classif():
X, y = make_classification(n_samples=100, n_features=5,
n_informative=1, n_redundant=1,
n_repeated=0, n_classes=2,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
# Test in KBest mode.
univariate_filter = SelectKBest(mutual_info_classif, k=2)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
mutual_info_classif, mode='k_best', param=2).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(5)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
# Test in Percentile mode.
univariate_filter = SelectPercentile(mutual_info_classif, percentile=40)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
mutual_info_classif, mode='percentile', param=40).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(5)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
def test_mutual_info_regression():
X, y = make_regression(n_samples=100, n_features=10, n_informative=2,
shuffle=False, random_state=0, noise=10)
# Test in KBest mode.
univariate_filter = SelectKBest(mutual_info_regression, k=2)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
mutual_info_regression, mode='k_best', param=2).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(10)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
# Test in Percentile mode.
univariate_filter = SelectPercentile(mutual_info_regression, percentile=20)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(mutual_info_regression, mode='percentile',
param=20).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(10)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
if __name__ == '__main__':
run_module_suite()
| bsd-3-clause | -4,752,976,251,760,320,000 | 38.401493 | 80 | 0.616046 | false |
prgmrbill/limnoria-plugins | RelevantGif/__init__.py | 3 | 1170 | ###
# Copyright (c) 2015, lunchdump
# All Rights Reserved
#
###
"""
RelevantGif: Obtain one GIF related to a phrase
Powered By Giphy!
"""
import supybot
import supybot.world as world
# Use this for the version of this plugin. You may wish to put a CVS keyword
# in here if you're keeping the plugin in CVS or some similar system.
__version__ = "0.0.1"
# XXX Replace this with an appropriate author or supybot.Author instance.
__author__ = supybot.Author('Skeeter Alvarez', 'lunchdump',
'lunchdump@freenode')
# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {}
# This is a url where the most recent plugin package can be downloaded.
__url__ = ''
from . import config
from . import plugin
from imp import reload
# In case we're being reloaded.
reload(config)
reload(plugin)
# Add more reloads here if you add third-party modules and want them to be
# reloaded when this plugin is reloaded. Don't forget to import them as well!
if world.testing:
from . import test
Class = plugin.Class
configure = config.configure
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| mit | -4,281,590,213,308,496,000 | 23.893617 | 78 | 0.717094 | false |
zhuolinho/linphone | submodules/externals/antlr3/runtime/Python/tests/t022scopes.py | 17 | 3917 | import antlr3
import testbase
import unittest
import textwrap
class t022scopes(testbase.ANTLRTest):
def setUp(self):
self.compileGrammar()
def parserClass(self, base):
class TParser(base):
def emitErrorMessage(self, msg):
# report errors to /dev/null
pass
def reportError(self, re):
# no error recovery yet, just crash!
raise re
return TParser
def testa1(self):
cStream = antlr3.StringStream('foobar')
lexer = self.getLexer(cStream)
tStream = antlr3.CommonTokenStream(lexer)
parser = self.getParser(tStream)
parser.a()
def testb1(self):
cStream = antlr3.StringStream('foobar')
lexer = self.getLexer(cStream)
tStream = antlr3.CommonTokenStream(lexer)
parser = self.getParser(tStream)
try:
parser.b(False)
self.fail()
except antlr3.RecognitionException:
pass
def testb2(self):
cStream = antlr3.StringStream('foobar')
lexer = self.getLexer(cStream)
tStream = antlr3.CommonTokenStream(lexer)
parser = self.getParser(tStream)
parser.b(True)
def testc1(self):
cStream = antlr3.StringStream(
textwrap.dedent('''\
{
int i;
int j;
i = 0;
}
'''))
lexer = self.getLexer(cStream)
tStream = antlr3.CommonTokenStream(lexer)
parser = self.getParser(tStream)
symbols = parser.c()
self.failUnlessEqual(
symbols,
set(['i', 'j'])
)
def testc2(self):
cStream = antlr3.StringStream(
textwrap.dedent('''\
{
int i;
int j;
i = 0;
x = 4;
}
'''))
lexer = self.getLexer(cStream)
tStream = antlr3.CommonTokenStream(lexer)
parser = self.getParser(tStream)
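        # 'x' is assigned without being declared, so parsing is expected to
        # fail with a RuntimeError naming the offending identifier.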
try:
parser.c()
self.fail()
except RuntimeError, exc:
self.failUnlessEqual(exc.args[0], 'x')
def testd1(self):
cStream = antlr3.StringStream(
textwrap.dedent('''\
{
int i;
int j;
i = 0;
{
int i;
int x;
x = 5;
}
}
'''))
lexer = self.getLexer(cStream)
tStream = antlr3.CommonTokenStream(lexer)
parser = self.getParser(tStream)
symbols = parser.d()
self.failUnlessEqual(
symbols,
set(['i', 'j'])
)
def teste1(self):
cStream = antlr3.StringStream(
textwrap.dedent('''\
{ { { { 12 } } } }
'''))
lexer = self.getLexer(cStream)
tStream = antlr3.CommonTokenStream(lexer)
parser = self.getParser(tStream)
res = parser.e()
self.failUnlessEqual(res, 12)
def testf1(self):
cStream = antlr3.StringStream(
textwrap.dedent('''\
{ { { { 12 } } } }
'''))
lexer = self.getLexer(cStream)
tStream = antlr3.CommonTokenStream(lexer)
parser = self.getParser(tStream)
res = parser.f()
self.failUnlessEqual(res, None)
def testf2(self):
cStream = antlr3.StringStream(
textwrap.dedent('''\
{ { 12 } }
'''))
lexer = self.getLexer(cStream)
tStream = antlr3.CommonTokenStream(lexer)
parser = self.getParser(tStream)
res = parser.f()
self.failUnlessEqual(res, None)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -6,967,083,267,610,268,000 | 22.45509 | 52 | 0.482767 | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_05_01/operations/_express_route_circuits_operations.py | 1 | 53999 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitsOperations(object):
"""ExpressRouteCircuitsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
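    # Illustrative usage (resource names assumed): instances of this class are
    # normally reached through a NetworkManagementClient rather than built
    # directly, e.g.
    #   poller = network_client.express_route_circuits.begin_delete(
    #       "example-rg", "example-circuit")
    #   poller.result()  # block until the long-running delete completes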
def _delete_initial(
self,
resource_group_name, # type: str
circuit_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
circuit_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
circuit_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteCircuit"
"""Gets information about the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of express route circuit.
:type circuit_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuit, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.ExpressRouteCircuit
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuit"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
circuit_name, # type: str
parameters, # type: "_models.ExpressRouteCircuit"
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteCircuit"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuit"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ExpressRouteCircuit')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
circuit_name, # type: str
parameters, # type: "_models.ExpressRouteCircuit"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ExpressRouteCircuit"]
"""Creates or updates an express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:param parameters: Parameters supplied to the create or update express route circuit operation.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.ExpressRouteCircuit
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ExpressRouteCircuit or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_05_01.models.ExpressRouteCircuit]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuit"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'} # type: ignore
def update_tags(
self,
resource_group_name, # type: str
circuit_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteCircuit"
"""Updates an express route circuit tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:param parameters: Parameters supplied to update express route circuit tags.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuit, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.ExpressRouteCircuit
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuit"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'} # type: ignore
def _list_arp_table_initial(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
device_path, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ExpressRouteCircuitsArpTableListResult"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ExpressRouteCircuitsArpTableListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._list_arp_table_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitsArpTableListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_arp_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/arpTables/{devicePath}'} # type: ignore
def begin_list_arp_table(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
device_path, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ExpressRouteCircuitsArpTableListResult"]
"""Gets the currently advertised ARP table associated with the express route circuit in a resource
group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param device_path: The path of the device.
:type device_path: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ExpressRouteCircuitsArpTableListResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_05_01.models.ExpressRouteCircuitsArpTableListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitsArpTableListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._list_arp_table_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
device_path=device_path,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitsArpTableListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_arp_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/arpTables/{devicePath}'} # type: ignore
def _list_routes_table_initial(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
device_path, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ExpressRouteCircuitsRoutesTableListResult"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ExpressRouteCircuitsRoutesTableListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._list_routes_table_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_routes_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTables/{devicePath}'} # type: ignore
def begin_list_routes_table(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
device_path, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ExpressRouteCircuitsRoutesTableListResult"]
"""Gets the currently advertised routes table associated with the express route circuit in a
resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param device_path: The path of the device.
:type device_path: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ExpressRouteCircuitsRoutesTableListResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_05_01.models.ExpressRouteCircuitsRoutesTableListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitsRoutesTableListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._list_routes_table_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
device_path=device_path,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_routes_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTables/{devicePath}'} # type: ignore
def _list_routes_table_summary_initial(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
device_path, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ExpressRouteCircuitsRoutesTableSummaryListResult"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ExpressRouteCircuitsRoutesTableSummaryListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._list_routes_table_summary_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableSummaryListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_routes_table_summary_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTablesSummary/{devicePath}'} # type: ignore
def begin_list_routes_table_summary(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
device_path, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ExpressRouteCircuitsRoutesTableSummaryListResult"]
"""Gets the currently advertised routes table summary associated with the express route circuit in
a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param device_path: The path of the device.
:type device_path: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ExpressRouteCircuitsRoutesTableSummaryListResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_05_01.models.ExpressRouteCircuitsRoutesTableSummaryListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitsRoutesTableSummaryListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._list_routes_table_summary_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
device_path=device_path,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableSummaryListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_routes_table_summary.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTablesSummary/{devicePath}'} # type: ignore
def get_stats(
self,
resource_group_name, # type: str
circuit_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteCircuitStats"
"""Gets all the stats from an express route circuit in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitStats, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.ExpressRouteCircuitStats
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitStats"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.get_stats.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitStats', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_stats.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/stats'} # type: ignore
def get_peering_stats(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteCircuitStats"
"""Gets all stats from an express route circuit in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitStats, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.ExpressRouteCircuitStats
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitStats"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.get_peering_stats.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitStats', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_peering_stats.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/stats'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ExpressRouteCircuitListResult"]
"""Gets all the express route circuits in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCircuitListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_05_01.models.ExpressRouteCircuitListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits'} # type: ignore
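    # Illustrative iteration over the pager returned by list() (resource group
    # name assumed):
    #   for circuit in network_client.express_route_circuits.list("example-rg"):
    #       print(circuit.name)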
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ExpressRouteCircuitListResult"]
"""Gets all the express route circuits in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCircuitListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_05_01.models.ExpressRouteCircuitListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteCircuits'} # type: ignore
| mit | -9,008,447,738,317,798,000 | 49.325256 | 261 | 0.642956 | false |
Rademade/taiga-back | taiga/projects/votes/models.py | 3 | 2714 | # Copyright (C) 2014-2016 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2016 Jesús Espino <[email protected]>
# Copyright (C) 2014-2016 David Barragán <[email protected]>
# Copyright (C) 2014-2016 Alejandro Alonso <[email protected]>
# Copyright (C) 2014-2016 Anler Hernández <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Votes(models.Model):
content_type = models.ForeignKey("contenttypes.ContentType")
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey("content_type", "object_id")
count = models.PositiveIntegerField(null=False, blank=False, default=0, verbose_name=_("count"))
class Meta:
verbose_name = _("Votes")
verbose_name_plural = _("Votes")
unique_together = ("content_type", "object_id")
@property
def project(self):
if hasattr(self.content_object, 'project'):
return self.content_object.project
return None
    def __str__(self):
        return str(self.count)
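# Votes keeps a single denormalized counter per voted object (enforced by the
# unique_together on content_type/object_id), while Vote below stores one row
# per user and object, so reading the total never requires aggregating Vote rows.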
class Vote(models.Model):
content_type = models.ForeignKey("contenttypes.ContentType")
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey("content_type", "object_id")
user = models.ForeignKey(settings.AUTH_USER_MODEL, null=False, blank=False,
related_name="votes", verbose_name=_("user"))
created_date = models.DateTimeField(null=False, blank=False, auto_now_add=True,
verbose_name=_("created date"))
class Meta:
verbose_name = _("Vote")
verbose_name_plural = _("Votes")
unique_together = ("content_type", "object_id", "user")
@property
def project(self):
if hasattr(self.content_object, 'project'):
return self.content_object.project
return None
def __str__(self):
return self.user.get_full_name()
| agpl-3.0 | 7,772,643,659,870,915,000 | 39.462687 | 100 | 0.688676 | false |
haf/support-tools | googlecode-issues-exporter/issues.py | 4 | 22719 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading Google Code issues to an issue service.
"""
import collections
import datetime
import json
import re
import sys
import HTMLParser
class IdentityDict(dict):
def __missing__(self, key):
return key
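  # Example: IdentityDict()["[email protected]"] evaluates to "[email protected]",
  # so lookups for users without an explicit mapping fall back to the key itself.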
def TryFormatDate(date):
"""Attempt to clean up a timestamp date."""
try:
if date.endswith(":"):
date = date[:len(date) - 1]
datetime_version = datetime.datetime.strptime(
date, "%Y-%m-%dT%H:%M:%S.%fZ")
return str(datetime_version)
except ValueError as ve:
return date
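# Example of the intended normalization (timestamp value is illustrative):
#   TryFormatDate("2013-04-27T01:23:45.000Z") -> "2013-04-27 01:23:45"
# Inputs that do not match the expected format are returned unchanged.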
def WrapText(text, max):
"""Inserts a newline if any line of a file is > max chars.
Note that the newline is inserted at the first whitespace
character, so there may be lines longer than max.
"""
char_list = list(text)
last_linebreak = 0
for i in range(0, len(char_list)):
if char_list[i] == '\n' or char_list[i] == '\r':
last_linebreak = i
if i - last_linebreak > max and char_list[i] == ' ':
# Replace ' ' with '\n'
char_list.pop(i)
char_list.insert(i, '\n')
last_linebreak = i
return ''.join(char_list)
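# Example (illustrative): WrapText("word " * 30, 20) replaces the first space
# found more than 20 characters after the previous line break with a newline and
# repeats for the rest of the text; runs without spaces are left unwrapped even
# when they exceed the limit.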
class Error(Exception):
"""Base error class."""
class InvalidUserError(Error):
"""Error for an invalid user."""
class ProjectNotFoundError(Error):
"""Error for a non-existent project."""
class ServiceError(Error):
"""Error when communicating with the issue or user service."""
class UserService(object):
"""Abstract user operations.
Handles user operations on an user API.
"""
def IsUser(self, username):
"""Checks if the user exists.
Args:
username: The username to check.
Returns:
True if the username exists.
"""
raise NotImplementedError()
class GoogleCodeIssue(object):
"""Google Code issue.
Handles parsing and viewing a Google Code issue.
"""
def __init__(self, issue, project_name, user_map):
"""Initialize the GoogleCodeIssue.
Args:
issue: The Google Code Issue as a dictionary.
project_name: The name of the project the issue belongs to.
user_map: A map from Google Code usernames to issue service names.
"""
self._issue = issue
self._project_name = project_name
self._user_map = user_map
def GetProjectName(self):
"""Returns the project name."""
return self._project_name
def GetUserMap(self):
"""Returns the user map."""
return self._user_map
def GetOwner(self):
"""Get the owner username of a Google Code issue.
This will ALWAYS be the person requesting the issue export.
"""
return self._user_map["user_requesting_export"]
def GetContentUpdatedOn(self):
"""Get the date the content was last updated from a Google Code issue.
Returns:
The time stamp when the issue content was last updated
"""
return self._issue["updated"]
def GetCreatedOn(self):
"""Get the creation date from a Google Code issue.
Returns:
The time stamp when the issue content was created
"""
return self._issue["published"]
def GetId(self):
"""Get the id from a Google Code issue.
Returns:
The issue id
"""
return self._issue["id"]
def GetLabels(self):
"""Get the labels from a Google Code issue.
Returns:
A list of the labels of this issue.
"""
return self._issue.get("labels", [])
def GetKind(self):
"""Get the kind from a Google Code issue.
Returns:
The issue kind, if none is found defaults to 'Defect'
"""
types = [t for t in self.GetLabels() if "Type-" in t]
if types:
return types[0][len("Type-"):]
return "Defect"
def GetPriority(self):
"""Get the priority from a Google Code issue.
Returns:
The issue priority, if none is found defaults to 'Medium'
"""
priorities = [p for p in self.GetLabels() if "Priority-" in p]
if priorities:
return priorities[0][len("Priority-"):]
return "Medium"
def GetAuthor(self):
"""Get the author's username of a Google Code issue.
Returns:
The Google Code username that the issue is authored by or the
repository owner if no mapping or email address exists.
"""
if "author" not in self._issue:
return None
author = self._issue["author"]["name"]
return self._user_map[author]
def GetStatus(self):
"""Get the status from a Google Code issue.
Returns:
The issue status
"""
status = self._issue["status"].lower()
if status == "accepted":
status = "open"
return status
def GetTitle(self):
"""Get the title from a Google Code issue.
Returns:
The issue title
"""
return self._issue["title"]
def GetUpdatedOn(self):
"""Get the date the issue was last updated.
Returns:
The time stamp when the issue was last updated
"""
return self.GetCreatedOn()
def _GetDescription(self):
"""Returns the raw description of the issue.
Returns:
The raw issue description as a comment.
"""
return self._issue["comments"]["items"][0]
def GetComments(self):
"""Get the list of comments for the issue (if any).
Returns:
The list of comments attached to the issue
"""
# The 0th comment is the issue's description. Also, filter out
# any deleted comments.
comments = self._issue["comments"]["items"][1:]
return [c for c in comments if not "deletedBy" in c]
def IsOpen(self):
"""Check if an issue is marked as open.
Returns:
True if the issue was open.
"""
return "state" in self._issue and self._issue["state"] == "open"
def GetDescription(self):
"""Returns the Description of the issue."""
# Just return the description of the underlying comment.
googlecode_comment = GoogleCodeComment(self, self._GetDescription())
return googlecode_comment.GetDescription()
class GoogleCodeComment(object):
"""Google Code Comment.
Handles parsing and viewing a Google Code Comment.
"""
def __init__(self, googlecode_issue, comment):
"""Initialize the GoogleCodeComment.
Args:
googlecode_issue: A GoogleCodeIssue instance.
comment: The Google Code Comment as dictionary.
"""
self._comment = comment
self._googlecode_issue = googlecode_issue
def GetContent(self):
"""Get the content from a Google Code comment.
Returns:
The issue comment
"""
return self._comment["content"]
def GetCreatedOn(self):
"""Get the creation date from a Google Code comment.
Returns:
The time stamp when the issue comment content was created
"""
return self._comment["published"]
def GetId(self):
"""Get the id from a Google Code comment.
Returns:
The issue comment id
"""
return self._comment["id"]
def GetLabels(self):
"""Get the labels modified with the comment."""
if "updates" in self._comment:
if "labels" in self._comment["updates"]:
return self._comment["updates"]["labels"]
return []
def GetIssue(self):
"""Get the GoogleCodeIssue this comment belongs to.
Returns:
The issue id
"""
return self._googlecode_issue
def GetUpdatedOn(self):
"""Get the date the issue comment content was last updated.
Returns:
The time stamp when the issue comment content was last updated
"""
return self.GetCreatedOn()
def GetAuthor(self):
"""Get the author's username of a Google Code issue comment.
Returns:
The Google Code username that the issue comment is authored by or the
repository owner if no mapping or email address exists.
"""
if "author" not in self._comment:
return None
author = self._comment["author"]["name"]
return self.GetIssue().GetUserMap()[author]
def GetDescription(self):
"""Returns the Description of the comment."""
author = self.GetAuthor()
comment_date = self.GetCreatedOn()
comment_text = self.GetContent()
if not comment_text:
comment_text = "(No text was entered with this change)"
    # Google Takeout includes escaped HTML characters such as &gt; and &aacute;.
html_parser = HTMLParser.HTMLParser()
comment_text = html_parser.unescape(comment_text)
# Remove <b> tags, which Codesite automatically includes if issue body is
# based on a prompt.
comment_text = comment_text.replace("<b>", "")
comment_text = comment_text.replace("</b>", "")
comment_text = WrapText(comment_text, 82) # In case it was already wrapped...
body = "```\n" + comment_text + "\n```"
footer = "\n\nOriginal issue reported on code.google.com by `%s` on %s" % (
author, TryFormatDate(comment_date))
# Add label adjustments.
if self.GetLabels():
labels_added = []
labels_removed = []
for label in self.GetLabels():
if label.startswith("-"):
labels_removed.append(label[1:])
else:
labels_added.append(label)
footer += "\n"
if labels_added:
footer += "- **Labels added**: %s\n" % (", ".join(labels_added))
if labels_removed:
footer += "- **Labels removed**: %s\n" % (", ".join(labels_removed))
# Add references to attachments as appropriate.
attachmentLines = []
for attachment in self._comment["attachments"] if "attachments" in self._comment else []:
if "isDeleted" in attachment:
# Deleted attachments won't be found on the issue mirror.
continue
link = "https://storage.googleapis.com/google-code-attachments/%s/issue-%d/comment-%d/%s" % (
self.GetIssue().GetProjectName(), self.GetIssue().GetId(),
self.GetId(), attachment["fileName"])
def has_extension(extension):
return attachment["fileName"].lower().endswith(extension)
is_image_attachment = False
for extension in [".png", ".jpg", ".jpeg", ".bmp", ".tif", ".gif"]:
is_image_attachment |= has_extension(".png")
if is_image_attachment:
line = " * *Attachment: %s<br>*" % (
attachment["fileName"], attachment["fileName"], link)
else:
line = " * *Attachment: [%s](%s)*" % (attachment["fileName"], link)
attachmentLines.append(line)
if len(attachmentLines) > 0:
footer += "\n<hr>\n" + "\n".join(attachmentLines)
# Return the data to send to generate the comment.
return body + footer
class IssueService(object):
"""Abstract issue operations.
Handles creating and updating issues and comments on an user API.
"""
def GetIssues(self, state="open"):
"""Gets all of the issue for the repository.
Args:
      state: The state of the issues to fetch, either 'open' or 'closed'.
Returns:
The list of all of the issues for the given repository.
Raises:
IOError: An error occurred accessing previously created issues.
"""
raise NotImplementedError()
def CreateIssue(self, googlecode_issue):
"""Creates an issue.
Args:
googlecode_issue: An instance of GoogleCodeIssue
Returns:
The issue number of the new issue.
Raises:
ServiceError: An error occurred creating the issue.
"""
raise NotImplementedError()
def CloseIssue(self, issue_number):
"""Closes an issue.
Args:
issue_number: The issue number.
"""
raise NotImplementedError()
def CreateComment(self, issue_number, source_issue_id,
googlecode_comment, project_name):
"""Creates a comment on an issue.
Args:
issue_number: The issue number.
source_issue_id: The Google Code issue id.
googlecode_comment: An instance of GoogleCodeComment
project_name: The Google Code project name.
"""
raise NotImplementedError()
def LoadIssueData(issue_file_path, project_name):
"""Loads issue data from a file.
Args:
issue_file_path: path to the file to load
project_name: name of the project to load
Returns:
Issue data as a list of dictionaries.
Raises:
ProjectNotFoundError: the project_name was not found in the file.
"""
with open(issue_file_path) as user_file:
user_data = json.load(user_file)
user_projects = user_data["projects"]
for project in user_projects:
if project_name == project["name"]:
return project["issues"]["items"]
raise ProjectNotFoundError("Project %s not found" % project_name)
def LoadUserData(user_file_path, user_service):
"""Loads user data from a file. If not present, the user name will
just return whatever is passed to it.
Args:
user_file_path: path to the file to load
user_service: an instance of UserService
"""
identity_dict = IdentityDict()
if not user_file_path:
return identity_dict
with open(user_file_path) as user_data:
user_json = user_data.read()
user_map = json.loads(user_json)["users"]
for username in user_map.values():
if not user_service.IsUser(username):
raise InvalidUserError("%s is not a User" % username)
  identity_dict.update(user_map)
  return identity_dict
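# Illustrative note (added commentary, not part of the original tool): the file
# consumed above is assumed to look roughly like
#     {"users": {"alice@example.com": "alice-on-the-target-service"}}
# i.e. a top-level "users" object whose values are usernames that must pass
# user_service.IsUser(). The exact key format is an assumption; only the
# "users" key and the username values are implied by the code.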
class IssueExporter(object):
"""Issue Migration.
Handles the uploading issues from Google Code to an issue service.
"""
def __init__(self, issue_service, user_service, issue_json_data,
project_name, user_map):
"""Initialize the IssueExporter.
Args:
issue_service: An instance of IssueService.
user_service: An instance of UserService.
project_name: The name of the project to export to.
issue_json_data: A data object of issues from Google Code.
user_map: A map from user email addresses to service usernames.
"""
self._issue_service = issue_service
self._user_service = user_service
self._issue_json_data = issue_json_data
self._project_name = project_name
self._user_map = user_map
# Mapping from issue ID to the issue's metadata. This is used to verify
# consistency with an previous attempts at exporting issues.
self._previously_created_issues = {}
self._issue_total = 0
self._issue_number = 0
self._comment_number = 0
self._comment_total = 0
self._skipped_issues = 0
def Init(self):
"""Initialize the needed variables."""
self._GetAllPreviousIssues()
def _GetAllPreviousIssues(self):
"""Gets all previously uploaded issues."""
print "Getting any previously added issues..."
open_issues = self._issue_service.GetIssues("open")
closed_issues = self._issue_service.GetIssues("closed")
issues = open_issues + closed_issues
for issue in issues:
# Yes, GitHub's issues API has both ID and Number, and they are
# the opposite of what you think they are.
issue["number"] not in self._previously_created_issues or die(
"GitHub returned multiple issues with the same ID?")
self._previously_created_issues[issue["number"]] = {
"title": issue["title"],
"comment_count": issue["comments"],
}
def _UpdateProgressBar(self):
"""Update issue count 'feed'.
This displays the current status of the script to the user.
"""
feed_string = ("\rIssue: %d/%d -> Comment: %d/%d " %
(self._issue_number, self._issue_total,
self._comment_number, self._comment_total))
sys.stdout.write(feed_string)
sys.stdout.flush()
def _CreateIssue(self, googlecode_issue):
"""Converts an issue from Google Code to an issue service.
This will take the Google Code issue and create a corresponding issue on
the issue service. If the issue on Google Code was closed it will also
be closed on the issue service.
Args:
googlecode_issue: An instance of GoogleCodeIssue
Returns:
The issue number assigned by the service.
"""
return self._issue_service.CreateIssue(googlecode_issue)
  def _CreateComments(self, comments, issue_number, googlecode_issue):
    """Converts a list of issue comments from Google Code to an issue service.
    This will take a list of Google Code issue comments and create
    corresponding comments on an issue service for the given issue number.
    Args:
      comments: A list of comments (each comment is just a string).
      issue_number: The issue number.
      googlecode_issue: An instance of GoogleCodeIssue.
"""
self._comment_total = len(comments)
self._comment_number = 0
for comment in comments:
googlecode_comment = GoogleCodeComment(googlecode_issue, comment)
self._comment_number += 1
self._UpdateProgressBar()
self._issue_service.CreateComment(issue_number,
googlecode_issue.GetId(),
googlecode_comment,
self._project_name)
def Start(self):
"""The primary function that runs this script.
This will traverse the issues and attempt to create each issue and its
comments.
"""
print "Starting issue export for '%s'" % (self._project_name)
DELETED_ISSUE_PLACEHOLDER = GoogleCodeIssue(
{
"title": "[Issue Deleted]",
"id": -1,
"published": "NA",
"author": "NA",
"comments": {
"items": [{
"id": 0,
"author": {
"name": "NA",
},
"content": "...",
"published": "NA"}],
},
},
self._project_name, self._user_map)
# If there are existing issues, then confirm they exactly match the Google
# Code issues. Otherwise issue IDs will not match and/or there may be
# missing data.
self._AssertInGoodState()
self._issue_total = len(self._issue_json_data)
self._issue_number = 0
self._skipped_issues = 0
# ID of the last issue that was posted to the external service. Should
# be one less than the current issue to import.
last_issue_id = 0
for issue in self._issue_json_data:
googlecode_issue = GoogleCodeIssue(
issue, self._project_name, self._user_map)
self._issue_number += 1
self._UpdateProgressBar()
if googlecode_issue.GetId() in self._previously_created_issues:
self._skipped_issues += 1
last_issue_id = googlecode_issue.GetId()
continue
# If the Google Code JSON dump skipped any issues (e.g. they were deleted)
# then create placeholder issues so the ID count matches.
while int(googlecode_issue.GetId()) > int(last_issue_id) + 1:
last_issue_id = self._CreateIssue(DELETED_ISSUE_PLACEHOLDER)
print "\nCreating deleted issue placeholder for #%s" % (last_issue_id)
self._issue_service.CloseIssue(last_issue_id)
# Create the issue on the remote site. Verify that the issue number
# matches. Otherwise the counts will be off. e.g. a new GitHub issue
# was created during the export, so Google Code issue 100 refers to
# GitHub issue 101, and so on.
last_issue_id = self._CreateIssue(googlecode_issue)
if last_issue_id < 0:
continue
if int(last_issue_id) != int(googlecode_issue.GetId()):
error_message = (
"Google Code and GitHub issue numbers mismatch. Created\n"
"Google Code issue #%s, but it was saved as issue #%s." % (
googlecode_issue.GetId(), last_issue_id))
raise RuntimeError(error_message)
comments = googlecode_issue.GetComments()
self._CreateComments(comments, last_issue_id, googlecode_issue)
if not googlecode_issue.IsOpen():
self._issue_service.CloseIssue(last_issue_id)
if self._skipped_issues > 0:
      print ("\nSkipped %d/%d issues previously uploaded." %
(self._skipped_issues, self._issue_total))
def _AssertInGoodState(self):
"""Checks if the last issue exported is sound, otherwise raises an error.
Checks the existing issues that have been exported and confirms that it
matches the issue on Google Code. (Both Title and ID match.) It then
confirms that it has all of the expected comments, adding any missing ones
as necessary.
"""
if len(self._previously_created_issues) == 0:
return
print ("Existing issues detected for the repo. Likely due to a previous\n"
" run being aborted or killed. Checking consistency...")
# Get the last exported issue, and its dual on Google Code.
last_gh_issue_id = -1
for id in self._previously_created_issues:
if id > last_gh_issue_id:
last_gh_issue_id = id
last_gh_issue = self._previously_created_issues[last_gh_issue_id]
last_gc_issue = None
for issue in self._issue_json_data:
if int(issue["id"]) == int(last_gh_issue_id) and (
issue["title"] == last_gh_issue["title"]):
last_gc_issue = GoogleCodeIssue(issue,
self._project_name,
self._user_map)
break
if last_gc_issue is None:
raise RuntimeError(
"Unable to find Google Code issue #%s '%s'.\n"
" Were issues added to GitHub since last export attempt?" % (
last_gh_issue_id, last_gh_issue["title"]))
print "Last issue (#%s) matches. Checking comments..." % (last_gh_issue_id)
# Check comments. Add any missing ones as needed.
num_gc_issue_comments = len(last_gc_issue.GetComments())
if last_gh_issue["comment_count"] != num_gc_issue_comments:
print "GitHub issue has fewer comments than Google Code's. Fixing..."
for idx in range(last_gh_issue["comment_count"], num_gc_issue_comments):
comment = last_gc_issue.GetComments()[idx]
googlecode_comment = GoogleCodeComment(last_gc_issue, comment)
# issue_number == source_issue_id
self._issue_service.CreateComment(
int(last_gc_issue.GetId()), int(last_gc_issue.GetId()),
googlecode_comment, self._project_name)
print " Added comment #%s." % (idx + 1)
print "Done! Issue tracker now in expected state. Ready for more exports."
| apache-2.0 | 3,992,344,661,054,526,000 | 29.53629 | 99 | 0.639993 | false |
jrowan/zulip | zerver/lib/feedback.py | 3 | 3063 | from __future__ import absolute_import
from django.conf import settings
from django.core.mail import EmailMessage
from typing import Any, Mapping, Optional, Text
from zerver.lib.actions import internal_send_message
from zerver.lib.send_email import FromAddress
from zerver.lib.redis_utils import get_redis_client
from zerver.models import get_realm, get_system_bot, \
UserProfile, Realm
import time
client = get_redis_client()
def has_enough_time_expired_since_last_message(sender_email, min_delay):
# type: (Text, float) -> bool
# This function returns a boolean, but it also has the side effect
# of noting that a new message was received.
key = 'zilencer:feedback:%s' % (sender_email,)
t = int(time.time())
last_time = client.getset(key, t)
if last_time is None:
return True
delay = t - int(last_time)
return delay > min_delay
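# Added note (not in the original module): client.getset() above records the
# current timestamp on every call, so with min_delay=180 a message at t=0
# returns True, a second message 60 seconds later returns False, and a third
# message 200 seconds after that returns True again.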
def get_ticket_number():
# type: () -> int
num_file = '/var/tmp/.feedback-bot-ticket-number'
try:
ticket_number = int(open(num_file).read()) + 1
except Exception:
ticket_number = 1
open(num_file, 'w').write('%d' % (ticket_number,))
return ticket_number
def deliver_feedback_by_zulip(message):
# type: (Mapping[str, Any]) -> None
subject = "%s" % (message["sender_email"],)
if len(subject) > 60:
subject = subject[:57].rstrip() + "..."
content = u''
sender_email = message['sender_email']
# We generate ticket numbers if it's been more than a few minutes
# since their last message. This avoids some noise when people use
# enter-send.
need_ticket = has_enough_time_expired_since_last_message(sender_email, 180)
if need_ticket:
ticket_number = get_ticket_number()
content += '\n~~~'
content += '\nticket Z%03d (@support please ack)' % (ticket_number,)
content += '\nsender: %s' % (message['sender_full_name'],)
content += '\nemail: %s' % (sender_email,)
if 'sender_realm_str' in message:
content += '\nrealm: %s' % (message['sender_realm_str'],)
content += '\n~~~'
content += '\n\n'
content += message['content']
user_profile = get_system_bot(settings.FEEDBACK_BOT)
internal_send_message(user_profile.realm, settings.FEEDBACK_BOT,
"stream", settings.FEEDBACK_STREAM, subject, content)
def handle_feedback(event):
# type: (Mapping[str, Any]) -> None
if not settings.ENABLE_FEEDBACK:
return
if settings.FEEDBACK_EMAIL is not None:
to_email = settings.FEEDBACK_EMAIL
subject = "Zulip feedback from %s" % (event["sender_email"],)
content = event["content"]
from_email = '"%s" <%s>' % (event["sender_full_name"], FromAddress.SUPPORT)
headers = {'Reply-To': '"%s" <%s>' % (event["sender_full_name"], event["sender_email"])}
msg = EmailMessage(subject, content, from_email, [to_email], headers=headers)
msg.send()
if settings.FEEDBACK_STREAM is not None:
deliver_feedback_by_zulip(event)
| apache-2.0 | 3,693,579,660,881,846,000 | 35.464286 | 96 | 0.634672 | false |
opensourcemukul/conversioncodes | gdf2gml.py | 1 | 1091 | from Tkinter import *
from tkMessageBox import *
import tkFileDialog
from ctypes import *
## choose your gdf file
path = tkFileDialog.askopenfilename(
title='Open GDF File')
print path
f = open(path)
fo=open('C:/socialnw/fgdf.txt','w')
global arr
arr=[]
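# Added explanatory comments (not in the original script): the converter below
# assumes a GDF file whose first line is the node-definition header, followed by
# node rows whose first field is the node id and whose third field is the sex,
# then the literal header "edgedef>node1 VARCHAR,node2 VARCHAR", followed by
# edge rows whose first two fields are the source and target node ids. The two
# loops translate those sections into GML "node" and "edge" blocks.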
try:
for line in f:
arr[len(arr):]=[line]
i=1
fo.write('graph\n')
fo.write('[\n')
while arr[i] != 'edgedef>node1 VARCHAR,node2 VARCHAR\n':
string_this=arr[i].split(',')
iden=string_this[0]
sex=string_this[2]
fo.write('\tnode\n')
fo.write('\t[\n')
fo.write('\t\tid '+iden+'\n')
fo.write('\t\tsex "'+sex+'"\n')
fo.write('\t]\n')
i=i+1
print i
i=i+1
while i<len(arr):
string_this=arr[i].split(',')
iden_s=string_this[0]
iden_t=string_this[1]
fo.write('\tedge\n')
fo.write('\t[\n')
fo.write('\t\tsource '+iden_s+'\n')
fo.write('\t\ttarget '+iden_t+'\n')
fo.write('\t]\n')
i=i+1
fo.write(']\n')
finally:
f.close()
fo.close()
| gpl-2.0 | 7,312,440,405,422,122,000 | 21.729167 | 60 | 0.522456 | false |
IronLanguages/ironpython3 | Tests/test_isinstance2.py | 1 | 1308 | # Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
import sys
import unittest
class IsInstanceTest(unittest.TestCase):
def test_isinstance_metaclass(self):
class AlwaysFalse(type):
def __instancecheck__(cls, instance):
return False
class A(metaclass=AlwaysFalse):
pass
self.assertFalse(isinstance(int, A))
self.assertTrue(isinstance(A(), A)) # does not call __instancecheck__
class AlwaysTrue(type):
def __instancecheck__(cls, instance):
return True
class B(metaclass=AlwaysTrue):
pass
self.assertTrue(isinstance(int, B))
self.assertTrue(isinstance(B(), B)) # does not call __instancecheck__
def test_isinstance_bigint(self):
# check that isinstance(x, int) returns True on BigInteger values
l = sys.maxsize + 1
if sys.implementation.name == "ironpython":
# https://github.com/IronLanguages/ironpython3/issues/52
self.assertNotEqual(type(0), type(l))
self.assertTrue(isinstance(l, int))
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 8,127,862,250,860,583,000 | 31.7 | 77 | 0.632263 | false |
david672orford/pykarta | pykarta/maps/layers/tilesets_bing.py | 1 | 1991 | # pykarta/maps/layers/tilesets_bing.py
# Copyright 2013--2018, Trinity College
# Last modified: 26 April 2018
from tilesets_base import tilesets, MapTilesetRaster
import json
from pykarta.misc.http import simple_urlopen
from pykarta.maps.image_loaders import surface_from_file_data
#-----------------------------------------------------------------------------
# Microsoft Bing map layers
# See http://www.bingmapsportal.com/
# See http://www.microsoft.com/maps/product/terms.html
# OsmGpsMap does not document #W but implements it
# FIXME: add include=ImageryProviders to query and use result
#-----------------------------------------------------------------------------
class MapTilesetBing(MapTilesetRaster):
def __init__(self, key, metadata_url=None, **kwargs):
MapTilesetRaster.__init__(self, key, **kwargs)
self.metadata_url = metadata_url
def online_init(self):
url = self.metadata_url.replace("{api_key}", self.api_key)
response = simple_urlopen(url, extra_headers=self.extra_headers)
metadata = json.load(response)
print "Bing metadata:", json.dumps(metadata, indent=4, separators=(',', ': '))
resource = metadata['resourceSets'][0]['resources'][0]
url_template = resource['imageUrl'].replace("{subdomain}","{s}").replace("{culture}","en-us")
print "Bing URL template:", url_template
self.set_url_template(url_template)
self.subdomains = resource['imageUrlSubdomains']
self.zoom_min = resource['zoomMin']
self.zoom_max = resource['zoomMax']
#print "Bing zoom levels: %d thru %d" % (self.zoom_min, self.zoom_max)
self.attribution = surface_from_file_data(simple_urlopen(metadata['brandLogoUri']).read())
for our_layer_key, bing_layer_key in (
('road', 'Road'),
('aerial', 'Aerial'),
('aerial-with-labels', 'AerialWithLabels')
):
tilesets.append(MapTilesetBing('bing-%s' % our_layer_key,
metadata_url='http://dev.virtualearth.net/REST/v1/Imagery/Metadata/%s?key={api_key}' % bing_layer_key,
attribution="Bing",
api_key_name="bing"
))
| gpl-2.0 | 347,094,186,903,179,800 | 42.282609 | 104 | 0.665997 | false |
shreddd/newt-2.0 | account/adapters/template_adapter.py | 3 | 2347 | """Account Adapter Template File
IMPORTANT: NOT A FUNCTIONAL ADAPTER. FUNCTIONS MUST BE IMPLEMENTED
Notes:
- Each of the functions defined below must return a json serializable
object, json_response, or valid HttpResponse object
- A json_response creates an HttpResponse object given parameters:
- content: string with the contents of the response
- status: string with the status of the response
- status_code: HTTP status code
- error: string with the error message if there is one
"""
from common.response import json_response
import logging
import re
logger = logging.getLogger("newt." + __name__)
def get_user_info(user_name=None, uid=None):
"""Returns information about the user
Keyword arguments:
user_name -- username
uid -- user id
"""
pass
def get_group_info(group_name=None, gid=None):
"""Returns information about the group
Keyword arguments:
group_name -- group name
gid -- group id
"""
pass
"""A tuple list in the form of:
(
(compiled_regex_exp, associated_function, request_required),
...
)
Note: The compiled_regex_exp must have named groups corresponding to
the arguments of the associated_function
Note: if request_required is True, the associated_function must have
request as the first argument
Example:
patterns = (
(re.compile(r'/usage/(?P<path>.+)$'), get_usage, False),
(re.compile(r'/image/(?P<query>.+)$'), get_image, False),
(re.compile(r'/(?P<path>.+)$'), get_resource, False),
)
"""
patterns = (
)
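# Illustrative sketch (added commentary, not part of the original template): a
# concrete entry pairs a regex whose named groups match the keyword arguments of
# the associated function, for example:
#
#     def get_usage(path):
#         return json_response(content="usage for %s" % path)
#
#     patterns = (
#         (re.compile(r'/usage/(?P<path>.+)$'), get_usage, False),
#     )
#
# With such an entry, extras_router() below would route the query
# "/usage/home/alice" to get_usage(path="home/alice").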
def extras_router(request, query):
"""Maps a query to a function if the pattern matches and returns result
Keyword arguments:
request -- Django HttpRequest
query -- the query to be matched against
"""
for pattern, func, req in patterns:
match = pattern.match(query)
if match and req:
return func(request, **match.groupdict())
elif match:
return func(**match.groupdict())
# Returns an Unimplemented response if no pattern matches
return json_response(status="Unimplemented",
status_code=501,
error="",
content="query: %s" % query) | bsd-2-clause | -5,526,485,707,202,828,000 | 28.721519 | 75 | 0.622497 | false |
cloudify-cosmo/cloudify-manager | tests/integration_tests/tests/agentless_tests/test_environments.py | 1 | 41318 | import time
import pytest
from integration_tests import AgentlessTestCase
from integration_tests.tests.utils import get_resource as resource
from cloudify.models_states import DeploymentState
from cloudify_rest_client.exceptions import CloudifyClientError
pytestmark = pytest.mark.group_environments
@pytest.mark.usefixtures('cloudmock_plugin')
@pytest.mark.usefixtures('mock_workflows_plugin')
@pytest.mark.usefixtures('testmockoperations_plugin')
class EnvironmentTest(AgentlessTestCase):
def _deploy_main_environment(self, resource_path,
blueprint_id=None,
deployment_id=None):
dsl_path = resource(resource_path)
deployment, _ = self.deploy_application(dsl_path,
blueprint_id=blueprint_id,
deployment_id=deployment_id)
self.client.deployments.update_labels(
deployment.id,
[
{
'csys-obj-type': 'Environment'
},
]
)
return deployment
def _assert_main_environment_after_installation(self, environment_id,
deployment_status):
environment = self.client.deployments.get(environment_id)
# The environment itself is deployed and installed correctly
self.assertEqual(
environment.deployment_status,
deployment_status
)
def _assert_deployment_environment_attr(self,
deployment,
deployment_status,
sub_services_status=None,
sub_environments_status=None,
sub_services_count=0,
sub_environments_count=0):
self.assertEqual(
deployment.deployment_status,
deployment_status
)
self.assertEqual(
deployment.sub_services_status,
sub_services_status
)
self.assertEqual(
deployment.sub_environments_status,
sub_environments_status
)
self.assertEqual(
deployment.sub_services_count,
sub_services_count
)
self.assertEqual(
deployment.sub_environments_count,
sub_environments_count
)
def _verify_statuses_and_count_for_deployment(self,
deployment_id,
deployment_status,
sub_services_status=None,
sub_environments_status=None,
sub_services_count=0,
sub_environments_count=0):
deployment = self.client.deployments.get(deployment_id)
self._assert_deployment_environment_attr(
deployment,
deployment_status,
sub_services_status,
sub_environments_status,
sub_services_count,
sub_environments_count
)
def _attach_deployment_to_parents(self, deployment_id, parents_ids,
deployment_type):
if not parents_ids:
return
parents = []
for parent_id in parents_ids:
parents.append({'csys-obj-parent': parent_id})
labels = [{'csys-obj-type': deployment_type}]
labels.extend(parents)
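        # Descriptive note (added, not in the original test): for a single
        # parent id 'env-1' and deployment_type 'service', the labels sent
        # below would be
        # [{'csys-obj-type': 'service'}, {'csys-obj-parent': 'env-1'}].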
self.client.deployments.update_labels(deployment_id, labels)
def _deploy_deployment_to_environment(self,
environment,
resource_path,
deployment_type,
blueprint_id=None,
deployment_id=None,
install=False):
dsl_path = resource(resource_path)
if not install:
deployment = self.deploy(dsl_path,
blueprint_id=blueprint_id,
deployment_id=deployment_id)
else:
deployment, _ = self.deploy_application(dsl_path)
self._attach_deployment_to_parents(
deployment.id,
[environment.id],
deployment_type
)
return deployment
def _deploy_environment_with_two_levels(self, main_environment):
# # First environment
env1 = self._deploy_deployment_to_environment(
main_environment,
'dsl/simple_deployment.yaml',
'environment',
install=True
)
# # Second environment
env2 = self._deploy_deployment_to_environment(
main_environment,
'dsl/simple_deployment.yaml',
'environment',
install=True
)
# # Add service + environment to the env1
service1, environment1 = \
self._deploy_environment_with_service_and_environment(env1)
# # Add service + environment to the env2
service2, environment2 = \
self._deploy_environment_with_service_and_environment(env2)
return service1, environment1, service2, environment2
def _deploy_environment_with_three_levels(self, main_environment):
_, environment11, _, environment21 = \
self._deploy_environment_with_two_levels(main_environment)
service111, environment111 = \
self._deploy_environment_with_service_and_environment(
environment11
)
service211, environment211 = \
self._deploy_environment_with_service_and_environment(
environment21
)
return service111, environment111, service211, environment211
def _deploy_environment_with_service_and_environment(self,
main_environment):
service = self._deploy_deployment_to_environment(
main_environment,
'dsl/simple_deployment.yaml',
'service',
install=True
)
environment = self._deploy_deployment_to_environment(
main_environment,
'dsl/simple_deployment.yaml',
'environment',
install=True
)
main_environment = self.client.deployments.get(main_environment.id)
self._assert_deployment_environment_attr(
main_environment,
deployment_status=DeploymentState.GOOD,
sub_services_status=DeploymentState.GOOD,
sub_environments_status=DeploymentState.GOOD,
sub_services_count=1,
sub_environments_count=1
)
return service, environment
def _create_deployment_group_from_blueprint(self,
resource_path,
blueprint_id,
group_id,
group_size,
labels_to_add=None,
wait_on_labels_add=12):
# Upload group base blueprint
self.upload_blueprint_resource(
resource_path,
blueprint_id
)
# Handle group actions
self.client.deployment_groups.put(
group_id, blueprint_id=blueprint_id
)
self.client.deployment_groups.add_deployments(
group_id,
count=group_size
)
# Wait till the deployment created successfully before adding any
# labels in order to avoid any race condition
if labels_to_add:
time.sleep(wait_on_labels_add)
self.client.deployment_groups.put(
group_id,
labels=labels_to_add,
)
def _execute_workflow_on_group(self, group_id, workflow_id):
execution_group = self.client.execution_groups.start(
deployment_group_id=group_id,
workflow_id=workflow_id
)
self.wait_for_execution_to_end(execution_group, is_group=True)
def test_create_deployment_with_invalid_parent_label(self):
dsl_path = resource('dsl/basic.yaml')
deployment = self.deploy(dsl_path)
with self.assertRaises(CloudifyClientError):
self._attach_deployment_to_parents(
deployment.id,
['invalid_parent'],
'service'
)
def test_environment_with_cyclic_dependencies(self):
environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
environment.id, DeploymentState.GOOD
)
deployment = self._deploy_deployment_to_environment(
environment,
'dsl/simple_deployment.yaml',
'service'
)
with self.assertRaises(CloudifyClientError):
self._attach_deployment_to_parents(
environment.id,
[deployment.id],
'environment'
)
def test_environment_after_deploy_single_service(self):
environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
environment.id, DeploymentState.GOOD
)
self._deploy_deployment_to_environment(
environment,
'dsl/simple_deployment.yaml',
'service'
)
self._verify_statuses_and_count_for_deployment(
environment.id,
deployment_status=DeploymentState.REQUIRE_ATTENTION,
sub_services_status=DeploymentState.REQUIRE_ATTENTION,
sub_services_count=1
)
def test_environment_after_deploy_multiple_services(self):
environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
environment.id, DeploymentState.GOOD
)
self._deploy_deployment_to_environment(
environment,
'dsl/simple_deployment.yaml',
'service'
)
self._deploy_deployment_to_environment(
environment,
'dsl/empty_blueprint.yaml',
'service'
)
self._verify_statuses_and_count_for_deployment(
environment.id,
deployment_status=DeploymentState.REQUIRE_ATTENTION,
sub_services_status=DeploymentState.REQUIRE_ATTENTION,
sub_services_count=2
)
def test_environment_after_install_single_service(self):
environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
environment.id, DeploymentState.GOOD
)
self._deploy_deployment_to_environment(
environment,
'dsl/simple_deployment.yaml',
'service',
install=True
)
self._verify_statuses_and_count_for_deployment(
environment.id,
deployment_status=DeploymentState.GOOD,
sub_services_status=DeploymentState.GOOD,
sub_services_count=1
)
def test_environment_after_install_multiple_services(self):
environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
environment.id, DeploymentState.GOOD
)
self._deploy_deployment_to_environment(
environment,
'dsl/simple_deployment.yaml',
'service',
install=True
)
self._deploy_deployment_to_environment(
environment,
'dsl/empty_blueprint.yaml',
'service',
install=True
)
self._verify_statuses_and_count_for_deployment(
environment.id,
deployment_status=DeploymentState.GOOD,
sub_services_status=DeploymentState.GOOD,
sub_services_count=2
)
def test_environment_after_install_single_service_with_failure(self):
environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
environment.id, DeploymentState.GOOD
)
deployment = self._deploy_deployment_to_environment(
environment,
'dsl/workflow_api.yaml',
'service',
install=True
)
with self.assertRaises(RuntimeError):
self.execute_workflow(
workflow_name='execute_operation',
deployment_id=deployment.id,
parameters={
'operation': 'test.fail',
'node_ids': ['test_node'],
'operation_kwargs': {'non_recoverable': True}
},
wait_for_execution=True
)
self._verify_statuses_and_count_for_deployment(
environment.id,
deployment_status=DeploymentState.REQUIRE_ATTENTION,
sub_services_status=DeploymentState.REQUIRE_ATTENTION,
sub_services_count=1
)
def test_environment_after_install_multiple_services_with_failure(self):
environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
environment.id, DeploymentState.GOOD
)
self._deploy_deployment_to_environment(
environment,
'dsl/simple_deployment.yaml',
'service',
install=True
)
service2 = self._deploy_deployment_to_environment(
environment,
'dsl/workflow_api.yaml',
'service',
install=True
)
with self.assertRaises(RuntimeError):
self.execute_workflow(
workflow_name='execute_operation',
deployment_id=service2.id,
parameters={
'operation': 'test.fail',
'node_ids': ['test_node'],
'operation_kwargs': {'non_recoverable': True}
},
wait_for_execution=True
)
self._verify_statuses_and_count_for_deployment(
environment.id,
deployment_status=DeploymentState.REQUIRE_ATTENTION,
sub_services_status=DeploymentState.REQUIRE_ATTENTION,
sub_services_count=2
)
def test_environment_after_deploy_single_environment(self):
environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
environment.id, DeploymentState.GOOD
)
self._deploy_deployment_to_environment(
environment,
'dsl/simple_deployment.yaml',
'environment'
)
self._verify_statuses_and_count_for_deployment(
environment.id,
deployment_status=DeploymentState.REQUIRE_ATTENTION,
sub_environments_status=DeploymentState.REQUIRE_ATTENTION,
sub_environments_count=1
)
def test_environment_after_deploy_multiple_environments(self):
environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
environment.id, DeploymentState.GOOD
)
self._deploy_deployment_to_environment(
environment,
'dsl/simple_deployment.yaml',
'environment'
)
self._deploy_deployment_to_environment(
environment,
'dsl/empty_blueprint.yaml',
'environment'
)
self._verify_statuses_and_count_for_deployment(
environment.id,
deployment_status=DeploymentState.REQUIRE_ATTENTION,
sub_environments_status=DeploymentState.REQUIRE_ATTENTION,
sub_environments_count=2
)
def test_environment_after_install_single_environment(self):
environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
environment.id, DeploymentState.GOOD
)
self._deploy_deployment_to_environment(
environment,
'dsl/simple_deployment.yaml',
'environment',
install=True
)
self._verify_statuses_and_count_for_deployment(
environment.id,
deployment_status=DeploymentState.GOOD,
sub_environments_status=DeploymentState.GOOD,
sub_environments_count=1
)
def test_environment_after_install_multiple_environments(self):
environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
environment.id, DeploymentState.GOOD
)
self._deploy_deployment_to_environment(
environment,
'dsl/simple_deployment.yaml',
'environment',
install=True
)
self._deploy_deployment_to_environment(
environment,
'dsl/empty_blueprint.yaml',
'environment',
install=True
)
self._verify_statuses_and_count_for_deployment(
environment.id,
deployment_status=DeploymentState.GOOD,
sub_environments_status=DeploymentState.GOOD,
sub_environments_count=2
)
def test_environment_after_install_single_environment_with_failure(self):
environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
environment.id, DeploymentState.GOOD
)
deployment = self._deploy_deployment_to_environment(
environment,
'dsl/workflow_api.yaml',
'environment',
install=True
)
with self.assertRaises(RuntimeError):
self.execute_workflow(
workflow_name='execute_operation',
deployment_id=deployment.id,
parameters={
'operation': 'test.fail',
'node_ids': ['test_node'],
'operation_kwargs': {'non_recoverable': True}
},
wait_for_execution=True
)
self._verify_statuses_and_count_for_deployment(
environment.id,
deployment_status=DeploymentState.REQUIRE_ATTENTION,
sub_environments_status=DeploymentState.REQUIRE_ATTENTION,
sub_environments_count=1
)
def test_environment_after_install_multiple_environments_with_failure(
self):
environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
environment.id, DeploymentState.GOOD
)
self._deploy_deployment_to_environment(
environment,
'dsl/simple_deployment.yaml',
'environment',
install=True
)
deployment = self._deploy_deployment_to_environment(
environment,
'dsl/workflow_api.yaml',
'environment',
install=True
)
with self.assertRaises(RuntimeError):
self.execute_workflow(
workflow_name='execute_operation',
deployment_id=deployment.id,
parameters={
'operation': 'test.fail',
'node_ids': ['test_node'],
'operation_kwargs': {'non_recoverable': True}
},
wait_for_execution=True
)
self._verify_statuses_and_count_for_deployment(
environment.id,
deployment_status=DeploymentState.REQUIRE_ATTENTION,
sub_environments_status=DeploymentState.REQUIRE_ATTENTION,
sub_environments_count=2
)
def test_environment_after_install_service_and_environment(self):
environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
environment.id, DeploymentState.GOOD
)
self._deploy_deployment_to_environment(
environment,
'dsl/simple_deployment.yaml',
'environment',
install=True
)
self._deploy_deployment_to_environment(
environment,
'dsl/simple_deployment.yaml',
'service',
install=True
)
self._verify_statuses_and_count_for_deployment(
environment.id,
deployment_status=DeploymentState.GOOD,
sub_environments_status=DeploymentState.GOOD,
sub_services_status=DeploymentState.GOOD,
sub_services_count=1,
sub_environments_count=1
)
def test_environment_after_removing_service(self):
environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
environment.id, DeploymentState.GOOD
)
deployment = self._deploy_deployment_to_environment(
environment,
'dsl/simple_deployment.yaml',
'service'
)
environment = self.client.deployments.get(environment.id)
self._assert_deployment_environment_attr(
environment,
deployment_status=DeploymentState.REQUIRE_ATTENTION,
sub_services_status=DeploymentState.REQUIRE_ATTENTION,
sub_services_count=1
)
self.delete_deployment(deployment.id, validate=True)
self._verify_statuses_and_count_for_deployment(
environment.id,
deployment_status=DeploymentState.GOOD,
sub_services_count=0
)
def test_environment_after_uninstall_service(self):
environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
environment.id, DeploymentState.GOOD
)
deployment = self._deploy_deployment_to_environment(
environment,
'dsl/simple_deployment.yaml',
'service',
install=True
)
environment = self.client.deployments.get(environment.id)
self._assert_deployment_environment_attr(
environment,
deployment_status=DeploymentState.GOOD,
sub_services_status=DeploymentState.GOOD,
sub_services_count=1
)
self.execute_workflow(workflow_name='uninstall',
deployment_id=deployment.id,
wait_for_execution=True)
self._verify_statuses_and_count_for_deployment(
environment.id,
deployment_status=DeploymentState.REQUIRE_ATTENTION,
sub_services_status=DeploymentState.REQUIRE_ATTENTION,
sub_services_count=1
)
def test_environment_after_removing_environment(self):
environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
environment.id, DeploymentState.GOOD
)
deployment = self._deploy_deployment_to_environment(
environment,
'dsl/simple_deployment.yaml',
'environment'
)
environment = self.client.deployments.get(environment.id)
self._assert_deployment_environment_attr(
environment,
deployment_status=DeploymentState.REQUIRE_ATTENTION,
sub_environments_status=DeploymentState.REQUIRE_ATTENTION,
sub_environments_count=1
)
self.delete_deployment(deployment.id, validate=True)
self._verify_statuses_and_count_for_deployment(
environment.id,
deployment_status=DeploymentState.GOOD,
sub_environments_count=0
)
def test_environment_after_uninstall_environment(self):
environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
environment.id, DeploymentState.GOOD
)
deployment = self._deploy_deployment_to_environment(
environment,
'dsl/simple_deployment.yaml',
'environment',
install=True
)
self._verify_statuses_and_count_for_deployment(
environment.id,
deployment_status=DeploymentState.GOOD,
sub_environments_status=DeploymentState.GOOD,
sub_environments_count=1
)
self.execute_workflow(workflow_name='uninstall',
deployment_id=deployment.id,
wait_for_execution=True)
self._verify_statuses_and_count_for_deployment(
environment.id,
deployment_status=DeploymentState.REQUIRE_ATTENTION,
sub_environments_status=DeploymentState.REQUIRE_ATTENTION,
sub_environments_count=1
)
def test_environment_after_update_workflow(self):
main_environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
main_environment.id, DeploymentState.GOOD
)
deployment = self._deploy_deployment_to_environment(
main_environment,
'dsl/simple_deployment.yaml',
'service',
install=True
)
main_environment = self.client.deployments.get(main_environment.id)
self._assert_deployment_environment_attr(
main_environment,
deployment_status=DeploymentState.GOOD,
sub_services_status=DeploymentState.GOOD,
sub_services_count=1
)
# Deploy new parent 1
environment_1 = self._deploy_main_environment(
'dsl/basic.yaml',
blueprint_id='new_parent_1',
deployment_id='new_parent_1'
)
# Deploy new parent 2
environment_2 = self._deploy_main_environment(
'dsl/basic.yaml',
blueprint_id='new_parent_2',
deployment_id='new_parent_2'
)
self._assert_main_environment_after_installation(
environment_1.id, DeploymentState.GOOD
)
self._assert_main_environment_after_installation(
environment_2.id, DeploymentState.GOOD
)
self.upload_blueprint_resource(
'dsl/simple_deployment_with_parents.yaml',
'updated-blueprint'
)
dep_up = self.client.deployment_updates.update_with_existing_blueprint(
deployment.id,
blueprint_id='updated-blueprint'
)
self.wait_for_execution_to_end(
self.client.executions.get(dep_up.execution_id))
self._verify_statuses_and_count_for_deployment(
environment_1.id,
deployment_status=DeploymentState.GOOD,
sub_services_status=DeploymentState.GOOD,
sub_services_count=1
)
self._verify_statuses_and_count_for_deployment(
environment_2.id,
deployment_status=DeploymentState.GOOD,
sub_services_status=DeploymentState.GOOD,
sub_services_count=1
)
def test_uninstall_environment_linked_with_multiple_deployments(self):
environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
environment.id, DeploymentState.GOOD
)
self._deploy_environment_with_service_and_environment(environment)
with self.assertRaises(CloudifyClientError):
self.execute_workflow(workflow_name='uninstall',
deployment_id=environment.id)
def test_stop_environment_linked_with_multiple_deployments(self):
environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
environment.id, DeploymentState.GOOD
)
self._deploy_environment_with_service_and_environment(environment)
with self.assertRaises(CloudifyClientError):
self.execute_workflow(workflow_name='stop',
deployment_id=environment.id)
def test_delete_environment_linked_with_multiple_deployments(self):
environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
environment.id, DeploymentState.GOOD
)
self._deploy_environment_with_service_and_environment(environment)
with self.assertRaises(CloudifyClientError):
self.client.deployments.delete(environment.id)
def test_update_environment_linked_with_multiple_deployments(self):
environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
environment.id, DeploymentState.GOOD
)
self._deploy_environment_with_service_and_environment(environment)
self.upload_blueprint_resource(
'dsl/basic_get_secret.yaml',
'updated_basic'
)
with self.assertRaises(CloudifyClientError):
self.client.deployment_updates.update_with_existing_blueprint(
environment.id, 'updated_basic')
def test_uninstall_environment_and_parent(self):
environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
environment.id, DeploymentState.GOOD
)
deployment = self._deploy_deployment_to_environment(
environment,
'dsl/simple_deployment.yaml',
'environment',
install=True
)
self._verify_statuses_and_count_for_deployment(
environment.id,
deployment_status=DeploymentState.GOOD,
sub_environments_status=DeploymentState.GOOD,
sub_environments_count=1
)
self.execute_workflow(workflow_name='uninstall',
deployment_id=deployment.id,
wait_for_execution=True)
self.delete_deployment(deployment.id, validate=True)
self._verify_statuses_and_count_for_deployment(
environment.id,
deployment_status=DeploymentState.GOOD,
sub_services_count=0
)
self.execute_workflow(workflow_name='uninstall',
deployment_id=environment.id,
wait_for_execution=True)
self.delete_deployment(environment.id, validate=True)
def test_environment_with_two_levels(self):
main_environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
main_environment.id, DeploymentState.GOOD
)
self._deploy_environment_with_two_levels(main_environment)
self._verify_statuses_and_count_for_deployment(
main_environment.id,
deployment_status=DeploymentState.GOOD,
sub_environments_status=DeploymentState.GOOD,
sub_services_status=DeploymentState.GOOD,
sub_environments_count=4,
sub_services_count=2
)
def test_environment_with_three_levels(self):
# Main parent
main_environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
main_environment.id, DeploymentState.GOOD
)
self._deploy_environment_with_three_levels(main_environment)
self._verify_statuses_and_count_for_deployment(
main_environment.id,
deployment_status=DeploymentState.GOOD,
sub_environments_status=DeploymentState.GOOD,
sub_services_status=DeploymentState.GOOD,
sub_environments_count=6,
sub_services_count=4
)
def test_environment_with_delete_child(self):
main_environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
main_environment.id, DeploymentState.GOOD
)
service, environment = \
self._deploy_environment_with_service_and_environment(
main_environment
)
self.execute_workflow(workflow_name='uninstall',
deployment_id=service.id)
self.delete_deployment(service.id, validate=True)
self._verify_statuses_and_count_for_deployment(
main_environment.id,
deployment_status=DeploymentState.GOOD,
sub_environments_status=DeploymentState.GOOD,
sub_environments_count=1
)
def test_environment_with_uninstall_child(self):
main_environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
main_environment.id, DeploymentState.GOOD
)
service, environment = \
self._deploy_environment_with_service_and_environment(
main_environment
)
self.execute_workflow(workflow_name='uninstall',
deployment_id=service.id)
self._verify_statuses_and_count_for_deployment(
main_environment.id,
deployment_status=DeploymentState.REQUIRE_ATTENTION,
sub_services_status=DeploymentState.REQUIRE_ATTENTION,
sub_environments_status=DeploymentState.GOOD,
sub_services_count=1,
sub_environments_count=1
)
def test_environment_after_removing_parent_label(self):
main_environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
main_environment.id, DeploymentState.GOOD
)
service, environment = \
self._deploy_environment_with_service_and_environment(
main_environment
)
self.client.deployments.update_labels(environment.id, [
{
'csys-obj-type': 'environment'
}
])
self._verify_statuses_and_count_for_deployment(
main_environment.id,
deployment_status=DeploymentState.GOOD,
sub_services_status=DeploymentState.GOOD,
sub_services_count=1
)
def test_environment_after_conversion_to_service_type(self):
main_environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
main_environment.id, DeploymentState.GOOD
)
service, environment = \
self._deploy_environment_with_service_and_environment(
main_environment
)
self.client.deployments.update_labels(environment.id, [
{
'csys-obj-type': 'service'
},
{
'csys-obj-parent': main_environment.id
},
])
self._verify_statuses_and_count_for_deployment(
main_environment.id,
deployment_status=DeploymentState.GOOD,
sub_services_status=DeploymentState.GOOD,
sub_services_count=2
)
def test_environment_after_conversion_to_environment_type(self):
main_environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
main_environment.id, DeploymentState.GOOD
)
service, environment = \
self._deploy_environment_with_service_and_environment(
main_environment
)
self.client.deployments.update_labels(service.id, [
{
'csys-obj-type': 'environment'
},
{
'csys-obj-parent': main_environment.id
},
])
self._verify_statuses_and_count_for_deployment(
main_environment.id,
deployment_status=DeploymentState.GOOD,
sub_environments_status=DeploymentState.GOOD,
sub_environments_count=2
)
def test_environment_with_adding_single_parent_to_group(self):
main_environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
main_environment.id, DeploymentState.GOOD
)
self._create_deployment_group_from_blueprint(
'dsl/simple_deployment.yaml',
'grp-blueprint',
'group1',
4,
labels_to_add=[{'csys-obj-parent': main_environment.id}]
)
self._execute_workflow_on_group('group1', 'install')
self._verify_statuses_and_count_for_deployment(
main_environment.id,
deployment_status=DeploymentState.GOOD,
sub_services_status=DeploymentState.GOOD,
sub_services_count=4
)
def test_environment_with_adding_multiple_parents_to_group(self):
environment1 = self._deploy_main_environment('dsl/basic.yaml')
environment2 = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
environment1.id, DeploymentState.GOOD
)
self._assert_main_environment_after_installation(
environment2.id, DeploymentState.GOOD
)
self._create_deployment_group_from_blueprint(
'dsl/simple_deployment.yaml',
'grp-blueprint',
'group1',
4,
labels_to_add=[{'csys-obj-parent': environment1.id},
{'csys-obj-parent': environment2.id}]
)
self._execute_workflow_on_group('group1', 'install')
self._verify_statuses_and_count_for_deployment(
environment1.id,
deployment_status=DeploymentState.GOOD,
sub_services_status=DeploymentState.GOOD,
sub_services_count=4
)
self._verify_statuses_and_count_for_deployment(
environment2.id,
deployment_status=DeploymentState.GOOD,
sub_services_status=DeploymentState.GOOD,
sub_services_count=4
)
def test_environment_with_removing_parent_from_group(self):
main_environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
main_environment.id, DeploymentState.GOOD
)
self._create_deployment_group_from_blueprint(
'dsl/simple_deployment.yaml',
'grp-blueprint',
'group1',
4,
labels_to_add=[{'csys-obj-parent': main_environment.id}]
)
self._execute_workflow_on_group('group1', 'install')
self._verify_statuses_and_count_for_deployment(
main_environment.id,
deployment_status=DeploymentState.GOOD,
sub_services_status=DeploymentState.GOOD,
sub_services_count=4
)
self.client.deployment_groups.put('group1', labels=[])
main_environment = self.client.deployments.get(main_environment.id)
self._assert_deployment_environment_attr(
main_environment,
deployment_status=DeploymentState.GOOD
)
def test_environment_after_conversion_to_environment_type_for_group(self):
main_environment = self._deploy_main_environment('dsl/basic.yaml')
self._assert_main_environment_after_installation(
main_environment.id, DeploymentState.GOOD
)
self._create_deployment_group_from_blueprint(
'dsl/simple_deployment.yaml',
'grp-blueprint',
'group1',
4,
labels_to_add=[{'csys-obj-parent': main_environment.id}]
)
self._execute_workflow_on_group('group1', 'install')
self._verify_statuses_and_count_for_deployment(
main_environment.id,
deployment_status=DeploymentState.GOOD,
sub_services_status=DeploymentState.GOOD,
sub_services_count=4
)
self.client.deployment_groups.put(
'group1',
labels=[{'csys-obj-parent': main_environment.id},
{'csys-obj-type': 'environment'}],
)
main_environment = self.client.deployments.get(main_environment.id)
self._assert_deployment_environment_attr(
main_environment,
deployment_status=DeploymentState.GOOD,
sub_environments_status=DeploymentState.GOOD,
sub_environments_count=4
)
| apache-2.0 | 8,541,324,695,537,363,000 | 38.05293 | 79 | 0.581853 | false |
MGautier/security-sensor | trunk/Documentacion/Memoria/trozos-codigo/codigo-10.py | 1 | 2806 | class VisualizationsTestCase(TestCase):
def setUp(self):
log_sources = LogSources.objects.create(
Description="Firewall of gnu/linux kernel",
Type="Iptables",
Model="iptables v1.4.21",
Active=1,
Software_Class="Firewall",
Path="iptables",
)
Visualizations.objects.create(
Week_Month=1,
Week_Day=2,
Name_Day="Wednesday",
Date=date(2016, 8, 10),
Hour=18,
ID_Source=log_sources,
Process_Events=5
)
def test_visualizations_week_month(self):
"""
        Check that the week of the month matches the associated one
Returns:
"""
visualizations = Visualizations.objects.get(Week_Month=1)
self.assertEqual(visualizations.get_week_month(), 1)
def test_visualizations_week_day(self):
"""
        Check that the day of the week matches the associated one
Returns:
"""
visualizations = Visualizations.objects.get(Week_Day=2)
self.assertEqual(visualizations.get_week_day(), 2)
def test_visualizations_name_day(self):
"""
        Check that the name of the processed day matches the associated one
Returns:
"""
visualizations = Visualizations.objects.get(Name_Day="Wednesday")
self.assertEqual(visualizations.get_name_day(), "Wednesday")
def test_visualizations_date(self):
"""
        Check that the date registered in the system matches the associated one
Returns:
"""
visualizations = Visualizations.objects.get(Date=date(2016, 8, 10))
self.assertEqual(visualizations.get_date(), date(2016, 8, 10))
def test_visualizations_hour(self):
"""
        Check that the hour registered in the system for the processed date matches the associated one
Returns:
"""
visualizations = Visualizations.objects.get(Hour=18)
self.assertEqual(visualizations.get_hour(), 18)
def test_visualizations_source(self):
"""
        Check that the security source it belongs to is equal to the associated one
Returns:
"""
log_sources = LogSources.objects.get(Type="Iptables")
visualizations = Visualizations.objects.get(ID_Source=log_sources)
self.assertEqual(visualizations.get_source(), log_sources)
def test_visualizations_process_events(self):
"""
        Check that the number of events registered for the date matches the associated one
Returns:
"""
visualizations = Visualizations.objects.get(Process_Events=5)
self.assertEqual(visualizations.get_process_events(), 5)
| mit | 5,106,144,274,954,064,000 | 32.404762 | 110 | 0.619387 | false |
jaywink/shoop | shoop/notify/admin_module/views/editor.py | 2 | 6176 | # -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
import json
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.http import JsonResponse
from django.shortcuts import render
from django.utils.text import camel_case_to_spaces
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import DetailView
from shoop.admin.toolbar import Toolbar, JavaScriptActionButton, URLActionButton, get_discard_button
from shoop.admin.utils.urls import get_model_url
from shoop.admin.utils.views import CreateOrUpdateView, add_create_or_change_message, get_create_or_change_title
from shoop.notify.admin_module.forms import ScriptItemEditForm
from shoop.notify.admin_module.utils import get_enum_choices_dict
from shoop.notify.base import Action, Condition, Event
from shoop.notify.enums import StepConditionOperator, StepNext
from shoop.utils.text import snake_case
from django.shortcuts import redirect
from shoop.notify.admin_module.forms import ScriptForm
from shoop.notify.models.script import Script
from django.utils.translation import ugettext_lazy as _
@csrf_exempt # This is fine -- the editor itself saves naught
def script_item_editor(request):
# This is a regular non-CBV view because the way it processes the data it received
# would be more awkward to do in a CBV.
request.POST = dict(request.POST.items()) # Make it mutable
init_data_json = request.POST.pop("init_data")
init_data = json.loads(init_data_json)
item_class = {"action": Action, "condition": Condition}[init_data["itemType"]]
form = ScriptItemEditForm(
script_item=item_class.unserialize(init_data["data"], validate=False),
event_class=Event.class_for_identifier(init_data["eventIdentifier"]),
data=(request.POST if request.POST else None),
files=(request.FILES if request.FILES else None)
)
form.initial = form.get_initial()
context = {
"form": form,
"script_item": form.script_item,
"event_class": form.event_class,
"init_data": init_data_json,
}
if form.data and form.is_valid():
try:
form.save()
except ValidationError as verr:
form.add_error(None, verr)
else:
context["post_message"] = {"new_data": form.script_item.data}
# Unbind so we'll use the initial data
form.is_bound = False
form.data = {}
form.initial = form.get_initial()
return render(request, "notify/admin/script_item_editor.jinja", context)
class ScriptAPI(object):
def __init__(self, request, script):
"""
:param request: Request
:type request: django.http.HttpRequest
:param script: Script
:type script: shoop.notify.models.Script
"""
self.request = request
self.script = script
def dispatch(self):
data = json.loads(self.request.body.decode("UTF-8"))
command = data.pop("command")
func_name = "handle_%s" % snake_case(camel_case_to_spaces(command))
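        # Added note (not in the original source): the camelCase command name is
        # mapped to a handler method, e.g. "getData" -> handle_get_data and
        # "saveData" -> handle_save_data.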
func = getattr(self, func_name, None)
if not callable(func):
return JsonResponse({"error": "No handler: %s" % func_name})
return func(data)
def handle_get_data(self, data):
return JsonResponse({
"steps": self.script.get_serialized_steps(),
})
def handle_save_data(self, data):
try:
self.script.set_serialized_steps(data["steps"])
except Exception as exc:
if settings.DEBUG:
raise
return JsonResponse({"error": exc})
self.script.save(update_fields=("_step_data",))
return JsonResponse({"success": "Changes saved."})
class EditScriptContentView(DetailView):
template_name = "notify/admin/script_content_editor.jinja"
model = Script
context_object_name = "script"
def post(self, request, *args, **kwargs):
return ScriptAPI(request, self.get_object()).dispatch()
def get_context_data(self, **kwargs):
context = super(EditScriptContentView, self).get_context_data(**kwargs)
context["title"] = get_create_or_change_title(self.request, self.object)
context["action_infos"] = Action.get_ui_info_map()
context["condition_infos"] = Condition.get_ui_info_map()
context["cond_op_names"] = get_enum_choices_dict(StepConditionOperator)
context["step_next_names"] = get_enum_choices_dict(StepNext)
context["toolbar"] = Toolbar([
JavaScriptActionButton(
text="Save", icon="fa fa-save", extra_css_class="btn-success",
onclick="ScriptEditor.save();return false"
),
get_discard_button(get_model_url(self.object, "edit"))
])
return context
class EditScriptView(CreateOrUpdateView):
model = Script
form_class = ScriptForm
template_name = "notify/admin/edit_script.jinja"
context_object_name = "script"
def get_context_data(self, **kwargs):
context = super(EditScriptView, self).get_context_data(**kwargs)
if self.object.pk:
context["toolbar"] = Toolbar([
URLActionButton(
text=_(u"Edit Script Contents..."),
icon="fa fa-pencil",
extra_css_class="btn-info",
url=reverse("shoop_admin:notify.script.edit-content", kwargs={"pk": self.object.pk})
)
])
return context
def form_valid(self, form):
is_new = (not self.object.pk)
wf = form.save()
if is_new:
return redirect("shoop_admin:notify.script.edit-content", pk=wf.pk)
else:
add_create_or_change_message(self.request, self.object, is_new=is_new)
return redirect("shoop_admin:notify.script.edit", pk=wf.pk)
| agpl-3.0 | 689,958,596,899,569,900 | 38.589744 | 112 | 0.649935 | false |
upeu-jul-20161-epis-ads2/MedicFast | apps/space/models.py | 3 | 8558 | # _*_ coding: utf-8 _*_
"""
@copyright Copyright (c) 2014 Submit Consulting
@author Angel Sullon (@asullom)
@package space
Description: Registration of the models of the space app
"""
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.text import capfirst
from django.dispatch import receiver
from django.db.models import signals
from unicodedata import normalize
from django.core.exceptions import ValidationError
# models
#from apps.params.models import Locality
# managers
#from .managers import SolutionManager
# others
#from django.utils import timezone
#from django.core.exceptions import NON_FIELD_ERRORS
#from django.template.defaultfilters import slugify
GOVERMENT = 'GOVERMENT'
PRIVATE = 'PRIVATE'
MIXED = 'MIXED'
OTHERS = 'OTHERS'
TYPE_CHOICES = (
(GOVERMENT, _('Government')),
(PRIVATE, _('Private')),
(MIXED, _('Mixed')),
(OTHERS, _('Others'))
)
def unique_name(value):
if value == 'angel':
raise ValidationError(u'%s is not an angel' % value)
class Solution(models.Model):
"""
    Table containing the system's solutions, plans, or services
"""
name = models.CharField(capfirst(_('name')), max_length=50)
description = models.TextField(_('Description'), null=True, blank=True)
price = models.FloatField(_('Price'), null=True, blank=True)
is_active = models.BooleanField(capfirst(_('active')), default=True)
created_at = models.DateTimeField(_('Created at'), auto_now_add=True)
updated_at = models.DateTimeField(_('Updated at'), auto_now=True)
test_image = models.ImageField(
_('Test image'), upload_to='test_images',
default='test_images/default.png', null=True, blank=True)
test_date = models.DateTimeField(_('Test date'),
null=True, blank=True)
class Meta:
verbose_name = _('Solution')
verbose_name_plural = _('Solutions')
permissions = (
('solution', 'Can ALL solution'),
)
unique_together = ('name',)
def __str__(self):
return self.name
def validate_unique(self, exclude=None):
if normalize('NFKD', self.name).encode('ascii', 'ignore').lower() in list(
normalize('NFKD', c['name']).encode('ascii', 'ignore').lower()
for c in self.__class__.objects.values('name').exclude(pk=self.pk)
):
raise ValidationError({
'name':
(_(u'%(model_name)s with this %(field_label)s already exists.') % {
'model_name': _('Solution'),
'field_label': capfirst(_('name')),
},),
})
super(Solution, self).validate_unique(exclude=exclude)
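# Illustrative sketch (not part of the original module): the validate_unique()/save()
# checks in these models are accent-insensitive, so e.g. "Solución" and "solucion"
# collapse to the same ASCII key and are rejected as duplicates.
def _example_normalized_key(name):
    # Same normalization as used by the uniqueness checks in this module.
    return normalize('NFKD', u'%s' % name).encode('ascii', 'ignore').lower()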
class Association(models.Model):
"""
    Table containing the associations subscribed to a plan
"""
name = models.CharField(capfirst(_('name')), max_length=50)
logo = models.ImageField(
_('Logo'),
upload_to='associations', default='associations/default.png')
type_a = models.CharField(
_('Type'),
max_length=10, choices=TYPE_CHOICES, default=PRIVATE)
is_active = models.BooleanField(capfirst(_('active')), default=True)
is_actived = models.BooleanField(_('Actived'), default=False)
created_at = models.DateTimeField(_('Created at'), auto_now_add=True)
updated_at = models.DateTimeField(_('Updated at'), auto_now=True)
solution = models.ForeignKey(
Solution, verbose_name=_('Solution'), null=True, blank=True)
class Meta:
verbose_name = _('Association')
verbose_name_plural = _('Associations')
permissions = (
('association', 'Can ALL association'),
)
unique_together = ('name',)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if normalize('NFKD', u'%s' % self.name).encode('ascii', 'ignore').lower() in list(
normalize('NFKD', u'%s' % c['name']).encode('ascii', 'ignore').lower() for c in Association.objects.values('name').exclude(pk=self.pk)
):
raise Exception(_(u'%(model_name)s with this %(field_label)s already exists.') % {
'model_name': _('Association'),
'field_label': capfirst(_(u'name')),
})
super(Association, self).save(*args, **kwargs)
class Enterprise(models.Model):
"""
    Table containing the enterprises subscribed to a plan
"""
name = models.CharField(capfirst(_('name')), max_length=50)
#name_sm = models.CharField(capfirst(_('siglas')), max_length=50)
logo = models.ImageField(
_('Logo'), upload_to='enterprises', default='enterprises/default.png')
tax_id = models.CharField(_('Tax id'), max_length=50)
type_e = models.CharField(
_('Type'),
max_length=10, choices=TYPE_CHOICES, default=PRIVATE)
is_active = models.BooleanField(capfirst(_('active')), default=True)
is_actived = models.BooleanField(_('Actived'), default=False)
created_at = models.DateTimeField(_('Created at'), auto_now_add=True)
updated_at = models.DateTimeField(_('Updated at'), auto_now=True)
solution = models.ForeignKey(
Solution, verbose_name=_('Solution'), null=True, blank=True)
class Meta:
verbose_name = _('Enterprise')
verbose_name_plural = _('Enterprises')
permissions = (
('enterprise', 'Can ALL enterprise'),
)
# no duplicate name, no duplicate tax_id
unique_together = ('name', 'tax_id',)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if normalize('NFKD', u'%s' % self.name).encode('ascii', 'ignore').lower() in list(
normalize('NFKD', u'%s' % c['name']).encode('ascii', 'ignore').lower() for c in Enterprise.objects.values('name').exclude(pk=self.pk)
):
raise Exception(_(u'%(model_name)s with this %(field_label)s already exists.') % {
'model_name': _('Enterprise'),
'field_label': capfirst(_(u'name')),
})
if Enterprise.objects.exclude(pk=self.pk).filter(tax_id=self.tax_id).count() > 0:
raise Exception(_(u'%(model_name)s with this %(field_label)s already exists.') % {
'model_name': _('Enterprise'),
'field_label': capfirst(_(u'Tax id')),
})
super(Enterprise, self).save(*args, **kwargs)
class Headquar(models.Model):
"""
    Table containing the enterprises' headquarters (branches), each linked to an Association.
    A Headquar (headquarters or branch) is the main unit of the system.
    System access will be granted to a Headquarters.
"""
name = models.CharField(capfirst(_('name')), max_length=50)
phone = models.CharField(_('Phone'), max_length=50, null=True, blank=True)
address = models.TextField(_('Address'), null=True, blank=True)
is_main = models.BooleanField(_('Main'), default=False)
is_active = models.BooleanField(capfirst(_('active')), default=True)
is_actived = models.BooleanField(_('Actived'), default=False)
created_at = models.DateTimeField(_('Created at'), auto_now_add=True)
updated_at = models.DateTimeField(_('Updated at'), auto_now=True)
# locality = models.ForeignKey(Locality, verbose_name=_('Locality'),
# null=True, blank=True)
association = models.ForeignKey(
Association, verbose_name=_('Association'), null=True, blank=True)
enterprise = models.ForeignKey(Enterprise, verbose_name=_('Enterprise'))
class Meta:
verbose_name = _('Headquar')
verbose_name_plural = _('Headquars')
permissions = (
('headquar', 'Can ALL headquar(sedes)'),
)
# no duplicate name by enterprise
unique_together = (('name', 'enterprise'),)
def __str__(self):
return '%s %s (%s)' % (self.name, self.enterprise, self.association)
def save(self, *args, **kwargs):
if normalize('NFKD', u'%s' % self.name).encode('ascii', 'ignore').lower() in list(
normalize('NFKD', u'%s' % c['name']).encode('ascii', 'ignore').lower() for c in Headquar.objects.values('name').exclude(pk=self.pk).filter(enterprise=self.enterprise)
):
raise Exception(_(u'%(model_name)s with this %(field_label)s already exists.') % {
'model_name': _('Headquar'),
'field_label': capfirst(_(u'name')),
})
super(Headquar, self).save(*args, **kwargs)
| bsd-3-clause | -7,848,413,578,363,401,000 | 36.038961 | 182 | 0.608111 | false |
simone-campagna/zirkon | tests/unit/toolbox/serializer/test_serializer.py | 2 | 2958 | # -*- coding: utf-8 -*-
from zirkon import Config
from zirkon.toolbox.serializer import (
Serializer,
ZirkonSerializer,
DotsSerializer,
PathSerializer,
)
import pytest
@pytest.fixture(params=tuple(Serializer.get_class_tags()))
def fmt(request):
return request.param
@pytest.mark.regression('8973ae1af7a0b9baddaa811f667af1c45bc58b40')
def test_serializer_empty(fmt):
# test to fix toml library bug (toml->pytoml)
serializer = Serializer.get_class(fmt)(scope=Config.scope())
dct = {
'a': {},
'b': {
'b0': 10,
'b1': {},
'b2': {'b2_0': 10},
}
}
s = serializer.to_string(dct)
dct1 = serializer.from_string(s)
print(dct)
print(dct1)
assert dct == dct1
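# Illustrative sketch (assumption, not part of the original tests): the same round trip
# outside of pytest -- pick any registered serializer tag, dump a dict and parse it back.
def _example_round_trip():
    tag = next(iter(Serializer.get_class_tags()))
    serializer = Serializer.get_class(tag)(scope=Config.scope())
    text = serializer.to_string({'section': {'value': 10}})
    return serializer.from_string(text) == {'section': {'value': 10}}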
@pytest.fixture(params=[
ZirkonSerializer,
DotsSerializer,
PathSerializer,
])
def cont_serializer(request):
return request.param(scope=Config.scope())
def get_key(key, serializer):
if isinstance(serializer, PathSerializer):
return '/' + key
else:
return key
def test_explicit_continuation_line(cont_serializer):
source = """\
{key} = [10, \\
20, \\
30]""".format(key=get_key('a', cont_serializer))
print(repr(source))
x = cont_serializer.from_string(source)
assert x['a'] == [10, 20, 30]
def test_explicit_continuation_last_line(cont_serializer):
source = "{key} = 10 \\".format(key=get_key('a', cont_serializer))
with pytest.raises(SyntaxError) as exc_info:
x = cont_serializer.from_string(source)
assert str(exc_info.value) == "incomplete last line"
@pytest.fixture(params=[
("{key} =\n 10\n", {'a': 10}),
("{key} = [1, 2\n,3,4,\n5\n]\n", {'a': [1, 2, 3, 4, 5]}),
("{key} = (1, 2\n,3,4,\n5\n)\n", {'a': (1, 2, 3, 4, 5)}),
("{key} = (1, 2\n,3,4 #, 19\n,5\n)\n", {'a': (1, 2, 3, 4, 5)}),
('''{key} = """abc
def"""''', {'a': "abc\ndef"}),
("""{key} = '''abc
def'''""", {'a': "abc\ndef"}),
])
def impl_cont_source(request):
return request.param
def test_implicit_continuation_line(cont_serializer, impl_cont_source):
source, result = impl_cont_source
source = source.format(key=get_key('a', cont_serializer))
print(cont_serializer)
print(repr(source))
x = cont_serializer.from_string(source)
assert x == result
@pytest.fixture(params=[
("{key} = \n[1, 2,\n3a,4]\n", 3, "[1, 2,\n3a,4]"),
("{key} = \\\n[1, 2,\\\n3a,4]\n", 3, "[1, 2,3a,4]"),
])
def err_cont_source(request):
return request.param
def test_continuation_line_error(cont_serializer, err_cont_source):
source, line_number, srcval = err_cont_source
source = source.format(key=get_key('a', cont_serializer))
with pytest.raises(ValueError) as exc_info:
cont_serializer.from_string(source)
msg = "line {}@<string>: cannot decode value {!r}" .format(line_number, srcval)
# print(exc_info.value)
# print(msg)
assert str(exc_info.value) == msg
| apache-2.0 | 5,047,899,778,021,241,000 | 28 | 83 | 0.600068 | false |
cjcjameson/gpdb | src/test/tinc/tincrepo/mpp/gpdb/tests/storage/walrepl/lib/pg_controldata.py | 12 | 1207 | """
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from gppylib.commands.base import Command
class PgControlData(object):
def __init__(self, datadir):
self.datadir = datadir
cmd = Command('pg_controldata',
'$GPHOME/bin/pg_controldata {0}'.format(datadir))
cmd.run()
lookup = {}
for line in cmd.get_stdout_lines():
(key, val) = line.split(':', 1)
key = key.strip()
val = val.strip()
lookup[key] = val
self.lookup = lookup
def get(self, key):
return self.lookup[key]
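# Illustrative usage sketch (the data directory path and key below are assumptions,
# not part of the original module): parse a segment's control data and read one field
# by its pg_controldata label.
def _example_cluster_state(datadir='/data/primary/gpseg0'):
    controldata = PgControlData(datadir)
    return controldata.get('Database cluster state')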
| apache-2.0 | 8,043,284,176,844,925,000 | 33.485714 | 72 | 0.671085 | false |
nicolashainaux/mathmaker | tests/integration/mental_calculation/03_yellow/test_03_yellow_W03a.py | 1 | 1240 | # -*- coding: utf-8 -*-
# Mathmaker creates automatically maths exercises sheets
# with their answers
# Copyright 2006-2018 Nicolas Hainaux <[email protected]>
# This file is part of Mathmaker.
# Mathmaker is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
# Mathmaker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Mathmaker; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from mathmaker.lib import shared
from mathmaker.lib.document.frames import Sheet
def test_W03a():
"""Check this sheet is generated without any error."""
shared.machine.write_out(str(Sheet('mental_calculation',
'03_yellow',
'W03a')),
pdf_output=True)
| gpl-3.0 | -835,733,945,411,911,200 | 36.575758 | 76 | 0.691935 | false |
nugget/home-assistant | homeassistant/components/wink/fan.py | 2 | 3106 | """Support for Wink fans."""
import logging
from homeassistant.components.fan import (
SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, SUPPORT_DIRECTION,
SUPPORT_SET_SPEED, FanEntity)
from homeassistant.components.wink import DOMAIN, WinkDevice
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['wink']
SPEED_AUTO = 'auto'
SPEED_LOWEST = 'lowest'
SUPPORTED_FEATURES = SUPPORT_DIRECTION + SUPPORT_SET_SPEED
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Wink platform."""
import pywink
for fan in pywink.get_fans():
if fan.object_id() + fan.name() not in hass.data[DOMAIN]['unique_ids']:
add_entities([WinkFanDevice(fan, hass)])
class WinkFanDevice(WinkDevice, FanEntity):
"""Representation of a Wink fan."""
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self.hass.data[DOMAIN]['entities']['fan'].append(self)
def set_direction(self, direction: str) -> None:
"""Set the direction of the fan."""
self.wink.set_fan_direction(direction)
def set_speed(self, speed: str) -> None:
"""Set the speed of the fan."""
self.wink.set_state(True, speed)
def turn_on(self, speed: str = None, **kwargs) -> None:
"""Turn on the fan."""
self.wink.set_state(True, speed)
def turn_off(self, **kwargs) -> None:
"""Turn off the fan."""
self.wink.set_state(False)
@property
def is_on(self):
"""Return true if the entity is on."""
return self.wink.state()
@property
def speed(self) -> str:
"""Return the current speed."""
current_wink_speed = self.wink.current_fan_speed()
if SPEED_AUTO == current_wink_speed:
return SPEED_AUTO
if SPEED_LOWEST == current_wink_speed:
return SPEED_LOWEST
if SPEED_LOW == current_wink_speed:
return SPEED_LOW
if SPEED_MEDIUM == current_wink_speed:
return SPEED_MEDIUM
if SPEED_HIGH == current_wink_speed:
return SPEED_HIGH
return None
@property
def current_direction(self):
"""Return direction of the fan [forward, reverse]."""
return self.wink.current_fan_direction()
@property
def speed_list(self) -> list:
"""Get the list of available speeds."""
wink_supported_speeds = self.wink.fan_speeds()
supported_speeds = []
if SPEED_AUTO in wink_supported_speeds:
supported_speeds.append(SPEED_AUTO)
if SPEED_LOWEST in wink_supported_speeds:
supported_speeds.append(SPEED_LOWEST)
if SPEED_LOW in wink_supported_speeds:
supported_speeds.append(SPEED_LOW)
if SPEED_MEDIUM in wink_supported_speeds:
supported_speeds.append(SPEED_MEDIUM)
if SPEED_HIGH in wink_supported_speeds:
supported_speeds.append(SPEED_HIGH)
return supported_speeds
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORTED_FEATURES
| apache-2.0 | 5,488,091,365,387,689,000 | 31.354167 | 79 | 0.622988 | false |
sourcefabric/Booktype | lib/booki/utils/log.py | 7 | 3495 | # This file is part of Booktype.
# Copyright (c) 2012
# Aleksandar Erkalovic <[email protected]>
#
# Booktype is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Booktype is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Booktype. If not, see <http://www.gnu.org/licenses/>.
import json
import logging
import traceback
from booki.editor import models
def logBookHistory(
book=None, version=None, chapter=None,
chapter_history=None, args={}, user=None, kind='unknown'):
"""
Creates history record for book change.
@type book: C{booki.editor.models.Book}
@param book: Book object
@type version: C{booki.editor.models.BookVersion}
@param version: Book version object
@type chapter: C{booki.editor.models.Chapter}
@param chapter: Chapter object
@type chapter_history: C{booki.editor.models.ChapterHistory}
@param chapter_history: Chapter history object
@type args: C{dict}
@param args: Additional arguments
@type user: C{django.contrib.auth.models.User}
@param user: User who did modifications
@type kind: C{string}
@param kind: What kind of modification was done
"""
try:
history = models.BookHistory(
book=book,
version=version,
chapter=chapter,
chapter_history=chapter_history,
args=json.dumps(args),
user=user,
kind=models.HISTORY_CHOICES.get(kind, 0)
)
history.save()
return history
except ValueError:
return None
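# Illustrative sketch (added for clarity; the book/version/chapter/user objects and the
# 'chapter_save' kind are assumptions, not part of the original module): a typical call
# recording a chapter edit in the book history.
#
#     logBookHistory(book=book, version=book_version, chapter=chapter,
#                    chapter_history=chapter_history, user=request.user,
#                    args={'comment': 'Fixed typos'}, kind='chapter_save')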
def logChapterHistory(
chapter=None, content=None,
user=None, comment='', revision=None):
"""
Creates history record for chapter change.
@type chapter: C{booki.editor.models.Chapter}
@param chapter: Chapter object
@type content: C{string}
@param content: Old content
@type user: C{django.contrib.auth.models.User}
@param user: Booki user object
@type comment: C{string}
@param comment: Comment about this change
@type revision: C{int}
@param revision: Revision number for this change
"""
try:
history = models.ChapterHistory(
chapter=chapter,
content=content,
user=user,
revision=revision,
comment=comment
)
history.save()
return history
except ValueError:
return None
def logError(msg, *args):
"""
Logs error message.
@type msg: C{string}
@param msg: Error message
"""
logging.getLogger("booktype").error(msg, *args)
def logWarning(msg, *args):
"""
Logs warning message.
@type msg: C{string}
@param msg: Warning message
"""
logging.getLogger("booktype").warning(msg, *args)
def print_stack(*extra):
"""
Prints entire stack as error message.
"""
logError(traceback.format_exc())
for e in extra:
logError(e)
# legacy support
# we should define a deprecation warning or something
printStack = print_stack
| agpl-3.0 | 1,839,445,332,114,978,300 | 25.884615 | 77 | 0.6598 | false |
MickSandoz/compassion-switzerland | sponsorship_switzerland/models/contracts.py | 1 | 6731 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <[email protected]>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from openerp import api, models, fields, _
class RecurringContracts(models.Model):
_inherit = 'recurring.contract'
gmc_state = fields.Selection('_get_gmc_states', 'GMC State')
##########################################################################
# FIELDS METHODS #
##########################################################################
@api.model
def _get_gmc_states(self):
""" Adds a new gmc state for tracking sponsorships for which we have
received an info about the child and we should notify the sponsor
with a letter (case where e-mail was not sent). """
return [
('transfer', _('Child Transfer')),
('transition', _('Transition')),
('reinstatement', _('Reinstatement')),
('major_revision', _('Major Revision')),
('order_picture', _('Order Picture')),
('biennial', _('Biennial')),
('notes', _('New notes')), # Notes kit notification
('disaster_alert', _('Disaster Alert')),
]
##########################################################################
# VIEW CALLBACKS #
##########################################################################
@api.model
def button_reset_gmc_state(self, value):
""" Button called from Kanban view on all contracts of one group. """
contracts = self.env['recurring.contract'].search([
('gmc_state', '=', value)])
return contracts.reset_gmc_state()
@api.multi
def reset_gmc_state(self):
""" Useful for manually unset GMC State. """
return self.write({'gmc_state': False})
##########################################################################
# WORKFLOW METHODS #
##########################################################################
@api.multi
def contract_active(self):
""" Hook for doing something when contract is activated.
Update partner to add the 'Sponsor' category
"""
super(RecurringContracts, self).contract_active()
sponsor_cat_id = self.env.ref(
'partner_compassion.res_partner_category_sponsor').id
sponsorships = self.filtered(lambda c: 'S' in c.type)
add_sponsor_vals = {'category_id': [(4, sponsor_cat_id)]}
sponsorships.mapped('partner_id').write(add_sponsor_vals)
sponsorships.mapped('correspondant_id').write(add_sponsor_vals)
##########################################################################
# PRIVATE METHODS #
##########################################################################
def _get_invoice_lines_to_clean(self, since_date, to_date):
""" For LSV/DD contracts, don't clean invoices that are in a
Payment Order.
"""
invoice_lines = super(
RecurringContracts, self)._get_invoice_lines_to_clean(since_date,
to_date)
invoices = invoice_lines.mapped('invoice_id')
lsv_dd_invoices = self.env['account.invoice']
for invoice in invoices:
pay_line = self.env['payment.line'].search([
('move_line_id', 'in', invoice.move_id.line_id.ids),
('order_id.state', 'in', ('open', 'done'))])
if pay_line:
lsv_dd_invoices += invoice
            # If a draft payment order exists, we remove the payment line.
pay_line = self.env['payment.line'].search([
('move_line_id', 'in', invoice.move_id.line_id.ids),
('order_id.state', '=', 'draft')])
if pay_line:
pay_line.unlink()
return invoice_lines.filtered(
lambda ivl: ivl.invoice_id not in lsv_dd_invoices)
@api.multi
def _on_sponsorship_finished(self):
""" Called when a sponsorship is terminated or cancelled:
Remove sponsor category if sponsor has no other active
sponsorships.
"""
super(RecurringContracts, self)._on_sponsorship_finished()
sponsor_cat_id = self.env.ref(
'partner_compassion.res_partner_category_sponsor').id
old_sponsor_cat_id = self.env.ref(
'partner_compassion.res_partner_category_old').id
for sponsorship in self:
partner_id = sponsorship.partner_id.id
correspondant_id = sponsorship.correspondant_id.id
# Partner
contract_count = self.search_count([
'|',
('correspondant_id', '=', partner_id),
('partner_id', '=', partner_id),
('state', '=', 'active'),
('type', 'like', 'S')])
if not contract_count:
# Replace sponsor category by old sponsor category
sponsorship.partner_id.write({
'category_id': [(3, sponsor_cat_id),
(4, old_sponsor_cat_id)]})
# Correspondant
contract_count = self.search_count([
'|',
('correspondant_id', '=', correspondant_id),
('partner_id', '=', correspondant_id),
('state', '=', 'active'),
('type', 'like', 'S')])
if not contract_count:
# Replace sponsor category by old sponsor category
sponsorship.correspondant_id.write({
'category_id': [(3, sponsor_cat_id),
(4, old_sponsor_cat_id)]})
@api.model
def _needaction_domain_get(self):
menu = self.env.context.get('count_menu')
if menu == 'menu_follow_gmc':
domain = [
('sds_uid', '=', self.env.user.id),
('gmc_state', '!=', False)]
else:
domain = super(RecurringContracts, self)._needaction_domain_get()
return domain
| agpl-3.0 | -1,129,332,237,836,578,600 | 42.873333 | 78 | 0.449413 | false |
morpheby/levelup-by | cms/djangoapps/contentstore/tests/test_i18n.py | 9 | 3449 | from unittest import skip
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.test.client import Client
from django.test.utils import override_settings
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from contentstore.tests.modulestore_config import TEST_MODULESTORE
@override_settings(MODULESTORE=TEST_MODULESTORE)
class InternationalizationTest(ModuleStoreTestCase):
"""
Tests to validate Internationalization.
"""
def setUp(self):
"""
These tests need a user in the DB so that the django Test Client
can log them in.
They inherit from the ModuleStoreTestCase class so that the mongodb collection
will be cleared out before each test case execution and deleted
afterwards.
"""
self.uname = 'testuser'
self.email = '[email protected]'
self.password = 'foo'
# Create the use so we can log them in.
self.user = User.objects.create_user(self.uname, self.email, self.password)
# Note that we do not actually need to do anything
# for registration if we directly mark them active.
self.user.is_active = True
# Staff has access to view all courses
self.user.is_staff = True
self.user.save()
self.course_data = {
'org': 'MITx',
'number': '999',
'display_name': 'Robot Super Course',
}
def test_course_plain_english(self):
"""Test viewing the index page with no courses"""
self.client = Client()
self.client.login(username=self.uname, password=self.password)
resp = self.client.get(reverse('index'))
self.assertContains(resp,
'<h1 class="page-header">My Courses</h1>',
status_code=200,
html=True)
def test_course_explicit_english(self):
"""Test viewing the index page with no courses"""
self.client = Client()
self.client.login(username=self.uname, password=self.password)
resp = self.client.get(reverse('index'),
{},
HTTP_ACCEPT_LANGUAGE='en'
)
self.assertContains(resp,
'<h1 class="page-header">My Courses</h1>',
status_code=200,
html=True)
# ****
# NOTE:
# ****
#
# This test will break when we replace this fake 'test' language
# with actual French. This test will need to be updated with
# actual French at that time.
# Test temporarily disable since it depends on creation of dummy strings
@skip
def test_course_with_accents(self):
"""Test viewing the index page with no courses"""
self.client = Client()
self.client.login(username=self.uname, password=self.password)
resp = self.client.get(reverse('index'),
{},
HTTP_ACCEPT_LANGUAGE='fr'
)
TEST_STRING = (
u'<h1 class="title-1">'
u'My \xc7\xf6\xfcrs\xe9s L#'
u'</h1>'
)
self.assertContains(resp,
TEST_STRING,
status_code=200,
html=True)
| agpl-3.0 | 6,115,657,919,313,645,000 | 33.49 | 86 | 0.563352 | false |
bassio/omicexperiment | omicexperiment/transforms/transform.py | 1 | 4215 |
class ProxiedTransformMixin(object):
def __get__(self, instance, owner):
if isinstance(instance, TransformObjectsProxy):
self.experiment = instance.experiment
return self
else:
return super().__get__(instance, owner)
class TransformObjectsProxy(object):
def __init__(self, experiment=None):
self.experiment = experiment
def __get__(self, instance, owner):
#print("getting TransformObjectsProxy")
self.experiment = instance
return self
class Transform(object):
def __dapply__(self, experiment):
        raise NotImplementedError
def __eapply__(self, experiment):
        raise NotImplementedError
def __get__(self, instance, owner):
if isinstance(instance, TransformObjectsProxy):
#print("instance is a TransformObjectsProxy")
return self.__eapply__(instance.experiment) #experiment attribute of the TranformObjectsProxy
return instance.__dict__[self.name]
def __set__(self, instance, value):
instance.__dict__[self.name] = value
def __set_name__(self, owner, name):
self.name = name
class Filter(Transform):
def __init__(self, operator=None, value=None):
self.operator = operator
self.value = value
def __get__(self, instance, owner):
return self
def __lt__(self, other):
return self.__class__('__lt__', other)
def __le__(self, other):
return self.__class__('__le__', other)
def __eq__(self, other):
return self.__class__('__eq__', other)
def __ne__(self, other):
return self.__class__('__ne__', other)
def __gt__(self, other):
return self.__class__('__gt__', other)
def __ge__(self, other):
return self.__class__('__ge__', other)
def __dapply__(self, experiment_obj):
        raise NotImplementedError
def __eapply__(self, experiment):
filtered_df = self.__class__.__dapply__(self, experiment)
return experiment.with_data_df(filtered_df)
def __repr__(self):
base_repr = object.__repr__(self)[1:-1]
full_repr = "<{} - operator:{}; value:{};>".format(base_repr, str(self.operator), str(self.value))
return full_repr
class AttributeFilter(Filter):
def __init__(self, operator=None, value=None, attribute=None):
Filter.__init__(self, operator, value)
self.attribute = attribute
def new(self, operator=None, value=None, attribute=None):
return self.__class__(operator,value, attribute)
def __lt__(self, other):
return self.__class__('__lt__', other, self.attribute)
def __le__(self, other):
return self.__class__('__le__', other, self.attribute)
def __eq__(self, other):
return self.__class__('__eq__', other, self.attribute)
def __ne__(self, other):
return self.__class__('__ne__', other, self.attribute)
def __gt__(self, other):
return self.__class__('__gt__', other, self.attribute)
def __ge__(self, other):
return self.__class__('__ge__', other, self.attribute)
def __getattr__(self, name):
return self.__class__(operator = self.operator, value=self.value, attribute=name)
def __getitem__(self, item):
return self.__class__(operator = self.operator, value=self.value, attribute=item)
def __repr__(self):
base_repr = object.__repr__(self)[1:-1]
full_repr = "<{} - attribute:{}; operator:{}; value:{};>".format(base_repr, str(self.attribute), str(self.operator), str(self.value))
return full_repr
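# Illustrative sketch (assumption, not part of the original module): comparison
# operators on an AttributeFilter build new filter objects, which are only applied to
# an experiment's dataframe later (e.g. via __eapply__).
def _example_attribute_filter():
    flt = AttributeFilter(attribute='group') == 'control'
    # flt.attribute == 'group', flt.operator == '__eq__', flt.value == 'control'
    return flt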
class GroupByTransform(Transform):
def __init__(self, value=None):
Transform.__init__(self)
self.value = value
def __call__(self, value):
return self.__class__(value)
def __getitem__(self, value):
return self.__class__(value)
class FlexibleOperatorMixin(object):
def _op_function(self, dataframe):
return getattr(dataframe, self.operator)
class AttributeFlexibleOperatorMixin(object):
def _op_function(self, dataframe):
return getattr(dataframe[self.attribute], self.operator)
| bsd-3-clause | -7,375,103,905,673,027,000 | 29.323741 | 141 | 0.590747 | false |
0x46616c6b/ansible | lib/ansible/modules/storage/netapp/na_cdot_lun.py | 16 | 12593 | #!/usr/bin/python
# (c) 2017, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
module: na_cdot_lun
short_description: Manage NetApp cDOT luns
extends_documentation_fragment:
- netapp.ontap
version_added: '2.3'
author: Sumit Kumar ([email protected])
description:
- Create, destroy, resize luns on NetApp cDOT.
options:
state:
description:
- Whether the specified lun should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- The name of the lun to manage.
required: true
flexvol_name:
description:
- The name of the FlexVol the lun should exist on.
- Required when C(state=present).
size:
description:
- The size of the lun in C(size_unit).
- Required when C(state=present).
size_unit:
description:
- The unit used to interpret the size parameter.
choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
default: 'gb'
force_resize:
description:
    - Forcibly reduce the size. This must be set when reducing the size of the LUN, as a safeguard against accidentally shrinking it.
default: false
force_remove:
description:
- If "true", override checks that prevent a LUN from being destroyed if it is online and mapped.
- If "false", destroying an online and mapped LUN will fail.
default: false
force_remove_fenced:
description:
- If "true", override checks that prevent a LUN from being destroyed while it is fenced.
- If "false", attempting to destroy a fenced LUN will fail.
- The default if not specified is "false". This field is available in Data ONTAP 8.2 and later.
default: false
vserver:
required: true
description:
- The name of the vserver to use.
'''
EXAMPLES = """
- name: Create LUN
na_cdot_lun:
state: present
name: ansibleLUN
flexvol_name: ansibleVolume
vserver: ansibleVServer
size: 5
size_unit: mb
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Resize Lun
na_cdot_lun:
state: present
name: ansibleLUN
force_resize: True
flexvol_name: ansibleVolume
vserver: ansibleVServer
size: 5
size_unit: gb
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppCDOTLUN(object):
def __init__(self):
self._size_unit_map = dict(
bytes=1,
b=1,
kb=1024,
mb=1024 ** 2,
gb=1024 ** 3,
tb=1024 ** 4,
pb=1024 ** 5,
eb=1024 ** 6,
zb=1024 ** 7,
yb=1024 ** 8
)
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
size=dict(type='int'),
size_unit=dict(default='gb',
choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
'pb', 'eb', 'zb', 'yb'], type='str'),
force_resize=dict(default=False, type='bool'),
force_remove=dict(default=False, type='bool'),
force_remove_fenced=dict(default=False, type='bool'),
flexvol_name=dict(type='str'),
vserver=dict(required=True, type='str'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_if=[
('state', 'present', ['flexvol_name', 'size'])
],
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
self.size_unit = p['size_unit']
if p['size'] is not None:
self.size = p['size'] * self._size_unit_map[self.size_unit]
else:
self.size = None
self.force_resize = p['force_resize']
self.force_remove = p['force_remove']
self.force_remove_fenced = p['force_remove_fenced']
self.flexvol_name = p['flexvol_name']
self.vserver = p['vserver']
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_ontap_zapi(module=self.module, vserver=self.vserver)
def get_lun(self):
"""
Return details about the LUN
:return: Details about the lun
:rtype: dict
"""
luns = []
tag = None
while True:
lun_info = netapp_utils.zapi.NaElement('lun-get-iter')
if tag:
lun_info.add_new_child('tag', tag, True)
query_details = netapp_utils.zapi.NaElement('lun-info')
query_details.add_new_child('vserver', self.vserver)
query_details.add_new_child('volume', self.flexvol_name)
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(query_details)
lun_info.add_child_elem(query)
result = self.server.invoke_successfully(lun_info, True)
if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
attr_list = result.get_child_by_name('attributes-list')
luns.extend(attr_list.get_children())
tag = result.get_child_content('next-tag')
if tag is None:
break
# The LUNs have been extracted.
# Find the specified lun and extract details.
return_value = None
for lun in luns:
path = lun.get_child_content('path')
_rest, _splitter, found_name = path.rpartition('/')
if found_name == self.name:
size = lun.get_child_content('size')
# Find out if the lun is attached
attached_to = None
lun_id = None
if lun.get_child_content('mapped') == 'true':
lun_map_list = netapp_utils.zapi.NaElement.create_node_with_children(
'lun-map-list-info', **{'path': path})
result = self.server.invoke_successfully(
lun_map_list, enable_tunneling=True)
igroups = result.get_child_by_name('initiator-groups')
if igroups:
for igroup_info in igroups.get_children():
igroup = igroup_info.get_child_content(
'initiator-group-name')
attached_to = igroup
lun_id = igroup_info.get_child_content('lun-id')
return_value = {
'name': found_name,
'size': size,
'attached_to': attached_to,
'lun_id': lun_id
}
else:
continue
return return_value
def create_lun(self):
"""
Create LUN with requested name and size
"""
path = '/vol/%s/%s' % (self.flexvol_name, self.name)
lun_create = netapp_utils.zapi.NaElement.create_node_with_children(
'lun-create-by-size', **{'path': path,
'size': str(self.size),
'ostype': 'linux'})
try:
self.server.invoke_successfully(lun_create, enable_tunneling=True)
except netapp_utils.zapi.NaApiError:
err = get_exception()
self.module.fail_json(msg="Error provisioning lun %s of size %s" % (self.name, self.size),
exception=str(err))
def delete_lun(self):
"""
Delete requested LUN
"""
path = '/vol/%s/%s' % (self.flexvol_name, self.name)
lun_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'lun-destroy', **{'path': path,
'force': str(self.force_remove),
'destroy-fenced-lun':
str(self.force_remove_fenced)})
try:
self.server.invoke_successfully(lun_delete, enable_tunneling=True)
except netapp_utils.zapi.NaApiError:
err = get_exception()
self.module.fail_json(msg="Error deleting lun %s" % path,
exception=str(err))
def resize_lun(self):
"""
Resize requested LUN.
:return: True if LUN was actually re-sized, false otherwise.
:rtype: bool
"""
path = '/vol/%s/%s' % (self.flexvol_name, self.name)
lun_resize = netapp_utils.zapi.NaElement.create_node_with_children(
'lun-resize', **{'path': path,
'size': str(self.size),
'force': str(self.force_resize)})
try:
self.server.invoke_successfully(lun_resize, enable_tunneling=True)
except netapp_utils.zapi.NaApiError:
e = get_exception()
if str(e.code) == "9042":
# Error 9042 denotes the new LUN size being the same as the
# old LUN size. This happens when there's barely any difference
# in the two sizes. For example, from 8388608 bytes to
# 8194304 bytes. This should go away if/when the default size
# requested/reported to/from the controller is changed to a
# larger unit (MB/GB/TB).
return False
else:
err = get_exception()
self.module.fail_json(msg="Error resizing lun %s" % path,
exception=str(err))
return True
def apply(self):
property_changed = False
multiple_properties_changed = False
size_changed = False
lun_exists = False
lun_detail = self.get_lun()
if lun_detail:
lun_exists = True
current_size = lun_detail['size']
if self.state == 'absent':
property_changed = True
elif self.state == 'present':
if not current_size == self.size:
size_changed = True
property_changed = True
else:
if self.state == 'present':
property_changed = True
if property_changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if not lun_exists:
self.create_lun()
else:
if size_changed:
# Ensure that size was actually changed. Please
# read notes in 'resize_lun' function for details.
size_changed = self.resize_lun()
if not size_changed and not \
multiple_properties_changed:
property_changed = False
elif self.state == 'absent':
self.delete_lun()
changed = property_changed or size_changed
# TODO: include other details about the lun (size, etc.)
self.module.exit_json(changed=changed)
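# Illustrative sketch (not part of the original module): how the size_unit map in
# NetAppCDOTLUN.__init__ turns the user-facing size/size_unit pair into bytes,
# e.g. size=5, size_unit='mb' -> 5 * 1024 ** 2.
def _example_size_in_bytes(size, size_unit='gb'):
    unit_map = {'bytes': 1, 'b': 1, 'kb': 1024, 'mb': 1024 ** 2, 'gb': 1024 ** 3}
    return size * unit_map[size_unit]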
def main():
v = NetAppCDOTLUN()
v.apply()
if __name__ == '__main__':
main()
| gpl-3.0 | 5,538,075,810,956,931,000 | 31.794271 | 126 | 0.541968 | false |
chouseknecht/ansible | lib/ansible/modules/network/nxos/_nxos_l3_interface.py | 21 | 7305 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: nxos_l3_interface
version_added: "2.5"
author: "Trishna Guha (@trishnaguha)"
short_description: Manage L3 interfaces on Cisco NXOS network devices
description:
- This module provides declarative management of L3 interfaces
on Cisco NXOS network devices.
deprecated:
removed_in: '2.13'
alternative: nxos_l3_interfaces
why: Updated modules released with more functionality
notes:
- Tested against NXOSv 7.0(3)I5(1).
options:
name:
description:
- Name of the L3 interface.
ipv4:
description:
- IPv4 of the L3 interface.
ipv6:
description:
- IPv6 of the L3 interface.
aggregate:
description: List of L3 interfaces definitions.
state:
description:
- State of the L3 interface configuration.
default: present
choices: ['present', 'absent']
extends_documentation_fragment: nxos
"""
EXAMPLES = """
- name: Set interface IPv4 address
nxos_l3_interface:
name: Ethernet2/3
ipv4: 192.168.0.1/24
- name: Remove interface IPv4 address
nxos_l3_interface:
name: Ethernet2/3
state: absent
- name: Set IP addresses on aggregate
nxos_l3_interface:
aggregate:
- { name: Ethernet2/1, ipv4: 192.168.2.10/24 }
- { name: Ethernet2/5, ipv4: 192.168.3.10/24, ipv6: "fd5d:12c9:2201:1::1/64" }
- name: Remove IP addresses on aggregate
nxos_l3_interface:
aggregate:
- { name: Ethernet2/1, ipv4: 192.168.2.10/24 }
- { name: Ethernet2/5, ipv4: 192.168.3.10/24, ipv6: "fd5d:12c9:2201:1::1/64" }
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- interface ethernet2/3
- no switchport
- ip address 192.168.22.1/24
- ipv6 address "fd5d:12c9:2201:1::1/64"
- no ip address 192.168.22.1/24
"""
import re
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import CustomNetworkConfig
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.nxos.nxos import get_config, load_config
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, normalize_interface
def search_obj_in_list(name, lst):
for o in lst:
if o['name'] == name:
return o
def map_obj_to_commands(updates, module, warnings):
commands = list()
want, have = updates
for w in want:
name = w['name']
ipv4 = w['ipv4']
ipv6 = w['ipv6']
state = w['state']
del w['state']
obj_in_have = search_obj_in_list(name, have)
if not obj_in_have:
warnings.append('Unknown interface {0}'.format(name))
elif state == 'absent':
command = []
if obj_in_have['name'] == name:
if ipv4 and ipv4 == obj_in_have['ipv4']:
command.append('no ip address {0}'.format(ipv4))
if ipv6 and ipv6 in obj_in_have['ipv6']:
command.append('no ipv6 address {0}'.format(ipv6))
if command:
command.append('exit')
command.insert(0, 'interface {0}'.format(name))
commands.extend(command)
elif state == 'present':
command = []
if obj_in_have['name'] == name:
if ipv4 and ipv4 != obj_in_have['ipv4']:
command.append('ip address {0}'.format(ipv4))
if ipv6 and ipv6 not in obj_in_have['ipv6']:
command.append('ipv6 address {0}'.format(ipv6))
if command:
command.append('exit')
command.insert(0, 'interface {0}'.format(name))
elif not ipv4 and not ipv6:
command.append('interface {0}'.format(name))
commands.extend(command)
return commands
def map_params_to_obj(module):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
d = item.copy()
name = d['name']
d['name'] = normalize_interface(name)
obj.append(d)
else:
obj.append({
'name': normalize_interface(module.params['name']),
'ipv4': module.params['ipv4'],
'ipv6': module.params['ipv6'],
'state': module.params['state']
})
return obj
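# Illustrative sketch (assumption, not part of the original module): with
# aggregate=[{"name": "Ethernet2/1", "ipv4": "192.168.2.10/24"}] and default module
# parameters, map_params_to_obj() yields entries shaped like
#     {'name': 'Ethernet2/1', 'ipv4': '192.168.2.10/24', 'ipv6': None,
#      'state': 'present'}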
def map_config_to_obj(want, module):
objs = list()
netcfg = CustomNetworkConfig(indent=2, contents=get_config(module))
for w in want:
parents = ['interface {0}'.format(w['name'])]
config = netcfg.get_section(parents)
obj = dict(name=None, ipv4=None, ipv6=[])
if config:
match_name = re.findall(r'interface (\S+)', config, re.M)
if match_name:
obj['name'] = normalize_interface(match_name[0])
match_ipv4 = re.findall(r'ip address (\S+)', config, re.M)
if match_ipv4:
obj['ipv4'] = match_ipv4[0]
match_ipv6 = re.findall(r'ipv6 address (\S+)', config, re.M)
if match_ipv6:
obj['ipv6'] = match_ipv6
objs.append(obj)
return objs
def main():
""" main entry point for module execution
"""
element_spec = dict(
name=dict(),
ipv4=dict(),
ipv6=dict(),
state=dict(default='present', choices=['present', 'absent'])
)
aggregate_spec = deepcopy(element_spec)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
)
argument_spec.update(element_spec)
argument_spec.update(nxos_argument_spec)
required_one_of = [['name', 'aggregate']]
mutually_exclusive = [['name', 'aggregate']]
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
result = {'changed': False}
want = map_params_to_obj(module)
have = map_config_to_obj(want, module)
commands = map_obj_to_commands((want, have), module, warnings)
result['commands'] = commands
if warnings:
result['warnings'] = warnings
if commands:
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 8,554,241,747,448,360,000 | 28.103586 | 93 | 0.590007 | false |
nateprewitt/pipenv | pipenv/patched/prettytoml/tokens/__init__.py | 5 | 4461 |
"""
TOML lexical tokens.
"""
class TokenType:
"""
A TokenType is a concrete type of a source token along with a defined priority and a higher-order kind.
The priority will be used in determining the tokenization behaviour of the lexer in the following manner:
whenever more than one token is recognizable as the next possible token and they are all of equal source
length, this priority is going to be used to break the tie by favoring the token type of the lowest priority
value. A TokenType instance is naturally ordered by its priority.
"""
def __init__(self, name, priority, is_metadata):
self._priority = priority
self._name = name
self._is_metadata = is_metadata
@property
def is_metadata(self):
return self._is_metadata
@property
def priority(self):
return self._priority
def __repr__(self):
return "{}-{}".format(self.priority, self._name)
def __lt__(self, other):
return isinstance(other, TokenType) and self._priority < other.priority
# Possible types of tokens
TYPE_BOOLEAN = TokenType('boolean', 0, is_metadata=False)
TYPE_INTEGER = TokenType('integer', 0, is_metadata=False)
TYPE_OP_COMMA = TokenType('comma', 0, is_metadata=True)
TYPE_OP_SQUARE_LEFT_BRACKET = TokenType('square_left_bracket', 0, is_metadata=True)
TYPE_OP_SQUARE_RIGHT_BRACKET = TokenType('square_right_bracket', 0, is_metadata=True)
TYPE_OP_CURLY_LEFT_BRACKET = TokenType('curly_left_bracket', 0, is_metadata=True)
TYPE_OP_CURLY_RIGHT_BRACKET = TokenType('curly_right_bracket', 0, is_metadata=True)
TYPE_OP_ASSIGNMENT = TokenType('assignment', 0, is_metadata=True)
TYPE_OP_DOUBLE_SQUARE_LEFT_BRACKET = TokenType('double_square_left_bracket', 0, is_metadata=True)
TYPE_OP_DOUBLE_SQUARE_RIGHT_BRACKET = TokenType('double_square_right_bracket', 0, is_metadata=True)
TYPE_FLOAT = TokenType('float', 1, is_metadata=False)
TYPE_DATE = TokenType('date', 40, is_metadata=False)
TYPE_OPT_DOT = TokenType('dot', 40, is_metadata=True)
TYPE_BARE_STRING = TokenType('bare_string', 50, is_metadata=False)
TYPE_STRING = TokenType('string', 90, is_metadata=False)
TYPE_MULTILINE_STRING = TokenType('multiline_string', 90, is_metadata=False)
TYPE_LITERAL_STRING = TokenType('literal_string', 90, is_metadata=False)
TYPE_MULTILINE_LITERAL_STRING = TokenType('multiline_literal_string', 90, is_metadata=False)
TYPE_NEWLINE = TokenType('newline', 91, is_metadata=True)
TYPE_WHITESPACE = TokenType('whitespace', 93, is_metadata=True)
TYPE_COMMENT = TokenType('comment', 95, is_metadata=True)
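# Illustrative sketch (not part of the original module): TokenType instances order by
# priority, which is how equally-long candidate tokens are tie-broken -- an integer
# (priority 0) is favored over a bare string (priority 50).
def _example_priority_ordering():
    return sorted([TYPE_BARE_STRING, TYPE_INTEGER]) == [TYPE_INTEGER, TYPE_BARE_STRING]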
def is_operator(token):
"""
Returns True if the given token is an operator token.
"""
return token.type in (
TYPE_OP_COMMA,
TYPE_OP_SQUARE_LEFT_BRACKET,
TYPE_OP_SQUARE_RIGHT_BRACKET,
TYPE_OP_DOUBLE_SQUARE_LEFT_BRACKET,
TYPE_OP_DOUBLE_SQUARE_RIGHT_BRACKET,
TYPE_OP_CURLY_LEFT_BRACKET,
TYPE_OP_CURLY_RIGHT_BRACKET,
TYPE_OP_ASSIGNMENT,
TYPE_OPT_DOT,
)
def is_string(token):
return token.type in (
TYPE_STRING,
TYPE_MULTILINE_STRING,
TYPE_LITERAL_STRING,
TYPE_BARE_STRING,
TYPE_MULTILINE_LITERAL_STRING
)
class Token:
"""
A token/lexeme in a TOML source file.
A Token instance is naturally ordered by its type.
"""
def __init__(self, _type, source_substring, col=None, row=None):
self._source_substring = source_substring
self._type = _type
self._col = col
self._row = row
def __eq__(self, other):
if not isinstance(other, Token):
return False
return self.source_substring == other.source_substring and self.type == other.type
@property
def col(self):
"""
Column number (1-indexed).
"""
return self._col
@property
def row(self):
"""
Row number (1-indexed).
"""
return self._row
@property
def type(self):
"""
One of of the TOKEN_TYPE_* constants.
"""
return self._type
@property
def source_substring(self):
"""
The substring of the initial source file containing this token.
"""
return self._source_substring
def __lt__(self, other):
return isinstance(other, Token) and self.type < other.type
def __repr__(self):
return "{}: {}".format(self.type, self.source_substring)
| mit | 928,640,991,608,080,800 | 31.801471 | 112 | 0.655907 | false |
V155/qutebrowser | qutebrowser/browser/navigate.py | 1 | 5397 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2018 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Implementation of :navigate."""
import posixpath
from qutebrowser.browser import webelem
from qutebrowser.config import config
from qutebrowser.utils import objreg, urlutils, log, message, qtutils
from qutebrowser.mainwindow import mainwindow
class Error(Exception):
"""Raised when the navigation can't be done."""
def incdec(url, count, inc_or_dec):
"""Helper method for :navigate when `where' is increment/decrement.
Args:
url: The current url.
count: How much to increment or decrement by.
inc_or_dec: Either 'increment' or 'decrement'.
"""
segments = set(config.val.url.incdec_segments)
try:
new_url = urlutils.incdec_number(url, inc_or_dec, count,
segments=segments)
except urlutils.IncDecError as error:
raise Error(error.msg)
return new_url
def path_up(url, count):
"""Helper method for :navigate when `where' is up.
Args:
url: The current url.
count: The number of levels to go up in the url.
"""
path = url.path()
if not path or path == '/':
raise Error("Can't go up!")
for _i in range(0, min(count, path.count('/'))):
path = posixpath.join(path, posixpath.pardir)
path = posixpath.normpath(path)
url.setPath(path)
return url
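# Illustrative sketch (assumption, not part of the original module): path_up() works on
# a QUrl in place, e.g. a URL whose path is "/a/b/c" with count=2 comes back with
# path "/a".
#
#     from PyQt5.QtCore import QUrl
#     url = QUrl("https://example.com/a/b/c")
#     path_up(url, 2)  # url.path() is now "/a"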
def _find_prevnext(prev, elems):
"""Find a prev/next element in the given list of elements."""
# First check for <link rel="prev(ious)|next">
rel_values = {'prev', 'previous'} if prev else {'next'}
for e in elems:
if e.tag_name() not in ['link', 'a'] or 'rel' not in e:
continue
if set(e['rel'].split(' ')) & rel_values:
log.hints.debug("Found {!r} with rel={}".format(e, e['rel']))
return e
# Then check for regular links/buttons.
elems = [e for e in elems if e.tag_name() != 'link']
option = 'prev_regexes' if prev else 'next_regexes'
if not elems:
return None
# pylint: disable=bad-config-option
for regex in getattr(config.val.hints, option):
# pylint: enable=bad-config-option
log.hints.vdebug("== Checking regex '{}'.".format(regex.pattern))
for e in elems:
text = str(e)
if not text:
continue
if regex.search(text):
log.hints.debug("Regex '{}' matched on '{}'.".format(
regex.pattern, text))
return e
else:
log.hints.vdebug("No match on '{}'!".format(text))
return None
def prevnext(*, browsertab, win_id, baseurl, prev=False,
tab=False, background=False, window=False):
"""Click a "previous"/"next" element on the page.
Args:
browsertab: The WebKitTab/WebEngineTab of the page.
baseurl: The base URL of the current tab.
prev: True to open a "previous" link, False to open a "next" link.
tab: True to open in a new tab, False for the current tab.
background: True to open in a background tab.
window: True to open in a new window, False for the current one.
"""
def _prevnext_cb(elems):
elem = _find_prevnext(prev, elems)
word = 'prev' if prev else 'forward'
if elem is None:
message.error("No {} links found!".format(word))
return
url = elem.resolve_url(baseurl)
if url is None:
message.error("No {} links found!".format(word))
return
qtutils.ensure_valid(url)
cur_tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
if window:
new_window = mainwindow.MainWindow(
private=cur_tabbed_browser.is_private)
new_window.show()
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=new_window.win_id)
tabbed_browser.tabopen(url, background=False)
elif tab:
cur_tabbed_browser.tabopen(url, background=background)
else:
browsertab.load_url(url)
try:
link_selector = webelem.css_selector('links', baseurl)
except webelem.Error as e:
raise Error(str(e))
browsertab.elements.find_css(link_selector, callback=_prevnext_cb,
error_cb=lambda err: message.error(str(err)))
| gpl-3.0 | -4,414,692,018,482,149,000 | 34.506579 | 78 | 0.61071 | false |
yland/coala | tests/parsing/StringProcessing/UnescapedSearchInBetweenTest.py | 17 | 8699 |
from coalib.parsing.StringProcessing import (
InBetweenMatch, unescaped_search_in_between)
from tests.parsing.StringProcessing.StringProcessingTestBase import (
StringProcessingTestBase)
class UnescapedSearchInBetweenTest(StringProcessingTestBase):
bs = StringProcessingTestBase.bs
test_basic_pattern = "'"
test_basic_expected_results = [
[(test_basic_pattern, 5,
r"escaped-escape: \\ ", 6,
test_basic_pattern, 32)],
[(test_basic_pattern, 5,
r"escaped-quote: \' ",
6, test_basic_pattern, 32)],
[(test_basic_pattern, 5,
r"escaped-anything: \X ", 6,
test_basic_pattern, 32)],
[(test_basic_pattern, 5,
r"two escaped escapes: \\\\ ", 6,
test_basic_pattern, 32)],
[(test_basic_pattern, 5,
r"escaped-quote at end: \'", 6,
test_basic_pattern, 32)],
[(test_basic_pattern, 5,
"escaped-escape at end: " + 2 * bs, 6,
test_basic_pattern, 32)],
[(test_basic_pattern, 15, "str1", 16, test_basic_pattern, 20),
(test_basic_pattern, 27, "str2", 28, test_basic_pattern, 32)],
[(test_basic_pattern, 15, "str1", 16, test_basic_pattern, 20),
(test_basic_pattern, 27, "str2", 28, test_basic_pattern, 32)],
[(test_basic_pattern, 15, "str1", 16, test_basic_pattern, 20),
(test_basic_pattern, 27, "str2", 28, test_basic_pattern, 32)],
[(test_basic_pattern, 15, "str1", 16, test_basic_pattern, 20),
(test_basic_pattern, 27, "str2", 28, test_basic_pattern, 32)],
[(test_basic_pattern, 15, "str1", 16, test_basic_pattern, 20),
(test_basic_pattern, 27, "str2", 28, test_basic_pattern, 32)],
[(test_basic_pattern, 15, "str1", 16, test_basic_pattern, 20),
(test_basic_pattern, 27, "str2", 28, test_basic_pattern, 32)],
[(test_basic_pattern, 15, "str1", 16, test_basic_pattern, 20),
(test_basic_pattern, 27, "str2", 28, test_basic_pattern, 32)],
[(test_basic_pattern, 15, "str1", 16, test_basic_pattern, 20),
(test_basic_pattern, 21, "str2", 22, test_basic_pattern, 26),
(test_basic_pattern, 27, "str3", 28, test_basic_pattern, 32)],
[],
[],
[],
[]]
# Test the basic unescaped_search_in_between() functionality.
def test_basic(self):
expected_results = self.test_basic_expected_results
self.assertResultsEqual(
unescaped_search_in_between,
{(self.test_basic_pattern,
self.test_basic_pattern,
test_string,
0,
False,
use_regex): [InBetweenMatch.from_values(*args)
for args in result]
for test_string, result in zip(self.test_strings,
expected_results)
for use_regex in [True, False]},
list)
# Test the unescaped_search_in_between() while varying the max_match
# parameter.
def test_max_match(self):
search_pattern = self.test_basic_pattern
expected_master_results = self.test_basic_expected_results
self.assertResultsEqual(
unescaped_search_in_between,
{(search_pattern,
search_pattern,
test_string,
max_match,
False,
use_regex): [InBetweenMatch.from_values(*args)
for args in result]
for max_match in [1, 2, 3, 4, 5, 100]
for test_string, result in zip(
self.test_strings,
[elem[0: max_match] for elem in expected_master_results])
for use_regex in [True, False]},
list)
# Test the unescaped_search_in_between() function with different regex
# patterns.
def test_regex_pattern(self):
expected_results = [
[("abc", 0, "", 3, "abc", 3)],
[("ab", 0, "c", 2, "ab", 3)],
[("ab", 0, "c", 2, "ab", 3),
("ab", 21, r"bc\+'**'", 23, "ac", 31)],
[(self.bs, 12, r"\13q4ujsabbc", 13, self.bs, 25)],
[("###", 9, r"\\13q4ujsabbc\+'**'ac", 12, "###", 33),
("#", 37, ".", 38, "####", 39)],
[("a", 0, "", 1, "b", 1),
("a", 3, "", 4, "b", 4),
("b", 7, "", 8, "a", 8),
("##", 9, "", 11, "#\\", 11),
("a", 21, "", 22, "b", 22),
("b", 23, r"c\+'**'", 24, "a", 31),
("##", 33, "", 35, "#.", 35),
("#.", 37, "", 39, "##", 39),
("##", 41, "-", 43, "b", 44)],
[("abcabc", 0, r"cba###\\13q4ujs", 6, "abbc", 21)],
[]]
self.assertResultsEqual(
unescaped_search_in_between,
{(pattern,
pattern,
self.multi_pattern_test_string,
0,
False,
True): [InBetweenMatch.from_values(*args)
for args in result]
for pattern, result in zip(self.multi_patterns,
expected_results)},
list)
# Test the unescaped_search_in_between() function for its
# remove_empty_matches feature.
def test_auto_trim(self):
expected_results = [
[],
[(";", 2, r"\\\\\;\\#", 3, ";", 12),
(";", 25, "+ios", 26, ";", 30)],
[(";", 1, "2", 2, ";", 3),
(";", 5, "4", 6, ";", 7),
(";", 9, "6", 10, ";", 11)],
[(";", 1, "2", 2, ";", 3),
(";", 5, "4", 6, ";", 7),
(";", 9, "6", 10, ";", 11)],
[],
[],
[],
[],
[(";", 3, "a", 4, ";", 5)]]
self.assertResultsEqual(
unescaped_search_in_between,
{(self.auto_trim_test_pattern,
self.auto_trim_test_pattern,
test_string,
0,
True,
use_regex): [InBetweenMatch.from_values(*args)
for args in result]
for test_string, result in zip(self.auto_trim_test_strings,
expected_results)
for use_regex in [True, False]},
list)
# Test the unescaped_search_in_between() function for its use_regex
# parameter.
def test_disabled_regex(self):
search_pattern = r"'()?"
expected_results = [[] for x in range(len(self.test_strings))]
self.assertResultsEqual(
unescaped_search_in_between,
{(search_pattern,
search_pattern,
test_string,
0,
# For remove_empty_matches both works, True and False.
auto_trim,
False): [InBetweenMatch.from_values(*args)
for args in result]
for test_string, result in zip(self.test_strings,
expected_results)
for auto_trim in [True, False]},
list)
# Test the unescaped_search_in_between() function using the test-strings
# specific for search-in-between functions.
def test_extended(self):
expected_results = [
[("(", 0, "", 1, ")", 1),
("(", 6, "This is a word", 7, ")", 21),
("(", 25, "(in a word", 26, ")", 36)],
[("(", 4, "((((((((((((((((((1", 5, ")", 24)],
[("(", 6, "do (it ", 7, ")", 14),
("(", 41, "", 42, ")", 42),
("(", 44, "hello.", 45, ")", 51)],
[("(", 0, "", 1, ")", 1),
("(", 8,
r"This\ is a word\)and((in a\\\ word\\\\\) another \)", 9,
")", 60)],
[("(", 10, r"((((\\\(((((((((((1", 11, ")", 30)],
[("(", 11, "it ", 12, ")", 15),
("(", 45, "", 46, ")", 46),
("(", 48, "hello.", 49, ")", 55)]]
self.assertResultsEqual(
unescaped_search_in_between,
{(begin_pattern,
end_pattern,
test_string,
0,
False,
use_regex): [InBetweenMatch.from_values(*args)
for args in result]
for test_string, result in zip(
self.search_in_between_test_strings,
expected_results)
for use_regex, begin_pattern, end_pattern in
[(True, r"\(", r"\)"),
(False,
self.search_in_between_begin_pattern,
self.search_in_between_end_pattern)]},
list)
| agpl-3.0 | 2,084,851,367,532,090,400 | 38.540909 | 76 | 0.4535 | false |
artwr/airflow | tests/www/test_validators.py | 8 | 3244 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mock
import unittest
from airflow.www import validators
class TestGreaterEqualThan(unittest.TestCase):
def setUp(self):
super(TestGreaterEqualThan, self).setUp()
self.form_field_mock = mock.MagicMock(data='2017-05-06')
self.form_field_mock.gettext.side_effect = lambda msg: msg
self.other_field_mock = mock.MagicMock(data='2017-05-05')
self.other_field_mock.gettext.side_effect = lambda msg: msg
self.other_field_mock.label.text = 'other field'
self.form_stub = {'other_field': self.other_field_mock}
self.form_mock = mock.MagicMock(spec_set=dict)
self.form_mock.__getitem__.side_effect = self.form_stub.__getitem__
def _validate(self, fieldname=None, message=None):
if fieldname is None:
fieldname = 'other_field'
validator = validators.GreaterEqualThan(fieldname=fieldname,
message=message)
return validator(self.form_mock, self.form_field_mock)
def test_field_not_found(self):
self.assertRaisesRegexp(
validators.ValidationError,
"^Invalid field name 'some'.$",
self._validate,
fieldname='some',
)
def test_form_field_is_none(self):
self.form_field_mock.data = None
self.assertIsNone(self._validate())
def test_other_field_is_none(self):
self.other_field_mock.data = None
self.assertIsNone(self._validate())
def test_both_fields_are_none(self):
self.form_field_mock.data = None
self.other_field_mock.data = None
self.assertIsNone(self._validate())
def test_validation_pass(self):
self.assertIsNone(self._validate())
def test_validation_raises(self):
self.form_field_mock.data = '2017-05-04'
self.assertRaisesRegexp(
validators.ValidationError,
"^Field must be greater than or equal to other field.$",
self._validate,
)
def test_validation_raises_custom_message(self):
self.form_field_mock.data = '2017-05-04'
self.assertRaisesRegexp(
validators.ValidationError,
"^This field must be greater than or equal to MyField.$",
self._validate,
message="This field must be greater than or equal to MyField.",
)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -5,690,581,175,924,639,000 | 32.791667 | 75 | 0.652281 | false |
bharcode/Coursera | InformationRetrieval/PA1/task2/common.py | 2 | 3140 | from array import array
from collections import deque
#VB Encoding
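# Variable-byte (VB) encoding: every output byte carries 7 payload bits and the
# last byte of each number has its high bit set as a terminator.
# Hand-worked example (illustrative, not from the original assignment):
#   encodeNumber(824) -> deque([6, 184])   # 824 = 6*128 + 56; 56 + 128 = 184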
def encodeNumber( n ):
bytesArray = deque(array('B'))
while True:
bytesArray.appendleft(n % 128)
if n < 128:
break
n = n/128
bytesArray[-1] += 128
return bytesArray
def encodeNumbers( numbers ):
result = array('B')
for n in numbers:
encodedN = encodeNumber(n)
result.extend(encodedN)
return result
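# compress() below gap-encodes a sorted postings list before VB-encoding it.
# Hand-worked example (illustrative): compress([3, 7, 12]) encodes the gaps
# [3, 4, 5], giving the VB bytes [131, 132, 133].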
def compress( postings_list ):
pl = []
prev = 0
#First get the delta values for the postings list
for n in postings_list:
pl.append( n - prev )
prev = n
return encodeNumbers(pl)
def decode( bytearray ):
pl = []
n = 0
for byte in bytearray:
byte = ord(byte)
if byte < 128:
n = n*128 + byte
else:
n = n*128 + (byte - 128)
pl.append(n)
n = 0
# Also convert from Gaps to actual values of docIDs
docIDs = []
prev = 0
for gap in pl:
docIDs.append(gap+prev)
prev += gap
return docIDs
def arrayToBytes( arr ):
barr = array('B')
#Write each number as 4 bytes
numberSoFar = deque(array('B'))
for n in arr:
for index in range(0, 4):
numberSoFar.appendleft( n & 0xFF )
n = n / 256
barr.extend(numberSoFar)
numberSoFar = deque(array('B'))
return barr
def bytesToArr( barr ):
arr = []
numberSoFar = 0
i = 0
#Read in 4 bytes at a time and add them up to get one number
for b in barr:
numberSoFar = numberSoFar + ord(b)
i = i + 1
if i == 4:
i = 0
arr.append(numberSoFar)
numberSoFar = 0
else:
numberSoFar = numberSoFar * 256
return arr
#postings is a byte array
def writePostingsList( dictFile, postingsFile, termId, postings, numPostings ):
b = arrayToBytes([ termId, postingsFile.tell(), len(postings), numPostings ])
dictFile.write( b )
postingsFile.write( postings )
#Returns a line[termID, Filepos, length of postings] from the .dict file
def getPostingHeader( dictFile ):
dEntry = dictFile.read(16)
if len(dEntry) == 0 :
return None
dEntry = bytesToArr(dEntry)
return dEntry
def getNextPostingsList( dictFile, postingsFile ):
dEntry = dictFile.read(16)
if len(dEntry) == 0 :
return None
#dEntry contains [termID, Filepos, lenofpostings]
dEntry = bytesToArr(dEntry)
postingsFile.seek(dEntry[1])
#posting is a string
posting = postingsFile.read( dEntry[2] )
posting = decode(posting)
return [ dEntry[0], posting ]
def mergePostings( pos1, pos2 ):
pos = []
i = 0
j = 0
while i < len(pos1) and j < len(pos2):
if pos1[i] < pos2[j]:
pos.append(pos1[i])
i = i + 1
elif pos2[j] < pos1[i]:
pos.append(pos2[j])
j = j + 1
else:
			print "Shouldn't happen, since postings are from different blocks"
pos.append(pos2[j])
j = j + 1
i = i + 1
while i < len(pos1):
pos.append( pos1[i] )
i = i + 1
while j < len(pos2):
pos.append( pos2[j] )
j = j + 1
return pos
def mergeAndWritePostings( dictFile, postingsFile, termId1, pos1, pos2 ):
mergedPos = mergePostings( pos1, pos2 )
numPostings = len(mergedPos)
writePostingsList(dictFile, postingsFile, termId1, compress(mergedPos), numPostings) | gpl-2.0 | -2,497,221,725,835,953,000 | 21.984733 | 85 | 0.641401 | false |
GoogleCloudPlatform/PerfKitBenchmarker | perfkitbenchmarker/scripts/spark_sql_test_scripts/spark_table.py | 1 | 1944 | """A PySpark driver that creates Spark tables for Spark SQL benchmark.
It takes an HCFS directory and a list of the names of the subdirectories of that
root directory. The subdirectories each hold Parquet data and are to be
converted into a table of the same name. The subdirectories are explicitly
provided because listing HCFS directories in PySpark is ugly.
sys.argv[1]: The root HCFS directory
sys.argv[2]: A comma separated list of the subdirectories/table names
"""
import argparse
import logging
import os
from pyspark.sql import SparkSession
from pyspark.sql.utils import AnalysisException
def main():
parser = argparse.ArgumentParser()
parser.add_argument('root_dir')
parser.add_argument('tables', type=lambda csv: csv.split(','))
args = parser.parse_args()
spark = (SparkSession.builder
.appName('Setup Spark tables')
.enableHiveSupport()
.getOrCreate())
for table in args.tables:
logging.info('Creating table %s', table)
table_dir = os.path.join(args.root_dir, table)
# clean up previous table
spark.sql('DROP TABLE IF EXISTS ' + table)
# register new table
spark.catalog.createTable(table, table_dir, source='parquet')
try:
# This loads the partitions under the table if table is partitioned.
spark.sql('MSCK REPAIR TABLE ' + table)
except AnalysisException:
# The table was not partitioned, which was presumably expected
pass
# Compute column statistics. Spark persists them in the TBL_PARAMS table of
# the Hive Metastore. I do not believe this interoperates with Hive's own
# statistics. See
# https://jaceklaskowski.gitbooks.io/mastering-spark-sql/content/spark-sql-LogicalPlan-AnalyzeColumnCommand.html
columns = ','.join(spark.table(table).columns)
spark.sql(
'ANALYZE TABLE {} COMPUTE STATISTICS FOR COLUMNS {}'.format(
table, columns))
if __name__ == '__main__':
main()
| apache-2.0 | -2,075,883,749,000,230,700 | 37.117647 | 116 | 0.713992 | false |
yakky/djangocms-text-ckeditor | djangocms_text_ckeditor/html.py | 1 | 5197 | # -*- coding: utf-8 -*-
import base64
import re
import uuid
from django.utils.module_loading import import_string
import html5lib
from html5lib import serializer, treebuilders, treewalkers
from html5lib.constants import namespaces
from html5lib.filters import sanitizer
from PIL import Image
from six import BytesIO
from . import settings
from .sanitizer import TextSanitizer
from .utils import plugin_to_tag
def _filter_kwargs():
kwargs = {
'allowed_elements': sanitizer.allowed_elements | frozenset(
((namespaces['html'], 'cms-plugin'), ),
),
}
if settings.TEXT_HTML_SANITIZE:
kwargs.update({
'allowed_elements': kwargs['allowed_elements'] | frozenset(
(namespaces['html'], tag)
for tag in settings.TEXT_ADDITIONAL_TAGS
),
'allowed_attributes': sanitizer.allowed_attributes | frozenset(
(None, attr)
for attr in settings.TEXT_ADDITIONAL_ATTRIBUTES
),
'allowed_protocols': sanitizer.allowed_protocols | frozenset(
settings.TEXT_ADDITIONAL_PROTOCOLS
),
})
return kwargs
def _get_default_parser():
if settings.TEXT_HTML_SANITIZE:
parser_classes = []
for parser_class in settings.ALLOW_TOKEN_PARSERS:
parser_classes.append(import_string(parser_class))
TextSanitizer.allow_token_parsers = parser_classes
return html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
DEFAULT_PARSER = _get_default_parser()
def clean_html(data, full=True, parser=DEFAULT_PARSER):
"""
Cleans HTML from XSS vulnerabilities using html5lib
If full is False, only the contents inside <body> will be returned (without
the <body> tags).
"""
if full:
dom_tree = parser.parse(data)
else:
dom_tree = parser.parseFragment(data)
walker = treewalkers.getTreeWalker('dom')
kwargs = _filter_kwargs()
stream = TextSanitizer(walker(dom_tree), **kwargs)
s = serializer.HTMLSerializer(
omit_optional_tags=False,
quote_attr_values='always',
)
return u''.join(s.serialize(stream))
def extract_images(data, plugin):
"""
    Extracts base64-encoded images from drag-and-drop actions in the browser and
    saves those images as plugins.
"""
if not settings.TEXT_SAVE_IMAGE_FUNCTION:
return data
tree_builder = html5lib.treebuilders.getTreeBuilder('dom')
parser = html5lib.html5parser.HTMLParser(tree=tree_builder)
dom = parser.parse(data)
found = False
for img in dom.getElementsByTagName('img'):
src = img.getAttribute('src')
if not src.startswith('data:'):
# nothing to do
continue
width = img.getAttribute('width')
height = img.getAttribute('height')
# extract the image data
data_re = re.compile(r'data:(?P<mime_type>[^"]*);(?P<encoding>[^"]*),(?P<data>[^"]*)')
m = data_re.search(src)
dr = m.groupdict()
mime_type = dr['mime_type']
image_data = dr['data']
if mime_type.find(';'):
mime_type = mime_type.split(';')[0]
try:
image_data = base64.b64decode(image_data)
except Exception:
image_data = base64.urlsafe_b64decode(image_data)
try:
image_type = mime_type.split('/')[1]
except IndexError:
# No image type specified -- will convert to jpg below if it's valid image data
image_type = ''
image = BytesIO(image_data)
        # generate filename and normalize image format
if image_type == 'jpg' or image_type == 'jpeg':
file_ending = 'jpg'
elif image_type == 'png':
file_ending = 'png'
elif image_type == 'gif':
file_ending = 'gif'
else:
# any not "web-safe" image format we try to convert to jpg
im = Image.open(image)
new_image = BytesIO()
file_ending = 'jpg'
im.save(new_image, 'JPEG')
new_image.seek(0)
image = new_image
filename = u'%s.%s' % (uuid.uuid4(), file_ending)
# transform image into a cms plugin
image_plugin = img_data_to_plugin(
filename, image, parent_plugin=plugin, width=width, height=height
)
# render the new html for the plugin
new_img_html = plugin_to_tag(image_plugin)
# replace the original image node with the newly created cms plugin html
img.parentNode.replaceChild(parser.parseFragment(new_img_html).childNodes[0], img)
found = True
if found:
return u''.join([y.toxml() for y in dom.getElementsByTagName('body')[0].childNodes])
else:
return data
def img_data_to_plugin(filename, image, parent_plugin, width=None, height=None):
func_name = settings.TEXT_SAVE_IMAGE_FUNCTION.split('.')[-1]
module = __import__(
'.'.join(settings.TEXT_SAVE_IMAGE_FUNCTION.split('.')[:-1]), fromlist=[func_name]
)
func = getattr(module, func_name)
return func(filename, image, parent_plugin, width=width, height=height)
| bsd-3-clause | -8,654,637,987,762,446,000 | 33.879195 | 94 | 0.612661 | false |
cfairweather/adafruit-led-pixels | gluon/contrib/pysimplesoap/client.py | 9 | 35446 | #!/usr/bin/env python
# -*- coding: latin-1 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"Pythonic simple SOAP Client implementation"
__author__ = "Mariano Reingart ([email protected])"
__copyright__ = "Copyright (C) 2008 Mariano Reingart"
__license__ = "LGPL 3.0"
__version__ = "1.07a"
TIMEOUT = 60
import cPickle as pickle
import hashlib
import logging
import os
import tempfile
import urllib2
from urlparse import urlsplit
from simplexml import SimpleXMLElement, TYPE_MAP, REVERSE_TYPE_MAP, OrderedDict
from transport import get_http_wrapper, set_http_wrapper, get_Http
log = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.WARNING)
class SoapFault(RuntimeError):
def __init__(self,faultcode,faultstring):
self.faultcode = faultcode
self.faultstring = faultstring
RuntimeError.__init__(self, faultcode, faultstring)
def __str__(self):
return self.__unicode__().encode("ascii", "ignore")
def __unicode__(self):
return u'%s: %s' % (self.faultcode, self.faultstring)
def __repr__(self):
return u"SoapFault(%s, %s)" % (repr(self.faultcode),
repr(self.faultstring))
# soap protocol specification & namespace
soap_namespaces = dict(
soap11="http://schemas.xmlsoap.org/soap/envelope/",
soap="http://schemas.xmlsoap.org/soap/envelope/",
soapenv="http://schemas.xmlsoap.org/soap/envelope/",
soap12="http://www.w3.org/2003/05/soap-env",
)
_USE_GLOBAL_DEFAULT = object()
class SoapClient(object):
"Simple SOAP Client (simil PHP)"
def __init__(self, location = None, action = None, namespace = None,
cert = None, trace = False, exceptions = True, proxy = None, ns=False,
soap_ns=None, wsdl = None, cache = False, cacert=None,
sessions=False, soap_server=None, timeout=_USE_GLOBAL_DEFAULT,
http_headers={}
):
"""
:param http_headers: Additional HTTP Headers; example: {'Host': 'ipsec.example.com'}
"""
self.certssl = cert
self.keyssl = None
self.location = location # server location (url)
self.action = action # SOAP base action
self.namespace = namespace # message
self.trace = trace # show debug messages
        self.exceptions = exceptions # raise exceptions? (Soap Faults)
self.xml_request = self.xml_response = ''
self.http_headers = http_headers
if not soap_ns and not ns:
self.__soap_ns = 'soap' # 1.1
elif not soap_ns and ns:
self.__soap_ns = 'soapenv' # 1.2
else:
self.__soap_ns = soap_ns
# SOAP Server (special cases like oracle or jbossas6)
self.__soap_server = soap_server
# SOAP Header support
self.__headers = {} # general headers
self.__call_headers = None # OrderedDict to be marshalled for RPC Call
# check if the Certification Authority Cert is a string and store it
if cacert and cacert.startswith("-----BEGIN CERTIFICATE-----"):
fd, filename = tempfile.mkstemp()
f = os.fdopen(fd, 'w+b', -1)
if self.trace: log.info(u"Saving CA certificate to %s" % filename)
f.write(cacert)
cacert = filename
f.close()
self.cacert = cacert
if timeout is _USE_GLOBAL_DEFAULT:
timeout = TIMEOUT
else:
timeout = timeout
# Create HTTP wrapper
Http = get_Http()
self.http = Http(timeout=timeout, cacert=cacert, proxy=proxy, sessions=sessions)
self.__ns = ns # namespace prefix or False to not use it
if not ns:
self.__xml = """<?xml version="1.0" encoding="UTF-8"?>
<%(soap_ns)s:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:%(soap_ns)s="%(soap_uri)s">
<%(soap_ns)s:Header/>
<%(soap_ns)s:Body>
<%(method)s xmlns="%(namespace)s">
</%(method)s>
</%(soap_ns)s:Body>
</%(soap_ns)s:Envelope>"""
else:
self.__xml = """<?xml version="1.0" encoding="UTF-8"?>
<%(soap_ns)s:Envelope xmlns:%(soap_ns)s="%(soap_uri)s" xmlns:%(ns)s="%(namespace)s">
<%(soap_ns)s:Header/>
<%(soap_ns)s:Body>
<%(ns)s:%(method)s>
</%(ns)s:%(method)s>
</%(soap_ns)s:Body>
</%(soap_ns)s:Envelope>"""
# parse wsdl url
self.services = wsdl and self.wsdl_parse(wsdl, debug=trace, cache=cache)
self.service_port = None # service port for late binding
def __getattr__(self, attr):
"Return a pseudo-method that can be called"
if not self.services: # not using WSDL?
return lambda self=self, *args, **kwargs: self.call(attr,*args,**kwargs)
else: # using WSDL:
return lambda *args, **kwargs: self.wsdl_call(attr,*args,**kwargs)
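    # Illustrative use of the pseudo-method mechanism (the service URL and the
    # operation/parameter names are hypothetical):
    #   client = SoapClient(wsdl="http://example.com/service?wsdl")
    #   result = client.GetQuote(symbol="ACME")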
def call(self, method, *args, **kwargs):
"""Prepare xml request and make SOAP call, returning a SimpleXMLElement.
If a keyword argument called "headers" is passed with a value of a
SimpleXMLElement object, then these headers will be inserted into the
request.
"""
#TODO: method != input_message
# Basic SOAP request:
xml = self.__xml % dict(method=method, namespace=self.namespace, ns=self.__ns,
soap_ns=self.__soap_ns, soap_uri=soap_namespaces[self.__soap_ns])
request = SimpleXMLElement(xml,namespace=self.__ns and self.namespace, prefix=self.__ns)
try:
request_headers = kwargs.pop('headers')
except KeyError:
request_headers = None
# serialize parameters
if kwargs:
parameters = kwargs.items()
else:
parameters = args
if parameters and isinstance(parameters[0], SimpleXMLElement):
# merge xmlelement parameter ("raw" - already marshalled)
if parameters[0].children() is not None:
for param in parameters[0].children():
getattr(request,method).import_node(param)
elif parameters:
# marshall parameters:
            for k,v in parameters: # dict: tag=value
getattr(request,method).marshall(k,v)
elif not self.__soap_server in ('oracle', ) or self.__soap_server in ('jbossas6',):
# JBossAS-6 requires no empty method parameters!
delattr(request("Body", ns=soap_namespaces.values(),), method)
# construct header and parameters (if not wsdl given) except wsse
if self.__headers and not self.services:
self.__call_headers = dict([(k, v) for k, v in self.__headers.items()
if not k.startswith("wsse:")])
# always extract WS Security header and send it
if 'wsse:Security' in self.__headers:
#TODO: namespaces too hardwired, clean-up...
header = request('Header' , ns=soap_namespaces.values(),)
k = 'wsse:Security'
v = self.__headers[k]
header.marshall(k, v, ns=False, add_children_ns=False)
header(k)['xmlns:wsse'] = 'http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd'
#<wsse:UsernameToken xmlns:wsu='http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd'>
if self.__call_headers:
header = request('Header' , ns=soap_namespaces.values(),)
for k, v in self.__call_headers.items():
##if not self.__ns:
## header['xmlns']
header.marshall(k, v, ns=self.__ns, add_children_ns=False)
if request_headers:
header = request('Header' , ns=soap_namespaces.values(),)
for subheader in request_headers.children():
header.import_node(subheader)
self.xml_request = request.as_xml()
self.xml_response = self.send(method, self.xml_request)
response = SimpleXMLElement(self.xml_response, namespace=self.namespace)
if self.exceptions and response("Fault", ns=soap_namespaces.values(), error=False):
raise SoapFault(unicode(response.faultcode), unicode(response.faultstring))
return response
def send(self, method, xml):
"Send SOAP request using HTTP"
if self.location == 'test': return
# location = "%s" % self.location #?op=%s" % (self.location, method)
location = self.location
if self.services:
soap_action = self.action
else:
soap_action = self.action + method
headers={
'Content-type': 'text/xml; charset="UTF-8"',
'Content-length': str(len(xml)),
"SOAPAction": "\"%s\"" % (soap_action)
}
headers.update(self.http_headers)
log.info("POST %s" % location)
log.info("Headers: %s" % headers)
if self.trace:
print "-"*80
print "POST %s" % location
print '\n'.join(["%s: %s" % (k,v) for k,v in headers.items()])
print u"\n%s" % xml.decode("utf8","ignore")
response, content = self.http.request(
location, "POST", body=xml, headers=headers)
self.response = response
self.content = content
if self.trace:
print
print '\n'.join(["%s: %s" % (k,v) for k,v in response.items()])
print content#.decode("utf8","ignore")
print "="*80
return content
def get_operation(self, method):
# try to find operation in wsdl file
soap_ver = self.__soap_ns == 'soap12' and 'soap12' or 'soap11'
if not self.service_port:
for service_name, service in self.services.items():
for port_name, port in [port for port in service['ports'].items()]:
if port['soap_ver'] == soap_ver:
self.service_port = service_name, port_name
break
else:
raise RuntimeError("Cannot determine service in WSDL: "
"SOAP version: %s" % soap_ver)
else:
port = self.services[self.service_port[0]]['ports'][self.service_port[1]]
self.location = port['location']
operation = port['operations'].get(unicode(method))
if not operation:
raise RuntimeError("Operation %s not found in WSDL: "
"Service/Port Type: %s" %
(method, self.service_port))
return operation
def wsdl_call(self, method, *args, **kwargs):
"Pre and post process SOAP call, input and output parameters using WSDL"
soap_uri = soap_namespaces[self.__soap_ns]
operation = self.get_operation(method)
# get i/o type declarations:
input = operation['input']
output = operation['output']
header = operation.get('header')
if 'action' in operation:
self.action = operation['action']
# sort parameters (same order as xsd:sequence)
def sort_dict(od, d):
if isinstance(od, dict):
ret = OrderedDict()
for k in od.keys():
v = d.get(k)
# don't append null tags!
if v is not None:
if isinstance(v, dict):
v = sort_dict(od[k], v)
elif isinstance(v, list):
v = [sort_dict(od[k][0], v1)
for v1 in v]
ret[str(k)] = v
return ret
else:
return d
# construct header and parameters
if header:
self.__call_headers = sort_dict(header, self.__headers)
if input and args:
# convert positional parameters to named parameters:
d = [(k, arg) for k, arg in zip(input.values()[0].keys(), args)]
kwargs.update(dict(d))
if input and kwargs:
params = sort_dict(input.values()[0], kwargs).items()
if self.__soap_server == "axis":
# use the operation name
method = method
else:
# use the message (element) name
method = input.keys()[0]
#elif not input:
#TODO: no message! (see wsmtxca.dummy)
else:
params = kwargs and kwargs.items()
# call remote procedure
response = self.call(method, *params)
# parse results:
resp = response('Body',ns=soap_uri).children().unmarshall(output)
return resp and resp.values()[0] # pass Response tag children
def help(self, method):
"Return operation documentation and invocation/returned value example"
operation = self.get_operation(method)
input = operation.get('input')
input = input and input.values() and input.values()[0]
if isinstance(input, dict):
input = ", ".join("%s=%s" % (k,repr(v)) for k,v
in input.items())
elif isinstance(input, list):
input = repr(input)
output = operation.get('output')
if output:
output = operation['output'].values()[0]
        headers = operation.get('header') or None
return u"%s(%s)\n -> %s:\n\n%s\nHeaders: %s" % (
method,
input or "",
output and output or "",
operation.get("documentation",""),
headers,
)
def wsdl_parse(self, url, debug=False, cache=False):
"Parse Web Service Description v1.1"
log.debug("wsdl url: %s" % url)
# Try to load a previously parsed wsdl:
force_download = False
if cache:
# make md5 hash of the url for caching...
filename_pkl = "%s.pkl" % hashlib.md5(url).hexdigest()
if isinstance(cache, basestring):
filename_pkl = os.path.join(cache, filename_pkl)
if os.path.exists(filename_pkl):
log.debug("Unpickle file %s" % (filename_pkl, ))
f = open(filename_pkl, "r")
pkl = pickle.load(f)
f.close()
# sanity check:
if pkl['version'][:-1] != __version__.split(" ")[0][:-1] or pkl['url'] != url:
import warnings
warnings.warn('version or url mismatch! discarding cached wsdl', RuntimeWarning)
if debug:
log.debug('Version: %s %s' % (pkl['version'], __version__))
log.debug('URL: %s %s' % (pkl['url'], url))
force_download = True
else:
self.namespace = pkl['namespace']
self.documentation = pkl['documentation']
return pkl['services']
soap_ns = {
"http://schemas.xmlsoap.org/wsdl/soap/": 'soap11',
"http://schemas.xmlsoap.org/wsdl/soap12/": 'soap12',
}
wsdl_uri="http://schemas.xmlsoap.org/wsdl/"
xsd_uri="http://www.w3.org/2001/XMLSchema"
xsi_uri="http://www.w3.org/2001/XMLSchema-instance"
get_local_name = lambda s: s and str((':' in s) and s.split(':')[1] or s)
get_namespace_prefix = lambda s: s and str((':' in s) and s.split(':')[0] or None)
        # always return a unicode object:
REVERSE_TYPE_MAP[u'string'] = unicode
def fetch(url):
"Download a document from a URL, save it locally if cache enabled"
# check / append a valid schema if not given:
url_scheme, netloc, path, query, fragment = urlsplit(url)
if not url_scheme in ('http','https', 'file'):
for scheme in ('http','https', 'file'):
try:
if not url.startswith("/") and scheme in ('http', 'https'):
tmp_url = "%s://%s" % (scheme, url)
else:
tmp_url = "%s:%s" % (scheme, url)
if debug: log.debug("Scheme not found, trying %s" % scheme)
return fetch(tmp_url)
except Exception, e:
log.error(e)
raise RuntimeError("No scheme given for url: %s" % url)
# make md5 hash of the url for caching...
filename = "%s.xml" % hashlib.md5(url).hexdigest()
if isinstance(cache, basestring):
filename = os.path.join(cache, filename)
if cache and os.path.exists(filename) and not force_download:
log.info("Reading file %s" % (filename, ))
f = open(filename, "r")
xml = f.read()
f.close()
else:
if url_scheme == 'file':
log.info("Fetching url %s using urllib2" % (url, ))
f = urllib2.urlopen(url)
xml = f.read()
else:
log.info("GET %s using %s" % (url, self.http._wrapper_version))
response, xml = self.http.request(url, "GET", None, {})
if cache:
log.info("Writing file %s" % (filename, ))
if not os.path.isdir(cache):
os.makedirs(cache)
f = open(filename, "w")
f.write(xml)
f.close()
return xml
# Open uri and read xml:
xml = fetch(url)
# Parse WSDL XML:
wsdl = SimpleXMLElement(xml, namespace=wsdl_uri)
# detect soap prefix and uri (xmlns attributes of <definitions>)
xsd_ns = None
soap_uris = {}
for k, v in wsdl[:]:
if v in soap_ns and k.startswith("xmlns:"):
soap_uris[get_local_name(k)] = v
if v== xsd_uri and k.startswith("xmlns:"):
xsd_ns = get_local_name(k)
# Extract useful data:
self.namespace = wsdl['targetNamespace']
self.documentation = unicode(wsdl('documentation', error=False) or '')
services = {}
bindings = {} # binding_name: binding
operations = {} # operation_name: operation
port_type_bindings = {} # port_type_name: binding
messages = {} # message: element
elements = {} # element: type def
for service in wsdl.service:
service_name=service['name']
if not service_name:
continue # empty service?
if debug: log.debug("Processing service %s" % service_name)
serv = services.setdefault(service_name, {'ports': {}})
serv['documentation']=service['documentation'] or ''
for port in service.port:
binding_name = get_local_name(port['binding'])
address = port('address', ns=soap_uris.values(), error=False)
location = address and address['location'] or None
soap_uri = address and soap_uris.get(address.get_prefix())
soap_ver = soap_uri and soap_ns.get(soap_uri)
bindings[binding_name] = {'service_name': service_name,
'location': location,
'soap_uri': soap_uri, 'soap_ver': soap_ver,
}
serv['ports'][port['name']] = bindings[binding_name]
for binding in wsdl.binding:
binding_name = binding['name']
if debug: log.debug("Processing binding %s" % service_name)
soap_binding = binding('binding', ns=soap_uris.values(), error=False)
transport = soap_binding and soap_binding['transport'] or None
port_type_name = get_local_name(binding['type'])
bindings[binding_name].update({
'port_type_name': port_type_name,
'transport': transport, 'operations': {},
})
port_type_bindings[port_type_name] = bindings[binding_name]
for operation in binding.operation:
op_name = operation['name']
op = operation('operation',ns=soap_uris.values(), error=False)
action = op and op['soapAction']
d = operations.setdefault(op_name, {})
bindings[binding_name]['operations'][op_name] = d
d.update({'name': op_name})
d['parts'] = {}
# input and/or ouput can be not present!
input = operation('input', error=False)
body = input and input('body', ns=soap_uris.values(), error=False)
d['parts']['input_body'] = body and body['parts'] or None
output = operation('output', error=False)
body = output and output('body', ns=soap_uris.values(), error=False)
d['parts']['output_body'] = body and body['parts'] or None
header = input and input('header', ns=soap_uris.values(), error=False)
d['parts']['input_header'] = header and {'message': header['message'], 'part': header['part']} or None
headers = output and output('header', ns=soap_uris.values(), error=False)
                d['parts']['output_header'] = headers and {'message': headers['message'], 'part': headers['part']} or None
#if action: #TODO: separe operation_binding from operation
if action:
d["action"] = action
def make_key(element_name, element_type):
"return a suitable key for elements"
# only distinguish 'element' vs other types
if element_type in ('complexType', 'simpleType'):
eltype = 'complexType'
else:
eltype = element_type
if eltype not in ('element', 'complexType', 'simpleType'):
raise RuntimeError("Unknown element type %s = %s" % (unicode(element_name), eltype))
return (unicode(element_name), eltype)
#TODO: cleanup element/schema/types parsing:
def process_element(element_name, node, element_type):
"Parse and define simple element types"
if debug:
log.debug("Processing element %s %s" % (element_name, element_type))
for tag in node:
if tag.get_local_name() in ("annotation", "documentation"):
continue
elif tag.get_local_name() in ('element', 'restriction'):
if debug: log.debug("%s has not children! %s" % (element_name,tag))
children = tag # element "alias"?
alias = True
elif tag.children():
children = tag.children()
alias = False
else:
if debug: log.debug("%s has not children! %s" % (element_name,tag))
continue #TODO: abstract?
d = OrderedDict()
for e in children:
t = e['type']
if not t:
t = e['base'] # complexContent (extension)!
if not t:
t = 'anyType' # no type given!
t = t.split(":")
if len(t)>1:
ns, type_name = t
else:
ns, type_name = None, t[0]
if element_name == type_name:
pass ## warning with infinite recursion
uri = ns and e.get_namespace_uri(ns) or xsd_uri
if uri==xsd_uri:
# look for the type, None == any
fn = REVERSE_TYPE_MAP.get(unicode(type_name), None)
else:
fn = None
if not fn:
# simple / complex type, postprocess later
fn = elements.setdefault(make_key(type_name, "complexType"), OrderedDict())
if e['name'] is not None and not alias:
e_name = unicode(e['name'])
d[e_name] = fn
else:
if debug: log.debug("complexConent/simpleType/element %s = %s" % (element_name, type_name))
d[None] = fn
if e['maxOccurs']=="unbounded" or (ns == 'SOAP-ENC' and type_name == 'Array'):
# it's an array... TODO: compound arrays?
d.array = True
if e is not None and e.get_local_name() == 'extension' and e.children():
# extend base element:
process_element(element_name, e.children(), element_type)
elements.setdefault(make_key(element_name, element_type), OrderedDict()).update(d)
# check axis2 namespace at schema types attributes
self.namespace = dict(wsdl.types("schema", ns=xsd_uri)[:]).get('targetNamespace', self.namespace)
imported_schemas = {}
def preprocess_schema(schema):
"Find schema elements and complex types"
for element in schema.children() or []:
if element.get_local_name() in ('import', ):
schema_namespace = element['namespace']
schema_location = element['schemaLocation']
if schema_location is None:
if debug: log.debug("Schema location not provided for %s!" % (schema_namespace, ))
continue
if schema_location in imported_schemas:
if debug: log.debug("Schema %s already imported!" % (schema_location, ))
continue
imported_schemas[schema_location] = schema_namespace
if debug: print "Importing schema %s from %s" % (schema_namespace, schema_location)
# Open uri and read xml:
xml = fetch(schema_location)
# Parse imported XML schema (recursively):
imported_schema = SimpleXMLElement(xml, namespace=xsd_uri)
preprocess_schema(imported_schema)
element_type = element.get_local_name()
if element_type in ('element', 'complexType', "simpleType"):
element_name = unicode(element['name'])
if debug: log.debug("Parsing Element %s: %s" % (element_type, element_name))
if element.get_local_name() == 'complexType':
children = element.children()
elif element.get_local_name() == 'simpleType':
children = element("restriction", ns=xsd_uri)
elif element.get_local_name() == 'element' and element['type']:
children = element
else:
children = element.children()
if children:
children = children.children()
elif element.get_local_name() == 'element':
children = element
if children:
process_element(element_name, children, element_type)
def postprocess_element(elements):
"Fix unresolved references (elements referenced before its definition, thanks .net)"
for k,v in elements.items():
if isinstance(v, OrderedDict):
if v.array:
elements[k] = [v] # convert arrays to python lists
if v!=elements: #TODO: fix recursive elements
postprocess_element(v)
if None in v and v[None]: # extension base?
if isinstance(v[None], dict):
for i, kk in enumerate(v[None]):
                            # extend base -keep original order-
if v[None] is not None:
elements[k].insert(kk, v[None][kk], i)
del v[None]
else: # "alias", just replace
if debug: log.debug("Replacing %s = %s" % (k, v[None]))
elements[k] = v[None]
#break
if isinstance(v, list):
for n in v: # recurse list
postprocess_element(n)
# process current wsdl schema:
for schema in wsdl.types("schema", ns=xsd_uri):
preprocess_schema(schema)
postprocess_element(elements)
for message in wsdl.message:
if debug: log.debug("Processing message %s" % message['name'])
for part in message('part', error=False) or []:
element = {}
element_name = part['element']
if not element_name:
# some implementations (axis) uses type instead
element_name = part['type']
type_ns = get_namespace_prefix(element_name)
type_uri = wsdl.get_namespace_uri(type_ns)
if type_uri == xsd_uri:
element_name = get_local_name(element_name)
fn = REVERSE_TYPE_MAP.get(unicode(element_name), None)
element = {part['name']: fn}
# emulate a true Element (complexType)
messages.setdefault((message['name'], None), {message['name']: OrderedDict()}).values()[0].update(element)
else:
element_name = get_local_name(element_name)
fn = elements.get(make_key(element_name, 'element'))
if not fn:
# some axis servers uses complexType for part messages
fn = elements.get(make_key(element_name, 'complexType'))
element = {message['name']: {part['name']: fn}}
else:
element = {element_name: fn}
messages[(message['name'], part['name'])] = element
def get_message(message_name, part_name):
if part_name:
# get the specific part of the message:
return messages.get((message_name, part_name))
else:
# get the first part for the specified message:
for (message_name_key, part_name_key), message in messages.items():
if message_name_key == message_name:
return message
for port_type in wsdl.portType:
port_type_name = port_type['name']
if debug: log.debug("Processing port type %s" % port_type_name)
binding = port_type_bindings[port_type_name]
for operation in port_type.operation:
op_name = operation['name']
op = operations[op_name]
op['documentation'] = unicode(operation('documentation', error=False) or '')
if binding['soap_ver']:
#TODO: separe operation_binding from operation (non SOAP?)
if operation("input", error=False):
input_msg = get_local_name(operation.input['message'])
input_header = op['parts'].get('input_header')
if input_header:
header_msg = get_local_name(input_header.get('message'))
header_part = get_local_name(input_header.get('part'))
# warning: some implementations use a separate message!
header = get_message(header_msg or input_msg, header_part)
else:
                        header = None # not enough info to search for the header message
op['input'] = get_message(input_msg, op['parts'].get('input_body'))
op['header'] = header
else:
op['input'] = None
op['header'] = None
if operation("output", error=False):
output_msg = get_local_name(operation.output['message'])
op['output'] = get_message(output_msg, op['parts'].get('output_body'))
else:
op['output'] = None
if debug:
import pprint
log.debug(pprint.pformat(services))
# Save parsed wsdl (cache)
if cache:
f = open(filename_pkl, "wb")
pkl = {
'version': __version__.split(" ")[0],
'url': url,
'namespace': self.namespace,
'documentation': self.documentation,
'services': services,
}
pickle.dump(pkl, f)
f.close()
return services
def __setitem__(self, item, value):
"Set SOAP Header value - this header will be sent for every request."
self.__headers[item] = value
def close(self):
"Finish the connection and remove temp files"
self.http.close()
if self.cacert.startswith(tempfile.gettempdir()):
if self.trace: log.info("removing %s" % self.cacert)
os.unlink(self.cacert)
def parse_proxy(proxy_str):
"Parses proxy address user:pass@host:port into a dict suitable for httplib2"
if isinstance(proxy_str, unicode):
proxy_str = proxy_str.encode("utf8")
proxy_dict = {}
if proxy_str is None:
return
if "@" in proxy_str:
user_pass, host_port = proxy_str.split("@")
else:
user_pass, host_port = "", proxy_str
if ":" in host_port:
host, port = host_port.split(":")
proxy_dict['proxy_host'], proxy_dict['proxy_port'] = host, int(port)
if ":" in user_pass:
proxy_dict['proxy_user'], proxy_dict['proxy_pass'] = user_pass.split(":")
return proxy_dict
if __name__ == "__main__":
pass
| gpl-2.0 | -8,982,075,149,803,601,000 | 44.501926 | 129 | 0.513937 | false |
mikaelboman/home-assistant | homeassistant/components/http.py | 2 | 14871 | """This module provides WSGI application to serve the Home Assistant API."""
import hmac
import json
import logging
import mimetypes
import threading
import re
import voluptuous as vol
import homeassistant.core as ha
import homeassistant.remote as rem
from homeassistant import util
from homeassistant.const import (
SERVER_PORT, HTTP_HEADER_HA_AUTH, HTTP_HEADER_CACHE_CONTROL,
HTTP_HEADER_ACCESS_CONTROL_ALLOW_ORIGIN,
HTTP_HEADER_ACCESS_CONTROL_ALLOW_HEADERS, ALLOWED_CORS_HEADERS)
from homeassistant.helpers.entity import split_entity_id
import homeassistant.util.dt as dt_util
import homeassistant.helpers.config_validation as cv
DOMAIN = "http"
REQUIREMENTS = ("eventlet==0.19.0", "static3==0.7.0", "Werkzeug==0.11.5",)
CONF_API_PASSWORD = "api_password"
CONF_SERVER_HOST = "server_host"
CONF_SERVER_PORT = "server_port"
CONF_DEVELOPMENT = "development"
CONF_SSL_CERTIFICATE = 'ssl_certificate'
CONF_SSL_KEY = 'ssl_key'
CONF_CORS_ORIGINS = 'cors_allowed_origins'
DATA_API_PASSWORD = 'api_password'
_FINGERPRINT = re.compile(r'^(.+)-[a-z0-9]{32}\.(\w+)$', re.IGNORECASE)
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_API_PASSWORD): cv.string,
vol.Optional(CONF_SERVER_HOST): cv.string,
vol.Optional(CONF_SERVER_PORT, default=SERVER_PORT):
vol.All(vol.Coerce(int), vol.Range(min=1, max=65535)),
vol.Optional(CONF_DEVELOPMENT): cv.string,
vol.Optional(CONF_SSL_CERTIFICATE): cv.isfile,
vol.Optional(CONF_SSL_KEY): cv.isfile,
vol.Optional(CONF_CORS_ORIGINS): cv.ensure_list
}),
}, extra=vol.ALLOW_EXTRA)
class HideSensitiveFilter(logging.Filter):
"""Filter API password calls."""
# pylint: disable=too-few-public-methods
def __init__(self, hass):
"""Initialize sensitive data filter."""
super().__init__()
self.hass = hass
def filter(self, record):
"""Hide sensitive data in messages."""
if self.hass.wsgi.api_password is None:
return True
record.msg = record.msg.replace(self.hass.wsgi.api_password, '*******')
return True
def setup(hass, config):
"""Set up the HTTP API and debug interface."""
_LOGGER.addFilter(HideSensitiveFilter(hass))
conf = config.get(DOMAIN, {})
api_password = util.convert(conf.get(CONF_API_PASSWORD), str)
server_host = conf.get(CONF_SERVER_HOST, '0.0.0.0')
server_port = conf.get(CONF_SERVER_PORT, SERVER_PORT)
development = str(conf.get(CONF_DEVELOPMENT, "")) == "1"
ssl_certificate = conf.get(CONF_SSL_CERTIFICATE)
ssl_key = conf.get(CONF_SSL_KEY)
cors_origins = conf.get(CONF_CORS_ORIGINS, [])
server = HomeAssistantWSGI(
hass,
development=development,
server_host=server_host,
server_port=server_port,
api_password=api_password,
ssl_certificate=ssl_certificate,
ssl_key=ssl_key,
cors_origins=cors_origins
)
hass.bus.listen_once(
ha.EVENT_HOMEASSISTANT_START,
lambda event:
threading.Thread(target=server.start, daemon=True,
name='WSGI-server').start())
hass.wsgi = server
hass.config.api = rem.API(server_host if server_host != '0.0.0.0'
else util.get_local_ip(),
api_password, server_port,
ssl_certificate is not None)
return True
def request_class():
"""Generate request class.
Done in method because of imports.
"""
from werkzeug.exceptions import BadRequest
from werkzeug.wrappers import BaseRequest, AcceptMixin
from werkzeug.utils import cached_property
class Request(BaseRequest, AcceptMixin):
"""Base class for incoming requests."""
@cached_property
def json(self):
"""Get the result of json.loads if possible."""
if not self.data:
return None
# elif 'json' not in self.environ.get('CONTENT_TYPE', ''):
# raise BadRequest('Not a JSON request')
try:
return json.loads(self.data.decode(
self.charset, self.encoding_errors))
except (TypeError, ValueError):
raise BadRequest('Unable to read JSON request')
return Request
def routing_map(hass):
"""Generate empty routing map with HA validators."""
from werkzeug.routing import Map, BaseConverter, ValidationError
class EntityValidator(BaseConverter):
"""Validate entity_id in urls."""
regex = r"(\w+)\.(\w+)"
def __init__(self, url_map, exist=True, domain=None):
"""Initilalize entity validator."""
super().__init__(url_map)
self._exist = exist
self._domain = domain
def to_python(self, value):
"""Validate entity id."""
if self._exist and hass.states.get(value) is None:
raise ValidationError()
if self._domain is not None and \
split_entity_id(value)[0] != self._domain:
raise ValidationError()
return value
def to_url(self, value):
"""Convert entity_id for a url."""
return value
class DateValidator(BaseConverter):
"""Validate dates in urls."""
regex = r'\d{4}-\d{1,2}-\d{1,2}'
def to_python(self, value):
"""Validate and convert date."""
parsed = dt_util.parse_date(value)
if parsed is None:
raise ValidationError()
return parsed
def to_url(self, value):
"""Convert date to url value."""
return value.isoformat()
return Map(converters={
'entity': EntityValidator,
'date': DateValidator,
})
class HomeAssistantWSGI(object):
"""WSGI server for Home Assistant."""
# pylint: disable=too-many-instance-attributes, too-many-locals
# pylint: disable=too-many-arguments
def __init__(self, hass, development, api_password, ssl_certificate,
ssl_key, server_host, server_port, cors_origins):
"""Initilalize the WSGI Home Assistant server."""
from werkzeug.wrappers import Response
Response.mimetype = 'text/html'
# pylint: disable=invalid-name
self.Request = request_class()
self.url_map = routing_map(hass)
self.views = {}
self.hass = hass
self.extra_apps = {}
self.development = development
self.api_password = api_password
self.ssl_certificate = ssl_certificate
self.ssl_key = ssl_key
self.server_host = server_host
self.server_port = server_port
self.cors_origins = cors_origins
self.event_forwarder = None
def register_view(self, view):
"""Register a view with the WSGI server.
The view argument must be a class that inherits from HomeAssistantView.
It is optional to instantiate it before registering; this method will
handle it either way.
"""
from werkzeug.routing import Rule
if view.name in self.views:
_LOGGER.warning("View '%s' is being overwritten", view.name)
if isinstance(view, type):
# Instantiate the view, if needed
view = view(self.hass)
self.views[view.name] = view
rule = Rule(view.url, endpoint=view.name)
self.url_map.add(rule)
for url in view.extra_urls:
rule = Rule(url, endpoint=view.name)
self.url_map.add(rule)
def register_redirect(self, url, redirect_to):
"""Register a redirect with the server.
        The redirect_to target must be either a string or a callable. In case of a
callable it's called with the url adapter that triggered the match and
the values of the URL as keyword arguments and has to return the target
for the redirect, otherwise it has to be a string with placeholders in
rule syntax.
"""
from werkzeug.routing import Rule
self.url_map.add(Rule(url, redirect_to=redirect_to))
def register_static_path(self, url_root, path, cache_length=31):
"""Register a folder to serve as a static path.
Specify optional cache length of asset in days.
"""
from static import Cling
headers = []
if cache_length and not self.development:
# 1 year in seconds
cache_time = cache_length * 86400
headers.append({
'prefix': '',
HTTP_HEADER_CACHE_CONTROL:
"public, max-age={}".format(cache_time)
})
self.register_wsgi_app(url_root, Cling(path, headers=headers))
def register_wsgi_app(self, url_root, app):
"""Register a path to serve a WSGI app."""
if url_root in self.extra_apps:
_LOGGER.warning("Url root '%s' is being overwritten", url_root)
self.extra_apps[url_root] = app
def start(self):
"""Start the wsgi server."""
from eventlet import wsgi
import eventlet
sock = eventlet.listen((self.server_host, self.server_port))
if self.ssl_certificate:
sock = eventlet.wrap_ssl(sock, certfile=self.ssl_certificate,
keyfile=self.ssl_key, server_side=True)
wsgi.server(sock, self, log=_LOGGER)
def dispatch_request(self, request):
"""Handle incoming request."""
from werkzeug.exceptions import (
MethodNotAllowed, NotFound, BadRequest, Unauthorized,
)
from werkzeug.routing import RequestRedirect
with request:
adapter = self.url_map.bind_to_environ(request.environ)
try:
endpoint, values = adapter.match()
return self.views[endpoint].handle_request(request, **values)
except RequestRedirect as ex:
return ex
except (BadRequest, NotFound, MethodNotAllowed,
Unauthorized) as ex:
resp = ex.get_response(request.environ)
if request.accept_mimetypes.accept_json:
resp.data = json.dumps({
"result": "error",
"message": str(ex),
})
resp.mimetype = "application/json"
return resp
def base_app(self, environ, start_response):
"""WSGI Handler of requests to base app."""
request = self.Request(environ)
response = self.dispatch_request(request)
if self.cors_origins:
cors_check = (environ.get("HTTP_ORIGIN") in self.cors_origins)
cors_headers = ", ".join(ALLOWED_CORS_HEADERS)
if cors_check:
response.headers[HTTP_HEADER_ACCESS_CONTROL_ALLOW_ORIGIN] = \
environ.get("HTTP_ORIGIN")
response.headers[HTTP_HEADER_ACCESS_CONTROL_ALLOW_HEADERS] = \
cors_headers
return response(environ, start_response)
def __call__(self, environ, start_response):
"""Handle a request for base app + extra apps."""
from werkzeug.wsgi import DispatcherMiddleware
app = DispatcherMiddleware(self.base_app, self.extra_apps)
# Strip out any cachebusting MD5 fingerprints
fingerprinted = _FINGERPRINT.match(environ.get('PATH_INFO', ''))
if fingerprinted:
environ['PATH_INFO'] = "{}.{}".format(*fingerprinted.groups())
return app(environ, start_response)
class HomeAssistantView(object):
"""Base view for all views."""
extra_urls = []
requires_auth = True # Views inheriting from this class can override this
def __init__(self, hass):
"""Initilalize the base view."""
from werkzeug.wrappers import Response
if not hasattr(self, 'url'):
class_name = self.__class__.__name__
raise AttributeError(
'{0} missing required attribute "url"'.format(class_name)
)
if not hasattr(self, 'name'):
class_name = self.__class__.__name__
raise AttributeError(
'{0} missing required attribute "name"'.format(class_name)
)
self.hass = hass
# pylint: disable=invalid-name
self.Response = Response
def handle_request(self, request, **values):
"""Handle request to url."""
from werkzeug.exceptions import MethodNotAllowed, Unauthorized
try:
handler = getattr(self, request.method.lower())
except AttributeError:
raise MethodNotAllowed
# Auth code verbose on purpose
authenticated = False
if self.hass.wsgi.api_password is None:
authenticated = True
elif hmac.compare_digest(request.headers.get(HTTP_HEADER_HA_AUTH, ''),
self.hass.wsgi.api_password):
# A valid auth header has been set
authenticated = True
elif hmac.compare_digest(request.args.get(DATA_API_PASSWORD, ''),
self.hass.wsgi.api_password):
authenticated = True
if self.requires_auth and not authenticated:
raise Unauthorized()
request.authenticated = authenticated
result = handler(request, **values)
if isinstance(result, self.Response):
# The method handler returned a ready-made Response, how nice of it
return result
status_code = 200
if isinstance(result, tuple):
result, status_code = result
return self.Response(result, status=status_code)
def json(self, result, status_code=200):
"""Return a JSON response."""
msg = json.dumps(
result,
sort_keys=True,
cls=rem.JSONEncoder
).encode('UTF-8')
return self.Response(msg, mimetype="application/json",
status=status_code)
def json_message(self, error, status_code=200):
"""Return a JSON message response."""
return self.json({'message': error}, status_code)
def file(self, request, fil, mimetype=None):
"""Return a file."""
from werkzeug.wsgi import wrap_file
from werkzeug.exceptions import NotFound
if isinstance(fil, str):
if mimetype is None:
mimetype = mimetypes.guess_type(fil)[0]
try:
fil = open(fil)
except IOError:
raise NotFound()
return self.Response(wrap_file(request.environ, fil),
mimetype=mimetype, direct_passthrough=True)
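# Illustrative subclass (endpoint and names are hypothetical, not part of this module):
#   class ExampleView(HomeAssistantView):
#       url = "/api/example"
#       name = "api:example"
#       def get(self, request):
#           return self.json({"hello": "world"})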
| mit | 863,876,997,045,437,600 | 32.417978 | 79 | 0.595253 | false |
gragas/RSSS-toolkit | gpsbro/gpsbro/geonet/rinex.py | 2 | 3079 | import sys
import random
import datetime
import requests
import urllib.request
from xml.etree import ElementTree
MET = 0b0001
NAV = 0b0010
OBS = 0b0100
QC = 0b1000
DEFAULT_MASK = MET | OBS | QC
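# The mask is a bitwise OR of the flags above; e.g. (illustrative)
#   get_URLs_on(datetime.date(2016, 1, 1), mask=OBS | NAV)
# would return only observation and navigation file URLs for that day.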
def get_URLs_on(date, mask=None):
mask = mask if mask is not None else DEFAULT_MASK
day_string = str(date.timetuple().tm_yday)
while len(day_string) < 3:
day_string = "0" + day_string
date_url = date.strftime("%Y/") + day_string + "/"
URL = "ftp://ftp.geonet.org.nz/gps/rinex/"
mZ = list()
nZ = list()
dZ = list()
qc = list()
with urllib.request.urlopen(URL + date_url) as response:
data = response.read().decode("utf-8")
for line in data.split("\n"):
if not line.strip().split():
continue
string = line.strip().split()[-1]
if len(string) > 3:
if string[-3:] == "m.Z":
mZ.append(URL + date_url + string)
elif string[-3:] == "n.Z":
nZ.append(URL + date_url + string)
elif string[-3:] == "d.Z":
dZ.append(URL + date_url + string)
elif string[-3:] == ".qc":
qc.append(URL + date_url + string)
maximum_len = max(len(mZ), len(nZ), len(dZ), len(qc))
mZ.extend([None]*(maximum_len - len(mZ)))
nZ.extend([None]*(maximum_len - len(nZ)))
dZ.extend([None]*(maximum_len - len(dZ)))
qc.extend([None]*(maximum_len - len(qc)))
base = list()
if mask & MET:
base = mZ
if mask & NAV:
if not len(base):
base = nZ
else:
for indx, e in enumerate(base):
if type(e) is list:
base[indx] = e + [nZ[indx]]
else:
base[indx] = [e] + [nZ[indx]]
if mask & OBS:
if not len(base):
base = dZ
else:
for indx, e in enumerate(base):
if type(e) is list:
					base[indx] = e + [dZ[indx]]
else:
base[indx] = [e] + [dZ[indx]]
if mask & QC:
if not len(base):
base = qc
else:
for indx, e in enumerate(base):
if type(e) is list:
base[indx] = e + [qc[indx]]
else:
base[indx] = [e] + [qc[indx]]
return base
def get_URLs_within(start_date, end_date, mask=None, sample_size=None):
mask = mask if mask is not None else DEFAULT_MASK
domain = [end_date - datetime.timedelta(days=x) for x in range((end_date - start_date).days + 1)]
if sample_size is not None:
assert(type(sample_size) is int)
assert(sample_size <= (end_date - start_date).days + 1)
domain = random.sample(domain, sample_size)
dates = dict()
for date in domain:
try:
dates[date] = get_URLs_on(date, mask)
except Exception as e:
print("Could not find URL for {0}".format(date.strftime("%Y-%m-%d")))
print(e)
return dates
| mit | 2,430,859,960,251,099,600 | 32.107527 | 101 | 0.500812 | false |
immenz/pyload | module/web/json_app.py | 40 | 8697 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from os.path import join
from traceback import print_exc
from shutil import copyfileobj
from bottle import route, request, HTTPError
from webinterface import PYLOAD
from utils import login_required, render_to_response, toDict
from module.utils import decode, formatSize
def format_time(seconds):
seconds = int(seconds)
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
return "%.2i:%.2i:%.2i" % (hours, minutes, seconds)
def get_sort_key(item):
return item["order"]
@route("/json/status")
@route("/json/status", method="POST")
@login_required('LIST')
def status():
try:
status = toDict(PYLOAD.statusServer())
status['captcha'] = PYLOAD.isCaptchaWaiting()
return status
except:
return HTTPError()
@route("/json/links")
@route("/json/links", method="POST")
@login_required('LIST')
def links():
try:
links = [toDict(x) for x in PYLOAD.statusDownloads()]
ids = []
for link in links:
ids.append(link['fid'])
if link['status'] == 12:
link['info'] = "%s @ %s/s" % (link['format_eta'], formatSize(link['speed']))
elif link['status'] == 5:
link['percent'] = 0
link['size'] = 0
link['bleft'] = 0
link['info'] = _("waiting %s") % link['format_wait']
else:
link['info'] = ""
data = {'links': links, 'ids': ids}
return data
except Exception, e:
print_exc()
return HTTPError()
@route("/json/packages")
@login_required('LIST')
def packages():
print "/json/packages"
try:
data = PYLOAD.getQueue()
for package in data:
package['links'] = []
for file in PYLOAD.get_package_files(package['id']):
package['links'].append(PYLOAD.get_file_info(file))
return data
except:
return HTTPError()
@route("/json/package/<id:int>")
@login_required('LIST')
def package(id):
try:
data = toDict(PYLOAD.getPackageData(id))
data["links"] = [toDict(x) for x in data["links"]]
for pyfile in data["links"]:
if pyfile["status"] == 0:
pyfile["icon"] = "status_finished.png"
elif pyfile["status"] in (2, 3):
pyfile["icon"] = "status_queue.png"
elif pyfile["status"] in (9, 1):
pyfile["icon"] = "status_offline.png"
elif pyfile["status"] == 5:
pyfile["icon"] = "status_waiting.png"
elif pyfile["status"] == 8:
pyfile["icon"] = "status_failed.png"
elif pyfile["status"] == 4:
pyfile["icon"] = "arrow_right.png"
elif pyfile["status"] in (11, 13):
pyfile["icon"] = "status_proc.png"
else:
pyfile["icon"] = "status_downloading.png"
tmp = data["links"]
tmp.sort(key=get_sort_key)
data["links"] = tmp
return data
except:
print_exc()
return HTTPError()
@route("/json/package_order/:ids")
@login_required('ADD')
def package_order(ids):
try:
pid, pos = ids.split("|")
PYLOAD.orderPackage(int(pid), int(pos))
return {"response": "success"}
except:
return HTTPError()
@route("/json/abort_link/<id:int>")
@login_required('DELETE')
def abort_link(id):
try:
PYLOAD.stopDownloads([id])
return {"response": "success"}
except:
return HTTPError()
@route("/json/link_order/:ids")
@login_required('ADD')
def link_order(ids):
try:
pid, pos = ids.split("|")
PYLOAD.orderFile(int(pid), int(pos))
return {"response": "success"}
except:
return HTTPError()
@route("/json/add_package")
@route("/json/add_package", method="POST")
@login_required('ADD')
def add_package():
name = request.forms.get("add_name", "New Package").strip()
queue = int(request.forms['add_dest'])
links = decode(request.forms['add_links'])
links = links.split("\n")
pw = request.forms.get("add_password", "").strip("\n\r")
try:
f = request.files['add_file']
if not name or name == "New Package":
name = f.name
fpath = join(PYLOAD.getConfigValue("general", "download_folder"), "tmp_" + f.filename)
destination = open(fpath, 'wb')
copyfileobj(f.file, destination)
destination.close()
links.insert(0, fpath)
except:
pass
name = name.decode("utf8", "ignore")
links = map(lambda x: x.strip(), links)
links = filter(lambda x: x != "", links)
pack = PYLOAD.addPackage(name, links, queue)
if pw:
pw = pw.decode("utf8", "ignore")
data = {"password": pw}
PYLOAD.setPackageData(pack, data)
@route("/json/move_package/<dest:int>/<id:int>")
@login_required('MODIFY')
def move_package(dest, id):
try:
PYLOAD.movePackage(dest, id)
return {"response": "success"}
except:
return HTTPError()
@route("/json/edit_package", method="POST")
@login_required('MODIFY')
def edit_package():
try:
id = int(request.forms.get("pack_id"))
data = {"name": request.forms.get("pack_name").decode("utf8", "ignore"),
"folder": request.forms.get("pack_folder").decode("utf8", "ignore"),
"password": request.forms.get("pack_pws").decode("utf8", "ignore")}
PYLOAD.setPackageData(id, data)
return {"response": "success"}
except:
return HTTPError()
@route("/json/set_captcha")
@route("/json/set_captcha", method="POST")
@login_required('ADD')
def set_captcha():
if request.environ.get('REQUEST_METHOD', "GET") == "POST":
try:
PYLOAD.setCaptchaResult(request.forms["cap_id"], request.forms["cap_result"])
except:
pass
task = PYLOAD.getCaptchaTask()
if task.tid >= 0:
src = "data:image/%s;base64,%s" % (task.type, task.data)
return {'captcha': True, 'id': task.tid, 'src': src, 'result_type' : task.resultType}
else:
return {'captcha': False}
@route("/json/load_config/:category/:section")
@login_required("SETTINGS")
def load_config(category, section):
conf = None
if category == "general":
conf = PYLOAD.getConfigDict()
elif category == "plugin":
conf = PYLOAD.getPluginConfigDict()
for key, option in conf[section].iteritems():
if key in ("desc","outline"): continue
if ";" in option["type"]:
option["list"] = option["type"].split(";")
option["value"] = decode(option["value"])
return render_to_response("settings_item.html", {"skey": section, "section": conf[section]})
@route("/json/save_config/:category", method="POST")
@login_required("SETTINGS")
def save_config(category):
for key, value in request.POST.iteritems():
try:
section, option = key.split("|")
except:
continue
if category == "general": category = "core"
PYLOAD.setConfigValue(section, option, decode(value), category)
@route("/json/add_account", method="POST")
@login_required("ACCOUNTS")
def add_account():
login = request.POST["account_login"]
password = request.POST["account_password"]
type = request.POST["account_type"]
PYLOAD.updateAccount(type, login, password)
@route("/json/update_accounts", method="POST")
@login_required("ACCOUNTS")
def update_accounts():
    deleted = []  # don't update deleted accounts or they will be created again
for name, value in request.POST.iteritems():
value = value.strip()
if not value: continue
tmp, user = name.split(";")
plugin, action = tmp.split("|")
if (plugin, user) in deleted: continue
if action == "password":
PYLOAD.updateAccount(plugin, user, value)
elif action == "time" and "-" in value:
PYLOAD.updateAccount(plugin, user, options={"time": [value]})
elif action == "limitdl" and value.isdigit():
PYLOAD.updateAccount(plugin, user, options={"limitDL": [value]})
elif action == "delete":
deleted.append((plugin,user))
PYLOAD.removeAccount(plugin, user)
@route("/json/change_password", method="POST")
def change_password():
user = request.POST["user_login"]
oldpw = request.POST["login_current_password"]
newpw = request.POST["login_new_password"]
if not PYLOAD.changePassword(user, oldpw, newpw):
print "Wrong password"
return HTTPError()
| gpl-3.0 | 872,937,739,479,280,600 | 26.875 | 96 | 0.57767 | false |
mitsuhiko/lektor | lektor/devcli.py | 3 | 3718 | import os
import sys
import click
from .packages import get_package_info, register_package, publish_package
from .cli import pass_context
def ensure_plugin():
here = os.getcwd()
if not os.path.isfile(os.path.join(here, 'setup.py')):
raise click.UsageError('This command must be run in a '
'Lektor plugin folder')
info = get_package_info(here)
if not info['name'].lower().startswith('lektor-'):
raise click.UsageError('Python package is misnamed. Needs to start '
'with lektor-')
return info
@click.group(short_help='Development commands.')
def cli():
"""Development commands for Lektor.
This provides various development support commands for Lektor. This is
primarily useful for Lektor plugin development but also if you want to
extend Lektor itself. Additional functionality can be unlocked by
exporting the `LEKTOR_DEV=1` environment variable.
"""
@cli.command('shell', short_help='Starts a python shell.')
@pass_context
def shell_cmd(ctx):
"""Starts a Python shell in the context of a Lektor project.
This is particularly useful for debugging plugins and to explore the
API. To quit the shell just use `quit()`. Within the shell various
utilities are available right from the get-go for you.
\b
- `project`: the loaded project as object.
- `env`: an environment for the loaded project.
- `pad`: a database pad initialized for the project and environment
that is ready to use.
"""
ctx.load_plugins()
import code
from lektor.db import F, Tree
from lektor.builder import Builder
banner = 'Python %s on %s\nLektor Project: %s' % (
sys.version,
sys.platform,
ctx.get_env().root_path,
)
ns = {}
startup = os.environ.get('PYTHONSTARTUP')
if startup and os.path.isfile(startup):
with open(startup, 'r') as f:
eval(compile(f.read(), startup, 'exec'), ns)
pad = ctx.get_env().new_pad()
ns.update(
project=ctx.get_project(),
env=ctx.get_env(),
pad=pad,
tree=Tree(pad),
config=ctx.get_env().load_config(),
make_builder=lambda: Builder(ctx.get_env().new_pad(),
ctx.get_default_output_path()),
F=F
)
code.interact(banner=banner, local=ns)
@cli.command('publish-plugin', short_help='Publish a plugin to PyPI.')
def publish_plugin_cmd():
"""Publishes the current version of the plugin in the current folder.
This generally requires that your setup.py has at least the bare minimum
configuration for valid publishing to PyPI.
"""
info = ensure_plugin()
for key in 'author', 'author_email', 'license', 'url':
if not info[key]:
raise click.UsageError('Cannot publish plugin without setting '
'"%s" in setup.py.' % key)
register_package(info['path'])
publish_package(info['path'])
@cli.command('new-plugin', short_help='Creates a new plugin')
@click.option('--path', type=click.Path(), help='The destination path')
@click.argument('plugin_name', required=False)
@pass_context
def new_plugin(ctx, **defaults):
"""This command creates a new plugin.
This will present you with a very short wizard that guides you through
creation of a new plugin. At the end of it, it will create a plugin
in the packages folder of the current project or the path you defined.
    This is the fastest way to create a new plugin.
"""
from .quickstart import plugin_quickstart
project = ctx.get_project(silent=True)
plugin_quickstart(defaults, project=project)
| bsd-3-clause | 1,449,175,397,260,104,700 | 34.075472 | 76 | 0.648467 | false |
thp44/delphin_6_automation | data_process/wp6_run/not_in_sample.py | 1 | 1275 | __author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
# RiBuild Modules
from delphin_6_automation.database_interactions.db_templates import delphin_entry
from delphin_6_automation.database_interactions.db_templates import sample_entry
from delphin_6_automation.database_interactions import mongo_setup
from delphin_6_automation.database_interactions.auth import auth_dict
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
server = mongo_setup.global_init(auth_dict)
sample = sample_entry.Sample.objects().only('delphin_docs')
sample_projects = [delphin.id
for s in sample
for delphin in s.delphin_docs]
print(f'There are {len(sample_projects)} projects connected to a sample')
projects = delphin_entry.Delphin.objects().only('id')
print(f'There are currently {len(projects)} projects in the database')
print('Starting')
for proj in projects:
if proj.id not in sample_projects:
#print(f'Project with ID: {proj.id} is not part of a sample!')
proj.delete()
mongo_setup.global_end_ssh(server)
| mit | -887,571,754,433,448,200 | 30.875 | 120 | 0.586667 | false |
Weihonghao/ECM | ECM_model.py | 1 | 22809 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import inspect
import numpy as np
import tensorflow as tf
import copy
import logging
import preprocess_data
import tensorflow as tf
import numpy as np
class ECMModel(object):
def __init__(self, embeddings, id2word, config, forward_only=False):
magic_number = 256
assert (magic_number%2 == 0)
self.embeddings = tf.cast(embeddings, dtype=tf.float32)
# self.vocab_label = vocab_label # label for vocab
# self.emotion_label = emotion_label # label for emotion
self.config = config
self.batch_size = config.batch_size
#print("batch size", self.batch_size)
self.vocab_size = config.vocab_size
self.non_emotion_size = config.non_emotion_size
self.emotion_size = self.vocab_size - self.non_emotion_size
self.id2word = id2word
self.forward_only = forward_only
'''if (self.config.vocab_size % 2 == 1):
self.decoder_state_size = config.vocab_size + 1
print (len(self.id2word))
id2word.append('NULL')
else:
self.decoder_state_size = config.vocab_size'''
self.decoder_state_size = magic_number
self.encoder_state_size = int(self.decoder_state_size / 2)
self.pad_step_embedded = tf.zeros([self.batch_size, self.decoder_state_size * 2 + config.embedding_size])
self.go_step_embedded = tf.ones([self.batch_size, self.decoder_state_size * 2 + config.embedding_size])
self.emotion_kind = 6
self.GO_id = 1
self.pad_id = 0
self.IM_size = 256
self.eps = 1e-5
#with tf.variable_scope("reuse_sel_internalMemory") as scope:
#scope.reuse_variables()
self.internalMemory = tf.get_variable("IMFuck", shape=[self.emotion_kind, self.IM_size],
initializer=tf.contrib.layers.xavier_initializer())
self.vu = tf.get_variable("vu", shape=[self.decoder_state_size, 1], initializer=tf.contrib.layers.xavier_initializer())
# self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True))
self.question = tf.placeholder(tf.int32, shape=[None, None], name='question')
self.question_len = tf.placeholder(tf.int32, shape=[None], name='question_len')
if not self.forward_only:
self.answer = tf.placeholder(tf.int32, shape=[None, None], name='answer')
self.answer_len = tf.placeholder(tf.int32, shape=[None], name='answer_len')
self.LA = tf.placeholder(dtype=tf.int32, name='LA', shape=()) # batch
self.emotion_tag = tf.placeholder(tf.int32, shape=[None], name='emotion_tag')
self.dropout_placeholder = tf.placeholder(dtype=tf.float32, name="dropout", shape=())
self.LQ = tf.placeholder(dtype=tf.int32, name='LQ', shape=()) # batch
with tf.variable_scope("ecm", initializer=tf.contrib.layers.xavier_initializer()):
self.setup_embeddings()
self.setup_system()
self.merged_all = tf.summary.merge_all()
def setup_embeddings(self):
with tf.variable_scope("embeddings"):
if self.config.retrain_embeddings: # whether to cotrain word embedding
embeddings = tf.Variable(self.embeddings, name="Emb", dtype=tf.float32)
else:
embeddings = tf.cast(self.embeddings, tf.float32)
question_embeddings = tf.nn.embedding_lookup(embeddings, self.question)
self.q = tf.reshape(question_embeddings, shape=[-1, self.LQ, self.config.embedding_size])
answer_embeddings = tf.nn.embedding_lookup(embeddings, self.answer)
self.a = tf.reshape(answer_embeddings, shape=[-1, self.LA, self.config.embedding_size])
def encode(self, inputs, sequence_length, encoder_state_input, dropout=1.0):
"""
In a generalized encode function, you pass in your inputs,
sequence_length, and an initial hidden state input into this function.
:param inputs: Symbolic representations of your input (padded all to the same length)
:param mask: mask of the sequence
:param encoder_state_input: (Optional) pass this as initial hidden state
to tf.nn.dynamic_rnn to build conditional representations
:return: an encoded representation of your input.
It can be context-level representation, word-level representation,
or both.
"""
logging.debug('-' * 5 + 'encode' + '-' * 5)
# Forward direction cell
lstm_fw_cell = tf.contrib.rnn.LSTMCell(self.encoder_state_size, state_is_tuple=True)
# Backward direction cell
lstm_bw_cell = tf.contrib.rnn.LSTMCell(self.encoder_state_size, state_is_tuple=True)
lstm_fw_cell = tf.contrib.rnn.DropoutWrapper(lstm_fw_cell, input_keep_prob=dropout)
lstm_bw_cell = tf.contrib.rnn.DropoutWrapper(lstm_bw_cell, input_keep_prob=dropout)
initial_state_fw = None
initial_state_bw = None
if encoder_state_input is not None:
initial_state_fw, initial_state_bw = encoder_state_input
logging.debug('sequence_length: %s' % str(sequence_length))
logging.debug('Inputs: %s' % str(inputs))
# Get lstm cell output
print(inputs.get_shape())
(outputs_fw, outputs_bw), (final_state_fw, final_state_bw) = tf.nn.bidirectional_dynamic_rnn(
cell_fw=lstm_fw_cell,
cell_bw=lstm_bw_cell,
inputs=inputs,
sequence_length=sequence_length,
initial_state_fw=initial_state_fw,
initial_state_bw=initial_state_bw,
dtype=tf.float32)
# Concatinate forward and backword hidden output vectors.
# each vector is of size [batch_size, sequence_length, encoder_state_size]
logging.debug('fw hidden state: %s' % str(outputs_fw))
hidden_state = tf.concat([outputs_fw, outputs_bw], 2)
logging.debug('Concatenated bi-LSTM hidden state: %s' % str(hidden_state))
# final_state_fw and final_state_bw are the final states of the forwards/backwards LSTM
print("encode output ", final_state_fw[1].get_shape())
concat_final_state = tf.concat([final_state_fw[1], final_state_bw[1]], 1)
logging.debug('Concatenated bi-LSTM final hidden state: %s' % str(concat_final_state))
return hidden_state, concat_final_state
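    # Shape note (illustrative, with batch size B and max question length LQ):
    #   hidden_state       -> [B, LQ, 2 * encoder_state_size]
    #   concat_final_state -> [B, 2 * encoder_state_size] == [B, decoder_state_size]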
def decode(self, encoder_outputs, encoder_final_state, decoder_length):
print('decode start')
# initialize first decode state
def loop_fn_initial():
initial_elements_finished = (0 >= decoder_length) # all False at the initial step
#GO_emb = tf.ones([self.batch_size], dtype=tf.int32, name='GO')
initial_input = self.go_step_embedded#tf.nn.embedding_lookup(self.embeddings, GO_emb)
initial_cell_state = encoder_final_state
initial_cell_output = None
initial_loop_state = self.internalMemory # we don't need to pass any additional information
print('before return initial')
logging.debug('initial_elements_finished: %s' % str(initial_elements_finished))
logging.debug('initial_input: %s' % str(initial_input))
logging.debug('initial_cell_state: %s' % str(initial_cell_state))
logging.debug('initial_cell_output: %s' % str(initial_cell_output))
logging.debug('initial_loop_state: %s' % str(initial_loop_state))
return (initial_elements_finished,
initial_input,
initial_cell_state,
initial_cell_output,
initial_loop_state)
def loop_fn_transition(time, previous_output, previous_state, previous_loop_state):
# get next state
print('in trans')
def get_next_input():
print('in get next input')
'''write_gate = tf.sigmoid(tf.layers.dense(previous_state, self.IM_size, name="write_gate"))
eps_matrix = self.eps * tf.ones_like(write_gate)
eps_write_gate = tf.log(eps_matrix + write_gate)
write_one_hot = tf.one_hot(indices=self.emotion_tag, depth=self.emotion_kind)
write_one_hot_transpose = tf.transpose(write_one_hot)
tmpFuck = tf.sign(tf.reshape(tf.reduce_sum(write_one_hot_transpose,axis=1),[self.emotion_kind,1]))
logging.debug('Before: %s' % str(tmpFuck))
new_internalMemory = previous_loop_state * (1- tmpFuck)
logging.debug('new_internalMemory: %s' % str(new_internalMemory))
tmpFuck2 = tf.matmul(write_one_hot_transpose, eps_write_gate)
logging.debug('TmpFuck2: %s' % str(tmpFuck2))
new_internalMemory += tf.exp(tmpFuck)
logging.debug('new_internalMemory: %s' % str(new_internalMemory))
assert new_internalMemory.get_shape().as_list() == previous_loop_state.get_shape().as_list()
#previous_loop_state = new_internalMemory
previous_loop_state = new_internalMemory
logging.debug('after: %s' % "fuck")'''
tmp_id, _ = self.external_memory_function(previous_output)
previous_output_id = tmp_id#tf.reshape(self.external_memory_function(previous_output), [self.batch_size])
previous_output_vector = tf.nn.embedding_lookup(self.embeddings, previous_output_id)
score = attention_mechanism(previous_state)
weights = tf.nn.softmax(score)
print("here")
weights = tf.reshape(weights, [tf.shape(weights)[0], 1, tf.shape(weights)[1]])
logging.debug('weights: %s' % str(weights))
logging.debug('attention_mechanism.values: %s' % str(attention_mechanism.values))
context = tf.matmul(weights, attention_mechanism.values)
logging.debug('context: %s' % str(context))
context = tf.reshape(context, [-1, context.get_shape().as_list()[2]])
print("here1")
logging.debug('previous_output_vector: %s' % str(previous_output_vector))
logging.debug('context: %s' % str(context))
attention = tf.layers.dense(inputs=tf.concat([previous_output_vector, context], 1), units=self.IM_size)
read_gate = tf.sigmoid(attention, name="read_gate")
logging.debug('read_gate: %s' % str(read_gate))
read_gate_output = tf.nn.embedding_lookup(self.internalMemory,self.emotion_tag)
logging.debug('gate output: %s' % str(read_gate_output))
next_input = tf.concat(
[context, previous_output_vector, read_gate_output], 1)
logging.debug('next_input: %s' % str(next_input))
return next_input
elements_finished = (time >= decoder_length) # this operation produces boolean tensor of [batch_size]
# defining if corresponding sequence has ended
finished = tf.reduce_all(elements_finished) # -> boolean scalar
#pad_step_embedded = tf.nn.embedding_lookup(self.embeddings, self.pad_id) ## undefined
pad_step_embedded = self.pad_step_embedded
logging.debug('finished: %s' % str(finished))
logging.debug('pad_step_embedded: %s' % str(pad_step_embedded))
if previous_state is not None:
write_gate = tf.sigmoid(tf.layers.dense(previous_state, self.IM_size, name="write_gate"))
eps_matrix = self.eps * tf.ones_like(write_gate)
eps_write_gate = tf.log(eps_matrix + write_gate)
write_one_hot = tf.one_hot(indices=self.emotion_tag, depth=self.emotion_kind)
write_one_hot_transpose = tf.transpose(write_one_hot)
tmpFuck = tf.sign(tf.reshape(tf.reduce_sum(write_one_hot_transpose,axis=1),[self.emotion_kind,1]))
logging.debug('Before: %s' % str(tmpFuck))
new_internalMemory = previous_loop_state * (1- tmpFuck)
logging.debug('new_internalMemory: %s' % str(new_internalMemory))
tmpFuck2 = tf.matmul(write_one_hot_transpose, eps_write_gate)
logging.debug('TmpFuck2: %s' % str(tmpFuck2))
                new_internalMemory += tf.exp(tmpFuck2)  # write the gated update into the rows selected by the emotion tag
logging.debug('new_internalMemory: %s' % str(new_internalMemory))
assert new_internalMemory.get_shape().as_list() == previous_loop_state.get_shape().as_list()
previous_loop_state = new_internalMemory
logging.debug('after: %s' % "fuck")
inputNow = tf.cond(finished, lambda : pad_step_embedded , get_next_input)
#loop_state = tf.cond(finished, None, previous_loop_state)
logging.debug('inputNow: %s' % str(inputNow))
logging.debug('previous_state: %s' % str(previous_state))
loop_state = previous_loop_state
output = previous_output
state = previous_state
#output, state = decode_cell(inputNow, previous_state)
#write_gate = tf.sigmoid(tf.layers.dense(state, self.IM_size, name="write_gate"))
#change_IM = tf.nn.embedding_lookup(self.internalMemory,self.emotion_tag)
#change_IM = change_IM * write_gate
return (elements_finished,
inputNow,
state,
output,
loop_state)
def loop_fn(time, previous_output, previous_state, previous_loop_state):
if previous_state is None: # time == 0
assert previous_output is None and previous_state is None
print("initialii******")
return loop_fn_initial()
else:
print("trainsition******")
return loop_fn_transition(time, previous_output, previous_state, previous_loop_state)
decode_cell = tf.contrib.rnn.GRUCell(self.decoder_state_size)
attention_mechanism = tf.contrib.seq2seq.LuongAttention(self.decoder_state_size, encoder_outputs)
decoder_outputs_ta, decoder_final_state, _ = tf.nn.raw_rnn(decode_cell, loop_fn)
decoder_outputs = decoder_outputs_ta.stack()
decoder_max_steps, decoder_batch_size, decoder_dim = tf.unstack(tf.shape(decoder_outputs))#decoder_outputs.get_shape().as_list()#tf.unstack(tf.shape(decoder_outputs))
#assert (decoder_batch_size.as_list()[0] == self.batch_size)
#assert (decoder_dim.as_list()[0] == self.decoder_state_size)
decoder_outputs_reshape = tf.reshape(decoder_outputs, [decoder_batch_size,decoder_max_steps , decoder_dim])
return decoder_outputs_reshape, decoder_final_state
def external_memory_function(self, decode_state): # decode_output, shape[batch_size,vocab_size]
print('flag1')
#decode_output = tf.reshape(in_decode_output, [self.batch_size,-1,self.decoder_state_size])
gto = tf.sigmoid(tf.reduce_sum(tf.matmul(decode_state, self.vu)))
print('flag2')
emotion_num = self.emotion_size
decode_output = tf.layers.dense(decode_state, self.vocab_size, name="state2output")
print('flag3')
arg = tf.argmax(tf.concat([gto * decode_output[:,:emotion_num], (1 - gto) * decode_output[:, emotion_num:]],
1), axis=1) # [batch_size,1]
logging.debug('arg: %s' % str(arg))
return arg, decode_output
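    # Illustrative reading of the gate above: gto in (0, 1) scales the first
    # emotion_size logits (emotion words) while (1 - gto) scales the remaining
    # non-emotion logits, and the argmax picks the next word id over the full vocab.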
def create_feed_dict(self, question_batch, question_len_batch, emotion_tag_batch, answer_batch=None,
answer_len_batch=None, is_train=True):
feed_dict = {}
LQ = np.max(question_len_batch)
def add_paddings(sentence, max_length):
pad_len = max_length - len(sentence)
if pad_len > 0:
padded_sentence = sentence + [0] * pad_len
else:
padded_sentence = sentence[:max_length]
return padded_sentence
def padding_batch(data, max_len):
padded_data = []
for sentence in data:
d = add_paddings(sentence, max_len)
padded_data.append(d)
return padded_data
feed_dict[self.question_len] = question_len_batch
feed_dict[self.LQ] = LQ
feed_dict[self.emotion_tag] = emotion_tag_batch
padded_question = padding_batch(question_batch, LQ)
print("padding question size ", np.array(padded_question).shape)
feed_dict[self.question] = padded_question
if not self.forward_only:
assert answer_batch is not None
assert answer_len_batch is not None
LA = np.max(answer_len_batch)
padded_answer = padding_batch(answer_batch, LA)
feed_dict[self.answer] = padded_answer
feed_dict[self.answer_len] = answer_len_batch
feed_dict[self.LA] = LA
if is_train:
feed_dict[self.dropout_placeholder] = 0.8
else:
feed_dict[self.dropout_placeholder] = 1.0
return feed_dict
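    # Padding sketch (assumed toy input, not taken from the original code):
    #   question_batch = [[4, 8], [5]], question_len_batch = [2, 1]  -> LQ = 2
    #   feed_dict[self.question] == [[4, 8], [5, 0]]  (right-padded with pad id 0)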
def setup_system(self):
def emotion_distribution(decode_outputs_ids):
return tf.cast((decode_outputs_ids < (self.emotion_size)), dtype=tf.int64)
def loss(results, final_IM):
#logging.debug('logits: %s' % str(results))
logging.debug('labels: %s' % str(self.answer))
#answer_all = tf.reshape(self.a, [-1,self.config.embedding_size])
answer_one_hot = tf.one_hot(indices= self.answer, depth= self.vocab_size, on_value= 1, off_value=0,axis=-1)#, dtype=tf.float32)
answer_one_hot = tf.cast(answer_one_hot, dtype=tf.float32)
answer_one_hot = tf.reshape(answer_one_hot,[-1, self.vocab_size])
#results = tf.reshape(results, [-1,results.get_shape().as_list()[2]])
#results = tf.cast(self.external_memory_function(results), dtype=tf.float32)
EM_ids, EM_output = self.external_memory_function(tf.reshape(results,[-1,self.decoder_state_size]))
EM_ids = tf.reshape(EM_ids,[self.batch_size,-1])
#EM_output = tf.reshape(EM_output,[self.batch_size,-1, self.vocab_size])
logging.debug('logits: %s' % str(EM_output))
logging.debug('labels: %s' % str(answer_one_hot))
logging.debug('EM_ID: %s' % str(EM_ids))
tmp = tf.nn.softmax_cross_entropy_with_logits(logits=EM_output, labels=answer_one_hot)
logging.debug('tmp loss 1: %s' % str(tmp))
loss = tf.reduce_sum(tmp) # self.vocab_label)
print("loss 1 ptint ", loss)
emotion_label = tf.cast((self.answer < (self.emotion_size)), dtype=tf.float32)
emotion_logit = tf.cast((EM_ids < (self.emotion_size)), dtype=tf.float32)
logging.debug('emotion logits: %s' % str(emotion_logit))
logging.debug('emotion labels: %s' % str(emotion_label))
tmp = tf.nn.softmax_cross_entropy_with_logits(logits=tf.cast(emotion_logit, dtype=tf.float32),
labels=tf.cast(emotion_label, dtype=tf.float32))
logging.debug('tmp loss 2: %s' % str(tmp))
loss += tf.reduce_sum(tmp)
print("loss 2 ptint ", loss)
loss += 2 * tf.nn.l2_loss(final_IM)
print("loss 3 ptint ", loss)
logging.debug('loss: %s' % str(loss))
EM_output = tf.reshape(EM_output,[self.batch_size,-1, self.vocab_size])
return loss, EM_output
encoder_outputs, encoder_final_state = self.encode(self.q, self.question_len, None, self.dropout_placeholder)
results, final_IM = self.decode(encoder_outputs, encoder_final_state, self.answer_len)
if not self.forward_only:
logging.debug('results: %s' % str(results))
self.tfloss, self.EM_output = loss(results, final_IM)
loss_sum = tf.summary.scalar("loss", self.tfloss)
self.train_op = tf.train.AdamOptimizer(self.config.learning_rate, beta1=0.5).minimize(self.tfloss)
else:
EM_ids, EM_output = self.external_memory_function(tf.reshape(results,[-1,self.decoder_state_size]))
self.EM_output = tf.reshape(EM_output,[self.batch_size,-1, self.vocab_size])
self.tfids = tf.argmax(self.EM_output, axis=2)
logging.debug('self.tfids: %s' % str(self.tfids))
def train(self, sess, training_set, tensorboard=False):
question_batch, question_len_batch, answer_batch, answer_len_batch, tag_batch = training_set
tag_batch = map(lambda x: x[0],tag_batch)
input_feed = self.create_feed_dict(question_batch, question_len_batch, tag_batch, answer_batch,
answer_len_batch, is_train=True)
if tensorboard:
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
_, loss, merged = sess.run([self.train_op, self.tfloss, self.merged_all], feed_dict=input_feed, options=run_options, run_metadata=run_metadata)
return loss, merged
else:
_, loss = sess.run([self.train_op, self.tfloss], feed_dict=input_feed)
return loss
def answer(self, sess, dataset):
#print(len(dataset))
assert self.forward_only == True
question_batch, question_len_batch, _, _, tag_batch = dataset
tag_batch = map(lambda x: x[0],tag_batch)
answer_len_batch = 10 * np.ones(self.batch_size)
input_feed = self.create_feed_dict(question_batch, question_len_batch, tag_batch, answer_batch=None,
answer_len_batch=answer_len_batch, is_train=False)
ids = sess.run([self.tfids], feed_dict=input_feed)
return [[self.id2word[each] for each in each_list] for each_list in ids]
def test(self, sess, test_set):
#print(len(test_set))
question_batch, question_len_batch, answer_batch, answer_len_batch, tag_batch = test_set
tag_batch = map(lambda x: x[0],tag_batch)
input_feed = self.create_feed_dict(question_batch, question_len_batch, tag_batch, answer_batch,
answer_len_batch, is_train=False)
loss, ids = sess.run([self.tfloss, self.tfids], feed_dict=input_feed)
return loss, [[self.id2word[each] for each in each_list] for each_list in ids]
| agpl-3.0 | -7,835,359,824,562,673,000 | 51.434483 | 174 | 0.605945 | false |
debianitram/carcaptus | main.py | 1 | 3091 | #!/usr/bin/python
#-*- encoding: utf-8 -*-
# import
import pygame
import common
import random
from pygame.locals import *
from auto import CarPlayer, CarEnemy
from balas import DisparoPlayer, DisparoEnemy
import ImageDraw
from PIL import ImageEnhance
from plantas import Planta
from pista import Ruta
from bomba import Boom
pygame.init()
screen = pygame.display.set_mode((common.ANCHO, common.ALTO))
pygame.display.set_caption(common.titulo)
# Objects
ruta1 = Ruta(0,0)
ruta2 = Ruta(0, -ruta1.rect.height)
autoJugador = CarPlayer()
# Sound objects.
sonido_jugador = common.load_sound("piu.ogg")
sonido_enemigo = common.load_sound("scup.ogg")
sonido_explosion = common.load_sound("kboom.ogg")
# Groups
fondo_grupo = pygame.sprite.RenderUpdates(ruta1, ruta2)
jugador_grupo = pygame.sprite.RenderUpdates(autoJugador)
enemigos_grupo = pygame.sprite.RenderUpdates()
disparos_grupo = pygame.sprite.RenderUpdates()
disparos_enemigos_grupo = pygame.sprite.RenderUpdates()
objVarios_grupo = pygame.sprite.RenderUpdates()
# Counters.
enemigos = 0
plantas = 0
# Clock
reloj = pygame.time.Clock()
# Loop state and objects.
estado = True
common.scroll = False
screen.blit(ruta1.image, ruta1.rect)
ref = (common.ANCHO, common.ALTO)
brightness = 1.0
contrast = 1.0
while estado:
reloj.tick(60)
for event in pygame.event.get():
if event.type == QUIT:
estado = False
elif event.type == KEYDOWN:
if event.key == K_ESCAPE:
estado = False
elif event.key == K_SPACE:
common.scroll = True
elif event.key == K_UP:
disparos_grupo.add(DisparoPlayer(autoJugador.rect.midtop))
sonido_jugador.play()
elif event.type == KEYUP and event.key == K_SPACE:
common.scroll = False
    # Player car input handling.
teclas = pygame.key.get_pressed()
if teclas[K_LEFT]:
autoJugador.giro += common.DELTA
if teclas[K_RIGHT]:
autoJugador.giro -= common.DELTA
    # Add plants to the game.
plantas += 1
if plantas >= 15 and common.scroll:
objVarios_grupo.add(Planta())
plantas = 0
    # Add enemies to the game.
enemigos += 1
if enemigos >= 100:
enemigo_nuevo = CarEnemy(random.randint(220, 590), random.randint(0, 4),
disparos_enemigos_grupo, DisparoEnemy, sonido_enemigo)
enemigos_grupo.add(enemigo_nuevo)
enemigos = 0
    # Check for collisions.
for u in pygame.sprite.groupcollide(enemigos_grupo, disparos_grupo, 1, 1):
(x, y) = u.rect.center
objVarios_grupo.add(Boom(x,y))
sonido_explosion.play()
for u in pygame.sprite.groupcollide(jugador_grupo, disparos_enemigos_grupo, 1, 1):
(ps, py) = u.rect.center
objVarios_grupo.add(Boom(ps, py))
sonido_explosion.play()
#jugador_grupo.add(autoJugador)
fondo_grupo.update()
jugador_grupo.update()
enemigos_grupo.update()
disparos_grupo.update()
disparos_enemigos_grupo.update()
objVarios_grupo.update()
fondo_grupo.draw(screen)
jugador_grupo.draw(screen)
enemigos_grupo.draw(screen)
disparos_grupo.draw(screen)
disparos_enemigos_grupo.draw(screen)
objVarios_grupo.draw(screen)
pygame.display.update()
| agpl-3.0 | 4,571,479,237,483,563,000 | 21.713235 | 83 | 0.723211 | false |
louispotok/pandas | pandas/tests/io/test_clipboard.py | 1 | 5122 | # -*- coding: utf-8 -*-
import numpy as np
from numpy.random import randint
from textwrap import dedent
import pytest
import pandas as pd
from pandas import DataFrame
from pandas import read_clipboard
from pandas import get_option
from pandas.util import testing as tm
from pandas.util.testing import makeCustomDataframe as mkdf
from pandas.io.clipboard.exceptions import PyperclipException
from pandas.io.clipboard import clipboard_set
try:
DataFrame({'A': [1, 2]}).to_clipboard()
_DEPS_INSTALLED = 1
except (PyperclipException, RuntimeError):
_DEPS_INSTALLED = 0
@pytest.mark.single
@pytest.mark.skipif(not _DEPS_INSTALLED,
reason="clipboard primitives not installed")
class TestClipboard(object):
@classmethod
def setup_class(cls):
cls.data = {}
cls.data['string'] = mkdf(5, 3, c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
cls.data['int'] = mkdf(5, 3, data_gen_f=lambda *args: randint(2),
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
cls.data['float'] = mkdf(5, 3,
data_gen_f=lambda r, c: float(r) + 0.01,
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
cls.data['mixed'] = DataFrame({'a': np.arange(1.0, 6.0) + 0.01,
'b': np.arange(1, 6),
'c': list('abcde')})
# Test columns exceeding "max_colwidth" (GH8305)
_cw = get_option('display.max_colwidth') + 1
cls.data['colwidth'] = mkdf(5, 3, data_gen_f=lambda *args: 'x' * _cw,
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
# Test GH-5346
max_rows = get_option('display.max_rows')
cls.data['longdf'] = mkdf(max_rows + 1, 3,
data_gen_f=lambda *args: randint(2),
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
# Test for non-ascii text: GH9263
cls.data['nonascii'] = pd.DataFrame({'en': 'in English'.split(),
'es': 'en español'.split()})
# unicode round trip test for GH 13747, GH 12529
cls.data['utf8'] = pd.DataFrame({'a': ['µasd', 'Ωœ∑´'],
'b': ['øπ∆˚¬', 'œ∑´®']})
cls.data_types = list(cls.data.keys())
@classmethod
def teardown_class(cls):
del cls.data_types, cls.data
def check_round_trip_frame(self, data_type, excel=None, sep=None,
encoding=None):
data = self.data[data_type]
data.to_clipboard(excel=excel, sep=sep, encoding=encoding)
if sep is not None:
result = read_clipboard(sep=sep, index_col=0, encoding=encoding)
else:
result = read_clipboard(encoding=encoding)
tm.assert_frame_equal(data, result, check_dtype=False)
def test_round_trip_frame_sep(self):
for dt in self.data_types:
self.check_round_trip_frame(dt, sep=',')
self.check_round_trip_frame(dt, sep=r'\s+')
self.check_round_trip_frame(dt, sep='|')
def test_round_trip_frame_string(self):
for dt in self.data_types:
self.check_round_trip_frame(dt, excel=False)
def test_round_trip_frame(self):
for dt in self.data_types:
self.check_round_trip_frame(dt)
def test_read_clipboard_infer_excel(self):
# gh-19010: avoid warnings
clip_kwargs = dict(engine="python")
text = dedent("""
John James Charlie Mingus
1 2
4 Harry Carney
""".strip())
clipboard_set(text)
df = pd.read_clipboard(**clip_kwargs)
# excel data is parsed correctly
assert df.iloc[1][1] == 'Harry Carney'
# having diff tab counts doesn't trigger it
text = dedent("""
a\t b
1 2
3 4
""".strip())
clipboard_set(text)
res = pd.read_clipboard(**clip_kwargs)
text = dedent("""
a b
1 2
3 4
""".strip())
clipboard_set(text)
exp = pd.read_clipboard(**clip_kwargs)
tm.assert_frame_equal(res, exp)
def test_invalid_encoding(self):
# test case for testing invalid encoding
data = self.data['string']
with pytest.raises(ValueError):
data.to_clipboard(encoding='ascii')
with pytest.raises(NotImplementedError):
pd.read_clipboard(encoding='ascii')
def test_round_trip_valid_encodings(self):
for enc in ['UTF-8', 'utf-8', 'utf8']:
for dt in self.data_types:
self.check_round_trip_frame(dt, encoding=enc)
| bsd-3-clause | 8,799,019,577,396,479,000 | 35.985507 | 77 | 0.527429 | false |
korealerts1/sentry | src/sentry/migrations/0082_auto__add_activity__add_field_group_num_comments__add_field_event_num_.py | 36 | 24692 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Activity'
db.create_table('sentry_activity', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('project', self.gf('sentry.db.models.fields.FlexibleForeignKey')(to=orm['sentry.Project'])),
('group', self.gf('sentry.db.models.fields.FlexibleForeignKey')(to=orm['sentry.Group'], null=True)),
('event', self.gf('sentry.db.models.fields.FlexibleForeignKey')(to=orm['sentry.Event'], null=True)),
('type', self.gf('django.db.models.fields.PositiveIntegerField')()),
('ident', self.gf('django.db.models.fields.CharField')(max_length=64, null=True)),
('user', self.gf('sentry.db.models.fields.FlexibleForeignKey')(to=orm['sentry.User'], null=True)),
('datetime', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('data', self.gf('django.db.models.fields.TextField')(null=True)),
))
db.send_create_signal('sentry', ['Activity'])
# Adding field 'Group.num_comments'
db.add_column('sentry_groupedmessage', 'num_comments',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0, null=True),
keep_default=False)
# Adding field 'Event.num_comments'
db.add_column('sentry_message', 'num_comments',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0, null=True),
keep_default=False)
def backwards(self, orm):
# Deleting model 'Activity'
db.delete_table('sentry_activity')
# Deleting field 'Group.num_comments'
db.delete_column('sentry_groupedmessage', 'num_comments')
# Deleting field 'Event.num_comments'
db.delete_column('sentry_message', 'num_comments')
models = {
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.affecteduserbygroup': {
'Meta': {'unique_together': "(('project', 'tuser', 'group'),)", 'object_name': 'AffectedUserByGroup'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'tuser': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.TrackedUser']", 'null': 'True'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filterkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'FilterKey'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'users_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'user_added': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
},
'sentry.trackeduser': {
'Meta': {'unique_together': "(('project', 'ident'),)", 'object_name': 'TrackedUser'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'num_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
}
}
complete_apps = ['sentry']
| bsd-3-clause | -4,683,570,997,946,223,000 | 80.223684 | 167 | 0.559371 | false |
pydot/pydot | dot_parser.py | 1 | 15074 |
"""Graphviz's dot language parser.
The dotparser parses GraphViz files written in the dot language
and transforms them into a class representation
defined by `pydot`.
Author: Michael Krause <[email protected]>
Fixes by: Ero Carrera <[email protected]>
"""
from pyparsing import (
nestedExpr,
Literal,
CaselessLiteral,
Word,
OneOrMore,
Forward,
Group,
Optional,
Combine,
restOfLine,
cStyleComment,
nums,
alphanums,
printables,
ParseException,
ParseResults,
CharsNotIn,
QuotedString,
)
import pydot
__author__ = ["Michael Krause", "Ero Carrera"]
__license__ = "MIT"
class P_AttrList(object):
def __init__(self, toks):
self.attrs = {}
i = 0
while i < len(toks):
attrname = toks[i]
if i + 2 < len(toks) and toks[i + 1] == "=":
attrvalue = toks[i + 2]
i += 3
else:
attrvalue = None
i += 1
self.attrs[attrname] = attrvalue
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.attrs)
class DefaultStatement(P_AttrList):
def __init__(self, default_type, attrs):
self.default_type = default_type
self.attrs = attrs
def __repr__(self):
return "%s(%s, %r)" % (
self.__class__.__name__,
self.default_type,
self.attrs,
)
top_graphs = list()
def push_top_graph_stmt(s, loc, toks):
attrs = {}
g = None
for element in toks:
if (
isinstance(element, (ParseResults, tuple, list))
and len(element) == 1
and isinstance(element[0], str)
):
element = element[0]
if element == "strict":
attrs["strict"] = True
elif element in ["graph", "digraph"]:
attrs = {}
g = pydot.Dot(graph_type=element, **attrs)
attrs["type"] = element
top_graphs.append(g)
elif isinstance(element, str):
g.set_name(element)
elif isinstance(element, pydot.Subgraph):
g.obj_dict["attributes"].update(element.obj_dict["attributes"])
g.obj_dict["edges"].update(element.obj_dict["edges"])
g.obj_dict["nodes"].update(element.obj_dict["nodes"])
g.obj_dict["subgraphs"].update(element.obj_dict["subgraphs"])
g.set_parent_graph(g)
elif isinstance(element, P_AttrList):
attrs.update(element.attrs)
elif isinstance(element, (ParseResults, list)):
add_elements(g, element)
else:
raise ValueError(
"Unknown element statement: {s}".format(s=element)
)
for g in top_graphs:
update_parent_graph_hierarchy(g)
if len(top_graphs) == 1:
return top_graphs[0]
return top_graphs
def update_parent_graph_hierarchy(g, parent_graph=None, level=0):
if parent_graph is None:
parent_graph = g
for key_name in ("edges",):
if isinstance(g, pydot.frozendict):
item_dict = g
else:
item_dict = g.obj_dict
if key_name not in item_dict:
continue
for key, objs in item_dict[key_name].items():
for obj in objs:
if (
"parent_graph" in obj
and obj["parent_graph"].get_parent_graph() == g
):
if obj["parent_graph"] is g:
pass
else:
obj["parent_graph"].set_parent_graph(parent_graph)
if key_name == "edges" and len(key) == 2:
for idx, vertex in enumerate(obj["points"]):
if isinstance(
vertex,
(pydot.Graph, pydot.Subgraph, pydot.Cluster),
):
vertex.set_parent_graph(parent_graph)
if isinstance(vertex, pydot.frozendict):
if vertex["parent_graph"] is g:
pass
else:
vertex["parent_graph"].set_parent_graph(
parent_graph
)
def add_defaults(element, defaults):
d = element.__dict__
for key, value in defaults.items():
if not d.get(key):
d[key] = value
def add_elements(
g, toks, defaults_graph=None, defaults_node=None, defaults_edge=None
):
if defaults_graph is None:
defaults_graph = {}
if defaults_node is None:
defaults_node = {}
if defaults_edge is None:
defaults_edge = {}
for elm_idx, element in enumerate(toks):
if isinstance(element, (pydot.Subgraph, pydot.Cluster)):
add_defaults(element, defaults_graph)
g.add_subgraph(element)
elif isinstance(element, pydot.Node):
add_defaults(element, defaults_node)
g.add_node(element)
elif isinstance(element, pydot.Edge):
add_defaults(element, defaults_edge)
g.add_edge(element)
elif isinstance(element, ParseResults):
for e in element:
add_elements(
g, [e], defaults_graph, defaults_node, defaults_edge
)
elif isinstance(element, DefaultStatement):
if element.default_type == "graph":
default_graph_attrs = pydot.Node("graph", **element.attrs)
g.add_node(default_graph_attrs)
elif element.default_type == "node":
default_node_attrs = pydot.Node("node", **element.attrs)
g.add_node(default_node_attrs)
elif element.default_type == "edge":
default_edge_attrs = pydot.Node("edge", **element.attrs)
g.add_node(default_edge_attrs)
defaults_edge.update(element.attrs)
else:
raise ValueError(
"Unknown DefaultStatement: {s}".format(
s=element.default_type
)
)
elif isinstance(element, P_AttrList):
g.obj_dict["attributes"].update(element.attrs)
else:
raise ValueError(
"Unknown element statement: {s}".format(s=element)
)
def push_graph_stmt(s, loc, toks):
g = pydot.Subgraph("")
add_elements(g, toks)
return g
def push_subgraph_stmt(s, loc, toks):
g = pydot.Subgraph("")
for e in toks:
if len(e) == 3:
e[2].set_name(e[1])
if e[0] == "subgraph":
e[2].obj_dict["show_keyword"] = True
return e[2]
else:
if e[0] == "subgraph":
e[1].obj_dict["show_keyword"] = True
return e[1]
return g
def push_default_stmt(s, loc, toks):
# The pydot class instances should be marked as
# default statements to be inherited by actual
# graphs, nodes and edges.
#
default_type = toks[0][0]
if len(toks) > 1:
attrs = toks[1].attrs
else:
attrs = {}
if default_type in ["graph", "node", "edge"]:
return DefaultStatement(default_type, attrs)
else:
raise ValueError("Unknown default statement: {s}".format(s=toks))
def push_attr_list(s, loc, toks):
p = P_AttrList(toks)
return p
def get_port(node):
if len(node) > 1:
if isinstance(node[1], ParseResults):
if len(node[1][0]) == 2:
if node[1][0][0] == ":":
return node[1][0][1]
return None
def do_node_ports(node):
node_port = ""
if len(node) > 1:
node_port = "".join([str(a) + str(b) for a, b in node[1]])
return node_port
def push_edge_stmt(s, loc, toks):
tok_attrs = [a for a in toks if isinstance(a, P_AttrList)]
attrs = {}
for a in tok_attrs:
attrs.update(a.attrs)
e = []
if isinstance(toks[0][0], pydot.Graph):
n_prev = pydot.frozendict(toks[0][0].obj_dict)
else:
n_prev = toks[0][0] + do_node_ports(toks[0])
if isinstance(toks[2][0], ParseResults):
n_next_list = [[n.get_name()] for n in toks[2][0]]
for n_next in [n for n in n_next_list]:
n_next_port = do_node_ports(n_next)
e.append(pydot.Edge(n_prev, n_next[0] + n_next_port, **attrs))
elif isinstance(toks[2][0], pydot.Graph):
e.append(
pydot.Edge(n_prev, pydot.frozendict(toks[2][0].obj_dict), **attrs)
)
elif isinstance(toks[2][0], pydot.Node):
node = toks[2][0]
if node.get_port() is not None:
name_port = node.get_name() + ":" + node.get_port()
else:
name_port = node.get_name()
e.append(pydot.Edge(n_prev, name_port, **attrs))
# if the target of this edge is the name of a node
elif isinstance(toks[2][0], str):
for n_next in [n for n in tuple(toks)[2::2]]:
if isinstance(n_next, P_AttrList) or not isinstance(
n_next[0], str
):
continue
n_next_port = do_node_ports(n_next)
e.append(pydot.Edge(n_prev, n_next[0] + n_next_port, **attrs))
n_prev = n_next[0] + n_next_port
else:
raise Exception(
"Edge target {r} with type {s} unsupported.".format(
r=toks[2][0], s=type(toks[2][0])
)
)
return e
def push_node_stmt(s, loc, toks):
if len(toks) == 2:
attrs = toks[1].attrs
else:
attrs = {}
node_name = toks[0]
if isinstance(node_name, list) or isinstance(node_name, tuple):
if len(node_name) > 0:
node_name = node_name[0]
n = pydot.Node(str(node_name), **attrs)
return n
graphparser = None
def graph_definition():
global graphparser
if not graphparser:
# punctuation
colon = Literal(":")
lbrace = Literal("{")
rbrace = Literal("}")
lbrack = Literal("[")
rbrack = Literal("]")
lparen = Literal("(")
rparen = Literal(")")
equals = Literal("=")
comma = Literal(",")
dot = Literal(".")
slash = Literal("/")
bslash = Literal("\\")
star = Literal("*")
semi = Literal(";")
at = Literal("@")
minus = Literal("-")
# keywords
strict_ = CaselessLiteral("strict")
graph_ = CaselessLiteral("graph")
digraph_ = CaselessLiteral("digraph")
subgraph_ = CaselessLiteral("subgraph")
node_ = CaselessLiteral("node")
edge_ = CaselessLiteral("edge")
# token definitions
identifier = Word(alphanums + "_.").setName("identifier")
double_quoted_string = QuotedString(
'"', multiline=True, unquoteResults=False, escChar="\\"
)
noncomma = "".join([c for c in printables if c != ","])
alphastring_ = OneOrMore(CharsNotIn(noncomma + " "))
def parse_html(s, loc, toks):
return "<%s>" % "".join(toks[0])
opener = "<"
closer = ">"
html_text = (
nestedExpr(opener, closer, (CharsNotIn(opener + closer)))
.setParseAction(parse_html)
.leaveWhitespace()
)
ID = (
identifier | html_text | double_quoted_string | alphastring_
).setName("ID")
float_number = Combine(
Optional(minus) + OneOrMore(Word(nums + "."))
).setName("float_number")
righthand_id = (float_number | ID).setName("righthand_id")
port_angle = (at + ID).setName("port_angle")
port_location = (
OneOrMore(Group(colon + ID))
| Group(colon + lparen + ID + comma + ID + rparen)
).setName("port_location")
port = (
Group(port_location + Optional(port_angle))
| Group(port_angle + Optional(port_location))
).setName("port")
node_id = ID + Optional(port)
a_list = OneOrMore(
ID + Optional(equals + righthand_id) + Optional(comma.suppress())
).setName("a_list")
attr_list = OneOrMore(
lbrack.suppress() + Optional(a_list) + rbrack.suppress()
).setName("attr_list")
attr_stmt = (Group(graph_ | node_ | edge_) + attr_list).setName(
"attr_stmt"
)
edgeop = (Literal("--") | Literal("->")).setName("edgeop")
stmt_list = Forward()
graph_stmt = Group(
lbrace.suppress()
+ Optional(stmt_list)
+ rbrace.suppress()
+ Optional(semi.suppress())
).setName("graph_stmt")
edge_point = Forward()
edgeRHS = OneOrMore(edgeop + edge_point)
edge_stmt = edge_point + edgeRHS + Optional(attr_list)
subgraph = Group(subgraph_ + Optional(ID) + graph_stmt).setName(
"subgraph"
)
edge_point << Group(subgraph | graph_stmt | node_id).setName(
"edge_point"
)
node_stmt = (
node_id + Optional(attr_list) + Optional(semi.suppress())
).setName("node_stmt")
assignment = (ID + equals + righthand_id).setName("assignment")
stmt = (
assignment
| edge_stmt
| attr_stmt
| subgraph
| graph_stmt
| node_stmt
).setName("stmt")
stmt_list << OneOrMore(stmt + Optional(semi.suppress()))
graphparser = OneOrMore(
(
Optional(strict_)
+ Group((graph_ | digraph_))
+ Optional(ID)
+ graph_stmt
).setResultsName("graph")
)
singleLineComment = Group("//" + restOfLine) | Group("#" + restOfLine)
# actions
graphparser.ignore(singleLineComment)
graphparser.ignore(cStyleComment)
assignment.setParseAction(push_attr_list)
a_list.setParseAction(push_attr_list)
edge_stmt.setParseAction(push_edge_stmt)
node_stmt.setParseAction(push_node_stmt)
attr_stmt.setParseAction(push_default_stmt)
subgraph.setParseAction(push_subgraph_stmt)
graph_stmt.setParseAction(push_graph_stmt)
graphparser.setParseAction(push_top_graph_stmt)
return graphparser
def parse_dot_data(s):
"""Parse DOT description in (unicode) string `s`.
@return: Graphs that result from parsing.
@rtype: `list` of `pydot.Dot`
"""
global top_graphs
top_graphs = list()
try:
graphparser = graph_definition()
graphparser.parseWithTabs()
tokens = graphparser.parseString(s)
return list(tokens)
except ParseException as err:
print(err.line)
print(" " * (err.column - 1) + "^")
print(err)
return None
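# Minimal usage sketch (added for illustration, not part of the original
# module): parse a DOT string and print it back out.  The sample graph and
# variable names below are made up for the example.
if __name__ == "__main__":
    example_dot = 'digraph G { a -> b [label="example"]; }'
    graphs = parse_dot_data(example_dot)  # list of pydot.Dot graphs, or None on error
    if graphs:
        print(graphs[0].to_string())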
| mit | -2,621,469,633,389,718,500 | 25.917857 | 78 | 0.521295 | false |