repo_name | path | copies | size | content | license
---|---|---|---|---|---
dajohnso/cfme_tests | cfme/intelligence/reports/widgets/menu_widgets.py | 1 | 1845 |
# -*- coding: utf-8 -*-
"""Page model for Cloud Intel / Reports / Dashboard Widgets / Menus"""
from widgetastic_manageiq import MenuShortcutsPicker
from utils.appliance.implementations.ui import navigator
from . import (
BaseDashboardReportWidget,
BaseDashboardWidgetFormCommon,
BaseEditDashboardWidgetStep,
BaseEditDashboardWidgetView,
BaseNewDashboardWidgetStep,
BaseNewDashboardWidgetView
)
class MenuWidgetFormCommon(BaseDashboardWidgetFormCommon):
menu_shortcuts = MenuShortcutsPicker(
"form_filter_div",
select_id="add_shortcut",
names_locator=".//input[starts-with(@name, 'shortcut_desc_')]",
remove_locator=".//input[@value={}]/../a[@title='Remove this Shortcut']"
)
class NewMenuWidgetView(BaseNewDashboardWidgetView, MenuWidgetFormCommon):
pass
class EditMenuWidgetView(BaseEditDashboardWidgetView, MenuWidgetFormCommon):
pass
class MenuWidget(BaseDashboardReportWidget):
TYPE = "Menus"
TITLE = "Menu"
pretty_attrs = ["description", "shortcuts", "visibility"]
def __init__(self, title, description=None, active=None, shortcuts=None, visibility=None):
self.title = title
self.description = description
self.active = active
self.shortcuts = shortcuts
self.visibility = visibility
@property
def fill_dict(self):
return {
"widget_title": self.title,
"description": self.description,
"active": self.active,
"menu_shortcuts": self.shortcuts,
"visibility": self.visibility
}
@navigator.register(MenuWidget, "Add")
class NewMenuWidget(BaseNewDashboardWidgetStep):
VIEW = NewMenuWidgetView
@navigator.register(MenuWidget, "Edit")
class EditMenuWidget(BaseEditDashboardWidgetStep):
VIEW = EditMenuWidgetView
| gpl-2.0 |
celarco/ardupilot | Tools/autotest/arduplane.py | 4 | 17233 |
# fly ArduPlane in SIL
import util, pexpect, sys, time, math, shutil, os
from common import *
from pymavlink import mavutil
import random
# get location of scripts
testdir=os.path.dirname(os.path.realpath(__file__))
HOME_LOCATION='-35.362938,149.165085,585,354'
WIND="0,180,0.2" # speed,direction,variance
homeloc = None
def takeoff(mavproxy, mav):
    '''take off and climb to 150m above home'''
# wait for EKF and GPS checks to pass
wait_seconds(mav, 30)
mavproxy.send('arm throttle\n')
mavproxy.expect('ARMED')
mavproxy.send('switch 4\n')
wait_mode(mav, 'FBWA')
# some rudder to counteract the prop torque
mavproxy.send('rc 4 1700\n')
# some up elevator to keep the tail down
mavproxy.send('rc 2 1200\n')
# get it moving a bit first
mavproxy.send('rc 3 1300\n')
mav.recv_match(condition='VFR_HUD.groundspeed>6', blocking=True)
# a bit faster again, straighten rudder
mavproxy.send('rc 3 1600\n')
mavproxy.send('rc 4 1500\n')
mav.recv_match(condition='VFR_HUD.groundspeed>12', blocking=True)
# hit the gas harder now, and give it some more elevator
mavproxy.send('rc 2 1100\n')
mavproxy.send('rc 3 2000\n')
# gain a bit of altitude
if not wait_altitude(mav, homeloc.alt+150, homeloc.alt+180, timeout=30):
return False
# level off
mavproxy.send('rc 2 1500\n')
print("TAKEOFF COMPLETE")
return True
def fly_left_circuit(mavproxy, mav):
'''fly a left circuit, 200m on a side'''
mavproxy.send('switch 4\n')
wait_mode(mav, 'FBWA')
mavproxy.send('rc 3 2000\n')
if not wait_level_flight(mavproxy, mav):
return False
print("Flying left circuit")
# do 4 turns
for i in range(0,4):
# hard left
print("Starting turn %u" % i)
mavproxy.send('rc 1 1000\n')
if not wait_heading(mav, 270 - (90*i), accuracy=10):
return False
mavproxy.send('rc 1 1500\n')
print("Starting leg %u" % i)
if not wait_distance(mav, 100, accuracy=20):
return False
print("Circuit complete")
return True
def fly_RTL(mavproxy, mav):
'''fly to home'''
print("Flying home in RTL")
mavproxy.send('switch 2\n')
wait_mode(mav, 'RTL')
if not wait_location(mav, homeloc, accuracy=120,
target_altitude=homeloc.alt+100, height_accuracy=20,
timeout=180):
return False
print("RTL Complete")
return True
def fly_LOITER(mavproxy, mav, num_circles=4):
'''loiter where we are'''
print("Testing LOITER for %u turns" % num_circles)
mavproxy.send('loiter\n')
wait_mode(mav, 'LOITER')
m = mav.recv_match(type='VFR_HUD', blocking=True)
initial_alt = m.alt
print("Initial altitude %u\n" % initial_alt)
while num_circles > 0:
if not wait_heading(mav, 0, accuracy=10, timeout=60):
return False
if not wait_heading(mav, 180, accuracy=10, timeout=60):
return False
num_circles -= 1
print("Loiter %u circles left" % num_circles)
m = mav.recv_match(type='VFR_HUD', blocking=True)
final_alt = m.alt
print("Final altitude %u initial %u\n" % (final_alt, initial_alt))
mavproxy.send('mode FBWA\n')
wait_mode(mav, 'FBWA')
if abs(final_alt - initial_alt) > 20:
print("Failed to maintain altitude")
return False
print("Completed Loiter OK")
return True
def fly_CIRCLE(mavproxy, mav, num_circles=1):
'''circle where we are'''
print("Testing CIRCLE for %u turns" % num_circles)
mavproxy.send('mode CIRCLE\n')
wait_mode(mav, 'CIRCLE')
m = mav.recv_match(type='VFR_HUD', blocking=True)
initial_alt = m.alt
print("Initial altitude %u\n" % initial_alt)
while num_circles > 0:
if not wait_heading(mav, 0, accuracy=10, timeout=60):
return False
if not wait_heading(mav, 180, accuracy=10, timeout=60):
return False
num_circles -= 1
print("CIRCLE %u circles left" % num_circles)
m = mav.recv_match(type='VFR_HUD', blocking=True)
final_alt = m.alt
print("Final altitude %u initial %u\n" % (final_alt, initial_alt))
mavproxy.send('mode FBWA\n')
wait_mode(mav, 'FBWA')
if abs(final_alt - initial_alt) > 20:
print("Failed to maintain altitude")
return False
print("Completed CIRCLE OK")
return True
def wait_level_flight(mavproxy, mav, accuracy=5, timeout=30):
'''wait for level flight'''
tstart = get_sim_time(mav)
print("Waiting for level flight")
mavproxy.send('rc 1 1500\n')
mavproxy.send('rc 2 1500\n')
mavproxy.send('rc 4 1500\n')
while get_sim_time(mav) < tstart + timeout:
m = mav.recv_match(type='ATTITUDE', blocking=True)
roll = math.degrees(m.roll)
pitch = math.degrees(m.pitch)
print("Roll=%.1f Pitch=%.1f" % (roll, pitch))
if math.fabs(roll) <= accuracy and math.fabs(pitch) <= accuracy:
print("Attained level flight")
return True
print("Failed to attain level flight")
return False
def change_altitude(mavproxy, mav, altitude, accuracy=30):
'''get to a given altitude'''
mavproxy.send('mode FBWA\n')
wait_mode(mav, 'FBWA')
alt_error = mav.messages['VFR_HUD'].alt - altitude
if alt_error > 0:
mavproxy.send('rc 2 2000\n')
else:
mavproxy.send('rc 2 1000\n')
if not wait_altitude(mav, altitude-accuracy/2, altitude+accuracy/2):
return False
mavproxy.send('rc 2 1500\n')
print("Reached target altitude at %u" % mav.messages['VFR_HUD'].alt)
return wait_level_flight(mavproxy, mav)
def axial_left_roll(mavproxy, mav, count=1):
'''fly a left axial roll'''
# full throttle!
mavproxy.send('rc 3 2000\n')
if not change_altitude(mavproxy, mav, homeloc.alt+300):
return False
# fly the roll in manual
mavproxy.send('switch 6\n')
wait_mode(mav, 'MANUAL')
while count > 0:
print("Starting roll")
mavproxy.send('rc 1 1000\n')
if not wait_roll(mav, -150, accuracy=90):
mavproxy.send('rc 1 1500\n')
return False
if not wait_roll(mav, 150, accuracy=90):
mavproxy.send('rc 1 1500\n')
return False
if not wait_roll(mav, 0, accuracy=90):
mavproxy.send('rc 1 1500\n')
return False
count -= 1
# back to FBWA
mavproxy.send('rc 1 1500\n')
mavproxy.send('switch 4\n')
wait_mode(mav, 'FBWA')
mavproxy.send('rc 3 1700\n')
return wait_level_flight(mavproxy, mav)
def inside_loop(mavproxy, mav, count=1):
    '''fly an inside loop'''
# full throttle!
mavproxy.send('rc 3 2000\n')
if not change_altitude(mavproxy, mav, homeloc.alt+300):
return False
# fly the loop in manual
mavproxy.send('switch 6\n')
wait_mode(mav, 'MANUAL')
while count > 0:
print("Starting loop")
mavproxy.send('rc 2 1000\n')
if not wait_pitch(mav, -60, accuracy=20):
return False
if not wait_pitch(mav, 0, accuracy=20):
return False
count -= 1
# back to FBWA
mavproxy.send('rc 2 1500\n')
mavproxy.send('switch 4\n')
wait_mode(mav, 'FBWA')
mavproxy.send('rc 3 1700\n')
return wait_level_flight(mavproxy, mav)
def test_stabilize(mavproxy, mav, count=1):
'''fly stabilize mode'''
# full throttle!
mavproxy.send('rc 3 2000\n')
mavproxy.send('rc 2 1300\n')
if not change_altitude(mavproxy, mav, homeloc.alt+300):
return False
mavproxy.send('rc 2 1500\n')
mavproxy.send("mode STABILIZE\n")
wait_mode(mav, 'STABILIZE')
count = 1
while count > 0:
print("Starting roll")
mavproxy.send('rc 1 2000\n')
if not wait_roll(mav, -150, accuracy=90):
return False
if not wait_roll(mav, 150, accuracy=90):
return False
if not wait_roll(mav, 0, accuracy=90):
return False
count -= 1
mavproxy.send('rc 1 1500\n')
if not wait_roll(mav, 0, accuracy=5):
return False
# back to FBWA
mavproxy.send('mode FBWA\n')
wait_mode(mav, 'FBWA')
mavproxy.send('rc 3 1700\n')
return wait_level_flight(mavproxy, mav)
def test_acro(mavproxy, mav, count=1):
'''fly ACRO mode'''
# full throttle!
mavproxy.send('rc 3 2000\n')
mavproxy.send('rc 2 1300\n')
if not change_altitude(mavproxy, mav, homeloc.alt+300):
return False
mavproxy.send('rc 2 1500\n')
mavproxy.send("mode ACRO\n")
wait_mode(mav, 'ACRO')
count = 1
while count > 0:
print("Starting roll")
mavproxy.send('rc 1 1000\n')
if not wait_roll(mav, -150, accuracy=90):
return False
if not wait_roll(mav, 150, accuracy=90):
return False
if not wait_roll(mav, 0, accuracy=90):
return False
count -= 1
mavproxy.send('rc 1 1500\n')
# back to FBWA
mavproxy.send('mode FBWA\n')
wait_mode(mav, 'FBWA')
wait_level_flight(mavproxy, mav)
mavproxy.send("mode ACRO\n")
wait_mode(mav, 'ACRO')
count = 2
while count > 0:
print("Starting loop")
mavproxy.send('rc 2 1000\n')
if not wait_pitch(mav, -60, accuracy=20):
return False
if not wait_pitch(mav, 0, accuracy=20):
return False
count -= 1
mavproxy.send('rc 2 1500\n')
# back to FBWA
mavproxy.send('mode FBWA\n')
wait_mode(mav, 'FBWA')
mavproxy.send('rc 3 1700\n')
return wait_level_flight(mavproxy, mav)
def test_FBWB(mavproxy, mav, count=1, mode='FBWB'):
'''fly FBWB or CRUISE mode'''
mavproxy.send("mode %s\n" % mode)
wait_mode(mav, mode)
mavproxy.send('rc 3 1700\n')
mavproxy.send('rc 2 1500\n')
# lock in the altitude by asking for an altitude change then releasing
mavproxy.send('rc 2 1000\n')
wait_distance(mav, 50, accuracy=20)
mavproxy.send('rc 2 1500\n')
wait_distance(mav, 50, accuracy=20)
m = mav.recv_match(type='VFR_HUD', blocking=True)
initial_alt = m.alt
print("Initial altitude %u\n" % initial_alt)
print("Flying right circuit")
# do 4 turns
for i in range(0,4):
        # hard right
print("Starting turn %u" % i)
mavproxy.send('rc 1 1800\n')
if not wait_heading(mav, 0 + (90*i), accuracy=20, timeout=60):
mavproxy.send('rc 1 1500\n')
return False
mavproxy.send('rc 1 1500\n')
print("Starting leg %u" % i)
if not wait_distance(mav, 100, accuracy=20):
return False
print("Circuit complete")
print("Flying rudder left circuit")
# do 4 turns
for i in range(0,4):
# hard left
print("Starting turn %u" % i)
mavproxy.send('rc 4 1900\n')
if not wait_heading(mav, 360 - (90*i), accuracy=20, timeout=60):
mavproxy.send('rc 4 1500\n')
return False
mavproxy.send('rc 4 1500\n')
print("Starting leg %u" % i)
if not wait_distance(mav, 100, accuracy=20):
return False
print("Circuit complete")
m = mav.recv_match(type='VFR_HUD', blocking=True)
final_alt = m.alt
print("Final altitude %u initial %u\n" % (final_alt, initial_alt))
# back to FBWA
mavproxy.send('mode FBWA\n')
wait_mode(mav, 'FBWA')
if abs(final_alt - initial_alt) > 20:
print("Failed to maintain altitude")
return False
return wait_level_flight(mavproxy, mav)
def setup_rc(mavproxy):
'''setup RC override control'''
for chan in [1,2,4,5,6,7]:
mavproxy.send('rc %u 1500\n' % chan)
mavproxy.send('rc 3 1000\n')
mavproxy.send('rc 8 1800\n')
def fly_mission(mavproxy, mav, filename, height_accuracy=-1, target_altitude=None):
'''fly a mission from a file'''
global homeloc
print("Flying mission %s" % filename)
mavproxy.send('wp load %s\n' % filename)
mavproxy.expect('Flight plan received')
mavproxy.send('wp list\n')
mavproxy.expect('Requesting [0-9]+ waypoints')
mavproxy.send('switch 1\n') # auto mode
wait_mode(mav, 'AUTO')
if not wait_waypoint(mav, 1, 7, max_dist=60):
return False
if not wait_groundspeed(mav, 0, 0.5, timeout=60):
return False
print("Mission OK")
return True
def fly_ArduPlane(binary, viewerip=None, map=False, valgrind=False, gdb=False):
    '''fly ArduPlane in SIL

    pass viewerip as an IP address to optionally send fg and mavproxy
    packets there as well, for local real-time viewing of the flight
    '''
global homeloc
options = '--sitl=127.0.0.1:5501 --out=127.0.0.1:19550 --streamrate=10'
if viewerip:
options += " --out=%s:14550" % viewerip
if map:
options += ' --map'
sil = util.start_SIL(binary, model='plane-elevrev', home=HOME_LOCATION, speedup=10,
valgrind=valgrind, gdb=gdb,
defaults_file=os.path.join(testdir, 'default_params/plane-jsbsim.parm'))
mavproxy = util.start_MAVProxy_SIL('ArduPlane', options=options)
    mavproxy.expect(r'Telemetry log: (\S+)')
logfile = mavproxy.match.group(1)
print("LOGFILE %s" % logfile)
buildlog = util.reltopdir("../buildlogs/ArduPlane-test.tlog")
print("buildlog=%s" % buildlog)
if os.path.exists(buildlog):
os.unlink(buildlog)
try:
os.link(logfile, buildlog)
except Exception:
pass
util.expect_setup_callback(mavproxy, expect_callback)
mavproxy.expect('Received [0-9]+ parameters')
expect_list_clear()
expect_list_extend([sil, mavproxy])
print("Started simulator")
# get a mavlink connection going
try:
mav = mavutil.mavlink_connection('127.0.0.1:19550', robust_parsing=True)
except Exception, msg:
print("Failed to start mavlink connection on 127.0.0.1:19550" % msg)
raise
mav.message_hooks.append(message_hook)
mav.idle_hooks.append(idle_hook)
failed = False
fail_list = []
e = 'None'
try:
print("Waiting for a heartbeat with mavlink protocol %s" % mav.WIRE_PROTOCOL_VERSION)
mav.wait_heartbeat()
print("Setting up RC parameters")
setup_rc(mavproxy)
print("Waiting for GPS fix")
mav.recv_match(condition='VFR_HUD.alt>10', blocking=True)
mav.wait_gps_fix()
while mav.location().alt < 10:
mav.wait_gps_fix()
homeloc = mav.location()
print("Home location: %s" % homeloc)
if not takeoff(mavproxy, mav):
print("Failed takeoff")
failed = True
fail_list.append("takeoff")
if not fly_left_circuit(mavproxy, mav):
print("Failed left circuit")
failed = True
fail_list.append("left_circuit")
if not axial_left_roll(mavproxy, mav, 1):
print("Failed left roll")
failed = True
fail_list.append("left_roll")
if not inside_loop(mavproxy, mav):
print("Failed inside loop")
failed = True
fail_list.append("inside_loop")
if not test_stabilize(mavproxy, mav):
print("Failed stabilize test")
failed = True
fail_list.append("stabilize")
if not test_acro(mavproxy, mav):
print("Failed ACRO test")
failed = True
fail_list.append("acro")
if not test_FBWB(mavproxy, mav):
print("Failed FBWB test")
failed = True
fail_list.append("fbwb")
if not test_FBWB(mavproxy, mav, mode='CRUISE'):
print("Failed CRUISE test")
failed = True
fail_list.append("cruise")
if not fly_RTL(mavproxy, mav):
print("Failed RTL")
failed = True
fail_list.append("RTL")
if not fly_LOITER(mavproxy, mav):
print("Failed LOITER")
failed = True
fail_list.append("LOITER")
if not fly_CIRCLE(mavproxy, mav):
print("Failed CIRCLE")
failed = True
fail_list.append("LOITER")
if not fly_mission(mavproxy, mav, os.path.join(testdir, "ap1.txt"), height_accuracy = 10,
target_altitude=homeloc.alt+100):
print("Failed mission")
failed = True
fail_list.append("mission")
if not log_download(mavproxy, mav, util.reltopdir("../buildlogs/ArduPlane-log.bin")):
print("Failed log download")
failed = True
fail_list.append("log_download")
except pexpect.TIMEOUT, e:
print("Failed with timeout")
failed = True
fail_list.append("timeout")
mav.close()
util.pexpect_close(mavproxy)
util.pexpect_close(sil)
valgrind_log = sil.valgrind_log_filepath()
if os.path.exists(valgrind_log):
os.chmod(valgrind_log, 0644)
shutil.copy(valgrind_log, util.reltopdir("../buildlogs/ArduPlane-valgrind.log"))
if failed:
print("FAILED: %s" % e, fail_list)
return False
return True
| gpl-3.0 |
frenetic-lang/featherweight-openflow | py/CustomTopo.py | 1 | 4447 |
from mininet.topo import Topo, Node
import random
import networkx as nx
import math
class WattsStrogatzTopology(Topo):
def __init__(self, numSwitches=6):
super(WattsStrogatzTopology, self).__init__()
        # work out host and switch numbering
numHosts = numSwitches
hosts = range(1, numHosts+1)
firstSwitch = max(101, numHosts+1)
switches = range(firstSwitch, numSwitches + firstSwitch)
# Add switches
for s in switches:
self.add_node(s, Node(is_switch=True))
# Add hosts
for h in hosts:
self.add_node(h, Node(is_switch=False))
# Add links
for h in hosts:
self.add_edge(h, switches[h-1])
rev_switches = list(switches)
rev_switches.reverse()
[last] = rev_switches[-1:]
for s in rev_switches:
self.add_edge(s, last)
last = s
# Add "magic" links
self.add_edge(101, 103)
self.add_edge(102, 105)
# Add monitoring host
# self.add_node(99, Node(is_switch=False))
# for s in switches:
# self.add_edge(s, 99)
self.enable_all()
# 4 hosts on each edge switch
# N/2 core switches
class FattreeTopology(Topo):
def __init__(self, numEdgeSwitches=4):
super(FattreeTopology, self).__init__()
        # work out host and switch numbering
numHosts = 4*numEdgeSwitches
numCoreSwitches = 2
hosts = range(1, numHosts+1)
firstSwitch = max(101, numHosts+1)
edgeSwitches = range(firstSwitch, numEdgeSwitches + firstSwitch)
self.edgeSwitches = edgeSwitches
coreSwitches=range(numEdgeSwitches + firstSwitch, numEdgeSwitches + firstSwitch + numCoreSwitches)
self.coreSwitches = coreSwitches
# Add switches
for s in edgeSwitches:
self.add_node(s, Node(is_switch=True))
for s in coreSwitches:
self.add_node(s, Node(is_switch=True))
# Add hosts
for h in hosts:
self.add_node(h, Node(is_switch=False))
# Add links
for h in hosts:
if h <= 4:
self.add_edge(h, firstSwitch)
elif h <= 8:
self.add_edge(h, firstSwitch + 1)
elif h <= 12:
self.add_edge(h, firstSwitch + 2)
else:
self.add_edge(h, firstSwitch + 3)
# Add monitoring host
# self.add_node(99, Node(is_switch=False))
for s1 in edgeSwitches:
if (s1 - firstSwitch) < numEdgeSwitches / 2:
self.add_edge(s1, coreSwitches[0])
else:
self.add_edge(s1, coreSwitches[1])
# connect monitor to every edge switch
# self.add_edge(99, s1)
self.add_edge(coreSwitches[0], coreSwitches[1])
self.enable_all()
class WaxmanTopology(Topo):
def __init__(self, num_switches=5,seed=100):
super(WaxmanTopology, self).__init__()
num_hosts_per_switch = 2
# Needed so that subsequent calls will generate the same graph
random.seed(seed)
num_hosts = num_switches*num_hosts_per_switch
# build waxman graph
wax = nx.waxman_graph(num_switches,.9,.9)
# Add switches
for s in wax.nodes():
self.add_node(s+1, Node(is_switch=True))
# Add edges
for s1, s2 in wax.edges():
print "new edge"
self.add_edge(s1+1, s2+1)
# Add hosts
hostoffset = num_switches+2
for s in wax:
# Add host
host_base = num_hosts_per_switch*s + hostoffset
for host in range(0, num_hosts_per_switch):
self.add_node(host_base + host, Node(is_switch=False))
self.add_edge(host_base + host, s+1)
# # Globally connected host
# self.add_host(9999)
# for switch in wax:
# self.add_link(9999, switch)
# f = open('/home/openflow/workspace/foo.log', 'w')
# f.write('hosts: %d\n' % len(self.hosts()))
# f.close()
# assert(False)
self.enable_all()
topos = {
'wattsstrogatz': ( WattsStrogatzTopology ),
'fattree': ( FattreeTopology ),
'waxman': ( WaxmanTopology )
}
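# Usage sketch (assuming this file is saved as CustomTopo.py, as the repo
# path suggests): Mininet can load one of these topologies by name, e.g.
#   sudo mn --custom CustomTopo.py --topo fattree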
| bsd-3-clause |
PennyDreadfulMTG/Penny-Dreadful-Tools | decksite/data/elo.py | 1 | 2016 |
from decksite.data import person
from decksite.database import db
from shared import guarantee
from shared.database import sqlescape
# Using chess numbers here would make individual matches have too much meaning. Magic matches should move your rating less because of the inherent variance in Magic.
# Futz with the width in order to make the numbers look like chess numbers, so that similar numbers read as "good" and "great", even though a gap of 200 now says much less about who is going to win than it does in chess.
# See http://www.mtgeloproject.net/faq.php for some other thoughts on this (their numbers didn't quite work applied to our data, but we went with something similar that was a better fit).
STARTING_ELO = 1500
ELO_WIDTH = 1600
K_FACTOR = 12
def adjustment(elo1: int, elo2: int) -> int:
e = expected(elo1, elo2)
return max(round(K_FACTOR * (1 - e)), 1)
def expected(elo1: int, elo2: int) -> float:
return 1.0 / (1 + 10**((elo2 - elo1) / ELO_WIDTH))
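# Worked example (a sketch, not part of the original module): two players at
# STARTING_ELO have expected score 0.5, so the winner gains
# max(round(12 * 0.5), 1) = 6 points. A 200-point favourite has expected
# score 1 / (1 + 10 ** (-200 / 1600)), about 0.57, so beating the underdog
# is worth only about 5 points.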
def adjust_elo(winning_deck_id: int, losing_deck_id: int) -> None:
if not losing_deck_id:
return # Intentional draws do not affect Elo.
winner = guarantee.exactly_one(person.load_people('p.id IN (SELECT person_id FROM deck WHERE id = {winning_deck_id})'.format(winning_deck_id=sqlescape(winning_deck_id))))
loser = guarantee.exactly_one(person.load_people('p.id IN (SELECT person_id FROM deck WHERE id = {losing_deck_id})'.format(losing_deck_id=sqlescape(losing_deck_id))))
adj = adjustment(winner.elo or STARTING_ELO, loser.elo or STARTING_ELO)
sql = 'UPDATE person SET elo = IFNULL(elo, {starting_elo}) + %s WHERE id = %s'.format(starting_elo=sqlescape(STARTING_ELO))
db().begin('per-match-elo-adjustment')
print('Elo (winner) ', adj, winner.id, winner.mtgo_username, winner.elo, sql)
print('Elo (loser) ', -adj, loser.id, loser.mtgo_username, loser.elo, sql)
db().execute(sql, [adj, winner.id])
db().execute(sql, [-adj, loser.id])
db().commit('per-match-elo-adjustment')
| gpl-3.0 |
iffy/AutobahnPython | examples/twisted/wamp/rpc/options/backend.py | 2 | 2668 |
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from os import environ
from twisted.internet.defer import inlineCallbacks
from autobahn.wamp.types import RegisterOptions, PublishOptions
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
class Component(ApplicationSession):
"""
An application component providing procedures with
different kinds of arguments.
"""
@inlineCallbacks
def onJoin(self, details):
print("session attached")
def square(val, details=None):
print("square called from: {}".format(details.caller))
if val < 0:
self.publish('com.myapp.square_on_nonpositive', val)
elif val == 0:
if details.caller:
options = PublishOptions(exclude=[details.caller])
else:
options = None
self.publish('com.myapp.square_on_nonpositive', val, options=options)
return val * val
yield self.register(square, 'com.myapp.square', RegisterOptions(details_arg='details'))
print("procedure registered")
if __name__ == '__main__':
runner = ApplicationRunner(
environ.get("AUTOBAHN_DEMO_ROUTER", "ws://localhost:8080/ws"),
u"crossbardemo",
debug_wamp=False, # optional; log many WAMP details
debug=False, # optional; log even more details
)
runner.run(Component)
| mit |
ssh1/stbgui | lib/python/Plugins/SystemPlugins/WirelessLan/Wlan.py | 2 | 11267 |
from Components.config import config, ConfigYesNo, NoSave, ConfigSubsection, ConfigText, ConfigSelection, ConfigPassword
from Components.Console import Console
from Components.Network import iNetwork
from os import system, path as os_path
from string import maketrans, strip
import sys
import types
from re import compile as re_compile, search as re_search, escape as re_escape
from pythonwifi.iwlibs import getNICnames, Wireless, Iwfreq, getWNICnames
from pythonwifi import flags as wififlags
encryption_list = ["Unencrypted", "WEP", "WPA", "WPA/WPA2", "WPA2"]
weplist = ["ASCII", "HEX"]
config.plugins.wlan = ConfigSubsection()
config.plugins.wlan.essid = NoSave(ConfigText(default = "", fixed_size = False))
config.plugins.wlan.hiddenessid = NoSave(ConfigYesNo(default = False))
config.plugins.wlan.encryption = NoSave(ConfigSelection(encryption_list, default = "WPA2"))
config.plugins.wlan.wepkeytype = NoSave(ConfigSelection(weplist, default = "ASCII"))
config.plugins.wlan.psk = NoSave(ConfigPassword(default = "", fixed_size = False))
def getWlanConfigName(iface):
return '/etc/wpa_supplicant.' + iface + '.conf'
class Wlan:
def __init__(self, iface = None):
self.iface = iface
self.oldInterfaceState = None
a = ''; b = ''
for i in range(0, 255):
a = a + chr(i)
if i < 32 or i > 127:
b = b + ' '
else:
b = b + chr(i)
self.asciitrans = maketrans(a, b)
def asciify(self, str):
return str.translate(self.asciitrans)
def getWirelessInterfaces(self):
return getWNICnames()
def setInterface(self, iface = None):
self.iface = iface
def getInterface(self):
return self.iface
def getNetworkList(self):
if self.oldInterfaceState is None:
self.oldInterfaceState = iNetwork.getAdapterAttribute(self.iface, "up")
if self.oldInterfaceState is False:
if iNetwork.getAdapterAttribute(self.iface, "up") is False:
iNetwork.setAdapterAttribute(self.iface, "up", True)
system("ifconfig "+self.iface+" up")
ifobj = Wireless(self.iface) # a Wireless NIC Object
try:
scanresults = ifobj.scan()
        except Exception:
            scanresults = None
            print "[Wlan.py] No wireless networks could be found"
aps = {}
if scanresults is not None:
(num_channels, frequencies) = ifobj.getChannelInfo()
index = 1
for result in scanresults:
bssid = result.bssid
if result.encode.flags & wififlags.IW_ENCODE_DISABLED > 0:
encryption = False
elif result.encode.flags & wififlags.IW_ENCODE_NOKEY > 0:
encryption = True
else:
encryption = None
signal = str(result.quality.siglevel-0x100) + " dBm"
quality = "%s/%s" % (result.quality.quality,ifobj.getQualityMax().quality)
extra = []
for element in result.custom:
element = element.encode()
extra.append( strip(self.asciify(element)) )
for element in extra:
if 'SignalStrength' in element:
signal = element[element.index('SignalStrength')+15:element.index(',L')]
if 'LinkQuality' in element:
quality = element[element.index('LinkQuality')+12:len(element)]
aps[bssid] = {
'active' : True,
'bssid': result.bssid,
'channel': frequencies.index(ifobj._formatFrequency(result.frequency.getFrequency())) + 1,
'encrypted': encryption,
'essid': strip(self.asciify(result.essid)),
'iface': self.iface,
'maxrate' : ifobj._formatBitrate(result.rate[-1][-1]),
'noise' : '',#result.quality.nlevel-0x100,
'quality' : str(quality),
'signal' : str(signal),
'custom' : extra,
}
index = index + 1
return aps
def stopGetNetworkList(self):
if self.oldInterfaceState is not None:
if self.oldInterfaceState is False:
iNetwork.setAdapterAttribute(self.iface, "up", False)
system("ifconfig "+self.iface+" down")
self.oldInterfaceState = None
self.iface = None
iWlan = Wlan()
class wpaSupplicant:
def __init__(self):
pass
def writeConfig(self, iface):
essid = config.plugins.wlan.essid.value
hiddenessid = config.plugins.wlan.hiddenessid.value
encryption = config.plugins.wlan.encryption.value
wepkeytype = config.plugins.wlan.wepkeytype.value
psk = config.plugins.wlan.psk.value
fp = file(getWlanConfigName(iface), 'w')
fp.write('#WPA Supplicant Configuration by STB-GUI\n')
fp.write('ctrl_interface=/var/run/wpa_supplicant\n')
fp.write('eapol_version=1\n')
fp.write('fast_reauth=1\n')
fp.write('ap_scan=1\n')
fp.write('network={\n')
fp.write('\tssid="'+essid+'"\n')
if hiddenessid:
fp.write('\tscan_ssid=1\n')
else:
fp.write('\tscan_ssid=0\n')
if encryption in ('WPA', 'WPA2', 'WPA/WPA2'):
fp.write('\tkey_mgmt=WPA-PSK\n')
if encryption == 'WPA':
fp.write('\tproto=WPA\n')
fp.write('\tpairwise=TKIP\n')
fp.write('\tgroup=TKIP\n')
elif encryption == 'WPA2':
fp.write('\tproto=RSN\n')
fp.write('\tpairwise=CCMP\n')
fp.write('\tgroup=CCMP\n')
else:
fp.write('\tproto=WPA RSN\n')
fp.write('\tpairwise=CCMP TKIP\n')
fp.write('\tgroup=CCMP TKIP\n')
fp.write('\tpsk="'+psk+'"\n')
elif encryption == 'WEP':
fp.write('\tkey_mgmt=NONE\n')
if wepkeytype == 'ASCII':
fp.write('\twep_key0="'+psk+'"\n')
else:
fp.write('\twep_key0='+psk+'\n')
else:
fp.write('\tkey_mgmt=NONE\n')
fp.write('}')
fp.write('\n')
fp.close()
#system('cat ' + getWlanConfigName(iface))
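        # For WPA2, the file written above looks roughly like this (sketch):
        #   ctrl_interface=/var/run/wpa_supplicant
        #   eapol_version=1
        #   fast_reauth=1
        #   ap_scan=1
        #   network={
        #       ssid="MyNetwork"
        #       scan_ssid=0
        #       key_mgmt=WPA-PSK
        #       proto=RSN
        #       pairwise=CCMP
        #       group=CCMP
        #       psk="secret"
        #   }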
def loadConfig(self,iface):
configfile = getWlanConfigName(iface)
if not os_path.exists(configfile):
configfile = '/etc/wpa_supplicant.conf'
try:
#parse the wpasupplicant configfile
print "[Wlan.py] parsing configfile: ",configfile
fp = file(configfile, 'r')
supplicant = fp.readlines()
fp.close()
essid = None
encryption = "Unencrypted"
for s in supplicant:
split = s.strip().split('=',1)
if split[0] == 'scan_ssid':
if split[1] == '1':
config.plugins.wlan.hiddenessid.value = True
else:
config.plugins.wlan.hiddenessid.value = False
elif split[0] == 'ssid':
essid = split[1][1:-1]
config.plugins.wlan.essid.value = essid
elif split[0] == 'proto':
if split[1] == 'WPA' :
mode = 'WPA'
if split[1] == 'RSN':
mode = 'WPA2'
if split[1] in ('WPA RSN', 'WPA WPA2'):
mode = 'WPA/WPA2'
encryption = mode
elif split[0] == 'wep_key0':
encryption = 'WEP'
if split[1].startswith('"') and split[1].endswith('"'):
config.plugins.wlan.wepkeytype.value = 'ASCII'
config.plugins.wlan.psk.value = split[1][1:-1]
else:
config.plugins.wlan.wepkeytype.value = 'HEX'
config.plugins.wlan.psk.value = split[1]
elif split[0] == 'psk':
config.plugins.wlan.psk.value = split[1][1:-1]
else:
pass
config.plugins.wlan.encryption.value = encryption
wsconfig = {
'hiddenessid': config.plugins.wlan.hiddenessid.value,
'ssid': config.plugins.wlan.essid.value,
'encryption': config.plugins.wlan.encryption.value,
'wepkeytype': config.plugins.wlan.wepkeytype.value,
'key': config.plugins.wlan.psk.value,
}
for (key, item) in wsconfig.items():
if item is "None" or item is "":
if key == 'hiddenessid':
wsconfig['hiddenessid'] = False
if key == 'ssid':
wsconfig['ssid'] = ""
if key == 'encryption':
wsconfig['encryption'] = "WPA2"
if key == 'wepkeytype':
wsconfig['wepkeytype'] = "ASCII"
if key == 'key':
wsconfig['key'] = ""
except:
print "[Wlan.py] Error parsing ",configfile
wsconfig = {
'hiddenessid': False,
'ssid': "",
'encryption': "WPA2",
'wepkeytype': "ASCII",
'key': "",
}
#print "[Wlan.py] WS-CONFIG-->",wsconfig
return wsconfig
class Status:
def __init__(self):
self.wlaniface = {}
self.backupwlaniface = {}
self.statusCallback = None
self.WlanConsole = Console()
def stopWlanConsole(self):
if self.WlanConsole is not None:
print "[iStatus] killing self.WlanConsole"
self.WlanConsole.killAll()
self.WlanConsole = None
def getDataForInterface(self, iface, callback = None):
self.WlanConsole = Console()
cmd = "iwconfig " + iface
if callback is not None:
self.statusCallback = callback
self.WlanConsole.ePopen(cmd, self.iwconfigFinished, iface)
def iwconfigFinished(self, result, retval, extra_args):
iface = extra_args
data = { 'essid': False, 'frequency': False, 'accesspoint': False, 'bitrate': False, 'encryption': False, 'quality': False, 'signal': False }
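        # The parsing below walks typical iwconfig output, which (a sketch;
        # the exact format varies by driver) looks like:
        #   wlan0  IEEE 802.11bgn  ESSID:"MyNetwork"
        #          Mode:Managed  Frequency:2.437 GHz  Access Point: 00:11:22:33:44:55
        #          Bit Rate=54 Mb/s
        #          Link Quality=70/70  Signal level=-38 dBm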
for line in result.splitlines():
line = line.strip()
if "ESSID" in line:
if "off/any" in line:
ssid = "off"
else:
if "Nickname" in line:
ssid=(line[line.index('ESSID')+7:line.index('" Nickname')])
else:
ssid=(line[line.index('ESSID')+7:len(line)-1])
if ssid is not None:
data['essid'] = ssid
if "Frequency" in line:
frequency = line[line.index('Frequency')+10 :line.index(' GHz')]
if frequency is not None:
data['frequency'] = frequency
if "Access Point" in line:
if "Sensitivity" in line:
ap=line[line.index('Access Point')+14:line.index(' Sensitivity')]
else:
ap=line[line.index('Access Point')+14:len(line)]
if ap is not None:
data['accesspoint'] = ap
if "Bit Rate" in line:
if "kb" in line:
br = line[line.index('Bit Rate')+9 :line.index(' kb/s')]
else:
br = line[line.index('Bit Rate')+9 :line.index(' Mb/s')]
if br is not None:
data['bitrate'] = br
if "Encryption key" in line:
if ":off" in line:
enc = "off"
elif "Security" in line:
enc = line[line.index('Encryption key')+15 :line.index(' Security')]
if enc is not None:
enc = "on"
else:
enc = line[line.index('Encryption key')+15 :len(line)]
if enc is not None:
enc = "on"
if enc is not None:
data['encryption'] = enc
if 'Quality' in line:
if "/100" in line:
qual = line[line.index('Quality')+8:line.index(' Signal')]
else:
qual = line[line.index('Quality')+8:line.index('Sig')]
if qual is not None:
data['quality'] = qual
if 'Signal level' in line:
if "dBm" in line:
signal = line[line.index('Signal level')+13 :line.index(' dBm')] + " dBm"
elif "/100" in line:
if "Noise" in line:
signal = line[line.index('Signal level')+13:line.index(' Noise')]
else:
signal = line[line.index('Signal level')+13:len(line)]
else:
if "Noise" in line:
signal = line[line.index('Signal level')+13:line.index(' Noise')]
else:
signal = line[line.index('Signal level')+13:len(line)]
if signal is not None:
data['signal'] = signal
self.wlaniface[iface] = data
self.backupwlaniface = self.wlaniface
if self.WlanConsole is not None:
if len(self.WlanConsole.appContainers) == 0:
print "[Wlan.py] self.wlaniface after loading:", self.wlaniface
if self.statusCallback is not None:
self.statusCallback(True,self.wlaniface)
self.statusCallback = None
def getAdapterAttribute(self, iface, attribute):
self.iface = iface
        if self.iface in self.wlaniface:
            if attribute in self.wlaniface[self.iface]:
return self.wlaniface[self.iface][attribute]
return None
iStatus = Status()
| gpl-2.0 |
PaddlePaddle/Paddle | python/paddle/fluid/contrib/tests/test_quantize_transpiler.py | 2 | 11518 |
# copyright (c) 2018 paddlepaddle authors. all rights reserved.
#
# licensed under the apache license, version 2.0 (the "license");
# you may not use this file except in compliance with the license.
# you may obtain a copy of the license at
#
# http://www.apache.org/licenses/license-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the license is distributed on an "as is" basis,
# without warranties or conditions of any kind, either express or implied.
# see the license for the specific language governing permissions and
# limitations under the license.
import numpy as np
import six
import unittest
import paddle
import paddle.fluid as fluid
from paddle.fluid.contrib.quantize.quantize_transpiler import _original_var_name
from paddle.fluid.contrib.quantize.quantize_transpiler import QuantizeTranspiler
paddle.enable_static()
def linear_fc(num):
data = fluid.layers.data(name='image', shape=[1, 32, 32], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
hidden = data
for _ in six.moves.xrange(num):
hidden = fluid.layers.fc(hidden, size=128, act='relu')
loss = fluid.layers.cross_entropy(input=hidden, label=label)
loss = fluid.layers.mean(loss)
return loss
def residual_block(num):
def conv_bn_layer(input,
ch_out,
filter_size,
stride,
padding,
act='relu',
bias_attr=False):
tmp = fluid.layers.conv2d(
input=input,
filter_size=filter_size,
num_filters=ch_out,
stride=stride,
padding=padding,
act=None,
bias_attr=bias_attr)
return fluid.layers.batch_norm(input=tmp, act=act)
data = fluid.layers.data(name='image', shape=[1, 32, 32], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
hidden = data
for _ in six.moves.xrange(num):
conv = conv_bn_layer(hidden, 16, 3, 1, 1, act=None, bias_attr=True)
short = conv_bn_layer(hidden, 16, 1, 1, 0, act=None)
hidden = fluid.layers.elementwise_add(x=conv, y=short, act='relu')
fc = fluid.layers.fc(input=hidden, size=10)
loss = fluid.layers.cross_entropy(input=fc, label=label)
loss = fluid.layers.mean(loss)
return loss
def conv_net(img, label):
conv_pool_1 = fluid.nets.simple_img_conv_pool(
input=img,
filter_size=5,
num_filters=20,
pool_size=2,
pool_stride=2,
act="relu")
conv_pool_1 = fluid.layers.batch_norm(conv_pool_1)
conv_pool_2 = fluid.nets.simple_img_conv_pool(
input=conv_pool_1,
filter_size=5,
num_filters=50,
pool_size=2,
pool_stride=2,
act="relu")
prediction = fluid.layers.fc(input=conv_pool_2, size=10, act='softmax')
loss = fluid.layers.cross_entropy(input=prediction, label=label)
avg_loss = fluid.layers.mean(loss)
return avg_loss
class TestQuantizeTranspiler(unittest.TestCase):
def setUp(self):
        # since quant_op and dequant_op are not ready, use cos and sin for test
self.weight_quant_op_type = 'fake_quantize_abs_max'
self.dequant_op_type = 'fake_dequantize_max_abs'
self.quantizable_op_and_inputs = {
'conv2d': ['Input', 'Filter'],
'depthwise_conv2d': ['Input', 'Filter'],
'mul': ['X', 'Y']
}
self.quantizable_op_grad_and_inputs = {
'conv2d_grad': ['Input', 'Filter'],
'depthwise_conv2d_grad': ['Input', 'Filter'],
'mul_grad': ['X', 'Y']
}
def check_program(self, program):
quantized_ops = {}
persistable_vars = [
v.name
for v in filter(lambda var: var.persistable, program.list_vars())
]
for block in program.blocks:
for idx, op in enumerate(block.ops):
# check forward
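                # The transpiler inserts, for the i-th quantized input of an
                # op at index idx, a quant op at idx - 2*i - 2 immediately
                # followed by its dequant op at idx - 2*i - 1; the asserts
                # below check exactly that layout.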
if op.type in self.quantizable_op_and_inputs:
for i, arg_name in enumerate(op.input_arg_names):
quant_op_type = self.weight_quant_op_type if \
_original_var_name(arg_name) \
in persistable_vars else self.act_quant_op_type
self.assertTrue(
arg_name.endswith('.quantized.dequantized'))
if arg_name not in quantized_ops:
self.assertEqual(block.ops[idx - 2 * i - 1].type,
self.dequant_op_type)
self.assertEqual(block.ops[idx - 2 * i - 2].type,
quant_op_type)
quantized_ops[arg_name] = block.ops[idx - 2 * i - 2]
else:
op_idx = block.ops.index(quantized_ops[arg_name])
self.assertLess(op_idx, idx)
# check backward
if op.type in self.quantizable_op_grad_and_inputs:
for pname in self.quantizable_op_grad_and_inputs[op.type]:
arg_name = op.input(pname)[0]
self.assertTrue(
arg_name.endswith('.quantized.dequantized'))
self.assertTrue(arg_name in quantized_ops)
def linear_fc_quant(self, quant_type):
main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
loss = linear_fc(3)
opt = fluid.optimizer.Adam(learning_rate=0.001)
opt.minimize(loss)
t = QuantizeTranspiler(activation_quantize_type=quant_type)
t.training_transpile(main)
self.check_program(main)
def test_linear_fc_quant_abs_max(self):
self.act_quant_op_type = 'fake_quantize_abs_max'
self.linear_fc_quant('abs_max')
def test_linear_fc_quant_range_abs_max(self):
self.act_quant_op_type = 'fake_quantize_range_abs_max'
self.linear_fc_quant('range_abs_max')
def residual_block_quant(self, quant_type):
main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
loss = residual_block(2)
opt = fluid.optimizer.Adam(learning_rate=0.001)
opt.minimize(loss)
t = QuantizeTranspiler(activation_quantize_type=quant_type)
t.training_transpile(main)
self.check_program(main)
def test_residual_block_abs_max(self):
self.act_quant_op_type = 'fake_quantize_abs_max'
self.residual_block_quant('abs_max')
def test_residual_block_range_abs_max(self):
self.act_quant_op_type = 'fake_quantize_range_abs_max'
self.residual_block_quant('range_abs_max')
def freeze_program(self, use_cuda, seed):
def build_program(main, startup, is_test):
main.random_seed = seed
startup.random_seed = seed
with fluid.unique_name.guard():
with fluid.program_guard(main, startup):
img = fluid.layers.data(
name='image', shape=[1, 28, 28], dtype='float32')
label = fluid.layers.data(
name='label', shape=[1], dtype='int64')
loss = conv_net(img, label)
if not is_test:
opt = fluid.optimizer.Adam(learning_rate=0.001)
opt.minimize(loss)
return [img, label], loss
main = fluid.Program()
startup = fluid.Program()
test_program = fluid.Program()
import random
random.seed(0)
np.random.seed(0)
feeds, loss = build_program(main, startup, False)
build_program(test_program, startup, True)
test_program = test_program.clone(for_test=True)
quant_type = 'range_abs_max' # 'range_abs_max' or 'abs_max'
quant_transpiler = QuantizeTranspiler(
activation_quantize_type=quant_type)
quant_transpiler.training_transpile(main, startup)
quant_transpiler.training_transpile(test_program, startup)
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
iters = 5
batch_size = 8
class_num = 10
exe.run(startup)
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.mnist.train(), buf_size=500),
batch_size=batch_size)
test_reader = paddle.batch(
paddle.dataset.mnist.test(), batch_size=batch_size)
feeder = fluid.DataFeeder(feed_list=feeds, place=place)
with fluid.program_guard(main):
for _ in range(iters):
data = next(train_reader())
loss_v = exe.run(program=main,
feed=feeder.feed(data),
fetch_list=[loss])
with fluid.program_guard(test_program):
test_data = next(test_reader())
w_var = fluid.framework._get_var('conv2d_1.w_0.quantized',
test_program)
# Testing during training
test_loss1, w_quant = exe.run(program=test_program,
feed=feeder.feed(test_data),
fetch_list=[loss, w_var])
# Freeze program for inference, but the weight of fc/conv is still float type.
quant_transpiler.freeze_program(test_program, place)
test_loss2, = exe.run(program=test_program,
feed=feeder.feed(test_data),
fetch_list=[loss])
self.assertAlmostEqual(test_loss1, test_loss2, delta=5e-3)
w_freeze = np.array(fluid.global_scope().find_var('conv2d_1.w_0')
.get_tensor())
# fail: -432.0 != -433.0, this is due to the calculation precision
#self.assertAlmostEqual(np.sum(w_freeze), np.sum(w_quant))
# Convert parameter to 8-bit.
quant_transpiler.convert_to_int8(test_program, place)
# Save the 8-bit parameter and model file.
fluid.io.save_inference_model('model_8bit', ['image', 'label'],
[loss], exe, test_program)
# Test whether the 8-bit parameter and model file can be loaded successfully.
[infer, feed, fetch] = fluid.io.load_inference_model('model_8bit',
exe)
# Check the loaded 8-bit weight.
w_8bit = np.array(fluid.global_scope().find_var('conv2d_1.w_0.int8')
.get_tensor())
self.assertEqual(w_8bit.dtype, np.int8)
self.assertEqual(np.sum(w_8bit), np.sum(w_freeze))
def not_test_freeze_program_cuda(self):
if fluid.core.is_compiled_with_cuda():
with fluid.unique_name.guard():
self.freeze_program(True, seed=1)
def not_test_freeze_program_cpu(self):
with fluid.unique_name.guard():
self.freeze_program(False, seed=2)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
xuui/nerfpi | nerf-sight.py | 1 | 1798 |
#!/usr/bin/python
#coding=utf-8
#author:xuhel
import RPi.GPIO as GPIO
import time
import sys,os
import Adafruit_GPIO.SPI as SPI
import Adafruit_SSD1306
from PIL import Image
GPIO.setmode(GPIO.BCM)
resPath='/home/nerfpi/Resources/'
#define GPIO pin
pin_btn=21
GPIO.setup(pin_btn, GPIO.IN, pull_up_down=GPIO.PUD_UP)
RST = 5
DC = 3
disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST)
disp.begin()
disp.clear()
disp.display()
sight_mod=0
def oledshow(img):
image=Image.open(resPath+img).resize((disp.width, disp.height),Image.ANTIALIAS).convert('1')
disp.image(image)
disp.display()
def cleanup():
print('clean up')
GPIO.cleanup()
def handleSIGTERM(signum, frame):
cleanup()
def onPress(channel):
global sight_mod
# print('pressed')
sight_mod+=1
if sight_mod >11:
sight_mod=1
    # modes 1-10 show the matching reticle image, mode 11 shows the logo
    if 1 <= sight_mod <= 10:
        oledshow('nerf%02d.png' % sight_mod)
    elif sight_mod == 11:
        oledshow('logo2.png')
oledshow('logo.png')
GPIO.add_event_detect(pin_btn, GPIO.FALLING, callback=onPress, bouncetime=500)
try:
while True:
#if sight_mod==1:
#print "Mode:[1]"
time.sleep(1)
except KeyboardInterrupt:
    print('User pressed Ctrl+C, exiting')
finally:
cleanup()
| gpl-3.0 |
maciejkula/scipy | scipy/optimize/tests/test_linprog.py | 9 | 11921 |
"""
Unit test for Linear Programming via Simplex Algorithm.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_, assert_array_almost_equal, assert_allclose,
assert_almost_equal, assert_raises, assert_equal, run_module_suite)
from scipy.optimize import linprog, OptimizeWarning
from scipy.lib._numpy_compat import _assert_warns
def lpgen_2d(m,n):
""" -> A b c LP test: m*n vars, m+n constraints
row sums == n/m, col sums == 1
https://gist.github.com/denis-bz/8647461
"""
np.random.seed(0)
c = - np.random.exponential(size=(m,n))
Arow = np.zeros((m,m*n))
brow = np.zeros(m)
for j in range(m):
j1 = j + 1
Arow[j,j*n:j1*n] = 1
brow[j] = n/m
Acol = np.zeros((n,m*n))
bcol = np.zeros(n)
for j in range(n):
j1 = j + 1
Acol[j,j::n] = 1
bcol[j] = 1
A = np.vstack((Arow,Acol))
b = np.hstack((brow,bcol))
return A, b, c.ravel()
def _assert_infeasible(res):
# res: linprog result object
assert_(not res.success, "incorrectly reported success")
assert_equal(res.status, 2, "failed to report infeasible status")
def _assert_unbounded(res):
# res: linprog result object
assert_(not res.success, "incorrectly reported success")
assert_equal(res.status, 3, "failed to report unbounded status")
def _assert_success(res, desired_fun=None, desired_x=None):
# res: linprog result object
# desired_fun: desired objective function value or None
# desired_x: desired solution or None
assert_(res.success)
assert_equal(res.status, 0)
if desired_fun is not None:
assert_allclose(res.fun, desired_fun,
err_msg="converged to an unexpected objective value")
if desired_x is not None:
assert_allclose(res.x, desired_x,
err_msg="converged to an unexpected solution")
def test_linprog_upper_bound_constraints():
# Maximize a linear function subject to only linear upper bound constraints.
# http://www.dam.brown.edu/people/huiwang/classes/am121/Archive/simplex_121_c.pdf
c = np.array([3,2])*-1 # maximize
A_ub = [[2,1],
[1,1],
[1,0]]
b_ub = [10,8,4]
res = (linprog(c,A_ub=A_ub,b_ub=b_ub))
_assert_success(res, desired_fun=-18, desired_x=[2, 6])
def test_linprog_mixed_constraints():
# Minimize linear function subject to non-negative variables.
# http://www.statslab.cam.ac.uk/~ff271/teaching/opt/notes/notes8.pdf
c = [6,3]
A_ub = [[0, 3],
[-1,-1],
[-2, 1]]
b_ub = [2,-1,-1]
res = linprog(c,A_ub=A_ub,b_ub=b_ub)
_assert_success(res, desired_fun=5, desired_x=[2/3, 1/3])
def test_linprog_cyclic_recovery():
    # Test linprog's recovery from cycling using the Klee-Minty problem
# Klee-Minty http://www.math.ubc.ca/~israel/m340/kleemin3.pdf
c = np.array([100,10,1])*-1 # maximize
A_ub = [[1, 0, 0],
[20, 1, 0],
[200,20, 1]]
b_ub = [1,100,10000]
res = linprog(c,A_ub=A_ub,b_ub=b_ub)
_assert_success(res, desired_x=[0, 0, 10000])
def test_linprog_cyclic_bland():
# Test the effect of Bland's rule on a cycling problem
c = np.array([-10, 57, 9, 24.])
A_ub = np.array([[0.5, -5.5, -2.5, 9],
[0.5, -1.5, -0.5, 1],
[1, 0, 0, 0]])
b_ub = [0, 0, 1]
res = linprog(c, A_ub=A_ub, b_ub=b_ub, options=dict(maxiter=100))
assert_(not res.success)
res = linprog(c, A_ub=A_ub, b_ub=b_ub,
options=dict(maxiter=100, bland=True,))
_assert_success(res, desired_x=[1, 0, 1, 0])
def test_linprog_unbounded():
# Test linprog response to an unbounded problem
c = np.array([1,1])*-1 # maximize
A_ub = [[-1,1],
[-1,-1]]
b_ub = [-1,-2]
res = linprog(c,A_ub=A_ub,b_ub=b_ub)
_assert_unbounded(res)
def test_linprog_infeasible():
    # Test linprog response to an infeasible problem
c = [-1,-1]
A_ub = [[1,0],
[0,1],
[-1,-1]]
b_ub = [2,2,-5]
res = linprog(c,A_ub=A_ub,b_ub=b_ub)
_assert_infeasible(res)
def test_nontrivial_problem():
# Test linprog for a problem involving all constraint types,
# negative resource limits, and rounding issues.
c = [-1,8,4,-6]
A_ub = [[-7,-7,6,9],
[1,-1,-3,0],
[10,-10,-7,7],
[6,-1,3,4]]
b_ub = [-3,6,-6,6]
A_eq = [[-10,1,1,-8]]
b_eq = [-4]
res = linprog(c,A_ub=A_ub,b_ub=b_ub,A_eq=A_eq,b_eq=b_eq)
_assert_success(res, desired_fun=7083/1391,
desired_x=[101/1391,1462/1391,0,752/1391])
def test_negative_variable():
# Test linprog with a problem with one unbounded variable and
# another with a negative lower bound.
c = np.array([-1,4])*-1 # maximize
A_ub = np.array([[-3,1],
[1, 2]], dtype=np.float64)
A_ub_orig = A_ub.copy()
b_ub = [6,4]
x0_bounds = (-np.inf,np.inf)
x1_bounds = (-3,np.inf)
res = linprog(c,A_ub=A_ub,b_ub=b_ub,bounds=(x0_bounds,x1_bounds))
assert_equal(A_ub, A_ub_orig) # user input not overwritten
_assert_success(res, desired_fun=-80/7, desired_x=[-8/7, 18/7])
def test_large_problem():
# Test linprog simplex with a rather large problem (400 variables,
# 40 constraints) generated by https://gist.github.com/denis-bz/8647461
A,b,c = lpgen_2d(20,20)
res = linprog(c,A_ub=A,b_ub=b)
_assert_success(res, desired_fun=-64.049494229)
def test_network_flow():
# A network flow problem with supply and demand at nodes
# and with costs along directed edges.
# https://www.princeton.edu/~rvdb/542/lectures/lec10.pdf
c = [2, 4, 9, 11, 4, 3, 8, 7, 0, 15, 16, 18]
n, p = -1, 1
A_eq = [
[n, n, p, 0, p, 0, 0, 0, 0, p, 0, 0],
[p, 0, 0, p, 0, p, 0, 0, 0, 0, 0, 0],
[0, 0, n, n, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, p, p, 0, 0, p, 0],
[0, 0, 0, 0, n, n, n, 0, p, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, n, n, 0, 0, p],
[0, 0, 0, 0, 0, 0, 0, 0, 0, n, n, n]]
b_eq = [0, 19, -16, 33, 0, 0, -36]
res = linprog(c=c, A_eq=A_eq, b_eq=b_eq)
_assert_success(res, desired_fun=755)
def test_network_flow_limited_capacity():
# A network flow problem with supply and demand at nodes
# and with costs and capacities along directed edges.
# http://blog.sommer-forst.de/2013/04/10/
cost = [2, 2, 1, 3, 1]
bounds = [
[0, 4],
[0, 2],
[0, 2],
[0, 3],
[0, 5]]
n, p = -1, 1
A_eq = [
[n, n, 0, 0, 0],
[p, 0, n, n, 0],
[0, p, p, 0, n],
[0, 0, 0, p, p]]
b_eq = [-4, 0, 0, 4]
res = linprog(c=cost, A_eq=A_eq, b_eq=b_eq, bounds=bounds)
_assert_success(res, desired_fun=14)
def test_simplex_algorithm_wikipedia_example():
# http://en.wikipedia.org/wiki/Simplex_algorithm#Example
Z = [-2, -3, -4]
A_ub = [
[3, 2, 1],
[2, 5, 3]]
b_ub = [10, 15]
res = linprog(c=Z, A_ub=A_ub, b_ub=b_ub)
_assert_success(res, desired_fun=-20)
def test_enzo_example():
# http://projects.scipy.org/scipy/attachment/ticket/1252/lp2.py
#
# Translated from Octave code at:
# http://www.ecs.shimane-u.ac.jp/~kyoshida/lpeng.htm
# and placed under MIT licence by Enzo Michelangeli
# with permission explicitly granted by the original author,
# Prof. Kazunobu Yoshida
c = [4, 8, 3, 0, 0, 0]
A_eq = [
[2, 5, 3, -1, 0, 0],
[3, 2.5, 8, 0, -1, 0],
[8, 10, 4, 0, 0, -1]]
b_eq = [185, 155, 600]
res = linprog(c=c, A_eq=A_eq, b_eq=b_eq)
_assert_success(res, desired_fun=317.5,
desired_x=[66.25, 0, 17.5, 0, 183.75, 0])
def test_enzo_example_b():
# rescued from https://github.com/scipy/scipy/pull/218
c = [2.8, 6.3, 10.8, -2.8, -6.3, -10.8]
A_eq = [
[-1, -1, -1, 0, 0, 0],
[0, 0, 0, 1, 1, 1],
[1, 0, 0, 1, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 1]]
b_eq = [-0.5, 0.4, 0.3, 0.3, 0.3]
res = linprog(c=c, A_eq=A_eq, b_eq=b_eq)
_assert_success(res, desired_fun=-1.77,
desired_x=[0.3, 0.2, 0.0, 0.0, 0.1, 0.3])
def test_enzo_example_c_with_degeneracy():
# rescued from https://github.com/scipy/scipy/pull/218
m = 20
c = -np.ones(m)
tmp = 2*np.pi*np.arange(1, m+1)/(m+1)
A_eq = np.vstack((np.cos(tmp)-1, np.sin(tmp)))
b_eq = [0, 0]
res = linprog(c=c, A_eq=A_eq, b_eq=b_eq)
_assert_success(res, desired_fun=0, desired_x=np.zeros(m))
def test_enzo_example_c_with_unboundedness():
# rescued from https://github.com/scipy/scipy/pull/218
m = 50
c = -np.ones(m)
tmp = 2*np.pi*np.arange(m)/(m+1)
A_eq = np.vstack((np.cos(tmp)-1, np.sin(tmp)))
b_eq = [0, 0]
res = linprog(c=c, A_eq=A_eq, b_eq=b_eq)
_assert_unbounded(res)
def test_enzo_example_c_with_infeasibility():
# rescued from https://github.com/scipy/scipy/pull/218
m = 50
c = -np.ones(m)
tmp = 2*np.pi*np.arange(m)/(m+1)
A_eq = np.vstack((np.cos(tmp)-1, np.sin(tmp)))
b_eq = [1, 1]
res = linprog(c=c, A_eq=A_eq, b_eq=b_eq)
_assert_infeasible(res)
def test_callback():
# Check that callback is as advertised
callback_complete = [False]
last_xk = []
def cb(xk, **kwargs):
kwargs.pop('tableau')
assert_(isinstance(kwargs.pop('phase'), int))
assert_(isinstance(kwargs.pop('nit'), int))
i, j = kwargs.pop('pivot')
assert_(np.isscalar(i))
assert_(np.isscalar(j))
basis = kwargs.pop('basis')
assert_(isinstance(basis, np.ndarray))
assert_(basis.dtype == np.int_)
complete = kwargs.pop('complete')
assert_(isinstance(complete, bool))
if complete:
last_xk.append(xk)
callback_complete[0] = True
else:
assert_(not callback_complete[0])
# no more kwargs
assert_(not kwargs)
c = np.array([-3,-2])
A_ub = [[2,1], [1,1], [1,0]]
b_ub = [10,8,4]
res = linprog(c,A_ub=A_ub,b_ub=b_ub, callback=cb)
assert_(callback_complete[0])
assert_allclose(last_xk[0], res.x)
def test_unknown_options_or_solver():
c = np.array([-3,-2])
A_ub = [[2,1], [1,1], [1,0]]
b_ub = [10,8,4]
_assert_warns(OptimizeWarning, linprog,
c, A_ub=A_ub, b_ub=b_ub, options=dict(spam='42'))
assert_raises(ValueError, linprog,
c, A_ub=A_ub, b_ub=b_ub, method='ekki-ekki-ekki')
def test_no_constraints():
res = linprog([-1, -2])
assert_equal(res.x, [0, 0])
_assert_unbounded(res)
def test_simple_bounds():
res = linprog([1, 2], bounds=(1, 2))
_assert_success(res, desired_x=[1, 1])
res = linprog([1, 2], bounds=[(1, 2), (1, 2)])
_assert_success(res, desired_x=[1, 1])
def test_invalid_inputs():
for bad_bound in [[(5, 0), (1, 2), (3, 4)],
[(1, 2), (3, 4)],
[(1, 2), (3, 4), (3, 4, 5)],
[(1, 2), (np.inf, np.inf), (3, 4)],
[(1, 2), (-np.inf, -np.inf), (3, 4)],
]:
assert_raises(ValueError, linprog, [1, 2, 3], bounds=bad_bound)
assert_raises(ValueError, linprog, [1,2], A_ub=[[1,2]], b_ub=[1,2])
assert_raises(ValueError, linprog, [1,2], A_ub=[[1]], b_ub=[1])
assert_raises(ValueError, linprog, [1,2], A_eq=[[1,2]], b_eq=[1,2])
assert_raises(ValueError, linprog, [1,2], A_eq=[[1]], b_eq=[1])
assert_raises(ValueError, linprog, [1,2], A_eq=[1], b_eq=1)
assert_raises(ValueError, linprog, [1,2], A_ub=np.zeros((1,1,3)), b_eq=1)
if __name__ == '__main__':
run_module_suite()
| bsd-3-clause |
sandvine/horizon | openstack_dashboard/dashboards/settings/user/views.py | 5 | 1896 |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import forms
from horizon.utils import functions as utils
from openstack_dashboard.dashboards.settings.user import forms as user_forms
class UserSettingsView(forms.ModalFormView):
form_class = user_forms.UserSettingsForm
form_id = "user_settings_modal"
modal_id = "user_settings_modal"
page_title = _("User Settings")
submit_label = _("Save")
submit_url = reverse_lazy("horizon:settings:user:index")
template_name = 'settings/user/settings.html'
def get_initial(self):
return {
'language': self.request.session.get(
settings.LANGUAGE_COOKIE_NAME,
self.request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME,
self.request.LANGUAGE_CODE)),
'timezone': self.request.session.get(
'django_timezone',
self.request.COOKIES.get('django_timezone', 'UTC')),
'pagesize': utils.get_page_size(self.request),
'instance_log_length': utils.get_log_length(self.request)}
def form_valid(self, form):
return form.handle(self.request, form.cleaned_data)
| apache-2.0 |
Jet-Streaming/gyp | test/mac/gyptest-strip-default.py | 1 | 2543 |
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that the default STRIP_STYLEs match between different generators.
"""
import TestGyp
import re
import subprocess
import sys
import time
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
CHDIR='strip'
test.run_gyp('test-defaults.gyp', chdir=CHDIR)
test.build('test-defaults.gyp', test.ALL, chdir=CHDIR)
  # Lightweight check whether stripping was done.
def OutPath(s):
return test.built_file_path(s, chdir=CHDIR)
def CheckNsyms(p, o_expected):
proc = subprocess.Popen(['nm', '-aU', p], stdout=subprocess.PIPE)
o = proc.communicate()[0]
# Filter out mysterious "00 0000 OPT radr://5614542" symbol which
# is apparently only printed on the bots (older toolchain?).
# Yes, "radr", not "rdar".
o = ''.join(filter(lambda s: 'radr://5614542' not in s, o.splitlines(True)))
o = o.replace('A', 'T')
o = re.sub(r'^[a-fA-F0-9]+', 'XXXXXXXX', o, flags=re.MULTILINE)
assert not proc.returncode
if o != o_expected:
print 'Stripping: Expected symbols """\n%s""", got """\n%s"""' % (
o_expected, o)
test.fail_test()
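  # Example (assumed typical `nm -aU` output, not from the original test): a raw
  # line like '0000000000001000 T _the_function' is normalized above to
  # 'XXXXXXXX T _the_function' before being compared against o_expected.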
CheckNsyms(OutPath('libsingle_dylib.dylib'),
"""\
XXXXXXXX S _ci
XXXXXXXX S _i
XXXXXXXX T _the_function
XXXXXXXX t _the_hidden_function
XXXXXXXX T _the_used_function
XXXXXXXX T _the_visible_function
""")
CheckNsyms(OutPath('single_so.so'),
"""\
XXXXXXXX S _ci
XXXXXXXX S _i
XXXXXXXX T _the_function
XXXXXXXX t _the_hidden_function
XXXXXXXX T _the_used_function
XXXXXXXX T _the_visible_function
""")
CheckNsyms(OutPath('single_exe'),
"""\
XXXXXXXX T __mh_execute_header
""")
CheckNsyms(test.built_file_path(
'bundle_dylib.framework/Versions/A/bundle_dylib', chdir=CHDIR),
"""\
XXXXXXXX S _ci
XXXXXXXX S _i
XXXXXXXX T _the_function
XXXXXXXX t _the_hidden_function
XXXXXXXX T _the_used_function
XXXXXXXX T _the_visible_function
""")
CheckNsyms(test.built_file_path(
'bundle_so.bundle/Contents/MacOS/bundle_so', chdir=CHDIR),
"""\
XXXXXXXX S _ci
XXXXXXXX S _i
XXXXXXXX T _the_function
XXXXXXXX T _the_used_function
XXXXXXXX T _the_visible_function
""")
CheckNsyms(test.built_file_path(
'bundle_exe.app/Contents/MacOS/bundle_exe', chdir=CHDIR),
"""\
XXXXXXXX T __mh_execute_header
""")
test.pass_test()
|
bsd-3-clause
|
cauchycui/scikit-learn
|
sklearn/preprocessing/imputation.py
|
208
|
14158
|
# Authors: Nicolas Tresegnie <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
import numpy.ma as ma
from scipy import sparse
from scipy import stats
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils import as_float_array
from ..utils.fixes import astype
from ..utils.sparsefuncs import _get_median
from ..utils.validation import check_is_fitted
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'Imputer',
]
def _get_mask(X, value_to_mask):
"""Compute the boolean mask X == missing_values."""
if value_to_mask == "NaN" or np.isnan(value_to_mask):
return np.isnan(X)
else:
return X == value_to_mask
def _most_frequent(array, extra_value, n_repeat):
"""Compute the most frequent value in a 1d array extended with
[extra_value] * n_repeat, where extra_value is assumed to be not part
of the array."""
# Compute the most frequent value in array only
if array.size > 0:
mode = stats.mode(array)
most_frequent_value = mode[0][0]
most_frequent_count = mode[1][0]
else:
most_frequent_value = 0
most_frequent_count = 0
# Compare to array + [extra_value] * n_repeat
if most_frequent_count == 0 and n_repeat == 0:
return np.nan
elif most_frequent_count < n_repeat:
return extra_value
elif most_frequent_count > n_repeat:
return most_frequent_value
elif most_frequent_count == n_repeat:
        # Break the tie; copy the behaviour of scipy.stats.mode
if most_frequent_value < extra_value:
return most_frequent_value
else:
return extra_value
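# Illustrative sketch (not in the original module) of the tie-breaking above,
# which mirrors scipy.stats.mode by preferring the smaller value:
#   _most_frequent(np.array([1, 1, 2]), extra_value=0, n_repeat=0) -> 1
#   _most_frequent(np.array([2, 2]), extra_value=1, n_repeat=2)    -> 1  (tie)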
class Imputer(BaseEstimator, TransformerMixin):
"""Imputation transformer for completing missing values.
Read more in the :ref:`User Guide <imputation>`.
Parameters
----------
missing_values : integer or "NaN", optional (default="NaN")
The placeholder for the missing values. All occurrences of
`missing_values` will be imputed. For missing values encoded as np.nan,
use the string value "NaN".
strategy : string, optional (default="mean")
The imputation strategy.
- If "mean", then replace missing values using the mean along
the axis.
- If "median", then replace missing values using the median along
the axis.
- If "most_frequent", then replace missing using the most frequent
value along the axis.
axis : integer, optional (default=0)
The axis along which to impute.
- If `axis=0`, then impute along columns.
- If `axis=1`, then impute along rows.
verbose : integer, optional (default=0)
Controls the verbosity of the imputer.
copy : boolean, optional (default=True)
If True, a copy of X will be created. If False, imputation will
be done in-place whenever possible. Note that, in the following cases,
a new copy will always be made, even if `copy=False`:
- If X is not an array of floating values;
- If X is sparse and `missing_values=0`;
- If `axis=0` and X is encoded as a CSR matrix;
- If `axis=1` and X is encoded as a CSC matrix.
Attributes
----------
statistics_ : array of shape (n_features,)
The imputation fill value for each feature if axis == 0.
Notes
-----
- When ``axis=0``, columns which only contained missing values at `fit`
are discarded upon `transform`.
- When ``axis=1``, an exception is raised if there are rows for which it is
not possible to fill in the missing values (e.g., because they only
contain missing values).
"""
def __init__(self, missing_values="NaN", strategy="mean",
axis=0, verbose=0, copy=True):
self.missing_values = missing_values
self.strategy = strategy
self.axis = axis
self.verbose = verbose
self.copy = copy
def fit(self, X, y=None):
"""Fit the imputer on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data, where ``n_samples`` is the number of samples and
``n_features`` is the number of features.
Returns
-------
self : object
Returns self.
"""
# Check parameters
allowed_strategies = ["mean", "median", "most_frequent"]
if self.strategy not in allowed_strategies:
raise ValueError("Can only use these strategies: {0} "
" got strategy={1}".format(allowed_strategies,
self.strategy))
if self.axis not in [0, 1]:
raise ValueError("Can only impute missing values on axis 0 and 1, "
" got axis={0}".format(self.axis))
# Since two different arrays can be provided in fit(X) and
# transform(X), the imputation data will be computed in transform()
# when the imputation is done per sample (i.e., when axis=1).
if self.axis == 0:
X = check_array(X, accept_sparse='csc', dtype=np.float64,
force_all_finite=False)
if sparse.issparse(X):
self.statistics_ = self._sparse_fit(X,
self.strategy,
self.missing_values,
self.axis)
else:
self.statistics_ = self._dense_fit(X,
self.strategy,
self.missing_values,
self.axis)
return self
def _sparse_fit(self, X, strategy, missing_values, axis):
"""Fit the transformer on sparse data."""
# Imputation is done "by column", so if we want to do it
# by row we only need to convert the matrix to csr format.
if axis == 1:
X = X.tocsr()
else:
X = X.tocsc()
# Count the zeros
if missing_values == 0:
n_zeros_axis = np.zeros(X.shape[not axis], dtype=int)
else:
n_zeros_axis = X.shape[axis] - np.diff(X.indptr)
# Mean
if strategy == "mean":
if missing_values != 0:
n_non_missing = n_zeros_axis
# Mask the missing elements
mask_missing_values = _get_mask(X.data, missing_values)
mask_valids = np.logical_not(mask_missing_values)
# Sum only the valid elements
new_data = X.data.copy()
new_data[mask_missing_values] = 0
X = sparse.csc_matrix((new_data, X.indices, X.indptr),
copy=False)
sums = X.sum(axis=0)
# Count the elements != 0
mask_non_zeros = sparse.csc_matrix(
(mask_valids.astype(np.float64),
X.indices,
X.indptr), copy=False)
s = mask_non_zeros.sum(axis=0)
n_non_missing = np.add(n_non_missing, s)
else:
sums = X.sum(axis=axis)
n_non_missing = np.diff(X.indptr)
# Ignore the error, columns with a np.nan statistics_
# are not an error at this point. These columns will
# be removed in transform
with np.errstate(all="ignore"):
return np.ravel(sums) / np.ravel(n_non_missing)
# Median + Most frequent
else:
# Remove the missing values, for each column
columns_all = np.hsplit(X.data, X.indptr[1:-1])
mask_missing_values = _get_mask(X.data, missing_values)
mask_valids = np.hsplit(np.logical_not(mask_missing_values),
X.indptr[1:-1])
# astype necessary for bug in numpy.hsplit before v1.9
columns = [col[astype(mask, bool, copy=False)]
for col, mask in zip(columns_all, mask_valids)]
# Median
if strategy == "median":
median = np.empty(len(columns))
for i, column in enumerate(columns):
median[i] = _get_median(column, n_zeros_axis[i])
return median
# Most frequent
elif strategy == "most_frequent":
most_frequent = np.empty(len(columns))
for i, column in enumerate(columns):
most_frequent[i] = _most_frequent(column,
0,
n_zeros_axis[i])
return most_frequent
def _dense_fit(self, X, strategy, missing_values, axis):
"""Fit the transformer on dense data."""
X = check_array(X, force_all_finite=False)
mask = _get_mask(X, missing_values)
masked_X = ma.masked_array(X, mask=mask)
# Mean
if strategy == "mean":
mean_masked = np.ma.mean(masked_X, axis=axis)
# Avoid the warning "Warning: converting a masked element to nan."
mean = np.ma.getdata(mean_masked)
mean[np.ma.getmask(mean_masked)] = np.nan
return mean
# Median
elif strategy == "median":
if tuple(int(v) for v in np.__version__.split('.')[:2]) < (1, 5):
                # In old versions of numpy, calling a median on an array
                # containing nans returns nan. This differs in
                # recent versions of numpy, which we want to mimic
masked_X.mask = np.logical_or(masked_X.mask,
np.isnan(X))
median_masked = np.ma.median(masked_X, axis=axis)
# Avoid the warning "Warning: converting a masked element to nan."
median = np.ma.getdata(median_masked)
median[np.ma.getmaskarray(median_masked)] = np.nan
return median
# Most frequent
elif strategy == "most_frequent":
            # scipy.stats.mstats.mode cannot be used because it will not work
            # properly if the first element is masked and its frequency
            # is equal to the frequency of the most frequent valid element
            # See https://github.com/scipy/scipy/issues/2636
            # To be able to access the elements by columns
if axis == 0:
X = X.transpose()
mask = mask.transpose()
most_frequent = np.empty(X.shape[0])
for i, (row, row_mask) in enumerate(zip(X[:], mask[:])):
row_mask = np.logical_not(row_mask).astype(np.bool)
row = row[row_mask]
most_frequent[i] = _most_frequent(row, np.nan, 0)
return most_frequent
def transform(self, X):
"""Impute all missing values in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The input data to complete.
"""
if self.axis == 0:
check_is_fitted(self, 'statistics_')
# Copy just once
X = as_float_array(X, copy=self.copy, force_all_finite=False)
# Since two different arrays can be provided in fit(X) and
# transform(X), the imputation data need to be recomputed
# when the imputation is done per sample
if self.axis == 1:
X = check_array(X, accept_sparse='csr', force_all_finite=False,
copy=False)
if sparse.issparse(X):
statistics = self._sparse_fit(X,
self.strategy,
self.missing_values,
self.axis)
else:
statistics = self._dense_fit(X,
self.strategy,
self.missing_values,
self.axis)
else:
X = check_array(X, accept_sparse='csc', force_all_finite=False,
copy=False)
statistics = self.statistics_
# Delete the invalid rows/columns
invalid_mask = np.isnan(statistics)
valid_mask = np.logical_not(invalid_mask)
valid_statistics = statistics[valid_mask]
valid_statistics_indexes = np.where(valid_mask)[0]
missing = np.arange(X.shape[not self.axis])[invalid_mask]
if self.axis == 0 and invalid_mask.any():
if self.verbose:
warnings.warn("Deleting features without "
"observed values: %s" % missing)
X = X[:, valid_statistics_indexes]
elif self.axis == 1 and invalid_mask.any():
raise ValueError("Some rows only contain "
"missing values: %s" % missing)
# Do actual imputation
if sparse.issparse(X) and self.missing_values != 0:
mask = _get_mask(X.data, self.missing_values)
indexes = np.repeat(np.arange(len(X.indptr) - 1, dtype=np.int),
np.diff(X.indptr))[mask]
X.data[mask] = astype(valid_statistics[indexes], X.dtype,
copy=False)
else:
if sparse.issparse(X):
X = X.toarray()
mask = _get_mask(X, self.missing_values)
n_missing = np.sum(mask, axis=self.axis)
values = np.repeat(valid_statistics, n_missing)
if self.axis == 0:
coordinates = np.where(mask.transpose())[::-1]
else:
coordinates = mask
X[coordinates] = values
return X
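# Minimal usage sketch (hypothetical data, not part of the original file):
#   import numpy as np
#   X = np.array([[1., 2.], [np.nan, 3.], [7., 6.]])
#   imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
#   imp.fit(X).transform(X)   # the NaN in column 0 becomes (1 + 7) / 2 = 4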
|
bsd-3-clause
|
kkintaro/termite-data-server
|
web2py/gluon/validators.py
|
9
|
127935
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <[email protected]>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Thanks to ga2arch for help with IS_IN_DB and IS_NOT_IN_DB on GAE
"""
import os
import re
import datetime
import time
import cgi
import urllib
import struct
import decimal
import unicodedata
from cStringIO import StringIO
from gluon.utils import simple_hash, web2py_uuid, DIGEST_ALG_BY_SIZE
from gluon.dal import FieldVirtual, FieldMethod
regex_isint = re.compile('^[+-]?\d+$')
JSONErrors = (NameError, TypeError, ValueError, AttributeError,
KeyError)
try:
import json as simplejson
except ImportError:
from gluon.contrib import simplejson
from gluon.contrib.simplejson.decoder import JSONDecodeError
JSONErrors += (JSONDecodeError,)
__all__ = [
'ANY_OF',
'CLEANUP',
'CRYPT',
'IS_ALPHANUMERIC',
'IS_DATE_IN_RANGE',
'IS_DATE',
'IS_DATETIME_IN_RANGE',
'IS_DATETIME',
'IS_DECIMAL_IN_RANGE',
'IS_EMAIL',
'IS_LIST_OF_EMAILS',
'IS_EMPTY_OR',
'IS_EXPR',
'IS_FLOAT_IN_RANGE',
'IS_IMAGE',
'IS_IN_DB',
'IS_IN_SET',
'IS_INT_IN_RANGE',
'IS_IPV4',
'IS_IPV6',
'IS_IPADDRESS',
'IS_LENGTH',
'IS_LIST_OF',
'IS_LOWER',
'IS_MATCH',
'IS_EQUAL_TO',
'IS_NOT_EMPTY',
'IS_NOT_IN_DB',
'IS_NULL_OR',
'IS_SLUG',
'IS_STRONG',
'IS_TIME',
'IS_UPLOAD_FILENAME',
'IS_UPPER',
'IS_URL',
'IS_JSON',
]
try:
from globals import current
have_current = True
except ImportError:
have_current = False
def translate(text):
if text is None:
return None
elif isinstance(text, (str, unicode)) and have_current:
if hasattr(current, 'T'):
return str(current.T(text))
return str(text)
def options_sorter(x, y):
return (str(x[1]).upper() > str(y[1]).upper() and 1) or -1
class Validator(object):
"""
Root for all validators, mainly for documentation purposes.
Validators are classes used to validate input fields (including forms
generated from database tables).
Here is an example of using a validator with a FORM::
INPUT(_name='a', requires=IS_INT_IN_RANGE(0, 10))
Here is an example of how to require a validator for a table field::
db.define_table('person', SQLField('name'))
db.person.name.requires=IS_NOT_EMPTY()
Validators are always assigned using the requires attribute of a field. A
field can have a single validator or multiple validators. Multiple
validators are made part of a list::
db.person.name.requires=[IS_NOT_EMPTY(), IS_NOT_IN_DB(db, 'person.id')]
Validators are called by the function accepts on a FORM or other HTML
helper object that contains a form. They are always called in the order in
which they are listed.
Built-in validators have constructors that take the optional argument error
message which allows you to change the default error message.
Here is an example of a validator on a database table::
db.person.name.requires=IS_NOT_EMPTY(error_message=T('Fill this'))
where we have used the translation operator T to allow for
internationalization.
Notice that default error messages are not translated.
"""
def formatter(self, value):
"""
For some validators returns a formatted version (matching the validator)
of value. Otherwise just returns the value.
"""
return value
def __call__(self, value):
raise NotImplementedError
return (value, None)
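# Illustrative sketch (not in the original source): a custom validator only
# needs to honour the (value, error_or_None) contract described above.
class IS_EVEN(Validator):
    "hypothetical example validator: accepts even integers, rejects the rest"
    def __init__(self, error_message='Enter an even number'):
        self.error_message = error_message
    def __call__(self, value):
        try:
            if int(value) % 2 == 0:
                return (int(value), None)
        except (ValueError, TypeError):
            pass
        return (value, translate(self.error_message))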
class IS_MATCH(Validator):
"""
example::
INPUT(_type='text', _name='name', requires=IS_MATCH('.+'))
the argument of IS_MATCH is a regular expression::
>>> IS_MATCH('.+')('hello')
('hello', None)
>>> IS_MATCH('hell')('hello')
('hello', None)
>>> IS_MATCH('hell.*', strict=False)('hello')
('hello', None)
>>> IS_MATCH('hello')('shello')
('shello', 'invalid expression')
>>> IS_MATCH('hello', search=True)('shello')
('shello', None)
>>> IS_MATCH('hello', search=True, strict=False)('shellox')
('shellox', None)
>>> IS_MATCH('.*hello.*', search=True, strict=False)('shellox')
('shellox', None)
>>> IS_MATCH('.+')('')
('', 'invalid expression')
"""
def __init__(self, expression, error_message='Invalid expression',
strict=False, search=False, extract=False,
is_unicode=False):
if strict or not search:
if not expression.startswith('^'):
expression = '^(%s)' % expression
if strict:
if not expression.endswith('$'):
expression = '(%s)$' % expression
if is_unicode:
if not isinstance(expression,unicode):
expression = expression.decode('utf8')
self.regex = re.compile(expression,re.UNICODE)
else:
self.regex = re.compile(expression)
self.error_message = error_message
self.extract = extract
self.is_unicode = is_unicode
def __call__(self, value):
if self.is_unicode and not isinstance(value,unicode):
match = self.regex.search(str(value).decode('utf8'))
else:
match = self.regex.search(str(value))
if match is not None:
return (self.extract and match.group() or value, None)
return (value, translate(self.error_message))
class IS_EQUAL_TO(Validator):
"""
example::
INPUT(_type='text', _name='password')
INPUT(_type='text', _name='password2',
requires=IS_EQUAL_TO(request.vars.password))
the argument of IS_EQUAL_TO is a string
>>> IS_EQUAL_TO('aaa')('aaa')
('aaa', None)
>>> IS_EQUAL_TO('aaa')('aab')
('aab', 'no match')
"""
def __init__(self, expression, error_message='No match'):
self.expression = expression
self.error_message = error_message
def __call__(self, value):
if value == self.expression:
return (value, None)
return (value, translate(self.error_message))
class IS_EXPR(Validator):
"""
example::
INPUT(_type='text', _name='name',
requires=IS_EXPR('5 < int(value) < 10'))
    the argument of IS_EXPR must be a python condition::
>>> IS_EXPR('int(value) < 2')('1')
('1', None)
>>> IS_EXPR('int(value) < 2')('2')
('2', 'invalid expression')
"""
def __init__(self, expression, error_message='Invalid expression', environment=None):
self.expression = expression
self.error_message = error_message
self.environment = environment or {}
def __call__(self, value):
if callable(self.expression):
return (value, self.expression(value))
# for backward compatibility
self.environment.update(value=value)
exec '__ret__=' + self.expression in self.environment
if self.environment['__ret__']:
return (value, None)
return (value, translate(self.error_message))
class IS_LENGTH(Validator):
"""
Checks if length of field's value fits between given boundaries. Works
for both text and file inputs.
Arguments:
maxsize: maximum allowed length / size
minsize: minimum allowed length / size
Examples::
#Check if text string is shorter than 33 characters:
INPUT(_type='text', _name='name', requires=IS_LENGTH(32))
#Check if password string is longer than 5 characters:
INPUT(_type='password', _name='name', requires=IS_LENGTH(minsize=6))
#Check if uploaded file has size between 1KB and 1MB:
INPUT(_type='file', _name='name', requires=IS_LENGTH(1048576, 1024))
>>> IS_LENGTH()('')
('', None)
>>> IS_LENGTH()('1234567890')
('1234567890', None)
>>> IS_LENGTH(maxsize=5, minsize=0)('1234567890') # too long
('1234567890', 'enter from 0 to 5 characters')
>>> IS_LENGTH(maxsize=50, minsize=20)('1234567890') # too short
('1234567890', 'enter from 20 to 50 characters')
"""
def __init__(self, maxsize=255, minsize=0,
error_message='Enter from %(min)g to %(max)g characters'):
self.maxsize = maxsize
self.minsize = minsize
self.error_message = error_message
def __call__(self, value):
if value is None:
length = 0
if self.minsize <= length <= self.maxsize:
return (value, None)
elif isinstance(value, cgi.FieldStorage):
if value.file:
value.file.seek(0, os.SEEK_END)
length = value.file.tell()
value.file.seek(0, os.SEEK_SET)
elif hasattr(value, 'value'):
val = value.value
if val:
length = len(val)
else:
length = 0
if self.minsize <= length <= self.maxsize:
return (value, None)
elif isinstance(value, str):
try:
lvalue = len(value.decode('utf8'))
except:
lvalue = len(value)
if self.minsize <= lvalue <= self.maxsize:
return (value, None)
elif isinstance(value, unicode):
if self.minsize <= len(value) <= self.maxsize:
return (value.encode('utf8'), None)
elif isinstance(value, (tuple, list)):
if self.minsize <= len(value) <= self.maxsize:
return (value, None)
elif self.minsize <= len(str(value)) <= self.maxsize:
return (str(value), None)
return (value, translate(self.error_message)
% dict(min=self.minsize, max=self.maxsize))
class IS_JSON(Validator):
"""
example::
INPUT(_type='text', _name='name',
            requires=IS_JSON(error_message="This is not a valid json input"))
>>> IS_JSON()('{"a": 100}')
({u'a': 100}, None)
>>> IS_JSON()('spam1234')
('spam1234', 'invalid json')
"""
def __init__(self, error_message='Invalid json', native_json=False):
self.native_json = native_json
self.error_message = error_message
def __call__(self, value):
try:
if self.native_json:
simplejson.loads(value) # raises error in case of malformed json
return (value, None) # the serialized value is not passed
return (simplejson.loads(value), None)
except JSONErrors:
return (value, translate(self.error_message))
def formatter(self,value):
if value is None:
return None
return simplejson.dumps(value)
class IS_IN_SET(Validator):
"""
example::
INPUT(_type='text', _name='name',
requires=IS_IN_SET(['max', 'john'],zero=''))
the argument of IS_IN_SET must be a list or set
>>> IS_IN_SET(['max', 'john'])('max')
('max', None)
>>> IS_IN_SET(['max', 'john'])('massimo')
('massimo', 'value not allowed')
>>> IS_IN_SET(['max', 'john'], multiple=True)(('max', 'john'))
(('max', 'john'), None)
>>> IS_IN_SET(['max', 'john'], multiple=True)(('bill', 'john'))
(('bill', 'john'), 'value not allowed')
>>> IS_IN_SET(('id1','id2'), ['first label','second label'])('id1') # Traditional way
('id1', None)
>>> IS_IN_SET({'id1':'first label', 'id2':'second label'})('id1')
('id1', None)
>>> import itertools
>>> IS_IN_SET(itertools.chain(['1','3','5'],['2','4','6']))('1')
('1', None)
>>> IS_IN_SET([('id1','first label'), ('id2','second label')])('id1') # Redundant way
('id1', None)
"""
def __init__(
self,
theset,
labels=None,
error_message='Value not allowed',
multiple=False,
zero='',
sort=False,
):
self.multiple = multiple
if isinstance(theset, dict):
self.theset = [str(item) for item in theset]
self.labels = theset.values()
elif theset and isinstance(theset, (tuple, list)) \
and isinstance(theset[0], (tuple, list)) and len(theset[0]) == 2:
self.theset = [str(item) for item, label in theset]
self.labels = [str(label) for item, label in theset]
else:
self.theset = [str(item) for item in theset]
self.labels = labels
self.error_message = error_message
self.zero = zero
self.sort = sort
def options(self, zero=True):
if not self.labels:
items = [(k, k) for (i, k) in enumerate(self.theset)]
else:
items = [(k, self.labels[i]) for (i, k) in enumerate(self.theset)]
if self.sort:
items.sort(options_sorter)
if zero and not self.zero is None and not self.multiple:
items.insert(0, ('', self.zero))
return items
def __call__(self, value):
if self.multiple:
            # the branch below previously read: values = re.compile("[\w\-:]+").findall(str(value))
if not value:
values = []
elif isinstance(value, (tuple, list)):
values = value
else:
values = [value]
else:
values = [value]
thestrset = [str(x) for x in self.theset]
failures = [x for x in values if not str(x) in thestrset]
if failures and self.theset:
if self.multiple and (value is None or value == ''):
return ([], None)
return (value, translate(self.error_message))
if self.multiple:
if isinstance(self.multiple, (tuple, list)) and \
not self.multiple[0] <= len(values) < self.multiple[1]:
return (values, translate(self.error_message))
return (values, None)
return (value, None)
regex1 = re.compile('\w+\.\w+')
regex2 = re.compile('%\(([^\)]+)\)\d*(?:\.\d+)?[a-zA-Z]')
class IS_IN_DB(Validator):
"""
example::
INPUT(_type='text', _name='name',
requires=IS_IN_DB(db, db.mytable.myfield, zero=''))
    used for reference fields, rendered as a drop-down box
"""
def __init__(
self,
dbset,
field,
label=None,
error_message='Value not in database',
orderby=None,
groupby=None,
distinct=None,
cache=None,
multiple=False,
zero='',
sort=False,
_and=None,
):
from dal import Table
if isinstance(field, Table):
field = field._id
if hasattr(dbset, 'define_table'):
self.dbset = dbset()
else:
self.dbset = dbset
(ktable, kfield) = str(field).split('.')
if not label:
label = '%%(%s)s' % kfield
if isinstance(label, str):
if regex1.match(str(label)):
label = '%%(%s)s' % str(label).split('.')[-1]
ks = regex2.findall(label)
if not kfield in ks:
ks += [kfield]
fields = ks
else:
ks = [kfield]
fields = 'all'
self.fields = fields
self.label = label
self.ktable = ktable
self.kfield = kfield
self.ks = ks
self.error_message = error_message
self.theset = None
self.orderby = orderby
self.groupby = groupby
self.distinct = distinct
self.cache = cache
self.multiple = multiple
self.zero = zero
self.sort = sort
self._and = _and
def set_self_id(self, id):
if self._and:
self._and.record_id = id
def build_set(self):
table = self.dbset.db[self.ktable]
if self.fields == 'all':
fields = [f for f in table]
else:
fields = [table[k] for k in self.fields]
ignore = (FieldVirtual,FieldMethod)
fields = filter(lambda f:not isinstance(f,ignore), fields)
if self.dbset.db._dbname != 'gae':
orderby = self.orderby or reduce(lambda a, b: a | b, fields)
groupby = self.groupby
distinct = self.distinct
dd = dict(orderby=orderby, groupby=groupby,
distinct=distinct, cache=self.cache,
cacheable=True)
records = self.dbset(table).select(*fields, **dd)
else:
orderby = self.orderby or \
reduce(lambda a, b: a | b, (
f for f in fields if not f.name == 'id'))
dd = dict(orderby=orderby, cache=self.cache, cacheable=True)
records = self.dbset(table).select(table.ALL, **dd)
self.theset = [str(r[self.kfield]) for r in records]
if isinstance(self.label, str):
self.labels = [self.label % r for r in records]
else:
self.labels = [self.label(r) for r in records]
def options(self, zero=True):
self.build_set()
items = [(k, self.labels[i]) for (i, k) in enumerate(self.theset)]
if self.sort:
items.sort(options_sorter)
if zero and not self.zero is None and not self.multiple:
items.insert(0, ('', self.zero))
return items
def __call__(self, value):
table = self.dbset.db[self.ktable]
field = table[self.kfield]
if self.multiple:
if self._and:
raise NotImplementedError
if isinstance(value, list):
values = value
elif value:
values = [value]
else:
values = []
if isinstance(self.multiple, (tuple, list)) and \
not self.multiple[0] <= len(values) < self.multiple[1]:
return (values, translate(self.error_message))
if self.theset:
if not [v for v in values if not v in self.theset]:
return (values, None)
else:
from dal import GoogleDatastoreAdapter
def count(values, s=self.dbset, f=field):
return s(f.belongs(map(int, values))).count()
if isinstance(self.dbset.db._adapter, GoogleDatastoreAdapter):
range_ids = range(0, len(values), 30)
total = sum(count(values[i:i + 30]) for i in range_ids)
if total == len(values):
return (values, None)
elif count(values) == len(values):
return (values, None)
elif self.theset:
if str(value) in self.theset:
if self._and:
return self._and(value)
else:
return (value, None)
else:
if self.dbset(field == value).count():
if self._and:
return self._and(value)
else:
return (value, None)
return (value, translate(self.error_message))
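# Typical usage sketch (table and field names are hypothetical):
#   db.dog.owner.requires = IS_IN_DB(db, 'person.id', '%(name)s', zero=None)
# The option list is built lazily: options() calls build_set(), which selects
# the referenced records and renders each through the label format string.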
class IS_NOT_IN_DB(Validator):
"""
example::
INPUT(_type='text', _name='name', requires=IS_NOT_IN_DB(db, db.table))
makes the field unique
"""
def __init__(
self,
dbset,
field,
error_message='Value already in database or empty',
allowed_override=[],
ignore_common_filters=False,
):
from dal import Table
if isinstance(field, Table):
field = field._id
if hasattr(dbset, 'define_table'):
self.dbset = dbset()
else:
self.dbset = dbset
self.field = field
self.error_message = error_message
self.record_id = 0
self.allowed_override = allowed_override
self.ignore_common_filters = ignore_common_filters
def set_self_id(self, id):
self.record_id = id
def __call__(self, value):
if isinstance(value,unicode):
value = value.encode('utf8')
else:
value = str(value)
if not value.strip():
return (value, translate(self.error_message))
if value in self.allowed_override:
return (value, None)
(tablename, fieldname) = str(self.field).split('.')
table = self.dbset.db[tablename]
field = table[fieldname]
subset = self.dbset(field == value,
ignore_common_filters=self.ignore_common_filters)
id = self.record_id
if isinstance(id, dict):
fields = [table[f] for f in id]
row = subset.select(*fields, **dict(limitby=(0, 1), orderby_on_limitby=False)).first()
if row and any(str(row[f]) != str(id[f]) for f in id):
return (value, translate(self.error_message))
else:
row = subset.select(table._id, field, limitby=(0, 1), orderby_on_limitby=False).first()
if row and str(row.id) != str(id):
return (value, translate(self.error_message))
return (value, None)
def range_error_message(error_message, what_to_enter, minimum, maximum):
"build the error message for the number range validators"
if error_message is None:
error_message = 'Enter ' + what_to_enter
if minimum is not None and maximum is not None:
error_message += ' between %(min)g and %(max)g'
elif minimum is not None:
error_message += ' greater than or equal to %(min)g'
elif maximum is not None:
error_message += ' less than or equal to %(max)g'
if type(maximum) in [int, long]:
maximum -= 1
return translate(error_message) % dict(min=minimum, max=maximum)
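# e.g. (assuming no translator is installed):
#   range_error_message(None, 'an integer', 1, 5)   -> 'Enter an integer between 1 and 4'
#   range_error_message(None, 'a number', 1.0, 5.5) -> 'Enter a number between 1 and 5.5'
# Integer maxima are displayed exclusive (maximum - 1); float maxima stay as given.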
class IS_INT_IN_RANGE(Validator):
"""
Determine that the argument is (or can be represented as) an int,
and that it falls within the specified range. The range is interpreted
in the Pythonic way, so the test is: min <= value < max.
The minimum and maximum limits can be None, meaning no lower or upper limit,
respectively.
example::
INPUT(_type='text', _name='name', requires=IS_INT_IN_RANGE(0, 10))
>>> IS_INT_IN_RANGE(1,5)('4')
(4, None)
>>> IS_INT_IN_RANGE(1,5)(4)
(4, None)
>>> IS_INT_IN_RANGE(1,5)(1)
(1, None)
>>> IS_INT_IN_RANGE(1,5)(5)
(5, 'enter an integer between 1 and 4')
>>> IS_INT_IN_RANGE(1,5)(5)
(5, 'enter an integer between 1 and 4')
>>> IS_INT_IN_RANGE(1,5)(3.5)
(3.5, 'enter an integer between 1 and 4')
>>> IS_INT_IN_RANGE(None,5)('4')
(4, None)
>>> IS_INT_IN_RANGE(None,5)('6')
('6', 'enter an integer less than or equal to 4')
>>> IS_INT_IN_RANGE(1,None)('4')
(4, None)
>>> IS_INT_IN_RANGE(1,None)('0')
('0', 'enter an integer greater than or equal to 1')
>>> IS_INT_IN_RANGE()(6)
(6, None)
>>> IS_INT_IN_RANGE()('abc')
('abc', 'enter an integer')
"""
def __init__(
self,
minimum=None,
maximum=None,
error_message=None,
):
self.minimum = int(minimum) if minimum is not None else None
self.maximum = int(maximum) if maximum is not None else None
self.error_message = range_error_message(
error_message, 'an integer', self.minimum, self.maximum)
def __call__(self, value):
if regex_isint.match(str(value)):
v = int(value)
if ((self.minimum is None or v >= self.minimum) and
(self.maximum is None or v < self.maximum)):
return (v, None)
return (value, self.error_message)
def str2dec(number):
s = str(number)
if not '.' in s:
s += '.00'
else:
s += '0' * (2 - len(s.split('.')[1]))
return s
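# e.g. str2dec(3) -> '3.00', str2dec(3.5) -> '3.50': pads a number's string
# form to at least two decimal places for display by the formatters below.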
class IS_FLOAT_IN_RANGE(Validator):
"""
Determine that the argument is (or can be represented as) a float,
and that it falls within the specified inclusive range.
The comparison is made with native arithmetic.
The minimum and maximum limits can be None, meaning no lower or upper limit,
respectively.
example::
INPUT(_type='text', _name='name', requires=IS_FLOAT_IN_RANGE(0, 10))
>>> IS_FLOAT_IN_RANGE(1,5)('4')
(4.0, None)
>>> IS_FLOAT_IN_RANGE(1,5)(4)
(4.0, None)
>>> IS_FLOAT_IN_RANGE(1,5)(1)
(1.0, None)
>>> IS_FLOAT_IN_RANGE(1,5)(5.25)
(5.25, 'enter a number between 1 and 5')
>>> IS_FLOAT_IN_RANGE(1,5)(6.0)
(6.0, 'enter a number between 1 and 5')
>>> IS_FLOAT_IN_RANGE(1,5)(3.5)
(3.5, None)
>>> IS_FLOAT_IN_RANGE(1,None)(3.5)
(3.5, None)
>>> IS_FLOAT_IN_RANGE(None,5)(3.5)
(3.5, None)
>>> IS_FLOAT_IN_RANGE(1,None)(0.5)
(0.5, 'enter a number greater than or equal to 1')
>>> IS_FLOAT_IN_RANGE(None,5)(6.5)
(6.5, 'enter a number less than or equal to 5')
>>> IS_FLOAT_IN_RANGE()(6.5)
(6.5, None)
>>> IS_FLOAT_IN_RANGE()('abc')
('abc', 'enter a number')
"""
def __init__(
self,
minimum=None,
maximum=None,
error_message=None,
dot='.'
):
self.minimum = float(minimum) if minimum is not None else None
self.maximum = float(maximum) if maximum is not None else None
self.dot = str(dot)
self.error_message = range_error_message(
error_message, 'a number', self.minimum, self.maximum)
def __call__(self, value):
try:
if self.dot == '.':
v = float(value)
else:
v = float(str(value).replace(self.dot, '.'))
if ((self.minimum is None or v >= self.minimum) and
(self.maximum is None or v <= self.maximum)):
return (v, None)
except (ValueError, TypeError):
pass
return (value, self.error_message)
def formatter(self, value):
if value is None:
return None
return str2dec(value).replace('.', self.dot)
class IS_DECIMAL_IN_RANGE(Validator):
"""
Determine that the argument is (or can be represented as) a Python Decimal,
and that it falls within the specified inclusive range.
The comparison is made with Python Decimal arithmetic.
The minimum and maximum limits can be None, meaning no lower or upper limit,
respectively.
example::
INPUT(_type='text', _name='name', requires=IS_DECIMAL_IN_RANGE(0, 10))
>>> IS_DECIMAL_IN_RANGE(1,5)('4')
(Decimal('4'), None)
>>> IS_DECIMAL_IN_RANGE(1,5)(4)
(Decimal('4'), None)
>>> IS_DECIMAL_IN_RANGE(1,5)(1)
(Decimal('1'), None)
>>> IS_DECIMAL_IN_RANGE(1,5)(5.25)
(5.25, 'enter a number between 1 and 5')
>>> IS_DECIMAL_IN_RANGE(5.25,6)(5.25)
(Decimal('5.25'), None)
>>> IS_DECIMAL_IN_RANGE(5.25,6)('5.25')
(Decimal('5.25'), None)
>>> IS_DECIMAL_IN_RANGE(1,5)(6.0)
(6.0, 'enter a number between 1 and 5')
>>> IS_DECIMAL_IN_RANGE(1,5)(3.5)
(Decimal('3.5'), None)
>>> IS_DECIMAL_IN_RANGE(1.5,5.5)(3.5)
(Decimal('3.5'), None)
>>> IS_DECIMAL_IN_RANGE(1.5,5.5)(6.5)
(6.5, 'enter a number between 1.5 and 5.5')
>>> IS_DECIMAL_IN_RANGE(1.5,None)(6.5)
(Decimal('6.5'), None)
>>> IS_DECIMAL_IN_RANGE(1.5,None)(0.5)
(0.5, 'enter a number greater than or equal to 1.5')
>>> IS_DECIMAL_IN_RANGE(None,5.5)(4.5)
(Decimal('4.5'), None)
>>> IS_DECIMAL_IN_RANGE(None,5.5)(6.5)
(6.5, 'enter a number less than or equal to 5.5')
>>> IS_DECIMAL_IN_RANGE()(6.5)
(Decimal('6.5'), None)
>>> IS_DECIMAL_IN_RANGE(0,99)(123.123)
(123.123, 'enter a number between 0 and 99')
>>> IS_DECIMAL_IN_RANGE(0,99)('123.123')
('123.123', 'enter a number between 0 and 99')
>>> IS_DECIMAL_IN_RANGE(0,99)('12.34')
(Decimal('12.34'), None)
>>> IS_DECIMAL_IN_RANGE()('abc')
('abc', 'enter a number')
"""
def __init__(
self,
minimum=None,
maximum=None,
error_message=None,
dot='.'
):
self.minimum = decimal.Decimal(str(minimum)) if minimum is not None else None
self.maximum = decimal.Decimal(str(maximum)) if maximum is not None else None
self.dot = str(dot)
self.error_message = range_error_message(
error_message, 'a number', self.minimum, self.maximum)
def __call__(self, value):
try:
if isinstance(value, decimal.Decimal):
v = value
else:
v = decimal.Decimal(str(value).replace(self.dot, '.'))
if ((self.minimum is None or v >= self.minimum) and
(self.maximum is None or v <= self.maximum)):
return (v, None)
except (ValueError, TypeError, decimal.InvalidOperation):
pass
return (value, self.error_message)
def formatter(self, value):
if value is None:
return None
return str2dec(value).replace('.', self.dot)
def is_empty(value, empty_regex=None):
"test empty field"
if isinstance(value, (str, unicode)):
value = value.strip()
if empty_regex is not None and empty_regex.match(value):
value = ''
if value is None or value == '' or value == []:
return (value, True)
return (value, False)
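# e.g. is_empty('   ') -> ('', True), is_empty(' x ') -> ('x', False);
# with an empty_regex: is_empty('n/a', re.compile('n/a')) -> ('', True)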
class IS_NOT_EMPTY(Validator):
"""
example::
INPUT(_type='text', _name='name', requires=IS_NOT_EMPTY())
>>> IS_NOT_EMPTY()(1)
(1, None)
>>> IS_NOT_EMPTY()(0)
(0, None)
>>> IS_NOT_EMPTY()('x')
('x', None)
>>> IS_NOT_EMPTY()(' x ')
('x', None)
>>> IS_NOT_EMPTY()(None)
(None, 'enter a value')
>>> IS_NOT_EMPTY()('')
('', 'enter a value')
>>> IS_NOT_EMPTY()(' ')
('', 'enter a value')
>>> IS_NOT_EMPTY()(' \\n\\t')
('', 'enter a value')
>>> IS_NOT_EMPTY()([])
([], 'enter a value')
>>> IS_NOT_EMPTY(empty_regex='def')('def')
('', 'enter a value')
>>> IS_NOT_EMPTY(empty_regex='de[fg]')('deg')
('', 'enter a value')
>>> IS_NOT_EMPTY(empty_regex='def')('abc')
('abc', None)
"""
def __init__(self, error_message='Enter a value', empty_regex=None):
self.error_message = error_message
if empty_regex is not None:
self.empty_regex = re.compile(empty_regex)
else:
self.empty_regex = None
def __call__(self, value):
value, empty = is_empty(value, empty_regex=self.empty_regex)
if empty:
return (value, translate(self.error_message))
return (value, None)
class IS_ALPHANUMERIC(IS_MATCH):
"""
example::
INPUT(_type='text', _name='name', requires=IS_ALPHANUMERIC())
>>> IS_ALPHANUMERIC()('1')
('1', None)
>>> IS_ALPHANUMERIC()('')
('', None)
>>> IS_ALPHANUMERIC()('A_a')
('A_a', None)
>>> IS_ALPHANUMERIC()('!')
('!', 'enter only letters, numbers, and underscore')
"""
def __init__(self, error_message='Enter only letters, numbers, and underscore'):
IS_MATCH.__init__(self, '^[\w]*$', error_message)
class IS_EMAIL(Validator):
"""
Checks if field's value is a valid email address. Can be set to disallow
or force addresses from certain domain(s).
Email regex adapted from
http://haacked.com/archive/2007/08/21/i-knew-how-to-validate-an-email-address-until-i.aspx,
generally following the RFCs, except that we disallow quoted strings
and permit underscores and leading numerics in subdomain labels
Arguments:
- banned: regex text for disallowed address domains
- forced: regex text for required address domains
Both arguments can also be custom objects with a match(value) method.
Examples::
#Check for valid email address:
INPUT(_type='text', _name='name',
requires=IS_EMAIL())
#Check for valid email address that can't be from a .com domain:
INPUT(_type='text', _name='name',
requires=IS_EMAIL(banned='^.*\.com(|\..*)$'))
#Check for valid email address that must be from a .edu domain:
INPUT(_type='text', _name='name',
requires=IS_EMAIL(forced='^.*\.edu(|\..*)$'))
>>> IS_EMAIL()('[email protected]')
('[email protected]', None)
>>> IS_EMAIL()('[email protected]')
('[email protected]', None)
>>> IS_EMAIL()('[email protected]')
('[email protected]', None)
>>> IS_EMAIL()('[email protected]')
('[email protected]', None)
>>> IS_EMAIL()('abc@d_-f.us')
('abc@d_-f.us', None)
>>> IS_EMAIL()('@def.com') # missing name
('@def.com', 'enter a valid email address')
>>> IS_EMAIL()('"abc@def".com') # quoted name
('"abc@def".com', 'enter a valid email address')
>>> IS_EMAIL()('abc+def.com') # no @
('abc+def.com', 'enter a valid email address')
>>> IS_EMAIL()('[email protected]') # one-char TLD
('[email protected]', 'enter a valid email address')
>>> IS_EMAIL()('[email protected]') # numeric TLD
('[email protected]', 'enter a valid email address')
>>> IS_EMAIL()('[email protected]') # double-dot in domain
('[email protected]', 'enter a valid email address')
>>> IS_EMAIL()('[email protected]') # dot starts domain
('[email protected]', 'enter a valid email address')
>>> IS_EMAIL()('[email protected]_m') # underscore in TLD
('[email protected]_m', 'enter a valid email address')
>>> IS_EMAIL()('NotAnEmail') # missing @
('NotAnEmail', 'enter a valid email address')
>>> IS_EMAIL()('abc@NotAnEmail') # missing TLD
('abc@NotAnEmail', 'enter a valid email address')
>>> IS_EMAIL()('customer/[email protected]')
('customer/[email protected]', None)
>>> IS_EMAIL()('[email protected]')
('[email protected]', None)
>>> IS_EMAIL()('!def!xyz%[email protected]')
('!def!xyz%[email protected]', None)
>>> IS_EMAIL()('[email protected]')
('[email protected]', None)
>>> IS_EMAIL()('[email protected]')
('[email protected]', None)
>>> IS_EMAIL()('[email protected]') # dot starts name
('[email protected]', 'enter a valid email address')
>>> IS_EMAIL()('[email protected]') # adjacent dots in name
('[email protected]', 'enter a valid email address')
>>> IS_EMAIL()('[email protected]') # dot ends name
('[email protected]', 'enter a valid email address')
>>> IS_EMAIL()('[email protected]') # name is bare dot
('[email protected]', 'enter a valid email address')
>>> IS_EMAIL()('[email protected]')
('[email protected]', None)
>>> IS_EMAIL()('Ima [email protected]') # space in name
('Ima [email protected]', 'enter a valid email address')
>>> IS_EMAIL()('localguy@localhost') # localhost as domain
('localguy@localhost', None)
"""
regex = re.compile('''
^(?!\.) # name may not begin with a dot
(
[-a-z0-9!\#$%&'*+/=?^_`{|}~] # all legal characters except dot
|
(?<!\.)\. # single dots only
)+
(?<!\.) # name may not end with a dot
@
(
localhost
|
(
[a-z0-9]
# [sub]domain begins with alphanumeric
(
[-\w]* # alphanumeric, underscore, dot, hyphen
[a-z0-9] # ending alphanumeric
)?
\. # ending dot
)+
[a-z]{2,} # TLD alpha-only
)$
''', re.VERBOSE | re.IGNORECASE)
regex_proposed_but_failed = re.compile('^([\w\!\#$\%\&\'\*\+\-\/\=\?\^\`{\|\}\~]+\.)*[\w\!\#$\%\&\'\*\+\-\/\=\?\^\`{\|\}\~]+@((((([a-z0-9]{1}[a-z0-9\-]{0,62}[a-z0-9]{1})|[a-z])\.)+[a-z]{2,6})|(\d{1,3}\.){3}\d{1,3}(\:\d{1,5})?)$', re.VERBOSE | re.IGNORECASE)
def __init__(self,
banned=None,
forced=None,
error_message='Enter a valid email address'):
if isinstance(banned, str):
banned = re.compile(banned)
if isinstance(forced, str):
forced = re.compile(forced)
self.banned = banned
self.forced = forced
self.error_message = error_message
def __call__(self, value):
match = self.regex.match(value)
if match:
domain = value.split('@')[1]
if (not self.banned or not self.banned.match(domain)) \
and (not self.forced or self.forced.match(domain)):
return (value, None)
return (value, translate(self.error_message))
class IS_LIST_OF_EMAILS(object):
"""
use as follows:
Field('emails','list:string',
widget=SQLFORM.widgets.text.widget,
requires=IS_LIST_OF_EMAILS(),
represent=lambda v,r: \
SPAN(*[A(x,_href='mailto:'+x) for x in (v or [])])
)
"""
split_emails = re.compile('[^,;\s]+')
def __init__(self, error_message = 'Invalid emails: %s'):
self.error_message = error_message
def __call__(self, value):
bad_emails = []
emails = []
f = IS_EMAIL()
for email in self.split_emails.findall(value):
if not email in emails:
emails.append(email)
error = f(email)[1]
if error and not email in bad_emails:
bad_emails.append(email)
if not bad_emails:
return (value, None)
else:
return (value,
translate(self.error_message) % ', '.join(bad_emails))
def formatter(self,value,row=None):
return ', '.join(value or [])
# URL scheme source:
# <http://en.wikipedia.org/wiki/URI_scheme> obtained on 2008-Nov-10
official_url_schemes = [
'aaa',
'aaas',
'acap',
'cap',
'cid',
'crid',
'data',
'dav',
'dict',
'dns',
'fax',
'file',
'ftp',
'go',
'gopher',
'h323',
'http',
'https',
'icap',
'im',
'imap',
'info',
'ipp',
'iris',
'iris.beep',
'iris.xpc',
'iris.xpcs',
'iris.lws',
'ldap',
'mailto',
'mid',
'modem',
'msrp',
'msrps',
'mtqp',
'mupdate',
'news',
'nfs',
'nntp',
'opaquelocktoken',
'pop',
'pres',
'prospero',
'rtsp',
'service',
'shttp',
'sip',
'sips',
'snmp',
'soap.beep',
'soap.beeps',
'tag',
'tel',
'telnet',
'tftp',
'thismessage',
'tip',
'tv',
'urn',
'vemmi',
'wais',
'xmlrpc.beep',
    'xmlrpc.beeps',
'xmpp',
'z39.50r',
'z39.50s',
]
unofficial_url_schemes = [
'about',
'adiumxtra',
'aim',
'afp',
'aw',
'callto',
'chrome',
'cvs',
'ed2k',
'feed',
'fish',
'gg',
'gizmoproject',
'iax2',
'irc',
'ircs',
'itms',
'jar',
'javascript',
'keyparc',
'lastfm',
'ldaps',
'magnet',
'mms',
'msnim',
'mvn',
'notes',
'nsfw',
'psyc',
'paparazzi:http',
'rmi',
'rsync',
'secondlife',
'sgn',
'skype',
'ssh',
'sftp',
'smb',
'sms',
'soldat',
'steam',
'svn',
'teamspeak',
'unreal',
'ut2004',
'ventrilo',
'view-source',
'webcal',
'wyciwyg',
'xfire',
'xri',
'ymsgr',
]
all_url_schemes = [None] + official_url_schemes + unofficial_url_schemes
http_schemes = [None, 'http', 'https']
# This regex comes from RFC 2396, Appendix B. It's used to split a URL into
# its component parts
# Here are the regex groups that it extracts:
# scheme = group(2)
# authority = group(4)
# path = group(5)
# query = group(7)
# fragment = group(9)
url_split_regex = \
re.compile('^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?')
# Defined in RFC 3490, Section 3.1, Requirement #1
# Use this regex to split the authority component of a unicode URL into
# its component labels
label_split_regex = re.compile(u'[\u002e\u3002\uff0e\uff61]')
def escape_unicode(string):
'''
Converts a unicode string into US-ASCII, using a simple conversion scheme.
Each unicode character that does not have a US-ASCII equivalent is
converted into a URL escaped form based on its hexadecimal value.
For example, the unicode character '\u4e86' will become the string '%4e%86'
:param string: unicode string, the unicode string to convert into an
escaped US-ASCII form
:returns: the US-ASCII escaped form of the inputted string
:rtype: string
@author: Jonathan Benn
'''
returnValue = StringIO()
for character in string:
code = ord(character)
if code > 0x7F:
hexCode = hex(code)
returnValue.write('%' + hexCode[2:4] + '%' + hexCode[4:6])
else:
returnValue.write(character)
return returnValue.getvalue()
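# e.g. escape_unicode(u'a\u4e86b') -> 'a%4e%86b': only code points above 0x7F
# are rewritten; plain ASCII passes through untouched.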
def unicode_to_ascii_authority(authority):
'''
Follows the steps in RFC 3490, Section 4 to convert a unicode authority
string into its ASCII equivalent.
For example, u'www.Alliancefran\xe7aise.nu' will be converted into
'www.xn--alliancefranaise-npb.nu'
:param authority: unicode string, the URL authority component to convert,
e.g. u'www.Alliancefran\xe7aise.nu'
    :returns: the US-ASCII character equivalent to the inputted authority,
e.g. 'www.xn--alliancefranaise-npb.nu'
:rtype: string
    :raises Exception: if the function is not able to convert the inputted
        authority
@author: Jonathan Benn
'''
#RFC 3490, Section 4, Step 1
#The encodings.idna Python module assumes that AllowUnassigned == True
#RFC 3490, Section 4, Step 2
labels = label_split_regex.split(authority)
#RFC 3490, Section 4, Step 3
#The encodings.idna Python module assumes that UseSTD3ASCIIRules == False
#RFC 3490, Section 4, Step 4
#We use the ToASCII operation because we are about to put the authority
#into an IDN-unaware slot
asciiLabels = []
try:
import encodings.idna
for label in labels:
if label:
asciiLabels.append(encodings.idna.ToASCII(label))
else:
#encodings.idna.ToASCII does not accept an empty string, but
#it is necessary for us to allow for empty labels so that we
#don't modify the URL
asciiLabels.append('')
except:
asciiLabels = [str(label) for label in labels]
#RFC 3490, Section 4, Step 5
return str(reduce(lambda x, y: x + unichr(0x002E) + y, asciiLabels))
def unicode_to_ascii_url(url, prepend_scheme):
'''
    Converts the inputted unicode url into a US-ASCII equivalent. This function
goes a little beyond RFC 3490, which is limited in scope to the domain name
(authority) only. Here, the functionality is expanded to what was observed
on Wikipedia on 2009-Jan-22:
    Component   Can Use Unicode?
    ---------   ----------------
    scheme      No
    authority   Yes
    path        Yes
    query       Yes
    fragment    No
The authority component gets converted to punycode, but occurrences of
unicode in other components get converted into a pair of URI escapes (we
assume 4-byte unicode). E.g. the unicode character U+4E2D will be
converted into '%4E%2D'. Testing with Firefox v3.0.5 has shown that it can
understand this kind of URI encoding.
:param url: unicode string, the URL to convert from unicode into US-ASCII
:param prepend_scheme: string, a protocol scheme to prepend to the URL if
we're having trouble parsing it.
e.g. "http". Input None to disable this functionality
    :returns: a US-ASCII equivalent of the inputted url
:rtype: string
@author: Jonathan Benn
'''
#convert the authority component of the URL into an ASCII punycode string,
#but encode the rest using the regular URI character encoding
groups = url_split_regex.match(url).groups()
#If no authority was found
if not groups[3]:
#Try appending a scheme to see if that fixes the problem
scheme_to_prepend = prepend_scheme or 'http'
groups = url_split_regex.match(
unicode(scheme_to_prepend) + u'://' + url).groups()
#if we still can't find the authority
if not groups[3]:
raise Exception('No authority component found, ' +
'could not decode unicode to US-ASCII')
#We're here if we found an authority, let's rebuild the URL
scheme = groups[1]
authority = groups[3]
path = groups[4] or ''
query = groups[5] or ''
fragment = groups[7] or ''
if prepend_scheme:
scheme = str(scheme) + '://'
else:
scheme = ''
return scheme + unicode_to_ascii_authority(authority) +\
escape_unicode(path) + escape_unicode(query) + str(fragment)
class IS_GENERIC_URL(Validator):
"""
Rejects a URL string if any of the following is true:
* The string is empty or None
* The string uses characters that are not allowed in a URL
* The URL scheme specified (if one is specified) is not valid
Based on RFC 2396: http://www.faqs.org/rfcs/rfc2396.html
This function only checks the URL's syntax. It does not check that the URL
points to a real document, for example, or that it otherwise makes sense
semantically. This function does automatically prepend 'http://' in front
of a URL if and only if that's necessary to successfully parse the URL.
Please note that a scheme will be prepended only for rare cases
(e.g. 'google.ca:80')
The list of allowed schemes is customizable with the allowed_schemes
parameter. If you exclude None from the list, then abbreviated URLs
(lacking a scheme such as 'http') will be rejected.
The default prepended scheme is customizable with the prepend_scheme
parameter. If you set prepend_scheme to None then prepending will be
disabled. URLs that require prepending to parse will still be accepted,
but the return value will not be modified.
@author: Jonathan Benn
>>> IS_GENERIC_URL()('http://[email protected]')
('http://[email protected]', None)
"""
def __init__(
self,
error_message='Enter a valid URL',
allowed_schemes=None,
prepend_scheme=None,
):
"""
:param error_message: a string, the error message to give the end user
if the URL does not validate
:param allowed_schemes: a list containing strings or None. Each element
            is a scheme the inputted URL is allowed to use
:param prepend_scheme: a string, this scheme is prepended if it's
necessary to make the URL valid
"""
self.error_message = error_message
if allowed_schemes is None:
self.allowed_schemes = all_url_schemes
else:
self.allowed_schemes = allowed_schemes
self.prepend_scheme = prepend_scheme
if self.prepend_scheme not in self.allowed_schemes:
raise SyntaxError("prepend_scheme='%s' is not in allowed_schemes=%s"
% (self.prepend_scheme, self.allowed_schemes))
GENERIC_URL = re.compile(r"%[^0-9A-Fa-f]{2}|%[^0-9A-Fa-f][0-9A-Fa-f]|%[0-9A-Fa-f][^0-9A-Fa-f]|%$|%[0-9A-Fa-f]$|%[^0-9A-Fa-f]$")
GENERIC_URL_VALID = re.compile(r"[A-Za-z0-9;/?:@&=+$,\-_\.!~*'\(\)%#]+$")
def __call__(self, value):
"""
:param value: a string, the URL to validate
        :returns: a tuple, where tuple[0] is the inputted value (possibly
                  prepended with prepend_scheme), and tuple[1] is either
None (success!) or the string error_message
"""
try:
# if the URL does not misuse the '%' character
if not self.GENERIC_URL.search(value):
# if the URL is only composed of valid characters
if self.GENERIC_URL_VALID.match(value):
# Then split up the URL into its components and check on
# the scheme
scheme = url_split_regex.match(value).group(2)
# Clean up the scheme before we check it
if not scheme is None:
scheme = urllib.unquote(scheme).lower()
# If the scheme really exists
if scheme in self.allowed_schemes:
# Then the URL is valid
return (value, None)
else:
# else, for the possible case of abbreviated URLs with
# ports, check to see if adding a valid scheme fixes
# the problem (but only do this if it doesn't have
# one already!)
if value.find('://') < 0 and None in self.allowed_schemes:
schemeToUse = self.prepend_scheme or 'http'
prependTest = self.__call__(
schemeToUse + '://' + value)
# if the prepend test succeeded
if prependTest[1] is None:
# if prepending in the output is enabled
if self.prepend_scheme:
return prependTest
else:
# else return the original,
# non-prepended value
return (value, None)
except:
pass
# else the URL is not valid
return (value, translate(self.error_message))
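# Behaviour sketch for the prepend logic above (example URL, not from the source):
#   IS_GENERIC_URL()('google.ca:80')                      -> ('google.ca:80', None)
#   IS_GENERIC_URL(prepend_scheme='http')('google.ca:80') -> ('http://google.ca:80', None)
# 'google.ca' parses as a scheme, so a real scheme must be prepended to validate;
# the prepended form is only returned when prepend_scheme is set.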
# Sources (obtained 2008-Nov-11):
# http://en.wikipedia.org/wiki/Top-level_domain
# http://www.iana.org/domains/root/db/
official_top_level_domains = [
'ac',
'ad',
'ae',
'aero',
'af',
'ag',
'ai',
'al',
'am',
'an',
'ao',
'aq',
'ar',
'arpa',
'as',
'asia',
'at',
'au',
'aw',
'ax',
'az',
'ba',
'bb',
'bd',
'be',
'bf',
'bg',
'bh',
'bi',
'biz',
'bj',
'bl',
'bm',
'bn',
'bo',
'br',
'bs',
'bt',
'bv',
'bw',
'by',
'bz',
'ca',
'cat',
'cc',
'cd',
'cf',
'cg',
'ch',
'ci',
'ck',
'cl',
'cm',
'cn',
'co',
'com',
'coop',
'cr',
'cu',
'cv',
'cx',
'cy',
'cz',
'de',
'dj',
'dk',
'dm',
'do',
'dz',
'ec',
'edu',
'ee',
'eg',
'eh',
'er',
'es',
'et',
'eu',
'example',
'fi',
'fj',
'fk',
'fm',
'fo',
'fr',
'ga',
'gb',
'gd',
'ge',
'gf',
'gg',
'gh',
'gi',
'gl',
'gm',
'gn',
'gov',
'gp',
'gq',
'gr',
'gs',
'gt',
'gu',
'gw',
'gy',
'hk',
'hm',
'hn',
'hr',
'ht',
'hu',
'id',
'ie',
'il',
'im',
'in',
'info',
'int',
'invalid',
'io',
'iq',
'ir',
'is',
'it',
'je',
'jm',
'jo',
'jobs',
'jp',
'ke',
'kg',
'kh',
'ki',
'km',
'kn',
'kp',
'kr',
'kw',
'ky',
'kz',
'la',
'lb',
'lc',
'li',
'lk',
'localhost',
'lr',
'ls',
'lt',
'lu',
'lv',
'ly',
'ma',
'mc',
'md',
'me',
'mf',
'mg',
'mh',
'mil',
'mk',
'ml',
'mm',
'mn',
'mo',
'mobi',
'mp',
'mq',
'mr',
'ms',
'mt',
'mu',
'museum',
'mv',
'mw',
'mx',
'my',
'mz',
'na',
'name',
'nc',
'ne',
'net',
'nf',
'ng',
'ni',
'nl',
'no',
'np',
'nr',
'nu',
'nz',
'om',
'org',
'pa',
'pe',
'pf',
'pg',
'ph',
'pk',
'pl',
'pm',
'pn',
'pr',
'pro',
'ps',
'pt',
'pw',
'py',
'qa',
're',
'ro',
'rs',
'ru',
'rw',
'sa',
'sb',
'sc',
'sd',
'se',
'sg',
'sh',
'si',
'sj',
'sk',
'sl',
'sm',
'sn',
'so',
'sr',
'st',
'su',
'sv',
'sy',
'sz',
'tc',
'td',
'tel',
'test',
'tf',
'tg',
'th',
'tj',
'tk',
'tl',
'tm',
'tn',
'to',
'tp',
'tr',
'travel',
'tt',
'tv',
'tw',
'tz',
'ua',
'ug',
'uk',
'um',
'us',
'uy',
'uz',
'va',
'vc',
've',
'vg',
'vi',
'vn',
'vu',
'wf',
'ws',
'xn--0zwm56d',
'xn--11b5bs3a9aj6g',
'xn--80akhbyknj4f',
'xn--9t4b11yi5a',
'xn--deba0ad',
'xn--g6w251d',
'xn--hgbk6aj7f53bba',
'xn--hlcj6aya9esc7a',
'xn--jxalpdlp',
'xn--kgbechtv',
'xn--p1ai',
'xn--zckzah',
'ye',
'yt',
'yu',
'za',
'zm',
'zw',
]
class IS_HTTP_URL(Validator):
"""
Rejects a URL string if any of the following is true:
* The string is empty or None
* The string uses characters that are not allowed in a URL
* The string breaks any of the HTTP syntactic rules
* The URL scheme specified (if one is specified) is not 'http' or 'https'
* The top-level domain (if a host name is specified) does not exist
Based on RFC 2616: http://www.faqs.org/rfcs/rfc2616.html
This function only checks the URL's syntax. It does not check that the URL
points to a real document, for example, or that it otherwise makes sense
semantically. This function does automatically prepend 'http://' in front
of a URL in the case of an abbreviated URL (e.g. 'google.ca').
The list of allowed schemes is customizable with the allowed_schemes
parameter. If you exclude None from the list, then abbreviated URLs
(lacking a scheme such as 'http') will be rejected.
The default prepended scheme is customizable with the prepend_scheme
parameter. If you set prepend_scheme to None then prepending will be
disabled. URLs that require prepending to parse will still be accepted,
but the return value will not be modified.
@author: Jonathan Benn
>>> IS_HTTP_URL()('http://1.2.3.4')
('http://1.2.3.4', None)
>>> IS_HTTP_URL()('http://abc.com')
('http://abc.com', None)
>>> IS_HTTP_URL()('https://abc.com')
('https://abc.com', None)
>>> IS_HTTP_URL()('httpx://abc.com')
('httpx://abc.com', 'enter a valid URL')
>>> IS_HTTP_URL()('http://abc.com:80')
('http://abc.com:80', None)
>>> IS_HTTP_URL()('http://[email protected]')
('http://[email protected]', None)
>>> IS_HTTP_URL()('http://[email protected]')
('http://[email protected]', None)
"""
GENERIC_VALID_IP = re.compile(
"([\w.!~*'|;:&=+$,-]+@)?\d+\.\d+\.\d+\.\d+(:\d*)*$")
GENERIC_VALID_DOMAIN = re.compile("([\w.!~*'|;:&=+$,-]+@)?(([A-Za-z0-9]+[A-Za-z0-9\-]*[A-Za-z0-9]+\.)*([A-Za-z0-9]+\.)*)*([A-Za-z]+[A-Za-z0-9\-]*[A-Za-z0-9]+)\.?(:\d*)*$")
def __init__(
self,
error_message='Enter a valid URL',
allowed_schemes=None,
prepend_scheme='http',
):
"""
:param error_message: a string, the error message to give the end user
if the URL does not validate
:param allowed_schemes: a list containing strings or None. Each element
            is a scheme the inputted URL is allowed to use
:param prepend_scheme: a string, this scheme is prepended if it's
necessary to make the URL valid
"""
self.error_message = error_message
if allowed_schemes is None:
self.allowed_schemes = http_schemes
else:
self.allowed_schemes = allowed_schemes
self.prepend_scheme = prepend_scheme
for i in self.allowed_schemes:
if i not in http_schemes:
raise SyntaxError("allowed_scheme value '%s' is not in %s" %
(i, http_schemes))
if self.prepend_scheme not in self.allowed_schemes:
raise SyntaxError("prepend_scheme='%s' is not in allowed_schemes=%s" %
(self.prepend_scheme, self.allowed_schemes))
def __call__(self, value):
"""
:param value: a string, the URL to validate
        :returns: a tuple, where tuple[0] is the input value
            (possibly prepended with prepend_scheme), and tuple[1] is either
None (success!) or the string error_message
"""
try:
# if the URL passes generic validation
x = IS_GENERIC_URL(error_message=self.error_message,
allowed_schemes=self.allowed_schemes,
prepend_scheme=self.prepend_scheme)
if x(value)[1] is None:
componentsMatch = url_split_regex.match(value)
authority = componentsMatch.group(4)
# if there is an authority component
if authority:
# if authority is a valid IP address
if self.GENERIC_VALID_IP.match(authority):
# Then this HTTP URL is valid
return (value, None)
else:
# else if authority is a valid domain name
domainMatch = self.GENERIC_VALID_DOMAIN.match(
authority)
if domainMatch:
# if the top-level domain really exists
if domainMatch.group(5).lower()\
in official_top_level_domains:
# Then this HTTP URL is valid
return (value, None)
else:
# else this is a relative/abbreviated URL, which will parse
# into the URL's path component
path = componentsMatch.group(5)
# relative case: if this is a valid path (if it starts with
# a slash)
if path.startswith('/'):
# Then this HTTP URL is valid
return (value, None)
else:
# abbreviated case: if we haven't already, prepend a
# scheme and see if it fixes the problem
if value.find('://') < 0:
schemeToUse = self.prepend_scheme or 'http'
prependTest = self.__call__(schemeToUse
+ '://' + value)
# if the prepend test succeeded
if prependTest[1] is None:
# if prepending in the output is enabled
if self.prepend_scheme:
return prependTest
else:
# else return the original, non-prepended
# value
return (value, None)
except:
pass
# else the HTTP URL is not valid
return (value, translate(self.error_message))
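# Illustrative sketch (not part of the original module) of the prepending
# behaviour described in the docstring above; the expected outputs follow
# from the code path that re-validates self.prepend_scheme + '://' + value:
#
#     >>> IS_HTTP_URL()('google.ca')
#     ('http://google.ca', None)
#     >>> IS_HTTP_URL(prepend_scheme=None)('google.ca')  # accept, don't modify
#     ('google.ca', None)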
class IS_URL(Validator):
"""
Rejects a URL string if any of the following is true:
* The string is empty or None
* The string uses characters that are not allowed in a URL
* The string breaks any of the HTTP syntactic rules
* The URL scheme specified (if one is specified) is not 'http' or 'https'
* The top-level domain (if a host name is specified) does not exist
(These rules are based on RFC 2616: http://www.faqs.org/rfcs/rfc2616.html)
This function only checks the URL's syntax. It does not check that the URL
points to a real document, for example, or that it otherwise makes sense
    semantically. It does, however, automatically prepend 'http://' to an
    abbreviated URL that lacks a scheme (e.g. 'google.ca').
If the parameter mode='generic' is used, then this function's behavior
changes. It then rejects a URL string if any of the following is true:
* The string is empty or None
* The string uses characters that are not allowed in a URL
* The URL scheme specified (if one is specified) is not valid
(These rules are based on RFC 2396: http://www.faqs.org/rfcs/rfc2396.html)
The list of allowed schemes is customizable with the allowed_schemes
parameter. If you exclude None from the list, then abbreviated URLs
(lacking a scheme such as 'http') will be rejected.
The default prepended scheme is customizable with the prepend_scheme
parameter. If you set prepend_scheme to None then prepending will be
disabled. URLs that require prepending to parse will still be accepted,
but the return value will not be modified.
IS_URL is compatible with the Internationalized Domain Name (IDN) standard
specified in RFC 3490 (http://tools.ietf.org/html/rfc3490). As a result,
URLs can be regular strings or unicode strings.
If the URL's domain component (e.g. google.ca) contains non-US-ASCII
letters, then the domain will be converted into Punycode (defined in
RFC 3492, http://tools.ietf.org/html/rfc3492). IS_URL goes a bit beyond
the standards, and allows non-US-ASCII characters to be present in the path
and query components of the URL as well. These non-US-ASCII characters will
be escaped using the standard '%20' type syntax. e.g. the unicode
character with hex code 0x4e86 will become '%4e%86'
Code Examples::
INPUT(_type='text', _name='name', requires=IS_URL())
>>> IS_URL()('abc.com')
('http://abc.com', None)
INPUT(_type='text', _name='name', requires=IS_URL(mode='generic'))
>>> IS_URL(mode='generic')('abc.com')
('abc.com', None)
INPUT(_type='text', _name='name',
requires=IS_URL(allowed_schemes=['https'], prepend_scheme='https'))
>>> IS_URL(allowed_schemes=['https'], prepend_scheme='https')('https://abc.com')
('https://abc.com', None)
INPUT(_type='text', _name='name',
requires=IS_URL(prepend_scheme='https'))
>>> IS_URL(prepend_scheme='https')('abc.com')
('https://abc.com', None)
INPUT(_type='text', _name='name',
requires=IS_URL(mode='generic', allowed_schemes=['ftps', 'https'],
prepend_scheme='https'))
>>> IS_URL(mode='generic', allowed_schemes=['ftps', 'https'], prepend_scheme='https')('https://abc.com')
('https://abc.com', None)
>>> IS_URL(mode='generic', allowed_schemes=['ftps', 'https', None], prepend_scheme='https')('abc.com')
('abc.com', None)
@author: Jonathan Benn
"""
def __init__(
self,
error_message='Enter a valid URL',
mode='http',
allowed_schemes=None,
prepend_scheme='http',
):
"""
:param error_message: a string, the error message to give the end user
if the URL does not validate
:param allowed_schemes: a list containing strings or None. Each element
            is a scheme the input URL is allowed to use
:param prepend_scheme: a string, this scheme is prepended if it's
necessary to make the URL valid
"""
self.error_message = error_message
self.mode = mode.lower()
if not self.mode in ['generic', 'http']:
raise SyntaxError("invalid mode '%s' in IS_URL" % self.mode)
self.allowed_schemes = allowed_schemes
if self.allowed_schemes:
if prepend_scheme not in self.allowed_schemes:
raise SyntaxError("prepend_scheme='%s' is not in allowed_schemes=%s"
% (prepend_scheme, self.allowed_schemes))
# if allowed_schemes is None, then we will defer testing
# prepend_scheme's validity to a sub-method
self.prepend_scheme = prepend_scheme
def __call__(self, value):
"""
:param value: a unicode or regular string, the URL to validate
:returns: a (string, string) tuple, where tuple[0] is the modified
input value and tuple[1] is either None (success!) or the
string error_message. The input value will never be modified in the
case of an error. However, if there is success then the input URL
may be modified to (1) prepend a scheme, and/or (2) convert a
non-compliant unicode URL into a compliant US-ASCII version.
"""
if self.mode == 'generic':
subMethod = IS_GENERIC_URL(error_message=self.error_message,
allowed_schemes=self.allowed_schemes,
prepend_scheme=self.prepend_scheme)
elif self.mode == 'http':
subMethod = IS_HTTP_URL(error_message=self.error_message,
allowed_schemes=self.allowed_schemes,
prepend_scheme=self.prepend_scheme)
else:
raise SyntaxError("invalid mode '%s' in IS_URL" % self.mode)
if type(value) != unicode:
return subMethod(value)
else:
try:
asciiValue = unicode_to_ascii_url(value, self.prepend_scheme)
except Exception:
#If we are not able to convert the unicode url into a
# US-ASCII URL, then the URL is not valid
return (value, translate(self.error_message))
methodResult = subMethod(asciiValue)
#if the validation of the US-ASCII version of the value failed
if not methodResult[1] is None:
# then return the original input value, not the US-ASCII version
return (value, methodResult[1])
else:
return methodResult
regex_time = re.compile(
'((?P<h>[0-9]+))([^0-9 ]+(?P<m>[0-9 ]+))?([^0-9ap ]+(?P<s>[0-9]*))?((?P<d>[ap]m))?')
class IS_TIME(Validator):
"""
example::
INPUT(_type='text', _name='name', requires=IS_TIME())
understands the following formats
hh:mm:ss [am/pm]
hh:mm [am/pm]
hh [am/pm]
[am/pm] is optional, ':' can be replaced by any other non-space non-digit
>>> IS_TIME()('21:30')
(datetime.time(21, 30), None)
>>> IS_TIME()('21-30')
(datetime.time(21, 30), None)
>>> IS_TIME()('21.30')
(datetime.time(21, 30), None)
>>> IS_TIME()('21:30:59')
(datetime.time(21, 30, 59), None)
>>> IS_TIME()('5:30')
(datetime.time(5, 30), None)
>>> IS_TIME()('5:30 am')
(datetime.time(5, 30), None)
>>> IS_TIME()('5:30 pm')
(datetime.time(17, 30), None)
>>> IS_TIME()('5:30 whatever')
('5:30 whatever', 'enter time as hh:mm:ss (seconds, am, pm optional)')
>>> IS_TIME()('5:30 20')
('5:30 20', 'enter time as hh:mm:ss (seconds, am, pm optional)')
>>> IS_TIME()('24:30')
('24:30', 'enter time as hh:mm:ss (seconds, am, pm optional)')
>>> IS_TIME()('21:60')
('21:60', 'enter time as hh:mm:ss (seconds, am, pm optional)')
>>> IS_TIME()('21:30::')
('21:30::', 'enter time as hh:mm:ss (seconds, am, pm optional)')
>>> IS_TIME()('')
('', 'enter time as hh:mm:ss (seconds, am, pm optional)')
"""
def __init__(self, error_message='Enter time as hh:mm:ss (seconds, am, pm optional)'):
self.error_message = error_message
def __call__(self, value):
try:
ivalue = value
value = regex_time.match(value.lower())
(h, m, s) = (int(value.group('h')), 0, 0)
if not value.group('m') is None:
m = int(value.group('m'))
if not value.group('s') is None:
s = int(value.group('s'))
if value.group('d') == 'pm' and 0 < h < 12:
h = h + 12
if value.group('d') == 'am' and h == 12:
h = 0
if not (h in range(24) and m in range(60) and s
in range(60)):
                raise ValueError('hours, minutes or seconds are outside the allowed range')
value = datetime.time(h, m, s)
return (value, None)
except AttributeError:
pass
except ValueError:
pass
return (ivalue, translate(self.error_message))
# A UTC class.
class UTC(datetime.tzinfo):
"""UTC"""
ZERO = datetime.timedelta(0)
def utcoffset(self, dt):
return UTC.ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return UTC.ZERO
utc = UTC()
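# Minimal usage sketch for the module-level `utc` instance (illustrative):
# it can be attached to datetimes so that the IS_DATE/IS_DATETIME validators
# below can convert localized values with astimezone():
#
#     >>> dt = datetime.datetime(2012, 3, 3, 12, 30, tzinfo=utc)
#     >>> dt.utcoffset()
#     datetime.timedelta(0)
#     >>> dt.tzname()
#     'UTC'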
class IS_DATE(Validator):
"""
example::
INPUT(_type='text', _name='name', requires=IS_DATE())
    date has to be in the ISO 8601 format YYYY-MM-DD
"""
def __init__(self, format='%Y-%m-%d',
error_message='Enter date as %(format)s',
timezone = None):
"""
        timezone must be None or a pytz.timezone("America/Chicago") object
"""
self.format = translate(format)
self.error_message = str(error_message)
self.timezone = timezone
self.extremes = {}
def __call__(self, value):
ovalue = value
if isinstance(value, datetime.date):
if self.timezone is not None:
value = value - datetime.timedelta(seconds=self.timezone*3600)
return (value, None)
try:
(y, m, d, hh, mm, ss, t0, t1, t2) = \
time.strptime(value, str(self.format))
value = datetime.date(y, m, d)
if self.timezone is not None:
value = self.timezone.localize(value).astimezone(utc)
return (value, None)
except:
self.extremes.update(IS_DATETIME.nice(self.format))
return (ovalue, translate(self.error_message) % self.extremes)
def formatter(self, value):
if value is None:
return None
format = self.format
year = value.year
y = '%.4i' % year
format = format.replace('%y', y[-2:])
format = format.replace('%Y', y)
if year < 1900:
year = 2000
if self.timezone is not None:
d = datetime.datetime(year, value.month, value.day)
d = d.replace(tzinfo=utc).astimezone(self.timezone)
else:
d = datetime.date(year, value.month, value.day)
return d.strftime(format)
class IS_DATETIME(Validator):
"""
example::
INPUT(_type='text', _name='name', requires=IS_DATETIME())
    datetime has to be in the ISO 8601 format YYYY-MM-DD hh:mm:ss
"""
isodatetime = '%Y-%m-%d %H:%M:%S'
@staticmethod
def nice(format):
code = (('%Y', '1963'),
('%y', '63'),
('%d', '28'),
('%m', '08'),
('%b', 'Aug'),
('%B', 'August'),
('%H', '14'),
('%I', '02'),
('%p', 'PM'),
('%M', '30'),
('%S', '59'))
for (a, b) in code:
format = format.replace(a, b)
return dict(format=format)
def __init__(self, format='%Y-%m-%d %H:%M:%S',
error_message='Enter date and time as %(format)s',
timezone=None):
"""
        timezone must be None or a pytz.timezone("America/Chicago") object
"""
self.format = translate(format)
self.error_message = str(error_message)
self.extremes = {}
self.timezone = timezone
def __call__(self, value):
ovalue = value
if isinstance(value, datetime.datetime):
return (value, None)
try:
(y, m, d, hh, mm, ss, t0, t1, t2) = \
time.strptime(value, str(self.format))
value = datetime.datetime(y, m, d, hh, mm, ss)
if self.timezone is not None:
value = self.timezone.localize(value).astimezone(utc)
return (value, None)
except:
self.extremes.update(IS_DATETIME.nice(self.format))
return (ovalue, translate(self.error_message) % self.extremes)
def formatter(self, value):
if value is None:
return None
format = self.format
year = value.year
y = '%.4i' % year
format = format.replace('%y', y[-2:])
format = format.replace('%Y', y)
if year < 1900:
year = 2000
d = datetime.datetime(year, value.month, value.day,
value.hour, value.minute, value.second)
if self.timezone is not None:
d = d.replace(tzinfo=utc).astimezone(self.timezone)
return d.strftime(format)
class IS_DATE_IN_RANGE(IS_DATE):
"""
example::
>>> v = IS_DATE_IN_RANGE(minimum=datetime.date(2008,1,1), \
maximum=datetime.date(2009,12,31), \
format="%m/%d/%Y",error_message="Oops")
>>> v('03/03/2008')
(datetime.date(2008, 3, 3), None)
>>> v('03/03/2010')
('03/03/2010', 'oops')
>>> v(datetime.date(2008,3,3))
(datetime.date(2008, 3, 3), None)
>>> v(datetime.date(2010,3,3))
(datetime.date(2010, 3, 3), 'oops')
"""
def __init__(self,
minimum=None,
maximum=None,
format='%Y-%m-%d',
error_message=None,
timezone=None):
self.minimum = minimum
self.maximum = maximum
if error_message is None:
if minimum is None:
error_message = "Enter date on or before %(max)s"
elif maximum is None:
error_message = "Enter date on or after %(min)s"
else:
error_message = "Enter date in range %(min)s %(max)s"
IS_DATE.__init__(self,
format=format,
error_message=error_message,
timezone=timezone)
self.extremes = dict(min=self.formatter(minimum),
max=self.formatter(maximum))
def __call__(self, value):
ovalue = value
(value, msg) = IS_DATE.__call__(self, value)
if msg is not None:
return (value, msg)
if self.minimum and self.minimum > value:
return (ovalue, translate(self.error_message) % self.extremes)
if self.maximum and value > self.maximum:
return (ovalue, translate(self.error_message) % self.extremes)
return (value, None)
class IS_DATETIME_IN_RANGE(IS_DATETIME):
"""
example::
>>> v = IS_DATETIME_IN_RANGE(\
minimum=datetime.datetime(2008,1,1,12,20), \
maximum=datetime.datetime(2009,12,31,12,20), \
format="%m/%d/%Y %H:%M",error_message="Oops")
>>> v('03/03/2008 12:40')
(datetime.datetime(2008, 3, 3, 12, 40), None)
>>> v('03/03/2010 10:34')
('03/03/2010 10:34', 'oops')
>>> v(datetime.datetime(2008,3,3,0,0))
(datetime.datetime(2008, 3, 3, 0, 0), None)
>>> v(datetime.datetime(2010,3,3,0,0))
(datetime.datetime(2010, 3, 3, 0, 0), 'oops')
"""
def __init__(self,
minimum=None,
maximum=None,
format='%Y-%m-%d %H:%M:%S',
error_message=None,
timezone=None):
self.minimum = minimum
self.maximum = maximum
if error_message is None:
if minimum is None:
error_message = "Enter date and time on or before %(max)s"
elif maximum is None:
error_message = "Enter date and time on or after %(min)s"
else:
error_message = "Enter date and time in range %(min)s %(max)s"
IS_DATETIME.__init__(self,
format=format,
error_message=error_message,
timezone=timezone)
self.extremes = dict(min=self.formatter(minimum),
max=self.formatter(maximum))
def __call__(self, value):
ovalue = value
(value, msg) = IS_DATETIME.__call__(self, value)
if msg is not None:
return (value, msg)
if self.minimum and self.minimum > value:
return (ovalue, translate(self.error_message) % self.extremes)
if self.maximum and value > self.maximum:
return (ovalue, translate(self.error_message) % self.extremes)
return (value, None)
class IS_LIST_OF(Validator):
def __init__(self, other=None, minimum=0, maximum=100,
error_message=None):
self.other = other
self.minimum = minimum
self.maximum = maximum
self.error_message = error_message or "Enter between %(min)g and %(max)g values"
def __call__(self, value):
ivalue = value
if not isinstance(value, list):
ivalue = [ivalue]
if not self.minimum is None and len(ivalue) < self.minimum:
return (ivalue, translate(self.error_message) % dict(min=self.minimum, max=self.maximum))
if not self.maximum is None and len(ivalue) > self.maximum:
return (ivalue, translate(self.error_message) % dict(min=self.minimum, max=self.maximum))
new_value = []
other = self.other
if self.other:
if not isinstance(other, (list,tuple)):
other = [other]
for item in ivalue:
if item.strip():
v = item
for validator in other:
(v, e) = validator(v)
if e:
return (ivalue, e)
new_value.append(v)
ivalue = new_value
return (ivalue, None)
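# Usage sketch for IS_LIST_OF (illustrative; the class has no doctests of
# its own). Assuming web2py's IS_INT_IN_RANGE, defined earlier in this
# module, every item is passed through the chained validator(s):
#
#     >>> IS_LIST_OF(IS_INT_IN_RANGE(1, 100))(['1', '2'])
#     ([1, 2], None)
#     >>> IS_LIST_OF(IS_INT_IN_RANGE(1, 100), minimum=3)(['1', '2'])[1] is None
#     False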
class IS_LOWER(Validator):
"""
convert to lower case
>>> IS_LOWER()('ABC')
('abc', None)
>>> IS_LOWER()('Ñ')
('\\xc3\\xb1', None)
"""
def __call__(self, value):
return (value.decode('utf8').lower().encode('utf8'), None)
class IS_UPPER(Validator):
"""
convert to upper case
>>> IS_UPPER()('abc')
('ABC', None)
>>> IS_UPPER()('ñ')
('\\xc3\\x91', None)
"""
def __call__(self, value):
return (value.decode('utf8').upper().encode('utf8'), None)
def urlify(s, maxlen=80, keep_underscores=False):
"""
Convert incoming string to a simplified ASCII subset.
if (keep_underscores): underscores are retained in the string
else: underscores are translated to hyphens (default)
"""
if isinstance(s, str):
s = s.decode('utf-8') # to unicode
s = s.lower() # to lowercase
s = unicodedata.normalize('NFKD', s) # replace special characters
s = s.encode('ascii', 'ignore') # encode as ASCII
s = re.sub('&\w+?;', '', s) # strip html entities
if keep_underscores:
s = re.sub('\s+', '-', s) # whitespace to hyphens
s = re.sub('[^\w\-]', '', s)
# strip all but alphanumeric/underscore/hyphen
else:
s = re.sub('[\s_]+', '-', s) # whitespace & underscores to hyphens
s = re.sub('[^a-z0-9\-]', '', s) # strip all but alphanumeric/hyphen
s = re.sub('[-_][-_]+', '-', s) # collapse strings of hyphens
s = s.strip('-') # remove leading and trailing hyphens
return s[:maxlen] # enforce maximum length
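# Illustrative examples of urlify() (the values follow from the
# transformation steps above; see also the IS_SLUG doctests below):
#
#     >>> urlify('Hello, Wörld!')
#     'hello-world'
#     >>> urlify('a_b c', keep_underscores=True)
#     'a_b-c'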
class IS_SLUG(Validator):
"""
convert arbitrary text string to a slug
>>> IS_SLUG()('abc123')
('abc123', None)
>>> IS_SLUG()('ABC123')
('abc123', None)
>>> IS_SLUG()('abc-123')
('abc-123', None)
>>> IS_SLUG()('abc--123')
('abc-123', None)
>>> IS_SLUG()('abc 123')
('abc-123', None)
>>> IS_SLUG()('abc\t_123')
('abc-123', None)
>>> IS_SLUG()('-abc-')
('abc', None)
>>> IS_SLUG()('--a--b--_ -c--')
('a-b-c', None)
>>> IS_SLUG()('abc&123')
('abc123', None)
>>> IS_SLUG()('abc&123&def')
('abc123def', None)
>>> IS_SLUG()('ñ')
('n', None)
>>> IS_SLUG(maxlen=4)('abc123')
('abc1', None)
>>> IS_SLUG()('abc_123')
('abc-123', None)
>>> IS_SLUG(keep_underscores=False)('abc_123')
('abc-123', None)
>>> IS_SLUG(keep_underscores=True)('abc_123')
('abc_123', None)
>>> IS_SLUG(check=False)('abc')
('abc', None)
>>> IS_SLUG(check=True)('abc')
('abc', None)
>>> IS_SLUG(check=False)('a bc')
('a-bc', None)
>>> IS_SLUG(check=True)('a bc')
('a bc', 'must be slug')
"""
@staticmethod
def urlify(value, maxlen=80, keep_underscores=False):
return urlify(value, maxlen, keep_underscores)
def __init__(self, maxlen=80, check=False, error_message='Must be slug', keep_underscores=False):
self.maxlen = maxlen
self.check = check
self.error_message = error_message
self.keep_underscores = keep_underscores
def __call__(self, value):
if self.check and value != urlify(value, self.maxlen, self.keep_underscores):
return (value, translate(self.error_message))
return (urlify(value, self.maxlen, self.keep_underscores), None)
class ANY_OF(Validator):
"""
test if any of the validators in a list return successfully
>>> ANY_OF([IS_EMAIL(),IS_ALPHANUMERIC()])('[email protected]')
('[email protected]', None)
>>> ANY_OF([IS_EMAIL(),IS_ALPHANUMERIC()])('abco')
('abco', None)
>>> ANY_OF([IS_EMAIL(),IS_ALPHANUMERIC()])('@ab.co')
('@ab.co', 'enter only letters, numbers, and underscore')
>>> ANY_OF([IS_ALPHANUMERIC(),IS_EMAIL()])('@ab.co')
('@ab.co', 'enter a valid email address')
"""
def __init__(self, subs):
self.subs = subs
def __call__(self, value):
for validator in self.subs:
value, error = validator(value)
if error == None:
break
return value, error
def formatter(self, value):
# Use the formatter of the first subvalidator
# that validates the value and has a formatter
for validator in self.subs:
            if hasattr(validator, 'formatter') and validator(value)[1] is None:
return validator.formatter(value)
class IS_EMPTY_OR(Validator):
"""
    accepts an empty value (returning `null`), otherwise applies the
    wrapped validator(s)
>>> IS_EMPTY_OR(IS_EMAIL())('[email protected]')
('[email protected]', None)
>>> IS_EMPTY_OR(IS_EMAIL())(' ')
(None, None)
>>> IS_EMPTY_OR(IS_EMAIL(), null='abc')(' ')
('abc', None)
>>> IS_EMPTY_OR(IS_EMAIL(), null='abc', empty_regex='def')('def')
('abc', None)
>>> IS_EMPTY_OR(IS_EMAIL())('abc')
('abc', 'enter a valid email address')
>>> IS_EMPTY_OR(IS_EMAIL())(' abc ')
('abc', 'enter a valid email address')
"""
def __init__(self, other, null=None, empty_regex=None):
(self.other, self.null) = (other, null)
if empty_regex is not None:
self.empty_regex = re.compile(empty_regex)
else:
self.empty_regex = None
if hasattr(other, 'multiple'):
self.multiple = other.multiple
if hasattr(other, 'options'):
self.options = self._options
def _options(self):
options = self.other.options()
if (not options or options[0][0] != '') and not self.multiple:
options.insert(0, ('', ''))
return options
def set_self_id(self, id):
if isinstance(self.other, (list, tuple)):
for item in self.other:
if hasattr(item, 'set_self_id'):
item.set_self_id(id)
else:
if hasattr(self.other, 'set_self_id'):
self.other.set_self_id(id)
def __call__(self, value):
value, empty = is_empty(value, empty_regex=self.empty_regex)
if empty:
return (self.null, None)
if isinstance(self.other, (list, tuple)):
error = None
for item in self.other:
value, error = item(value)
if error:
break
return value, error
else:
return self.other(value)
def formatter(self, value):
if hasattr(self.other, 'formatter'):
return self.other.formatter(value)
return value
IS_NULL_OR = IS_EMPTY_OR # for backward compatibility
class CLEANUP(Validator):
"""
example::
INPUT(_type='text', _name='name', requires=CLEANUP())
removes special characters on validation
"""
REGEX_CLEANUP = re.compile('[^\x09\x0a\x0d\x20-\x7e]')
def __init__(self, regex=None):
self.regex = self.REGEX_CLEANUP if regex is None \
else re.compile(regex)
def __call__(self, value):
v = self.regex.sub('', str(value).strip())
return (v, None)
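# Quick sketch of CLEANUP (illustrative): the default regex keeps tab,
# newline, carriage return and printable ASCII, so control characters and
# other bytes are stripped:
#
#     >>> CLEANUP()('abc\x00\x1fdef')
#     ('abcdef', None)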
class LazyCrypt(object):
"""
Stores a lazy password hash
"""
def __init__(self, crypt, password):
"""
crypt is an instance of the CRYPT validator,
password is the password as inserted by the user
"""
self.crypt = crypt
self.password = password
self.crypted = None
def __str__(self):
"""
        Encrypts self.password and caches the result in self.crypted.
        If self.crypt.salt is set, the output is in the format
        <algorithm>$<salt>$<hash>.
        Tries to read the digest_alg from the key (if it contains one),
        otherwise assumes the default digest_alg. If there is no key at all,
        sets key=''.
        If a salt is specified it is used; if salt is True, the salt is set
        to a uuid-derived value.
        (this should all be backward compatible)
Options:
key = 'uuid'
key = 'md5:uuid'
key = 'sha512:uuid'
...
key = 'pbkdf2(1000,64,sha512):uuid' 1000 iterations and 64 chars length
"""
if self.crypted:
return self.crypted
if self.crypt.key:
if ':' in self.crypt.key:
digest_alg, key = self.crypt.key.split(':', 1)
else:
digest_alg, key = self.crypt.digest_alg, self.crypt.key
else:
digest_alg, key = self.crypt.digest_alg, ''
if self.crypt.salt:
if self.crypt.salt == True:
salt = str(web2py_uuid()).replace('-', '')[-16:]
else:
salt = self.crypt.salt
else:
salt = ''
hashed = simple_hash(self.password, key, salt, digest_alg)
self.crypted = '%s$%s$%s' % (digest_alg, salt, hashed)
return self.crypted
def __eq__(self, stored_password):
"""
compares the current lazy crypted password with a stored password
"""
# LazyCrypt objects comparison
if isinstance(stored_password, self.__class__):
return ((self is stored_password) or
((self.crypt.key == stored_password.crypt.key) and
(self.password == stored_password.password)))
if self.crypt.key:
if ':' in self.crypt.key:
key = self.crypt.key.split(':')[1]
else:
key = self.crypt.key
else:
key = ''
if stored_password is None:
return False
elif stored_password.count('$') == 2:
(digest_alg, salt, hash) = stored_password.split('$')
h = simple_hash(self.password, key, salt, digest_alg)
temp_pass = '%s$%s$%s' % (digest_alg, salt, h)
else: # no salting
# guess digest_alg
digest_alg = DIGEST_ALG_BY_SIZE.get(len(stored_password), None)
if not digest_alg:
return False
else:
temp_pass = simple_hash(self.password, key, '', digest_alg)
return temp_pass == stored_password
def __ne__(self, other):
return not self.__eq__(other)
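# Minimal sketch of the lazy behaviour (illustrative; CRYPT is defined just
# below): nothing is hashed until the LazyCrypt object is coerced to a
# string or compared, and __eq__ re-hashes using the salt recovered from
# the stored value:
#
#     >>> lazy = CRYPT(digest_alg='sha1', salt=True)('secret')[0]
#     >>> stored = str(lazy)                  # hashing happens here
#     >>> stored.startswith('sha1$')
#     True
#     >>> CRYPT(digest_alg='sha1', salt=True)('secret')[0] == stored
#     True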
class CRYPT(object):
"""
example::
INPUT(_type='text', _name='name', requires=CRYPT())
encodes the value on validation with a digest.
If no arguments are provided CRYPT uses the MD5 algorithm.
If the key argument is provided the HMAC+MD5 algorithm is used.
If the digest_alg is specified this is used to replace the
MD5 with, for example, SHA512. The digest_alg can be
the name of a hashlib algorithm as a string or the algorithm itself.
    min_length is the minimal password length (default 0); use IS_STRONG
    for serious security checks.
    error_message is the message shown if the password is too short.
    Notice that an empty password is accepted but is invalid: it will not
    allow the user to log back in, and junk is stored as the hashed password.
    Specify an algorithm, or by default pbkdf2(1000,20,sha512) is used.
    Typical available algorithms:
      md5, sha1, sha224, sha256, sha384, sha512
    If salt is given, the password is hashed with that salt; if salt is True,
    a salt is generated automatically. In either case an encrypted password
    string is returned in the following format:
    <algorithm>$<salt>$<hash>
    Important: the hashed password is returned as a LazyCrypt object and is
    computed only if needed.
    The LazyCrypt object also knows how to compare itself with an existing
    salted password.
Supports standard algorithms
>>> for alg in ('md5','sha1','sha256','sha384','sha512'):
... print str(CRYPT(digest_alg=alg,salt=True)('test')[0])
md5$...$...
sha1$...$...
sha256$...$...
sha384$...$...
sha512$...$...
The syntax is always alg$salt$hash
Supports for pbkdf2
>>> alg = 'pbkdf2(1000,20,sha512)'
>>> print str(CRYPT(digest_alg=alg,salt=True)('test')[0])
pbkdf2(1000,20,sha512)$...$...
An optional hmac_key can be specified and it is used as salt prefix
>>> a = str(CRYPT(digest_alg='md5',key='mykey',salt=True)('test')[0])
>>> print a
md5$...$...
Even if the algorithm changes the hash can still be validated
>>> CRYPT(digest_alg='sha1',key='mykey',salt=True)('test')[0] == a
True
If no salt is specified CRYPT can guess the algorithms from length:
>>> a = str(CRYPT(digest_alg='sha1',salt=False)('test')[0])
>>> a
'sha1$$a94a8fe5ccb19ba61c4c0873d391e987982fbbd3'
>>> CRYPT(digest_alg='sha1',salt=False)('test')[0] == a
True
>>> CRYPT(digest_alg='sha1',salt=False)('test')[0] == a[6:]
True
>>> CRYPT(digest_alg='md5',salt=False)('test')[0] == a
True
>>> CRYPT(digest_alg='md5',salt=False)('test')[0] == a[6:]
True
"""
def __init__(self,
key=None,
digest_alg='pbkdf2(1000,20,sha512)',
min_length=0,
error_message='Too short', salt=True,
max_length=1024):
"""
important, digest_alg='md5' is not the default hashing algorithm for
web2py. This is only an example of usage of this function.
The actual hash algorithm is determined from the key which is
generated by web2py in tools.py. This defaults to hmac+sha512.
"""
self.key = key
self.digest_alg = digest_alg
self.min_length = min_length
self.max_length = max_length
self.error_message = error_message
self.salt = salt
def __call__(self, value):
value = value and value[:self.max_length]
if len(value) < self.min_length:
return ('', translate(self.error_message))
return (LazyCrypt(self, value), None)
# entropy calculator for IS_STRONG
#
lowerset = frozenset(unicode('abcdefghijklmnopqrstuvwxyz'))
upperset = frozenset(unicode('ABCDEFGHIJKLMNOPQRSTUVWXYZ'))
numberset = frozenset(unicode('0123456789'))
sym1set = frozenset(unicode('!@#$%^&*()'))
sym2set = frozenset(unicode('~`-_=+[]{}\\|;:\'",.<>?/'))
otherset = frozenset(
unicode('0123456789abcdefghijklmnopqrstuvwxyz')) # anything else
def calc_entropy(string):
" calculate a simple entropy for a given string "
import math
alphabet = 0 # alphabet size
other = set()
seen = set()
lastset = None
if isinstance(string, str):
string = unicode(string, encoding='utf8')
for c in string:
# classify this character
inset = otherset
for cset in (lowerset, upperset, numberset, sym1set, sym2set):
if c in cset:
inset = cset
break
# calculate effect of character on alphabet size
if inset not in seen:
seen.add(inset)
alphabet += len(inset) # credit for a new character set
elif c not in other:
alphabet += 1 # credit for unique characters
other.add(c)
if inset is not lastset:
alphabet += 1 # credit for set transitions
            lastset = inset  # not cset: cset is stale when c fell into otherset
entropy = len(
string) * math.log(alphabet) / 0.6931471805599453 # math.log(2)
return round(entropy, 2)
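# Rough feel for the scoring (illustrative; the 32.35 value is the same one
# the IS_STRONG doctests below rely on). Longer strings and transitions
# between character classes raise the estimate:
#
#     >>> calc_entropy('abc123')
#     32.35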
class IS_STRONG(object):
"""
example::
INPUT(_type='password', _name='passwd',
requires=IS_STRONG(min=10, special=2, upper=2))
enforces complexity requirements on a field
>>> IS_STRONG(es=True)('Abcd1234')
('Abcd1234',
'Must include at least 1 of the following: ~!@#$%^&*()_+-=?<>,.:;{}[]|')
>>> IS_STRONG(es=True)('Abcd1234!')
('Abcd1234!', None)
>>> IS_STRONG(es=True, entropy=1)('a')
('a', None)
>>> IS_STRONG(es=True, entropy=1, min=2)('a')
('a', 'Minimum length is 2')
>>> IS_STRONG(es=True, entropy=100)('abc123')
('abc123', 'Entropy (32.35) less than required (100)')
>>> IS_STRONG(es=True, entropy=100)('and')
('and', 'Entropy (14.57) less than required (100)')
>>> IS_STRONG(es=True, entropy=100)('aaa')
('aaa', 'Entropy (14.42) less than required (100)')
>>> IS_STRONG(es=True, entropy=100)('a1d')
('a1d', 'Entropy (15.97) less than required (100)')
>>> IS_STRONG(es=True, entropy=100)('añd')
('a\\xc3\\xb1d', 'Entropy (18.13) less than required (100)')
"""
def __init__(self, min=None, max=None, upper=None, lower=None, number=None,
entropy=None,
special=None, specials=r'~!@#$%^&*()_+-=?<>,.:;{}[]|',
invalid=' "', error_message=None, es=False):
self.entropy = entropy
if entropy is None:
# enforce default requirements
self.min = 8 if min is None else min
self.max = max # was 20, but that doesn't make sense
self.upper = 1 if upper is None else upper
self.lower = 1 if lower is None else lower
self.number = 1 if number is None else number
self.special = 1 if special is None else special
else:
# by default, an entropy spec is exclusive
self.min = min
self.max = max
self.upper = upper
self.lower = lower
self.number = number
self.special = special
self.specials = specials
self.invalid = invalid
self.error_message = error_message
self.estring = es # return error message as string (for doctest)
def __call__(self, value):
failures = []
        # accept values made up entirely of five or more '*' characters
        # (masked placeholders standing in for an unchanged stored password)
        if value and len(value) == value.count('*') > 4:
return (value, None)
if self.entropy is not None:
entropy = calc_entropy(value)
if entropy < self.entropy:
failures.append(translate("Entropy (%(have)s) less than required (%(need)s)")
% dict(have=entropy, need=self.entropy))
if type(self.min) == int and self.min > 0:
if not len(value) >= self.min:
failures.append(translate("Minimum length is %s") % self.min)
if type(self.max) == int and self.max > 0:
if not len(value) <= self.max:
failures.append(translate("Maximum length is %s") % self.max)
if type(self.special) == int:
all_special = [ch in value for ch in self.specials]
if self.special > 0:
if not all_special.count(True) >= self.special:
failures.append(translate("Must include at least %s of the following: %s")
% (self.special, self.specials))
if self.invalid:
all_invalid = [ch in value for ch in self.invalid]
if all_invalid.count(True) > 0:
failures.append(translate("May not contain any of the following: %s")
% self.invalid)
if type(self.upper) == int:
all_upper = re.findall("[A-Z]", value)
if self.upper > 0:
if not len(all_upper) >= self.upper:
failures.append(translate("Must include at least %s upper case")
% str(self.upper))
else:
if len(all_upper) > 0:
failures.append(
translate("May not include any upper case letters"))
if type(self.lower) == int:
all_lower = re.findall("[a-z]", value)
if self.lower > 0:
if not len(all_lower) >= self.lower:
failures.append(translate("Must include at least %s lower case")
% str(self.lower))
else:
if len(all_lower) > 0:
failures.append(
translate("May not include any lower case letters"))
if type(self.number) == int:
all_number = re.findall("[0-9]", value)
if self.number > 0:
numbers = "number"
if self.number > 1:
numbers = "numbers"
if not len(all_number) >= self.number:
failures.append(translate("Must include at least %s %s")
% (str(self.number), numbers))
else:
if len(all_number) > 0:
failures.append(translate("May not include any numbers"))
if len(failures) == 0:
return (value, None)
if not self.error_message:
if self.estring:
return (value, '|'.join(failures))
from html import XML
return (value, XML('<br />'.join(failures)))
else:
return (value, translate(self.error_message))
class IS_IN_SUBSET(IS_IN_SET):
REGEX_W = re.compile('\w+')
def __init__(self, *a, **b):
IS_IN_SET.__init__(self, *a, **b)
def __call__(self, value):
values = self.REGEX_W.findall(str(value))
failures = [x for x in values if IS_IN_SET.__call__(self, x)[1]]
if failures:
return (value, translate(self.error_message))
return (value, None)
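# Usage sketch for IS_IN_SUBSET (illustrative; the class has no doctests of
# its own): the value is split into \w+ tokens and each token must pass the
# underlying IS_IN_SET check:
#
#     >>> IS_IN_SUBSET(('red', 'green', 'blue'))('red green')[1] is None
#     True
#     >>> IS_IN_SUBSET(('red', 'green', 'blue'))('red yellow')[1] is None
#     False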
class IS_IMAGE(Validator):
"""
Checks if file uploaded through file input was saved in one of selected
image formats and has dimensions (width and height) within given boundaries.
Does *not* check for maximum file size (use IS_LENGTH for that). Returns
validation failure if no data was uploaded.
Supported file formats: BMP, GIF, JPEG, PNG.
Code parts taken from
http://mail.python.org/pipermail/python-list/2007-June/617126.html
Arguments:
extensions: iterable containing allowed *lowercase* image file extensions
('jpg' extension of uploaded file counts as 'jpeg')
maxsize: iterable containing maximum width and height of the image
minsize: iterable containing minimum width and height of the image
    Use (-1, -1) as minsize to bypass the image size check.
Examples::
#Check if uploaded file is in any of supported image formats:
INPUT(_type='file', _name='name', requires=IS_IMAGE())
#Check if uploaded file is either JPEG or PNG:
INPUT(_type='file', _name='name',
requires=IS_IMAGE(extensions=('jpeg', 'png')))
#Check if uploaded file is PNG with maximum size of 200x200 pixels:
INPUT(_type='file', _name='name',
requires=IS_IMAGE(extensions=('png'), maxsize=(200, 200)))
"""
def __init__(self,
extensions=('bmp', 'gif', 'jpeg', 'png'),
maxsize=(10000, 10000),
minsize=(0, 0),
error_message='Invalid image'):
self.extensions = extensions
self.maxsize = maxsize
self.minsize = minsize
self.error_message = error_message
def __call__(self, value):
try:
extension = value.filename.rfind('.')
assert extension >= 0
extension = value.filename[extension + 1:].lower()
if extension == 'jpg':
extension = 'jpeg'
assert extension in self.extensions
if extension == 'bmp':
width, height = self.__bmp(value.file)
elif extension == 'gif':
width, height = self.__gif(value.file)
elif extension == 'jpeg':
width, height = self.__jpeg(value.file)
elif extension == 'png':
width, height = self.__png(value.file)
else:
width = -1
height = -1
assert self.minsize[0] <= width <= self.maxsize[0] \
and self.minsize[1] <= height <= self.maxsize[1]
value.file.seek(0)
return (value, None)
except:
return (value, translate(self.error_message))
def __bmp(self, stream):
if stream.read(2) == 'BM':
stream.read(16)
return struct.unpack("<LL", stream.read(8))
return (-1, -1)
    def __gif(self, stream):
        if stream.read(6) in ('GIF87a', 'GIF89a'):
            data = stream.read(5)  # logical screen width, height, flags
            if len(data) == 5:
                return tuple(struct.unpack("<HHB", data)[:-1])
        return (-1, -1)
def __jpeg(self, stream):
if stream.read(2) == '\xFF\xD8':
while True:
(marker, code, length) = struct.unpack("!BBH", stream.read(4))
if marker != 0xFF:
break
elif code >= 0xC0 and code <= 0xC3:
return tuple(reversed(
struct.unpack("!xHH", stream.read(5))))
else:
stream.read(length - 2)
return (-1, -1)
def __png(self, stream):
if stream.read(8) == '\211PNG\r\n\032\n':
stream.read(4)
if stream.read(4) == "IHDR":
return struct.unpack("!LL", stream.read(8))
return (-1, -1)
class IS_UPLOAD_FILENAME(Validator):
"""
Checks if name and extension of file uploaded through file input matches
given criteria.
Does *not* ensure the file type in any way. Returns validation failure
if no data was uploaded.
Arguments::
filename: filename (before dot) regex
extension: extension (after dot) regex
lastdot: which dot should be used as a filename / extension separator:
True means last dot, eg. file.png -> file / png
False means first dot, eg. file.tar.gz -> file / tar.gz
case: 0 - keep the case, 1 - transform the string into lowercase (default),
2 - transform the string into uppercase
If there is no dot present, extension checks will be done against empty
string and filename checks against whole value.
Examples::
#Check if file has a pdf extension (case insensitive):
INPUT(_type='file', _name='name',
requires=IS_UPLOAD_FILENAME(extension='pdf'))
#Check if file has a tar.gz extension and name starting with backup:
INPUT(_type='file', _name='name',
requires=IS_UPLOAD_FILENAME(filename='backup.*',
extension='tar.gz', lastdot=False))
#Check if file has no extension and name matching README
#(case sensitive):
INPUT(_type='file', _name='name',
requires=IS_UPLOAD_FILENAME(filename='^README$',
extension='^$', case=0))
"""
def __init__(self, filename=None, extension=None, lastdot=True, case=1,
error_message='Enter valid filename'):
if isinstance(filename, str):
filename = re.compile(filename)
if isinstance(extension, str):
extension = re.compile(extension)
self.filename = filename
self.extension = extension
self.lastdot = lastdot
self.case = case
self.error_message = error_message
def __call__(self, value):
try:
string = value.filename
except:
return (value, translate(self.error_message))
if self.case == 1:
string = string.lower()
elif self.case == 2:
string = string.upper()
if self.lastdot:
dot = string.rfind('.')
else:
dot = string.find('.')
if dot == -1:
dot = len(string)
if self.filename and not self.filename.match(string[:dot]):
return (value, translate(self.error_message))
elif self.extension and not self.extension.match(string[dot + 1:]):
return (value, translate(self.error_message))
else:
return (value, None)
class IS_IPV4(Validator):
"""
Checks if field's value is an IP version 4 address in decimal form. Can
be set to force addresses from certain range.
IPv4 regex taken from: http://regexlib.com/REDetails.aspx?regexp_id=1411
Arguments:
minip: lowest allowed address; accepts:
str, eg. 192.168.0.1
list or tuple of octets, eg. [192, 168, 0, 1]
maxip: highest allowed address; same as above
invert: True to allow addresses only from outside of given range; note
that range boundaries are not matched this way
is_localhost: localhost address treatment:
None (default): indifferent
True (enforce): query address must match localhost address
(127.0.0.1)
False (forbid): query address must not match localhost
address
is_private: same as above, except that query address is checked against
two address ranges: 172.16.0.0 - 172.31.255.255 and
192.168.0.0 - 192.168.255.255
is_automatic: same as above, except that query address is checked against
one address range: 169.254.0.0 - 169.254.255.255
Minip and maxip may also be lists or tuples of addresses in all above
forms (str, int, list / tuple), allowing setup of multiple address ranges:
minip = (minip1, minip2, ... minipN)
| | |
| | |
maxip = (maxip1, maxip2, ... maxipN)
Longer iterable will be truncated to match length of shorter one.
Examples::
#Check for valid IPv4 address:
INPUT(_type='text', _name='name', requires=IS_IPV4())
#Check for valid IPv4 address belonging to specific range:
INPUT(_type='text', _name='name',
requires=IS_IPV4(minip='100.200.0.0', maxip='100.200.255.255'))
#Check for valid IPv4 address belonging to either 100.110.0.0 -
#100.110.255.255 or 200.50.0.0 - 200.50.0.255 address range:
INPUT(_type='text', _name='name',
requires=IS_IPV4(minip=('100.110.0.0', '200.50.0.0'),
maxip=('100.110.255.255', '200.50.0.255')))
#Check for valid IPv4 address belonging to private address space:
INPUT(_type='text', _name='name', requires=IS_IPV4(is_private=True))
#Check for valid IPv4 address that is not a localhost address:
INPUT(_type='text', _name='name', requires=IS_IPV4(is_localhost=False))
>>> IS_IPV4()('1.2.3.4')
('1.2.3.4', None)
>>> IS_IPV4()('255.255.255.255')
('255.255.255.255', None)
>>> IS_IPV4()('1.2.3.4 ')
('1.2.3.4 ', 'enter valid IPv4 address')
>>> IS_IPV4()('1.2.3.4.5')
('1.2.3.4.5', 'enter valid IPv4 address')
>>> IS_IPV4()('123.123')
('123.123', 'enter valid IPv4 address')
>>> IS_IPV4()('1111.2.3.4')
('1111.2.3.4', 'enter valid IPv4 address')
>>> IS_IPV4()('0111.2.3.4')
('0111.2.3.4', 'enter valid IPv4 address')
>>> IS_IPV4()('256.2.3.4')
('256.2.3.4', 'enter valid IPv4 address')
>>> IS_IPV4()('300.2.3.4')
('300.2.3.4', 'enter valid IPv4 address')
>>> IS_IPV4(minip='1.2.3.4', maxip='1.2.3.4')('1.2.3.4')
('1.2.3.4', None)
>>> IS_IPV4(minip='1.2.3.5', maxip='1.2.3.9', error_message='Bad ip')('1.2.3.4')
('1.2.3.4', 'bad ip')
>>> IS_IPV4(maxip='1.2.3.4', invert=True)('127.0.0.1')
('127.0.0.1', None)
>>> IS_IPV4(maxip='1.2.3.4', invert=True)('1.2.3.4')
('1.2.3.4', 'enter valid IPv4 address')
>>> IS_IPV4(is_localhost=True)('127.0.0.1')
('127.0.0.1', None)
>>> IS_IPV4(is_localhost=True)('1.2.3.4')
('1.2.3.4', 'enter valid IPv4 address')
>>> IS_IPV4(is_localhost=False)('127.0.0.1')
('127.0.0.1', 'enter valid IPv4 address')
>>> IS_IPV4(maxip='100.0.0.0', is_localhost=True)('127.0.0.1')
('127.0.0.1', 'enter valid IPv4 address')
"""
regex = re.compile(
'^(([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])\.){3}([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])$')
numbers = (16777216, 65536, 256, 1)
localhost = 2130706433
    # 172.16.0.0 - 172.31.255.255 and 192.168.0.0 - 192.168.255.255
    private = ((2886729728L, 2887778303L), (3232235520L, 3232301055L))
automatic = (2851995648L, 2852061183L)
def __init__(
self,
minip='0.0.0.0',
maxip='255.255.255.255',
invert=False,
is_localhost=None,
is_private=None,
is_automatic=None,
error_message='Enter valid IPv4 address'):
for n, value in enumerate((minip, maxip)):
temp = []
if isinstance(value, str):
temp.append(value.split('.'))
elif isinstance(value, (list, tuple)):
if len(value) == len(filter(lambda item: isinstance(item, int), value)) == 4:
temp.append(value)
else:
for item in value:
if isinstance(item, str):
temp.append(item.split('.'))
elif isinstance(item, (list, tuple)):
temp.append(item)
numbers = []
for item in temp:
number = 0
for i, j in zip(self.numbers, item):
number += i * int(j)
numbers.append(number)
if n == 0:
self.minip = numbers
else:
self.maxip = numbers
self.invert = invert
self.is_localhost = is_localhost
self.is_private = is_private
self.is_automatic = is_automatic
self.error_message = error_message
def __call__(self, value):
if self.regex.match(value):
number = 0
for i, j in zip(self.numbers, value.split('.')):
number += i * int(j)
ok = False
for bottom, top in zip(self.minip, self.maxip):
if self.invert != (bottom <= number <= top):
ok = True
if not (self.is_localhost is None or self.is_localhost ==
(number == self.localhost)):
ok = False
            if not (self.is_private is None or self.is_private ==
                    (sum([bottom <= number <= top
                          for (bottom, top) in self.private]) > 0)):
ok = False
if not (self.is_automatic is None or self.is_automatic ==
(self.automatic[0] <= number <= self.automatic[1])):
ok = False
if ok:
return (value, None)
return (value, translate(self.error_message))
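# Worked example of the octet weighting used above (illustrative): an IPv4
# address is folded into a single integer with base-256 place values, so
# range checks become plain integer comparisons:
#
#     >>> weights = (16777216, 65536, 256, 1)
#     >>> sum(i * int(j) for i, j in zip(weights, '192.168.0.1'.split('.')))
#     3232235521
#     >>> IS_IPV4(minip='192.168.0.0', maxip='192.168.255.255')('192.168.0.1')
#     ('192.168.0.1', None)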
class IS_IPV6(Validator):
"""
Checks if field's value is an IP version 6 address. First attempts to
use the ipaddress library and falls back to contrib/ipaddr.py from Google
(https://code.google.com/p/ipaddr-py/)
Arguments:
is_private: None (default): indifferent
True (enforce): address must be in fc00::/7 range
False (forbid): address must NOT be in fc00::/7 range
is_link_local: Same as above but uses fe80::/10 range
is_reserved: Same as above but uses IETF reserved range
        is_multicast: Same as above but uses ff00::/8 range
is_routeable: Similar to above but enforces not private, link_local,
reserved or multicast
is_6to4: Same as above but uses 2002::/16 range
is_teredo: Same as above but uses 2001::/32 range
subnets: value must be a member of at least one from list of subnets
Examples:
#Check for valid IPv6 address:
INPUT(_type='text', _name='name', requires=IS_IPV6())
#Check for valid IPv6 address is a link_local address:
INPUT(_type='text', _name='name', requires=IS_IPV6(is_link_local=True))
#Check for valid IPv6 address that is Internet routeable:
INPUT(_type='text', _name='name', requires=IS_IPV6(is_routeable=True))
#Check for valid IPv6 address in specified subnet:
INPUT(_type='text', _name='name', requires=IS_IPV6(subnets=['2001::/32'])
>>> IS_IPV6()('fe80::126c:8ffa:fe22:b3af')
('fe80::126c:8ffa:fe22:b3af', None)
>>> IS_IPV6()('192.168.1.1')
('192.168.1.1', 'enter valid IPv6 address')
>>> IS_IPV6(error_message='Bad ip')('192.168.1.1')
('192.168.1.1', 'bad ip')
>>> IS_IPV6(is_link_local=True)('fe80::126c:8ffa:fe22:b3af')
('fe80::126c:8ffa:fe22:b3af', None)
>>> IS_IPV6(is_link_local=False)('fe80::126c:8ffa:fe22:b3af')
('fe80::126c:8ffa:fe22:b3af', 'enter valid IPv6 address')
>>> IS_IPV6(is_link_local=True)('2001::126c:8ffa:fe22:b3af')
('2001::126c:8ffa:fe22:b3af', 'enter valid IPv6 address')
>>> IS_IPV6(is_multicast=True)('2001::126c:8ffa:fe22:b3af')
('2001::126c:8ffa:fe22:b3af', 'enter valid IPv6 address')
>>> IS_IPV6(is_multicast=True)('ff00::126c:8ffa:fe22:b3af')
('ff00::126c:8ffa:fe22:b3af', None)
>>> IS_IPV6(is_routeable=True)('2001::126c:8ffa:fe22:b3af')
('2001::126c:8ffa:fe22:b3af', None)
>>> IS_IPV6(is_routeable=True)('ff00::126c:8ffa:fe22:b3af')
('ff00::126c:8ffa:fe22:b3af', 'enter valid IPv6 address')
>>> IS_IPV6(subnets='2001::/32')('2001::8ffa:fe22:b3af')
('2001::8ffa:fe22:b3af', None)
>>> IS_IPV6(subnets='fb00::/8')('2001::8ffa:fe22:b3af')
('2001::8ffa:fe22:b3af', 'enter valid IPv6 address')
>>> IS_IPV6(subnets=['fc00::/8','2001::/32'])('2001::8ffa:fe22:b3af')
('2001::8ffa:fe22:b3af', None)
>>> IS_IPV6(subnets='invalidsubnet')('2001::8ffa:fe22:b3af')
('2001::8ffa:fe22:b3af', 'invalid subnet provided')
"""
def __init__(
self,
is_private=None,
is_link_local=None,
is_reserved=None,
is_multicast=None,
is_routeable=None,
is_6to4=None,
is_teredo=None,
subnets=None,
error_message='Enter valid IPv6 address'):
self.is_private = is_private
self.is_link_local = is_link_local
self.is_reserved = is_reserved
self.is_multicast = is_multicast
self.is_routeable = is_routeable
self.is_6to4 = is_6to4
self.is_teredo = is_teredo
self.subnets = subnets
self.error_message = error_message
def __call__(self, value):
try:
import ipaddress
except ImportError:
from gluon.contrib import ipaddr as ipaddress
try:
ip = ipaddress.IPv6Address(value)
ok = True
except ipaddress.AddressValueError:
return (value, translate(self.error_message))
if self.subnets:
# iterate through self.subnets to see if value is a member
ok = False
if isinstance(self.subnets, str):
self.subnets = [self.subnets]
for network in self.subnets:
try:
ipnet = ipaddress.IPv6Network(network)
except (ipaddress.NetmaskValueError, ipaddress.AddressValueError):
return (value, translate('invalid subnet provided'))
if ip in ipnet:
ok = True
if self.is_routeable:
self.is_private = False
self.is_link_local = False
self.is_reserved = False
self.is_multicast = False
if not (self.is_private is None or self.is_private ==
ip.is_private):
ok = False
if not (self.is_link_local is None or self.is_link_local ==
ip.is_link_local):
ok = False
if not (self.is_reserved is None or self.is_reserved ==
ip.is_reserved):
ok = False
if not (self.is_multicast is None or self.is_multicast ==
ip.is_multicast):
ok = False
if not (self.is_6to4 is None or self.is_6to4 ==
ip.is_6to4):
ok = False
if not (self.is_teredo is None or self.is_teredo ==
ip.is_teredo):
ok = False
if ok:
return (value, None)
return (value, translate(self.error_message))
class IS_IPADDRESS(Validator):
"""
Checks if field's value is an IP Address (v4 or v6). Can be set to force
addresses from within a specific range. Checks are done with the correct
IS_IPV4 and IS_IPV6 validators.
Uses ipaddress library if found, falls back to PEP-3144 ipaddr.py from
Google (in contrib).
Universal arguments:
minip: lowest allowed address; accepts:
str, eg. 192.168.0.1
list or tuple of octets, eg. [192, 168, 0, 1]
maxip: highest allowed address; same as above
invert: True to allow addresses only from outside of given range; note
that range boundaries are not matched this way
IPv4 specific arguments:
is_localhost: localhost address treatment:
None (default): indifferent
True (enforce): query address must match localhost address
(127.0.0.1)
False (forbid): query address must not match localhost
address
is_private: same as above, except that query address is checked against
two address ranges: 172.16.0.0 - 172.31.255.255 and
192.168.0.0 - 192.168.255.255
is_automatic: same as above, except that query address is checked against
one address range: 169.254.0.0 - 169.254.255.255
is_ipv4: None (default): indifferent
True (enforce): must be an IPv4 address
False (forbid): must NOT be an IPv4 address
IPv6 specific arguments:
is_link_local: Same as above but uses fe80::/10 range
is_reserved: Same as above but uses IETF reserved range
        is_multicast: Same as above but uses ff00::/8 range
is_routeable: Similar to above but enforces not private, link_local,
reserved or multicast
is_6to4: Same as above but uses 2002::/16 range
is_teredo: Same as above but uses 2001::/32 range
subnets: value must be a member of at least one from list of subnets
is_ipv6: None (default): indifferent
True (enforce): must be an IPv6 address
False (forbid): must NOT be an IPv6 address
Minip and maxip may also be lists or tuples of addresses in all above
forms (str, int, list / tuple), allowing setup of multiple address ranges:
minip = (minip1, minip2, ... minipN)
| | |
| | |
maxip = (maxip1, maxip2, ... maxipN)
Longer iterable will be truncated to match length of shorter one.
>>> IS_IPADDRESS()('192.168.1.5')
('192.168.1.5', None)
>>> IS_IPADDRESS(is_ipv6=False)('192.168.1.5')
('192.168.1.5', None)
>>> IS_IPADDRESS()('255.255.255.255')
('255.255.255.255', None)
>>> IS_IPADDRESS()('192.168.1.5 ')
('192.168.1.5 ', 'enter valid IP address')
>>> IS_IPADDRESS()('192.168.1.1.5')
('192.168.1.1.5', 'enter valid IP address')
>>> IS_IPADDRESS()('123.123')
('123.123', 'enter valid IP address')
>>> IS_IPADDRESS()('1111.2.3.4')
('1111.2.3.4', 'enter valid IP address')
>>> IS_IPADDRESS()('0111.2.3.4')
('0111.2.3.4', 'enter valid IP address')
>>> IS_IPADDRESS()('256.2.3.4')
('256.2.3.4', 'enter valid IP address')
>>> IS_IPADDRESS()('300.2.3.4')
('300.2.3.4', 'enter valid IP address')
>>> IS_IPADDRESS(minip='192.168.1.0', maxip='192.168.1.255')('192.168.1.100')
('192.168.1.100', None)
>>> IS_IPADDRESS(minip='1.2.3.5', maxip='1.2.3.9', error_message='Bad ip')('1.2.3.4')
('1.2.3.4', 'bad ip')
>>> IS_IPADDRESS(maxip='1.2.3.4', invert=True)('127.0.0.1')
('127.0.0.1', None)
>>> IS_IPADDRESS(maxip='192.168.1.4', invert=True)('192.168.1.4')
('192.168.1.4', 'enter valid IP address')
>>> IS_IPADDRESS(is_localhost=True)('127.0.0.1')
('127.0.0.1', None)
>>> IS_IPADDRESS(is_localhost=True)('192.168.1.10')
('192.168.1.10', 'enter valid IP address')
>>> IS_IPADDRESS(is_localhost=False)('127.0.0.1')
('127.0.0.1', 'enter valid IP address')
>>> IS_IPADDRESS(maxip='100.0.0.0', is_localhost=True)('127.0.0.1')
('127.0.0.1', 'enter valid IP address')
>>> IS_IPADDRESS()('fe80::126c:8ffa:fe22:b3af')
('fe80::126c:8ffa:fe22:b3af', None)
>>> IS_IPADDRESS(is_ipv4=False)('fe80::126c:8ffa:fe22:b3af')
('fe80::126c:8ffa:fe22:b3af', None)
>>> IS_IPADDRESS()('fe80::126c:8ffa:fe22:b3af ')
('fe80::126c:8ffa:fe22:b3af ', 'enter valid IP address')
>>> IS_IPADDRESS(is_ipv4=True)('fe80::126c:8ffa:fe22:b3af')
('fe80::126c:8ffa:fe22:b3af', 'enter valid IP address')
>>> IS_IPADDRESS(is_ipv6=True)('192.168.1.1')
('192.168.1.1', 'enter valid IP address')
>>> IS_IPADDRESS(is_ipv6=True, error_message='Bad ip')('192.168.1.1')
('192.168.1.1', 'bad ip')
>>> IS_IPADDRESS(is_link_local=True)('fe80::126c:8ffa:fe22:b3af')
('fe80::126c:8ffa:fe22:b3af', None)
>>> IS_IPADDRESS(is_link_local=False)('fe80::126c:8ffa:fe22:b3af')
('fe80::126c:8ffa:fe22:b3af', 'enter valid IP address')
>>> IS_IPADDRESS(is_link_local=True)('2001::126c:8ffa:fe22:b3af')
('2001::126c:8ffa:fe22:b3af', 'enter valid IP address')
>>> IS_IPADDRESS(is_multicast=True)('2001::126c:8ffa:fe22:b3af')
('2001::126c:8ffa:fe22:b3af', 'enter valid IP address')
>>> IS_IPADDRESS(is_multicast=True)('ff00::126c:8ffa:fe22:b3af')
('ff00::126c:8ffa:fe22:b3af', None)
>>> IS_IPADDRESS(is_routeable=True)('2001::126c:8ffa:fe22:b3af')
('2001::126c:8ffa:fe22:b3af', None)
>>> IS_IPADDRESS(is_routeable=True)('ff00::126c:8ffa:fe22:b3af')
('ff00::126c:8ffa:fe22:b3af', 'enter valid IP address')
>>> IS_IPADDRESS(subnets='2001::/32')('2001::8ffa:fe22:b3af')
('2001::8ffa:fe22:b3af', None)
>>> IS_IPADDRESS(subnets='fb00::/8')('2001::8ffa:fe22:b3af')
('2001::8ffa:fe22:b3af', 'enter valid IP address')
>>> IS_IPADDRESS(subnets=['fc00::/8','2001::/32'])('2001::8ffa:fe22:b3af')
('2001::8ffa:fe22:b3af', None)
>>> IS_IPADDRESS(subnets='invalidsubnet')('2001::8ffa:fe22:b3af')
('2001::8ffa:fe22:b3af', 'invalid subnet provided')
"""
def __init__(
self,
minip='0.0.0.0',
maxip='255.255.255.255',
invert=False,
is_localhost=None,
is_private=None,
is_automatic=None,
is_ipv4=None,
is_link_local=None,
is_reserved=None,
is_multicast=None,
is_routeable=None,
is_6to4=None,
is_teredo=None,
subnets=None,
is_ipv6=None,
error_message='Enter valid IP address'):
        # each bound is wrapped in a 1-tuple, which IS_IPV4 accepts as a
        # tuple of range boundaries
        self.minip = (minip,)
        self.maxip = (maxip,)
        self.invert = invert
        self.is_localhost = is_localhost
        self.is_private = is_private
        self.is_automatic = is_automatic
        self.is_ipv4 = is_ipv4
self.is_link_local = is_link_local
self.is_reserved = is_reserved
self.is_multicast = is_multicast
self.is_routeable = is_routeable
self.is_6to4 = is_6to4
self.is_teredo = is_teredo
self.subnets = subnets
self.is_ipv6 = is_ipv6
self.error_message = error_message
def __call__(self, value):
try:
import ipaddress
except ImportError:
from gluon.contrib import ipaddr as ipaddress
try:
ip = ipaddress.ip_address(value)
        except ValueError:
return (value, translate(self.error_message))
if self.is_ipv4 and isinstance(ip, ipaddress.IPv6Address):
retval = (value, translate(self.error_message))
elif self.is_ipv6 and isinstance(ip, ipaddress.IPv4Address):
retval = (value, translate(self.error_message))
elif self.is_ipv4 or isinstance(ip, ipaddress.IPv4Address):
retval = IS_IPV4(
minip=self.minip,
maxip=self.maxip,
invert=self.invert,
is_localhost=self.is_localhost,
is_private=self.is_private,
is_automatic=self.is_automatic,
error_message=self.error_message
)(value)
elif self.is_ipv6 or isinstance(ip, ipaddress.IPv6Address):
retval = IS_IPV6(
is_private=self.is_private,
is_link_local=self.is_link_local,
is_reserved=self.is_reserved,
is_multicast=self.is_multicast,
is_routeable=self.is_routeable,
is_6to4=self.is_6to4,
is_teredo=self.is_teredo,
subnets=self.subnets,
error_message=self.error_message
)(value)
else:
retval = (value, translate(self.error_message))
return retval
|
bsd-3-clause
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/numpy/polynomial/laguerre.py
|
10
|
56151
|
"""
Objects for dealing with Laguerre series.
This module provides a number of objects (mostly functions) useful for
dealing with Laguerre series, including a `Laguerre` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `lagdomain` -- Laguerre series default domain, [0, 1].
- `lagzero` -- Laguerre series that evaluates identically to 0.
- `lagone` -- Laguerre series that evaluates identically to 1.
- `lagx` -- Laguerre series for the identity map, ``f(x) = x``.
Arithmetic
----------
- `lagmulx` -- multiply a Laguerre series in ``P_i(x)`` by ``x``.
- `lagadd` -- add two Laguerre series.
- `lagsub` -- subtract one Laguerre series from another.
- `lagmul` -- multiply two Laguerre series.
- `lagdiv` -- divide one Laguerre series by another.
- `lagval` -- evaluate a Laguerre series at given points.
- `lagval2d` -- evaluate a 2D Laguerre series at given points.
- `lagval3d` -- evaluate a 3D Laguerre series at given points.
- `laggrid2d` -- evaluate a 2D Laguerre series on a Cartesian product.
- `laggrid3d` -- evaluate a 3D Laguerre series on a Cartesian product.
Calculus
--------
- `lagder` -- differentiate a Laguerre series.
- `lagint` -- integrate a Laguerre series.
Misc Functions
--------------
- `lagfromroots` -- create a Laguerre series with specified roots.
- `lagroots` -- find the roots of a Laguerre series.
- `lagvander` -- Vandermonde-like matrix for Laguerre polynomials.
- `lagvander2d` -- Vandermonde-like matrix for 2D power series.
- `lagvander3d` -- Vandermonde-like matrix for 3D power series.
- `laggauss` -- Gauss-Laguerre quadrature, points and weights.
- `lagweight` -- Laguerre weight function.
- `lagcompanion` -- symmetrized companion matrix in Laguerre form.
- `lagfit` -- least-squares fit returning a Laguerre series.
- `lagtrim` -- trim leading coefficients from a Laguerre series.
- `lagline` -- Laguerre series of given straight line.
- `lag2poly` -- convert a Laguerre series to a polynomial.
- `poly2lag` -- convert a polynomial to a Laguerre series.
Classes
-------
- `Laguerre` -- A Laguerre series class.
See also
--------
`numpy.polynomial`
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from numpy.core.multiarray import normalize_axis_index
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'lagzero', 'lagone', 'lagx', 'lagdomain', 'lagline', 'lagadd',
'lagsub', 'lagmulx', 'lagmul', 'lagdiv', 'lagpow', 'lagval', 'lagder',
'lagint', 'lag2poly', 'poly2lag', 'lagfromroots', 'lagvander',
'lagfit', 'lagtrim', 'lagroots', 'Laguerre', 'lagval2d', 'lagval3d',
'laggrid2d', 'laggrid3d', 'lagvander2d', 'lagvander3d', 'lagcompanion',
'laggauss', 'lagweight']
lagtrim = pu.trimcoef
def poly2lag(pol):
"""
poly2lag(pol)
Convert a polynomial to a Laguerre series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Laguerre series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Laguerre
series.
See Also
--------
lag2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.laguerre import poly2lag
>>> poly2lag(np.arange(4))
array([ 23., -63., 58., -18.])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1):
res = lagadd(lagmulx(res), pol[i])
return res
def lag2poly(c):
"""
Convert a Laguerre series to a polynomial.
Convert an array representing the coefficients of a Laguerre series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
c : array_like
1-D array containing the Laguerre series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-D array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2lag
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.laguerre import lag2poly
>>> lag2poly([ 23., -63., 58., -18.])
array([ 0., 1., 2., 3.])
"""
from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
if n == 1:
return c
else:
c0 = c[-2]
c1 = c[-1]
# i is the current degree of c1
for i in range(n - 1, 1, -1):
tmp = c0
c0 = polysub(c[i - 2], (c1*(i - 1))/i)
c1 = polyadd(tmp, polysub((2*i - 1)*c1, polymulx(c1))/i)
return polyadd(c0, polysub(c1, polymulx(c1)))
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Laguerre
lagdomain = np.array([0, 1])
# Laguerre coefficients representing zero.
lagzero = np.array([0])
# Laguerre coefficients representing one.
lagone = np.array([1])
# Laguerre coefficients representing the identity x.
lagx = np.array([1, -1])
def lagline(off, scl):
"""
Laguerre series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Laguerre series for
``off + scl*x``.
See Also
--------
polyline, chebline
Examples
--------
>>> from numpy.polynomial.laguerre import lagline, lagval
>>> lagval(0,lagline(3, 2))
3.0
>>> lagval(1,lagline(3, 2))
5.0
"""
if scl != 0:
return np.array([off + scl, -scl])
else:
return np.array([off])
def lagfromroots(roots):
"""
Generate a Laguerre series with given roots.
The function returns the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
in Laguerre form, where the `r_n` are the roots specified in `roots`.
If a zero has multiplicity n, then it must appear in `roots` n times.
For instance, if 2 is a root of multiplicity three and 3 is a root of
multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
roots can appear in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x)
The coefficient of the last term is not generally 1 for monic
polynomials in Laguerre form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of coefficients. If all roots are real then `out` is a
real array, if some of the roots are complex, then `out` is complex
even if all the coefficients in the result are real (see Examples
below).
See Also
--------
polyfromroots, legfromroots, chebfromroots, hermfromroots,
hermefromroots.
Examples
--------
>>> from numpy.polynomial.laguerre import lagfromroots, lagval
>>> coef = lagfromroots((-1, 0, 1))
>>> lagval((-1, 0, 1), coef)
array([ 0., 0., 0.])
>>> coef = lagfromroots((-1j, 1j))
>>> lagval((-1j, 1j), coef)
array([ 0.+0.j, 0.+0.j])
"""
if len(roots) == 0:
return np.ones(1)
else:
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [lagline(-r, 1) for r in roots]
n = len(p)
while n > 1:
m, r = divmod(n, 2)
tmp = [lagmul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = lagmul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def lagadd(c1, c2):
"""
Add one Laguerre series to another.
Returns the sum of two Laguerre series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Laguerre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Laguerre series of their sum.
See Also
--------
lagsub, lagmul, lagdiv, lagpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Laguerre series
is a Laguerre series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.laguerre import lagadd
>>> lagadd([1, 2, 3], [1, 2, 3, 4])
array([ 2., 4., 6., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] += c2
ret = c1
else:
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def lagsub(c1, c2):
"""
Subtract one Laguerre series from another.
Returns the difference of two Laguerre series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Laguerre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Laguerre series coefficients representing their difference.
See Also
--------
lagadd, lagmul, lagdiv, lagpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Laguerre
series is a Laguerre series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.laguerre import lagsub
>>> lagsub([1, 2, 3, 4], [1, 2, 3])
array([ 0., 0., 0., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] -= c2
ret = c1
else:
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def lagmulx(c):
"""Multiply a Laguerre series by x.
Multiply the Laguerre series `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Laguerre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
The multiplication uses the recursion relationship for Laguerre
polynomials in the form
.. math::
xP_i(x) = (-(i + 1)*P_{i + 1}(x) + (2i + 1)P_{i}(x) - iP_{i - 1}(x))
Examples
--------
>>> from numpy.polynomial.laguerre import lagmulx
>>> lagmulx([1, 2, 3])
array([ -1., -1., 11., -9.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]
prd[1] = -c[0]
for i in range(1, len(c)):
prd[i + 1] = -c[i]*(i + 1)
prd[i] += c[i]*(2*i + 1)
prd[i - 1] -= c[i]*i
return prd
def lagmul(c1, c2):
"""
Multiply one Laguerre series by another.
Returns the product of two Laguerre series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Laguerre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Laguerre series coefficients representing their product.
See Also
--------
lagadd, lagsub, lagdiv, lagpow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Laguerre polynomial basis set. Thus, to express
the product as a Laguerre series, it is necessary to "reproject" the
product onto said basis set, which may produce "unintuitive" (but
correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial.laguerre import lagmul
>>> lagmul([1, 2, 3], [0, 1, 2])
array([ 8., -13., 38., -51., 36.])
"""
# s1, s2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c = c2
xs = c1
else:
c = c1
xs = c2
if len(c) == 1:
c0 = c[0]*xs
c1 = 0
elif len(c) == 2:
c0 = c[0]*xs
c1 = c[1]*xs
else:
nd = len(c)
c0 = c[-2]*xs
c1 = c[-1]*xs
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = lagsub(c[-i]*xs, (c1*(nd - 1))/nd)
c1 = lagadd(tmp, lagsub((2*nd - 1)*c1, lagmulx(c1))/nd)
return lagadd(c0, lagsub(c1, lagmulx(c1)))
def lagdiv(c1, c2):
"""
Divide one Laguerre series by another.
Returns the quotient-with-remainder of two Laguerre series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Laguerre series coefficients ordered from low to
high.
Returns
-------
[quo, rem] : ndarrays
Of Laguerre series coefficients representing the quotient and
remainder.
See Also
--------
lagadd, lagsub, lagmul, lagpow
Notes
-----
In general, the (polynomial) division of one Laguerre series by another
results in quotient and remainder terms that are not in the Laguerre
polynomial basis set. Thus, to express these results as a Laguerre
series, it is necessary to "reproject" the results onto the Laguerre
basis set, which may produce "unintuitive" (but correct) results; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.laguerre import lagdiv
>>> lagdiv([ 8., -13., 38., -51., 36.], [0, 1, 2])
(array([ 1., 2., 3.]), array([ 0.]))
>>> lagdiv([ 9., -12., 38., -51., 36.], [0, 1, 2])
(array([ 1., 2., 3.]), array([ 1., 1.]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0:
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2:
return c1[:1]*0, c1
elif lc2 == 1:
return c1/c2[-1], c1[:1]*0
else:
quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
rem = c1
        for i in range(lc1 - lc2, -1, -1):
p = lagmul([0]*i + [1], c2)
q = rem[-1]/p[-1]
rem = rem[:-1] - q*p[:-1]
quo[i] = q
return quo, pu.trimseq(rem)
def lagpow(c, pow, maxpower=16):
"""Raise a Laguerre series to a power.
Returns the Laguerre series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high.
i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.``
Parameters
----------
c : array_like
1-D array of Laguerre series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Laguerre series of power.
See Also
--------
lagadd, lagsub, lagmul, lagdiv
Examples
--------
>>> from numpy.polynomial.laguerre import lagpow
>>> lagpow([1, 2, 3], 2)
array([ 14., -16., 56., -72., 54.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
# This can be made more efficient by using powers of two
# in the usual way.
prd = c
for i in range(2, power + 1):
prd = lagmul(prd, c)
return prd
def lagder(c, m=1, scl=1, axis=0):
"""
Differentiate a Laguerre series.
Returns the Laguerre series coefficients `c` differentiated `m` times
along `axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The argument
`c` is an array of coefficients from low to high degree along each
axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2``
while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) +
2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c : array_like
Array of Laguerre series coefficients. If `c` is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Laguerre series of the derivative.
See Also
--------
lagint
Notes
-----
In general, the result of differentiating a Laguerre series does not
resemble the same operation on a power series. Thus the result of this
function may be "unintuitive," albeit correct; see Examples section
below.
Examples
--------
>>> from numpy.polynomial.laguerre import lagder
>>> lagder([ 1., 1., 1., -3.])
array([ 1., 2., 3.])
>>> lagder([ 1., 0., 0., -4., 3.], m=2)
array([ 1., 2., 3.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
iaxis = normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
n = len(c)
if cnt >= n:
c = c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
for j in range(n, 1, -1):
der[j - 1] = -c[j]
c[j - 1] += c[j]
der[0] = -c[1]
c = der
c = np.rollaxis(c, 0, iaxis + 1)
return c
def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Laguerre series.
Returns the Laguerre series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]]
represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) +
2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Laguerre series coefficients. If `c` is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at
``lbnd`` is the first value in the list, the value of the second
integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
default), all constants are set to zero. If ``m == 1``, a single
scalar can be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Laguerre series coefficients of the integral.
Raises
------
ValueError
If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
lagder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
    :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.laguerre import lagint
>>> lagint([1,2,3])
array([ 1., 1., 1., -3.])
>>> lagint([1,2,3], m=2)
array([ 1., 0., 0., -4., 3.])
>>> lagint([1,2,3], k=1)
array([ 2., 1., 1., -3.])
>>> lagint([1,2,3], lbnd=-1)
array([ 11.5, 1. , 1. , -3. ])
>>> lagint([1,2], m=2, k=[1,2], lbnd=-1)
array([ 11.16666667, -5. , -3. , 2. ])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
iaxis = normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]
tmp[1] = -c[0]
for j in range(1, n):
tmp[j] += c[j]
tmp[j + 1] = -c[j]
tmp[0] += k[i] - lagval(lbnd, tmp)
c = tmp
c = np.rollaxis(c, 0, iaxis + 1)
return c
def lagval(x, c, tensor=True):
"""
Evaluate a Laguerre series at points x.
If `c` is of length `n + 1`, this function returns the value:
.. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
    scalars have shape ().
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
    or its elements must support addition and multiplication with
    themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, algebra_like
The shape of the return value is described above.
See Also
--------
lagval2d, laggrid2d, lagval3d, laggrid3d
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
>>> from numpy.polynomial.laguerre import lagval
>>> coef = [1,2,3]
>>> lagval(1, coef)
-0.5
>>> lagval([[1,2],[3,4]], coef)
array([[-0.5, -4. ],
[-4.5, -2. ]])
"""
c = np.array(c, ndmin=1, copy=0)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
if len(c) == 1:
c0 = c[0]
c1 = 0
elif len(c) == 2:
c0 = c[0]
c1 = c[1]
else:
nd = len(c)
c0 = c[-2]
c1 = c[-1]
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = c[-i] - (c1*(nd - 1))/nd
c1 = tmp + (c1*((2*nd - 1) - x))/nd
return c0 + c1*(1 - x)
def lagval2d(x, y, c):
"""
Evaluate a 2-D Laguerre series at points (x, y).
This function returns the values:
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y)
The parameters `x` and `y` are converted to arrays only if they are
    tuples or lists, otherwise they are treated as scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` is a 1-D array a one is implicitly appended to its shape to make
it 2-D. The shape of the result will be c.shape[2:] + x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and if it isn't an ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in ``c[i,j]``. If `c` has
dimension greater than two the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points formed with
pairs of corresponding values from `x` and `y`.
See Also
--------
lagval, laggrid2d, lagval3d, laggrid3d
Notes
-----
    .. versionadded:: 1.7.0
"""
try:
x, y = np.array((x, y), copy=0)
    except Exception:
raise ValueError('x, y are incompatible')
c = lagval(x, c)
c = lagval(y, c, tensor=False)
return c
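# Illustrative check for lagval2d (not part of the original module): with
# L_0(t) = 1 and L_1(t) = 1 - t, every basis term equals 1 at the origin, so
# the series c = [[1, 2], [3, 4]] evaluates to 1 + 2 + 3 + 4 there.
# >>> lagval2d(0., 0., [[1., 2.], [3., 4.]])
# 10.0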
def laggrid2d(x, y, c):
"""
Evaluate a 2-D Laguerre series on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b)
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
    tuples or lists, otherwise they are treated as scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape + y.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
        The values of the two dimensional Laguerre series at points in the
Cartesian product of `x` and `y`.
See Also
--------
lagval, lagval2d, lagval3d, laggrid3d
Notes
-----
    .. versionadded:: 1.7.0
"""
c = lagval(x, c)
c = lagval(y, c)
return c
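# Illustrative check for laggrid2d (not part of the original module): the same
# coefficients evaluated on the grid [0, 1] x [0, 1]; L_1(1) = 0, so terms
# involving L_1 vanish at points with a coordinate equal to 1.
# >>> laggrid2d([0., 1.], [0., 1.], [[1., 2.], [3., 4.]])
# array([[ 10.,   4.],
#        [  3.,   1.]])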
def lagval3d(x, y, z, c):
"""
Evaluate a 3-D Laguerre series at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z)
The parameters `x`, `y`, and `z` are converted to arrays only if
    they are tuples or lists, otherwise they are treated as scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
        The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
lagval, lagval2d, laggrid2d, laggrid3d
Notes
-----
    .. versionadded:: 1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
    except Exception:
raise ValueError('x, y, z are incompatible')
c = lagval(x, c)
c = lagval(y, c, tensor=False)
c = lagval(z, c, tensor=False)
return c
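# Illustrative check for lagval3d (not part of the original module): all
# Laguerre basis polynomials equal 1 at the origin, so a coefficient cube of
# ones sums to 2*2*2 = 8 there.
# >>> lagval3d(0., 0., 0., np.ones((2, 2, 2)))
# 8.0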
def laggrid3d(x, y, z, c):
"""
Evaluate a 3-D Laguerre series on the Cartesian product of x, y, and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c)
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
    are tuples or lists, otherwise they are treated as scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
        Array of coefficients ordered so that the coefficients for terms of
        degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
        greater than three the remaining indices enumerate multiple sets of
        coefficients.
Returns
-------
values : ndarray, compatible object
        The values of the three dimensional polynomial at points in the
        Cartesian product of `x`, `y`, and `z`.
See Also
--------
lagval, lagval2d, laggrid2d, lagval3d
Notes
-----
    .. versionadded:: 1.7.0
"""
c = lagval(x, c)
c = lagval(y, c)
c = lagval(z, c)
return c
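# Illustrative check for laggrid3d (not part of the original module): on a
# grid the result has shape x.shape + y.shape + z.shape.
# >>> laggrid3d([0., 1.], [0.], [0.], np.ones((2, 2, 2))).shape
# (2, 1, 1)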
def lagvander(x, deg):
"""Pseudo-Vandermonde matrix of given degree.
Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
`x`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., i] = L_i(x)
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the degree of the Laguerre polynomial.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
array ``V = lagvander(x, n)``, then ``np.dot(V, c)`` and
``lagval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of Laguerre series of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
        ``x.shape + (deg + 1,)``, where the last index is the degree of the
corresponding Laguerre polynomial. The dtype will be the same as
the converted `x`.
Examples
--------
>>> from numpy.polynomial.laguerre import lagvander
>>> x = np.array([0, 1, 2])
>>> lagvander(x, 3)
array([[ 1. , 1. , 1. , 1. ],
[ 1. , 0. , -0.5 , -0.66666667],
[ 1. , -1. , -1. , -0.33333333]])
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
v[0] = x*0 + 1
if ideg > 0:
v[1] = 1 - x
for i in range(2, ideg + 1):
v[i] = (v[i-1]*(2*i - 1 - x) - v[i-2]*(i - 1))/i
return np.rollaxis(v, 0, v.ndim)
def lagvander2d(x, y, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y),
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the degrees of
the Laguerre polynomials.
If ``V = lagvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``lagval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D Laguerre
series of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to
1-D arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)`.  The dtype will be the same
as the converted `x` and `y`.
See Also
--------
    lagvander, lagvander3d, lagval2d, lagval3d
Notes
-----
    .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = lagvander(x, degx)
vy = lagvander(y, degy)
v = vx[..., None]*vy[..., None,:]
return v.reshape(v.shape[:-2] + (-1,))
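# Illustrative check for lagvander2d (not part of the original module): at the
# origin every L_i equals 1, so each row is all ones and the column count is
# (deg[0] + 1)*(deg[1] + 1).
# >>> lagvander2d(np.array([0.]), np.array([0.]), [1, 1])
# array([[ 1.,  1.,  1.,  1.]])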
def lagvander3d(x, y, z, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
    then the pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z),
    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the degrees of the Laguerre polynomials.
If ``V = lagvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``lagval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D Laguerre
series of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`.  The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
    lagvander, lagvander2d, lagval2d, lagval3d
Notes
-----
    .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = lagvander(x, degx)
vy = lagvander(y, degy)
vz = lagvander(z, degz)
v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
return v.reshape(v.shape[:-3] + (-1,))
def lagfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Laguerre series to data.
Return the coefficients of a Laguerre series of degree `deg` that is the
least squares fit to the data values `y` given at points `x`. If `y` is
1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x),
where `n` is `deg`.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int or 1-D array_like
Degree(s) of the fitting polynomials. If `deg` is a single integer
all terms up to and including the `deg`'th term are included in the
fit. For NumPy versions >= 1.11.0 a list of integers specifying the
degrees of the terms to include may be used instead.
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
Returns
-------
    coef : ndarray, shape (deg + 1,) or (deg + 1, K)
Laguerre coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
chebfit, legfit, polyfit, hermfit, hermefit
lagval : Evaluates a Laguerre series.
lagvander : pseudo Vandermonde matrix of Laguerre series.
lagweight : Laguerre weight function.
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the Laguerre series `p` that
minimizes the sum of the weighted squared errors
.. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where the :math:`w_j` are the weights. This problem is solved by
setting up as the (typically) overdetermined matrix equation
.. math:: V(x) * c = w * y,
where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
coefficients to be solved for, `w` are the weights, and `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using Laguerre series are probably most useful when the data can
be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the Laguerre
    weight. In that case the weight ``sqrt(w(x[i]))`` should be used
    together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is
available as `lagweight`.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
>>> from numpy.polynomial.laguerre import lagfit, lagval
>>> x = np.linspace(0, 10)
>>> err = np.random.randn(len(x))/10
>>> y = lagval(x, [1, 2, 3]) + err
>>> lagfit(x, y, 2)
array([ 0.96971004, 2.00193749, 3.00288744])
"""
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
deg = np.asarray(deg)
# check arguments.
if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
raise TypeError("deg must be an int or non-empty 1-D array of int")
if deg.min() < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
if deg.ndim == 0:
lmax = deg
order = lmax + 1
van = lagvander(x, lmax)
else:
deg = np.sort(deg)
lmax = deg[-1]
order = len(deg)
van = lagvander(x, lmax)[:, deg]
# set up the least squares matrices in transposed form
lhs = van.T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None:
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
# Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# Expand c to include non-fitted coefficients which are set to zero
if deg.ndim > 0:
if c.ndim == 2:
cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype)
else:
cc = np.zeros(lmax+1, dtype=c.dtype)
cc[deg] = c
c = cc
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning, stacklevel=2)
if full:
return c, [resids, rank, s, rcond]
else:
return c
def lagcompanion(c):
"""
Return the companion matrix of c.
The usual companion matrix of the Laguerre polynomials is already
symmetric when `c` is a basis Laguerre polynomial, so no scaling is
applied.
Parameters
----------
c : array_like
1-D array of Laguerre series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Companion matrix of dimensions (deg, deg).
Notes
-----
    .. versionadded:: 1.7.0
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[1 + c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
top = mat.reshape(-1)[1::n+1]
mid = mat.reshape(-1)[0::n+1]
bot = mat.reshape(-1)[n::n+1]
top[...] = -np.arange(1, n)
mid[...] = 2.*np.arange(n) + 1.
bot[...] = top
mat[:, -1] += (c[:-1]/c[-1])*n
return mat
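# Illustrative check for lagcompanion (not part of the original module): for
# the basis polynomial L_2 the matrix is tridiagonal and symmetric, and its
# eigenvalues 2 -/+ sqrt(2) are exactly the roots of L_2.
# >>> lagcompanion([0, 0, 1])
# array([[ 1., -1.],
#        [-1.,  3.]])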
def lagroots(c):
"""
Compute the roots of a Laguerre series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * L_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
polyroots, legroots, chebroots, hermroots, hermeroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
The Laguerre series basis polynomials aren't powers of `x` so the
results of this function may seem unintuitive.
Examples
--------
>>> from numpy.polynomial.laguerre import lagroots, lagfromroots
>>> coef = lagfromroots([0, 1, 2])
>>> coef
array([ 2., -8., 12., -6.])
>>> lagroots(coef)
array([ -4.44089210e-16, 1.00000000e+00, 2.00000000e+00])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) <= 1:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([1 + c[0]/c[1]])
m = lagcompanion(c)
r = la.eigvals(m)
r.sort()
return r
def laggauss(deg):
"""
Gauss-Laguerre quadrature.
Computes the sample points and weights for Gauss-Laguerre quadrature.
These sample points and weights will correctly integrate polynomials of
degree :math:`2*deg - 1` or less over the interval :math:`[0, \\inf]`
with the weight function :math:`f(x) = \\exp(-x)`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
    .. versionadded:: 1.7.0
    The results have only been tested up to degree 100; higher degrees may
be problematic. The weights are determined by using the fact that
.. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))
where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
is the k'th root of :math:`L_n`, and then scaling the results to get
the right value when integrating 1.
"""
ideg = int(deg)
if ideg != deg or ideg < 1:
raise ValueError("deg must be a non-negative integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
c = np.array([0]*deg + [1])
m = lagcompanion(c)
x = la.eigvalsh(m)
# improve roots by one application of Newton
dy = lagval(x, c)
df = lagval(x, lagder(c))
x -= dy/df
# compute the weights. We scale the factor to avoid possible numerical
# overflow.
fm = lagval(x, c[1:])
fm /= np.abs(fm).max()
df /= np.abs(df).max()
w = 1/(fm * df)
# scale w to get the right value, 1 in this case
w /= w.sum()
return x, w
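# Illustrative sanity check for laggauss (not part of the original module):
# the two-point rule integrates x*exp(-x) over [0, inf) exactly, and the
# weights sum to 1 because integral(exp(-x), 0, inf) = 1.
# >>> x, w = laggauss(2)
# >>> np.dot(w, x), w.sum()   # both 1.0 up to roundoff
# (1.0, 1.0)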
def lagweight(x):
"""Weight function of the Laguerre polynomials.
The weight function is :math:`exp(-x)` and the interval of integration
is :math:`[0, \\inf]`. The Laguerre polynomials are orthogonal, but not
normalized, with respect to this weight function.
Parameters
----------
x : array_like
Values at which the weight function will be computed.
Returns
-------
w : ndarray
The weight function at `x`.
Notes
-----
    .. versionadded:: 1.7.0
"""
w = np.exp(-x)
return w
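# Illustrative values for lagweight (not part of the original module): the
# weight decays exponentially on [0, inf), so exp(-0) = 1 and exp(-1) ~ 0.368.
# >>> lagweight(np.array([0., 1.]))
# array([ 1.        ,  0.36787944])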
#
# Laguerre series class
#
class Laguerre(ABCPolyBase):
"""A Laguerre series class.
The Laguerre class provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
attributes and methods listed in the `ABCPolyBase` documentation.
Parameters
----------
coef : array_like
Laguerre coefficients in order of increasing degree, i.e,
``(1, 2, 3)`` gives ``1*L_0(x) + 2*L_1(X) + 3*L_2(x)``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is [0, 1].
window : (2,) array_like, optional
Window, see `domain` for its use. The default value is [0, 1].
.. versionadded:: 1.6.0
"""
# Virtual Functions
_add = staticmethod(lagadd)
_sub = staticmethod(lagsub)
_mul = staticmethod(lagmul)
_div = staticmethod(lagdiv)
_pow = staticmethod(lagpow)
_val = staticmethod(lagval)
_int = staticmethod(lagint)
_der = staticmethod(lagder)
_fit = staticmethod(lagfit)
_line = staticmethod(lagline)
_roots = staticmethod(lagroots)
_fromroots = staticmethod(lagfromroots)
# Virtual properties
nickname = 'lag'
domain = np.array(lagdomain)
window = np.array(lagdomain)
|
bsd-2-clause
|
labordoc/labordoc-next
|
modules/miscutil/lib/data_cacher.py
|
8
|
4095
|
# -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Tool for caching important information that is slow to rebuild but
rarely changes.
"""
from invenio.dbquery import run_sql, get_table_update_time
import time
class InvenioDataCacherError(Exception):
"""Error raised by data cacher."""
pass
class DataCacher:
"""
    DataCacher is an abstract cacher system, for caching information
that are slow to retrieve but that don't change too much during
time.
The .timestamp and .cache objects are exposed to clients. Most
use cases use a dict internal structure for .cache, but some use
lists.
"""
def __init__(self, cache_filler, timestamp_verifier):
""" @param cache_filler: a function that fills the cache dictionary.
@param timestamp_verifier: a function that returns a timestamp for
checking if something has changed after cache creation.
"""
self.timestamp = 0 # WARNING: may be exposed to clients
self.cache = {} # WARNING: may be exposed to clients; lazy
# clients may even alter this object on the fly
if not callable(cache_filler):
raise InvenioDataCacherError, "cache_filler is not callable"
self.cache_filler = cache_filler
if not callable(timestamp_verifier):
raise InvenioDataCacherError, "timestamp_verifier is not callable"
self.timestamp_verifier = timestamp_verifier
self.is_ok_p = True
self.create_cache()
def clear(self):
"""Clear the cache rebuilding it."""
self.create_cache()
def create_cache(self):
"""
Create and populate cache by calling cache filler. Called on
startup and used later during runtime as needed by clients.
"""
# We empty the cache first to force freeing of the variable
# this is useful when it is really big like our citations dictionary
self.cache = None
self.cache = self.cache_filler()
self.timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
def recreate_cache_if_needed(self):
"""
Recreate cache if needed, by verifying the cache timestamp
against the timestamp verifier function.
"""
if self.timestamp_verifier() > self.timestamp:
self.create_cache()
class SQLDataCacher(DataCacher):
"""
SQLDataCacher is a cacher system, for caching single queries and
their results.
"""
def __init__(self, query, param=None, affected_tables=()):
""" @param query: the query to cache
@param param: its optional parameters as a tuple
@param affected_tables: the list of tables queried by the query.
"""
self.query = query
self.affected_tables = affected_tables
assert(affected_tables)
def cache_filler():
"""Standard SQL filler, with results from sql query."""
return run_sql(self.query, param)
def timestamp_verifier():
"""The standard timestamp verifier is looking at affected
tables time stamp."""
return max([get_table_update_time(table)
for table in self.affected_tables])
DataCacher.__init__(self, cache_filler, timestamp_verifier)
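# Illustrative usage of SQLDataCacher (a sketch; the table name and query are
# hypothetical):
#
# collection_cache = SQLDataCacher("SELECT id, name FROM collection",
#                                  affected_tables=('collection',))
# rows = collection_cache.cache               # cached run_sql() result
# collection_cache.recreate_cache_if_needed() # refresh if 'collection' changed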
|
gpl-2.0
|
ngageoint/geoq
|
geoq/workflow/migrations/0001_initial.py
|
1
|
11369
|
# Generated by Django 3.0.5 on 2020-04-17 14:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import jsonfield.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256, verbose_name='Event summary')),
('description', models.TextField(blank=True, verbose_name='Description')),
('is_mandatory', models.BooleanField(default=False, help_text='This event must be marked as complete before moving out of the associated state.', verbose_name='Mandatory event')),
],
options={
'verbose_name': 'Event',
'verbose_name_plural': 'Events',
},
),
migrations.CreateModel(
name='EventType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256, verbose_name='Event Type Name')),
('description', models.TextField(blank=True, verbose_name='Description')),
],
),
migrations.CreateModel(
name='Participant',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('disabled', models.BooleanField(default=False)),
],
options={
'verbose_name': 'Participant',
'verbose_name_plural': 'Participants',
'ordering': ['-disabled', 'workflowactivity', 'user'],
},
),
migrations.CreateModel(
name='Role',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64, verbose_name='Name of Role')),
('description', models.TextField(blank=True, verbose_name='Description')),
],
options={
'verbose_name': 'Role',
'verbose_name_plural': 'Roles',
'ordering': ['name'],
'permissions': (('can_define_roles', 'Can define roles'),),
},
),
migrations.CreateModel(
name='State',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256, verbose_name='Name')),
('description', models.TextField(blank=True, verbose_name='Description')),
('is_start_state', models.BooleanField(default=False, help_text='There can only be one start state for a workflow', verbose_name='Is the start state?')),
('is_end_state', models.BooleanField(default=False, help_text='An end state shows that the workflow is complete', verbose_name='Is an end state?')),
('estimation_value', models.IntegerField(default=0, help_text='Use whole numbers', verbose_name='Estimated time (value)')),
('estimation_unit', models.IntegerField(choices=[(1, 'Second(s)'), (60, 'Minute(s)'), (3600, 'Hour(s)'), (86400, 'Day(s)'), (604800, 'Week(s)')], default=86400, verbose_name='Estimation unit of time')),
('slug', models.SlugField(blank=True, verbose_name='Slug')),
('color', jsonfield.fields.JSONField(blank=True, help_text='Color properties for this state. Should be json that includes color, fillColor, fillOpacity, opacity and weight', null=True)),
('roles', models.ManyToManyField(blank=True, to='workflow.Role')),
],
options={
'verbose_name': 'State',
'verbose_name_plural': 'States',
'ordering': ['-is_start_state', 'is_end_state'],
},
),
migrations.CreateModel(
name='Transition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Use an "active" verb. e.g. "Close Issue", "Open Vacancy" or "Start Interviews"', max_length=128, verbose_name='Name of transition')),
('from_state', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='transitions_from', to='workflow.State')),
('roles', models.ManyToManyField(blank=True, to='workflow.Role')),
('to_state', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='transitions_into', to='workflow.State')),
],
options={
'verbose_name': 'Transition',
'verbose_name_plural': 'Transitions',
},
),
migrations.CreateModel(
name='Workflow',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, verbose_name='Workflow Name')),
('slug', models.SlugField(verbose_name='Slug')),
('description', models.TextField(blank=True, verbose_name='Description')),
('status', models.IntegerField(choices=[(0, 'In definition'), (1, 'Active'), (2, 'Retired')], default=0, verbose_name='Status')),
('created_on', models.DateTimeField(auto_now_add=True)),
('cloned_from', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='workflow.Workflow')),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Workflow',
'verbose_name_plural': 'Workflows',
'ordering': ['status', 'name'],
'permissions': (('can_manage_workflows', 'Can manage workflows'),),
},
),
migrations.CreateModel(
name='WorkflowActivity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', models.DateTimeField(auto_now_add=True)),
('completed_on', models.DateTimeField(blank=True, null=True)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
('workflow', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='workflow.Workflow')),
],
options={
'verbose_name': 'Workflow Activity',
                'verbose_name_plural': 'Workflow Activities',
'ordering': ['-completed_on', '-created_on'],
'permissions': (('can_start_workflow', 'Can start a workflow'), ('can_assign_roles', 'Can assign roles')),
},
),
migrations.CreateModel(
name='WorkflowHistory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('log_type', models.IntegerField(choices=[(1, 'Transition'), (2, 'Event'), (3, 'Role'), (4, 'Comment')], help_text='The sort of thing being logged')),
('created_on', models.DateTimeField(auto_now_add=True)),
('note', models.TextField(blank=True, verbose_name='Note')),
('deadline', models.DateTimeField(blank=True, help_text='The deadline for staying in this state', null=True, verbose_name='Deadline')),
('event', models.ForeignKey(help_text='The event relating to this happening in the workflow history', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='history', to='workflow.Event')),
('participant', models.ForeignKey(help_text='The participant who triggered this happening in the workflow history', on_delete=django.db.models.deletion.PROTECT, to='workflow.Participant')),
('state', models.ForeignKey(help_text='The state at this point in the workflow history', null=True, on_delete=django.db.models.deletion.PROTECT, to='workflow.State')),
('transition', models.ForeignKey(help_text='The transition relating to this happening in the workflow history', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='history', to='workflow.Transition')),
('workflowactivity', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='history', to='workflow.WorkflowActivity')),
],
options={
'verbose_name': 'Workflow History',
'verbose_name_plural': 'Workflow Histories',
'ordering': ['-created_on'],
},
),
migrations.AddField(
model_name='transition',
name='workflow',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='transitions', to='workflow.Workflow'),
),
migrations.AddField(
model_name='state',
name='workflow',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='states', to='workflow.Workflow'),
),
migrations.AddField(
model_name='participant',
name='roles',
field=models.ManyToManyField(blank=True, to='workflow.Role'),
),
migrations.AddField(
model_name='participant',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='participant',
name='workflowactivity',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='participants', to='workflow.WorkflowActivity'),
),
migrations.AddField(
model_name='event',
name='event_types',
field=models.ManyToManyField(to='workflow.EventType'),
),
migrations.AddField(
model_name='event',
name='roles',
field=models.ManyToManyField(to='workflow.Role'),
),
migrations.AddField(
model_name='event',
name='state',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='events', to='workflow.State'),
),
migrations.AddField(
model_name='event',
name='workflow',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='events', to='workflow.Workflow'),
),
migrations.AlterUniqueTogether(
name='participant',
unique_together={('user', 'workflowactivity')},
),
]
|
mit
|
pmisik/buildbot
|
master/buildbot/test/unit/data/test_workers.py
|
5
|
10489
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import mock
from twisted.internet import defer
from twisted.trial import unittest
from buildbot.data import exceptions
from buildbot.data import resultspec
from buildbot.data import workers
from buildbot.test import fakedb
from buildbot.test.fake import fakemaster
from buildbot.test.util import endpoint
from buildbot.test.util import interfaces
from buildbot.test.util.misc import TestReactorMixin
testData = [
fakedb.Builder(id=40, name='b1'),
fakedb.Builder(id=41, name='b2'),
fakedb.Master(id=13),
fakedb.Master(id=14),
fakedb.BuilderMaster(id=4013, builderid=40, masterid=13),
fakedb.BuilderMaster(id=4014, builderid=40, masterid=14),
fakedb.BuilderMaster(id=4113, builderid=41, masterid=13),
fakedb.Worker(id=1, name='linux', info={}),
fakedb.ConfiguredWorker(id=14013,
workerid=1, buildermasterid=4013),
fakedb.ConfiguredWorker(id=14014,
workerid=1, buildermasterid=4014),
fakedb.ConnectedWorker(id=113, masterid=13, workerid=1),
fakedb.Worker(id=2, name='windows', info={"a": "b"}),
fakedb.ConfiguredWorker(id=24013,
workerid=2, buildermasterid=4013),
fakedb.ConfiguredWorker(id=24014,
workerid=2, buildermasterid=4014),
fakedb.ConfiguredWorker(id=24113,
workerid=2, buildermasterid=4113),
fakedb.ConnectedWorker(id=214, masterid=14, workerid=2),
]
def configuredOnKey(worker):
return (worker.get('masterid', 0),
worker.get('builderid', 0))
def _filt(bs, builderid, masterid):
bs['connected_to'] = sorted(
[d for d in bs['connected_to']
if not masterid or masterid == d['masterid']])
bs['configured_on'] = sorted(
[d for d in bs['configured_on']
if (not masterid or masterid == d['masterid'])
and (not builderid or builderid == d['builderid'])], key=configuredOnKey)
return bs
def w1(builderid=None, masterid=None):
return _filt({
'workerid': 1,
'name': 'linux',
'workerinfo': {},
'paused': False,
'graceful': False,
'connected_to': [
{'masterid': 13},
],
'configured_on': sorted([
{'builderid': 40, 'masterid': 13},
{'builderid': 40, 'masterid': 14},
], key=configuredOnKey),
}, builderid, masterid)
def w2(builderid=None, masterid=None):
return _filt({
'workerid': 2,
'name': 'windows',
'workerinfo': {'a': 'b'},
'paused': False,
'graceful': False,
'connected_to': [
{'masterid': 14},
],
'configured_on': sorted([
{'builderid': 40, 'masterid': 13},
{'builderid': 41, 'masterid': 13},
{'builderid': 40, 'masterid': 14},
], key=configuredOnKey),
}, builderid, masterid)
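# Annotation (not in the original source): w1() and w2() build the expected
# data-API dicts for the two fixture workers defined in testData above;
# passing builderid/masterid narrows 'configured_on' and 'connected_to' the
# same way the endpoints under test are expected to filter them.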
class WorkerEndpoint(endpoint.EndpointMixin, unittest.TestCase):
endpointClass = workers.WorkerEndpoint
resourceTypeClass = workers.Worker
def setUp(self):
self.setUpEndpoint()
return self.db.insertTestData(testData)
def tearDown(self):
self.tearDownEndpoint()
@defer.inlineCallbacks
def test_get_existing(self):
worker = yield self.callGet(('workers', 2))
self.validateData(worker)
worker['configured_on'] = sorted(
worker['configured_on'], key=configuredOnKey)
self.assertEqual(worker, w2())
@defer.inlineCallbacks
def test_get_existing_name(self):
worker = yield self.callGet(('workers', 'linux'))
self.validateData(worker)
worker['configured_on'] = sorted(
worker['configured_on'], key=configuredOnKey)
self.assertEqual(worker, w1())
@defer.inlineCallbacks
def test_get_existing_masterid(self):
worker = yield self.callGet(('masters', 14, 'workers', 2))
self.validateData(worker)
worker['configured_on'] = sorted(
worker['configured_on'], key=configuredOnKey)
self.assertEqual(worker, w2(masterid=14))
@defer.inlineCallbacks
def test_get_existing_builderid(self):
worker = yield self.callGet(('builders', 40, 'workers', 2))
self.validateData(worker)
worker['configured_on'] = sorted(
worker['configured_on'], key=configuredOnKey)
self.assertEqual(worker, w2(builderid=40))
@defer.inlineCallbacks
def test_get_existing_masterid_builderid(self):
worker = yield self.callGet(('masters', 13, 'builders', 40, 'workers', 2))
self.validateData(worker)
worker['configured_on'] = sorted(
worker['configured_on'], key=configuredOnKey)
self.assertEqual(worker, w2(masterid=13, builderid=40))
@defer.inlineCallbacks
def test_get_missing(self):
worker = yield self.callGet(('workers', 99))
self.assertEqual(worker, None)
@defer.inlineCallbacks
def test_setWorkerState(self):
yield self.master.data.updates.setWorkerState(2, True, False)
worker = yield self.callGet(('workers', 2))
self.validateData(worker)
self.assertEqual(worker['paused'], True)
@defer.inlineCallbacks
def test_actions(self):
for action in ("stop", "pause", "unpause", "kill"):
yield self.callControl(action, {}, ('masters', 13, 'builders', 40, 'workers', 2))
self.master.mq.assertProductions(
[(('control', 'worker', '2', action), {'reason': 'no reason'})])
@defer.inlineCallbacks
def test_bad_actions(self):
with self.assertRaises(exceptions.InvalidControlException):
yield self.callControl("bad_action", {},
('masters', 13, 'builders', 40, 'workers', 2))
class WorkersEndpoint(endpoint.EndpointMixin, unittest.TestCase):
endpointClass = workers.WorkersEndpoint
resourceTypeClass = workers.Worker
def setUp(self):
self.setUpEndpoint()
return self.db.insertTestData(testData)
def tearDown(self):
self.tearDownEndpoint()
@defer.inlineCallbacks
def test_get(self):
workers = yield self.callGet(('workers',))
for b in workers:
self.validateData(b)
b['configured_on'] = sorted(b['configured_on'],
key=configuredOnKey)
self.assertEqual(sorted(workers, key=configuredOnKey),
sorted([w1(), w2()], key=configuredOnKey))
@defer.inlineCallbacks
def test_get_masterid(self):
workers = yield self.callGet(('masters', '13', 'workers',))
        for b in workers:
            self.validateData(b)
            b['configured_on'] = sorted(b['configured_on'], key=configuredOnKey)
self.assertEqual(sorted(workers, key=configuredOnKey),
sorted([w1(masterid=13), w2(masterid=13)], key=configuredOnKey))
@defer.inlineCallbacks
def test_get_builderid(self):
workers = yield self.callGet(('builders', '41', 'workers',))
        for b in workers:
            self.validateData(b)
            b['configured_on'] = sorted(b['configured_on'], key=configuredOnKey)
self.assertEqual(sorted(workers, key=configuredOnKey),
sorted([w2(builderid=41)], key=configuredOnKey))
@defer.inlineCallbacks
def test_get_masterid_builderid(self):
workers = yield self.callGet(('masters', '13', 'builders', '41',
'workers',))
        for b in workers:
            self.validateData(b)
            b['configured_on'] = sorted(b['configured_on'], key=configuredOnKey)
self.assertEqual(sorted(workers, key=configuredOnKey),
sorted([w2(masterid=13, builderid=41)], key=configuredOnKey))
@defer.inlineCallbacks
def test_setWorkerStateFindByPaused(self):
yield self.master.data.updates.setWorkerState(2, True, False)
resultSpec = resultspec.OptimisedResultSpec(
filters=[resultspec.Filter('paused', 'eq', [True])])
workers = yield self.callGet(('workers',), resultSpec=resultSpec)
self.assertEqual(len(workers), 1)
worker = workers[0]
self.validateData(worker)
self.assertEqual(worker['paused'], True)
class Worker(TestReactorMixin, interfaces.InterfaceTests, unittest.TestCase):
def setUp(self):
self.setUpTestReactor()
self.master = fakemaster.make_master(self, wantMq=True, wantDb=True,
wantData=True)
self.rtype = workers.Worker(self.master)
return self.master.db.insertTestData([
fakedb.Master(id=13),
fakedb.Master(id=14),
])
def test_signature_findWorkerId(self):
@self.assertArgSpecMatches(
self.master.data.updates.findWorkerId, # fake
self.rtype.findWorkerId) # real
def findWorkerId(self, name):
pass
def test_signature_workerConfigured(self):
@self.assertArgSpecMatches(
self.master.data.updates.workerConfigured, # fake
self.rtype.workerConfigured) # real
def workerConfigured(self, workerid, masterid, builderids):
pass
def test_findWorkerId(self):
# this just passes through to the db method, so test that
rv = defer.succeed(None)
self.master.db.workers.findWorkerId = \
mock.Mock(return_value=rv)
self.assertIdentical(self.rtype.findWorkerId('foo'), rv)
def test_findWorkerId_not_id(self):
with self.assertRaises(ValueError):
self.rtype.findWorkerId(b'foo')
with self.assertRaises(ValueError):
self.rtype.findWorkerId('123/foo')
|
gpl-2.0
|
OpenCanada/website
|
articles/migrations/0003_initial_pages.py
|
2
|
1624
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
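# Annotation (not in the original migration): Wagtail pages live in a
# django-treebeard materialized-path tree, so path/depth/numchild must be
# kept consistent by hand when pages are created directly in a data
# migration. Each path step is 4 characters: '0001' is the tree root,
# '00010001' the home page, and '000100010001' the home page's first child.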
def create_pages(apps, schema_editor):
Page = apps.get_model("wagtailcore", "Page")
SeriesListPage = apps.get_model("articles", "SeriesListPage")
ArticleListPage = apps.get_model("articles", "ArticleListPage")
home_page = Page.objects.get(slug="home")
ContentType = apps.get_model("contenttypes", "ContentType")
article_list_page_content_type, created = ContentType.objects.get_or_create(
model='articlelistpage',
app_label='articles'
)
# Create features page
features_page = ArticleListPage.objects.create(
title="Features",
slug='features',
content_type_id=article_list_page_content_type.pk,
path='000100010001',
depth=3,
numchild=0,
url_path='/home/features/',
)
home_page.numchild += 1
series_list_page_content_type, created = ContentType.objects.get_or_create(
model='serieslistpage',
app_label='articles'
)
# Create indepth page
SeriesListPage.objects.create(
title="In Depth",
slug='indepth',
content_type_id=series_list_page_content_type.pk,
path='000100010002',
depth=3,
numchild=0,
url_path='/home/indepth/',
)
home_page.numchild += 1
home_page.save()
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '__latest__'),
('articles', '0002_articleauthorlink_author'),
]
operations = [
migrations.RunPython(create_pages),
]
|
mit
|
NeCTAR-RC/nova
|
nova/api/openstack/compute/legacy_v2/contrib/extended_server_attributes.py
|
1
|
3340
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Extended Server Attributes API extension."""
from oslo_config import cfg
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import utils
CONF = cfg.CONF
authorize = extensions.soft_extension_authorizer('compute',
'extended_server_attributes')
class ExtendedServerAttributesController(wsgi.Controller):
def _get_hypervisor_instance_name(self, context, instance):
if not CONF.cells.enable:
return instance['name']
sys_metadata = utils.instance_sys_meta(instance)
return sys_metadata.get('instance_name', '')
def _extend_server(self, context, server, instance):
key = "%s:hypervisor_hostname" % Extended_server_attributes.alias
server[key] = instance.node
for attr in ['host', 'name']:
if attr == 'name':
key = "%s:instance_%s" % (Extended_server_attributes.alias,
attr)
server[key] = self._get_hypervisor_instance_name(context,
instance)
else:
key = "%s:%s" % (Extended_server_attributes.alias, attr)
server[key] = instance[attr]
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if authorize(context):
server = resp_obj.obj['server']
db_instance = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'show' method.
self._extend_server(context, server, db_instance)
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
servers = list(resp_obj.obj['servers'])
for server in servers:
db_instance = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'detail' method.
self._extend_server(context, server, db_instance)
class Extended_server_attributes(extensions.ExtensionDescriptor):
"""Extended Server Attributes support."""
name = "ExtendedServerAttributes"
alias = "OS-EXT-SRV-ATTR"
namespace = ("http://docs.openstack.org/compute/ext/"
"extended_status/api/v1.1")
updated = "2011-11-03T00:00:00Z"
def get_controller_extensions(self):
controller = ExtendedServerAttributesController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
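# Annotation (not in the original file): with alias "OS-EXT-SRV-ATTR",
# _extend_server() adds keys such as OS-EXT-SRV-ATTR:host,
# OS-EXT-SRV-ATTR:instance_name and OS-EXT-SRV-ATTR:hypervisor_hostname
# to each server dict in the 'show' and 'detail' responses.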
|
apache-2.0
|
40223232/w16b_test
|
static/Brython3.1.1-20150328-091302/Lib/markdown2.py
|
669
|
8143
|
import browser.html
import re
class URL:
def __init__(self,src):
elts = src.split(maxsplit=1)
self.href = elts[0]
self.alt = ''
if len(elts)==2:
alt = elts[1]
if alt[0]=='"' and alt[-1]=='"':self.alt=alt[1:-1]
elif alt[0]=="'" and alt[-1]=="'":self.alt=alt[1:-1]
elif alt[0]=="(" and alt[-1]==")":self.alt=alt[1:-1]
class CodeBlock:
def __init__(self,line):
self.lines = [line]
def to_html(self):
if self.lines[0].startswith("`"):
self.lines.pop(0)
res = escape('\n'.join(self.lines))
res = unmark(res)
res = '<pre class="marked">%s</pre>\n' %res
return res,[]
class Marked:
def __init__(self, line=''):
self.line = line
self.children = []
def to_html(self):
return apply_markdown(self.line)
# get references
refs = {}
ref_pattern = r"^\[(.*)\]:\s+(.*)"
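# e.g. a line like  [python]: http://www.python.org "Python home"  defines
# the reference "python", mapped to a URL instance with href and alt set
# (annotation, not in the original source)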
def mark(src):
global refs
refs = {}
# split source in sections
# sections can be :
# - a block-level HTML element (markdown syntax will not be processed)
# - a script
# - a span-level HTML tag (markdown syntax will be processed)
# - a code block
# normalise line feeds
src = src.replace('\r\n','\n')
# lines followed by dashes
src = re.sub(r'(.*?)\n=+\n', '\n# \\1\n', src)
src = re.sub(r'(.*?)\n-+\n', '\n## \\1\n', src)
lines = src.split('\n')
i = bq = 0
ul = ol = 0
while i<len(lines):
# enclose lines starting by > in a blockquote
if lines[i].startswith('>'):
nb = 1
while nb<len(lines[i]) and lines[i][nb]=='>':
nb += 1
lines[i] = lines[i][nb:]
if nb>bq:
lines.insert(i,'<blockquote>'*(nb-bq))
i += 1
bq = nb
elif nb<bq:
lines.insert(i,'</blockquote>'*(bq-nb))
i += 1
bq = nb
elif bq>0:
lines.insert(i,'</blockquote>'*bq)
i += 1
bq = 0
# unordered lists
if lines[i].strip() and lines[i].lstrip()[0] in '-+*' \
and (i==0 or ul or not lines[i-1].strip()):
print('is ul',lines[i])
# line indentation indicates nesting level
nb = 1+len(lines[i])-len(lines[i].lstrip())
lines[i] = '<li>'+lines[i][1+nb:]
if nb>ul:
lines.insert(i,'<ul>'*(nb-ul))
i += 1
elif nb<ul:
lines.insert(i,'</ul>'*(ul-nb))
i += 1
ul = nb
elif ul:
lines.insert(i,'</ul>'*ul)
i += 1
ul = 0
# ordered lists
mo = re.search(r'^(\d+\.)',lines[i])
if mo:
if not ol:
lines.insert(i,'<ol>')
i += 1
lines[i] = '<li>'+lines[i][len(mo.groups()[0]):]
ol = 1
elif ol:
lines.insert(i,'</ol>')
i += 1
ol = 0
i += 1
sections = []
scripts = []
section = Marked()
i = 0
while i<len(lines):
line = lines[i]
if line.strip() and line.startswith(' '):
if isinstance(section,Marked) and section.line:
sections.append(section)
section = CodeBlock(line[4:])
j = i+1
while j<len(lines) and lines[j].strip() \
and lines[j].startswith(' '):
section.lines.append(lines[j][4:])
j += 1
sections.append(section)
section = Marked()
i = j
continue
elif line.lower().startswith('<script'):
if isinstance(section,Marked) and section.line:
sections.append(section)
section = Marked()
j = i+1
while j<len(lines):
if lines[j].lower().startswith('</script>'):
scripts.append('\n'.join(lines[i+1:j]))
for k in range(i,j+1):
lines[k] = ''
break
j += 1
i = j
continue
else:
mo = re.search(ref_pattern,line)
if mo is not None:
if isinstance(section,Marked) and section.line:
sections.append(section)
section = Marked()
key = mo.groups()[0]
value = URL(mo.groups()[1])
refs[key.lower()] = value
else:
if line.strip():
if section.line:
section.line += ' '
section.line += line
else:
sections.append(section)
section = Marked()
i += 1
res = ''
for section in sections:
mk,_scripts = section.to_html()
res += '<p>'+mk+'\n'
scripts += _scripts
return res,scripts
def escape(czone):
    czone = czone.replace('&','&amp;')
    czone = czone.replace('<','&lt;')
    czone = czone.replace('>','&gt;')
    return czone
def s_escape(mo):
# used in re.sub
czone = mo.string[mo.start():mo.end()]
return escape(czone)
def unmark(code_zone):
    # convert _ to &#95; inside inline code
    code_zone = code_zone.replace('_','&#95;')
    return code_zone
def s_unmark(mo):
    # convert _ to &#95; inside inline code
    code_zone = mo.string[mo.start():mo.end()]
    code_zone = code_zone.replace('_','&#95;')
    return code_zone
def apply_markdown(src):
scripts = []
# replace \` by `
src = re.sub(r'\\\`','`',src)
# escape < > & in inline code
code_pattern = r'\`(\S.*?\S)\`'
src = re.sub(code_pattern,s_escape,src)
    # also convert _ to &#95; inside inline code
src = re.sub(code_pattern,s_unmark,src)
# inline links
link_pattern1 = r'\[(.+?)\]\s?\((.+?)\)'
def repl(mo):
g1,g2 = mo.groups()
        g2 = re.sub('_','&#95;',g2)
return '<a href="%s">%s</a>' %(g2,g1)
src = re.sub(link_pattern1,repl,src)
# reference links
link_pattern2 = r'\[(.+?)\]\s?\[(.*?)\]'
while True:
mo = re.search(link_pattern2,src)
if mo is None:break
text,key = mo.groups()
print(text,key)
if not key:key=text # implicit link name
if key.lower() not in refs:
            raise KeyError('unknown reference %s' %key)
url = refs[key.lower()]
repl = '<a href="'+url.href+'"'
if url.alt:
repl += ' title="'+url.alt+'"'
repl += '>%s</a>' %text
src = re.sub(link_pattern2,repl,src,count=1)
# emphasis
# replace \* by *
src = re.sub(r'\\\*','*',src)
# replace \_ by _
src = re.sub(r'\\\_','_',src)
# _ and * surrounded by spaces are not markup
    src = re.sub(r' _ ',' &#95; ',src)
    src = re.sub(r' \* ',' &#42; ',src)
strong_patterns = [('STRONG',r'\*\*(.*?)\*\*'),('B',r'__(.*?)__')]
for tag,strong_pattern in strong_patterns:
src = re.sub(strong_pattern,r'<%s>\1</%s>' %(tag,tag),src)
em_patterns = [('EM',r'\*(.*?)\*'),('I',r'\_(.*?)\_')]
for tag,em_pattern in em_patterns:
src = re.sub(em_pattern,r'<%s>\1</%s>' %(tag,tag),src)
# inline code
# replace \` by `
src = re.sub(r'\\\`','`',src)
code_pattern = r'\`(.*?)\`'
src = re.sub(code_pattern,r'<code>\1</code>',src)
# ordered lists
lines = src.split('\n')
atx_header_pattern = '^(#+)(.*)(#*)'
for i,line in enumerate(lines):
print('line [%s]' %line, line.startswith('#'))
mo = re.search(atx_header_pattern,line)
if not mo:continue
print('pattern matches')
level = len(mo.groups()[0])
lines[i] = re.sub(atx_header_pattern,
'<H%s>%s</H%s>\n' %(level,mo.groups()[1],level),
line,count=1)
src = '\n'.join(lines)
src = re.sub('\n\n+','\n<p>',src)+'\n'
return src,scripts
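# Illustrative usage sketch (annotation, not part of the original module;
# kept as a comment because 'browser.html' is only importable under
# Brython, so the module cannot be exercised from plain CPython):
#
#   html, scripts = mark('Title\n=====\n\nSome **strong** text and `code`.')
#   # html holds the rendered fragments, scripts any extracted <script> bodies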
|
gpl-3.0
|
thiriel/maps
|
venv/lib/python2.7/site-packages/django/contrib/formtools/tests/wizard/forms.py
|
90
|
7471
|
from django import forms, http
from django.conf import settings
from django.test import TestCase
from django.template.response import TemplateResponse
from django.utils.importlib import import_module
from django.contrib.auth.models import User
from django.contrib.formtools.wizard.views import (WizardView,
SessionWizardView,
CookieWizardView)
class DummyRequest(http.HttpRequest):
def __init__(self, POST=None):
super(DummyRequest, self).__init__()
self.method = POST and "POST" or "GET"
if POST is not None:
self.POST.update(POST)
self.session = {}
self._dont_enforce_csrf_checks = True
def get_request(*args, **kwargs):
request = DummyRequest(*args, **kwargs)
engine = import_module(settings.SESSION_ENGINE)
request.session = engine.SessionStore(None)
return request
class Step1(forms.Form):
name = forms.CharField()
class Step2(forms.Form):
name = forms.CharField()
class Step3(forms.Form):
data = forms.CharField()
class CustomKwargsStep1(Step1):
def __init__(self, test=None, *args, **kwargs):
self.test = test
return super(CustomKwargsStep1, self).__init__(*args, **kwargs)
class UserForm(forms.ModelForm):
class Meta:
model = User
UserFormSet = forms.models.modelformset_factory(User, form=UserForm, extra=2)
class TestWizard(WizardView):
storage_name = 'django.contrib.formtools.wizard.storage.session.SessionStorage'
def dispatch(self, request, *args, **kwargs):
response = super(TestWizard, self).dispatch(request, *args, **kwargs)
return response, self
def get_form_kwargs(self, step, *args, **kwargs):
kwargs = super(TestWizard, self).get_form_kwargs(step, *args, **kwargs)
if step == 'kwargs_test':
kwargs['test'] = True
return kwargs
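# Annotation (not in the original source): TestWizard.dispatch() returns a
# (response, instance) tuple instead of a bare response so the tests below
# can inspect the wizard instance's state (current step, storage, form
# kwargs) after each request.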
class FormTests(TestCase):
def test_form_init(self):
testform = TestWizard.get_initkwargs([Step1, Step2])
self.assertEqual(testform['form_list'], {u'0': Step1, u'1': Step2})
testform = TestWizard.get_initkwargs([('start', Step1), ('step2', Step2)])
self.assertEqual(
testform['form_list'], {u'start': Step1, u'step2': Step2})
testform = TestWizard.get_initkwargs([Step1, Step2, ('finish', Step3)])
self.assertEqual(
testform['form_list'], {u'0': Step1, u'1': Step2, u'finish': Step3})
def test_first_step(self):
request = get_request()
testform = TestWizard.as_view([Step1, Step2])
response, instance = testform(request)
self.assertEqual(instance.steps.current, u'0')
testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
response, instance = testform(request)
self.assertEqual(instance.steps.current, 'start')
def test_persistence(self):
testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
request = get_request({'test_wizard-current_step': 'start',
'name': 'data1'})
response, instance = testform(request)
self.assertEqual(instance.steps.current, 'start')
instance.storage.current_step = 'step2'
testform2 = TestWizard.as_view([('start', Step1), ('step2', Step2)])
request.POST = {'test_wizard-current_step': 'step2'}
response, instance = testform2(request)
self.assertEqual(instance.steps.current, 'step2')
def test_form_condition(self):
request = get_request()
testform = TestWizard.as_view(
[('start', Step1), ('step2', Step2), ('step3', Step3)],
condition_dict={'step2': True})
response, instance = testform(request)
self.assertEqual(instance.get_next_step(), 'step2')
testform = TestWizard.as_view(
[('start', Step1), ('step2', Step2), ('step3', Step3)],
condition_dict={'step2': False})
response, instance = testform(request)
self.assertEqual(instance.get_next_step(), 'step3')
def test_form_kwargs(self):
request = get_request()
testform = TestWizard.as_view([('start', Step1),
('kwargs_test', CustomKwargsStep1)])
response, instance = testform(request)
self.assertEqual(instance.get_form_kwargs('start'), {})
self.assertEqual(instance.get_form_kwargs('kwargs_test'), {'test': True})
self.assertEqual(instance.get_form('kwargs_test').test, True)
def test_form_prefix(self):
request = get_request()
testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
response, instance = testform(request)
self.assertEqual(instance.get_form_prefix(), 'start')
self.assertEqual(instance.get_form_prefix('another'), 'another')
def test_form_initial(self):
request = get_request()
testform = TestWizard.as_view([('start', Step1), ('step2', Step2)],
initial_dict={'start': {'name': 'value1'}})
response, instance = testform(request)
self.assertEqual(instance.get_form_initial('start'), {'name': 'value1'})
self.assertEqual(instance.get_form_initial('step2'), {})
def test_form_instance(self):
request = get_request()
the_instance = User()
testform = TestWizard.as_view([('start', UserForm), ('step2', Step2)],
instance_dict={'start': the_instance})
response, instance = testform(request)
self.assertEqual(
instance.get_form_instance('start'),
the_instance)
self.assertEqual(
instance.get_form_instance('non_exist_instance'),
None)
def test_formset_instance(self):
request = get_request()
the_instance1, created = User.objects.get_or_create(
username='testuser1')
the_instance2, created = User.objects.get_or_create(
username='testuser2')
testform = TestWizard.as_view([('start', UserFormSet), ('step2', Step2)],
instance_dict={'start': User.objects.filter(username='testuser1')})
response, instance = testform(request)
self.assertEqual(list(instance.get_form_instance('start')), [the_instance1])
self.assertEqual(instance.get_form_instance('non_exist_instance'), None)
self.assertEqual(instance.get_form().initial_form_count(), 1)
def test_done(self):
request = get_request()
testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
response, instance = testform(request)
self.assertRaises(NotImplementedError, instance.done, None)
def test_revalidation(self):
request = get_request()
testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
response, instance = testform(request)
instance.render_done(None)
self.assertEqual(instance.storage.current_step, 'start')
class SessionFormTests(TestCase):
def test_init(self):
request = get_request()
testform = SessionWizardView.as_view([('start', Step1)])
self.assertTrue(isinstance(testform(request), TemplateResponse))
class CookieFormTests(TestCase):
def test_init(self):
request = get_request()
testform = CookieWizardView.as_view([('start', Step1)])
self.assertTrue(isinstance(testform(request), TemplateResponse))
|
bsd-3-clause
|
TheNameIsNigel/opencog
|
opencog/cython/setup.py
|
36
|
3230
|
from distutils.core import setup
from distutils.extension import Extension
from distutils.sysconfig import get_python_inc
from Cython.Distutils import build_ext
import os
# WARNING: This file is deprecated as the cython bindings are integrated with
# CMake now. This file is still useful for reference on how to use distutils
# with cython
# How can we make this configurable by CMake?
# Perhaps via an environment variable or a command line argument
opencog_library_dir = "/usr/local/lib/opencog"
# for OSX:
#opencog_library_dir = "/opt/local/lib/opencog"
# for non-installed build:
#opencog_library_dir = "../../bin/opencog/atomspace",
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
incdir = os.path.join(get_python_inc(plat_specific=1), 'Numerical')
# This extension stuff should use info from CMake somehow...
ext = Extension(
"opencog", # name of extension
define_macros = [('MAJOR_VERSION', '0'),
('MINOR_VERSION', '1')],
sources=["opencog.pyx"], # filename of our Cython source
language="c++", # this causes Cython to create C++ source
include_dirs=[".", # needed to find local pyx/pxd files
"../..", # to support building in source directory
"/usr/local/include", # For local includes
"/opt/local/include" # For MacPorts
],
libraries=["stdc++",
"boost_system-mt","boost_thread-mt", # boost dependencies
# opencog libraries
"atomspace",
"util"
],
library_dirs=[
"/opt/local/lib", # For MacPorts
opencog_library_dir],
runtime_library_dirs=[opencog_library_dir]
)
# This extension stuff should use info from CMake somehow...
helper_ext = Extension(
"agent_finder", # name of extension
define_macros = [('MAJOR_VERSION', '0'),
('MINOR_VERSION', '1')],
sources=["agent_finder.pyx"], # filename of our Cython source
language="c++", # this causes Cython to create C++ source
include_dirs=[".", # needed to find local pyx/pxd files
"/usr/local/include", # For local includes
"/opt/local/include" # For MacPorts
],
libraries=["stdc++",
"boost_system-mt","boost_thread-mt", # boost dependencies
],
library_dirs=[
"/opt/local/lib", # For MacPorts
]
)
setup(name = 'pyopencog',
description = 'OpenCog Python bindings',
author = 'Joel Pitt',
author_email = '[email protected]',
url = 'http://wiki.opencog.org/w/Python',
long_description = read('README'),
version = '0.1',
classifiers=[
"Development Status :: 3 - Alpha",
#"Development Status :: 4 - Beta",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"License :: OSI Approved :: GNU Affero General Public License v3",
],
cmdclass = {'build_ext': build_ext},
ext_modules = [ext, helper_ext]
)
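# Typical invocation for a distutils/Cython setup of this shape (noted here
# for reference only, since this file is deprecated in favour of the CMake
# build):
#
#   python setup.py build_ext --inplace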
|
agpl-3.0
|
ligo-cbc/pycbc-glue
|
pycbc_glue/ligolw/ilwd.py
|
1
|
7449
|
# Copyright (C) 2006,2012 Kipp Cannon
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# ILWDs
#
# =============================================================================
#
"""
The ilwd:char type is used to store ID strings for objects within LIGO
Light-Weight XML files. This module and its associated C extension module
_ilwd provide a class for memory-efficient storage of ilwd:char strings.
LIGO Light Weight XML "ilwd:char" IDs are strings of the form
"table:column:integer", for example "process:process_id:10". Large complex
documents can have many millions of these strings, and their storage
represents a significant RAM burden. However, while there can be millions
of ID strings in a document there might be only a small number (e.g., 10 or
fewer) unique ID prefixes in a document (the table name and column name
part). The amount of RAM required to load a document can be significantly
reduced if the small number of unique string prefixes are stored separately
and reused. This module provides the machinery used to do this.
The ilwdchar class in this module converts a string or unicode object
containing an ilwd:char ID into a more memory efficient representation.
Example:
>>> x = ilwdchar("process:process_id:10")
>>> print x
process:process_id:10
Like strings, the object resulting from this is immutable. It provides two
read-only attributes, "table_name" and "column_name", that can be used to
access the table and column parts of the original ID string. The integer
suffix can be retrieved by converting the object to an integer.
Example:
>>> x.table_name
'process'
>>> int(x)
10
The object also provides the read-only attribute "index_offset", giving the
length of the string preceding the integer suffix.
Example:
>>> x.index_offset
19
The objects support some arithmetic operations.
Example:
>>> y = x + 5
>>> str(y)
'process:process_id:15'
>>> int(y - x)
5
The objects are pickle-able.
Example:
>>> import pickle
>>> x == pickle.loads(pickle.dumps(x))
True
To simplify interaction with documents that do not contain fully-populated
columns, None is allowed as an input value and is not converted.
Example:
>>> print ilwdchar(None)
None
Implementation details
======================
Memory is reduced by storing the table_name, column_name, and index_offset
values as class attributes, so only one copy is present in memory and is
shared across all instances of the class. This means that each unique
table_name and column_name pair requires its own class. These classes are
created on the fly as new IDs are processed, and get added to this module's
name space. They are all subclasses of _ilwd.ilwdchar, which implements
the low-level machinery. After a new class is created it can be accessed
as a symbol in this module, but each of those symbols does not exist until
at least one corresponding ID string has been processed.
Example:
>>> import ilwd
>>> "foo_bar_class" in ilwd.__dict__
False
>>> x = ilwd.ilwdchar("foo:bar:0")
>>> type(x)
<class 'pycbc_glue.ligolw.ilwd.foo_bar_class'>
>>> "foo_bar_class" in ilwd.__dict__
True
>>> print ilwd.foo_bar_class(10)
foo:bar:10
The ilwdchar class itself is never instantiated, its .__new__() method
parses the ID string parameter and creates an instance of the appropriate
subclass of _ilwd.ilwdchar, creating a new subclass before doing so if
necessary.
"""
import copy_reg
from pycbc_glue import git_version
from . import _ilwd
__author__ = "Kipp Cannon <[email protected]>"
__version__ = "git id %s" % git_version.id
__date__ = git_version.date
#
# =============================================================================
#
# Cached Classes
#
# =============================================================================
#
#
# Function for retrieving ilwdchar subclasses.
#
def get_ilwdchar_class(tbl_name, col_name, namespace = globals()):
"""
Searches this module's namespace for a subclass of _ilwd.ilwdchar
whose table_name and column_name attributes match those provided.
If a matching subclass is found it is returned; otherwise a new
class is defined, added to this module's namespace, and returned.
Example:
>>> process_id = get_ilwdchar_class("process", "process_id")
>>> x = process_id(10)
>>> str(type(x))
"<class 'pycbc_glue.ligolw.ilwd.process_process_id_class'>"
>>> str(x)
'process:process_id:10'
Retrieving and storing the class provides a convenient mechanism
for quickly constructing new ID objects.
Example:
>>> for i in range(10):
... print str(process_id(i))
...
process:process_id:0
process:process_id:1
process:process_id:2
process:process_id:3
process:process_id:4
process:process_id:5
process:process_id:6
process:process_id:7
process:process_id:8
process:process_id:9
"""
#
# if the class already exists, retrieve and return it
#
key = (str(tbl_name), str(col_name))
cls_name = "%s_%s_class" % key
assert cls_name != "get_ilwdchar_class"
try:
return namespace[cls_name]
except KeyError:
pass
#
# otherwise define a new class, and add it to the cache
#
class new_class(_ilwd.ilwdchar):
__slots__ = ()
table_name, column_name = key
index_offset = len("%s:%s:" % key)
new_class.__name__ = cls_name
namespace[cls_name] = new_class
#
# pickle support
#
copy_reg.pickle(new_class, lambda x: (ilwdchar, (unicode(x),)))
#
# return the new class
#
return new_class
#
# Metaclass to redirect instantiation to the correct subclass for
# _ilwd.ilwdchar
#
class ilwdchar(object):
"""
Metaclass wrapper of pycbc_glue.ligolw._ilwd.ilwdchar class.
Instantiating this class constructs and returns an instance of a
subclass of pycbc_glue.ligolw._ilwd.ilwdchar.
"""
def __new__(cls, s):
"""
Convert an ilwd:char-formated string into an instance of
the matching subclass of _ilwd.ilwdchar. If the input is
None then the return value is None.
Example:
>>> x = ilwdchar("process:process_id:10")
>>> str(x)
'process:process_id:10'
>>> x.table_name
'process'
>>> x.column_name
'process_id'
>>> int(x)
10
>>> x.index_offset
19
>>> str(x)[x.index_offset:]
'10'
>>> print ilwdchar(None)
None
"""
#
# None is no-op
#
if s is None:
return None
#
# try parsing the string as an ilwd:char formated string
#
try:
table_name, column_name, i = s.strip().split(":")
except (ValueError, AttributeError):
raise ValueError("invalid ilwd:char '%s'" % repr(s))
#
# retrieve the matching class from the ID class cache, and
# return an instance initialized to the desired value
#
return get_ilwdchar_class(table_name, column_name)(int(i))
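#
# Illustrative usage sketch (annotation, not in the original module; kept
# as a comment since it needs the _ilwd C extension at runtime):
#
# >>> pid_cls = get_ilwdchar_class("process", "process_id")
# >>> [str(pid_cls(i)) for i in range(3)]
# ['process:process_id:0', 'process:process_id:1', 'process:process_id:2']
# >>> # all three instances share one class-level prefix, which is the
# >>> # memory saving described in the module docstring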
|
gpl-3.0
|
citrix-openstack-build/neutron
|
neutron/tests/unit/test_linux_ip_lib.py
|
4
|
28091
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.agent.linux import ip_lib
from neutron.common import exceptions
from neutron.tests import base
NETNS_SAMPLE = [
'12345678-1234-5678-abcd-1234567890ab',
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'cccccccc-cccc-cccc-cccc-cccccccccccc']
LINK_SAMPLE = [
'1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN \\'
'link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00',
'2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP '
'qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff'
'\ alias openvswitch',
'3: br-int: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN '
'\ link/ether aa:bb:cc:dd:ee:ff brd ff:ff:ff:ff:ff:ff',
'4: gw-ddc717df-49: <BROADCAST,MULTICAST> mtu 1500 qdisc noop '
'state DOWN \ link/ether fe:dc:ba:fe:dc:ba brd ff:ff:ff:ff:ff:ff']
ADDR_SAMPLE = ("""
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff
inet 172.16.77.240/24 brd 172.16.77.255 scope global eth0
inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link
valid_lft forever preferred_lft forever
""")
ADDR_SAMPLE2 = ("""
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff
inet 172.16.77.240/24 scope global eth0
inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link
valid_lft forever preferred_lft forever
""")
GATEWAY_SAMPLE1 = ("""
default via 10.35.19.254 metric 100
10.35.16.0/22 proto kernel scope link src 10.35.17.97
""")
GATEWAY_SAMPLE2 = ("""
default via 10.35.19.254 metric 100
""")
GATEWAY_SAMPLE3 = ("""
10.35.16.0/22 proto kernel scope link src 10.35.17.97
""")
GATEWAY_SAMPLE4 = ("""
default via 10.35.19.254
""")
DEVICE_ROUTE_SAMPLE = ("10.0.0.0/24 scope link src 10.0.0.2")
SUBNET_SAMPLE1 = ("10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1\n"
"10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2")
SUBNET_SAMPLE2 = ("10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2\n"
"10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1")
class TestSubProcessBase(base.BaseTestCase):
def setUp(self):
super(TestSubProcessBase, self).setUp()
self.execute_p = mock.patch('neutron.agent.linux.utils.execute')
self.execute = self.execute_p.start()
self.addCleanup(self.execute_p.stop)
def test_execute_wrapper(self):
ip_lib.SubProcessBase._execute('o', 'link', ('list',), 'sudo')
self.execute.assert_called_once_with(['ip', '-o', 'link', 'list'],
root_helper='sudo')
def test_execute_wrapper_int_options(self):
ip_lib.SubProcessBase._execute([4], 'link', ('list',))
self.execute.assert_called_once_with(['ip', '-4', 'link', 'list'],
root_helper=None)
def test_execute_wrapper_no_options(self):
ip_lib.SubProcessBase._execute([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'link', 'list'],
root_helper=None)
def test_run_no_namespace(self):
base = ip_lib.SubProcessBase('sudo')
base._run([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'link', 'list'],
root_helper=None)
def test_run_namespace(self):
base = ip_lib.SubProcessBase('sudo', 'ns')
base._run([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns',
'ip', 'link', 'list'],
root_helper='sudo')
def test_as_root_namespace(self):
base = ip_lib.SubProcessBase('sudo', 'ns')
base._as_root([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns',
'ip', 'link', 'list'],
root_helper='sudo')
def test_as_root_no_root_helper(self):
base = ip_lib.SubProcessBase()
self.assertRaises(exceptions.SudoRequired,
base._as_root,
[], 'link', ('list',))
class TestIpWrapper(base.BaseTestCase):
def setUp(self):
super(TestIpWrapper, self).setUp()
self.execute_p = mock.patch.object(ip_lib.IPWrapper, '_execute')
self.execute = self.execute_p.start()
self.addCleanup(self.execute_p.stop)
def test_get_devices(self):
self.execute.return_value = '\n'.join(LINK_SAMPLE)
retval = ip_lib.IPWrapper('sudo').get_devices()
self.assertEqual(retval,
[ip_lib.IPDevice('lo'),
ip_lib.IPDevice('eth0'),
ip_lib.IPDevice('br-int'),
ip_lib.IPDevice('gw-ddc717df-49')])
self.execute.assert_called_once_with('o', 'link', ('list',),
'sudo', None)
def test_get_devices_malformed_line(self):
self.execute.return_value = '\n'.join(LINK_SAMPLE + ['gibberish'])
retval = ip_lib.IPWrapper('sudo').get_devices()
self.assertEqual(retval,
[ip_lib.IPDevice('lo'),
ip_lib.IPDevice('eth0'),
ip_lib.IPDevice('br-int'),
ip_lib.IPDevice('gw-ddc717df-49')])
self.execute.assert_called_once_with('o', 'link', ('list',),
'sudo', None)
def test_get_namespaces(self):
self.execute.return_value = '\n'.join(NETNS_SAMPLE)
retval = ip_lib.IPWrapper.get_namespaces('sudo')
self.assertEqual(retval,
['12345678-1234-5678-abcd-1234567890ab',
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'cccccccc-cccc-cccc-cccc-cccccccccccc'])
self.execute.assert_called_once_with('', 'netns', ('list',),
root_helper='sudo')
def test_add_tuntap(self):
ip_lib.IPWrapper('sudo').add_tuntap('tap0')
self.execute.assert_called_once_with('', 'tuntap',
('add', 'tap0', 'mode', 'tap'),
'sudo', None)
def test_add_veth(self):
ip_lib.IPWrapper('sudo').add_veth('tap0', 'tap1')
self.execute.assert_called_once_with('', 'link',
('add', 'tap0', 'type', 'veth',
'peer', 'name', 'tap1'),
'sudo', None)
def test_add_veth_with_namespaces(self):
ns2 = 'ns2'
with mock.patch.object(ip_lib.IPWrapper, 'ensure_namespace') as en:
ip_lib.IPWrapper('sudo').add_veth('tap0', 'tap1', namespace2=ns2)
en.assert_has_calls([mock.call(ns2)])
self.execute.assert_called_once_with('', 'link',
('add', 'tap0', 'type', 'veth',
'peer', 'name', 'tap1',
'netns', ns2),
'sudo', None)
def test_get_device(self):
dev = ip_lib.IPWrapper('sudo', 'ns').device('eth0')
self.assertEqual(dev.root_helper, 'sudo')
self.assertEqual(dev.namespace, 'ns')
self.assertEqual(dev.name, 'eth0')
def test_ensure_namespace(self):
with mock.patch.object(ip_lib, 'IPDevice') as ip_dev:
ip = ip_lib.IPWrapper('sudo')
with mock.patch.object(ip.netns, 'exists') as ns_exists:
ns_exists.return_value = False
ip.ensure_namespace('ns')
self.execute.assert_has_calls(
[mock.call([], 'netns', ('add', 'ns'), 'sudo', None)])
ip_dev.assert_has_calls([mock.call('lo', 'sudo', 'ns'),
mock.call().link.set_up()])
def test_ensure_namespace_existing(self):
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd:
ip_ns_cmd.exists.return_value = True
ns = ip_lib.IPWrapper('sudo').ensure_namespace('ns')
self.assertFalse(self.execute.called)
self.assertEqual(ns.namespace, 'ns')
def test_namespace_is_empty_no_devices(self):
ip = ip_lib.IPWrapper('sudo', 'ns')
with mock.patch.object(ip, 'get_devices') as get_devices:
get_devices.return_value = []
self.assertTrue(ip.namespace_is_empty())
get_devices.assert_called_once_with(exclude_loopback=True)
def test_namespace_is_empty(self):
ip = ip_lib.IPWrapper('sudo', 'ns')
with mock.patch.object(ip, 'get_devices') as get_devices:
get_devices.return_value = [mock.Mock()]
self.assertFalse(ip.namespace_is_empty())
get_devices.assert_called_once_with(exclude_loopback=True)
def test_garbage_collect_namespace_does_not_exist(self):
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
ip_ns_cmd_cls.return_value.exists.return_value = False
ip = ip_lib.IPWrapper('sudo', 'ns')
with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
self.assertFalse(ip.garbage_collect_namespace())
ip_ns_cmd_cls.assert_has_calls([mock.call().exists('ns')])
self.assertNotIn(mock.call().delete('ns'),
ip_ns_cmd_cls.return_value.mock_calls)
self.assertEqual(mock_is_empty.mock_calls, [])
def test_garbage_collect_namespace_existing_empty_ns(self):
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
ip_ns_cmd_cls.return_value.exists.return_value = True
ip = ip_lib.IPWrapper('sudo', 'ns')
with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
mock_is_empty.return_value = True
self.assertTrue(ip.garbage_collect_namespace())
mock_is_empty.assert_called_once_with()
expected = [mock.call().exists('ns'),
mock.call().delete('ns')]
ip_ns_cmd_cls.assert_has_calls(expected)
def test_garbage_collect_namespace_existing_not_empty(self):
lo_device = mock.Mock()
lo_device.name = 'lo'
tap_device = mock.Mock()
tap_device.name = 'tap1'
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
ip_ns_cmd_cls.return_value.exists.return_value = True
ip = ip_lib.IPWrapper('sudo', 'ns')
with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
mock_is_empty.return_value = False
self.assertFalse(ip.garbage_collect_namespace())
mock_is_empty.assert_called_once_with()
expected = [mock.call(ip),
mock.call().exists('ns')]
self.assertEqual(ip_ns_cmd_cls.mock_calls, expected)
self.assertNotIn(mock.call().delete('ns'),
ip_ns_cmd_cls.mock_calls)
def test_add_device_to_namespace(self):
dev = mock.Mock()
ip_lib.IPWrapper('sudo', 'ns').add_device_to_namespace(dev)
dev.assert_has_calls([mock.call.link.set_netns('ns')])
def test_add_device_to_namespace_is_none(self):
dev = mock.Mock()
ip_lib.IPWrapper('sudo').add_device_to_namespace(dev)
self.assertEqual(dev.mock_calls, [])
class TestIPDevice(base.BaseTestCase):
def test_eq_same_name(self):
dev1 = ip_lib.IPDevice('tap0')
dev2 = ip_lib.IPDevice('tap0')
self.assertEqual(dev1, dev2)
def test_eq_diff_name(self):
dev1 = ip_lib.IPDevice('tap0')
dev2 = ip_lib.IPDevice('tap1')
self.assertNotEqual(dev1, dev2)
def test_eq_same_namespace(self):
dev1 = ip_lib.IPDevice('tap0', 'ns1')
dev2 = ip_lib.IPDevice('tap0', 'ns1')
self.assertEqual(dev1, dev2)
def test_eq_diff_namespace(self):
dev1 = ip_lib.IPDevice('tap0', 'sudo', 'ns1')
dev2 = ip_lib.IPDevice('tap0', 'sudo', 'ns2')
self.assertNotEqual(dev1, dev2)
def test_eq_other_is_none(self):
dev1 = ip_lib.IPDevice('tap0', 'sudo', 'ns1')
self.assertNotEqual(dev1, None)
def test_str(self):
self.assertEqual(str(ip_lib.IPDevice('tap0')), 'tap0')
class TestIPCommandBase(base.BaseTestCase):
def setUp(self):
super(TestIPCommandBase, self).setUp()
self.ip = mock.Mock()
self.ip.root_helper = 'sudo'
self.ip.namespace = 'namespace'
self.ip_cmd = ip_lib.IpCommandBase(self.ip)
self.ip_cmd.COMMAND = 'foo'
def test_run(self):
self.ip_cmd._run('link', 'show')
self.ip.assert_has_calls([mock.call._run([], 'foo', ('link', 'show'))])
def test_run_with_options(self):
self.ip_cmd._run('link', options='o')
self.ip.assert_has_calls([mock.call._run('o', 'foo', ('link', ))])
def test_as_root(self):
self.ip_cmd._as_root('link')
self.ip.assert_has_calls(
[mock.call._as_root([], 'foo', ('link', ), False)])
def test_as_root_with_options(self):
self.ip_cmd._as_root('link', options='o')
self.ip.assert_has_calls(
[mock.call._as_root('o', 'foo', ('link', ), False)])
class TestIPDeviceCommandBase(base.BaseTestCase):
def setUp(self):
super(TestIPDeviceCommandBase, self).setUp()
self.ip_dev = mock.Mock()
self.ip_dev.name = 'eth0'
self.ip_dev.root_helper = 'sudo'
self.ip_dev._execute = mock.Mock(return_value='executed')
self.ip_cmd = ip_lib.IpDeviceCommandBase(self.ip_dev)
self.ip_cmd.COMMAND = 'foo'
def test_name_property(self):
self.assertEqual(self.ip_cmd.name, 'eth0')
class TestIPCmdBase(base.BaseTestCase):
def setUp(self):
super(TestIPCmdBase, self).setUp()
self.parent = mock.Mock()
self.parent.name = 'eth0'
self.parent.root_helper = 'sudo'
def _assert_call(self, options, args):
self.parent.assert_has_calls([
mock.call._run(options, self.command, args)])
def _assert_sudo(self, options, args, force_root_namespace=False):
self.parent.assert_has_calls(
[mock.call._as_root(options, self.command, args,
force_root_namespace)])
class TestIpLinkCommand(TestIPCmdBase):
def setUp(self):
super(TestIpLinkCommand, self).setUp()
self.parent._run.return_value = LINK_SAMPLE[1]
self.command = 'link'
self.link_cmd = ip_lib.IpLinkCommand(self.parent)
def test_set_address(self):
self.link_cmd.set_address('aa:bb:cc:dd:ee:ff')
self._assert_sudo([], ('set', 'eth0', 'address', 'aa:bb:cc:dd:ee:ff'))
def test_set_mtu(self):
self.link_cmd.set_mtu(1500)
self._assert_sudo([], ('set', 'eth0', 'mtu', 1500))
def test_set_up(self):
self.link_cmd.set_up()
self._assert_sudo([], ('set', 'eth0', 'up'))
def test_set_down(self):
self.link_cmd.set_down()
self._assert_sudo([], ('set', 'eth0', 'down'))
def test_set_netns(self):
self.link_cmd.set_netns('foo')
self._assert_sudo([], ('set', 'eth0', 'netns', 'foo'))
self.assertEqual(self.parent.namespace, 'foo')
def test_set_name(self):
self.link_cmd.set_name('tap1')
self._assert_sudo([], ('set', 'eth0', 'name', 'tap1'))
self.assertEqual(self.parent.name, 'tap1')
def test_set_alias(self):
self.link_cmd.set_alias('openvswitch')
self._assert_sudo([], ('set', 'eth0', 'alias', 'openvswitch'))
def test_delete(self):
self.link_cmd.delete()
self._assert_sudo([], ('delete', 'eth0'))
def test_address_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.address, 'cc:dd:ee:ff:ab:cd')
def test_mtu_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.mtu, 1500)
def test_qdisc_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.qdisc, 'mq')
def test_qlen_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.qlen, 1000)
def test_alias_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.alias, 'openvswitch')
def test_state_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.state, 'UP')
def test_settings_property(self):
expected = {'mtu': 1500,
'qlen': 1000,
'state': 'UP',
'qdisc': 'mq',
'brd': 'ff:ff:ff:ff:ff:ff',
'link/ether': 'cc:dd:ee:ff:ab:cd',
'alias': 'openvswitch'}
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.attributes, expected)
self._assert_call('o', ('show', 'eth0'))
class TestIpAddrCommand(TestIPCmdBase):
def setUp(self):
super(TestIpAddrCommand, self).setUp()
self.parent.name = 'tap0'
self.command = 'addr'
self.addr_cmd = ip_lib.IpAddrCommand(self.parent)
def test_add_address(self):
self.addr_cmd.add(4, '192.168.45.100/24', '192.168.45.255')
self._assert_sudo([4],
('add', '192.168.45.100/24', 'brd', '192.168.45.255',
'scope', 'global', 'dev', 'tap0'))
def test_add_address_scoped(self):
self.addr_cmd.add(4, '192.168.45.100/24', '192.168.45.255',
scope='link')
self._assert_sudo([4],
('add', '192.168.45.100/24', 'brd', '192.168.45.255',
'scope', 'link', 'dev', 'tap0'))
def test_del_address(self):
self.addr_cmd.delete(4, '192.168.45.100/24')
self._assert_sudo([4],
('del', '192.168.45.100/24', 'dev', 'tap0'))
def test_flush(self):
self.addr_cmd.flush()
self._assert_sudo([], ('flush', 'tap0'))
def test_list(self):
expected = [
dict(ip_version=4, scope='global',
dynamic=False, cidr='172.16.77.240/24',
broadcast='172.16.77.255'),
dict(ip_version=6, scope='global',
dynamic=True, cidr='2001:470:9:1224:5595:dd51:6ba2:e788/64',
broadcast='::'),
dict(ip_version=6, scope='global',
dynamic=True, cidr='2001:470:9:1224:fd91:272:581e:3a32/64',
broadcast='::'),
dict(ip_version=6, scope='global',
dynamic=True, cidr='2001:470:9:1224:4508:b885:5fb:740b/64',
broadcast='::'),
dict(ip_version=6, scope='global',
dynamic=True, cidr='2001:470:9:1224:dfcc:aaff:feb9:76ce/64',
broadcast='::'),
dict(ip_version=6, scope='link',
dynamic=False, cidr='fe80::dfcc:aaff:feb9:76ce/64',
broadcast='::')]
test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2]
for test_case in test_cases:
self.parent._run = mock.Mock(return_value=test_case)
self.assertEqual(self.addr_cmd.list(), expected)
self._assert_call([], ('show', 'tap0'))
def test_list_filtered(self):
expected = [
dict(ip_version=4, scope='global',
dynamic=False, cidr='172.16.77.240/24',
broadcast='172.16.77.255')]
test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2]
for test_case in test_cases:
output = '\n'.join(test_case.split('\n')[0:4])
self.parent._run.return_value = output
self.assertEqual(self.addr_cmd.list('global',
filters=['permanent']), expected)
self._assert_call([], ('show', 'tap0', 'permanent', 'scope',
'global'))
class TestIpRouteCommand(TestIPCmdBase):
def setUp(self):
super(TestIpRouteCommand, self).setUp()
self.parent.name = 'eth0'
self.command = 'route'
self.route_cmd = ip_lib.IpRouteCommand(self.parent)
def test_add_gateway(self):
gateway = '192.168.45.100'
metric = 100
self.route_cmd.add_gateway(gateway, metric)
self._assert_sudo([],
('replace', 'default', 'via', gateway,
'metric', metric,
'dev', self.parent.name))
def test_del_gateway(self):
gateway = '192.168.45.100'
self.route_cmd.delete_gateway(gateway)
self._assert_sudo([],
('del', 'default', 'via', gateway,
'dev', self.parent.name))
def test_get_gateway(self):
test_cases = [{'sample': GATEWAY_SAMPLE1,
'expected': {'gateway': '10.35.19.254',
'metric': 100}},
{'sample': GATEWAY_SAMPLE2,
'expected': {'gateway': '10.35.19.254',
'metric': 100}},
{'sample': GATEWAY_SAMPLE3,
'expected': None},
{'sample': GATEWAY_SAMPLE4,
'expected': {'gateway': '10.35.19.254'}}]
for test_case in test_cases:
self.parent._run = mock.Mock(return_value=test_case['sample'])
self.assertEqual(self.route_cmd.get_gateway(),
test_case['expected'])
def test_pullup_route(self):
# interface is not the first in the list - requires
# deleting and creating existing entries
output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE1]
def pullup_side_effect(self, *args):
result = output.pop(0)
return result
self.parent._run = mock.Mock(side_effect=pullup_side_effect)
self.route_cmd.pullup_route('tap1d7888a7-10')
self._assert_sudo([], ('del', '10.0.0.0/24', 'dev', 'qr-23380d11-d2'))
self._assert_sudo([], ('append', '10.0.0.0/24', 'proto', 'kernel',
'src', '10.0.0.1', 'dev', 'qr-23380d11-d2'))
def test_pullup_route_first(self):
# interface is first in the list - no changes
output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE2]
def pullup_side_effect(self, *args):
result = output.pop(0)
return result
self.parent._run = mock.Mock(side_effect=pullup_side_effect)
self.route_cmd.pullup_route('tap1d7888a7-10')
# Check two calls - device get and subnet get
self.assertEqual(len(self.parent._run.mock_calls), 2)
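    # Note on the two tests above: mock's side_effect pops the prepared
    # outputs in order -- the device route sample first, then the subnet
    # sample -- mimicking the two consecutive route listings that
    # pullup_route() performs.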
class TestIpNetnsCommand(TestIPCmdBase):
def setUp(self):
super(TestIpNetnsCommand, self).setUp()
self.command = 'netns'
self.netns_cmd = ip_lib.IpNetnsCommand(self.parent)
def test_add_namespace(self):
ns = self.netns_cmd.add('ns')
self._assert_sudo([], ('add', 'ns'), force_root_namespace=True)
self.assertEqual(ns.namespace, 'ns')
def test_delete_namespace(self):
with mock.patch('neutron.agent.linux.utils.execute'):
self.netns_cmd.delete('ns')
self._assert_sudo([], ('delete', 'ns'), force_root_namespace=True)
def test_namespace_exists(self):
retval = '\n'.join(NETNS_SAMPLE)
self.parent._as_root.return_value = retval
self.assertTrue(
self.netns_cmd.exists('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'))
self._assert_sudo('o', ('list',), force_root_namespace=True)
    def test_namespace_does_not_exist(self):
retval = '\n'.join(NETNS_SAMPLE)
self.parent._as_root.return_value = retval
self.assertFalse(
self.netns_cmd.exists('bbbbbbbb-1111-2222-3333-bbbbbbbbbbbb'))
self._assert_sudo('o', ('list',), force_root_namespace=True)
def test_execute(self):
self.parent.namespace = 'ns'
with mock.patch('neutron.agent.linux.utils.execute') as execute:
self.netns_cmd.execute(['ip', 'link', 'list'])
execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns', 'ip',
'link', 'list'],
root_helper='sudo',
check_exit_code=True)
def test_execute_env_var_prepend(self):
self.parent.namespace = 'ns'
with mock.patch('neutron.agent.linux.utils.execute') as execute:
env = dict(FOO=1, BAR=2)
self.netns_cmd.execute(['ip', 'link', 'list'], env)
execute.assert_called_once_with(
['ip', 'netns', 'exec', 'ns', 'env', 'FOO=1', 'BAR=2',
'ip', 'link', 'list'],
root_helper='sudo', check_exit_code=True)
class TestDeviceExists(base.BaseTestCase):
def test_device_exists(self):
with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute:
_execute.return_value = LINK_SAMPLE[1]
self.assertTrue(ip_lib.device_exists('eth0'))
_execute.assert_called_once_with('o', 'link', ('show', 'eth0'))
def test_device_does_not_exist(self):
with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute:
_execute.return_value = ''
_execute.side_effect = RuntimeError
self.assertFalse(ip_lib.device_exists('eth0'))
|
apache-2.0
|
fevxie/sale-workflow
|
sale_rental/wizard/__init__.py
|
31
|
1070
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Sale Rental module for Odoo
# Copyright (C) 2014-2015 Akretion (http://www.akretion.com)
# @author Alexis de Lattre <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import create_rental_product
|
agpl-3.0
|
ncliam/serverpos
|
openerp/addons/base_iban/base_iban.py
|
278
|
8657
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import string
from openerp.osv import fields, osv
from openerp.tools.translate import _
# Reference Examples of IBAN
_ref_iban = { 'al':'ALkk BBBS SSSK CCCC CCCC CCCC CCCC', 'ad':'ADkk BBBB SSSS CCCC CCCC CCCC',
'at':'ATkk BBBB BCCC CCCC CCCC', 'be': 'BEkk BBBC CCCC CCKK', 'ba': 'BAkk BBBS SSCC CCCC CCKK',
'bg': 'BGkk BBBB SSSS DDCC CCCC CC', 'bh': 'BHkk BBBB SSSS SSSS SSSS SS',
'cr': 'CRkk BBBC CCCC CCCC CCCC C',
'hr': 'HRkk BBBB BBBC CCCC CCCC C', 'cy': 'CYkk BBBS SSSS CCCC CCCC CCCC CCCC',
'cz': 'CZkk BBBB SSSS SSCC CCCC CCCC', 'dk': 'DKkk BBBB CCCC CCCC CC',
'do': 'DOkk BBBB CCCC CCCC CCCC CCCC CCCC',
'ee': 'EEkk BBSS CCCC CCCC CCCK', 'fo': 'FOkk CCCC CCCC CCCC CC',
'fi': 'FIkk BBBB BBCC CCCC CK', 'fr': 'FRkk BBBB BGGG GGCC CCCC CCCC CKK',
'ge': 'GEkk BBCC CCCC CCCC CCCC CC', 'de': 'DEkk BBBB BBBB CCCC CCCC CC',
'gi': 'GIkk BBBB CCCC CCCC CCCC CCC', 'gr': 'GRkk BBBS SSSC CCCC CCCC CCCC CCC',
'gl': 'GLkk BBBB CCCC CCCC CC', 'hu': 'HUkk BBBS SSSC CCCC CCCC CCCC CCCC',
'is':'ISkk BBBB SSCC CCCC XXXX XXXX XX', 'ie': 'IEkk BBBB SSSS SSCC CCCC CC',
'il': 'ILkk BBBS SSCC CCCC CCCC CCC', 'it': 'ITkk KBBB BBSS SSSC CCCC CCCC CCC',
'kz': 'KZkk BBBC CCCC CCCC CCCC', 'kw': 'KWkk BBBB CCCC CCCC CCCC CCCC CCCC CC',
'lv': 'LVkk BBBB CCCC CCCC CCCC C',
'lb': 'LBkk BBBB CCCC CCCC CCCC CCCC CCCC', 'li': 'LIkk BBBB BCCC CCCC CCCC C',
'lt': 'LTkk BBBB BCCC CCCC CCCC', 'lu': 'LUkk BBBC CCCC CCCC CCCC' ,
'mk': 'MKkk BBBC CCCC CCCC CKK', 'mt': 'MTkk BBBB SSSS SCCC CCCC CCCC CCCC CCC',
'mr': 'MRkk BBBB BSSS SSCC CCCC CCCC CKK',
'mu': 'MUkk BBBB BBSS CCCC CCCC CCCC CCCC CC', 'mc': 'MCkk BBBB BGGG GGCC CCCC CCCC CKK',
'me': 'MEkk BBBC CCCC CCCC CCCC KK',
'nl': 'NLkk BBBB CCCC CCCC CC', 'no': 'NOkk BBBB CCCC CCK',
'pl':'PLkk BBBS SSSK CCCC CCCC CCCC CCCC',
'pt': 'PTkk BBBB SSSS CCCC CCCC CCCK K', 'ro': 'ROkk BBBB CCCC CCCC CCCC CCCC',
'sm': 'SMkk KBBB BBSS SSSC CCCC CCCC CCC', 'sa': 'SAkk BBCC CCCC CCCC CCCC CCCC',
'rs': 'RSkk BBBC CCCC CCCC CCCC KK', 'sk': 'SKkk BBBB SSSS SSCC CCCC CCCC',
'si': 'SIkk BBSS SCCC CCCC CKK', 'es': 'ESkk BBBB SSSS KKCC CCCC CCCC',
'se': 'SEkk BBBB CCCC CCCC CCCC CCCC', 'ch': 'CHkk BBBB BCCC CCCC CCCC C',
'tn': 'TNkk BBSS SCCC CCCC CCCC CCCC', 'tr': 'TRkk BBBB BRCC CCCC CCCC CCCC CC',
'ae': 'AEkk BBBC CCCC CCCC CCCC CCC',
'gb': 'GBkk BBBB SSSS SSCC CCCC CC',
}
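# In the reference patterns above, B stands for the national bank code,
# S for the branch code, C for the account number and K for a check digit
# (see _construct_constraint_msg below); the leading 'kk' marks the two
# IBAN check digits.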
def _format_iban(iban_str):
'''
    This function removes all characters from the given 'iban_str' that are
    not alphanumeric and converts it to upper case.
'''
res = ""
if iban_str:
for char in iban_str:
if char.isalnum():
res += char.upper()
return res
def _pretty_iban(iban_str):
"return iban_str in groups of four characters separated by a single space"
res = []
while iban_str:
res.append(iban_str[:4])
iban_str = iban_str[4:]
return ' '.join(res)
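# Illustrative sketch (not part of the original module) of how the two
# helpers above combine:
#   _format_iban('be68 5390-0754 7034')  ->  'BE68539007547034'
#   _pretty_iban('BE68539007547034')     ->  'BE68 5390 0754 7034'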
class res_partner_bank(osv.osv):
_inherit = "res.partner.bank"
def create(self, cr, uid, vals, context=None):
        # Overwrite to format the IBAN number correctly.
if (vals.get('state',False)=='iban') and vals.get('acc_number', False):
vals['acc_number'] = _format_iban(vals['acc_number'])
vals['acc_number'] = _pretty_iban(vals['acc_number'])
return super(res_partner_bank, self).create(cr, uid, vals, context)
def write(self, cr, uid, ids, vals, context=None):
        # Overwrite to format the IBAN number correctly.
if (vals.get('state',False)=='iban') and vals.get('acc_number', False):
vals['acc_number'] = _format_iban(vals['acc_number'])
vals['acc_number'] = _pretty_iban(vals['acc_number'])
return super(res_partner_bank, self).write(cr, uid, ids, vals, context)
def is_iban_valid(self, cr, uid, iban, context=None):
""" Check if IBAN is valid or not
@param iban: IBAN as string
@return: True if IBAN is valid, False otherwise
"""
if not iban:
return False
iban = _format_iban(iban).lower()
if iban[:2] in _ref_iban and len(iban) != len(_format_iban(_ref_iban[iban[:2]])):
return False
#the four first digits have to be shifted to the end
iban = iban[4:] + iban[:4]
#letters have to be transformed into numbers (a = 10, b = 11, ...)
iban2 = ""
for char in iban:
if char.isalpha():
iban2 += str(ord(char)-87)
else:
iban2 += char
#iban is correct if modulo 97 == 1
return int(iban2) % 97 == 1
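    # Worked example (illustrative, using the widely published test IBAN
    # 'GB82 WEST 1234 5698 7654 32'):
    #   after formatting:        'gb82west12345698765432'
    #   after the 4-char shift:  'west12345698765432gb82'
    #   after letter->number:    '3214282912345698765432161182'
    #   int(...) % 97 == 1, so is_iban_valid() returns True.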
def check_iban(self, cr, uid, ids, context=None):
'''
Check the IBAN number
'''
for bank_acc in self.browse(cr, uid, ids, context=context):
if bank_acc.state != 'iban':
continue
if not self.is_iban_valid(cr, uid, bank_acc.acc_number, context=context):
return False
return True
def _construct_constraint_msg(self, cr, uid, ids, context=None):
def default_iban_check(iban_cn):
return iban_cn and iban_cn[0] in string.ascii_lowercase and iban_cn[1] in string.ascii_lowercase
        acc_number = self.browse(cr, uid, ids)[0].acc_number
        iban_country = acc_number and acc_number[:2].lower()
if default_iban_check(iban_country):
if iban_country in _ref_iban:
return _('The IBAN does not seem to be correct. You should have entered something like this %s'), \
('%s \nWhere B = National bank code, S = Branch code,'\
' C = Account No, K = Check digit' % _ref_iban[iban_country])
return _('This IBAN does not pass the validation check, please verify it'), ()
return _('The IBAN is invalid, it should begin with the country code'), ()
def _check_bank(self, cr, uid, ids, context=None):
for partner_bank in self.browse(cr, uid, ids, context=context):
if partner_bank.state == 'iban' and not partner_bank.bank.bic:
return False
return True
def get_bban_from_iban(self, cr, uid, ids, context=None):
'''
        This function returns the bank account number (BBAN) computed from the
        IBAN account number, using the mapping_list dictionary that contains
        the rules associated with each country.
'''
res = {}
mapping_list = {
#TODO add rules for others countries
'be': lambda x: x[4:],
'fr': lambda x: x[14:],
'ch': lambda x: x[9:],
'gb': lambda x: x[14:],
}
for record in self.browse(cr, uid, ids, context=context):
if not record.acc_number:
res[record.id] = False
continue
res[record.id] = False
for code, function in mapping_list.items():
if record.acc_number.lower().startswith(code):
res[record.id] = function(record.acc_number)
break
return res
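        # e.g. (illustrative): with an unformatted Belgian IBAN
        # 'BE68539007547034', the 'be' rule above yields the BBAN
        # '539007547034', i.e. the IBAN without its 4-character
        # country/check prefix.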
_columns = {
# Deprecated: we keep it for backward compatibility, to be removed in v7
# We use acc_number instead of IBAN since v6.1, but we keep this field
# to not break community modules.
'iban': fields.related('acc_number', string='IBAN', size=34, readonly=True, help="International Bank Account Number", type="char"),
}
_constraints = [
(check_iban, _construct_constraint_msg, ["iban", "acc_number", "state"]),
(_check_bank, '\nPlease define BIC/Swift code on bank for bank type IBAN Account to make valid payments', ['bic'])
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
citrix-openstack-build/neutron-lbaas
|
neutron_lbaas/services/loadbalancer/drivers/embrane/driver.py
|
1
|
15783
|
# Copyright 2014 Embrane, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heleosapi import backend_operations as h_op
from heleosapi import constants as h_con
from heleosapi import info as h_info
from oslo.config import cfg
from neutron.api.v2 import attributes
from neutron.common import exceptions as n_exc
from neutron.extensions import loadbalancer as lb_ext
from neutron.i18n import _LW
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as pcon
from neutron.plugins.embrane.common import contexts as embrane_ctx
from neutron.plugins.embrane.common import exceptions as h_exc
from neutron.plugins.embrane.common import utils
from neutron_lbaas.db.loadbalancer import loadbalancer_db as ldb
from neutron_lbaas.services.loadbalancer import constants as lbcon
from neutron_lbaas.services.loadbalancer.drivers import abstract_driver
from neutron_lbaas.services.loadbalancer.drivers.embrane.agent \
import dispatcher
from neutron_lbaas.services.loadbalancer.drivers.embrane import config # noqa
from neutron_lbaas.services.loadbalancer.drivers.embrane \
import constants as econ
from neutron_lbaas.services.loadbalancer.drivers.embrane import db as edb
from neutron_lbaas.services.loadbalancer.drivers.embrane import poller
LOG = logging.getLogger(__name__)
conf = cfg.CONF.heleoslb
confh = {}
try:
confh = cfg.CONF.heleos
except cfg.NoSuchOptError:
pass
def get_conf(x):
try:
return conf.get(x) or confh.get(x)
except cfg.NoSuchOptError:
return
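# Note: get_conf() first consults the 'heleoslb' option group and, when the
# option is unset or falsy there, falls back to the legacy 'heleos' group;
# e.g. get_conf('esm_mgmt') returns conf.esm_mgmt or confh.get('esm_mgmt').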
class EmbraneLbaas(abstract_driver.LoadBalancerAbstractDriver):
def __init__(self, plugin):
config_esm_mgmt = get_conf('esm_mgmt')
config_admin_username = get_conf('admin_username')
config_admin_password = get_conf('admin_password')
config_lb_image_id = get_conf('lb_image')
config_security_zones = {h_con.SzType.IB: get_conf('inband_id'),
h_con.SzType.OOB: get_conf('oob_id'),
h_con.SzType.MGMT: get_conf('mgmt_id'),
h_con.SzType.DUMMY: get_conf('dummy_utif_id')}
config_resource_pool = get_conf('resource_pool_id')
self._heleos_api = h_op.BackendOperations(
esm_mgmt=config_esm_mgmt,
admin_username=config_admin_username,
admin_password=config_admin_password,
lb_image_id=config_lb_image_id,
security_zones=config_security_zones,
resource_pool=config_resource_pool)
self._dispatcher = dispatcher.Dispatcher(
self, get_conf("async_requests"))
self.plugin = plugin
poll_interval = conf.get('sync_interval')
if poll_interval > 0:
self._loop_call = poller.Poller(self)
self._loop_call.start_polling(conf.get('sync_interval'))
self._flavor = get_conf('lb_flavor')
def _validate_vip(self, vip):
if vip.get('connection_limit') and vip['connection_limit'] != -1:
raise h_exc.UnsupportedException(
err_msg=_('Connection limit is not supported by Embrane LB'))
persistence = vip.get('session_persistence')
if (persistence and persistence.get('type') ==
lbcon.SESSION_PERSISTENCE_APP_COOKIE):
p_type = vip['session_persistence']['type']
raise h_exc.UnsupportedException(
err_msg=_('Session persistence %s '
'not supported by Embrane LBaaS') % p_type)
def _delete_vip(self, context, vip):
with context.session.begin(subtransactions=True):
self.plugin._delete_db_vip(context, vip['id'])
return econ.DELETED
def _delete_member(self, context, member):
self.plugin._delete_db_member(context, member['id'])
def _delete_pool_hm(self, context, health_monitor, pool_id):
self.plugin._delete_db_pool_health_monitor(context,
health_monitor['id'],
pool_id)
def _update_vip_graph_state(self, context, vip):
self._heleos_api.update_vip_status(vip)
self.plugin.update_status(context, ldb.Vip, vip['id'],
vip['status'])
if vip['status'] != pcon.ERROR:
pool = self.plugin.get_pool(context, vip['pool_id'])
pool_members = pool['members']
# Manages possible manual changes and monitor actions
self._heleos_api.update_pool_status(vip['id'], pool)
self._heleos_api.update_members_status(vip['id'], pool['id'],
pool_members)
self.plugin.update_status(context, ldb.Pool, pool['id'],
pool['status'])
for member in pool_members:
self.plugin.update_status(context, ldb.Member,
member['id'], member['status'])
def _create_backend_port(self, context, db_pool):
try:
subnet = self.plugin._core_plugin.get_subnet(context,
db_pool["subnet_id"])
except n_exc.SubnetNotFound:
LOG.warning(_LW("Subnet assigned to pool %s doesn't exist, "
"backend port can't be created"), db_pool['id'])
return
fixed_ip = {'subnet_id': subnet['id'],
'fixed_ips': attributes.ATTR_NOT_SPECIFIED}
port_data = {
'tenant_id': db_pool['tenant_id'],
'name': 'pool-' + db_pool['id'],
'network_id': subnet['network_id'],
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'admin_state_up': False,
'device_id': '',
'device_owner': '',
'fixed_ips': [fixed_ip]
}
port = self.plugin._core_plugin.create_port(context,
{'port': port_data})
return edb.add_pool_port(context, db_pool['id'], port['id'])
def _retrieve_utif_info(self, context, neutron_port):
network = self.plugin._core_plugin.get_network(
context, neutron_port['network_id'])
result = h_info.UtifInfo(network.get('provider:segmentation_id'),
network['name'],
network['id'],
False,
network['tenant_id'],
neutron_port['id'],
neutron_port['mac_address'],
network.get('provider:network_type'))
return result
def create_vip(self, context, vip):
self._validate_vip(vip)
db_vip = self.plugin.populate_vip_graph(context, vip)
vip_port = self.plugin._core_plugin._get_port(context,
db_vip['port_id'])
vip_utif_info = self._retrieve_utif_info(context, vip_port)
vip_ip_allocation_info = utils.retrieve_ip_allocation_info(
context, vip_port)
vip_ip_allocation_info.is_gw = True
db_pool = pool_utif_info = pool_ip_allocation_info = None
members = monitors = []
if db_vip['pool_id']:
db_pool = self.plugin.get_pool(
context, db_vip['pool_id'])
pool_port = edb.get_pool_port(context, db_pool["id"])
if pool_port:
db_port = self.plugin._core_plugin._get_port(
context, pool_port["port_id"])
pool_utif_info = self._retrieve_utif_info(context, db_port)
pool_ip_allocation_info = utils.retrieve_ip_allocation_info(
context, db_port)
members = self.plugin.get_members(
context, filters={'id': db_pool['members']})
            monitors = self.plugin.get_health_monitors(
                context, filters={'id': db_pool['health_monitors']})
self._dispatcher.dispatch_lb(
embrane_ctx.DispatcherContext(econ.Events.CREATE_VIP,
db_vip, context, None),
self._flavor, vip_utif_info, vip_ip_allocation_info,
pool_utif_info, pool_ip_allocation_info, db_pool, members,
monitors)
def update_vip(self, context, old_vip, vip):
new_pool = old_port_id = removed_ip = None
new_pool_utif = new_pool_ip_allocation = None
old_pool = {}
members = monitors = []
if old_vip['pool_id'] != vip['pool_id']:
new_pool = self.plugin.get_pool(
context, vip['pool_id'])
members = self.plugin.get_members(
context, filters={'id': new_pool['members']})
            monitors = self.plugin.get_health_monitors(
                context, filters={'id': new_pool['health_monitors']})
new_pool_port = edb.get_pool_port(context, new_pool["id"])
if new_pool_port:
db_port = self.plugin._core_plugin._get_port(
context, new_pool_port["port_id"])
new_pool_utif = self._retrieve_utif_info(context, db_port)
new_pool_ip_allocation = utils.retrieve_ip_allocation_info(
context, db_port)
old_pool = self.plugin.get_pool(
context, old_vip['pool_id'])
old_pool_port = edb.get_pool_port(context, old_pool["id"])
if old_pool_port:
old_port = self.plugin._core_plugin._get_port(
context, old_pool_port['port_id'])
# remove that subnet ip
removed_ip = old_port['fixed_ips'][0]['ip_address']
old_port_id = old_port['id']
self._dispatcher.dispatch_lb(
embrane_ctx.DispatcherContext(econ.Events.UPDATE_VIP, vip,
context, None),
old_pool.get('id'), old_port_id, removed_ip, new_pool_utif,
new_pool_ip_allocation, new_pool, members, monitors)
def delete_vip(self, context, vip):
db_vip = self.plugin.populate_vip_graph(context, vip)
self._dispatcher.dispatch_lb(
embrane_ctx.DispatcherContext(
econ.Events.DELETE_VIP, db_vip, context, None))
def create_pool(self, context, pool):
if pool['subnet_id']:
self._create_backend_port(context, pool)
def update_pool(self, context, old_pool, pool):
with context.session.begin(subtransactions=True):
if old_pool['vip_id']:
try:
db_vip = self.plugin._get_resource(
context, ldb.Vip, old_pool['vip_id'])
except lb_ext.VipNotFound:
return
                monitors = self.plugin.get_health_monitors(
                    context, filters={'id': old_pool['health_monitors']})
self._dispatcher.dispatch_lb(
embrane_ctx.DispatcherContext(econ.Events.UPDATE_POOL,
db_vip, context, None),
pool, monitors)
def delete_pool(self, context, pool):
edb.delete_pool_backend(context, pool['id'])
self.plugin._delete_db_pool(context, pool['id'])
def create_member(self, context, member):
db_pool = self.plugin.get_pool(context, member['pool_id'])
if db_pool['vip_id']:
db_vip = self.plugin._get_resource(context, ldb.Vip,
db_pool['vip_id'])
self._dispatcher.dispatch_lb(
embrane_ctx.DispatcherContext(
econ.Events.ADD_OR_UPDATE_MEMBER, db_vip, context, None),
member, db_pool['protocol'])
def update_member(self, context, old_member, member):
db_pool = self.plugin.get_pool(context, member['pool_id'])
if member['pool_id'] != old_member['pool_id']:
old_pool = self.plugin.get_pool(context, old_member['pool_id'])
if old_pool['vip_id']:
db_vip = self.plugin._get_resource(context, ldb.Vip,
old_pool['vip_id'])
self._dispatcher.dispatch_lb(
embrane_ctx.DispatcherContext(
econ.Events.REMOVE_MEMBER, db_vip, context, None),
old_member)
if db_pool['vip_id']:
db_vip = self.plugin._get_resource(
context, ldb.Vip, db_pool['vip_id'])
self._dispatcher.dispatch_lb(
embrane_ctx.DispatcherContext(
econ.Events.ADD_OR_UPDATE_MEMBER, db_vip, context, None),
member, db_pool['protocol'])
def delete_member(self, context, member):
db_pool = self.plugin.get_pool(context, member['pool_id'])
if db_pool['vip_id']:
db_vip = self.plugin._get_resource(context, ldb.Vip,
db_pool['vip_id'])
self._dispatcher.dispatch_lb(
embrane_ctx.DispatcherContext(
econ.Events.DELETE_MEMBER, db_vip, context, None),
member)
else:
self._delete_member(context, member)
def stats(self, context, pool_id):
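        # This driver does not collect traffic statistics here; a zeroed
        # stats structure is returned for every pool.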
return {'bytes_in': 0,
'bytes_out': 0,
'active_connections': 0,
'total_connections': 0}
def create_pool_health_monitor(self, context, health_monitor, pool_id):
db_pool = self.plugin.get_pool(context, pool_id)
# API call only if vip exists
if db_pool['vip_id']:
db_vip = self.plugin._get_resource(context, ldb.Vip,
db_pool['vip_id'])
self._dispatcher.dispatch_lb(
embrane_ctx.DispatcherContext(
econ.Events.ADD_POOL_HM, db_vip, context, None),
health_monitor, pool_id)
def update_pool_health_monitor(self, context, old_health_monitor,
health_monitor, pool_id):
db_pool = self.plugin.get_pool(context, pool_id)
if db_pool['vip_id']:
db_vip = self.plugin._get_resource(context, ldb.Vip,
db_pool['vip_id'])
self._dispatcher.dispatch_lb(
embrane_ctx.DispatcherContext(
econ.Events.UPDATE_POOL_HM, db_vip, context, None),
health_monitor, pool_id)
def delete_pool_health_monitor(self, context, health_monitor, pool_id):
db_pool = self.plugin.get_pool(context, pool_id)
if db_pool['vip_id']:
db_vip = self.plugin._get_resource(context, ldb.Vip,
db_pool['vip_id'])
self._dispatcher.dispatch_lb(
embrane_ctx.DispatcherContext(
econ.Events.DELETE_POOL_HM, db_vip, context, None),
health_monitor, pool_id)
else:
self._delete_pool_hm(context, health_monitor, pool_id)
|
apache-2.0
|
llonchj/sentry
|
src/sentry/migrations/0059_auto__add_filterkey__add_unique_filterkey_project_key.py
|
36
|
19605
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'FilterKey'
db.create_table('sentry_filterkey', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('project', self.gf('sentry.db.models.fields.FlexibleForeignKey')(to=orm['sentry.Project'])),
('key', self.gf('django.db.models.fields.CharField')(max_length=32)),
))
db.send_create_signal('sentry', ['FilterKey'])
# Adding unique constraint on 'FilterKey', fields ['project', 'key']
db.create_unique('sentry_filterkey', ['project_id', 'key'])
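        # Roughly equivalent DDL (illustrative, PostgreSQL-flavoured; the
        # constraint name below is hypothetical):
        #   CREATE TABLE sentry_filterkey (
        #       id bigserial PRIMARY KEY,
        #       project_id bigint NOT NULL REFERENCES sentry_project (id),
        #       key varchar(32) NOT NULL
        #   );
        #   ALTER TABLE sentry_filterkey
        #       ADD CONSTRAINT sentry_filterkey_project_id_key_uniq
        #       UNIQUE (project_id, key);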
def backwards(self, orm):
# Removing unique constraint on 'FilterKey', fields ['project', 'key']
db.delete_unique('sentry_filterkey', ['project_id', 'key'])
# Deleting model 'FilterKey'
db.delete_table('sentry_filterkey')
models = {
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filterkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'FilterKey'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.view': {
'Meta': {'object_name': 'View'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'verbose_name_plural': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
}
}
complete_apps = ['sentry']
|
bsd-3-clause
|
diegocortassa/TACTIC
|
src/context/client/tactic-api-python-4.0.api04/Lib/inspect.py
|
5
|
39146
|
# -*- coding: iso-8859-1 -*-
"""Get useful information from live Python objects.
This module encapsulates the interface provided by the internal special
attributes (func_*, co_*, im_*, tb_*, etc.) in a friendlier fashion.
It also provides some help for examining source code and class layout.
Here are some of the useful functions provided by this module:
ismodule(), isclass(), ismethod(), isfunction(), isgeneratorfunction(),
isgenerator(), istraceback(), isframe(), iscode(), isbuiltin(),
isroutine() - check object types
getmembers() - get members of an object that satisfy a given condition
getfile(), getsourcefile(), getsource() - find an object's source code
getdoc(), getcomments() - get documentation on an object
getmodule() - determine the module that an object came from
getclasstree() - arrange classes so as to represent their hierarchy
getargspec(), getargvalues() - get info about function arguments
formatargspec(), formatargvalues() - format an argument spec
getouterframes(), getinnerframes() - get info about frames
currentframe() - get the current stack frame
stack(), trace() - get info about frames on the stack or in a traceback
"""
# This module is in the public domain. No warranties.
__author__ = 'Ka-Ping Yee <[email protected]>'
__date__ = '1 Jan 2001'
import sys
import os
import types
import string
import re
import dis
import imp
import tokenize
import linecache
from operator import attrgetter
from collections import namedtuple
# These constants are from Include/code.h.
CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 0x1, 0x2, 0x4, 0x8
CO_NESTED, CO_GENERATOR, CO_NOFREE = 0x10, 0x20, 0x40
# See Include/object.h
TPFLAGS_IS_ABSTRACT = 1 << 20
# ----------------------------------------------------------- type-checking
def ismodule(object):
"""Return true if the object is a module.
Module objects provide these attributes:
__doc__ documentation string
__file__ filename (missing for built-in modules)"""
return isinstance(object, types.ModuleType)
def isclass(object):
"""Return true if the object is a class.
Class objects provide these attributes:
__doc__ documentation string
__module__ name of module in which this class was defined"""
return isinstance(object, types.ClassType) or hasattr(object, '__bases__')
def ismethod(object):
"""Return true if the object is an instance method.
Instance method objects provide these attributes:
__doc__ documentation string
__name__ name with which this method was defined
im_class class object in which this method belongs
im_func function object containing implementation of method
im_self instance to which this method is bound, or None"""
return isinstance(object, types.MethodType)
def ismethoddescriptor(object):
"""Return true if the object is a method descriptor.
But not if ismethod() or isclass() or isfunction() are true.
This is new in Python 2.2, and, for example, is true of int.__add__.
An object passing this test has a __get__ attribute but not a __set__
attribute, but beyond that the set of attributes varies. __name__ is
usually sensible, and __doc__ often is.
Methods implemented via descriptors that also pass one of the other
tests return false from the ismethoddescriptor() test, simply because
the other tests promise more -- you can, e.g., count on having the
im_func attribute (etc) when an object passes ismethod()."""
return (hasattr(object, "__get__")
and not hasattr(object, "__set__") # else it's a data descriptor
and not ismethod(object) # mutual exclusion
and not isfunction(object)
and not isclass(object))
def isdatadescriptor(object):
"""Return true if the object is a data descriptor.
Data descriptors have both a __get__ and a __set__ attribute. Examples are
properties (defined in Python) and getsets and members (defined in C).
Typically, data descriptors will also have __name__ and __doc__ attributes
(properties, getsets, and members have both of these attributes), but this
is not guaranteed."""
return (hasattr(object, "__set__") and hasattr(object, "__get__"))
if hasattr(types, 'MemberDescriptorType'):
# CPython and equivalent
def ismemberdescriptor(object):
"""Return true if the object is a member descriptor.
Member descriptors are specialized descriptors defined in extension
modules."""
return isinstance(object, types.MemberDescriptorType)
else:
# Other implementations
def ismemberdescriptor(object):
"""Return true if the object is a member descriptor.
Member descriptors are specialized descriptors defined in extension
modules."""
return False
if hasattr(types, 'GetSetDescriptorType'):
# CPython and equivalent
def isgetsetdescriptor(object):
"""Return true if the object is a getset descriptor.
getset descriptors are specialized descriptors defined in extension
modules."""
return isinstance(object, types.GetSetDescriptorType)
else:
# Other implementations
def isgetsetdescriptor(object):
"""Return true if the object is a getset descriptor.
getset descriptors are specialized descriptors defined in extension
modules."""
return False
def isfunction(object):
"""Return true if the object is a user-defined function.
Function objects provide these attributes:
__doc__ documentation string
__name__ name with which this function was defined
func_code code object containing compiled function bytecode
func_defaults tuple of any default values for arguments
func_doc (same as __doc__)
func_globals global namespace in which this function was defined
func_name (same as __name__)"""
return isinstance(object, types.FunctionType)
def isgeneratorfunction(object):
"""Return true if the object is a user-defined generator function.
    Generator function objects provide the same attributes as functions.
See isfunction.__doc__ for attributes listing."""
return bool((isfunction(object) or ismethod(object)) and
object.func_code.co_flags & CO_GENERATOR)
def isgenerator(object):
"""Return true if the object is a generator.
Generator objects provide these attributes:
        __iter__         defined to support iteration over container
close raises a new GeneratorExit exception inside the
generator to terminate the iteration
gi_code code object
gi_frame frame object or possibly None once the generator has
been exhausted
gi_running set to 1 when generator is executing, 0 otherwise
next return the next item from the container
send resumes the generator and "sends" a value that becomes
the result of the current yield-expression
throw used to raise an exception inside the generator"""
return isinstance(object, types.GeneratorType)
def istraceback(object):
"""Return true if the object is a traceback.
Traceback objects provide these attributes:
tb_frame frame object at this level
tb_lasti index of last attempted instruction in bytecode
tb_lineno current line number in Python source code
tb_next next inner traceback object (called by this level)"""
return isinstance(object, types.TracebackType)
def isframe(object):
"""Return true if the object is a frame object.
Frame objects provide these attributes:
f_back next outer frame object (this frame's caller)
f_builtins built-in namespace seen by this frame
f_code code object being executed in this frame
f_exc_traceback traceback if raised in this frame, or None
f_exc_type exception type if raised in this frame, or None
f_exc_value exception value if raised in this frame, or None
f_globals global namespace seen by this frame
f_lasti index of last attempted instruction in bytecode
f_lineno current line number in Python source code
f_locals local namespace seen by this frame
f_restricted 0 or 1 if frame is in restricted execution mode
f_trace tracing function for this frame, or None"""
return isinstance(object, types.FrameType)
def iscode(object):
"""Return true if the object is a code object.
Code objects provide these attributes:
co_argcount number of arguments (not including * or ** args)
co_code string of raw compiled bytecode
co_consts tuple of constants used in the bytecode
co_filename name of file in which this code object was created
co_firstlineno number of first line in Python source code
co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
co_lnotab encoded mapping of line numbers to bytecode indices
co_name name with which this code object was defined
co_names tuple of names of local variables
co_nlocals number of local variables
co_stacksize virtual machine stack space required
co_varnames tuple of names of arguments and local variables"""
return isinstance(object, types.CodeType)
def isbuiltin(object):
"""Return true if the object is a built-in function or method.
Built-in functions and methods provide these attributes:
__doc__ documentation string
__name__ original name of this function or method
__self__ instance to which a method is bound, or None"""
return isinstance(object, types.BuiltinFunctionType)
def isroutine(object):
"""Return true if the object is any kind of function or method."""
return (isbuiltin(object)
or isfunction(object)
or ismethod(object)
or ismethoddescriptor(object))
def isabstract(object):
"""Return true if the object is an abstract base class (ABC)."""
return isinstance(object, type) and object.__flags__ & TPFLAGS_IS_ABSTRACT
def getmembers(object, predicate=None):
"""Return all members of an object as (name, value) pairs sorted by name.
Optionally, only return members that satisfy a given predicate."""
results = []
for key in dir(object):
value = getattr(object, key)
if not predicate or predicate(value):
results.append((key, value))
results.sort()
return results
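# e.g. getmembers(os.path, isfunction) returns os.path's functions as a
# name-sorted list of (name, function) pairs.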
Attribute = namedtuple('Attribute', 'name kind defining_class object')
def classify_class_attrs(cls):
"""Return list of attribute-descriptor tuples.
For each name in dir(cls), the return list contains a 4-tuple
with these elements:
0. The name (a string).
1. The kind of attribute this is, one of these strings:
'class method' created via classmethod()
'static method' created via staticmethod()
'property' created via property()
'method' any other flavor of method
'data' not a method
2. The class which defined this attribute (a class).
3. The object as obtained directly from the defining class's
__dict__, not via getattr. This is especially important for
data attributes: C.data is just a data object, but
C.__dict__['data'] may be a data descriptor with additional
info, like a __doc__ string.
"""
mro = getmro(cls)
names = dir(cls)
result = []
for name in names:
# Get the object associated with the name.
# Getting an obj from the __dict__ sometimes reveals more than
# using getattr. Static and class methods are dramatic examples.
if name in cls.__dict__:
obj = cls.__dict__[name]
else:
obj = getattr(cls, name)
# Figure out where it was defined.
homecls = getattr(obj, "__objclass__", None)
if homecls is None:
# search the dicts.
for base in mro:
if name in base.__dict__:
homecls = base
break
# Get the object again, in order to get it from the defining
# __dict__ instead of via getattr (if possible).
if homecls is not None and name in homecls.__dict__:
obj = homecls.__dict__[name]
# Also get the object via getattr.
obj_via_getattr = getattr(cls, name)
# Classify the object.
if isinstance(obj, staticmethod):
kind = "static method"
elif isinstance(obj, classmethod):
kind = "class method"
elif isinstance(obj, property):
kind = "property"
elif (ismethod(obj_via_getattr) or
ismethoddescriptor(obj_via_getattr)):
kind = "method"
else:
kind = "data"
result.append(Attribute(name, kind, homecls, obj))
return result
# ----------------------------------------------------------- class helpers
def _searchbases(cls, accum):
# Simulate the "classic class" search order.
if cls in accum:
return
accum.append(cls)
for base in cls.__bases__:
_searchbases(base, accum)
def getmro(cls):
"Return tuple of base classes (including cls) in method resolution order."
if hasattr(cls, "__mro__"):
return cls.__mro__
else:
result = []
_searchbases(cls, result)
return tuple(result)
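# e.g. for new-style classes
#   class A(object): pass
#   class B(A): pass
#   class C(A): pass
#   class D(B, C): pass
# getmro(D) returns (D, B, C, A, object); for classic classes the
# _searchbases() fallback yields the old depth-first, left-to-right order
# with duplicates skipped.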
# -------------------------------------------------- source code extraction
def indentsize(line):
"""Return the indent size, in spaces, at the start of a line of text."""
expline = string.expandtabs(line)
return len(expline) - len(string.lstrip(expline))
def getdoc(object):
"""Get the documentation string for an object.
All tabs are expanded to spaces. To clean up docstrings that are
    indented to line up with blocks of code, any whitespace that can be
uniformly removed from the second line onwards is removed."""
try:
doc = object.__doc__
except AttributeError:
return None
if not isinstance(doc, types.StringTypes):
return None
return cleandoc(doc)
def cleandoc(doc):
"""Clean up indentation from docstrings.
Any whitespace that can be uniformly removed from the second line
onwards is removed."""
try:
lines = string.split(string.expandtabs(doc), '\n')
except UnicodeError:
return None
else:
# Find minimum indentation of any non-blank lines after first line.
margin = sys.maxint
for line in lines[1:]:
content = len(string.lstrip(line))
if content:
indent = len(line) - content
margin = min(margin, indent)
# Remove indentation.
if lines:
lines[0] = lines[0].lstrip()
if margin < sys.maxint:
for i in range(1, len(lines)): lines[i] = lines[i][margin:]
# Remove any trailing or leading blank lines.
while lines and not lines[-1]:
lines.pop()
while lines and not lines[0]:
lines.pop(0)
return string.join(lines, '\n')
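# e.g. cleandoc('Summary line.\n    Indented detail.\n    More detail.')
# returns 'Summary line.\nIndented detail.\nMore detail.' -- the common
# 4-space margin from the second line onwards is stripped.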
def getfile(object):
"""Work out which source or compiled file an object was defined in."""
if ismodule(object):
if hasattr(object, '__file__'):
return object.__file__
raise TypeError('arg is a built-in module')
if isclass(object):
object = sys.modules.get(object.__module__)
if hasattr(object, '__file__'):
return object.__file__
raise TypeError('arg is a built-in class')
if ismethod(object):
object = object.im_func
if isfunction(object):
object = object.func_code
if istraceback(object):
object = object.tb_frame
if isframe(object):
object = object.f_code
if iscode(object):
return object.co_filename
raise TypeError('arg is not a module, class, method, '
'function, traceback, frame, or code object')
ModuleInfo = namedtuple('ModuleInfo', 'name suffix mode module_type')
def getmoduleinfo(path):
"""Get the module name, suffix, mode, and module type for a given file."""
filename = os.path.basename(path)
suffixes = map(lambda info:
(-len(info[0]), info[0], info[1], info[2]),
imp.get_suffixes())
suffixes.sort() # try longest suffixes first, in case they overlap
for neglen, suffix, mode, mtype in suffixes:
if filename[neglen:] == suffix:
return ModuleInfo(filename[:neglen], suffix, mode, mtype)
def getmodulename(path):
"""Return the module name for a given file, or None."""
info = getmoduleinfo(path)
if info: return info[0]
def getsourcefile(object):
"""Return the Python source file an object was defined in, if it exists."""
filename = getfile(object)
if string.lower(filename[-4:]) in ('.pyc', '.pyo'):
filename = filename[:-4] + '.py'
for suffix, mode, kind in imp.get_suffixes():
if 'b' in mode and string.lower(filename[-len(suffix):]) == suffix:
# Looks like a binary file. We want to only return a text file.
return None
if os.path.exists(filename):
return filename
# only return a non-existent filename if the module has a PEP 302 loader
if hasattr(getmodule(object, filename), '__loader__'):
return filename
def getabsfile(object, _filename=None):
"""Return an absolute path to the source or compiled file for an object.
The idea is for each object to have a unique origin, so this routine
normalizes the result as much as possible."""
if _filename is None:
_filename = getsourcefile(object) or getfile(object)
return os.path.normcase(os.path.abspath(_filename))
modulesbyfile = {}
_filesbymodname = {}
def getmodule(object, _filename=None):
"""Return the module an object was defined in, or None if not found."""
if ismodule(object):
return object
if hasattr(object, '__module__'):
return sys.modules.get(object.__module__)
# Try the filename to modulename cache
if _filename is not None and _filename in modulesbyfile:
return sys.modules.get(modulesbyfile[_filename])
# Try the cache again with the absolute file name
try:
file = getabsfile(object, _filename)
except TypeError:
return None
if file in modulesbyfile:
return sys.modules.get(modulesbyfile[file])
# Update the filename to module name cache and check yet again
# Copy sys.modules in order to cope with changes while iterating
for modname, module in sys.modules.items():
if ismodule(module) and hasattr(module, '__file__'):
f = module.__file__
if f == _filesbymodname.get(modname, None):
# Have already mapped this module, so skip it
continue
_filesbymodname[modname] = f
f = getabsfile(module)
# Always map to the name the module knows itself by
modulesbyfile[f] = modulesbyfile[
os.path.realpath(f)] = module.__name__
if file in modulesbyfile:
return sys.modules.get(modulesbyfile[file])
# Check the main module
main = sys.modules['__main__']
if not hasattr(object, '__name__'):
return None
if hasattr(main, object.__name__):
mainobject = getattr(main, object.__name__)
if mainobject is object:
return main
# Check builtins
builtin = sys.modules['__builtin__']
if hasattr(builtin, object.__name__):
builtinobject = getattr(builtin, object.__name__)
if builtinobject is object:
return builtin
def findsource(object):
"""Return the entire source file and starting line number for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a list of all the lines
in the file and the line number indexes a line in that list. An IOError
is raised if the source code cannot be retrieved."""
file = getsourcefile(object) or getfile(object)
module = getmodule(object, file)
if module:
lines = linecache.getlines(file, module.__dict__)
else:
lines = linecache.getlines(file)
if not lines:
raise IOError('could not get source code')
if ismodule(object):
return lines, 0
if isclass(object):
name = object.__name__
pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
# make some effort to find the best matching class definition:
# use the one with the least indentation, which is the one
# that's most probably not inside a function definition.
candidates = []
for i in range(len(lines)):
match = pat.match(lines[i])
if match:
# if it's at toplevel, it's already the best one
if lines[i][0] == 'c':
return lines, i
# else add whitespace to candidate list
candidates.append((match.group(1), i))
if candidates:
# this will sort by whitespace, and by line number,
# less whitespace first
candidates.sort()
return lines, candidates[0][1]
else:
raise IOError('could not find class definition')
if ismethod(object):
object = object.im_func
if isfunction(object):
object = object.func_code
if istraceback(object):
object = object.tb_frame
if isframe(object):
object = object.f_code
if iscode(object):
if not hasattr(object, 'co_firstlineno'):
raise IOError('could not find function definition')
lnum = object.co_firstlineno - 1
pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
while lnum > 0:
if pat.match(lines[lnum]): break
lnum = lnum - 1
return lines, lnum
raise IOError('could not find code object')
def getcomments(object):
"""Get lines of comments immediately preceding an object's source code.
Returns None when source can't be found.
"""
try:
lines, lnum = findsource(object)
except (IOError, TypeError):
return None
if ismodule(object):
# Look for a comment block at the top of the file.
start = 0
if lines and lines[0][:2] == '#!': start = 1
while start < len(lines) and string.strip(lines[start]) in ('', '#'):
start = start + 1
if start < len(lines) and lines[start][:1] == '#':
comments = []
end = start
while end < len(lines) and lines[end][:1] == '#':
comments.append(string.expandtabs(lines[end]))
end = end + 1
return string.join(comments, '')
# Look for a preceding block of comments at the same indentation.
elif lnum > 0:
indent = indentsize(lines[lnum])
end = lnum - 1
if end >= 0 and string.lstrip(lines[end])[:1] == '#' and \
indentsize(lines[end]) == indent:
comments = [string.lstrip(string.expandtabs(lines[end]))]
if end > 0:
end = end - 1
comment = string.lstrip(string.expandtabs(lines[end]))
while comment[:1] == '#' and indentsize(lines[end]) == indent:
comments[:0] = [comment]
end = end - 1
if end < 0: break
comment = string.lstrip(string.expandtabs(lines[end]))
while comments and string.strip(comments[0]) == '#':
comments[:1] = []
while comments and string.strip(comments[-1]) == '#':
comments[-1:] = []
return string.join(comments, '')
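# Illustrative sketch (not part of the original module): getcomments returns
# the contiguous comment block directly above a definition -- here the
# "source code extraction" banner above indentsize -- assuming this module
# was loaded from its .py source file.
def _demo_getcomments():
    return getcomments(indentsize)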
class EndOfBlock(Exception): pass
class BlockFinder:
"""Provide a tokeneater() method to detect the end of a code block."""
def __init__(self):
self.indent = 0
self.islambda = False
self.started = False
self.passline = False
self.last = 1
def tokeneater(self, type, token, srow_scol, erow_ecol, line):
srow, scol = srow_scol
erow, ecol = erow_ecol
if not self.started:
# look for the first "def", "class" or "lambda"
if token in ("def", "class", "lambda"):
if token == "lambda":
self.islambda = True
self.started = True
self.passline = True # skip to the end of the line
elif type == tokenize.NEWLINE:
self.passline = False # stop skipping when a NEWLINE is seen
self.last = srow
if self.islambda: # lambdas always end at the first NEWLINE
raise EndOfBlock
elif self.passline:
pass
elif type == tokenize.INDENT:
self.indent = self.indent + 1
self.passline = True
elif type == tokenize.DEDENT:
self.indent = self.indent - 1
            # the end of matching indent/dedent pairs ends a block
# (note that this only works for "def"/"class" blocks,
# not e.g. for "if: else:" or "try: finally:" blocks)
if self.indent <= 0:
raise EndOfBlock
elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
            # any other token on the same indentation level ends the previous
# block as well, except the pseudo-tokens COMMENT and NL.
raise EndOfBlock
def getblock(lines):
"""Extract the block of code at the top of the given list of lines."""
blockfinder = BlockFinder()
try:
tokenize.tokenize(iter(lines).next, blockfinder.tokeneater)
except (EndOfBlock, IndentationError):
pass
return lines[:blockfinder.last]
def getsourcelines(object):
"""Return a list of source lines and starting line number for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a list of the lines
corresponding to the object and the line number indicates where in the
original source file the first line of code was found. An IOError is
raised if the source code cannot be retrieved."""
lines, lnum = findsource(object)
if ismodule(object): return lines, 0
else: return getblock(lines[lnum:]), lnum + 1
def getsource(object):
"""Return the text of the source code for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a single string. An
IOError is raised if the source code cannot be retrieved."""
lines, lnum = getsourcelines(object)
return string.join(lines, '')
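# Illustrative sketch (not part of the original module): getsourcelines and
# getsource are thin wrappers over findsource plus getblock, shown here on
# this module's own cleandoc (assuming the .py source file is available).
def _demo_getsource():
    lines, lnum = getsourcelines(cleandoc)  # list of lines, 1-based lineno
    text = getsource(cleandoc)              # the same block as one string
    return lnum, len(lines), len(text)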
# --------------------------------------------------- class tree extraction
def walktree(classes, children, parent):
"""Recursive helper function for getclasstree()."""
results = []
classes.sort(key=attrgetter('__module__', '__name__'))
for c in classes:
results.append((c, c.__bases__))
if c in children:
results.append(walktree(children[c], children, c))
return results
def getclasstree(classes, unique=0):
"""Arrange the given list of classes into a hierarchy of nested lists.
Where a nested list appears, it contains classes derived from the class
whose entry immediately precedes the list. Each entry is a 2-tuple
containing a class and a tuple of its base classes. If the 'unique'
argument is true, exactly one entry appears in the returned structure
for each class in the given list. Otherwise, classes using multiple
inheritance and their descendants will appear multiple times."""
children = {}
roots = []
for c in classes:
if c.__bases__:
for parent in c.__bases__:
if not parent in children:
children[parent] = []
children[parent].append(c)
if unique and parent in classes: break
elif c not in roots:
roots.append(c)
for parent in children:
if parent not in classes:
roots.append(parent)
return walktree(roots, children, None)
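# Illustrative sketch (not part of the original module): getclasstree on a
# tiny hypothetical hierarchy; a nested list follows the class it derives from.
def _demo_getclasstree():
    class A(object): pass
    class B(A): pass
    class C(A): pass
    # Roughly: [(object, ()), [(A, (object,)), [(B, (A,)), (C, (A,))]]]
    return getclasstree([A, B, C])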
# ------------------------------------------------ argument list extraction
Arguments = namedtuple('Arguments', 'args varargs keywords')
def getargs(co):
"""Get information about the arguments accepted by a code object.
Three things are returned: (args, varargs, varkw), where 'args' is
a list of argument names (possibly containing nested lists), and
'varargs' and 'varkw' are the names of the * and ** arguments or None."""
if not iscode(co):
raise TypeError('arg is not a code object')
nargs = co.co_argcount
names = co.co_varnames
args = list(names[:nargs])
step = 0
# The following acrobatics are for anonymous (tuple) arguments.
for i in range(nargs):
if args[i][:1] in ('', '.'):
stack, remain, count = [], [], []
while step < len(co.co_code):
op = ord(co.co_code[step])
step = step + 1
if op >= dis.HAVE_ARGUMENT:
opname = dis.opname[op]
value = ord(co.co_code[step]) + ord(co.co_code[step+1])*256
step = step + 2
if opname in ('UNPACK_TUPLE', 'UNPACK_SEQUENCE'):
remain.append(value)
count.append(value)
elif opname == 'STORE_FAST':
stack.append(names[value])
# Special case for sublists of length 1: def foo((bar))
# doesn't generate the UNPACK_TUPLE bytecode, so if
# `remain` is empty here, we have such a sublist.
if not remain:
stack[0] = [stack[0]]
break
else:
remain[-1] = remain[-1] - 1
while remain[-1] == 0:
remain.pop()
size = count.pop()
stack[-size:] = [stack[-size:]]
if not remain: break
remain[-1] = remain[-1] - 1
if not remain: break
args[i] = stack[0]
varargs = None
if co.co_flags & CO_VARARGS:
varargs = co.co_varnames[nargs]
nargs = nargs + 1
varkw = None
if co.co_flags & CO_VARKEYWORDS:
varkw = co.co_varnames[nargs]
return Arguments(args, varargs, varkw)
ArgSpec = namedtuple('ArgSpec', 'args varargs keywords defaults')
def getargspec(func):
"""Get the names and default values of a function's arguments.
A tuple of four things is returned: (args, varargs, varkw, defaults).
'args' is a list of the argument names (it may contain nested lists).
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'defaults' is an n-tuple of the default values of the last n arguments.
"""
if ismethod(func):
func = func.im_func
if not isfunction(func):
raise TypeError('arg is not a Python function')
args, varargs, varkw = getargs(func.func_code)
return ArgSpec(args, varargs, varkw, func.func_defaults)
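# Illustrative sketch (not part of the original module): getargspec on a
# hypothetical function with a default value, *args and **kwargs.
def _demo_getargspec():
    def f(a, b=1, *rest, **opts):
        pass
    # -> ArgSpec(args=['a', 'b'], varargs='rest', keywords='opts', defaults=(1,))
    return getargspec(f)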
ArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals')
def getargvalues(frame):
"""Get information about arguments passed into a particular frame.
A tuple of four things is returned: (args, varargs, varkw, locals).
'args' is a list of the argument names (it may contain nested lists).
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'locals' is the locals dictionary of the given frame."""
args, varargs, varkw = getargs(frame.f_code)
return ArgInfo(args, varargs, varkw, frame.f_locals)
def joinseq(seq):
if len(seq) == 1:
return '(' + seq[0] + ',)'
else:
return '(' + string.join(seq, ', ') + ')'
def strseq(object, convert, join=joinseq):
"""Recursively walk a sequence, stringifying each element."""
if type(object) in (list, tuple):
return join(map(lambda o, c=convert, j=join: strseq(o, c, j), object))
else:
return convert(object)
def formatargspec(args, varargs=None, varkw=None, defaults=None,
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value),
join=joinseq):
"""Format an argument spec from the 4 values returned by getargspec.
The first four arguments are (args, varargs, varkw, defaults). The
other four arguments are the corresponding optional formatting functions
that are called to turn names and values into strings. The ninth
argument is an optional function to format the sequence of arguments."""
specs = []
if defaults:
firstdefault = len(args) - len(defaults)
for i in range(len(args)):
spec = strseq(args[i], formatarg, join)
if defaults and i >= firstdefault:
spec = spec + formatvalue(defaults[i - firstdefault])
specs.append(spec)
if varargs is not None:
specs.append(formatvarargs(varargs))
if varkw is not None:
specs.append(formatvarkw(varkw))
return '(' + string.join(specs, ', ') + ')'
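# Illustrative sketch (not part of the original module): round-tripping a
# signature through getargspec and formatargspec.
def _demo_formatargspec():
    def f(a, b=1, *rest, **opts):
        pass
    # -> '(a, b=1, *rest, **opts)'
    return formatargspec(*getargspec(f))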
def formatargvalues(args, varargs, varkw, locals,
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value),
join=joinseq):
"""Format an argument spec from the 4 values returned by getargvalues.
The first four arguments are (args, varargs, varkw, locals). The
next four arguments are the corresponding optional formatting functions
that are called to turn names and values into strings. The ninth
argument is an optional function to format the sequence of arguments."""
def convert(name, locals=locals,
formatarg=formatarg, formatvalue=formatvalue):
return formatarg(name) + formatvalue(locals[name])
specs = []
for i in range(len(args)):
specs.append(strseq(args[i], convert, join))
if varargs:
specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
if varkw:
specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
return '(' + string.join(specs, ', ') + ')'
# -------------------------------------------------- stack frame extraction
Traceback = namedtuple('Traceback', 'filename lineno function code_context index')
def getframeinfo(frame, context=1):
"""Get information about a frame or traceback object.
A tuple of five things is returned: the filename, the line number of
the current line, the function name, a list of lines of context from
the source code, and the index of the current line within that list.
The optional second argument specifies the number of lines of context
to return, which are centered around the current line."""
if istraceback(frame):
lineno = frame.tb_lineno
frame = frame.tb_frame
else:
lineno = frame.f_lineno
if not isframe(frame):
raise TypeError('arg is not a frame or traceback object')
filename = getsourcefile(frame) or getfile(frame)
if context > 0:
start = lineno - 1 - context//2
try:
lines, lnum = findsource(frame)
except IOError:
lines = index = None
else:
start = max(start, 1)
start = max(0, min(start, len(lines) - context))
lines = lines[start:start+context]
index = lineno - 1 - start
else:
lines = index = None
return Traceback(filename, lineno, frame.f_code.co_name, lines, index)
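# Illustrative sketch (not part of the original module): getframeinfo on the
# demo's own frame, asking for three lines of context. Assumes a CPython-style
# interpreter where currentframe() returns a frame; code_context is None when
# the source file cannot be found.
def _demo_getframeinfo():
    info = getframeinfo(currentframe(), context=3)
    return info.function, info.lineno  # -> ('_demo_getframeinfo', ...)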
def getlineno(frame):
"""Get the line number from a frame object, allowing for optimization."""
# FrameType.f_lineno is now a descriptor that grovels co_lnotab
return frame.f_lineno
def getouterframes(frame, context=1):
"""Get a list of records for a frame and all higher (calling) frames.
Each record contains a frame object, filename, line number, function
name, a list of lines of context, and index within the context."""
framelist = []
while frame:
framelist.append((frame,) + getframeinfo(frame, context))
frame = frame.f_back
return framelist
def getinnerframes(tb, context=1):
"""Get a list of records for a traceback's frame and all lower frames.
Each record contains a frame object, filename, line number, function
name, a list of lines of context, and index within the context."""
framelist = []
while tb:
framelist.append((tb.tb_frame,) + getframeinfo(tb, context))
tb = tb.tb_next
return framelist
if hasattr(sys, '_getframe'):
currentframe = sys._getframe
else:
currentframe = lambda _=None: None
def stack(context=1):
"""Return a list of records for the stack above the caller's frame."""
return getouterframes(sys._getframe(1), context)
def trace(context=1):
"""Return a list of records for the stack below the current exception."""
return getinnerframes(sys.exc_info()[2], context)
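# Illustrative sketch (not part of the original module): stack() as seen from
# inside a function; each record is (frame, filename, lineno, function,
# code_context, index), with the currently running function first.
def _demo_stack():
    records = stack()
    return records[0][3]  # -> '_demo_stack'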
|
epl-1.0
|
dpac-vlsi/SynchroTrace
|
src/arch/x86/isa/insts/simd64/floating_point/arithmetic/accumulation.py
|
91
|
2169
|
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
# PFACC
# PFNACC
# PFPNACC
'''
|
bsd-3-clause
|
FireBladeNooT/Medusa_1_6
|
lib/github/NamedUser.py
|
3
|
22789
|
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Steve English <[email protected]> #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# #
# This file is part of PyGithub. #
# http://pygithub.github.io/PyGithub/v1/index.html #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.PaginatedList
import github.Gist
import github.Repository
import github.NamedUser
import github.Plan
import github.Organization
import github.Event
class NamedUser(github.GithubObject.CompletableGithubObject):
"""
This class represents NamedUsers as returned for example by http://developer.github.com/v3/todo
"""
def __repr__(self):
return self.get__repr__({"login": self._login.value})
@property
def avatar_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._avatar_url)
return self._avatar_url.value
@property
def bio(self):
"""
:type: string
"""
self._completeIfNotSet(self._bio)
return self._bio.value
@property
def blog(self):
"""
:type: string
"""
self._completeIfNotSet(self._blog)
return self._blog.value
@property
def collaborators(self):
"""
:type: integer
"""
self._completeIfNotSet(self._collaborators)
return self._collaborators.value
@property
def company(self):
"""
:type: string
"""
self._completeIfNotSet(self._company)
return self._company.value
@property
def contributions(self):
"""
:type: integer
"""
self._completeIfNotSet(self._contributions)
return self._contributions.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def disk_usage(self):
"""
:type: integer
"""
self._completeIfNotSet(self._disk_usage)
return self._disk_usage.value
@property
def email(self):
"""
:type: string
"""
self._completeIfNotSet(self._email)
return self._email.value
@property
def events_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._events_url)
return self._events_url.value
@property
def followers(self):
"""
:type: integer
"""
self._completeIfNotSet(self._followers)
return self._followers.value
@property
def followers_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._followers_url)
return self._followers_url.value
@property
def following(self):
"""
:type: integer
"""
self._completeIfNotSet(self._following)
return self._following.value
@property
def following_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._following_url)
return self._following_url.value
@property
def gists_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._gists_url)
return self._gists_url.value
@property
def gravatar_id(self):
"""
:type: string
"""
self._completeIfNotSet(self._gravatar_id)
return self._gravatar_id.value
@property
def hireable(self):
"""
:type: bool
"""
self._completeIfNotSet(self._hireable)
return self._hireable.value
@property
def html_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def location(self):
"""
:type: string
"""
self._completeIfNotSet(self._location)
return self._location.value
@property
def login(self):
"""
:type: string
"""
self._completeIfNotSet(self._login)
return self._login.value
@property
def name(self):
"""
:type: string
"""
self._completeIfNotSet(self._name)
return self._name.value
@property
def organizations_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._organizations_url)
return self._organizations_url.value
@property
def owned_private_repos(self):
"""
:type: integer
"""
self._completeIfNotSet(self._owned_private_repos)
return self._owned_private_repos.value
@property
def plan(self):
"""
:type: :class:`github.Plan.Plan`
"""
self._completeIfNotSet(self._plan)
return self._plan.value
@property
def private_gists(self):
"""
:type: integer
"""
self._completeIfNotSet(self._private_gists)
return self._private_gists.value
@property
def public_gists(self):
"""
:type: integer
"""
self._completeIfNotSet(self._public_gists)
return self._public_gists.value
@property
def public_repos(self):
"""
:type: integer
"""
self._completeIfNotSet(self._public_repos)
return self._public_repos.value
@property
def received_events_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._received_events_url)
return self._received_events_url.value
@property
def repos_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._repos_url)
return self._repos_url.value
@property
def starred_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._starred_url)
return self._starred_url.value
@property
def subscriptions_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._subscriptions_url)
return self._subscriptions_url.value
@property
def total_private_repos(self):
"""
:type: integer
"""
self._completeIfNotSet(self._total_private_repos)
return self._total_private_repos.value
@property
def type(self):
"""
:type: string
"""
self._completeIfNotSet(self._type)
return self._type.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
def get_events(self):
"""
:calls: `GET /users/:user/events <http://developer.github.com/v3/activity/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
"""
return github.PaginatedList.PaginatedList(
github.Event.Event,
self._requester,
self.url + "/events",
None
)
def get_followers(self):
"""
:calls: `GET /users/:user/followers <http://developer.github.com/v3/users/followers>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
NamedUser,
self._requester,
self.url + "/followers",
None
)
def get_following(self):
"""
:calls: `GET /users/:user/following <http://developer.github.com/v3/users/followers>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
NamedUser,
self._requester,
self.url + "/following",
None
)
def get_gists(self):
"""
:calls: `GET /users/:user/gists <http://developer.github.com/v3/gists>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Gist.Gist`
"""
return github.PaginatedList.PaginatedList(
github.Gist.Gist,
self._requester,
self.url + "/gists",
None
)
def get_keys(self):
"""
:calls: `GET /users/:user/keys <http://developer.github.com/v3/users/keys>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.UserKey.UserKey`
"""
return github.PaginatedList.PaginatedList(
github.UserKey.UserKey,
self._requester,
self.url + "/keys",
None
)
def get_orgs(self):
"""
:calls: `GET /users/:user/orgs <http://developer.github.com/v3/orgs>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Organization.Organization`
"""
return github.PaginatedList.PaginatedList(
github.Organization.Organization,
self._requester,
self.url + "/orgs",
None
)
def get_public_events(self):
"""
:calls: `GET /users/:user/events/public <http://developer.github.com/v3/activity/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
"""
return github.PaginatedList.PaginatedList(
github.Event.Event,
self._requester,
self.url + "/events/public",
None
)
def get_public_received_events(self):
"""
:calls: `GET /users/:user/received_events/public <http://developer.github.com/v3/activity/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
"""
return github.PaginatedList.PaginatedList(
github.Event.Event,
self._requester,
self.url + "/received_events/public",
None
)
def get_received_events(self):
"""
:calls: `GET /users/:user/received_events <http://developer.github.com/v3/activity/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
"""
return github.PaginatedList.PaginatedList(
github.Event.Event,
self._requester,
self.url + "/received_events",
None
)
def get_repo(self, name):
"""
:calls: `GET /repos/:owner/:repo <http://developer.github.com/v3/repos>`_
:param name: string
:rtype: :class:`github.Repository.Repository`
"""
assert isinstance(name, (str, unicode)), name
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/repos/" + self.login + "/" + name
)
return github.Repository.Repository(self._requester, headers, data, completed=True)
def get_repos(self, type=github.GithubObject.NotSet):
"""
:calls: `GET /users/:user/repos <http://developer.github.com/v3/repos>`_
:param type: string
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
assert type is github.GithubObject.NotSet or isinstance(type, (str, unicode)), type
url_parameters = dict()
if type is not github.GithubObject.NotSet:
url_parameters["type"] = type
return github.PaginatedList.PaginatedList(
github.Repository.Repository,
self._requester,
self.url + "/repos",
url_parameters
)
def get_starred(self):
"""
:calls: `GET /users/:user/starred <http://developer.github.com/v3/activity/starring>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
return github.PaginatedList.PaginatedList(
github.Repository.Repository,
self._requester,
self.url + "/starred",
None
)
def get_subscriptions(self):
"""
:calls: `GET /users/:user/subscriptions <http://developer.github.com/v3/activity/watching>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
return github.PaginatedList.PaginatedList(
github.Repository.Repository,
self._requester,
self.url + "/subscriptions",
None
)
def get_watched(self):
"""
:calls: `GET /users/:user/watched <http://developer.github.com/v3/activity/starring>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
return github.PaginatedList.PaginatedList(
github.Repository.Repository,
self._requester,
self.url + "/watched",
None
)
def has_in_following(self, following):
"""
:calls: `GET /users/:user/following/:target_user <http://developer.github.com/v3/users/followers/#check-if-one-user-follows-another>`_
:param following: :class:`github.NamedUser.NamedUser`
:rtype: bool
"""
assert isinstance(following, github.NamedUser.NamedUser), following
status, headers, data = self._requester.requestJson(
"GET",
self.url + "/following/" + following._identity
)
return status == 204
@property
def _identity(self):
return self.login
def _initAttributes(self):
self._avatar_url = github.GithubObject.NotSet
self._bio = github.GithubObject.NotSet
self._blog = github.GithubObject.NotSet
self._collaborators = github.GithubObject.NotSet
self._company = github.GithubObject.NotSet
self._contributions = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._disk_usage = github.GithubObject.NotSet
self._email = github.GithubObject.NotSet
self._events_url = github.GithubObject.NotSet
self._followers = github.GithubObject.NotSet
self._followers_url = github.GithubObject.NotSet
self._following = github.GithubObject.NotSet
self._following_url = github.GithubObject.NotSet
self._gists_url = github.GithubObject.NotSet
self._gravatar_id = github.GithubObject.NotSet
self._hireable = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._location = github.GithubObject.NotSet
self._login = github.GithubObject.NotSet
self._name = github.GithubObject.NotSet
self._organizations_url = github.GithubObject.NotSet
self._owned_private_repos = github.GithubObject.NotSet
self._plan = github.GithubObject.NotSet
self._private_gists = github.GithubObject.NotSet
self._public_gists = github.GithubObject.NotSet
self._public_repos = github.GithubObject.NotSet
self._received_events_url = github.GithubObject.NotSet
self._repos_url = github.GithubObject.NotSet
self._starred_url = github.GithubObject.NotSet
self._subscriptions_url = github.GithubObject.NotSet
self._total_private_repos = github.GithubObject.NotSet
self._type = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "avatar_url" in attributes: # pragma no branch
self._avatar_url = self._makeStringAttribute(attributes["avatar_url"])
if "bio" in attributes: # pragma no branch
self._bio = self._makeStringAttribute(attributes["bio"])
if "blog" in attributes: # pragma no branch
self._blog = self._makeStringAttribute(attributes["blog"])
if "collaborators" in attributes: # pragma no branch
self._collaborators = self._makeIntAttribute(attributes["collaborators"])
if "company" in attributes: # pragma no branch
self._company = self._makeStringAttribute(attributes["company"])
if "contributions" in attributes: # pragma no branch
self._contributions = self._makeIntAttribute(attributes["contributions"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "disk_usage" in attributes: # pragma no branch
self._disk_usage = self._makeIntAttribute(attributes["disk_usage"])
if "email" in attributes: # pragma no branch
self._email = self._makeStringAttribute(attributes["email"])
if "events_url" in attributes: # pragma no branch
self._events_url = self._makeStringAttribute(attributes["events_url"])
if "followers" in attributes: # pragma no branch
self._followers = self._makeIntAttribute(attributes["followers"])
if "followers_url" in attributes: # pragma no branch
self._followers_url = self._makeStringAttribute(attributes["followers_url"])
if "following" in attributes: # pragma no branch
self._following = self._makeIntAttribute(attributes["following"])
if "following_url" in attributes: # pragma no branch
self._following_url = self._makeStringAttribute(attributes["following_url"])
if "gists_url" in attributes: # pragma no branch
self._gists_url = self._makeStringAttribute(attributes["gists_url"])
if "gravatar_id" in attributes: # pragma no branch
self._gravatar_id = self._makeStringAttribute(attributes["gravatar_id"])
if "hireable" in attributes: # pragma no branch
self._hireable = self._makeBoolAttribute(attributes["hireable"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "location" in attributes: # pragma no branch
self._location = self._makeStringAttribute(attributes["location"])
if "login" in attributes: # pragma no branch
self._login = self._makeStringAttribute(attributes["login"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "organizations_url" in attributes: # pragma no branch
self._organizations_url = self._makeStringAttribute(attributes["organizations_url"])
if "owned_private_repos" in attributes: # pragma no branch
self._owned_private_repos = self._makeIntAttribute(attributes["owned_private_repos"])
if "plan" in attributes: # pragma no branch
self._plan = self._makeClassAttribute(github.Plan.Plan, attributes["plan"])
if "private_gists" in attributes: # pragma no branch
self._private_gists = self._makeIntAttribute(attributes["private_gists"])
if "public_gists" in attributes: # pragma no branch
self._public_gists = self._makeIntAttribute(attributes["public_gists"])
if "public_repos" in attributes: # pragma no branch
self._public_repos = self._makeIntAttribute(attributes["public_repos"])
if "received_events_url" in attributes: # pragma no branch
self._received_events_url = self._makeStringAttribute(attributes["received_events_url"])
if "repos_url" in attributes: # pragma no branch
self._repos_url = self._makeStringAttribute(attributes["repos_url"])
if "starred_url" in attributes: # pragma no branch
self._starred_url = self._makeStringAttribute(attributes["starred_url"])
if "subscriptions_url" in attributes: # pragma no branch
self._subscriptions_url = self._makeStringAttribute(attributes["subscriptions_url"])
if "total_private_repos" in attributes: # pragma no branch
self._total_private_repos = self._makeIntAttribute(attributes["total_private_repos"])
if "type" in attributes: # pragma no branch
self._type = self._makeStringAttribute(attributes["type"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
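# Illustrative sketch (not part of this file): typical read-only use of
# NamedUser through the top-level client; the token and login passed in are
# hypothetical placeholders chosen by the caller.
def _demo_named_user(token, login):
    import github
    user = github.Github(token).get_user(login)  # returns a NamedUser
    # Attributes are completed lazily on first access.
    return user.name, user.public_repos, [r.full_name for r in user.get_repos()]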
|
gpl-3.0
|
JioCloud/heat
|
heat/tests/test_dependencies.py
|
5
|
8553
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from heat.engine.dependencies import Dependencies
from heat.engine.dependencies import CircularDependencyException
class dependenciesTest(testtools.TestCase):
def _dep_test(self, func, checkorder, deps):
nodes = set.union(*[set(e) for e in deps])
d = Dependencies(deps)
order = list(func(d))
for n in nodes:
self.assertTrue(n in order, '"%s" is not in the sequence' % n)
self.assertEqual(order.count(n), 1)
self.assertEqual(len(order), len(nodes))
for l, f in deps:
checkorder(order.index(f), order.index(l))
def _dep_test_fwd(self, *deps):
def assertLess(a, b):
self.assertTrue(a < b,
'"%s" is not less than "%s"' % (str(a), str(b)))
self._dep_test(iter, assertLess, deps)
def _dep_test_rev(self, *deps):
def assertGreater(a, b):
self.assertTrue(a > b,
'"%s" is not greater than "%s"' % (str(a), str(b)))
self._dep_test(reversed, assertGreater, deps)
def test_edges(self):
input_edges = [('1', None), ('2', '3'), ('2', '4')]
dp = Dependencies(input_edges)
self.assertEqual(set(dp.graph().edges()), set(input_edges))
def test_repr(self):
dp = Dependencies([('1', None), ('2', '3'), ('2', '4')])
s = "Dependencies([('1', None), ('2', '3'), ('2', '4')])"
self.assertEqual(repr(dp), s)
def test_single_node(self):
d = Dependencies([('only', None)])
l = list(iter(d))
self.assertEqual(len(l), 1)
self.assertEqual(l[0], 'only')
def test_disjoint(self):
d = Dependencies([('1', None), ('2', None)])
l = list(iter(d))
self.assertEqual(len(l), 2)
self.assertTrue('1' in l)
self.assertTrue('2' in l)
def test_single_fwd(self):
self._dep_test_fwd(('second', 'first'))
def test_single_rev(self):
self._dep_test_rev(('second', 'first'))
def test_chain_fwd(self):
self._dep_test_fwd(('third', 'second'), ('second', 'first'))
def test_chain_rev(self):
self._dep_test_rev(('third', 'second'), ('second', 'first'))
def test_diamond_fwd(self):
self._dep_test_fwd(('last', 'mid1'), ('last', 'mid2'),
('mid1', 'first'), ('mid2', 'first'))
def test_diamond_rev(self):
self._dep_test_rev(('last', 'mid1'), ('last', 'mid2'),
('mid1', 'first'), ('mid2', 'first'))
def test_complex_fwd(self):
self._dep_test_fwd(('last', 'mid1'), ('last', 'mid2'),
('mid1', 'mid3'), ('mid1', 'first'),
('mid3', 'first'), ('mid2', 'first'))
def test_complex_rev(self):
self._dep_test_rev(('last', 'mid1'), ('last', 'mid2'),
('mid1', 'mid3'), ('mid1', 'first'),
('mid3', 'first'), ('mid2', 'first'))
def test_many_edges_fwd(self):
self._dep_test_fwd(('last', 'e1'), ('last', 'mid1'), ('last', 'mid2'),
('mid1', 'e2'), ('mid1', 'mid3'),
('mid2', 'mid3'),
('mid3', 'e3'))
def test_many_edges_rev(self):
self._dep_test_rev(('last', 'e1'), ('last', 'mid1'), ('last', 'mid2'),
('mid1', 'e2'), ('mid1', 'mid3'),
('mid2', 'mid3'),
('mid3', 'e3'))
def test_dbldiamond_fwd(self):
self._dep_test_fwd(('last', 'a1'), ('last', 'a2'),
('a1', 'b1'), ('a2', 'b1'), ('a2', 'b2'),
('b1', 'first'), ('b2', 'first'))
def test_dbldiamond_rev(self):
self._dep_test_rev(('last', 'a1'), ('last', 'a2'),
('a1', 'b1'), ('a2', 'b1'), ('a2', 'b2'),
('b1', 'first'), ('b2', 'first'))
def test_circular_fwd(self):
d = Dependencies([('first', 'second'),
('second', 'third'),
('third', 'first')])
self.assertRaises(CircularDependencyException, list, iter(d))
def test_circular_rev(self):
d = Dependencies([('first', 'second'),
('second', 'third'),
('third', 'first')])
self.assertRaises(CircularDependencyException, list, reversed(d))
def test_self_ref(self):
d = Dependencies([('node', 'node')])
self.assertRaises(CircularDependencyException, list, iter(d))
def test_complex_circular_fwd(self):
d = Dependencies([('last', 'e1'), ('last', 'mid1'), ('last', 'mid2'),
('mid1', 'e2'), ('mid1', 'mid3'),
('mid2', 'mid3'),
('mid3', 'e3'),
('e3', 'mid1')])
self.assertRaises(CircularDependencyException, list, iter(d))
def test_complex_circular_rev(self):
d = Dependencies([('last', 'e1'), ('last', 'mid1'), ('last', 'mid2'),
('mid1', 'e2'), ('mid1', 'mid3'),
('mid2', 'mid3'),
('mid3', 'e3'),
('e3', 'mid1')])
self.assertRaises(CircularDependencyException, list, reversed(d))
def test_noexist_partial(self):
d = Dependencies([('foo', 'bar')])
get = lambda i: d[i]
self.assertRaises(KeyError, get, 'baz')
def test_single_partial(self):
d = Dependencies([('last', 'first')])
p = d['last']
l = list(iter(p))
self.assertEqual(len(l), 1)
self.assertEqual(l[0], 'last')
def test_simple_partial(self):
d = Dependencies([('last', 'middle'), ('middle', 'first')])
p = d['middle']
order = list(iter(p))
self.assertEqual(len(order), 2)
for n in ('last', 'middle'):
self.assertTrue(n in order,
"'%s' not found in dependency order" % n)
self.assertTrue(order.index('last') > order.index('middle'))
def test_simple_multilevel_partial(self):
d = Dependencies([('last', 'middle'),
('middle', 'target'),
('target', 'first')])
p = d['target']
order = list(iter(p))
self.assertEqual(len(order), 3)
for n in ('last', 'middle', 'target'):
self.assertTrue(n in order,
"'%s' not found in dependency order" % n)
def test_complex_partial(self):
d = Dependencies([('last', 'e1'), ('last', 'mid1'), ('last', 'mid2'),
('mid1', 'e2'), ('mid1', 'mid3'),
('mid2', 'mid3'),
('mid3', 'e3')])
p = d['mid3']
order = list(iter(p))
self.assertEqual(len(order), 4)
for n in ('last', 'mid1', 'mid2', 'mid3'):
self.assertTrue(n in order,
"'%s' not found in dependency order" % n)
def test_required_by(self):
d = Dependencies([('last', 'e1'), ('last', 'mid1'), ('last', 'mid2'),
('mid1', 'e2'), ('mid1', 'mid3'),
('mid2', 'mid3'),
('mid3', 'e3')])
self.assertEqual(0, len(list(d.required_by('last'))))
required_by = list(d.required_by('mid3'))
self.assertEqual(len(required_by), 2)
for n in ('mid1', 'mid2'):
self.assertTrue(n in required_by,
"'%s' not found in required_by" % n)
required_by = list(d.required_by('e2'))
self.assertEqual(len(required_by), 1)
        self.assertTrue('mid1' in required_by,
                        "'mid1' not found in required_by")
self.assertRaises(KeyError, d.required_by, 'foo')
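# Illustrative sketch (not part of this test module): the ordering contract
# exercised above, shown directly; edges are (dependent, required) pairs.
def _demo_dependencies():
    d = Dependencies([('last', 'mid'), ('mid', 'first')])
    forward = list(iter(d))      # ['first', 'mid', 'last']: requirements first
    reverse = list(reversed(d))  # ['last', 'mid', 'first']: teardown order
    return forward, reverse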
|
apache-2.0
|
blade2005/dosage
|
dosagelib/plugins/w.py
|
1
|
7377
|
# -*- coding: utf-8 -*-
# Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2016 Tobias Gruetzmacher
from __future__ import absolute_import, division, print_function
from re import compile, escape, IGNORECASE
from ..scraper import _BasicScraper, _ParserScraper
from ..util import tagre
from ..helpers import indirectStarter
from .common import _ComicControlScraper, _WordPressScraper, xpath_class
class WapsiSquare(_WordPressScraper):
url = 'http://wapsisquare.com/'
firstStripUrl = url + 'comic/09092001/'
class WastedTalent(_BasicScraper):
url = 'http://www.wastedtalent.ca/'
stripUrl = url + 'comic/%s'
firstStripUrl = stripUrl % 'anime-crack'
imageSearch = compile(tagre("img", "src", r'(http://www\.wastedtalent\.ca/sites/default/files/imagecache/comic_full/comics/\d+/[^"]+)'))
prevSearch = compile(tagre("a", "href", r'(/comic/[^"]+)',
after="comic_prev"))
help = 'Index format: stripname'
class WebDesignerCOTW(_BasicScraper):
url = 'http://www.webdesignerdepot.com/'
rurl = escape(url)
starter = indirectStarter
stripUrl = url + '%s/'
firstStripUrl = stripUrl % '2009/11/comics-of-the-week-1'
imageSearch = (
compile(tagre("img", "src", r'(http://netdna\.webdesignerdepot\.com/uploads/\d+/\d+/\d+s?\.[^"]+)')),
compile(tagre("img", "src", r'(http://netdna\.webdesignerdepot\.com/uploads/\d+/\d+/Christmas\d+\.[^"]+)')),
compile(tagre("img", "src", r'(http://netdna\.webdesignerdepot\.com/uploads/comics\d+[a-z0-9]*/\d+a?\.[^"]+)')),
compile(tagre("img", "src", r'(http://netdna\.webdesignerdepot\.com/uploads/comics/\d+\.[^"]+)')),
)
multipleImagesPerStrip = True
prevSearch = compile(tagre("link", "href", r"(%s\d+/\d+/[^']+)" % rurl,
before='prev', quote="'"))
latestSearch = compile(tagre("a", "href", r'(%s\d+/\d+/[^"]+/)' % rurl))
help = 'Index format: yyyy/mm/stripname'
def shouldSkipUrl(self, url, data):
"""Skip non-comic URLs."""
return 'comics-of-the-week' not in url
def namer(self, image_url, page_url):
imagename = image_url.rsplit('/', 1)[1]
week = compile(r'week-(\d+)').search(page_url).group(1)
return "%s-%s" % (week, imagename)
class WeCanSleepTomorrow(_BasicScraper):
url = 'http://wecansleeptomorrow.com/'
rurl = escape(url)
stripUrl = url + '%s/'
imageSearch = compile(tagre("img", "src", r'(%scomics/[^"]+)' % rurl))
prevSearch = compile(tagre("a", "href", r'(%s[^"]+)' % rurl, after="prev"))
help = 'Index format: yyyy/mm/dd/stripname'
class Weregeek(_BasicScraper):
url = 'http://www.weregeek.com/'
rurl = escape(url)
stripUrl = url + '%s/'
firstStripUrl = stripUrl % '2006/11/27/'
imageSearch = compile(tagre("img", "src",
r'(%scomics/\d+-\d+-\d+[^"]+)' % rurl))
prevSearch = compile(tagre("a", "href", r'((%s)?/?\d+/\d+/\d+/)' % rurl) +
'\s*' + tagre('img', 'src', '[^"]*previous_day.gif'))
help = 'Index format: yyyy/mm/dd'
class WhiteNoise(_WordPressScraper):
url = 'http://whitenoisecomic.com/'
firstStripUrl = url + 'comic/book-one/'
prevSearch = '//a[%s]' % xpath_class('previous-webcomic-link')
class Whomp(_ComicControlScraper):
url = 'http://www.whompcomic.com/'
firstStripUrl = url + 'comic/06152010'
textSearch = '//img[@id="cc-comic"]/@title'
class WhyTheLongFace(_BasicScraper):
baseUrl = 'http://www.absurdnotions.org/'
rurl = escape(baseUrl)
url = baseUrl + 'wtlf200709.html'
stripUrl = baseUrl + 'wtlf%s.html'
firstStripUrl = stripUrl % '200306'
imageSearch = compile(r'<img src="(%swtlf.+?|lf\d+.\w{1,4})"' % rurl,
IGNORECASE)
multipleImagesPerStrip = True
prevSearch = compile(r'HREF="(.+?)"><IMG SRC="nprev.gif" ')
help = 'Index format: yyyymm'
class Wigu(_ParserScraper):
stripUrl = 'http://www.wigucomics.com/adventures/index.php?comic=%s'
url = stripUrl % '-1'
firstStripUrl = stripUrl % '1'
imageSearch = '//div[@id="comic"]//img[contains(@src, "/comics/")]'
prevSearch = '//a[@alt="go back"]'
endOfLife = True
help = 'Index format: n'
class Wonderella(_BasicScraper):
url = 'http://nonadventures.com/'
rurl = escape(url)
stripUrl = url + '%s/'
firstStripUrl = stripUrl % '2006/09/09/the-torment-of-a-thousand-yesterdays'
imageSearch = compile(tagre("div", "id", r"comic", quote=r'["\']') +
r"\s*" +
tagre("img", "src", r'(%scomics/[^"]+)' % rurl))
prevSearch = compile(tagre("a", "href", r'(%s\d+/\d+/\d+/[^"]+)' % rurl,
after="prev"))
help = 'Index format: yyyy/mm/dd/name'
class Wondermark(_BasicScraper):
url = 'http://wondermark.com/'
stripUrl = url + '%s/'
firstStripUrl = stripUrl % '001'
imageSearch = compile(r'<img src="(http://wondermark.com/c/.+?)"')
prevSearch = compile(r'<a href="(.+?)" rel="prev">')
help = 'Index format: nnn'
class WorldOfMrToast(_BasicScraper):
baseUrl = 'http://www.theimaginaryworld.com/'
url = baseUrl + 'mrTcomicA.html'
imageSearch = compile(tagre("img", "src", r'(comic[^"]+)'))
# list the archive links since there is no prev/next navigation
prevurls = (
url,
baseUrl + 'mrTcomicW02.html',
baseUrl + 'mrTcomicW01.html',
baseUrl + 'mrGcomic03.html',
baseUrl + 'mrGcomic02.html',
baseUrl + 'mrGcomic01.html',
baseUrl + 'mrTcomicT05.html',
baseUrl + 'mrTcomicT04.html',
baseUrl + 'mrTcomicT03.html',
baseUrl + 'mrTcomicT02.html',
baseUrl + 'mrTcomicT01.html',
baseUrl + 'mrTcomicIW3.html',
baseUrl + 'mrTcomicIW2.html',
baseUrl + 'mrTcomicIW1.html',
)
firstStripUrl = prevurls[-1]
multipleImagesPerStrip = True
endOfLife = True
def getPrevUrl(self, url, data):
idx = self.prevurls.index(url)
try:
return self.prevurls[idx + 1]
except IndexError:
return None
class WorldOfWarcraftEh(_WordPressScraper):
url = 'http://woweh.com/'
class WormWorldSaga(_BasicScraper):
url = 'http://www.wormworldsaga.com/'
stripUrl = url + 'chapters/%s/index.php'
firstStripUrl = stripUrl % 'chapter01/EN'
imageSearch = (
compile(tagre("img", "src", r'(images/CH\d+_\d+\.[^"]+)')),
compile(tagre("img", "src", r'(panels/CH\d+_[^"]+)')),
)
latestChapter = 5
multipleImagesPerStrip = True
def starter(self):
return '%schapters/chapter%02d/%s/index.php' % (
self.url, self.latestChapter, self.lang.upper())
def getPrevUrl(self, url, data):
"""Find previous URL."""
if 'chapter04' in url:
return url.replace('chapter04', 'chapter03')
if 'chapter03' in url:
return url.replace('chapter03', 'chapter02')
if 'chapter02' in url:
return url.replace('chapter02', 'chapter01')
return None
class WormWorldSagaFrench(WormWorldSaga):
lang = 'fr'
class WormWorldSagaGerman(WormWorldSaga):
lang = 'de'
class WormWorldSagaSpanish(WormWorldSaga):
lang = 'es'
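# Illustrative sketch (not part of this module): the minimal anatomy the
# scrapers above share. Every name and URL below is a hypothetical example,
# and the leading underscore marks the class as illustrative only.
class _ExampleComic(_BasicScraper):
    url = 'http://comic.example.com/'
    stripUrl = url + '%s/'
    firstStripUrl = stripUrl % '2001/01/01'
    imageSearch = compile(tagre("img", "src", r'(http://comic\.example\.com/comics/[^"]+)'))
    prevSearch = compile(tagre("a", "href", r'(http://comic\.example\.com/[^"]+)', after="prev"))
    help = 'Index format: yyyy/mm/dd'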
|
mit
|
dllsf/odootest
|
addons/document/wizard/__init__.py
|
444
|
1084
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import document_configuration
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
marineam/systemd
|
test/sysv-generator-test.py
|
4
|
16017
|
# systemd-sysv-generator integration test
#
# (C) 2015 Canonical Ltd.
# Author: Martin Pitt <[email protected]>
#
# systemd is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
# systemd is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with systemd; If not, see <http://www.gnu.org/licenses/>.
import unittest
import sys
import os
import subprocess
import tempfile
import shutil
from glob import glob
try:
from configparser import RawConfigParser
except ImportError:
# python 2
from ConfigParser import RawConfigParser
sysv_generator = os.path.join(os.environ.get('builddir', '.'), 'systemd-sysv-generator')
class SysvGeneratorTest(unittest.TestCase):
def setUp(self):
self.workdir = tempfile.mkdtemp(prefix='sysv-gen-test.')
self.init_d_dir = os.path.join(self.workdir, 'init.d')
os.mkdir(self.init_d_dir)
self.rcnd_dir = self.workdir
self.unit_dir = os.path.join(self.workdir, 'systemd')
os.mkdir(self.unit_dir)
self.out_dir = os.path.join(self.workdir, 'output')
os.mkdir(self.out_dir)
def tearDown(self):
shutil.rmtree(self.workdir)
#
# Helper methods
#
def run_generator(self, expect_error=False):
'''Run sysv-generator.
Fail if stderr contains any "Fail", unless expect_error is True.
        Return (stderr, filename -> ConfigParser) pair with output to stderr and
parsed generated units.
'''
env = os.environ.copy()
env['SYSTEMD_LOG_LEVEL'] = 'debug'
env['SYSTEMD_LOG_TARGET'] = 'console'
env['SYSTEMD_SYSVINIT_PATH'] = self.init_d_dir
env['SYSTEMD_SYSVRCND_PATH'] = self.rcnd_dir
env['SYSTEMD_UNIT_PATH'] = self.unit_dir
gen = subprocess.Popen(
[sysv_generator, 'ignored', 'ignored', self.out_dir],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True, env=env)
(out, err) = gen.communicate()
if not expect_error:
self.assertFalse('Fail' in err, err)
self.assertEqual(gen.returncode, 0, err)
results = {}
for service in glob(self.out_dir + '/*.service'):
if os.path.islink(service):
continue
cp = RawConfigParser()
cp.optionxform = lambda o: o # don't lower-case option names
with open(service) as f:
cp.readfp(f)
results[os.path.basename(service)] = cp
return (err, results)
def add_sysv(self, fname, keys, enable=False, prio=1):
        '''Create a SysV init script with the given keys in the LSB header.
        There are sensible default values for all fields.
        If enable is True, links will be created in the rcN.d dirs. In that
        case, the priority can be given with "prio" (defaults to 1).
Return path of generated script.
'''
name_without_sh = fname.endswith('.sh') and fname[:-3] or fname
keys.setdefault('Provides', name_without_sh)
keys.setdefault('Required-Start', '$local_fs')
keys.setdefault('Required-Stop', keys['Required-Start'])
keys.setdefault('Default-Start', '2 3 4 5')
keys.setdefault('Default-Stop', '0 1 6')
keys.setdefault('Short-Description', 'test %s service' %
name_without_sh)
keys.setdefault('Description', 'long description for test %s service' %
name_without_sh)
script = os.path.join(self.init_d_dir, fname)
with open(script, 'w') as f:
f.write('#!/bin/init-d-interpreter\n### BEGIN INIT INFO\n')
for k, v in keys.items():
if v is not None:
f.write('#%20s %s\n' % (k + ':', v))
f.write('### END INIT INFO\ncode --goes here\n')
os.chmod(script, 0o755)
if enable:
def make_link(prefix, runlevel):
d = os.path.join(self.rcnd_dir, 'rc%s.d' % runlevel)
if not os.path.isdir(d):
os.mkdir(d)
os.symlink('../init.d/' + fname, os.path.join(d, prefix + fname))
for rl in keys['Default-Start'].split():
make_link('S%02i' % prio, rl)
for rl in keys['Default-Stop'].split():
make_link('K%02i' % (99 - prio), rl)
return script
def assert_enabled(self, unit, targets):
'''assert that a unit is enabled in precisely the given targets'''
all_targets = ['multi-user', 'graphical']
# should be enabled
for target in all_targets:
link = os.path.join(self.out_dir, '%s.target.wants' % target, unit)
if target in targets:
unit_file = os.readlink(link)
self.assertTrue(os.path.exists(unit_file))
self.assertEqual(os.path.basename(unit_file), unit)
else:
self.assertFalse(os.path.exists(link),
'%s unexpectedly exists' % link)
#
# test cases
#
def test_nothing(self):
'''no input files'''
results = self.run_generator()[1]
self.assertEqual(results, {})
self.assertEqual(os.listdir(self.out_dir), [])
def test_simple_disabled(self):
'''simple service without dependencies, disabled'''
self.add_sysv('foo', {}, enable=False)
err, results = self.run_generator()
self.assertEqual(len(results), 1)
# no enablement links or other stuff
self.assertEqual(os.listdir(self.out_dir), ['foo.service'])
s = results['foo.service']
self.assertEqual(s.sections(), ['Unit', 'Service'])
self.assertEqual(s.get('Unit', 'Description'), 'LSB: test foo service')
# $local_fs does not need translation, don't expect any dependency
# fields here
self.assertEqual(set(s.options('Unit')),
set(['Documentation', 'SourcePath', 'Description']))
self.assertEqual(s.get('Service', 'Type'), 'forking')
init_script = os.path.join(self.init_d_dir, 'foo')
self.assertEqual(s.get('Service', 'ExecStart'),
'%s start' % init_script)
self.assertEqual(s.get('Service', 'ExecStop'),
'%s stop' % init_script)
self.assertNotIn('Overwriting', err)
def test_simple_enabled_all(self):
'''simple service without dependencies, enabled in all runlevels'''
self.add_sysv('foo', {}, enable=True)
err, results = self.run_generator()
self.assertEqual(list(results), ['foo.service'])
self.assert_enabled('foo.service', ['multi-user', 'graphical'])
self.assertNotIn('Overwriting', err)
def test_simple_enabled_some(self):
'''simple service without dependencies, enabled in some runlevels'''
self.add_sysv('foo', {'Default-Start': '2 4'}, enable=True)
err, results = self.run_generator()
self.assertEqual(list(results), ['foo.service'])
self.assert_enabled('foo.service', ['multi-user'])
def test_lsb_macro_dep_single(self):
'''single LSB macro dependency: $network'''
self.add_sysv('foo', {'Required-Start': '$network'})
s = self.run_generator()[1]['foo.service']
self.assertEqual(set(s.options('Unit')),
set(['Documentation', 'SourcePath', 'Description', 'After', 'Wants']))
self.assertEqual(s.get('Unit', 'After'), 'network-online.target')
self.assertEqual(s.get('Unit', 'Wants'), 'network-online.target')
def test_lsb_macro_dep_multi(self):
'''multiple LSB macro dependencies'''
self.add_sysv('foo', {'Required-Start': '$named $portmap'})
s = self.run_generator()[1]['foo.service']
self.assertEqual(set(s.options('Unit')),
set(['Documentation', 'SourcePath', 'Description', 'After']))
self.assertEqual(s.get('Unit', 'After'), 'nss-lookup.target rpcbind.target')
def test_lsb_deps(self):
'''LSB header dependencies to other services'''
# also give symlink priorities here; they should be ignored
self.add_sysv('foo', {'Required-Start': 'must1 must2',
'Should-Start': 'may1 ne_may2'},
enable=True, prio=40)
self.add_sysv('must1', {}, enable=True, prio=10)
self.add_sysv('must2', {}, enable=True, prio=15)
self.add_sysv('may1', {}, enable=True, prio=20)
# do not create ne_may2
err, results = self.run_generator()
self.assertEqual(sorted(results),
['foo.service', 'may1.service', 'must1.service', 'must2.service'])
# foo should depend on all of them
self.assertEqual(sorted(results['foo.service'].get('Unit', 'After').split()),
['may1.service', 'must1.service', 'must2.service', 'ne_may2.service'])
# other services should not depend on each other
self.assertFalse(results['must1.service'].has_option('Unit', 'After'))
self.assertFalse(results['must2.service'].has_option('Unit', 'After'))
self.assertFalse(results['may1.service'].has_option('Unit', 'After'))
def test_symlink_prio_deps(self):
'''script without LSB headers use rcN.d priority'''
# create two init.d scripts without LSB header and enable them with
# startup priorities
for prio, name in [(10, 'provider'), (15, 'consumer')]:
with open(os.path.join(self.init_d_dir, name), 'w') as f:
f.write('#!/bin/init-d-interpreter\ncode --goes here\n')
os.fchmod(f.fileno(), 0o755)
d = os.path.join(self.rcnd_dir, 'rc2.d')
if not os.path.isdir(d):
os.mkdir(d)
os.symlink('../init.d/' + name, os.path.join(d, 'S%02i%s' % (prio, name)))
err, results = self.run_generator()
self.assertEqual(sorted(results), ['consumer.service', 'provider.service'])
self.assertFalse(results['provider.service'].has_option('Unit', 'After'))
self.assertEqual(results['consumer.service'].get('Unit', 'After'),
'provider.service')
def test_multiple_provides(self):
'''multiple Provides: names'''
self.add_sysv('foo', {'Provides': 'foo bar baz'})
err, results = self.run_generator()
self.assertEqual(list(results), ['foo.service'])
self.assertEqual(set(results['foo.service'].options('Unit')),
set(['Documentation', 'SourcePath', 'Description']))
# should create symlinks for the alternative names
for f in ['bar.service', 'baz.service']:
self.assertEqual(os.readlink(os.path.join(self.out_dir, f)),
'foo.service')
self.assertNotIn('Overwriting', err)
def test_same_provides_in_multiple_scripts(self):
'''multiple init.d scripts provide the same name'''
self.add_sysv('foo', {'Provides': 'foo common'}, enable=True, prio=1)
self.add_sysv('bar', {'Provides': 'bar common'}, enable=True, prio=2)
err, results = self.run_generator()
self.assertEqual(sorted(results), ['bar.service', 'foo.service'])
# should create symlink for the alternative name for either unit
self.assertIn(os.readlink(os.path.join(self.out_dir, 'common.service')),
['foo.service', 'bar.service'])
def test_provide_other_script(self):
'''init.d scripts provides the name of another init.d script'''
self.add_sysv('foo', {'Provides': 'foo bar'}, enable=True)
self.add_sysv('bar', {'Provides': 'bar'}, enable=True)
err, results = self.run_generator()
self.assertEqual(sorted(results), ['bar.service', 'foo.service'])
# we do expect an overwrite here, bar.service should overwrite the
# alias link from foo.service
self.assertIn('Overwriting', err)
def test_nonexecutable_script(self):
'''ignores non-executable init.d script'''
os.chmod(self.add_sysv('foo', {}), 0o644)
err, results = self.run_generator()
self.assertEqual(results, {})
def test_sh_suffix(self):
'''init.d script with .sh suffix'''
self.add_sysv('foo.sh', {}, enable=True)
err, results = self.run_generator()
s = results['foo.service']
self.assertEqual(s.sections(), ['Unit', 'Service'])
# should not have a .sh
self.assertEqual(s.get('Unit', 'Description'), 'LSB: test foo service')
# calls correct script with .sh
init_script = os.path.join(self.init_d_dir, 'foo.sh')
self.assertEqual(s.get('Service', 'ExecStart'),
'%s start' % init_script)
self.assertEqual(s.get('Service', 'ExecStop'),
'%s stop' % init_script)
self.assert_enabled('foo.service', ['multi-user', 'graphical'])
def test_sh_suffix_with_provides(self):
'''init.d script with .sh suffix and Provides:'''
self.add_sysv('foo.sh', {'Provides': 'foo bar'})
err, results = self.run_generator()
# ensure we don't try to create a symlink to itself
self.assertNotIn('itself', err)
self.assertEqual(list(results), ['foo.service'])
self.assertEqual(results['foo.service'].get('Unit', 'Description'),
'LSB: test foo service')
# should create symlink for the alternative name
self.assertEqual(os.readlink(os.path.join(self.out_dir, 'bar.service')),
'foo.service')
def test_hidden_files(self):
'''init.d script with hidden file suffix'''
script = self.add_sysv('foo', {}, enable=True)
# backup files (not enabled in rcN.d/)
shutil.copy(script, script + '.dpkg-new')
shutil.copy(script, script + '.dpkg-dist')
shutil.copy(script, script + '.swp')
shutil.copy(script, script + '.rpmsave')
err, results = self.run_generator()
self.assertEqual(list(results), ['foo.service'])
self.assert_enabled('foo.service', ['multi-user', 'graphical'])
def test_backup_file(self):
'''init.d script with backup file'''
script = self.add_sysv('foo', {}, enable=True)
# backup files (not enabled in rcN.d/)
shutil.copy(script, script + '.bak')
shutil.copy(script, script + '.old')
err, results = self.run_generator()
print(err)
self.assertEqual(sorted(results),
['foo.bak.service', 'foo.old.service', 'foo.service'])
# ensure we don't try to create a symlink to itself
self.assertNotIn('itself', err)
self.assert_enabled('foo.service', ['multi-user', 'graphical'])
self.assert_enabled('foo.bak.service', [])
self.assert_enabled('foo.old.service', [])
def test_existing_native_unit(self):
'''existing native unit'''
with open(os.path.join(self.unit_dir, 'foo.service'), 'w') as f:
f.write('[Unit]\n')
self.add_sysv('foo.sh', {'Provides': 'foo bar'}, enable=True)
err, results = self.run_generator()
self.assertEqual(list(results), [])
# no enablement or alias links, as native unit is disabled
self.assertEqual(os.listdir(self.out_dir), [])
if __name__ == '__main__':
unittest.main(testRunner=unittest.TextTestRunner(stream=sys.stdout, verbosity=2))
|
gpl-2.0
|
utamaro/youtube-dl
|
youtube_dl/extractor/cinchcast.py
|
177
|
1678
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
unified_strdate,
xpath_text,
)
class CinchcastIE(InfoExtractor):
_VALID_URL = r'https?://player\.cinchcast\.com/.*?assetId=(?P<id>[0-9]+)'
_TEST = {
# Actual test is run in generic, look for undergroundwellness
'url': 'http://player.cinchcast.com/?platformId=1&assetType=single&assetId=7141703',
'only_matching': True,
}
def _real_extract(self, url):
video_id = self._match_id(url)
doc = self._download_xml(
'http://www.blogtalkradio.com/playerasset/mrss?assetType=single&assetId=%s' % video_id,
video_id)
item = doc.find('.//item')
title = xpath_text(item, './title', fatal=True)
date_str = xpath_text(
item, './{http://developer.longtailvideo.com/trac/}date')
upload_date = unified_strdate(date_str, day_first=False)
# duration is present but wrong
formats = [{
'format_id': 'main',
'url': item.find('./{http://search.yahoo.com/mrss/}content').attrib['url'],
}]
backup_url = xpath_text(
item, './{http://developer.longtailvideo.com/trac/}backupContent')
if backup_url:
formats.append({
'preference': 2, # seems to be more reliable
'format_id': 'backup',
'url': backup_url,
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'upload_date': upload_date,
'formats': formats,
}
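# A hedged usage sketch (not part of youtube-dl): shows how _VALID_URL
# captures the asset id consumed by _real_extract. Because of the relative
# imports, run it from inside the package, e.g.
#   python -m youtube_dl.extractor.cinchcast
if __name__ == '__main__':
    import re
    url = 'http://player.cinchcast.com/?platformId=1&assetType=single&assetId=7141703'
    mobj = re.match(CinchcastIE._VALID_URL, url)
    print(mobj.group('id'))  # 7141703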
|
unlicense
|
kekeadou/ycmd
|
ycmd/completers/all/identifier_completer.py
|
14
|
7991
|
#!/usr/bin/env python
#
# Copyright (C) 2011, 2012 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
import os
import logging
import ycm_core
from collections import defaultdict
from ycmd.completers.general_completer import GeneralCompleter
from ycmd import identifier_utils
from ycmd import utils
from ycmd.utils import ToUtf8IfNeeded
from ycmd import responses
SYNTAX_FILENAME = 'YCM_PLACEHOLDER_FOR_SYNTAX'
class IdentifierCompleter( GeneralCompleter ):
def __init__( self, user_options ):
super( IdentifierCompleter, self ).__init__( user_options )
self._completer = ycm_core.IdentifierCompleter()
self._tags_file_last_mtime = defaultdict( int )
self._logger = logging.getLogger( __name__ )
self._max_candidates = user_options[ 'max_num_identifier_candidates' ]
def ShouldUseNow( self, request_data ):
return self.QueryLengthAboveMinThreshold( request_data )
def ComputeCandidates( self, request_data ):
if not self.ShouldUseNow( request_data ):
return []
completions = self._completer.CandidatesForQueryAndType(
ToUtf8IfNeeded( utils.SanitizeQuery( request_data[ 'query' ] ) ),
ToUtf8IfNeeded( request_data[ 'filetypes' ][ 0 ] ) )
completions = completions[ : self._max_candidates ]
completions = _RemoveSmallCandidates(
completions, self.user_options[ 'min_num_identifier_candidate_chars' ] )
return [ responses.BuildCompletionData( x ) for x in completions ]
def AddIdentifier( self, identifier, request_data ):
try:
filetype = request_data[ 'filetypes' ][ 0 ]
except KeyError:
filetype = None
filepath = request_data[ 'filepath' ]
if not filetype or not filepath or not identifier:
return
vector = ycm_core.StringVector()
vector.append( ToUtf8IfNeeded( identifier ) )
self._logger.info( 'Adding ONE buffer identifier for file: %s', filepath )
self._completer.AddIdentifiersToDatabase( vector,
ToUtf8IfNeeded( filetype ),
ToUtf8IfNeeded( filepath ) )
def AddPreviousIdentifier( self, request_data ):
self.AddIdentifier(
_PreviousIdentifier(
self.user_options[ 'min_num_of_chars_for_completion' ],
request_data ),
request_data )
def AddIdentifierUnderCursor( self, request_data ):
cursor_identifier = _GetCursorIdentifier( request_data )
if not cursor_identifier:
return
self.AddIdentifier( cursor_identifier, request_data )
def AddBufferIdentifiers( self, request_data ):
try:
filetype = request_data[ 'filetypes' ][ 0 ]
except KeyError:
filetype = None
filepath = request_data[ 'filepath' ]
collect_from_comments_and_strings = bool( self.user_options[
'collect_identifiers_from_comments_and_strings' ] )
if not filetype or not filepath:
return
text = request_data[ 'file_data' ][ filepath ][ 'contents' ]
self._logger.info( 'Adding buffer identifiers for file: %s', filepath )
self._completer.ClearForFileAndAddIdentifiersToDatabase(
_IdentifiersFromBuffer( text,
filetype,
collect_from_comments_and_strings ),
ToUtf8IfNeeded( filetype ),
ToUtf8IfNeeded( filepath ) )
def AddIdentifiersFromTagFiles( self, tag_files ):
absolute_paths_to_tag_files = ycm_core.StringVector()
for tag_file in tag_files:
try:
current_mtime = os.path.getmtime( tag_file )
      except OSError:
continue
last_mtime = self._tags_file_last_mtime[ tag_file ]
# We don't want to repeatedly process the same file over and over; we only
# process if it's changed since the last time we looked at it
if current_mtime <= last_mtime:
continue
self._tags_file_last_mtime[ tag_file ] = current_mtime
absolute_paths_to_tag_files.append( ToUtf8IfNeeded( tag_file ) )
if not absolute_paths_to_tag_files:
return
self._completer.AddIdentifiersToDatabaseFromTagFiles(
absolute_paths_to_tag_files )
def AddIdentifiersFromSyntax( self, keyword_list, filetypes ):
keyword_vector = ycm_core.StringVector()
for keyword in keyword_list:
keyword_vector.append( ToUtf8IfNeeded( keyword ) )
filepath = SYNTAX_FILENAME + filetypes[ 0 ]
self._completer.AddIdentifiersToDatabase( keyword_vector,
ToUtf8IfNeeded( filetypes[ 0 ] ),
ToUtf8IfNeeded( filepath ) )
def OnFileReadyToParse( self, request_data ):
self.AddBufferIdentifiers( request_data )
if 'tag_files' in request_data:
self.AddIdentifiersFromTagFiles( request_data[ 'tag_files' ] )
if 'syntax_keywords' in request_data:
self.AddIdentifiersFromSyntax( request_data[ 'syntax_keywords' ],
request_data[ 'filetypes' ] )
def OnInsertLeave( self, request_data ):
self.AddIdentifierUnderCursor( request_data )
def OnCurrentIdentifierFinished( self, request_data ):
self.AddPreviousIdentifier( request_data )
# This looks for the previous identifier and returns it; this might mean looking
# at last identifier on the previous line if a new line has just been created.
def _PreviousIdentifier( min_num_candidate_size_chars, request_data ):
def PreviousIdentifierOnLine( line, column ):
nearest_ident = ''
for match in identifier_utils.IdentifierRegexForFiletype(
filetype ).finditer( line ):
if match.end() <= column:
nearest_ident = match.group()
return nearest_ident
line_num = request_data[ 'line_num' ] - 1
column_num = request_data[ 'column_num' ] - 1
filepath = request_data[ 'filepath' ]
try:
filetype = request_data[ 'filetypes' ][ 0 ]
except KeyError:
filetype = None
contents_per_line = (
request_data[ 'file_data' ][ filepath ][ 'contents' ].split( '\n' ) )
ident = PreviousIdentifierOnLine( contents_per_line[ line_num ], column_num )
if ident:
if len( ident ) < min_num_candidate_size_chars:
return ''
return ident
prev_line = contents_per_line[ line_num - 1 ]
ident = PreviousIdentifierOnLine( prev_line, len( prev_line ) )
if len( ident ) < min_num_candidate_size_chars:
return ''
return ident
def _RemoveSmallCandidates( candidates, min_num_candidate_size_chars ):
if min_num_candidate_size_chars == 0:
return candidates
return [ x for x in candidates if len( x ) >= min_num_candidate_size_chars ]
def _GetCursorIdentifier( request_data ):
try:
filetype = request_data[ 'filetypes' ][ 0 ]
except KeyError:
filetype = None
return identifier_utils.IdentifierAtIndex( request_data[ 'line_value' ],
request_data[ 'column_num' ] - 1,
filetype )
def _IdentifiersFromBuffer( text,
filetype,
collect_from_comments_and_strings ):
if not collect_from_comments_and_strings:
text = identifier_utils.RemoveIdentifierFreeText( text )
idents = identifier_utils.ExtractIdentifiersFromText( text, filetype )
vector = ycm_core.StringVector()
for ident in idents:
vector.append( ToUtf8IfNeeded( ident ) )
return vector
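# A minimal usage sketch (not part of upstream ycmd): exercises the
# candidate-filtering helper above. Running this as a script assumes the
# ycm_core extension and the ycmd package are importable.
if __name__ == '__main__':
  candidates = [ 'a', 'ab', 'abc', 'abcd' ]
  # a threshold of 3 keeps only identifiers of at least 3 characters
  print( _RemoveSmallCandidates( candidates, 3 ) )  # [ 'abc', 'abcd' ]
  # a threshold of 0 disables the filtering entirely
  print( _RemoveSmallCandidates( candidates, 0 ) )  # the full list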
|
gpl-3.0
|
iamchenchen/study-appium
|
python-client/test/functional/ios/multi_action_tests.py
|
1
|
1635
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from time import sleep
from appium import webdriver
import desired_capabilities
class MultiActionTests(unittest.TestCase):
def setUp(self):
desired_caps = desired_capabilities.get_desired_capabilities('TestApp.app.zip')
self.driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
def tearDown(self):
self.driver.quit()
# this test does not assert anything.
# it has to be watched in order to see if it works
def test_driver_pinch_zoom(self):
els = self.driver.find_elements_by_class_name('UIAButton')
els[5].click()
sleep(1)
el = self.driver.find_element_by_name('OK')
el.click()
sleep(1)
el = self.driver.find_element_by_xpath('//UIAApplication[1]/UIAWindow[1]/UIAMapView[1]')
self.driver.zoom(el)
sleep(5)
self.driver.pinch(el)
sleep(5)
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(MultiActionTests)
unittest.TextTestRunner(verbosity=2).run(suite)
|
apache-2.0
|
drpngx/tensorflow
|
tensorflow/python/ops/string_ops.py
|
13
|
7342
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for working with string Tensors.
See the @{$python/string_ops} guide.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_string_ops import *
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# pylint: enable=wildcard-import
# Expose regex_full_match in strings namespace
tf_export("strings.regex_full_match")(regex_full_match)
@tf_export("string_split")
def string_split(source, delimiter=" ", skip_empty=True): # pylint: disable=invalid-name
"""Split elements of `source` based on `delimiter` into a `SparseTensor`.
Let N be the size of source (typically N will be the batch size). Split each
element of `source` based on `delimiter` and return a `SparseTensor`
containing the split tokens. Empty tokens are ignored.
If `delimiter` is an empty string, each element of the `source` is split
into individual strings, each containing one byte. (This includes splitting
multibyte sequences of UTF-8.) If delimiter contains multiple bytes, it is
treated as a set of delimiters with each considered a potential split point.
For example:
N = 2, source[0] is 'hello world' and source[1] is 'a b c', then the output
will be
st.indices = [0, 0;
0, 1;
1, 0;
1, 1;
1, 2]
st.shape = [2, 3]
st.values = ['hello', 'world', 'a', 'b', 'c']
Args:
source: `1-D` string `Tensor`, the strings to split.
delimiter: `0-D` string `Tensor`, the delimiter character, the string should
be length 0 or 1.
skip_empty: A `bool`. If `True`, skip the empty strings from the result.
Raises:
ValueError: If delimiter is not a string.
Returns:
A `SparseTensor` of rank `2`, the strings split according to the delimiter.
The first column of the indices corresponds to the row in `source` and the
second column corresponds to the index of the split component in this row.
"""
delimiter = ops.convert_to_tensor(delimiter, dtype=dtypes.string)
source = ops.convert_to_tensor(source, dtype=dtypes.string)
indices, values, shape = gen_string_ops.string_split(
source, delimiter=delimiter, skip_empty=skip_empty)
indices.set_shape([None, 2])
values.set_shape([None])
shape.set_shape([2])
return sparse_tensor.SparseTensor(indices, values, shape)
@tf_export("strings.split")
def string_split_v2(source, sep=None, maxsplit=-1):
"""Split elements of `source` based on `sep` into a `SparseTensor`.
Let N be the size of source (typically N will be the batch size). Split each
element of `source` based on `sep` and return a `SparseTensor`
containing the split tokens. Empty tokens are ignored.
For example, N = 2, source[0] is 'hello world' and source[1] is 'a b c',
then the output will be
st.indices = [0, 0;
0, 1;
1, 0;
1, 1;
1, 2]
st.shape = [2, 3]
st.values = ['hello', 'world', 'a', 'b', 'c']
If `sep` is given, consecutive delimiters are not grouped together and are
deemed to delimit empty strings. For example, source of `"1<>2<><>3"` and
sep of `"<>"` returns `["1", "2", "", "3"]`. If `sep` is None or an empty
  string, consecutive whitespace is regarded as a single separator, and the
  result will contain no empty strings at the start or end if the string has
leading or trailing whitespace.
Note that the above mentioned behavior matches python's str.split.
Args:
source: `1-D` string `Tensor`, the strings to split.
sep: `0-D` string `Tensor`, the delimiter character.
maxsplit: An `int`. If `maxsplit > 0`, limit of the split of the result.
Raises:
ValueError: If sep is not a string.
Returns:
A `SparseTensor` of rank `2`, the strings split according to the delimiter.
The first column of the indices corresponds to the row in `source` and the
second column corresponds to the index of the split component in this row.
"""
if sep is None:
sep = ''
sep = ops.convert_to_tensor(sep, dtype=dtypes.string)
source = ops.convert_to_tensor(source, dtype=dtypes.string)
indices, values, shape = gen_string_ops.string_split_v2(
source, sep=sep, maxsplit=maxsplit)
indices.set_shape([None, 2])
values.set_shape([None])
shape.set_shape([2])
return sparse_tensor.SparseTensor(indices, values, shape)
def _reduce_join_reduction_dims(x, axis, reduction_indices):
"""Returns range(rank(x) - 1, 0, -1) if reduction_indices is None."""
# TODO(aselle): Remove this after deprecation
if reduction_indices is not None:
if axis is not None:
raise ValueError("Can't specify both 'axis' and 'reduction_indices'.")
axis = reduction_indices
if axis is not None:
return axis
else:
# Fast path: avoid creating Rank and Range ops if ndims is known.
if x.get_shape().ndims is not None:
return constant_op.constant(
np.arange(x.get_shape().ndims - 1, -1, -1), dtype=dtypes.int32)
# Otherwise, we rely on Range and Rank to do the right thing at run-time.
return math_ops.range(array_ops.rank(x) - 1, -1, -1)
@tf_export("reduce_join")
def reduce_join(inputs, axis=None,
keep_dims=False,
separator="",
name=None,
reduction_indices=None):
inputs_t = ops.convert_to_tensor(inputs)
reduction_indices = _reduce_join_reduction_dims(
inputs_t, axis, reduction_indices)
return gen_string_ops.reduce_join(
inputs=inputs_t,
reduction_indices=reduction_indices,
keep_dims=keep_dims,
separator=separator,
name=name)
reduce_join.__doc__ = deprecation.rewrite_argument_docstring(
gen_string_ops.reduce_join.__doc__, "reduction_indices", "axis")
ops.NotDifferentiable("RegexReplace")
ops.NotDifferentiable("StringToHashBucket")
ops.NotDifferentiable("StringToHashBucketFast")
ops.NotDifferentiable("StringToHashBucketStrong")
ops.NotDifferentiable("ReduceJoin")
ops.NotDifferentiable("StringJoin")
ops.NotDifferentiable("StringSplit")
ops.NotDifferentiable("AsString")
ops.NotDifferentiable("EncodeBase64")
ops.NotDifferentiable("DecodeBase64")
|
apache-2.0
|
Medigate/cutiuta-server
|
cutiuta-server/env/lib/python3.4/site-packages/coreapi/compat.py
|
1
|
1367
|
# coding: utf-8
import base64
__all__ = [
'urlparse', 'string_types',
'COMPACT_SEPARATORS', 'VERBOSE_SEPARATORS'
]
try:
# Python 2
import urlparse
string_types = (basestring,)
text_type = unicode
COMPACT_SEPARATORS = (b',', b':')
VERBOSE_SEPARATORS = (b',', b': ')
def is_file(obj):
return isinstance(obj, file)
def b64encode(input_string):
# Provide a consistently-as-unicode interface across 2.x and 3.x
return base64.b64encode(input_string)
except ImportError:
# Python 3
import urllib.parse as urlparse
from io import IOBase
string_types = (str,)
text_type = str
COMPACT_SEPARATORS = (',', ':')
VERBOSE_SEPARATORS = (',', ': ')
def is_file(obj):
return isinstance(obj, IOBase)
def b64encode(input_string):
# Provide a consistently-as-unicode interface across 2.x and 3.x
return base64.b64encode(input_string.encode('ascii')).decode('ascii')
def force_bytes(string):
if isinstance(string, string_types):
return string.encode('utf-8')
return string
def force_text(string):
if not isinstance(string, string_types):
return string.decode('utf-8')
return string
try:
import click
console_style = click.style
except ImportError:
def console_style(text, **kwargs):
return text
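# A small self-check sketch (not part of coreapi): verifies that b64encode,
# force_bytes and force_text behave identically on both Python branches.
if __name__ == '__main__':
    assert b64encode('coreapi') == 'Y29yZWFwaQ=='
    assert force_bytes('abc') == b'abc'
    assert force_text(b'abc') == 'abc'
    print(console_style('compat helpers OK', fg='green'))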
|
gpl-3.0
|
trozet/tacker
|
tacker/common/driver_manager.py
|
3
|
2848
|
# Copyright 2013, 2014 Intel Corporation.
# All Rights Reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_log import log as logging
import stevedore.named
LOG = logging.getLogger(__name__)
class DriverManager(object):
def __init__(self, namespace, driver_list, **kwargs):
super(DriverManager, self).__init__()
manager = stevedore.named.NamedExtensionManager(
namespace, driver_list, invoke_on_load=True, **kwargs)
drivers = {}
for ext in manager:
type_ = ext.obj.get_type()
if type_ in drivers:
msg = _("driver '%(new_driver)s' ignored because "
"driver '%(old_driver)s' is already "
"registered for driver '%(type)s'") % {
'new_driver': ext.name,
                    'old_driver': drivers[type_].name,
'type': type_}
LOG.error(msg)
raise SystemExit(msg)
drivers[type_] = ext
self._drivers = dict((type_, ext.obj)
for (type_, ext) in drivers.items())
LOG.info(_("Registered drivers from %(namespace)s: %(keys)s"),
{'namespace': namespace, 'keys': self._drivers.keys()})
@staticmethod
def _driver_name(driver):
return driver.__module__ + '.' + driver.__class__.__name__
def register(self, type_, driver):
if type_ in self._drivers:
new_driver = self._driver_name(driver)
old_driver = self._driver_name(self._drivers[type_])
msg = _("can't load driver '%(new_driver)s' because "
"driver '%(old_driver)s' is already "
"registered for driver '%(type)s'") % {
'new_driver': new_driver,
'old_driver': old_driver,
'type': type_}
LOG.error(msg)
raise SystemExit(msg)
self._drivers[type_] = driver
def invoke(self, type_, method_name, **kwargs):
driver = self._drivers[type_]
return getattr(driver, method_name)(**kwargs)
def __getitem__(self, type_):
return self._drivers[type_]
def __contains__(self, type_):
return type_ in self._drivers
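# A hedged usage sketch (not part of tacker): exercises register() and
# invoke() with a stand-in driver so that no stevedore entry points need to
# be installed. DummyDriver and the __new__ shortcut are illustration only.
if __name__ == '__main__':
    class DummyDriver(object):
        def get_type(self):
            return 'noop'

        def do_work(self, **kwargs):
            return 'noop driver called with %s' % kwargs

    manager = DriverManager.__new__(DriverManager)  # bypass entry-point loading
    manager._drivers = {}
    manager.register('noop', DummyDriver())
    assert 'noop' in manager
    print(manager.invoke('noop', 'do_work', job='demo'))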
|
apache-2.0
|
calmofthestorm/aenea
|
server/linux_wayland/evdevImpl.py
|
1
|
7210
|
import evdev
import logging
import subprocess
import time
from server.core import AbstractAeneaPlatformRpcs
from qwerty import Qwerty
from azerty import Azerty
mappings = { "qwerty" : Qwerty(),
"azerty" : Azerty(),
}
special = { "enter" : evdev.ecodes.KEY_ENTER,
"tab" : evdev.ecodes.KEY_TAB,
"alt" : evdev.ecodes.KEY_LEFTALT,
"win" : evdev.ecodes.KEY_LEFTMETA,
"super" : evdev.ecodes.KEY_LEFTMETA,
"shift" : evdev.ecodes.KEY_LEFTSHIFT,
"control" : evdev.ecodes.KEY_LEFTCTRL,
"space" : " ",
"plus" : "+",
"minus" : "-",
"backspace" : evdev.ecodes.KEY_BACKSPACE,
"del" : evdev.ecodes.KEY_DELETE,
"lbrace" : "{",
"rbrace" : "}",
"left" : evdev.ecodes.KEY_LEFT,
"right" : evdev.ecodes.KEY_RIGHT,
"up" : evdev.ecodes.KEY_UP,
"down" : evdev.ecodes.KEY_DOWN,
"lparen" : "(",
"rparen" : ")",
"lbracket" : "[",
"rbracket" : "]",
"colon" : ":",
"comma" : ",",
"semicolon" : ";",
"dot" : ".",
"slash" : "/",
"hash" : "#",
"percent" : "%",
"asterisk" : "*",
"dollar" : "$",
"backslash" : "\\",
"apostrophe" : "'",
"dquote" : "\"",
"rangle" : ">",
"langle" : "<",
"equal" : "=",
"exclamation" : "!",
"question" : "?",
"bar" : "|",
"underscore" : "_",
"ampersand" : "&",
"at" : "@",
"f1" : evdev.ecodes.KEY_F1,
"f2" : evdev.ecodes.KEY_F2,
"f3" : evdev.ecodes.KEY_F3,
"f4" : evdev.ecodes.KEY_F4,
"f5" : evdev.ecodes.KEY_F5,
"f6" : evdev.ecodes.KEY_F6,
"f7" : evdev.ecodes.KEY_F7,
"f8" : evdev.ecodes.KEY_F8,
"f9" : evdev.ecodes.KEY_F9,
"f10" : evdev.ecodes.KEY_F10,
"f11" : evdev.ecodes.KEY_F11,
"f12" : evdev.ecodes.KEY_F12,
}
fixed = { "\n" : [evdev.ecodes.KEY_ENTER],
" " : [evdev.ecodes.KEY_SPACE],
"\t" : [evdev.ecodes.KEY_TAB],
}
_SERVER_INFO = {
"window_manager": "sway",
"operating_system": "linux",
"platform": "linux",
"display": "Wayland",
"server": "aenea_reference",
"server_version": 1
}
buttons = { "right" : evdev.ecodes.BTN_RIGHT,
"left" : evdev.ecodes.BTN_LEFT,
"middle" : evdev.ecodes.BTN_MIDDLE,
}
class EvdevPlatformRpcs(AbstractAeneaPlatformRpcs):
def __init__(self, config, mapping, keyEvent, mouseEvent):
        super(EvdevPlatformRpcs, self).__init__(
            logger=logging.getLogger("aenea.EvdevPlatformRpcs"))
        # fall back to the qwerty mapping object, not the bare string, when
        # the requested layout name is unknown
        self.mapping = mappings.get(mapping, mappings["qwerty"])
key = evdev.InputDevice(keyEvent)
mouse = evdev.InputDevice(mouseEvent)
self.ui = evdev.UInput.from_device(key, mouse)
def server_info(self):
return _SERVER_INFO
def get_context(self):
self.logger.info("get_context Not implemented yet")
return {}
def key_press(self,
key=None,
modifiers=(),
direction="press",
count=1,
count_delay=None):
"""press a key possibly modified by modifiers. direction may be
'press', 'down', or 'up'. modifiers may contain 'alt', 'shift',
'control', 'super'. this X11 server also supports 'hyper',
'meta', and 'flag' (same as super). count is number of times to
press it. count_delay delay in ms between presses."""
assert key is not None
delay_millis = 0 if count_delay is None or count == 1 else count_delay
modifiers = [special.get(mod) for mod in modifiers]
key = special.get(key, key) #convert to usable str or to a key code
if type(key) is str: #need to convert to key codes
keys = fixed.get(key)
if keys is None: #not a fixed
keys = self.mapping.solo().get(key)
if keys is None: #basic key
keys = [evdev.ecodes.ecodes["KEY_" + key.upper()]]
else:
keys = [key]
for _ in range(0, count):
#modifiers down:
for m in modifiers:
self.ui.write(evdev.ecodes.EV_KEY, m, 1)
#key:
if direction == "press" or direction == "down":
for k in keys:
self.ui.write(evdev.ecodes.EV_KEY, k, 1)
if direction == "press" or direction == "up":
for k in keys:
self.ui.write(evdev.ecodes.EV_KEY, k, 0)
#modifiers up:
for m in modifiers:
self.ui.write(evdev.ecodes.EV_KEY, m, 0)
self.ui.syn()
time.sleep(delay_millis / 1000.0)
def write_text(self, text):
for letter in text:
            #check if the letter needs more than one key
seq = self.mapping.multi().get(letter)
if seq is not None:
for k in seq:
self.ui.write(evdev.ecodes.EV_KEY, k[0], k[1])
else:
#"standard" letter
seq = fixed.get(letter)
if seq is None:
seq = self.mapping.solo().get(letter)
if seq is not None:
#fixed key or solo.
for k in seq:
#keys down:
self.ui.write(evdev.ecodes.EV_KEY, k, 1)
for k in reversed(seq):
#keys up:
self.ui.write(evdev.ecodes.EV_KEY, k, 0)
else:
# standard key:
if letter.isupper():
#Press shift to have upper letter
self.ui.write(evdev.ecodes.EV_KEY,
evdev.ecodes.KEY_LEFTSHIFT,
1)
k = evdev.ecodes.ecodes["KEY_" + letter.upper()]
#press key
self.ui.write(evdev.ecodes.EV_KEY,k, 1)
#release key
self.ui.write(evdev.ecodes.EV_KEY,k, 0)
if letter.isupper():
# shift up
self.ui.write(evdev.ecodes.EV_KEY,
evdev.ecodes.KEY_LEFTSHIFT,
0)
self.ui.syn()
#if no pause, some events are lost, I don't know why
time.sleep(0.000001)
def click_mouse(self, button, direction="click", count=1, count_delay=None):
delay_millis = 0 if count_delay is None or count == 1 else count_delay
print("click mouse " + button + " " + direction)
for _ in range(0, count):
b = buttons.get(button)
if button == "wheeldown":
self.ui.write(evdev.ecodes.EV_REL,
evdev.ecodes.REL_WHEEL,
-1)
self.ui.syn()
elif button == "wheelup":
print("wheelup")
self.ui.write(evdev.ecodes.EV_REL,
evdev.ecodes.REL_WHEEL,
1)
self.ui.syn()
else:
if direction == "click" or direction == "down":
self.ui.write(evdev.ecodes.EV_KEY,
b,
1)
self.ui.syn()
if direction == "click" or direction == "up":
self.ui.write(evdev.ecodes.EV_KEY,
b,
0)
self.ui.syn()
time.sleep(delay_millis / 1000.0)
def move_mouse(self,
x, y,
reference="absolute",
proportional=False,
phantom=None):
self.logger.info("move_mouse Not implemented yet")
def pause(self, amount):
time.sleep(amount / 1000.)
def notify(self, message):
try:
subprocess.Popen(["notify-send", message])
except Exception as e:
self.logger.warn("failed to start notify-send process: %s" % e)
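# A hedged sketch (not part of aenea): replays the name-to-keycode resolution
# performed by key_press without opening any input device, so it runs without
# uinput permissions. It assumes the bundled qwerty mapping exposes solo().
if __name__ == '__main__':
    qwerty = mappings["qwerty"]
    for name in ("enter", "shift", "a"):
        key = special.get(name, name)
        if type(key) is str:
            keys = fixed.get(key) or qwerty.solo().get(key) \
                or [evdev.ecodes.ecodes["KEY_" + key.upper()]]
        else:
            keys = [key]
        print("%s -> %s" % (name, keys))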
|
lgpl-3.0
|
jmartinezchaine/OpenERP
|
openerp/addons/fetchmail_hr_recruitment/__init__.py
|
36
|
1069
|
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP S.A. (<http://www.openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
hn8841182/W11
|
static/Brython3.1.1-20150328-091302/Lib/sre_constants.py
|
692
|
7172
|
#
# Secret Labs' Regular Expression Engine
#
# various symbols used by the regular expression engine.
# run this script to update the _sre include files!
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# update when constants are added or removed
MAGIC = 20031017
#MAXREPEAT = 2147483648
#from _sre import MAXREPEAT
# SRE standard exception (access as sre.error)
# should this really be here?
class error(Exception):
pass
# operators
FAILURE = "failure"
SUCCESS = "success"
ANY = "any"
ANY_ALL = "any_all"
ASSERT = "assert"
ASSERT_NOT = "assert_not"
AT = "at"
BIGCHARSET = "bigcharset"
BRANCH = "branch"
CALL = "call"
CATEGORY = "category"
CHARSET = "charset"
GROUPREF = "groupref"
GROUPREF_IGNORE = "groupref_ignore"
GROUPREF_EXISTS = "groupref_exists"
IN = "in"
IN_IGNORE = "in_ignore"
INFO = "info"
JUMP = "jump"
LITERAL = "literal"
LITERAL_IGNORE = "literal_ignore"
MARK = "mark"
MAX_REPEAT = "max_repeat"
MAX_UNTIL = "max_until"
MIN_REPEAT = "min_repeat"
MIN_UNTIL = "min_until"
NEGATE = "negate"
NOT_LITERAL = "not_literal"
NOT_LITERAL_IGNORE = "not_literal_ignore"
RANGE = "range"
REPEAT = "repeat"
REPEAT_ONE = "repeat_one"
SUBPATTERN = "subpattern"
MIN_REPEAT_ONE = "min_repeat_one"
# positions
AT_BEGINNING = "at_beginning"
AT_BEGINNING_LINE = "at_beginning_line"
AT_BEGINNING_STRING = "at_beginning_string"
AT_BOUNDARY = "at_boundary"
AT_NON_BOUNDARY = "at_non_boundary"
AT_END = "at_end"
AT_END_LINE = "at_end_line"
AT_END_STRING = "at_end_string"
AT_LOC_BOUNDARY = "at_loc_boundary"
AT_LOC_NON_BOUNDARY = "at_loc_non_boundary"
AT_UNI_BOUNDARY = "at_uni_boundary"
AT_UNI_NON_BOUNDARY = "at_uni_non_boundary"
# categories
CATEGORY_DIGIT = "category_digit"
CATEGORY_NOT_DIGIT = "category_not_digit"
CATEGORY_SPACE = "category_space"
CATEGORY_NOT_SPACE = "category_not_space"
CATEGORY_WORD = "category_word"
CATEGORY_NOT_WORD = "category_not_word"
CATEGORY_LINEBREAK = "category_linebreak"
CATEGORY_NOT_LINEBREAK = "category_not_linebreak"
CATEGORY_LOC_WORD = "category_loc_word"
CATEGORY_LOC_NOT_WORD = "category_loc_not_word"
CATEGORY_UNI_DIGIT = "category_uni_digit"
CATEGORY_UNI_NOT_DIGIT = "category_uni_not_digit"
CATEGORY_UNI_SPACE = "category_uni_space"
CATEGORY_UNI_NOT_SPACE = "category_uni_not_space"
CATEGORY_UNI_WORD = "category_uni_word"
CATEGORY_UNI_NOT_WORD = "category_uni_not_word"
CATEGORY_UNI_LINEBREAK = "category_uni_linebreak"
CATEGORY_UNI_NOT_LINEBREAK = "category_uni_not_linebreak"
OPCODES = [
# failure=0 success=1 (just because it looks better that way :-)
FAILURE, SUCCESS,
ANY, ANY_ALL,
ASSERT, ASSERT_NOT,
AT,
BRANCH,
CALL,
CATEGORY,
CHARSET, BIGCHARSET,
GROUPREF, GROUPREF_EXISTS, GROUPREF_IGNORE,
IN, IN_IGNORE,
INFO,
JUMP,
LITERAL, LITERAL_IGNORE,
MARK,
MAX_UNTIL,
MIN_UNTIL,
NOT_LITERAL, NOT_LITERAL_IGNORE,
NEGATE,
RANGE,
REPEAT,
REPEAT_ONE,
SUBPATTERN,
MIN_REPEAT_ONE
]
ATCODES = [
AT_BEGINNING, AT_BEGINNING_LINE, AT_BEGINNING_STRING, AT_BOUNDARY,
AT_NON_BOUNDARY, AT_END, AT_END_LINE, AT_END_STRING,
AT_LOC_BOUNDARY, AT_LOC_NON_BOUNDARY, AT_UNI_BOUNDARY,
AT_UNI_NON_BOUNDARY
]
CHCODES = [
CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE,
CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD,
CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD,
CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT,
CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD,
CATEGORY_UNI_NOT_WORD, CATEGORY_UNI_LINEBREAK,
CATEGORY_UNI_NOT_LINEBREAK
]
def makedict(list):
d = {}
i = 0
for item in list:
d[item] = i
i = i + 1
return d
OPCODES = makedict(OPCODES)
ATCODES = makedict(ATCODES)
CHCODES = makedict(CHCODES)
# replacement operations for "ignore case" mode
OP_IGNORE = {
GROUPREF: GROUPREF_IGNORE,
IN: IN_IGNORE,
LITERAL: LITERAL_IGNORE,
NOT_LITERAL: NOT_LITERAL_IGNORE
}
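# A hedged illustration (not part of the original module): a regex compiler
# consults OP_IGNORE when SRE_FLAG_IGNORECASE is set, swapping each
# case-sensitive opcode for its "ignore case" variant and passing the rest
# through unchanged. _demo_ignorecase_rewrite is a hypothetical helper.
def _demo_ignorecase_rewrite(ops):
    return [OP_IGNORE.get(op, op) for op in ops]
# _demo_ignorecase_rewrite([LITERAL, MARK]) == [LITERAL_IGNORE, MARK]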
AT_MULTILINE = {
AT_BEGINNING: AT_BEGINNING_LINE,
AT_END: AT_END_LINE
}
AT_LOCALE = {
AT_BOUNDARY: AT_LOC_BOUNDARY,
AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY
}
AT_UNICODE = {
AT_BOUNDARY: AT_UNI_BOUNDARY,
AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY
}
CH_LOCALE = {
CATEGORY_DIGIT: CATEGORY_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE,
CATEGORY_WORD: CATEGORY_LOC_WORD,
CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK
}
CH_UNICODE = {
CATEGORY_DIGIT: CATEGORY_UNI_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_UNI_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE,
CATEGORY_WORD: CATEGORY_UNI_WORD,
CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK
}
# flags
SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking)
SRE_FLAG_IGNORECASE = 2 # case insensitive
SRE_FLAG_LOCALE = 4 # honour system locale
SRE_FLAG_MULTILINE = 8 # treat target as multiline string
SRE_FLAG_DOTALL = 16 # treat target as a single string
SRE_FLAG_UNICODE = 32 # use unicode "locale"
SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments
SRE_FLAG_DEBUG = 128 # debugging
SRE_FLAG_ASCII = 256 # use ascii "locale"
# flags for INFO primitive
SRE_INFO_PREFIX = 1 # has prefix
SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix)
SRE_INFO_CHARSET = 4 # pattern starts with character from given set
if __name__ == "__main__":
def dump(f, d, prefix):
items = sorted(d.items(), key=lambda a: a[1])
for k, v in items:
f.write("#define %s_%s %s\n" % (prefix, k.upper(), v))
f = open("sre_constants.h", "w")
f.write("""\
/*
* Secret Labs' Regular Expression Engine
*
* regular expression matching engine
*
* NOTE: This file is generated by sre_constants.py. If you need
* to change anything in here, edit sre_constants.py and run it.
*
* Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
*
* See the _sre.c file for information on usage and redistribution.
*/
""")
f.write("#define SRE_MAGIC %d\n" % MAGIC)
dump(f, OPCODES, "SRE_OP")
dump(f, ATCODES, "SRE")
dump(f, CHCODES, "SRE")
f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE)
f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE)
f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE)
f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE)
f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL)
f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE)
f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE)
f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX)
f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL)
f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET)
f.close()
print("done")
|
gpl-3.0
|
kimiyoung/transformer-xl
|
tf/vocabulary.py
|
1
|
5149
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Counter, OrderedDict
import numpy as np
import tensorflow as tf
from tensorflow.gfile import Open as open
from tensorflow.gfile import Exists as exists
class Vocab(object):
def __init__(self, special=[], min_freq=0, max_size=None, lower_case=True,
delimiter=None, vocab_file=None):
self.counter = Counter()
self.special = special
self.min_freq = min_freq
self.max_size = max_size
self.lower_case = lower_case
self.delimiter = delimiter
self.vocab_file = vocab_file
def tokenize(self, line, add_eos=False, add_double_eos=False):
line = line.strip()
# convert to lower case
if self.lower_case:
line = line.lower()
# empty delimiter '' will evaluate False
if self.delimiter == '':
symbols = line
else:
symbols = line.split(self.delimiter)
if add_double_eos: # lm1b
return ['<S>'] + symbols + ['<S>']
elif add_eos:
return symbols + ['<eos>']
else:
return symbols
def count_file(self, path, verbose=False, add_eos=False):
if verbose: print('counting file {} ...'.format(path))
assert exists(path)
sents = []
with open(path, 'r') as f:
for idx, line in enumerate(f):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
symbols = self.tokenize(line, add_eos=add_eos)
self.counter.update(symbols)
sents.append(symbols)
return sents
def count_sents(self, sents, verbose=False):
"""
sents : a list of sentences, each a list of tokenized symbols
"""
if verbose: print('counting {} sents ...'.format(len(sents)))
for idx, symbols in enumerate(sents):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
self.counter.update(symbols)
def _build_from_file(self, vocab_file):
self.idx2sym = []
self.sym2idx = OrderedDict()
with open(vocab_file, 'r') as f:
for line in f:
symb = line.strip().split()[0]
self.add_symbol(symb)
self.unk_idx = self.sym2idx['<UNK>']
def build_vocab(self):
if self.vocab_file:
print('building vocab from {}'.format(self.vocab_file))
self._build_from_file(self.vocab_file)
print('final vocab size {}'.format(len(self)))
else:
print('building vocab with min_freq={}, max_size={}'.format(
self.min_freq, self.max_size))
self.idx2sym = []
self.sym2idx = OrderedDict()
for sym in self.special:
self.add_special(sym)
for sym, cnt in self.counter.most_common(self.max_size):
if cnt < self.min_freq: break
self.add_symbol(sym)
print('final vocab size {} from {} unique tokens'.format(
len(self), len(self.counter)))
def encode_file(self, path, ordered=False, verbose=False, add_eos=True,
add_double_eos=False):
if verbose: print('encoding file {} ...'.format(path))
assert exists(path)
encoded = []
with open(path, 'r') as f:
for idx, line in enumerate(f):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
symbols = self.tokenize(line, add_eos=add_eos,
add_double_eos=add_double_eos)
encoded.append(self.convert_to_nparray(symbols))
if ordered:
encoded = np.concatenate(encoded)
return encoded
def encode_sents(self, sents, ordered=False, verbose=False):
if verbose: print('encoding {} sents ...'.format(len(sents)))
encoded = []
for idx, symbols in enumerate(sents):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
encoded.append(self.convert_to_nparray(symbols))
if ordered:
encoded = np.concatenate(encoded)
return encoded
def add_special(self, sym):
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
setattr(self, '{}_idx'.format(sym.strip('<>')), self.sym2idx[sym])
def add_symbol(self, sym):
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
def get_sym(self, idx):
assert 0 <= idx < len(self), 'Index {} out of range'.format(idx)
return self.idx2sym[idx]
def get_idx(self, sym):
if sym in self.sym2idx:
return self.sym2idx[sym]
else:
assert hasattr(self, 'unk_idx')
return self.sym2idx.get(sym, self.unk_idx)
def get_symbols(self, indices):
return [self.get_sym(idx) for idx in indices]
def get_indices(self, symbols):
return [self.get_idx(sym) for sym in symbols]
def convert_to_nparray(self, symbols):
nparray = np.array(self.get_indices(symbols), dtype=np.int64)
return nparray
def convert_to_sent(self, indices, exclude=None):
if exclude is None:
return ' '.join([self.get_sym(idx) for idx in indices])
else:
return ' '.join([self.get_sym(idx) for idx in indices if idx not in exclude])
def __len__(self):
return len(self.idx2sym)
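if __name__ == '__main__':
    # A minimal in-memory walkthrough (not part of the original module):
    # count symbols, build the vocab, then round-trip one tokenized sentence.
    # Importing this module already requires TensorFlow for tf.gfile.
    vocab = Vocab(special=['<eos>'], min_freq=1)
    vocab.count_sents([['hello', 'world'], ['hello', 'there']])
    vocab.build_vocab()
    indices = vocab.get_indices(['hello', 'world', '<eos>'])
    print(indices)                         # e.g. [1, 2, 0]
    print(vocab.convert_to_sent(indices))  # hello world <eos>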
|
apache-2.0
|
fishcorn/pylearn2
|
pylearn2/scripts/plot_monitor.py
|
37
|
10204
|
#!/usr/bin/env python
"""
usage:
plot_monitor.py model_1.pkl model_2.pkl ... model_n.pkl
Loads any number of .pkl files produced by train.py. Extracts
all of their monitoring channels and prompts the user to select
a subset of them to be plotted.
"""
from __future__ import print_function
__authors__ = "Ian Goodfellow, Harm Aarts"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups.com"
import gc
import numpy as np
import sys
from theano.compat.six.moves import input, xrange
from pylearn2.utils import serial
from theano.printing import _TagGenerator
from pylearn2.utils.string_utils import number_aware_alphabetical_key
from pylearn2.utils import contains_nan, contains_inf
import argparse
channels = {}
def unique_substring(s, other, min_size=1):
"""
.. todo::
WRITEME
"""
size = min(len(s), min_size)
while size <= len(s):
for pos in xrange(0,len(s)-size+1):
rval = s[pos:pos+size]
fail = False
for o in other:
if o.find(rval) != -1:
fail = True
break
if not fail:
return rval
size += 1
# no unique substring
return s
def unique_substrings(l, min_size=1):
"""
.. todo::
WRITEME
"""
return [unique_substring(s, [x for x in l if x is not s], min_size)
for s in l]
def main():
"""
.. todo::
WRITEME
"""
parser = argparse.ArgumentParser()
parser.add_argument("--out")
parser.add_argument("model_paths", nargs='+')
parser.add_argument("--yrange", help='The y-range to be used for plotting, e.g. 0:1')
options = parser.parse_args()
model_paths = options.model_paths
if options.out is not None:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
print('generating names...')
model_names = [model_path.replace('.pkl', '!') for model_path in
model_paths]
model_names = unique_substrings(model_names, min_size=10)
model_names = [model_name.replace('!','') for model_name in
model_names]
print('...done')
for i, arg in enumerate(model_paths):
try:
model = serial.load(arg)
except Exception:
if arg.endswith('.yaml'):
                print(arg + " is a yaml config file, " +
                      "you need to load a trained model.", file=sys.stderr)
quit(-1)
raise
this_model_channels = model.monitor.channels
        if len(model_paths) > 1:
postfix = ":" + model_names[i]
else:
postfix = ""
for channel in this_model_channels:
channels[channel+postfix] = this_model_channels[channel]
del model
gc.collect()
while True:
# Make a list of short codes for each channel so user can specify them
# easily
tag_generator = _TagGenerator()
codebook = {}
sorted_codes = []
for channel_name in sorted(channels,
key = number_aware_alphabetical_key):
code = tag_generator.get_tag()
codebook[code] = channel_name
codebook['<'+channel_name+'>'] = channel_name
sorted_codes.append(code)
x_axis = 'example'
print('set x_axis to example')
if len(channels.values()) == 0:
print("there are no channels to plot")
break
# If there is more than one channel in the monitor ask which ones to
# plot
prompt = len(channels.values()) > 1
if prompt:
# Display the codebook
for code in sorted_codes:
print(code + '. ' + codebook[code])
print()
print("Put e, b, s or h in the list somewhere to plot " +
"epochs, batches, seconds, or hours, respectively.")
response = input('Enter a list of channels to plot ' + \
'(example: A, C,F-G, h, <test_err>) or q to quit' + \
' or o for options: ')
if response == 'o':
print('1: smooth all channels')
print('any other response: do nothing, go back to plotting')
response = input('Enter your choice: ')
if response == '1':
for channel in channels.values():
k = 5
new_val_record = []
for i in xrange(len(channel.val_record)):
new_val = 0.
count = 0.
for j in xrange(max(0, i-k), i+1):
new_val += channel.val_record[j]
count += 1.
new_val_record.append(new_val / count)
channel.val_record = new_val_record
continue
if response == 'q':
break
#Remove spaces
response = response.replace(' ','')
#Split into list
codes = response.split(',')
final_codes = set([])
for code in codes:
if code == 'e':
x_axis = 'epoch'
continue
elif code == 'b':
x_axis = 'batche'
elif code == 's':
x_axis = 'second'
elif code == 'h':
x_axis = 'hour'
elif code.startswith('<'):
assert code.endswith('>')
final_codes.add(code)
elif code.find('-') != -1:
#The current list element is a range of codes
rng = code.split('-')
if len(rng) != 2:
print("Input not understood: "+code)
quit(-1)
found = False
for i in xrange(len(sorted_codes)):
if sorted_codes[i] == rng[0]:
found = True
break
if not found:
print("Invalid code: "+rng[0])
quit(-1)
found = False
for j in xrange(i,len(sorted_codes)):
if sorted_codes[j] == rng[1]:
found = True
break
if not found:
print("Invalid code: "+rng[1])
quit(-1)
final_codes = final_codes.union(set(sorted_codes[i:j+1]))
else:
#The current list element is just a single code
final_codes = final_codes.union(set([code]))
# end for code in codes
else:
            final_codes = set(codebook.keys())
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
styles = list(colors)
styles += [color+'--' for color in colors]
styles += [color+':' for color in colors]
fig = plt.figure()
ax = plt.subplot(1,1,1)
# plot the requested channels
for idx, code in enumerate(sorted(final_codes)):
channel_name= codebook[code]
channel = channels[channel_name]
y = np.asarray(channel.val_record)
if contains_nan(y):
print(channel_name + ' contains NaNs')
if contains_inf(y):
                print(channel_name + ' contains infinite values')
if x_axis == 'example':
x = np.asarray(channel.example_record)
elif x_axis == 'batche':
x = np.asarray(channel.batch_record)
elif x_axis == 'epoch':
try:
x = np.asarray(channel.epoch_record)
except AttributeError:
# older saved monitors won't have epoch_record
x = np.arange(len(channel.batch_record))
elif x_axis == 'second':
x = np.asarray(channel.time_record)
elif x_axis == 'hour':
x = np.asarray(channel.time_record) / 3600.
else:
assert False
ax.plot( x,
y,
styles[idx % len(styles)],
                     marker = '.', # add point markers to lines
label = channel_name)
plt.xlabel('# '+x_axis+'s')
ax.ticklabel_format( scilimits = (-3,3), axis = 'both')
handles, labels = ax.get_legend_handles_labels()
lgd = ax.legend(handles, labels, loc = 'upper left',
bbox_to_anchor = (1.05, 1.02))
# Get the axis positions and the height and width of the legend
plt.draw()
ax_pos = ax.get_position()
pad_width = ax_pos.x0 * fig.get_size_inches()[0]
pad_height = ax_pos.y0 * fig.get_size_inches()[1]
dpi = fig.get_dpi()
lgd_width = ax.get_legend().get_frame().get_width() / dpi
lgd_height = ax.get_legend().get_frame().get_height() / dpi
# Adjust the bounding box to encompass both legend and axis. Axis should be 3x3 inches.
# I had trouble getting everything to align vertically.
ax_width = 3
ax_height = 3
total_width = 2*pad_width + ax_width + lgd_width
total_height = 2*pad_height + np.maximum(ax_height, lgd_height)
fig.set_size_inches(total_width, total_height)
ax.set_position([pad_width/total_width, 1-6*pad_height/total_height, ax_width/total_width, ax_height/total_height])
if(options.yrange is not None):
ymin, ymax = map(float, options.yrange.split(':'))
plt.ylim(ymin, ymax)
if options.out is None:
plt.show()
else:
plt.savefig(options.out)
if not prompt:
break
if __name__ == "__main__":
main()
|
bsd-3-clause
|
telwertowski/Books-Mac-OS-X
|
Export Plugins/WhatsOnMyBookShelf Exporter/SOAPpy/SOAPBuilder.py
|
8
|
22127
|
"""
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: SOAPBuilder.py,v 1.27 2005/02/21 20:24:13 warnes Exp $'
from version import __version__
import cgi
import copy
from wstools.XMLname import toXMLname, fromXMLname
import fpconst
# SOAPpy modules
from Config import Config
from NS import NS
from Types import *
# Test whether this Python version has Types.BooleanType
# If it doesn't have it, then False and True are serialized as integers
try:
BooleanType
pythonHasBooleanType = 1
except NameError:
pythonHasBooleanType = 0
################################################################################
# SOAP Builder
################################################################################
class SOAPBuilder:
_xml_top = '<?xml version="1.0"?>\n'
_xml_enc_top = '<?xml version="1.0" encoding="%s"?>\n'
_env_top = ( '%(ENV_T)s:Envelope\n' + \
' %(ENV_T)s:encodingStyle="%(ENC)s"\n' ) % \
NS.__dict__
_env_bot = '</%(ENV_T)s:Envelope>\n' % NS.__dict__
# Namespaces potentially defined in the Envelope tag.
_env_ns = {NS.ENC: NS.ENC_T, NS.ENV: NS.ENV_T,
NS.XSD: NS.XSD_T, NS.XSD2: NS.XSD2_T, NS.XSD3: NS.XSD3_T,
NS.XSI: NS.XSI_T, NS.XSI2: NS.XSI2_T, NS.XSI3: NS.XSI3_T}
def __init__(self, args = (), kw = {}, method = None, namespace = None,
header = None, methodattrs = None, envelope = 1, encoding = 'UTF-8',
use_refs = 0, config = Config, noroot = 0):
# Test the encoding, raising an exception if it's not known
if encoding != None:
''.encode(encoding)
self.args = args
self.kw = kw
self.envelope = envelope
self.encoding = encoding
self.method = method
self.namespace = namespace
self.header = header
self.methodattrs= methodattrs
self.use_refs = use_refs
self.config = config
self.out = []
self.tcounter = 0
self.ncounter = 1
self.icounter = 1
self.envns = {}
self.ids = {}
self.depth = 0
self.multirefs = []
self.multis = 0
self.body = not isinstance(args, bodyType)
self.noroot = noroot
def build(self):
if Config.debug: print "In build."
ns_map = {}
# Cache whether typing is on or not
typed = self.config.typed
if self.header:
# Create a header.
self.dump(self.header, "Header", typed = typed)
#self.header = None # Wipe it out so no one is using it.
if self.body:
# Call genns to record that we've used SOAP-ENV.
self.depth += 1
body_ns = self.genns(ns_map, NS.ENV)[0]
self.out.append("<%sBody>\n" % body_ns)
if self.method:
# Save the NS map so that it can be restored when we
# fall out of the scope of the method definition
save_ns_map = ns_map.copy()
self.depth += 1
a = ''
if self.methodattrs:
for (k, v) in self.methodattrs.items():
a += ' %s="%s"' % (k, v)
if self.namespace: # Use the namespace info handed to us
methodns, n = self.genns(ns_map, self.namespace)
else:
methodns, n = '', ''
self.out.append('<%s%s%s%s%s>\n' % (
methodns, self.method, n, a, self.genroot(ns_map)))
try:
if type(self.args) != TupleType:
args = (self.args,)
else:
args = self.args
for i in args:
self.dump(i, typed = typed, ns_map = ns_map)
if hasattr(self.config, "argsOrdering") and self.config.argsOrdering.has_key(self.method):
for k in self.config.argsOrdering.get(self.method):
self.dump(self.kw.get(k), k, typed = typed, ns_map = ns_map)
else:
for (k, v) in self.kw.items():
self.dump(v, k, typed = typed, ns_map = ns_map)
except RecursionError:
if self.use_refs == 0:
# restart
b = SOAPBuilder(args = self.args, kw = self.kw,
method = self.method, namespace = self.namespace,
header = self.header, methodattrs = self.methodattrs,
envelope = self.envelope, encoding = self.encoding,
use_refs = 1, config = self.config)
return b.build()
raise
if self.method:
self.out.append("</%s%s>\n" % (methodns, self.method))
# End of the method definition; drop any local namespaces
ns_map = save_ns_map
self.depth -= 1
if self.body:
# dump may add to self.multirefs, but the for loop will keep
# going until it has used all of self.multirefs, even those
# entries added while in the loop.
self.multis = 1
for obj, tag in self.multirefs:
self.dump(obj, tag, typed = typed, ns_map = ns_map)
self.out.append("</%sBody>\n" % body_ns)
self.depth -= 1
if self.envelope:
e = map (lambda ns: ' xmlns:%s="%s"\n' % (ns[1], ns[0]),
self.envns.items())
self.out = ['<', self._env_top] + e + ['>\n'] + \
self.out + \
[self._env_bot]
if self.encoding != None:
self.out.insert(0, self._xml_enc_top % self.encoding)
return ''.join(self.out).encode(self.encoding)
self.out.insert(0, self._xml_top)
return ''.join(self.out)
def gentag(self):
if Config.debug: print "In gentag."
self.tcounter += 1
return "v%d" % self.tcounter
def genns(self, ns_map, nsURI):
if nsURI == None:
return ('', '')
if type(nsURI) == TupleType: # already a tuple
if len(nsURI) == 2:
ns, nsURI = nsURI
else:
ns, nsURI = None, nsURI[0]
else:
ns = None
if ns_map.has_key(nsURI):
return (ns_map[nsURI] + ':', '')
if self._env_ns.has_key(nsURI):
ns = self.envns[nsURI] = ns_map[nsURI] = self._env_ns[nsURI]
return (ns + ':', '')
if not ns:
ns = "ns%d" % self.ncounter
self.ncounter += 1
ns_map[nsURI] = ns
if self.config.buildWithNamespacePrefix:
return (ns + ':', ' xmlns:%s="%s"' % (ns, nsURI))
else:
return ('', ' xmlns="%s"' % (nsURI))
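    # genroot emits the SOAP-ENC root attribute for elements sitting directly
    # inside the Body (depth == 2): "1" for independent elements, "0" while
    # the multiref section is being serialized.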
def genroot(self, ns_map):
if self.noroot:
return ''
if self.depth != 2:
return ''
ns, n = self.genns(ns_map, NS.ENC)
return ' %sroot="%d"%s' % (ns, not self.multis, n)
# checkref checks an element to see if it needs to be encoded as a
# multi-reference element or not. If it returns None, the element has
# been handled and the caller can continue with subsequent elements.
# If it returns a string, the string should be included in the opening
# tag of the marshaled element.
def checkref(self, obj, tag, ns_map):
if self.depth < 2:
return ''
if not self.ids.has_key(id(obj)):
n = self.ids[id(obj)] = self.icounter
self.icounter = n + 1
if self.use_refs == 0:
return ''
if self.depth == 2:
return ' id="i%d"' % n
self.multirefs.append((obj, tag))
else:
if self.use_refs == 0:
raise RecursionError, "Cannot serialize recursive object"
n = self.ids[id(obj)]
if self.multis and self.depth == 2:
return ' id="i%d"' % n
self.out.append('<%s href="#i%d"%s/>\n' %
(tag, n, self.genroot(ns_map)))
return None
# dumpers
def dump(self, obj, tag = None, typed = 1, ns_map = {}):
if Config.debug: print "In dump.", "obj=", obj
ns_map = ns_map.copy()
self.depth += 1
if type(tag) not in (NoneType, StringType, UnicodeType):
raise KeyError, "tag must be a string or None"
try:
meth = getattr(self, "dump_" + type(obj).__name__)
except AttributeError:
if type(obj) == LongType:
obj_type = "integer"
elif pythonHasBooleanType and type(obj) == BooleanType:
obj_type = "boolean"
else:
obj_type = type(obj).__name__
self.out.append(self.dumper(None, obj_type, obj, tag, typed,
ns_map, self.genroot(ns_map)))
else:
meth(obj, tag, typed, ns_map)
self.depth -= 1
# generic dumper
def dumper(self, nsURI, obj_type, obj, tag, typed = 1, ns_map = {},
rootattr = '', id = '',
xml = '<%(tag)s%(type)s%(id)s%(attrs)s%(root)s>%(data)s</%(tag)s>\n'):
if Config.debug: print "In dumper."
if nsURI == None:
nsURI = self.config.typesNamespaceURI
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
a = n = t = ''
if typed and obj_type:
ns, n = self.genns(ns_map, nsURI)
ins = self.genns(ns_map, self.config.schemaNamespaceURI)[0]
t = ' %stype="%s%s"%s' % (ins, ns, obj_type, n)
try: a = obj._marshalAttrs(ns_map, self)
except: pass
try: data = obj._marshalData()
except:
if (obj_type != "string"): # strings are already encoded
data = cgi.escape(str(obj))
else:
data = obj
return xml % {"tag": tag, "type": t, "data": data, "root": rootattr,
"id": id, "attrs": a}
def dump_float(self, obj, tag, typed = 1, ns_map = {}):
if Config.debug: print "In dump_float."
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
if Config.strict_range:
doubleType(obj)
if fpconst.isPosInf(obj):
obj = "INF"
elif fpconst.isNegInf(obj):
obj = "-INF"
elif fpconst.isNaN(obj):
obj = "NaN"
else:
obj = repr(obj)
# Note: python 'float' is actually a SOAP 'double'.
self.out.append(self.dumper(None, "double", obj, tag, typed, ns_map,
self.genroot(ns_map)))
def dump_string(self, obj, tag, typed = 0, ns_map = {}):
if Config.debug: print "In dump_string."
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
id = self.checkref(obj, tag, ns_map)
if id == None:
return
try: data = obj._marshalData()
except: data = obj
self.out.append(self.dumper(None, "string", cgi.escape(data), tag,
typed, ns_map, self.genroot(ns_map), id))
dump_str = dump_string # For Python 2.2+
dump_unicode = dump_string
def dump_None(self, obj, tag, typed = 0, ns_map = {}):
if Config.debug: print "In dump_None."
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
ns = self.genns(ns_map, self.config.schemaNamespaceURI)[0]
self.out.append('<%s %snull="1"%s/>\n' %
(tag, ns, self.genroot(ns_map)))
dump_NoneType = dump_None # For Python 2.2+
def dump_list(self, obj, tag, typed = 1, ns_map = {}):
if Config.debug: print "In dump_list.", "obj=", obj
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
if type(obj) == InstanceType:
data = obj.data
else:
data = obj
if typed:
id = self.checkref(obj, tag, ns_map)
if id == None:
return
try:
sample = data[0]
empty = 0
except:
# preserve type if present
if getattr(obj,"_typed",None) and getattr(obj,"_type",None):
if getattr(obj, "_complexType", None):
sample = typedArrayType(typed=obj._type,
complexType = obj._complexType)
sample._typename = obj._type
if not getattr(obj,"_ns",None): obj._ns = NS.URN
else:
sample = typedArrayType(typed=obj._type)
else:
sample = structType()
empty = 1
# First scan list to see if all are the same type
same_type = 1
if not empty:
for i in data[1:]:
if type(sample) != type(i) or \
(type(sample) == InstanceType and \
sample.__class__ != i.__class__):
same_type = 0
break
ndecl = ''
if same_type:
if (isinstance(sample, structType)) or \
type(sample) == DictType or \
(isinstance(sample, anyType) and \
(getattr(sample, "_complexType", None) and \
sample._complexType)): # force to urn struct
try:
tns = obj._ns or NS.URN
except:
tns = NS.URN
ns, ndecl = self.genns(ns_map, tns)
try:
typename = sample._typename
except:
typename = "SOAPStruct"
t = ns + typename
elif isinstance(sample, anyType):
ns = sample._validNamespaceURI(self.config.typesNamespaceURI,
self.config.strictNamespaces)
if ns:
ns, ndecl = self.genns(ns_map, ns)
t = ns + str(sample._type)
else:
t = 'ur-type'
else:
typename = type(sample).__name__
# For Python 2.2+
if type(sample) == StringType: typename = 'string'
# HACK: unicode is a SOAP string
if type(sample) == UnicodeType: typename = 'string'
# HACK: python 'float' is actually a SOAP 'double'.
if typename=="float": typename="double"
t = self.genns(ns_map, self.config.typesNamespaceURI)[0] + \
typename
else:
t = self.genns(ns_map, self.config.typesNamespaceURI)[0] + \
"ur-type"
try: a = obj._marshalAttrs(ns_map, self)
except: a = ''
ens, edecl = self.genns(ns_map, NS.ENC)
ins, idecl = self.genns(ns_map, self.config.schemaNamespaceURI)
if typed:
self.out.append(
'<%s %sarrayType="%s[%d]" %stype="%sArray"%s%s%s%s%s%s>\n' %
(tag, ens, t, len(data), ins, ens, ndecl, edecl, idecl,
self.genroot(ns_map), id, a))
if typed:
try: elemsname = obj._elemsname
except: elemsname = "item"
else:
elemsname = tag
for i in data:
self.dump(i, elemsname, not same_type, ns_map)
if typed: self.out.append('</%s>\n' % tag)
dump_tuple = dump_list
def dump_dictionary(self, obj, tag, typed = 1, ns_map = {}):
if Config.debug: print "In dump_dictionary."
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
id = self.checkref(obj, tag, ns_map)
if id == None:
return
try: a = obj._marshalAttrs(ns_map, self)
except: a = ''
self.out.append('<%s%s%s%s>\n' %
(tag, id, a, self.genroot(ns_map)))
for (k, v) in obj.items():
if k[0] != "_":
self.dump(v, k, 1, ns_map)
self.out.append('</%s>\n' % tag)
dump_dict = dump_dictionary # For Python 2.2+
def dump_instance(self, obj, tag, typed = 1, ns_map = {}):
if Config.debug: print "In dump_instance.", "obj=", obj, "tag=", tag
if not tag:
# If it has a name use it.
if isinstance(obj, anyType) and obj._name:
tag = obj._name
else:
tag = self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
if isinstance(obj, arrayType): # Array
self.dump_list(obj, tag, typed, ns_map)
return
if isinstance(obj, faultType): # Fault
cns, cdecl = self.genns(ns_map, NS.ENC)
vns, vdecl = self.genns(ns_map, NS.ENV)
self.out.append('''<%sFault %sroot="1"%s%s>
<faultcode>%s</faultcode>
<faultstring>%s</faultstring>
''' % (vns, cns, vdecl, cdecl, obj.faultcode, obj.faultstring))
if hasattr(obj, "detail"):
self.dump(obj.detail, "detail", typed, ns_map)
self.out.append("</%sFault>\n" % vns)
return
r = self.genroot(ns_map)
try: a = obj._marshalAttrs(ns_map, self)
except: a = ''
if isinstance(obj, voidType): # void
self.out.append("<%s%s%s></%s>\n" % (tag, a, r, tag))
return
id = self.checkref(obj, tag, ns_map)
if id == None:
return
if isinstance(obj, structType):
# Check for namespace
ndecl = ''
ns = obj._validNamespaceURI(self.config.typesNamespaceURI,
self.config.strictNamespaces)
if ns:
ns, ndecl = self.genns(ns_map, ns)
tag = ns + tag
self.out.append("<%s%s%s%s%s>\n" % (tag, ndecl, id, a, r))
keylist = obj.__dict__.keys()
# first write out items with order information
if hasattr(obj, '_keyord'):
for i in range(len(obj._keyord)):
self.dump(obj._aslist(i), obj._keyord[i], 1, ns_map)
keylist.remove(obj._keyord[i])
# now write out the rest
for k in keylist:
if (k[0] != "_"):
self.dump(getattr(obj,k), k, 1, ns_map)
if isinstance(obj, bodyType):
self.multis = 1
for v, k in self.multirefs:
self.dump(v, k, typed = typed, ns_map = ns_map)
self.out.append('</%s>\n' % tag)
elif isinstance(obj, anyType):
t = ''
if typed:
ns = obj._validNamespaceURI(self.config.typesNamespaceURI,
self.config.strictNamespaces)
if ns:
ons, ondecl = self.genns(ns_map, ns)
ins, indecl = self.genns(ns_map,
self.config.schemaNamespaceURI)
t = ' %stype="%s%s"%s%s' % \
(ins, ons, obj._type, ondecl, indecl)
self.out.append('<%s%s%s%s%s>%s</%s>\n' %
(tag, t, id, a, r, obj._marshalData(), tag))
else: # Some Class
self.out.append('<%s%s%s>\n' % (tag, id, r))
for (k, v) in obj.__dict__.items():
if k[0] != "_":
self.dump(v, k, 1, ns_map)
self.out.append('</%s>\n' % tag)
################################################################################
# SOAPBuilder's more public interface
################################################################################
def buildSOAP(args=(), kw={}, method=None, namespace=None,
header=None, methodattrs=None, envelope=1, encoding='UTF-8',
config=Config, noroot = 0):
t = SOAPBuilder(args=args, kw=kw, method=method, namespace=namespace,
header=header, methodattrs=methodattrs,envelope=envelope,
encoding=encoding, config=config,noroot=noroot)
return t.build()
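# A minimal usage sketch (illustrative only: the method name, namespace URI
# and keyword argument below are made up for the example, not part of this
# module):
#
#   from SOAPBuilder import buildSOAP
#   envelope = buildSOAP(kw = {'msg': 'hello'}, method = 'echo',
#                        namespace = 'urn:example')
#   # envelope is a serialized SOAP 1.1 Envelope string, UTF-8 encoded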
|
mit
|
xiaoyaozi5566/DiamondCache
|
ext/ply/test/yacc_unused_rule.py
|
174
|
1596
|
# -----------------------------------------------------------------------------
# yacc_unused_rule.py
#
# Grammar with an unused rule
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.yacc as yacc
from calclex import tokens
# Parsing rules
precedence = (
('left','PLUS','MINUS'),
('left','TIMES','DIVIDE'),
('right','UMINUS'),
)
# dictionary of names
names = { }
def p_statement_assign(t):
'statement : NAME EQUALS expression'
names[t[1]] = t[3]
def p_statement_expr(t):
'statement : expression'
print(t[1])
def p_expression_binop(t):
'''expression : expression PLUS expression
| expression MINUS expression
| expression TIMES expression
| expression DIVIDE expression'''
if t[2] == '+' : t[0] = t[1] + t[3]
elif t[2] == '-': t[0] = t[1] - t[3]
elif t[2] == '*': t[0] = t[1] * t[3]
elif t[2] == '/': t[0] = t[1] / t[3]
def p_expression_uminus(t):
'expression : MINUS expression %prec UMINUS'
t[0] = -t[2]
def p_expression_group(t):
'expression : LPAREN expression RPAREN'
t[0] = t[2]
def p_expression_number(t):
'expression : NUMBER'
t[0] = t[1]
def p_expression_name(t):
'expression : NAME'
try:
t[0] = names[t[1]]
except LookupError:
print("Undefined name '%s'" % t[1])
t[0] = 0
def p_integer(t):
'integer : NUMBER'
t[0] = t[1]
def p_error(t):
print("Syntax error at '%s'" % t.value)
yacc.yacc()
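# Expected behaviour, sketched: because the 'integer' nonterminal above never
# appears on the right-hand side of any production, building the tables should
# emit a yacc diagnostic along the lines of "Rule 'integer' defined, but not
# used". Illustrative check (the 'parser' name is ours):
#
#   parser = yacc.yacc()
#   parser.parse("x = 1 + 2 * 3")   # assigns 7 to names['x'], prints nothing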
|
bsd-3-clause
|
lrowe/rdflib
|
rdflib/plugins/parsers/pyRdfa/__init__.py
|
8
|
47186
|
# -*- coding: utf-8 -*-
"""
RDFa 1.1 parser, also referred to as an “RDFa Distiller”. It is
deployed, via a CGI front-end, on the U{W3C RDFa 1.1 Distiller page<http://www.w3.org/2012/pyRdfa/>}.
For details on RDFa, the reader should consult the U{RDFa Core 1.1<http://www.w3.org/TR/rdfa-core/>}, U{XHTML+RDFa1.1<http://www.w3.org/TR/2010/xhtml-rdfa>}, and the U{RDFa 1.1 Lite<http://www.w3.org/TR/rdfa-lite/>} documents.
The U{RDFa 1.1 Primer<http://www.w3.org/TR/owl2-primer/>} may also prove helpful.
This package can also be downloaded U{from GitHub<https://github.com/RDFLib/pyrdfa3>}. The
distribution also includes the CGI front-end and a separate utility script to be run locally.
Note that this package is an updated version of a U{previous RDFa distiller<http://www.w3.org/2007/08/pyRdfa>} that was developed
for RDFa 1.0. Although it reuses large portions of that code, it has been quite thoroughly rewritten, hence put in a completely
different project. (The version numbering has been continued, though, to avoid any kind of misunderstandings. This version has version numbers "3.0.0" or higher.)
(Simple) Usage
==============
From a Python file, expecting a Turtle output::
from pyRdfa import pyRdfa
print pyRdfa().rdf_from_source('filename')
Other output formats are also possible. E.g., to produce RDF/XML output, one could use::
from pyRdfa import pyRdfa
print pyRdfa().rdf_from_source('filename', outputFormat='pretty-xml')
It is also possible to embed RDFa processing. E.g., using::
from pyRdfa import pyRdfa
graph = pyRdfa().graph_from_source('filename')
returns an RDFLib.Graph object instead of a serialization thereof. See the description of the
L{pyRdfa class<pyRdfa.pyRdfa>} for further possible entry points details.
There is also, as part of this module, a L{separate entry for CGI calls<processURI>}.
Return (serialization) formats
------------------------------
The package relies on RDFLib. By default, it relies therefore on the serializers coming with the local RDFLib distribution. However, there has been some issues with serializers of older RDFLib releases; also, some output formats, like JSON-LD, are not (yet) part of the standard RDFLib distribution. A companion package, called pyRdfaExtras, is part of the download, and it includes some of those extra serializers. The extra format (not part of the RDFLib core) is U{JSON-LD<http://json-ld.org/spec/latest/json-ld-syntax/>}, whose 'key' is 'json', when used in the 'parse' method of an RDFLib graph.
Options
=======
The package also implements some optional features that are not part of the RDFa recommendations. At the moment these are:
- possibility for plain literals to be normalized in terms of white spaces. Default: false. (The RDFa specification requires keeping the white space, leaving it to applications to normalize it, if needed)
- inclusion of embedded RDF: Turtle content may be enclosed in a C{script} element and typed as C{text/turtle}, U{defined by the RDF Working Group<http://www.w3.org/TR/turtle/>}. Alternatively, some XML dialects (e.g., SVG) allow the usage of RDF/XML as part of their core content to define metadata in RDF. For both of these cases pyRdfa parses this serialized RDF content and adds the resulting triples to the output Graph. Default: true.
- extra, built-in transformers are executed on the DOM tree prior to RDFa processing (see below). These transformers can be provided by the end user.
Options are collected in an instance of the L{Options} class and may be passed to the processing functions as an extra argument. E.g., to allow the inclusion of embedded content::
from pyRdfa.options import Options
options = Options(embedded_rdf=True)
print pyRdfa(options=options).rdf_from_source('filename')
See the description of the L{Options} class for the details.
Host Languages
==============
RDFa 1.1. Core is defined for generic XML; there are specific documents to describe how the generic specification is applied to
XHTML and HTML5.
pyRdfa makes an automatic switch among these based on the content type of the source as returned by an HTTP request. The following are the
possible host languages:
- if the content type is C{text/html}, the content is HTML5
- if the content type is C{application/xhtml+xml} I{and} the right DTD is used, the content is XHTML1
- if the content type is C{application/xhtml+xml} and no or an unknown DTD is used, the content is XHTML5
- if the content type is C{application/svg+xml}, the content type is SVG
- if the content type is C{application/atom+xml}, the content type is Atom
- if the content type is C{application/xml} or C{application/xxx+xml} (but 'xxx' is not 'atom' or 'svg'), the content type is XML
If local files are used, pyRdfa makes a guess on the content type based on the file name suffix: C{.html} is for HTML5, C{.xhtml} for XHTML1, C{.svg} for SVG, anything else is considered to be general XML. Finally, the content type may be set by the caller when initializing the L{pyRdfa class<pyRdfa.pyRdfa>}.
Beyond the differences described in the RDFa specification, the main difference is the parser used to parse the source. In the case of HTML5, pyRdfa uses an U{HTML5 parser<http://code.google.com/p/html5lib/>}; for all other cases the simple XML parser, part of the core Python environment, is used. This may be significant in the case of erroneous sources: indeed, the HTML5 parser may do adjustments on
the DOM tree before handing it over to the distiller. Furthermore, SVG is also recognized as a type that allows embedded RDF in the form of RDF/XML.
See the variables in the L{host} module if a new host language is added to the system. The current host language information is available for transformers via the option argument, too, and can be used to control the effect of the transformer.
Vocabularies
============
RDFa 1.1 has the notion of vocabulary files (using the C{@vocab} attribute) that may be used to expand the generated RDF graph. Expansion is based on some very simply RDF Schema and OWL statements on sub-properties and sub-classes, and equivalences.
pyRdfa implements this feature, although it does not do this by default. The extra C{vocab_expansion} parameter should be used for this extra step, for example::
from pyRdfa.options import Options
options = Options(vocab_expansion=True)
print pyRdfa(options=options).rdf_from_source('filename')
The triples in the vocabulary files themselves (i.e., the small ontology in RDF Schema and OWL) are removed from the result, leaving the inferred property and type relationships only (additionally to the “core” RDF content).
Vocabulary caching
------------------
By default, pyRdfa uses a caching mechanism instead of fetching the vocabulary files each time their URI is met as a C{@vocab} attribute value. (This behavior can be switched off setting the C{vocab_cache} option to false.)
Caching happens in a file system directory. The directory itself is determined by the platform the tool is used on, namely:
- On Windows, it is the C{pyRdfa-cache} subdirectory of the C{%APPDATA%} environment variable
- On MacOS, it is the C{~/Library/Application Support/pyRdfa-cache}
- Otherwise, it is the C{~/.pyRdfa-cache}
This automatic choice can be overridden by the C{PyRdfaCacheDir} environment variable.
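For example, the override can also be done from Python itself, before the first vocabulary is fetched (the directory path below is purely illustrative)::
import os
os.environ["PyRdfaCacheDir"] = "/var/cache/pyRdfa-cache"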
Caching can be set to be read-only, i.e., the setup might generate the cache files off-line instead of letting the tool writing its own cache when operating, e.g., as a service on the Web. This can be achieved by making the cache directory read only.
If the directories are neither readable nor writable, the vocabulary files are retrieved via HTTP every time they are hit. This may slow down processing; it is advised to avoid such a setup for the package.
The cache includes a separate index file and a file for each vocabulary file. Cache control is based upon the C{EXPIRES} header of a vocabulary file’s HTTP return header: when first seen, this data is stored in the index file and controls whether the cache has to be renewed or not. If the HTTP return header does not have this entry, the date is artificially set to the current date plus one day.
(The cache files themselves are dumped and loaded using U{Python’s built in cPickle package<http://docs.python.org/release/2.7/library/pickle.html#module-cPickle>}. These are binary files. Care should be taken if they are managed by CVS: they must be declared as binary files when adding them to the repository.)
RDFa 1.1 vs. RDFa 1.0
=====================
Unfortunately, RDFa 1.1 is I{not} fully backward compatible with RDFa 1.0, meaning that, in a few cases, the triples generated from an RDFa 1.1 source are not the same as for RDFa 1.0. (See the separate U{section in the RDFa 1.1 specification<http://www.w3.org/TR/rdfa-core/#major-differences-with-rdfa-syntax-1.0>} for some further details.)
This distiller’s default behavior is RDFa 1.1. However, if the source includes, in the top element of the file (e.g., the C{html} element) a C{@version} attribute whose value contains the C{RDFa 1.0} string, then the distiller switches to a RDFa 1.0 mode. (Although the C{@version} attribute is not required in RDFa 1.0, it is fairly commonly used.) Similarly, if the RDFa 1.0 DTD is used in the XHTML source, it will be taken into account (a very frequent setup is that an XHTML file is defined with that DTD and is served as text/html; pyRdfa will consider that file as XHTML5, i.e., parse it with the HTML5 parser, but interpret the RDFa attributes under the RDFa 1.0 rules).
Transformers
============
The package uses the concept of 'transformers': the parsed DOM tree is possibly
transformed I{before} performing the real RDFa processing. This transformer structure makes it possible to
add additional 'services' without distorting the core code of RDFa processing.
A transformer is a function with three arguments:
- C{node}: a DOM node for the top level element of the DOM tree
- C{options}: the current L{Options} instance
- C{state}: the current L{ExecutionContext} instance, corresponding to the top level DOM Tree element
The function may perform any type of change on the DOM tree; the typical behaviour is to add or remove attributes on specific elements. Some transformations are included in the package and can be used as examples; see the L{transform} module of the distribution. These are:
- The C{@name} attribute of the C{meta} element is copied into a C{@property} attribute of the same element
- Interpreting the 'openid' references in the header. See L{transform.OpenID} for further details.
- Implementing the Dublin Core dialect to include DC statements from the header. See L{transform.DublinCore} for further details.
The user of the package may add these transformers to an L{Options} instance. Here is a possible usage with the “openid” transformer added to the call::
from pyRdfa.options import Options
from pyRdfa.transform.OpenID import OpenID_transform
options = Options(transformers=[OpenID_transform])
print pyRdfa(options=options).rdf_from_source('filename')
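As an illustration only (the function name and the attribute it removes are made up for this example, not part of the package), a minimal transformer of one's own, following the signature above, could be::
from pyRdfa.options import Options
def drop_nofollow(node, options, state):
    for a in node.getElementsByTagName("a"):
        if a.getAttribute("rel") == "nofollow": a.removeAttribute("rel")
options = Options(transformers=[drop_nofollow])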
@summary: RDFa parser (distiller)
@requires: Python version 2.5 or up; 2.7 is preferred
@requires: U{RDFLib<http://rdflib.net>}; version 3.X is preferred.
@requires: U{html5lib<http://code.google.com/p/html5lib/>} for the HTML5 parsing.
@requires: U{httpheader<http://deron.meranda.us/python/httpheader/>}; however, a small modification had to be made to the original file, so for this reason and to make distribution easier this module (single file) is added to the package.
@organization: U{World Wide Web Consortium<http://www.w3.org>}
@author: U{Ivan Herman<a href="http://www.w3.org/People/Ivan/">}
@license: This software is available for use under the
U{W3C® SOFTWARE NOTICE AND LICENSE<href="http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231">}
@copyright: W3C
@var builtInTransformers: List of built-in transformers that are to be run regardless, because they are part of the RDFa spec
@var CACHE_DIR_VAR: Environment variable used to define cache directories for RDFa vocabularies in case the default setting does not work or is not appropriate.
@var rdfa_current_version: Current "official" version of RDFa that this package implements by default. This can be changed at the invocation of the package
@var uri_schemes: List of registered (or widely used) URI schemes; used for warnings...
"""
"""
$Id: __init__.py,v 1.91 2013-10-16 11:48:54 ivan Exp $
"""
__version__ = "3.4.3"
__author__ = 'Ivan Herman'
__contact__ = 'Ivan Herman, [email protected]'
__license__ = 'W3C® SOFTWARE NOTICE AND LICENSE, http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231'
import sys
PY3 = (sys.version_info[0] >= 3)
if PY3 :
from io import StringIO
else :
from StringIO import StringIO
import os
import xml.dom.minidom
if PY3 :
from urllib.parse import urlparse
else :
from urlparse import urlparse
import rdflib
from rdflib import URIRef
from rdflib import Literal
from rdflib import BNode
from rdflib import Namespace
if rdflib.__version__ >= "3.0.0" :
from rdflib import RDF as ns_rdf
from rdflib import RDFS as ns_rdfs
from rdflib import Graph
else :
from rdflib.RDFS import RDFSNS as ns_rdfs
from rdflib.RDF import RDFNS as ns_rdf
from rdflib.Graph import Graph
import logging
logger = logging.getLogger(__name__)
# Namespace, in the RDFLib sense, for the rdfa vocabulary
ns_rdfa = Namespace("http://www.w3.org/ns/rdfa#")
from .extras.httpheader import acceptable_content_type, content_type
from .transform.prototype import handle_prototypes
# Vocabulary terms for vocab reporting
RDFA_VOCAB = ns_rdfa["usesVocabulary"]
# Namespace, in the RDFLib sense, for the XSD Datatypes
ns_xsd = Namespace('http://www.w3.org/2001/XMLSchema#')
# Namespace, in the RDFLib sense, for the distiller vocabulary, used as part of the processor graph
ns_distill = Namespace("http://www.w3.org/2007/08/pyRdfa/vocab#")
debug = False
#########################################################################################################
# Exception/error handling. Essentially, all the different exceptions are re-packaged into
# separate exception class, to allow for an easier management on the user level
class RDFaError(Exception) :
"""Superclass exceptions representing error conditions defined by the RDFa 1.1 specification.
It does not add any new functionality to the
Exception class."""
def __init__(self, msg) :
self.msg = msg
Exception.__init__(self)
class FailedSource(RDFaError) :
"""Raised when the original source cannot be accessed. It does not add any new functionality to the
Exception class."""
def __init__(self, msg, http_code = None) :
self.msg = msg
self.http_code = http_code
RDFaError.__init__(self, msg)
class HTTPError(RDFaError) :
"""Raised when HTTP problems are detected. It does not add any new functionality to the
Exception class."""
def __init__(self, http_msg, http_code) :
self.msg = http_msg
self.http_code = http_code
RDFaError.__init__(self,http_msg)
class ProcessingError(RDFaError) :
"""Error found during processing. It does not add any new functionality to the
Exception class."""
pass
class pyRdfaError(Exception) :
"""Superclass exceptions representing error conditions outside the RDFa 1.1 specification."""
pass
# Error and Warning RDFS classes
RDFA_Error = ns_rdfa["Error"]
RDFA_Warning = ns_rdfa["Warning"]
RDFA_Info = ns_rdfa["Information"]
NonConformantMarkup = ns_rdfa["DocumentError"]
UnresolvablePrefix = ns_rdfa["UnresolvedCURIE"]
UnresolvableReference = ns_rdfa["UnresolvedCURIE"]
UnresolvableTerm = ns_rdfa["UnresolvedTerm"]
VocabReferenceError = ns_rdfa["VocabReferenceError"]
PrefixRedefinitionWarning = ns_rdfa["PrefixRedefinition"]
FileReferenceError = ns_distill["FileReferenceError"]
HTError = ns_distill["HTTPError"]
IncorrectPrefixDefinition = ns_distill["IncorrectPrefixDefinition"]
IncorrectBlankNodeUsage = ns_distill["IncorrectBlankNodeUsage"]
IncorrectLiteral = ns_distill["IncorrectLiteral"]
# Error message texts
err_no_blank_node = "Blank node in %s position is not allowed; ignored"
err_redefining_URI_as_prefix = "'%s' is a registered or an otherwise used URI scheme, but is defined as a prefix here; is this a mistake? (see, eg, http://en.wikipedia.org/wiki/URI_scheme or http://www.iana.org/assignments/uri-schemes.html for further information for most of the URI schemes)"
err_xmlns_deprecated = "The usage of 'xmlns' for prefix definition is deprecated; please use the 'prefix' attribute instead (definition for '%s')"
err_bnode_local_prefix = "The '_' local CURIE prefix is reserved for blank nodes, and cannot be defined as a prefix"
err_col_local_prefix = "The character ':' is not valid in a CURIE Prefix, and cannot be used in a prefix definition (definition for '%s')"
err_missing_URI_prefix = "Missing URI in prefix declaration for '%s' (in '%s')"
err_invalid_prefix = "Invalid prefix declaration '%s' (in '%s')"
err_no_default_prefix = "Default prefix cannot be changed (in '%s')"
err_prefix_and_xmlns = "@prefix setting for '%s' overrides the 'xmlns:%s' setting; may be a source of problem if same file is run through RDFa 1.0"
err_non_ncname_prefix = "Non NCNAME '%s' in prefix definition (in '%s'); ignored"
err_absolute_reference = "CURIE Reference part contains an authority part: %s (in '%s'); ignored"
err_query_reference = "CURIE Reference query part contains an unauthorized character: %s (in '%s'); ignored"
err_fragment_reference = "CURIE Reference fragment part contains an unauthorized character: %s (in '%s'); ignored"
err_lang = "There is a problem with language setting; either both xml:lang and lang used on an element with different values, or, for (X)HTML5, only xml:lang is used."
err_URI_scheme = "Unusual URI scheme used in <%s>; may that be a mistake, e.g., resulting from using an undefined CURIE prefix or an incorrect CURIE?"
err_illegal_safe_CURIE = "Illegal safe CURIE: %s; ignored"
err_no_CURIE_in_safe_CURIE = "Safe CURIE is used, but the value does not correspond to a defined CURIE: [%s]; ignored"
err_undefined_terms = "'%s' is used as a term, but has not been defined as such; ignored"
err_non_legal_CURIE_ref = "Relative URI is not allowed in this position (or not a legal CURIE reference) '%s'; ignored"
err_undefined_CURIE = "Undefined CURIE: '%s'; ignored"
err_prefix_redefinition = "Prefix '%s' (defined in the initial RDFa context or in an ancestor) is redefined"
err_unusual_char_in_URI = "Unusual character in uri: %s; possible error?"
#############################################################################################
from .state import ExecutionContext
from .parse import parse_one_node
from .options import Options
from .transform import top_about, empty_safe_curie, vocab_for_role
from .utils import URIOpener
from .host import HostLanguage, MediaTypes, preferred_suffixes, content_to_host_language
# Environment variable used to characterize cache directories for RDFa vocabulary files.
CACHE_DIR_VAR = "PyRdfaCacheDir"
# current "official" version of RDFa that this package implements. This can be changed at the invocation of the package
rdfa_current_version = "1.1"
# I removed schemes that would not appear as a prefix anyway, like iris.beep
# http://en.wikipedia.org/wiki/URI_scheme seems to be a good source of information
# as well as http://www.iana.org/assignments/uri-schemes.html
# There are some overlaps here, but better more than not enough...
# This comes from wikipedia
registered_iana_schemes = [
"aaa","aaas","acap","cap","cid","crid","data","dav","dict","dns","fax","file", "ftp","geo","go",
"gopher","h323","http","https","iax","icap","im","imap","info","ipp","iris","ldap", "lsid",
"mailto","mid","modem","msrp","msrps", "mtqp", "mupdate","news","nfs","nntp","opaquelocktoken",
"pop","pres", "prospero","rstp","rsync", "service","shttp","sieve","sip","sips", "sms", "snmp", "soap", "tag",
"tel","telnet", "tftp", "thismessage","tn3270","tip","tv","urn","vemmi","wais","ws", "wss", "xmpp"
]
# This comes from wikipedia, too
unofficial_common = [
"about", "adiumxtra", "aim", "apt", "afp", "aw", "bitcoin", "bolo", "callto", "chrome", "coap",
"content", "cvs", "doi", "ed2k", "facetime", "feed", "finger", "fish", "git", "gg",
"gizmoproject", "gtalk", "irc", "ircs", "irc6", "itms", "jar", "javascript",
"keyparc", "lastfm", "ldaps", "magnet", "maps", "market", "message", "mms",
"msnim", "mumble", "mvn", "notes", "palm", "paparazzi", "psync", "rmi",
"secondlife", "sgn", "skype", "spotify", "ssh", "sftp", "smb", "soldat",
"steam", "svn", "teamspeak", "things", "udb", "unreal", "ut2004",
"ventrillo", "view-source", "webcal", "wtai", "wyciwyg", "xfire", "xri", "ymsgr"
]
# These come from the IANA page
historical_iana_schemes = [
"fax", "mailserver", "modem", "pack", "prospero", "snews", "videotex", "wais"
]
provisional_iana_schemes = [
"afs", "dtn", "dvb", "icon", "ipn", "jms", "oid", "rsync", "ni"
]
other_used_schemes = [
"hdl", "isbn", "issn", "mstp", "rtmp", "rtspu", "stp"
]
uri_schemes = registered_iana_schemes + unofficial_common + historical_iana_schemes + provisional_iana_schemes + other_used_schemes
# List of built-in transformers that are to be run regardless, because they are part of the RDFa spec
builtInTransformers = [
empty_safe_curie, top_about, vocab_for_role
]
#########################################################################################################
class pyRdfa :
"""Main processing class for the distiller
@ivar options: an instance of the L{Options} class
@ivar media_type: the preferred default media type, possibly set at initialization
@ivar base: the base value, possibly set at initialization
@ivar http_status: HTTP Status, to be returned when the package is used via a CGI entry. Initially set to 200, may be modified by exception handlers
"""
def __init__(self, options = None, base = "", media_type = "", rdfa_version = None) :
"""
@keyword options: Options for the distiller
@type options: L{Options}
@keyword base: URI for the default "base" value (usually the URI of the file to be processed)
@keyword media_type: explicit setting of the preferred media type (a.k.a. content type) of the RDFa source
@keyword rdfa_version: the RDFa version that should be used. If not set, the value of the global L{rdfa_current_version} variable is used
"""
self.http_status = 200
self.base = base
if base == "" :
self.required_base = None
else :
self.required_base = base
self.charset = None
# predefined content type
self.media_type = media_type
if options == None :
self.options = Options()
else :
self.options = options
if media_type != "" :
self.options.set_host_language(self.media_type)
if rdfa_version is not None :
self.rdfa_version = rdfa_version
else :
self.rdfa_version = None
def _get_input(self, name) :
"""
Trying to guess whether "name" is a URI or a string (for a file); it then tries to open this source accordingly,
returning a file-like object. If name is none of these, it returns the input argument (that should
be, supposedly, a file-like object already).
If the media type has not been set explicitly at initialization of this instance,
the method also sets the media_type based on the HTTP GET response or the suffix of the file. See
L{host.preferred_suffixes} for the suffix to media type mapping.
@param name: identifier of the input source
@type name: string or a file-like object
@return: a file like object if opening "name" is possible and successful, "name" otherwise
"""
try :
# Python 2 branch
isstring = isinstance(name, basestring)
except :
# Python 3 branch
isstring = isinstance(name, str)
try :
if isstring :
# check if this is a URI, ie, if there is a valid 'scheme' part
# otherwise it is considered to be a simple file
if urlparse(name)[0] != "" :
url_request = URIOpener(name)
self.base = url_request.location
if self.media_type == "" :
if url_request.content_type in content_to_host_language :
self.media_type = url_request.content_type
else :
self.media_type = MediaTypes.xml
self.options.set_host_language(self.media_type)
self.charset = url_request.charset
if self.required_base == None :
self.required_base = name
return url_request.data
else :
# Creating a File URI for this thing
if self.required_base == None :
self.required_base = "file://" + os.path.join(os.getcwd(),name)
if self.media_type == "" :
self.media_type = MediaTypes.xml
# see if the default should be overwritten
for suffix in preferred_suffixes :
if name.endswith(suffix) :
self.media_type = preferred_suffixes[suffix]
self.charset = 'utf-8'
break
self.options.set_host_language(self.media_type)
return open(name, 'rb')
else :
return name
except HTTPError :
raise sys.exc_info()[1]
except :
(type, value, traceback) = sys.exc_info()
raise FailedSource(value)
####################################################################################################################
# Externally used methods
#
def graph_from_DOM(self, dom, graph = None, pgraph = None) :
"""
Extract the RDF Graph from a DOM tree. This is where the real processing happens. All other methods get down to this
one, eventually (e.g., after opening a URI and parsing it into a DOM).
@param dom: a DOM Node element, the top level entry node for the whole tree (i.e., the C{dom.documentElement} is used to initiate processing down the node hierarchy)
@keyword graph: an RDF Graph (if None, then a new one is created)
@type graph: rdflib Graph instance.
@keyword pgraph: an RDF Graph to hold (possibly) the processor graph content. If None, and the error/warning triples are to be generated, they will be added to the returned graph. Otherwise they are stored in this graph.
@type pgraph: rdflib Graph instance
@return: an RDF Graph
@rtype: rdflib Graph instance
"""
def copyGraph(tog, fromg) :
for t in fromg :
tog.add(t)
for k,ns in fromg.namespaces() :
tog.bind(k,ns)
if graph == None :
# Create the RDF Graph, that will contain the return triples...
graph = Graph()
# this will collect the content, the 'default graph', as called in the RDFa spec
default_graph = Graph()
# get the DOM tree
topElement = dom.documentElement
# Create the initial state. This takes care of things
# like base, top level namespace settings, etc.
state = ExecutionContext(topElement, default_graph, base=self.required_base if self.required_base != None else "", options=self.options, rdfa_version=self.rdfa_version)
# Perform the built-in and external transformations on the HTML tree.
logger.info(self.options)
for trans in self.options.transformers + builtInTransformers :
trans(topElement, self.options, state)
# This may have changed if the state setting detected explicit version information:
self.rdfa_version = state.rdfa_version
# The top level subject starts with the current document; this
# is used by the recursion
# this function is the real workhorse
parse_one_node(topElement, default_graph, None, state, [])
# Massage the output graph in term of rdfa:Pattern and rdfa:copy
handle_prototypes(default_graph)
# If the RDFS expansion has to be made, here is the place...
if self.options.vocab_expansion :
from .rdfs.process import process_rdfa_sem
process_rdfa_sem(default_graph, self.options)
# Experimental feature: nothing for now, this is kept as a placeholder
if self.options.experimental_features :
pass
# What should be returned depends on the way the options have been set up
if self.options.output_default_graph :
copyGraph(graph, default_graph)
if self.options.output_processor_graph :
if pgraph != None :
copyGraph(pgraph, self.options.processor_graph.graph)
else :
copyGraph(graph, self.options.processor_graph.graph)
elif self.options.output_processor_graph :
if pgraph != None :
copyGraph(pgraph, self.options.processor_graph.graph)
else :
copyGraph(graph, self.options.processor_graph.graph)
# this is necessary if several DOM trees are handled in a row...
self.options.reset_processor_graph()
return graph
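    # Illustrative sketch (file name and variable names are ours): a DOM
    # parsed elsewhere can be fed to graph_from_DOM directly, bypassing the
    # source-opening logic of graph_from_source below:
    #
    #   import xml.dom.minidom
    #   dom = xml.dom.minidom.parse('doc.xhtml')
    #   g = pyRdfa(base='http://example.org/doc.xhtml').graph_from_DOM(dom)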
def graph_from_source(self, name, graph = None, rdfOutput = False, pgraph = None) :
"""
Extract an RDF graph from an RDFa source. The source is parsed, the RDF extracted, and the RDFa Graph is
returned. This is a front-end to the L{pyRdfa.graph_from_DOM} method.
@param name: a URI, a file name, or a file-like object
@param graph: rdflib Graph instance. If None, a new one is created.
@param pgraph: rdflib Graph instance for the processor graph. If None, and the error/warning triples are to be generated, they will be added to the returned graph. Otherwise they are stored in this graph.
@param rdfOutput: whether runtime exceptions should be turned into RDF and returned as part of the processor graph
@return: an RDF Graph
@rtype: rdflib Graph instance
"""
def copyErrors(tog, options) :
if tog == None :
tog = Graph()
if options.output_processor_graph :
for t in options.processor_graph.graph :
tog.add(t)
if pgraph != None : pgraph.add(t)
for k,ns in options.processor_graph.graph.namespaces() :
tog.bind(k,ns)
if pgraph != None : pgraph.bind(k,ns)
options.reset_processor_graph()
return tog
# Separating this for forward Python 3 compatibility
try :
# Python 2 branch
isstring = isinstance(name, basestring)
except :
# Python 3 branch
isstring = isinstance(name, str)
try :
# First, open the source... Possible HTTP errors are returned as error triples
input = None
try :
input = self._get_input(name)
except FailedSource :
f = sys.exc_info()[1]
self.http_status = 400
if not rdfOutput : raise f
err = self.options.add_error(f.msg, FileReferenceError, name)
self.options.processor_graph.add_http_context(err, 400)
return copyErrors(graph, self.options)
except HTTPError :
h = sys.exc_info()[1]
self.http_status = h.http_code
if not rdfOutput : raise h
err = self.options.add_error("HTTP Error: %s (%s)" % (h.http_code,h.msg), HTError, name)
self.options.processor_graph.add_http_context(err, h.http_code)
return copyErrors(graph, self.options)
except Exception :
e = sys.exc_info()[1]
self.http_status = 500
# Something nasty happened:-(
if not rdfOutput : raise e
err = self.options.add_error(str(e), context = name)
self.options.processor_graph.add_http_context(err, 500)
return copyErrors(graph, self.options)
dom = None
try :
msg = ""
parser = None
if self.options.host_language == HostLanguage.html5 :
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
import html5lib
parser = html5lib.HTMLParser(tree=html5lib.treebuilders.getTreeBuilder("dom"))
if self.charset :
# This means the HTTP header has provided a charset, or the
# file is a local file, in which case we assume it to be utf-8
dom = parser.parse(input, encoding=self.charset)
else :
# No charset set. The html5lib parser tries to sniff the
# file to find a meta header for the charset; if that
# works, fine, otherwise it falls back on window-...
dom = parser.parse(input)
try :
if isstring :
input.close()
input = self._get_input(name)
else :
input.seek(0)
from .host import adjust_html_version
self.rdfa_version = adjust_html_version(input, self.rdfa_version)
except :
# if anything goes wrong, it is not really important; the rdfa version stays what it was...
pass
else :
# in other cases an XML parser has to be used
from .host import adjust_xhtml_and_version
parse = xml.dom.minidom.parse
dom = parse(input)
(adjusted_host_language, version) = adjust_xhtml_and_version(dom, self.options.host_language, self.rdfa_version)
self.options.host_language = adjusted_host_language
self.rdfa_version = version
except ImportError :
msg = "HTML5 parser not available. Try installing html5lib <http://code.google.com/p/html5lib>"
raise ImportError(msg)
except Exception :
e = sys.exc_info()[1]
# These are various parsing exceptions. Per spec, this is a case when
# error triples MUST be returned, ie, the usage of rdfOutput (which switches between an HTML formatted
# return page or a graph with error triples) does not apply
err = self.options.add_error(str(e), context = name)
self.http_status = 400
self.options.processor_graph.add_http_context(err, 400)
return copyErrors(graph, self.options)
# If we got here, we have a DOM tree to operate on...
return self.graph_from_DOM(dom, graph, pgraph)
except Exception :
# Something nasty happened during the generation of the graph...
(a,b,c) = sys.exc_info()
sys.excepthook(a,b,c)
if isinstance(b, ImportError) :
self.http_status = None
else :
self.http_status = 500
if not rdfOutput : raise b
err = self.options.add_error(str(b), context = name)
self.options.processor_graph.add_http_context(err, 500)
return copyErrors(graph, self.options)
def rdf_from_sources(self, names, outputFormat = "turtle", rdfOutput = False) :
"""
Extract an RDF graph from a list of RDFa sources and serialize them in one graph. The sources are parsed, the RDF
extracted, and serialization is done in the specified format.
@param names: list of sources, each can be a URI, a file name, or a file-like object
@keyword outputFormat: serialization format. Can be one of "turtle", "n3", "xml", "pretty-xml", "nt", "json" or "json-ld". "turtle" and "n3", "xml" and "pretty-xml", and "json" and "json-ld" are synonyms, respectively. Note that the JSON-LD serialization works with RDFLib 3.* only.
@keyword rdfOutput: controls what happens in case an exception is raised. If the value is False, the caller is responsible for handling it; otherwise a graph is returned with an error message included in the processor graph
@type rdfOutput: boolean
@return: a serialized RDF Graph
@rtype: string
"""
# This is better because it gives access to the various, non-standard serializations
# If it does not work because the extras are not installed, fall back to the standard
# rdflib distribution...
try :
from pyRdfaExtras import MyGraph
graph = MyGraph()
except :
graph = Graph()
# graph.bind("xsd", Namespace('http://www.w3.org/2001/XMLSchema#'))
# the value of rdfOutput determines the reaction to exceptions...
for name in names :
self.graph_from_source(name, graph, rdfOutput)
retval = graph.serialize(format=outputFormat)
return retval
def rdf_from_source(self, name, outputFormat = "turtle", rdfOutput = False) :
"""
Extract an RDF graph from an RDFa source and serialize it in one graph. The source is parsed, the RDF
extracted, and serialization is done in the specified format.
@param name: a URI, a file name, or a file-like object
@keyword outputFormat: serialization format. Can be one of "turtle", "n3", "xml", "pretty-xml", "nt", "json" or "json-ld". "turtle" and "n3", "xml" and "pretty-xml", and "json" and "json-ld" are synonyms, respectively. Note that the JSON-LD serialization works with RDFLib 3.* only.
@keyword rdfOutput: controls what happens in case an exception is raised. If the value is False, the caller is responsible for handling it; otherwise a graph is returned with an error message included in the processor graph
@type rdfOutput: boolean
@return: a serialized RDF Graph
@rtype: string
"""
return self.rdf_from_sources([name], outputFormat, rdfOutput)
################################################# CGI Entry point
def processURI(uri, outputFormat, form={}) :
"""The standard processing of an RDFa uri options in a form; used as an entry point from a CGI call.
The call accepts extra form options (i.e., HTTP GET options) as follows:
- C{graph=[output|processor|output,processor|processor,output]} specifying which graphs are returned. Default: C{output}
- C{space_preserve=[true|false]} means that plain literals are normalized in terms of white spaces. Default: C{false}
- C{rdfa_version} provides the RDFa version that should be used for distilling. The string should be of the form "1.0" or "1.1". Default is the highest version the current package implements, currently "1.1"
- C{host_language=[xhtml,html,xml]} : the host language. Used when files are uploaded or text is added verbatim, otherwise the HTTP return header should be used. Default C{xml}
- C{embedded_rdf=[true|false]} : whether embedded turtle or RDF/XML content should be added to the output graph. Default: C{false}
- C{vocab_expansion=[true|false]} : whether the vocabularies should be expanded through the restricted RDFS entailment. Default: C{false}
- C{vocab_cache=[true|false]} : whether vocab caching should be performed or whether it should be ignored and vocabulary files should be picked up every time. Default: C{false}
- C{vocab_cache_report=[true|false]} : whether vocab caching details should be reported. Default: C{false}
- C{vocab_cache_bypass=[true|false]} : whether vocab caches have to be regenerated every time. Default: C{false}
- C{rdfa_lite=[true|false]} : whether warnings should be generated for non RDFa Lite attribute usage. Default: C{false}
@param uri: URI to access. Note that the C{text:} and C{uploaded:} fake URI values are treated separately; the former is for textual input (in which case a StringIO is used to get the data) and the latter is for an uploaded file, where the form gives access to the file directly.
@param outputFormat: serialization format, as defined by the package. Currently "xml", "turtle", "nt", or "json". Default is "turtle", also used if any other string is given.
@param form: extra call options (from the CGI call) to set up the local options
@type form: cgi FieldStorage instance
@return: serialized graph
@rtype: string
"""
def _get_option(param, compare_value, default) :
param_old = param.replace('_','-')
if param in list(form.keys()) :
val = form.getfirst(param).lower()
return val == compare_value
elif param_old in list(form.keys()) :
# this is to ensure the old style parameters are still valid...
# in the old days I used '-' in the parameters, the standard favours '_'
val = form.getfirst(param_old).lower()
return val == compare_value
else :
return default
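    # _get_option (above) reads a boolean-ish form flag: it accepts both the
    # current '_' spelling and the legacy '-' spelling of the parameter name,
    # and returns the default when neither is present.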
if uri == "uploaded:" :
input = form["uploaded"].file
base = ""
elif uri == "text:" :
input = StringIO(form.getfirst("text"))
base = ""
else :
input = uri
base = uri
if "rdfa_version" in list(form.keys()) :
rdfa_version = form.getfirst("rdfa_version")
else :
rdfa_version = None
# working through the possible options
# Host language: HTML, XHTML, or XML
# Note that these options should be used for the upload and inline version only in case of a form
# for real uris the returned content type should be used
if "host_language" in list(form.keys()) :
if form.getfirst("host_language").lower() == "xhtml" :
media_type = MediaTypes.xhtml
elif form.getfirst("host_language").lower() == "html" :
media_type = MediaTypes.html
elif form.getfirst("host_language").lower() == "svg" :
media_type = MediaTypes.svg
elif form.getfirst("host_language").lower() == "atom" :
media_type = MediaTypes.atom
else :
media_type = MediaTypes.xml
else :
media_type = ""
transformers = []
check_lite = "rdfa_lite" in list(form.keys()) and form.getfirst("rdfa_lite").lower() == "true"
# The code below is left for backward compatibility only. In fact, these options are not exposed any more,
# they are not really in use
if "extras" in list(form.keys()) and form.getfirst("extras").lower() == "true" :
from .transform.metaname import meta_transform
from .transform.OpenID import OpenID_transform
from .transform.DublinCore import DC_transform
for t in [OpenID_transform, DC_transform, meta_transform] :
transformers.append(t)
else :
if "extra-meta" in list(form.keys()) and form.getfirst("extra-meta").lower() == "true" :
from .transform.metaname import meta_transform
transformers.append(meta_transform)
if "extra-openid" in list(form.keys()) and form.getfirst("extra-openid").lower() == "true" :
from .transform.OpenID import OpenID_transform
transformers.append(OpenID_transform)
if "extra-dc" in list(form.keys()) and form.getfirst("extra-dc").lower() == "true" :
from .transform.DublinCore import DC_transform
transformers.append(DC_transform)
output_default_graph = True
output_processor_graph = False
# Note that I use the 'graph' and the 'rdfagraph' form keys here. Reason is that
# I used 'graph' in the previous versions, including the RDFa 1.0 processor,
# so if I removed that altogether that would create backward incompatibilities
# On the other hand, the RDFa 1.1 doc clearly refers to 'rdfagraph' as the standard
# key.
a = None
if "graph" in list(form.keys()) :
a = form.getfirst("graph").lower()
elif "rdfagraph" in list(form.keys()) :
a = form.getfirst("rdfagraph").lower()
if a != None :
if a == "processor" :
output_default_graph = False
output_processor_graph = True
elif a == "processor,output" or a == "output,processor" :
output_processor_graph = True
embedded_rdf = _get_option( "embedded_rdf", "true", False)
space_preserve = _get_option( "space_preserve", "true", True)
vocab_cache = _get_option( "vocab_cache", "true", True)
vocab_cache_report = _get_option( "vocab_cache_report", "true", False)
refresh_vocab_cache = _get_option( "vocab_cache_refresh", "true", False)
vocab_expansion = _get_option( "vocab_expansion", "true", False)
if vocab_cache_report : output_processor_graph = True
options = Options(output_default_graph = output_default_graph,
output_processor_graph = output_processor_graph,
space_preserve = space_preserve,
transformers = transformers,
vocab_cache = vocab_cache,
vocab_cache_report = vocab_cache_report,
refresh_vocab_cache = refresh_vocab_cache,
vocab_expansion = vocab_expansion,
embedded_rdf = embedded_rdf,
check_lite = check_lite
)
processor = pyRdfa(options = options, base = base, media_type = media_type, rdfa_version = rdfa_version)
# Decide the output format; the issue is what should happen in case of a top level error, such as an
# inaccessible HTML source: should a graph be returned or an HTML page with an error message?
# decide whether HTML or RDF should be sent.
htmlOutput = False
#if 'HTTP_ACCEPT' in os.environ :
# acc = os.environ['HTTP_ACCEPT']
# possibilities = ['text/html',
# 'application/rdf+xml',
# 'text/turtle; charset=utf-8',
# 'application/json',
# 'application/ld+json',
# 'text/rdf+n3']
#
# # this nice module does content negotiation and returns the preferred format
# sg = acceptable_content_type(acc, possibilities)
# htmlOutput = (sg != None and sg[0] == content_type('text/html'))
# os.environ['rdfaerror'] = 'true'
# This is really for testing purposes only, it is an unpublished flag to force RDF output no
# matter what
try :
graph = processor.rdf_from_source(input, outputFormat, rdfOutput = ("forceRDFOutput" in list(form.keys())) or not htmlOutput)
if outputFormat == "n3" :
retval = 'Content-Type: text/rdf+n3; charset=utf-8\n'
elif outputFormat == "nt" or outputFormat == "turtle" :
retval = 'Content-Type: text/turtle; charset=utf-8\n'
elif outputFormat == "json-ld" or outputFormat == "json" :
retval = 'Content-Type: application/ld+json; charset=utf-8\n'
else :
retval = 'Content-Type: application/rdf+xml; charset=utf-8\n'
retval += '\n'
retval += graph
return retval
except HTTPError :
(type,h,traceback) = sys.exc_info()
import cgi
retval = 'Content-type: text/html; charset=utf-8\nStatus: %s \n\n' % h.http_code
retval += "<html>\n"
retval += "<head>\n"
retval += "<title>HTTP Error in distilling RDFa content</title>\n"
retval += "</head><body>\n"
retval += "<h1>HTTP Error in distilling RDFa content</h1>\n"
retval += "<p>HTTP Error: %s (%s)</p>\n" % (h.http_code,h.msg)
retval += "<p>On URI: <code>'%s'</code></p>\n" % cgi.escape(uri)
retval +="</body>\n"
retval +="</html>\n"
return retval
except :
# This branch should occur only if an exception is really raised, ie, if it is not turned
# into a graph value.
        (_, value, _) = sys.exc_info()
        import traceback, cgi
retval = 'Content-type: text/html; charset=utf-8\nStatus: %s\n\n' % processor.http_status
retval += "<html>\n"
retval += "<head>\n"
retval += "<title>Exception in RDFa processing</title>\n"
retval += "</head><body>\n"
retval += "<h1>Exception in distilling RDFa</h1>\n"
retval += "<pre>\n"
strio = StringIO()
traceback.print_exc(file=strio)
retval += strio.getvalue()
retval +="</pre>\n"
retval +="<pre>%s</pre>\n" % value
retval +="<h1>Distiller request details</h1>\n"
retval +="<dl>\n"
        if uri == "text:" and "text" in form and form["text"].value is not None and len(form["text"].value.strip()) != 0 :
retval +="<dt>Text input:</dt><dd>%s</dd>\n" % cgi.escape(form["text"].value).replace('\n','<br/>')
elif uri == "uploaded:" :
retval +="<dt>Uploaded file</dt>\n"
else :
retval +="<dt>URI received:</dt><dd><code>'%s'</code></dd>\n" % cgi.escape(uri)
if "host_language" in list(form.keys()) :
retval +="<dt>Media Type:</dt><dd>%s</dd>\n" % media_type
if "graph" in list(form.keys()) :
retval +="<dt>Requested graphs:</dt><dd>%s</dd>\n" % form.getfirst("graph").lower()
else :
retval +="<dt>Requested graphs:</dt><dd>default</dd>\n"
retval +="<dt>Output serialization format:</dt><dd> %s</dd>\n" % outputFormat
if "space_preserve" in form : retval +="<dt>Space preserve:</dt><dd> %s</dd>\n" % form["space_preserve"].value
retval +="</dl>\n"
retval +="</body>\n"
retval +="</html>\n"
return retval
|
bsd-3-clause
|
flyfei/python-for-android
|
python3-alpha/extra_modules/bs4/builder/_html5lib.py
|
46
|
7524
|
__all__ = [
'HTML5TreeBuilder',
]
import warnings
from bs4.builder import (
PERMISSIVE,
HTML,
HTML_5,
HTMLTreeBuilder,
)
from bs4.element import NamespacedAttribute
import html5lib
from html5lib.constants import namespaces
from bs4.element import (
Comment,
Doctype,
NavigableString,
Tag,
)
class HTML5TreeBuilder(HTMLTreeBuilder):
"""Use html5lib to build a tree."""
features = ['html5lib', PERMISSIVE, HTML_5, HTML]
def prepare_markup(self, markup, user_specified_encoding):
# Store the user-specified encoding for use later on.
self.user_specified_encoding = user_specified_encoding
return markup, None, None, False
# These methods are defined by Beautiful Soup.
def feed(self, markup):
if self.soup.parse_only is not None:
warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.")
parser = html5lib.HTMLParser(tree=self.create_treebuilder)
doc = parser.parse(markup, encoding=self.user_specified_encoding)
# Set the character encoding detected by the tokenizer.
if isinstance(markup, str):
# We need to special-case this because html5lib sets
# charEncoding to UTF-8 if it gets Unicode input.
doc.original_encoding = None
else:
doc.original_encoding = parser.tokenizer.stream.charEncoding[0]
def create_treebuilder(self, namespaceHTMLElements):
self.underlying_builder = TreeBuilderForHtml5lib(
self.soup, namespaceHTMLElements)
return self.underlying_builder
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return '<html><head></head><body>%s</body></html>' % fragment
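# A minimal usage sketch (assuming bs4 and html5lib are installed; the
# 'html5lib' feature string is how BeautifulSoup selects this builder):
#
#   from bs4 import BeautifulSoup
#   soup = BeautifulSoup("<p>Unclosed <b>markup", "html5lib")
#   print(soup.p.b.string)   # html5lib repairs the malformed tree first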
class TreeBuilderForHtml5lib(html5lib.treebuilders._base.TreeBuilder):
def __init__(self, soup, namespaceHTMLElements):
self.soup = soup
super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)
def documentClass(self):
self.soup.reset()
return Element(self.soup, self.soup, None)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
doctype = Doctype.for_name_and_ids(name, publicId, systemId)
self.soup.object_was_parsed(doctype)
def elementClass(self, name, namespace):
tag = self.soup.new_tag(name, namespace)
return Element(tag, self.soup, namespace)
def commentClass(self, data):
return TextNode(Comment(data), self.soup)
    def fragmentClass(self):
        # BeautifulSoup is imported locally to avoid a circular import at
        # module load time.
        from bs4 import BeautifulSoup
        self.soup = BeautifulSoup("")
        self.soup.name = "[document_fragment]"
        return Element(self.soup, self.soup, None)
def appendChild(self, node):
# XXX This code is not covered by the BS4 tests.
self.soup.append(node.element)
def getDocument(self):
return self.soup
def getFragment(self):
return html5lib.treebuilders._base.TreeBuilder.getFragment(self).element
class AttrList(object):
def __init__(self, element):
self.element = element
self.attrs = dict(self.element.attrs)
def __iter__(self):
return list(self.attrs.items()).__iter__()
    def __setitem__(self, name, value):
        # Write the attribute through to the underlying tag.
        self.element[name] = value
def items(self):
return list(self.attrs.items())
def keys(self):
return list(self.attrs.keys())
def __len__(self):
return len(self.attrs)
def __getitem__(self, name):
return self.attrs[name]
def __contains__(self, name):
return name in list(self.attrs.keys())
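# Note: AttrList gives html5lib a dict-like view of a tag's attributes;
# writes (__setitem__) go straight through to the underlying Tag, while
# reads go through the snapshot taken in __init__.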
class Element(html5lib.treebuilders._base.Node):
def __init__(self, element, soup, namespace):
html5lib.treebuilders._base.Node.__init__(self, element.name)
self.element = element
self.soup = soup
self.namespace = namespace
def appendChild(self, node):
if (node.element.__class__ == NavigableString and self.element.contents
and self.element.contents[-1].__class__ == NavigableString):
# Concatenate new text onto old text node
# XXX This has O(n^2) performance, for input like
# "a</a>a</a>a</a>..."
old_element = self.element.contents[-1]
new_element = self.soup.new_string(old_element + node.element)
old_element.replace_with(new_element)
else:
self.element.append(node.element)
node.parent = self
def getAttributes(self):
return AttrList(self.element)
def setAttributes(self, attributes):
if attributes is not None and attributes != {}:
for name, value in list(attributes.items()):
if isinstance(name, tuple):
name = NamespacedAttribute(*name)
self.element[name] = value
# The attributes may contain variables that need substitution.
# Call set_up_substitutions manually.
#
# The Tag constructor called this method when the Tag was created,
# but we just set/changed the attributes, so call it again.
self.element.contains_substitutions = (
self.soup.builder.set_up_substitutions(
self.element))
attributes = property(getAttributes, setAttributes)
def insertText(self, data, insertBefore=None):
text = TextNode(self.soup.new_string(data), self.soup)
if insertBefore:
self.insertBefore(text, insertBefore)
else:
self.appendChild(text)
def insertBefore(self, node, refNode):
index = self.element.index(refNode.element)
if (node.element.__class__ == NavigableString and self.element.contents
and self.element.contents[index-1].__class__ == NavigableString):
# (See comments in appendChild)
old_node = self.element.contents[index-1]
new_str = self.soup.new_string(old_node + node.element)
old_node.replace_with(new_str)
else:
self.element.insert(index, node.element)
node.parent = self
def removeChild(self, node):
node.element.extract()
def reparentChildren(self, newParent):
while self.element.contents:
child = self.element.contents[0]
child.extract()
if isinstance(child, Tag):
newParent.appendChild(
Element(child, self.soup, namespaces["html"]))
else:
newParent.appendChild(
TextNode(child, self.soup))
def cloneNode(self):
tag = self.soup.new_tag(self.element.name, self.namespace)
node = Element(tag, self.soup, self.namespace)
for key,value in self.attributes:
node.attributes[key] = value
return node
def hasContent(self):
return self.element.contents
def getNameTuple(self):
        if self.namespace is None:
return namespaces["html"], self.name
else:
return self.namespace, self.name
nameTuple = property(getNameTuple)
class TextNode(Element):
def __init__(self, element, soup):
html5lib.treebuilders._base.Node.__init__(self, None)
self.element = element
self.soup = soup
def cloneNode(self):
raise NotImplementedError
|
apache-2.0
|
emonty/burrow
|
burrow/tests/backend/test_http.py
|
1
|
1516
|
# Copyright (C) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Unittests for the HTTP backend. This starts the WSGI server, so it
tests the WSGI frontend as well.'''
import ConfigParser
import burrow.backend.http
from burrow.tests import backend
class HTTPBase(backend.Base):
'''Base test case for http backend.'''
def setUp(self):
super(HTTPBase, self).setUp()
config = (ConfigParser.ConfigParser(), 'test')
self.backend = burrow.backend.http.Backend(config)
self.check_empty()
class TestHTTPAccounts(HTTPBase, backend.TestAccounts):
'''Test case for accounts with http backend.'''
pass
class TestHTTPQueues(HTTPBase, backend.TestQueues):
'''Test case for queues with http backend.'''
pass
class TestHTTPMessages(HTTPBase, backend.TestMessages):
'''Test case for messages with http backend.'''
pass
class TestHTTPMessage(HTTPBase, backend.TestMessage):
'''Test case for message with http backend.'''
pass
|
apache-2.0
|
miaecle/deepchem
|
deepchem/models/models.py
|
1
|
8342
|
"""
Contains an abstract base class that supports different ML models.
"""
import sys
import numpy as np
import pandas as pd
import joblib
import os
import shutil
import tempfile
import sklearn
import logging
from sklearn.base import BaseEstimator
from deepchem.data import Dataset, pad_features
from deepchem.metrics import Metric
from deepchem.trans import Transformer, undo_transforms
from deepchem.utils.save import load_from_disk
from deepchem.utils.save import save_to_disk
from deepchem.utils.evaluate import Evaluator
from typing import Any, Dict, List, Optional, Sequence
from deepchem.utils.typing import OneOrMany
logger = logging.getLogger(__name__)
class Model(BaseEstimator):
"""
Abstract base class for DeepChem models.
"""
def __init__(self,
model_instance: Optional[Any] = None,
model_dir: Optional[str] = None,
**kwargs) -> None:
"""Abstract class for all models.
This is intended only for convenience of subclass implementations
and should not be invoked directly.
    Parameters
    ----------
model_instance: object
Wrapper around ScikitLearn/Keras/Tensorflow model object.
model_dir: str, optional (default None)
Path to directory where model will be stored. If not specified,
model will be stored in a temporary directory.
"""
if self.__class__.__name__ == "Model":
raise ValueError(
"This constructor is for an abstract class and should never be called directly. Can only call from subclass constructors."
)
self.model_dir_is_temp = False
if model_dir is not None:
if not os.path.exists(model_dir):
os.makedirs(model_dir)
else:
model_dir = tempfile.mkdtemp()
self.model_dir_is_temp = True
self.model_dir = model_dir
self.model_instance = model_instance
self.model_class = model_instance.__class__
def __del__(self):
if 'model_dir_is_temp' in dir(self) and self.model_dir_is_temp:
shutil.rmtree(self.model_dir)
def fit_on_batch(self, X: Sequence, y: Sequence, w: Sequence) -> float:
"""Perform a single step of training.
Parameters
----------
X: ndarray
the inputs for the batch
y: ndarray
the labels for the batch
w: ndarray
the weights for the batch
Returns
-------
the loss on the batch
"""
raise NotImplementedError(
"Each model is responsible for its own fit_on_batch method.")
def predict_on_batch(self, X: Sequence):
"""
Makes predictions on given batch of new data.
Parameters
----------
X: np.ndarray
Features
"""
raise NotImplementedError(
"Each model is responsible for its own predict_on_batch method.")
def reload(self) -> None:
"""
Reload trained model from disk.
"""
raise NotImplementedError(
"Each model is responsible for its own reload method.")
@staticmethod
def get_model_filename(model_dir: str) -> str:
"""
Given model directory, obtain filename for the model itself.
"""
return os.path.join(model_dir, "model.joblib")
@staticmethod
def get_params_filename(model_dir: str) -> str:
"""
Given model directory, obtain filename for the model itself.
"""
return os.path.join(model_dir, "model_params.joblib")
def save(self) -> None:
"""Dispatcher function for saving.
Each subclass is responsible for overriding this method.
"""
raise NotImplementedError
def fit(self, dataset: Dataset, nb_epoch: int = 10) -> float:
"""
Fits a model on data in a Dataset object.
Parameters
----------
dataset: Dataset
the Dataset to train on
nb_epoch: int
the number of epochs to train for
Returns
-------
the average loss over the most recent epoch
"""
for epoch in range(nb_epoch):
logger.info("Starting epoch %s" % str(epoch + 1))
losses = []
for (X_batch, y_batch, w_batch, ids_batch) in dataset.iterbatches():
losses.append(self.fit_on_batch(X_batch, y_batch, w_batch))
logger.info(
"Avg loss for epoch %d: %f" % (epoch + 1, np.array(losses).mean()))
return np.array(losses).mean()
def predict(self, dataset: Dataset,
transformers: List[Transformer] = []) -> OneOrMany[np.ndarray]:
"""
Uses self to make predictions on provided Dataset object.
Parameters
----------
dataset: dc.data.Dataset
Dataset to make prediction on
transformers: list of dc.trans.Transformers
Transformers that the input data has been transformed by. The output
is passed through these transformers to undo the transformations.
Returns
-------
    a NumPy array if the model produces a single output, or a list of arrays
    if it produces multiple outputs
"""
y_preds = []
n_tasks = self.get_num_tasks()
ind = 0
for (X_batch, _, _, ids_batch) in dataset.iterbatches(deterministic=True):
n_samples = len(X_batch)
y_pred_batch = self.predict_on_batch(X_batch)
# Discard any padded predictions
y_pred_batch = y_pred_batch[:n_samples]
y_pred_batch = undo_transforms(y_pred_batch, transformers)
y_preds.append(y_pred_batch)
y_pred = np.concatenate(y_preds)
return y_pred
def evaluate(self,
dataset: Dataset,
metrics: List[Metric],
transformers: List[Transformer] = [],
per_task_metrics: bool = False,
use_sample_weights: bool = False,
n_classes: int = 2):
"""
Evaluates the performance of this model on specified dataset.
This function uses `Evaluator` under the hood to perform model
evaluation. As a result, it inherits the same limitations of
`Evaluator`. Namely, that only regression and classification
models can be evaluated in this fashion. For generator models, you
will need to overwrite this method to perform a custom evaluation.
Keyword arguments specified here will be passed to
`Evaluator.compute_model_performance`.
Parameters
----------
dataset: `dc.data.Dataset`
Dataset object.
metrics: dc.metrics.Metric/list[dc.metrics.Metric]/function
The set of metrics provided. This class attempts to do some
intelligent handling of input. If a single `dc.metrics.Metric`
object is provided or a list is provided, it will evaluate
`self.model` on these metrics. If a function is provided, it is
assumed to be a metric function that this method will attempt to
wrap in a `dc.metrics.Metric` object. A metric function must
accept two arguments, `y_true, y_pred` both of which are
`np.ndarray` objects and return a floating point score. The
metric function may also accept a keyword argument
`sample_weight` to account for per-sample weights.
transformers: list
List of `dc.trans.Transformer` objects. These transformations
must have been applied to `dataset` previously. The dataset will
be untransformed for metric evaluation.
per_task_metrics: bool, optional
If true, return computed metric for each task on multitask dataset.
use_sample_weights: bool, optional (default False)
If set, use per-sample weights `w`.
    n_classes: int, optional (default 2)
If specified, will use `n_classes` as the number of unique classes
in `self.dataset`. Note that this argument will be ignored for
regression metrics.
Returns
-------
multitask_scores: dict
Dictionary mapping names of metrics to metric scores.
all_task_scores: dict, optional
If `per_task_metrics == True` is passed as a keyword argument,
then returns a second dictionary of scores for each task
separately.
"""
evaluator = Evaluator(self, dataset, transformers)
return evaluator.compute_model_performance(
metrics,
per_task_metrics=per_task_metrics,
use_sample_weights=use_sample_weights,
n_classes=n_classes)
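  # For instance (hypothetical metric, not part of deepchem), a plain
  # function can be handed to `evaluate` per the docstring above, and it
  # will be wrapped in a `dc.metrics.Metric` object:
  #
  #   def mean_abs_error(y_true, y_pred):
  #       return float(np.abs(y_true - y_pred).mean())
  #
  #   scores = model.evaluate(dataset, mean_abs_error)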
def get_task_type(self) -> str:
"""
Currently models can only be classifiers or regressors.
"""
raise NotImplementedError
def get_num_tasks(self) -> int:
"""
Get number of tasks.
"""
raise NotImplementedError
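# A minimal sketch of a concrete subclass (illustrative only; `MeanModel`
# is not part of deepchem). It satisfies the abstract hooks that `fit`
# and `predict` above rely on:
#
#   class MeanModel(Model):
#     def fit_on_batch(self, X, y, w):
#       self._mean = float(np.mean(y))
#       return 0.0
#     def predict_on_batch(self, X):
#       return np.full(len(X), self._mean)
#     def get_num_tasks(self):
#       return 1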
|
mit
|
Workday/OpenFrame
|
tools/telemetry/third_party/modulegraph/modulegraph_tests/test_explicit_packages.py
|
26
|
1643
|
from __future__ import absolute_import
import unittest
import os, shutil, sys
from modulegraph import find_modules
from modulegraph import modulegraph
class PackagesTestCase (unittest.TestCase):
if not hasattr(unittest.TestCase, 'assertIsInstance'):
def assertIsInstance(self, object, types, message=None):
self.assertTrue(isinstance(object, types),
message or '%r is not an instance of %r'%(object, types))
def testIncludePackage(self):
root = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'testpkg-packages')
mf = find_modules.find_modules(
path=[root]+sys.path,
scripts=[os.path.join(root, "main_script.py")],
packages=['pkg'],
debug=1)
node = mf.findNode('pkg')
self.assertIsInstance(node, modulegraph.Package)
node = mf.findNode('pkg.sub3')
self.assertIsInstance(node, modulegraph.SourceModule)
def testIncludePackageWithExclude(self):
root = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'testpkg-packages')
mf = find_modules.find_modules(
path=[root]+sys.path,
scripts=[os.path.join(root, "main_script.py")],
packages=['pkg'],
excludes=['pkg.sub3'])
node = mf.findNode('pkg')
self.assertIsInstance(node, modulegraph.Package)
node = mf.findNode('pkg.sub3')
self.assertIsInstance(node, modulegraph.ExcludedModule)
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
chiefspace/udemy-rest-api
|
udemy_rest_api_section6/env/lib/python3.4/site-packages/sqlalchemy/dialects/mysql/mysqlconnector.py
|
33
|
6580
|
# mysql/mysqlconnector.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+mysqlconnector
:name: MySQL Connector/Python
:dbapi: myconnpy
:connectstring: mysql+mysqlconnector://<user>:<password>@\
<host>[:<port>]/<dbname>
:url: http://dev.mysql.com/downloads/connector/python/
Unicode
-------
Please see :ref:`mysql_unicode` for current recommendations on unicode
handling.
"""
from .base import (MySQLDialect, MySQLExecutionContext,
MySQLCompiler, MySQLIdentifierPreparer,
BIT)
from ... import util
import re
class MySQLExecutionContext_mysqlconnector(MySQLExecutionContext):
def get_lastrowid(self):
return self.cursor.lastrowid
class MySQLCompiler_mysqlconnector(MySQLCompiler):
def visit_mod_binary(self, binary, operator, **kw):
if self.dialect._mysqlconnector_double_percents:
return self.process(binary.left, **kw) + " %% " + \
self.process(binary.right, **kw)
else:
return self.process(binary.left, **kw) + " % " + \
self.process(binary.right, **kw)
def post_process_text(self, text):
if self.dialect._mysqlconnector_double_percents:
return text.replace('%', '%%')
else:
return text
def escape_literal_column(self, text):
if self.dialect._mysqlconnector_double_percents:
return text.replace('%', '%%')
else:
return text
class MySQLIdentifierPreparer_mysqlconnector(MySQLIdentifierPreparer):
def _escape_identifier(self, value):
value = value.replace(self.escape_quote, self.escape_to_quote)
if self.dialect._mysqlconnector_double_percents:
return value.replace("%", "%%")
else:
return value
class _myconnpyBIT(BIT):
    def result_processor(self, dialect, coltype):
        """MySQL Connector/Python already converts MySQL BIT values,
        so no result processing is needed."""
        return None
class MySQLDialect_mysqlconnector(MySQLDialect):
driver = 'mysqlconnector'
supports_unicode_binds = True
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
supports_native_decimal = True
default_paramstyle = 'format'
execution_ctx_cls = MySQLExecutionContext_mysqlconnector
statement_compiler = MySQLCompiler_mysqlconnector
preparer = MySQLIdentifierPreparer_mysqlconnector
colspecs = util.update_copy(
MySQLDialect.colspecs,
{
BIT: _myconnpyBIT,
}
)
@util.memoized_property
def supports_unicode_statements(self):
return util.py3k or self._mysqlconnector_version_info > (2, 0)
@classmethod
def dbapi(cls):
from mysql import connector
return connector
def create_connect_args(self, url):
opts = url.translate_connect_args(username='user')
opts.update(url.query)
util.coerce_kw_type(opts, 'allow_local_infile', bool)
util.coerce_kw_type(opts, 'autocommit', bool)
util.coerce_kw_type(opts, 'buffered', bool)
util.coerce_kw_type(opts, 'compress', bool)
util.coerce_kw_type(opts, 'connection_timeout', int)
util.coerce_kw_type(opts, 'connect_timeout', int)
util.coerce_kw_type(opts, 'consume_results', bool)
util.coerce_kw_type(opts, 'force_ipv6', bool)
util.coerce_kw_type(opts, 'get_warnings', bool)
util.coerce_kw_type(opts, 'pool_reset_session', bool)
util.coerce_kw_type(opts, 'pool_size', int)
util.coerce_kw_type(opts, 'raise_on_warnings', bool)
util.coerce_kw_type(opts, 'raw', bool)
util.coerce_kw_type(opts, 'ssl_verify_cert', bool)
util.coerce_kw_type(opts, 'use_pure', bool)
util.coerce_kw_type(opts, 'use_unicode', bool)
# unfortunately, MySQL/connector python refuses to release a
# cursor without reading fully, so non-buffered isn't an option
opts.setdefault('buffered', True)
# FOUND_ROWS must be set in ClientFlag to enable
# supports_sane_rowcount.
if self.dbapi is not None:
try:
from mysql.connector.constants import ClientFlag
client_flags = opts.get(
'client_flags', ClientFlag.get_default())
client_flags |= ClientFlag.FOUND_ROWS
opts['client_flags'] = client_flags
except Exception:
pass
return [[], opts]
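    # For example (illustrative URL): a DSN such as
    #   mysql+mysqlconnector://scott:tiger@localhost:3306/test?buffered=false
    # arrives here with the query string merged into `opts`, which is why the
    # string values above must be coerced back to bool/int before being
    # handed to the driver.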
@util.memoized_property
def _mysqlconnector_version_info(self):
if self.dbapi and hasattr(self.dbapi, '__version__'):
m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?',
self.dbapi.__version__)
if m:
return tuple(
int(x)
for x in m.group(1, 2, 3)
if x is not None)
@util.memoized_property
def _mysqlconnector_double_percents(self):
return not util.py3k and self._mysqlconnector_version_info < (2, 0)
def _get_server_version_info(self, connection):
dbapi_con = connection.connection
version = dbapi_con.get_server_version()
return tuple(version)
def _detect_charset(self, connection):
return connection.connection.charset
def _extract_error_code(self, exception):
return exception.errno
def is_disconnect(self, e, connection, cursor):
errnos = (2006, 2013, 2014, 2045, 2055, 2048)
exceptions = (self.dbapi.OperationalError, self.dbapi.InterfaceError)
if isinstance(e, exceptions):
return e.errno in errnos or \
"MySQL Connection not available." in str(e)
else:
return False
def _compat_fetchall(self, rp, charset=None):
return rp.fetchall()
def _compat_fetchone(self, rp, charset=None):
return rp.fetchone()
_isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED',
'READ COMMITTED', 'REPEATABLE READ',
'AUTOCOMMIT'])
def _set_isolation_level(self, connection, level):
if level == 'AUTOCOMMIT':
connection.autocommit = True
else:
connection.autocommit = False
super(MySQLDialect_mysqlconnector, self)._set_isolation_level(
connection, level)
dialect = MySQLDialect_mysqlconnector
|
gpl-2.0
|
rafasis1986/EngineeringMidLevel
|
flaskiwsapp/api/v1/views/ticketViews.py
|
1
|
3027
|
from werkzeug.exceptions import BadRequest
from flask import request
from flask.blueprints import Blueprint
from flask_cors.extension import CORS
from flask_jwt import jwt_required, current_identity, JWTError
from flask_restful import Resource, reqparse, fields
from flaskiwsapp.api.v1.schemas.ticketSchemas import BaseTicketJsonSchema
from flaskiwsapp.projects.controllers.requestControllers import get_request_by_id
from flaskiwsapp.projects.controllers.ticketControllers import create_ticket, delete_ticket, \
get_ticket_by_id, get_tickets_user
from flaskiwsapp.snippets.customApi import CustomApi
from flaskiwsapp.workers.queueManager import create_ticket_email_job, create_ticket_sms_job
from flaskiwsapp.snippets.constants import ROLE_EMPLOYEE
from flaskiwsapp.snippets.helpers import roles_required
tickets_api_blueprint = Blueprint('tickets_api_blueprint', __name__)
cors = CORS(tickets_api_blueprint)
tickets_api = CustomApi(tickets_api_blueprint)
ticket_parser = reqparse.RequestParser(bundle_errors=True)
ticket_parser.add_argument('request_id', type=int, location='json', required=True, help="Choose a request id")
ticket_parser.add_argument('detail', type=str, location='json', required=True, help="send a detail.")
ticket_fields = {
'request_id': fields.Integer,
'detail': fields.String
}
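# A JSON request body matching the parser above would look like (illustrative
# path and values):
#   POST /tickets   {"request_id": 42, "detail": "Screen flickers on boot"}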
class TicketsAPI(Resource):
"""An API to get or create tickets."""
@jwt_required()
@roles_required(ROLE_EMPLOYEE)
def get(self):
"""
HTTP GET. Get all requests.
:email: a string valid as object id.
:returns: One or all available requests.
"""
tickets = get_tickets_user(current_identity.id)
request_schema = BaseTicketJsonSchema(many=True)
return request_schema.dump(tickets).data
@jwt_required()
@roles_required(ROLE_EMPLOYEE)
def post(self):
try:
args = ticket_parser.parse_args()
request = get_request_by_id(args.request_id)
ticket = create_ticket(request, current_identity, args.detail)
ticket_schema = BaseTicketJsonSchema()
create_ticket_email_job(ticket.id)
create_ticket_sms_job(ticket.id)
except BadRequest as e:
raise JWTError(e, e.description)
except Exception as e:
raise JWTError(e, e.args[0])
else:
return ticket_schema.dump(ticket).data
class TicketAPI(Resource):
"""An API to get or delete a ticket. """
@jwt_required()
def delete(self, ticket_id):
"""
        HTTP DELETE. Delete a ticket.
:returns:
"""
return delete_ticket(ticket_id)
@jwt_required()
def get(self, ticket_id):
"""
        HTTP GET. Get a specific ticket.
:returns:
"""
ticket = get_ticket_by_id(ticket_id)
return BaseTicketJsonSchema().dump(ticket).data
tickets_api.add_resource(TicketsAPI, '', endpoint='list')
tickets_api.add_resource(TicketAPI, '<ticket_id>', endpoint='detail')
|
mit
|
ChristosChristofidis/h2o-3
|
py2/testdir_r/sh2junit.py
|
31
|
16028
|
import sys, psutil, os, stat, tempfile, argparse, time, datetime
sys.path.extend(['.','..','../..','py'])
import h2o_sandbox
# Stripped down, similar to h2o.py has for these functions
# Possible to do this in bash, but the code becomes cryptic.
# You can execute this as sh2junit.py <bash command string>
# sh2junit runs the cmd_string as a subprocess, with stdout/stderr going to files in sandbox
# and stdout to python stdout too.
# When it completes, check the sandbox for errors (using h2o_sandbox.py
# prints interesting things to stdout. Creates the result xml in the current dire
# with name "sh2junit_<name>.xml"
def sandbox_tmp_file(prefix='', suffix=''):
# this gives absolute path, good!
dirname = './sandbox'
if not os.path.exists(dirname):
print "no ./sandbox. Creating"
os.makedirs(dirname)
fd, path = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dirname)
# make sure the file now exists
# os.open(path, 'a').close()
# give everyone permission to read it (jenkins running as
# 0xcustomer needs to archive as jenkins
#permissions = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
os.chmod(path, 0644) #'644') #permissions)
return (fd, path)
#**************************************************************************
# Example junit xml
#<?xml version="1.0" encoding="UTF-8"?>
#<testsuites disabled="" errors="" failures="" name="" tests="" time="">
# <testsuite disabled="" errors="" failures="" hostname="" id="" name="" package="" skipped="" tests="" time="" timestamp="">
# <properties>
# <property name="" value=""/>
# </properties>
# <testcase assertions="" classname="" name="" status="" time="">
# <skipped/>
# <error message="" type=""/>
# <failure message="" type=""/>
# <system-out/>
# <system-err/>
# </testcase>
# <system-out/>
# <system-err/>
# </testsuite>
#</testsuites>
def create_junit_xml(name, out, err, sandboxErrorMessage, errors=0, elapsed=0):
# http://junitpdfreport.sourceforge.net/managedcontent/PdfTranslation
# not really nosetests..just trying to mimic the python xml
content = '<?xml version="1.0" encoding="UTF-8" ?>\n'
content += ' <testsuite name="nosetests" tests="1" errors="%s" failures="0" skip="0">\n' % (errors)
content += ' <testcase classname="%s" name="%s" time="%0.4f">\n' % (name, name, elapsed)
if errors != 0 and not sandboxErrorMessage:
content += ' <error type="Non-zero R exit code" message="Non-zero R exit code"></error>\n'
    # may or may not be 2 errors (R exit code plus log error)
if errors != 0 and sandboxErrorMessage:
content += ' <error type="Error in h2o logs" message="Error in h2o logs"></error>\n'
content += ' <system-out>\n'
content += '<![CDATA[\n'
content += 'spawn stdout' + str(datetime.datetime.now()) + '**********************************************************\n'
content += out
content += ']]>\n'
content += ' </system-out>\n'
content += ' <system-err>\n'
content += '<![CDATA[\n'
content += 'spawn stderr' + str(datetime.datetime.now()) + '**********************************************************\n'
content += err
if sandboxErrorMessage:
content += 'spawn errors from sandbox log parsing*********************************\n'
# maybe could split this into a 2nd stdout or stder ..see above
content += sandboxErrorMessage
content += ']]>\n'
content += ' </system-err>\n'
content += ' </testcase>\n'
content += ' </testsuite>\n'
# see if adding nosetests makes michal's stuff pick it up??
# and "test_" prefix"
x = './test_' + os.path.basename(name) + '.nosetests.xml'
with open(x, 'wb') as f:
f.write(content)
#f = open(x, 'w')
#f.write(content)
#f.close()
#**************************************************************************
# belt and suspenders. Do we really need to worry about this?
def terminate_process_tree(pid, including_parent=True):
parent = psutil.Process(pid)
for child in parent.get_children(recursive=True):
try:
child.terminate()
except psutil.NoSuchProcess:
print "terminate_process_tree:", "NoSuchProcess. couldn't terminate child process with pid %s" % child.pid()
except psutil.AccessDenied:
print "terminate_process_tree:", "couldn't terminate child process with pid %s" % child.pid()
else:
child.wait(timeout=3)
if including_parent:
try:
parent.terminate()
except psutil.NoSuchProcess:
print "terminate_process_tree:", "NoSuchProcess. couldn't terminate parent process with pid %s" % parent.pid()
pass
except psutil.AccessDenied:
print "terminate_process_tree:", "AccessDenied. couldn't terminate parent process with pid %s" % parent.pid()
else:
parent.wait(timeout=3)
def terminate_child_processes():
me = os.getpid()
terminate_process_tree(me, including_parent=False)
#**************************************************************************
def rc_if_exists_and_done(ps):
try:
rc = ps.wait(0)
except psutil.TimeoutExpired:
# not sure why I'm getting this
print "Got TimeoutExpired on the R subprocess, may be legal"
rc = None
except psutil.NoSuchProcess:
raise Exception("The R subprocess disappeared when we thought it should still be there")
except psutil.AccessDenied:
raise Exception("The R subprocess gave us AccessDenied")
# rc = None means it already completed?
# FIX! Is it none if we get a timeout exception on this python ..how is that captured?
if rc:
# increment the global errors count if we get a non-zero rc. non-zero rc should only happen once?
error = 1
print "rc_if_exists_and_done: got non-zero rc: %s" % rc
else:
error = 0
return (rc, error)
#**************************************************************************
def sh2junit(name='NoName', cmd_string='/bin/ls', timeout=300, shdir=None, **kwargs):
# split by arbitrary strings of whitespace characters (space, tab, newline, return, formfeed)
print "cmd_string:", cmd_string
cmdList = cmd_string.split()
# these are absolute paths
outfd, outpath = sandbox_tmp_file(prefix=name + '.stdout.', suffix='.log')
errfd, errpath = sandbox_tmp_file(prefix=name + '.stderr.', suffix='.log')
# make outpath and errpath full paths, so we can redirect
print "outpath:", outpath
print "errpath:", errpath
start = time.time()
print "psutil.Popen:", cmdList, outpath, errpath
import subprocess
# start the process in the target dir, if desired
if shdir:
currentDir = os.getcwd()
os.chdir(shdir)
ps = psutil.Popen(cmdList, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
if shdir:
os.chdir(currentDir)
comment = 'PID %d, stdout %s, stderr %s' % (
ps.pid, os.path.basename(outpath), os.path.basename(errpath))
print "spawn_cmd", cmd_string, comment
# Reads the subprocess stdout until it is closed and
# ...echo it our python stdout and also the R stdout file in sandbox
# Then wait for the program to exit.
# Read before wait so that you don't risk the pipe filling up and hanging the program.
# You wait after read for the final program exit and return code.
# If you don't wait, you'll get a zombie process (at least on linux)
# this might not do what we want..see:
# http://stackoverflow.com/questions/2804543/read-subprocess-stdout-line-by-line
# I suppose we'll stop early?
# shouldn't need a delay before checking this?
if not ps.is_running():
raise Exception("sh2junit: not immediate ps.is_running after start")
# Until we get the rc, it can be a zombie process.
# A zombie process is not a real process.
# it's just a remaining entry in the process table until the parent process requests the child's return code.
# The actual process has ended and requires no other resources but said process table entry.
linesMayExist = True
errors = 0
timeoutError = False
while linesMayExist:
# get whatever accumulated, up to nothing returned
# only do up to 20 lines before we check timeout again
# why was R processes not completing on centos?
# linesMayExist = ps.is_running() and not ps.status() == psutil.STATUS_ZOMBIE
linesMayExist = ps.is_running()
lineBurstCnt = 0
# stdout from subprocess
line = ps.stdout.readline()
# R apparently uses stderr a lot, so want to mix that in. We don't grab it until we hit a stall in R stdout though.
while line:
lineBurstCnt += 1
# maybe I should use p.communicate() instead. have to keep it to stdout? or do stdout+stderr here
sys.stdout.write("R->" + line) # to our python stdout, with a prefix so it's obviously from R
sys.stdout.flush()
os.write(outfd, line) # to sandbox R stdout
elapsed = time.time() - start
if elapsed > timeout:
timeoutError = True
errors += 1
print "ERROR: sh2junit: elapsed: %0.2f timeout: %s (secs) while echoing subprocess stdout" % (elapsed, timeout)
#kill R subprocess but don't kill me
terminate_process_tree(ps.pid, including_parent=False)
break
line = ps.stdout.readline()
if timeoutError:
print "\n\n\nERROR: timeout"
break
# stderr from subprocess
line = ps.stderr.readline()
while line:
lineBurstCnt += 1
sys.stdout.write("Re->" + line) # to our python stdout, with a prefix so it's obviously from R stderr
sys.stdout.flush()
os.write(errfd, line) # to sandbox R stderr
line = ps.stderr.readline()
print "lineBurstCnt:", lineBurstCnt
# Check. may have flipped to not running, and we just got the last bit.
# shouldn't be a race on a transition here, if ps.wait(0) completion syncs the transition
if linesMayExist:
print "ps.is_running():", ps.is_running(), ps.pid, ps.name, ps.status, ps.create_time
# unload the return code without waiting..so we don't have a zombie!
(lastrc, error) = rc_if_exists_and_done(ps)
errors += error
elapsed = time.time() - start
# forever if timeout is None
#if timeout and elapsed > timeout:
if elapsed > timeout:
timeoutError = True
errors += 1
# we don't want to exception here, because we're going to print the xml that says there's an error
# I guess we'll end up terminating the R process down below
# could we have lines in stdout we didn't catch up on? maybe, but do we care?
print "ERROR: sh2junit: elapsed: %0.2f timeout: %s (secs) while echoing subprocess stdout" % (elapsed, timeout)
#kill R subprocess but don't kill me
#terminate_process_tree(ps.pid, including_parent=False)
break
# wait for some more output to accumulate
time.sleep(0.25)
# It shouldn't be running now?
# timeout=None waits forever. timeout=0 returns immediately.
# default above is 5 minutes
# Wait for process termination. Since child: return the exit code.
# If the process is already terminated does not raise NoSuchProcess exception
# but just return None immediately.
# If timeout is specified and process is still alive raises psutil.TimeoutExpired() exception.
# old
# rc = ps.wait(timeout)
(lastrc, error) = rc_if_exists_and_done(ps)
errors += error
elapsed = time.time() - start
# Prune h2o logs to interesting lines and detect errors.
# Error lines are returned. warning/info are printed to our (python stdout)
# so that's always printed/saved?
# None if no error
sandboxErrorMessage = h2o_sandbox.check_sandbox_for_errors(
LOG_DIR='./sandbox',
python_test_name=name,
cloudShutdownIsError=True,
sandboxIgnoreErrors=True) # don't take exception on error
if sandboxErrorMessage:
errors += 1
out = file(outpath).read()
err = file(errpath).read()
create_junit_xml(name, out, err, sandboxErrorMessage, errors=errors, elapsed=elapsed)
if not errors:
return (errors, outpath, errpath)
else:
# dump all the info as part of the exception? maybe too much
# is this bad to do in all cases? do we need it?
hline = "\n===========================================BEGIN DUMP=============================================================\n"
hhline = "\n===========================================END DUMP=============================================================\n"
out = '[stdout->err]: '.join(out.splitlines(True))
        err = '[stderr->err]: '.join(err.splitlines(True))
if ps.is_running():
print "Before terminate:", ps.pid, ps.is_running()
terminate_process_tree(ps.pid, including_parent=True)
if sandboxErrorMessage:
print "\n\n\nError in Sandbox. Ending test. Dumping sub-process output.\n"
print hline
raise Exception("%s %s \n\tlastrc:%s \n\terrors:%s \n\tErrors found in ./sandbox log files?.\nR stdout:\n%s\n\nR stderr:\n%s\n%s" %
(name, cmd_string, lastrc, errors, out, err, hhline))
# could have already terminated?
elif timeoutError:
print "\n\n\nTimeout Error. Ending test. Dumping sub-process output.\n"
print hline
raise Exception("%s %s \n\tlastrc:%s \n\terrors:%s \n\ttimed out after %d secs. \nR stdout:\n%s\n\nR stderr:\n%s\n%s" %
(name, cmd_string, lastrc, errors, timeout or 0, out, err, hhline))
else:
print "\n\n\nCaught exception. Ending test. Dumping sub-process output.\n"
print hline
raise Exception("%s %s \n\tlastrc:%s \n\terrors:%s \n\tLikely non-zero exit code from R.\nR stdout:\n%s\n\nR stderr:\n%s\n%s" %
(name, cmd_string, lastrc, errors, out, err, hhline))
#**************************************************************************
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-shdir', type=str, default=None, help='executes the $cmd in the target dir, but the logs stay in sandbox here')
parser.add_argument('-name', type=str, default='NoName', help='used to help name the xml/stdout/stderr logs created')
parser.add_argument('-timeout', type=int, default=5, help='secs timeout for the shell subprocess. Fail if timeout')
parser.add_argument('-cmd', '--cmd_string', type=str, default=None, help="cmd string to pass to shell subprocess. Better to just use'--' to start the cmd (everything after that is sucked in)")
parser.add_argument('Rargs', nargs=argparse.REMAINDER)
args = parser.parse_args()
if args.cmd_string:
cmd_string = args.cmd_string
else:
# easiest way to handle multiple tokens for command
# end with -- and this grabs the rest
# drop the leading '--' if we stopped parsing the rest that way
if args.Rargs:
print "args.Rargs:", args.Rargs
if args.Rargs[0]=='--':
args.Rargs[0] = ''
cmd_string = ' '.join(args.Rargs)
else:
# placeholder for test
cmd_string = '/bin/ls'
sh2junit(name=args.name, cmd_string=cmd_string, timeout=args.timeout, shdir=args.shdir)
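# Example invocation (illustrative command and file names):
#   python sh2junit.py -name mytest -timeout 60 -- Rscript my_test.R
# runs the R script as a subprocess, tees its stdout/stderr into ./sandbox,
# and writes ./test_mytest.nosetests.xml in junit format for CI to pick up.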
|
apache-2.0
|
why11002526/keras
|
tests/auto/test_sequential_model.py
|
20
|
12510
|
from __future__ import absolute_import
from __future__ import print_function
import unittest
import numpy as np
np.random.seed(1337)
from keras.models import Sequential, model_from_json, model_from_yaml
from keras.layers.core import Dense, Activation, Merge
from keras.utils import np_utils
from keras.utils.test_utils import get_test_data
input_dim = 32
nb_hidden = 16
nb_class = 4
batch_size = 64
nb_epoch = 1
train_samples = 5000
test_samples = 1000
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples, nb_test=test_samples, input_shape=(input_dim,),
classification=True, nb_class=4)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
print(X_train.shape)
print(y_train.shape)
class TestSequential(unittest.TestCase):
def test_sequential(self):
print('Test sequential')
model = Sequential()
model.add(Dense(input_dim, nb_hidden))
model.add(Activation('relu'))
model.add(Dense(nb_hidden, nb_class))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)
model.train_on_batch(X_train[:32], y_train[:32])
loss = model.evaluate(X_train, y_train, verbose=0)
print('loss:', loss)
if loss > 0.6:
raise Exception('Score too low, learning issue.')
preds = model.predict(X_test, verbose=0)
classes = model.predict_classes(X_test, verbose=0)
probas = model.predict_proba(X_test, verbose=0)
print(model.get_config(verbose=1))
print('test weight saving')
model.save_weights('temp.h5', overwrite=True)
model = Sequential()
model.add(Dense(input_dim, nb_hidden))
model.add(Activation('relu'))
model.add(Dense(nb_hidden, nb_class))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.load_weights('temp.h5')
nloss = model.evaluate(X_train, y_train, verbose=0)
print(nloss)
assert(loss == nloss)
# test json serialization
json_data = model.to_json()
model = model_from_json(json_data)
# test yaml serialization
yaml_data = model.to_yaml()
model = model_from_yaml(yaml_data)
def test_merge_sum(self):
print('Test merge: sum')
left = Sequential()
left.add(Dense(input_dim, nb_hidden))
left.add(Activation('relu'))
right = Sequential()
right.add(Dense(input_dim, nb_hidden))
right.add(Activation('relu'))
model = Sequential()
model.add(Merge([left, right], mode='sum'))
model.add(Dense(nb_hidden, nb_class))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test], y_test))
model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test], y_test))
model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)
loss = model.evaluate([X_train, X_train], y_train, verbose=0)
print('loss:', loss)
if loss > 0.7:
raise Exception('Score too low, learning issue.')
preds = model.predict([X_test, X_test], verbose=0)
classes = model.predict_classes([X_test, X_test], verbose=0)
probas = model.predict_proba([X_test, X_test], verbose=0)
print(model.get_config(verbose=1))
print('test weight saving')
model.save_weights('temp.h5', overwrite=True)
left = Sequential()
left.add(Dense(input_dim, nb_hidden))
left.add(Activation('relu'))
right = Sequential()
right.add(Dense(input_dim, nb_hidden))
right.add(Activation('relu'))
model = Sequential()
model.add(Merge([left, right], mode='sum'))
model.add(Dense(nb_hidden, nb_class))
model.add(Activation('softmax'))
model.load_weights('temp.h5')
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
nloss = model.evaluate([X_train, X_train], y_train, verbose=0)
print(nloss)
assert(loss == nloss)
def test_merge_concat(self):
print('Test merge: concat')
left = Sequential()
left.add(Dense(input_dim, nb_hidden))
left.add(Activation('relu'))
right = Sequential()
right.add(Dense(input_dim, nb_hidden))
right.add(Activation('relu'))
model = Sequential()
model.add(Merge([left, right], mode='concat'))
model.add(Dense(nb_hidden * 2, nb_class))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test], y_test))
model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test], y_test))
model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)
loss = model.evaluate([X_train, X_train], y_train, verbose=0)
print('loss:', loss)
if loss > 0.6:
raise Exception('Score too low, learning issue.')
preds = model.predict([X_test, X_test], verbose=0)
classes = model.predict_classes([X_test, X_test], verbose=0)
probas = model.predict_proba([X_test, X_test], verbose=0)
print(model.get_config(verbose=1))
print('test weight saving')
model.save_weights('temp.h5', overwrite=True)
left = Sequential()
left.add(Dense(input_dim, nb_hidden))
left.add(Activation('relu'))
right = Sequential()
right.add(Dense(input_dim, nb_hidden))
right.add(Activation('relu'))
model = Sequential()
model.add(Merge([left, right], mode='concat'))
model.add(Dense(nb_hidden * 2, nb_class))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.load_weights('temp.h5')
nloss = model.evaluate([X_train, X_train], y_train, verbose=0)
print(nloss)
assert(loss == nloss)
def test_merge_recursivity(self):
print('Test merge recursivity')
left = Sequential()
left.add(Dense(input_dim, nb_hidden))
left.add(Activation('relu'))
right = Sequential()
right.add(Dense(input_dim, nb_hidden))
right.add(Activation('relu'))
righter = Sequential()
righter.add(Dense(input_dim, nb_hidden))
righter.add(Activation('relu'))
intermediate = Sequential()
intermediate.add(Merge([left, right], mode='sum'))
intermediate.add(Dense(nb_hidden, nb_hidden))
intermediate.add(Activation('relu'))
model = Sequential()
model.add(Merge([intermediate, righter], mode='sum'))
model.add(Dense(nb_hidden, nb_class))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test, X_test], y_test))
model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test, X_test], y_test))
model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)
loss = model.evaluate([X_train, X_train, X_train], y_train, verbose=0)
print('loss:', loss)
if loss > 0.6:
raise Exception('Score too low, learning issue.')
preds = model.predict([X_test, X_test, X_test], verbose=0)
classes = model.predict_classes([X_test, X_test, X_test], verbose=0)
probas = model.predict_proba([X_test, X_test, X_test], verbose=0)
print(model.get_config(verbose=1))
model.save_weights('temp.h5', overwrite=True)
model.load_weights('temp.h5')
nloss = model.evaluate([X_train, X_train, X_train], y_train, verbose=0)
print(nloss)
assert(loss == nloss)
def test_merge_overlap(self):
print('Test merge overlap')
left = Sequential()
left.add(Dense(input_dim, nb_hidden))
left.add(Activation('relu'))
model = Sequential()
model.add(Merge([left, left], mode='sum'))
model.add(Dense(nb_hidden, nb_class))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)
model.train_on_batch(X_train[:32], y_train[:32])
loss = model.evaluate(X_train, y_train, verbose=0)
print('loss:', loss)
if loss > 0.6:
raise Exception('Score too low, learning issue.')
preds = model.predict(X_test, verbose=0)
classes = model.predict_classes(X_test, verbose=0)
probas = model.predict_proba(X_test, verbose=0)
print(model.get_config(verbose=1))
model.save_weights('temp.h5', overwrite=True)
model.load_weights('temp.h5')
nloss = model.evaluate(X_train, y_train, verbose=0)
print(nloss)
assert(loss == nloss)
if __name__ == '__main__':
print('Test Sequential model')
unittest.main()
|
mit
|
product-owner/2015scrum
|
static/Brython3.1.1-20150328-091302/Lib/unittest/case.py
|
743
|
48873
|
"""Test case implementation"""
import sys
import functools
import difflib
import pprint
import re
import warnings
import collections
from . import result
from .util import (strclass, safe_repr, _count_diff_all_purpose,
_count_diff_hashable)
__unittest = True
DIFF_OMITTED = ('\nDiff is %s characters long. '
'Set self.maxDiff to None to see it.')
class SkipTest(Exception):
"""
Raise this exception in a test to skip it.
Usually you can use TestCase.skipTest() or one of the skipping decorators
instead of raising this directly.
"""
class _ExpectedFailure(Exception):
"""
Raise this when a test is expected to fail.
This is an implementation detail.
"""
def __init__(self, exc_info):
super(_ExpectedFailure, self).__init__()
self.exc_info = exc_info
class _UnexpectedSuccess(Exception):
"""
The test was supposed to fail, but it didn't!
"""
class _Outcome(object):
def __init__(self):
self.success = True
self.skipped = None
self.unexpectedSuccess = None
self.expectedFailure = None
self.errors = []
self.failures = []
def _id(obj):
return obj
def skip(reason):
"""
Unconditionally skip a test.
"""
def decorator(test_item):
if not isinstance(test_item, type):
@functools.wraps(test_item)
def skip_wrapper(*args, **kwargs):
raise SkipTest(reason)
test_item = skip_wrapper
test_item.__unittest_skip__ = True
test_item.__unittest_skip_why__ = reason
return test_item
return decorator
def skipIf(condition, reason):
"""
Skip a test if the condition is true.
"""
if condition:
return skip(reason)
return _id
def skipUnless(condition, reason):
"""
Skip a test unless the condition is true.
"""
if not condition:
return skip(reason)
return _id
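# Typical use of the skip decorators (illustrative test name and condition):
#
#   @skipUnless(sys.platform.startswith('linux'), 'requires Linux')
#   def test_epoll(self):
#       ...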
def expectedFailure(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception:
raise _ExpectedFailure(sys.exc_info())
raise _UnexpectedSuccess
return wrapper
class _AssertRaisesBaseContext(object):
def __init__(self, expected, test_case, callable_obj=None,
expected_regex=None):
self.expected = expected
self.test_case = test_case
if callable_obj is not None:
try:
self.obj_name = callable_obj.__name__
except AttributeError:
self.obj_name = str(callable_obj)
else:
self.obj_name = None
if isinstance(expected_regex, (bytes, str)):
expected_regex = re.compile(expected_regex)
self.expected_regex = expected_regex
self.msg = None
def _raiseFailure(self, standardMsg):
msg = self.test_case._formatMessage(self.msg, standardMsg)
raise self.test_case.failureException(msg)
def handle(self, name, callable_obj, args, kwargs):
"""
If callable_obj is None, assertRaises/Warns is being used as a
context manager, so check for a 'msg' kwarg and return self.
If callable_obj is not None, call it passing args and kwargs.
"""
if callable_obj is None:
self.msg = kwargs.pop('msg', None)
return self
with self:
callable_obj(*args, **kwargs)
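# Both call styles funnel through handle() above (illustrative):
#
#   self.assertRaises(ValueError, int, 'x')           # callable form
#   with self.assertRaises(ValueError): int('x')      # context-manager form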
class _AssertRaisesContext(_AssertRaisesBaseContext):
"""A context manager used to implement TestCase.assertRaises* methods."""
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
if self.obj_name:
self._raiseFailure("{} not raised by {}".format(exc_name,
self.obj_name))
else:
self._raiseFailure("{} not raised".format(exc_name))
if not issubclass(exc_type, self.expected):
# let unexpected exceptions pass through
return False
# store exception, without traceback, for later retrieval
self.exception = exc_value.with_traceback(None)
if self.expected_regex is None:
return True
expected_regex = self.expected_regex
if not expected_regex.search(str(exc_value)):
self._raiseFailure('"{}" does not match "{}"'.format(
expected_regex.pattern, str(exc_value)))
return True
class _AssertWarnsContext(_AssertRaisesBaseContext):
"""A context manager used to implement TestCase.assertWarns* methods."""
def __enter__(self):
# The __warningregistry__'s need to be in a pristine state for tests
# to work properly.
for v in sys.modules.values():
if getattr(v, '__warningregistry__', None):
v.__warningregistry__ = {}
self.warnings_manager = warnings.catch_warnings(record=True)
self.warnings = self.warnings_manager.__enter__()
warnings.simplefilter("always", self.expected)
return self
def __exit__(self, exc_type, exc_value, tb):
self.warnings_manager.__exit__(exc_type, exc_value, tb)
if exc_type is not None:
# let unexpected exceptions pass through
return
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
first_matching = None
for m in self.warnings:
w = m.message
if not isinstance(w, self.expected):
continue
if first_matching is None:
first_matching = w
if (self.expected_regex is not None and
not self.expected_regex.search(str(w))):
continue
# store warning for later retrieval
self.warning = w
self.filename = m.filename
self.lineno = m.lineno
return
# Now we simply try to choose a helpful failure message
if first_matching is not None:
self._raiseFailure('"{}" does not match "{}"'.format(
self.expected_regex.pattern, str(first_matching)))
if self.obj_name:
self._raiseFailure("{} not triggered by {}".format(exc_name,
self.obj_name))
else:
self._raiseFailure("{} not triggered".format(exc_name))
class TestCase(object):
"""A class whose instances are single test cases.
By default, the test code itself should be placed in a method named
'runTest'.
If the fixture may be used for many test cases, create as
many test methods as are needed. When instantiating such a TestCase
subclass, specify in the constructor arguments the name of the test method
that the instance is to execute.
Test authors should subclass TestCase for their own tests. Construction
and deconstruction of the test's environment ('fixture') can be
implemented by overriding the 'setUp' and 'tearDown' methods respectively.
If it is necessary to override the __init__ method, the base class
__init__ method must always be called. It is important that subclasses
should not change the signature of their __init__ method, since instances
of the classes are instantiated automatically by parts of the framework
in order to be run.
When subclassing TestCase, you can set these attributes:
* failureException: determines which exception will be raised when
the instance's assertion methods fail; test methods raising this
exception will be deemed to have 'failed' rather than 'errored'.
* longMessage: determines whether long messages (including repr of
objects used in assert methods) will be printed on failure in *addition*
to any explicit message passed.
* maxDiff: sets the maximum length of a diff in failure messages
by assert methods using difflib. It is looked up as an instance
attribute so can be configured by individual tests if required.
"""
failureException = AssertionError
longMessage = True
maxDiff = 80*8
# If a string is longer than _diffThreshold, use normal comparison instead
# of difflib. See #11763.
_diffThreshold = 2**16
# Attribute used by TestSuite for classSetUp
_classSetupFailed = False
def __init__(self, methodName='runTest'):
"""Create an instance of the class that will use the named test
method when executed. Raises a ValueError if the instance does
not have a method with the specified name.
"""
self._testMethodName = methodName
self._outcomeForDoCleanups = None
self._testMethodDoc = 'No test'
try:
testMethod = getattr(self, methodName)
except AttributeError:
if methodName != 'runTest':
# we allow instantiation with no explicit method name
# but not an *incorrect* or missing method name
raise ValueError("no such test method in %s: %s" %
(self.__class__, methodName))
else:
self._testMethodDoc = testMethod.__doc__
self._cleanups = []
# Map types to custom assertEqual functions that will compare
# instances of said type in more detail to generate a more useful
# error message.
self._type_equality_funcs = {}
self.addTypeEqualityFunc(dict, 'assertDictEqual')
self.addTypeEqualityFunc(list, 'assertListEqual')
self.addTypeEqualityFunc(tuple, 'assertTupleEqual')
self.addTypeEqualityFunc(set, 'assertSetEqual')
self.addTypeEqualityFunc(frozenset, 'assertSetEqual')
self.addTypeEqualityFunc(str, 'assertMultiLineEqual')
def addTypeEqualityFunc(self, typeobj, function):
"""Add a type specific assertEqual style function to compare a type.
This method is for use by TestCase subclasses that need to register
their own type equality functions to provide nicer error messages.
Args:
typeobj: The data type to call this function on when both values
are of the same type in assertEqual().
function: The callable taking two arguments and an optional
msg= argument that raises self.failureException with a
useful error message when the two arguments are not equal.
"""
self._type_equality_funcs[typeobj] = function
def addCleanup(self, function, *args, **kwargs):
"""Add a function, with arguments, to be called when the test is
completed. Functions added are called on a LIFO basis and are
called after tearDown on test failure or success.
Cleanup items are called even if setUp fails (unlike tearDown)."""
self._cleanups.append((function, args, kwargs))
def setUp(self):
"Hook method for setting up the test fixture before exercising it."
pass
def tearDown(self):
"Hook method for deconstructing the test fixture after testing it."
pass
@classmethod
def setUpClass(cls):
"Hook method for setting up class fixture before running tests in the class."
@classmethod
def tearDownClass(cls):
"Hook method for deconstructing the class fixture after running all tests in the class."
def countTestCases(self):
return 1
def defaultTestResult(self):
return result.TestResult()
def shortDescription(self):
"""Returns a one-line description of the test, or None if no
description has been provided.
The default implementation of this method returns the first line of
the specified test method's docstring.
"""
doc = self._testMethodDoc
return doc and doc.split("\n")[0].strip() or None
def id(self):
return "%s.%s" % (strclass(self.__class__), self._testMethodName)
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return self._testMethodName == other._testMethodName
def __hash__(self):
return hash((type(self), self._testMethodName))
def __str__(self):
return "%s (%s)" % (self._testMethodName, strclass(self.__class__))
def __repr__(self):
return "<%s testMethod=%s>" % \
(strclass(self.__class__), self._testMethodName)
def _addSkip(self, result, reason):
addSkip = getattr(result, 'addSkip', None)
if addSkip is not None:
addSkip(self, reason)
else:
warnings.warn("TestResult has no addSkip method, skips not reported",
RuntimeWarning, 2)
result.addSuccess(self)
def _executeTestPart(self, function, outcome, isTest=False):
try:
function()
except KeyboardInterrupt:
raise
except SkipTest as e:
outcome.success = False
outcome.skipped = str(e)
except _UnexpectedSuccess:
exc_info = sys.exc_info()
outcome.success = False
if isTest:
outcome.unexpectedSuccess = exc_info
else:
outcome.errors.append(exc_info)
except _ExpectedFailure:
outcome.success = False
exc_info = sys.exc_info()
if isTest:
outcome.expectedFailure = exc_info
else:
outcome.errors.append(exc_info)
except self.failureException:
outcome.success = False
outcome.failures.append(sys.exc_info())
except:
outcome.success = False
outcome.errors.append(sys.exc_info())
def run(self, result=None):
orig_result = result
if result is None:
result = self.defaultTestResult()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
result.startTest(self)
testMethod = getattr(self, self._testMethodName)
if (getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False)):
# If the class or method was skipped.
try:
skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
or getattr(testMethod, '__unittest_skip_why__', ''))
self._addSkip(result, skip_why)
finally:
result.stopTest(self)
return
try:
outcome = _Outcome()
self._outcomeForDoCleanups = outcome
self._executeTestPart(self.setUp, outcome)
if outcome.success:
self._executeTestPart(testMethod, outcome, isTest=True)
self._executeTestPart(self.tearDown, outcome)
self.doCleanups()
if outcome.success:
result.addSuccess(self)
else:
if outcome.skipped is not None:
self._addSkip(result, outcome.skipped)
for exc_info in outcome.errors:
result.addError(self, exc_info)
for exc_info in outcome.failures:
result.addFailure(self, exc_info)
if outcome.unexpectedSuccess is not None:
addUnexpectedSuccess = getattr(result, 'addUnexpectedSuccess', None)
if addUnexpectedSuccess is not None:
addUnexpectedSuccess(self)
else:
warnings.warn("TestResult has no addUnexpectedSuccess method, reporting as failures",
RuntimeWarning)
result.addFailure(self, outcome.unexpectedSuccess)
if outcome.expectedFailure is not None:
addExpectedFailure = getattr(result, 'addExpectedFailure', None)
if addExpectedFailure is not None:
addExpectedFailure(self, outcome.expectedFailure)
else:
warnings.warn("TestResult has no addExpectedFailure method, reporting as passes",
RuntimeWarning)
result.addSuccess(self)
return result
finally:
result.stopTest(self)
if orig_result is None:
stopTestRun = getattr(result, 'stopTestRun', None)
if stopTestRun is not None:
stopTestRun()
def doCleanups(self):
"""Execute all cleanup functions. Normally called for you after
tearDown."""
outcome = self._outcomeForDoCleanups or _Outcome()
while self._cleanups:
function, args, kwargs = self._cleanups.pop()
part = lambda: function(*args, **kwargs)
self._executeTestPart(part, outcome)
# return this for backwards compatibility
        # even though we no longer use it internally
return outcome.success
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
def debug(self):
"""Run the test without collecting errors in a TestResult"""
self.setUp()
getattr(self, self._testMethodName)()
self.tearDown()
while self._cleanups:
function, args, kwargs = self._cleanups.pop(-1)
function(*args, **kwargs)
def skipTest(self, reason):
"""Skip this test."""
raise SkipTest(reason)
def fail(self, msg=None):
"""Fail immediately, with the given message."""
raise self.failureException(msg)
def assertFalse(self, expr, msg=None):
"""Check that the expression is false."""
if expr:
msg = self._formatMessage(msg, "%s is not false" % safe_repr(expr))
raise self.failureException(msg)
def assertTrue(self, expr, msg=None):
"""Check that the expression is true."""
if not expr:
msg = self._formatMessage(msg, "%s is not true" % safe_repr(expr))
raise self.failureException(msg)
def _formatMessage(self, msg, standardMsg):
"""Honour the longMessage attribute when generating failure messages.
If longMessage is False this means:
* Use only an explicit message if it is provided
* Otherwise use the standard message for the assert
If longMessage is True:
* Use the standard message
* If an explicit message is provided, plus ' : ' and the explicit message
"""
if not self.longMessage:
return msg or standardMsg
if msg is None:
return standardMsg
try:
# don't switch to '{}' formatting in Python 2.X
# it changes the way unicode input is handled
return '%s : %s' % (standardMsg, msg)
except UnicodeDecodeError:
return '%s : %s' % (safe_repr(standardMsg), safe_repr(msg))
def assertRaises(self, excClass, callableObj=None, *args, **kwargs):
"""Fail unless an exception of class excClass is raised
by callableObj when invoked with arguments args and keyword
arguments kwargs. If a different type of exception is
raised, it will not be caught, and the test case will be
deemed to have suffered an error, exactly as for an
unexpected exception.
If called with callableObj omitted or None, will return a
context object used like this::
with self.assertRaises(SomeException):
do_something()
An optional keyword argument 'msg' can be provided when assertRaises
is used as a context object.
The context manager keeps a reference to the exception as
the 'exception' attribute. This allows you to inspect the
exception after the assertion::
with self.assertRaises(SomeException) as cm:
do_something()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, 3)
"""
context = _AssertRaisesContext(excClass, self, callableObj)
return context.handle('assertRaises', callableObj, args, kwargs)
def assertWarns(self, expected_warning, callable_obj=None, *args, **kwargs):
"""Fail unless a warning of class warnClass is triggered
by callable_obj when invoked with arguments args and keyword
arguments kwargs. If a different type of warning is
triggered, it will not be handled: depending on the other
warning filtering rules in effect, it might be silenced, printed
out, or raised as an exception.
If called with callable_obj omitted or None, will return a
context object used like this::
with self.assertWarns(SomeWarning):
do_something()
An optional keyword argument 'msg' can be provided when assertWarns
is used as a context object.
The context manager keeps a reference to the first matching
warning as the 'warning' attribute; similarly, the 'filename'
and 'lineno' attributes give you information about the line
of Python code from which the warning was triggered.
This allows you to inspect the warning after the assertion::
with self.assertWarns(SomeWarning) as cm:
do_something()
the_warning = cm.warning
self.assertEqual(the_warning.some_attribute, 147)
"""
context = _AssertWarnsContext(expected_warning, self, callable_obj)
return context.handle('assertWarns', callable_obj, args, kwargs)
def _getAssertEqualityFunc(self, first, second):
"""Get a detailed comparison function for the types of the two args.
Returns: A callable accepting (first, second, msg=None) that will
raise a failure exception if first != second with a useful human
readable error message for those types.
"""
#
# NOTE(gregory.p.smith): I considered isinstance(first, type(second))
# and vice versa. I opted for the conservative approach in case
# subclasses are not intended to be compared in detail to their super
# class instances using a type equality func. This means testing
# subtypes won't automagically use the detailed comparison. Callers
# should use their type specific assertSpamEqual method to compare
# subclasses if the detailed comparison is desired and appropriate.
# See the discussion in http://bugs.python.org/issue2578.
#
if type(first) is type(second):
asserter = self._type_equality_funcs.get(type(first))
if asserter is not None:
if isinstance(asserter, str):
asserter = getattr(self, asserter)
return asserter
return self._baseAssertEqual
def _baseAssertEqual(self, first, second, msg=None):
"""The default assertEqual implementation, not type specific."""
if not first == second:
standardMsg = '%s != %s' % (safe_repr(first), safe_repr(second))
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertEqual(self, first, second, msg=None):
"""Fail if the two objects are unequal as determined by the '=='
operator.
"""
assertion_func = self._getAssertEqualityFunc(first, second)
assertion_func(first, second, msg=msg)
def assertNotEqual(self, first, second, msg=None):
"""Fail if the two objects are equal as determined by the '!='
operator.
"""
if not first != second:
msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first),
safe_repr(second)))
raise self.failureException(msg)
def assertAlmostEqual(self, first, second, places=None, msg=None,
delta=None):
"""Fail if the two objects are unequal as determined by their
difference rounded to the given number of decimal places
(default 7) and comparing to zero, or by comparing that the
        difference between the two objects is more than the given delta.
Note that decimal places (from zero) are usually not the same
        as significant digits (measured from the most significant digit).
If the two objects compare equal then they will automatically
compare almost equal.
"""
if first == second:
# shortcut
return
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if abs(first - second) <= delta:
return
standardMsg = '%s != %s within %s delta' % (safe_repr(first),
safe_repr(second),
safe_repr(delta))
else:
if places is None:
places = 7
if round(abs(second-first), places) == 0:
return
standardMsg = '%s != %s within %r places' % (safe_repr(first),
safe_repr(second),
places)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertNotAlmostEqual(self, first, second, places=None, msg=None,
delta=None):
"""Fail if the two objects are equal as determined by their
difference rounded to the given number of decimal places
(default 7) and comparing to zero, or by comparing that the
        difference between the two objects is less than the given delta.
Note that decimal places (from zero) are usually not the same
        as significant digits (measured from the most significant digit).
Objects that are equal automatically fail.
"""
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if not (first == second) and abs(first - second) > delta:
return
standardMsg = '%s == %s within %s delta' % (safe_repr(first),
safe_repr(second),
safe_repr(delta))
else:
if places is None:
places = 7
if not (first == second) and round(abs(second-first), places) != 0:
return
standardMsg = '%s == %s within %r places' % (safe_repr(first),
safe_repr(second),
places)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertSequenceEqual(self, seq1, seq2, msg=None, seq_type=None):
"""An equality assertion for ordered sequences (like lists and tuples).
For the purposes of this function, a valid ordered sequence type is one
which can be indexed, has a length, and has an equality operator.
Args:
seq1: The first sequence to compare.
seq2: The second sequence to compare.
seq_type: The expected datatype of the sequences, or None if no
datatype should be enforced.
msg: Optional message to use on failure instead of a list of
differences.
"""
if seq_type is not None:
seq_type_name = seq_type.__name__
if not isinstance(seq1, seq_type):
raise self.failureException('First sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq1)))
if not isinstance(seq2, seq_type):
raise self.failureException('Second sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq2)))
else:
seq_type_name = "sequence"
differing = None
try:
len1 = len(seq1)
except (TypeError, NotImplementedError):
differing = 'First %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
try:
len2 = len(seq2)
except (TypeError, NotImplementedError):
differing = 'Second %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
if seq1 == seq2:
return
seq1_repr = safe_repr(seq1)
seq2_repr = safe_repr(seq2)
if len(seq1_repr) > 30:
seq1_repr = seq1_repr[:30] + '...'
if len(seq2_repr) > 30:
seq2_repr = seq2_repr[:30] + '...'
elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr)
differing = '%ss differ: %s != %s\n' % elements
for i in range(min(len1, len2)):
try:
item1 = seq1[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of first %s\n' %
(i, seq_type_name))
break
try:
item2 = seq2[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of second %s\n' %
(i, seq_type_name))
break
if item1 != item2:
differing += ('\nFirst differing element %d:\n%s\n%s\n' %
(i, item1, item2))
break
else:
if (len1 == len2 and seq_type is None and
type(seq1) != type(seq2)):
# The sequences are the same, but have differing types.
return
if len1 > len2:
differing += ('\nFirst %s contains %d additional '
'elements.\n' % (seq_type_name, len1 - len2))
try:
differing += ('First extra element %d:\n%s\n' %
(len2, seq1[len2]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of first %s\n' % (len2, seq_type_name))
elif len1 < len2:
differing += ('\nSecond %s contains %d additional '
'elements.\n' % (seq_type_name, len2 - len1))
try:
differing += ('First extra element %d:\n%s\n' %
(len1, seq2[len1]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of second %s\n' % (len1, seq_type_name))
standardMsg = differing
diffMsg = '\n' + '\n'.join(
difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
standardMsg = self._truncateMessage(standardMsg, diffMsg)
msg = self._formatMessage(msg, standardMsg)
self.fail(msg)
def _truncateMessage(self, message, diff):
max_diff = self.maxDiff
if max_diff is None or len(diff) <= max_diff:
return message + diff
return message + (DIFF_OMITTED % len(diff))
def assertListEqual(self, list1, list2, msg=None):
"""A list-specific equality assertion.
Args:
list1: The first list to compare.
list2: The second list to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(list1, list2, msg, seq_type=list)
def assertTupleEqual(self, tuple1, tuple2, msg=None):
"""A tuple-specific equality assertion.
Args:
tuple1: The first tuple to compare.
tuple2: The second tuple to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(tuple1, tuple2, msg, seq_type=tuple)
def assertSetEqual(self, set1, set2, msg=None):
"""A set-specific equality assertion.
Args:
set1: The first set to compare.
set2: The second set to compare.
msg: Optional message to use on failure instead of a list of
differences.
assertSetEqual uses ducktyping to support different types of sets, and
is optimized for sets specifically (parameters must support a
difference method).
"""
try:
difference1 = set1.difference(set2)
except TypeError as e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError as e:
self.fail('first argument does not support set difference: %s' % e)
try:
difference2 = set2.difference(set1)
except TypeError as e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError as e:
self.fail('second argument does not support set difference: %s' % e)
if not (difference1 or difference2):
return
lines = []
if difference1:
lines.append('Items in the first set but not the second:')
for item in difference1:
lines.append(repr(item))
if difference2:
lines.append('Items in the second set but not the first:')
for item in difference2:
lines.append(repr(item))
standardMsg = '\n'.join(lines)
self.fail(self._formatMessage(msg, standardMsg))
def assertIn(self, member, container, msg=None):
"""Just like self.assertTrue(a in b), but with a nicer default message."""
if member not in container:
standardMsg = '%s not found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIn(self, member, container, msg=None):
"""Just like self.assertTrue(a not in b), but with a nicer default message."""
if member in container:
standardMsg = '%s unexpectedly found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertIs(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is b), but with a nicer default message."""
if expr1 is not expr2:
standardMsg = '%s is not %s' % (safe_repr(expr1),
safe_repr(expr2))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNot(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is not b), but with a nicer default message."""
if expr1 is expr2:
standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictEqual(self, d1, d2, msg=None):
self.assertIsInstance(d1, dict, 'First argument is not a dictionary')
self.assertIsInstance(d2, dict, 'Second argument is not a dictionary')
if d1 != d2:
standardMsg = '%s != %s' % (safe_repr(d1, True), safe_repr(d2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
pprint.pformat(d1).splitlines(),
pprint.pformat(d2).splitlines())))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictContainsSubset(self, subset, dictionary, msg=None):
"""Checks whether dictionary is a superset of subset."""
warnings.warn('assertDictContainsSubset is deprecated',
DeprecationWarning)
missing = []
mismatched = []
for key, value in subset.items():
if key not in dictionary:
missing.append(key)
elif value != dictionary[key]:
mismatched.append('%s, expected: %s, actual: %s' %
(safe_repr(key), safe_repr(value),
safe_repr(dictionary[key])))
if not (missing or mismatched):
return
standardMsg = ''
if missing:
standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in
missing)
if mismatched:
if standardMsg:
standardMsg += '; '
standardMsg += 'Mismatched values: %s' % ','.join(mismatched)
self.fail(self._formatMessage(msg, standardMsg))
def assertCountEqual(self, first, second, msg=None):
"""An unordered sequence comparison asserting that the same elements,
regardless of order. If the same element occurs more than once,
it verifies that the elements occur the same number of times.
self.assertEqual(Counter(list(first)),
Counter(list(second)))
Example:
- [0, 1, 1] and [1, 0, 1] compare equal.
- [0, 0, 1] and [0, 1] compare unequal.
"""
first_seq, second_seq = list(first), list(second)
try:
first = collections.Counter(first_seq)
second = collections.Counter(second_seq)
except TypeError:
# Handle case with unhashable elements
differences = _count_diff_all_purpose(first_seq, second_seq)
else:
if first == second:
return
differences = _count_diff_hashable(first_seq, second_seq)
if differences:
standardMsg = 'Element counts were not equal:\n'
lines = ['First has %d, Second has %d: %r' % diff for diff in differences]
diffMsg = '\n'.join(lines)
standardMsg = self._truncateMessage(standardMsg, diffMsg)
msg = self._formatMessage(msg, standardMsg)
self.fail(msg)
def assertMultiLineEqual(self, first, second, msg=None):
"""Assert that two multi-line strings are equal."""
self.assertIsInstance(first, str, 'First argument is not a string')
self.assertIsInstance(second, str, 'Second argument is not a string')
if first != second:
# don't use difflib if the strings are too long
if (len(first) > self._diffThreshold or
len(second) > self._diffThreshold):
self._baseAssertEqual(first, second, msg)
firstlines = first.splitlines(keepends=True)
secondlines = second.splitlines(keepends=True)
if len(firstlines) == 1 and first.strip('\r\n') == first:
firstlines = [first + '\n']
secondlines = [second + '\n']
standardMsg = '%s != %s' % (safe_repr(first, True),
safe_repr(second, True))
diff = '\n' + ''.join(difflib.ndiff(firstlines, secondlines))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertLess(self, a, b, msg=None):
"""Just like self.assertTrue(a < b), but with a nicer default message."""
if not a < b:
standardMsg = '%s not less than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertLessEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a <= b), but with a nicer default message."""
if not a <= b:
standardMsg = '%s not less than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreater(self, a, b, msg=None):
"""Just like self.assertTrue(a > b), but with a nicer default message."""
if not a > b:
standardMsg = '%s not greater than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreaterEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a >= b), but with a nicer default message."""
if not a >= b:
standardMsg = '%s not greater than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNone(self, obj, msg=None):
"""Same as self.assertTrue(obj is None), with a nicer default message."""
if obj is not None:
standardMsg = '%s is not None' % (safe_repr(obj),)
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNotNone(self, obj, msg=None):
"""Included for symmetry with assertIsNone."""
if obj is None:
standardMsg = 'unexpectedly None'
self.fail(self._formatMessage(msg, standardMsg))
def assertIsInstance(self, obj, cls, msg=None):
"""Same as self.assertTrue(isinstance(obj, cls)), with a nicer
default message."""
if not isinstance(obj, cls):
standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIsInstance(self, obj, cls, msg=None):
"""Included for symmetry with assertIsInstance."""
if isinstance(obj, cls):
standardMsg = '%s is an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertRaisesRegex(self, expected_exception, expected_regex,
callable_obj=None, *args, **kwargs):
"""Asserts that the message in a raised exception matches a regex.
Args:
expected_exception: Exception class expected to be raised.
expected_regex: Regex (re pattern object or string) expected
to be found in error message.
callable_obj: Function to be called.
msg: Optional message used in case of failure. Can only be used
when assertRaisesRegex is used as a context manager.
args: Extra args.
kwargs: Extra kwargs.
"""
context = _AssertRaisesContext(expected_exception, self, callable_obj,
expected_regex)
return context.handle('assertRaisesRegex', callable_obj, args, kwargs)
def assertWarnsRegex(self, expected_warning, expected_regex,
callable_obj=None, *args, **kwargs):
"""Asserts that the message in a triggered warning matches a regexp.
Basic functioning is similar to assertWarns() with the addition
that only warnings whose messages also match the regular expression
are considered successful matches.
Args:
expected_warning: Warning class expected to be triggered.
expected_regex: Regex (re pattern object or string) expected
to be found in error message.
callable_obj: Function to be called.
msg: Optional message used in case of failure. Can only be used
when assertWarnsRegex is used as a context manager.
args: Extra args.
kwargs: Extra kwargs.
"""
context = _AssertWarnsContext(expected_warning, self, callable_obj,
expected_regex)
return context.handle('assertWarnsRegex', callable_obj, args, kwargs)
def assertRegex(self, text, expected_regex, msg=None):
"""Fail the test unless the text matches the regular expression."""
if isinstance(expected_regex, (str, bytes)):
assert expected_regex, "expected_regex must not be empty."
expected_regex = re.compile(expected_regex)
if not expected_regex.search(text):
msg = msg or "Regex didn't match"
msg = '%s: %r not found in %r' % (msg, expected_regex.pattern, text)
raise self.failureException(msg)
def assertNotRegex(self, text, unexpected_regex, msg=None):
"""Fail the test if the text matches the regular expression."""
if isinstance(unexpected_regex, (str, bytes)):
unexpected_regex = re.compile(unexpected_regex)
match = unexpected_regex.search(text)
if match:
msg = msg or "Regex matched"
msg = '%s: %r matches %r in %r' % (msg,
text[match.start():match.end()],
unexpected_regex.pattern,
text)
raise self.failureException(msg)
def _deprecate(original_func):
def deprecated_func(*args, **kwargs):
warnings.warn(
'Please use {0} instead.'.format(original_func.__name__),
DeprecationWarning, 2)
return original_func(*args, **kwargs)
return deprecated_func
# see #9424
failUnlessEqual = assertEquals = _deprecate(assertEqual)
failIfEqual = assertNotEquals = _deprecate(assertNotEqual)
failUnlessAlmostEqual = assertAlmostEquals = _deprecate(assertAlmostEqual)
failIfAlmostEqual = assertNotAlmostEquals = _deprecate(assertNotAlmostEqual)
failUnless = assert_ = _deprecate(assertTrue)
failUnlessRaises = _deprecate(assertRaises)
failIf = _deprecate(assertFalse)
assertRaisesRegexp = _deprecate(assertRaisesRegex)
assertRegexpMatches = _deprecate(assertRegex)
class FunctionTestCase(TestCase):
"""A test case that wraps a test function.
This is useful for slipping pre-existing test functions into the
unittest framework. Optionally, set-up and tidy-up functions can be
supplied. As with TestCase, the tidy-up ('tearDown') function will
always be called if the set-up ('setUp') function ran successfully.
"""
def __init__(self, testFunc, setUp=None, tearDown=None, description=None):
super(FunctionTestCase, self).__init__()
self._setUpFunc = setUp
self._tearDownFunc = tearDown
self._testFunc = testFunc
self._description = description
def setUp(self):
if self._setUpFunc is not None:
self._setUpFunc()
def tearDown(self):
if self._tearDownFunc is not None:
self._tearDownFunc()
def runTest(self):
self._testFunc()
def id(self):
return self._testFunc.__name__
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._setUpFunc == other._setUpFunc and \
self._tearDownFunc == other._tearDownFunc and \
self._testFunc == other._testFunc and \
self._description == other._description
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((type(self), self._setUpFunc, self._tearDownFunc,
self._testFunc, self._description))
def __str__(self):
return "%s (%s)" % (strclass(self.__class__),
self._testFunc.__name__)
def __repr__(self):
return "<%s tec=%s>" % (strclass(self.__class__),
self._testFunc)
def shortDescription(self):
if self._description is not None:
return self._description
doc = self._testFunc.__doc__
return doc and doc.split("\n")[0].strip() or None
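# --- usage sketch (illustrative; not part of the original module) ---
# A minimal, hypothetical example of driving TestCase directly, exercising a
# few of the assertions defined above. The class and test names are invented;
# the __main__ guard keeps importing this module side-effect free.
if __name__ == '__main__':
    class _DemoTests(TestCase):
        def test_almost_equal(self):
            # delta= and places= are mutually exclusive (see assertAlmostEqual)
            self.assertAlmostEqual(100.0, 100.4, delta=0.5)
        def test_raises(self):
            with self.assertRaisesRegex(ValueError, 'invalid literal'):
                int('not a number')
    for name in ('test_almost_equal', 'test_raises'):
        res = _DemoTests(name).run()
        print('%s: %s' % (name, 'ok' if res.wasSuccessful() else 'FAILED'))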
|
gpl-3.0
|
JackDanger/sentry
|
src/sentry/api/endpoints/useravatar.py
|
3
|
1971
|
from __future__ import absolute_import
from rest_framework import status
from rest_framework import serializers
from rest_framework.response import Response
from sentry.api.bases.user import UserEndpoint
from sentry.api.fields import AvatarField
from sentry.api.serializers import serialize
from sentry.models import UserAvatar
class UserAvatarSerializer(serializers.Serializer):
avatar_photo = AvatarField(required=False)
avatar_type = serializers.ChoiceField(choices=(
('upload', 'upload'),
('gravatar', 'gravatar'),
('letter_avatar', 'letter_avatar'),
))
def validate(self, attrs):
attrs = super(UserAvatarSerializer, self).validate(attrs)
if attrs.get('avatar_type') == 'upload':
has_existing_file = UserAvatar.objects.filter(
user=self.context['user'],
file__isnull=False,
).exists()
if not has_existing_file and not attrs.get('avatar_photo'):
raise serializers.ValidationError({
'avatar_type': 'Cannot set avatar_type to upload without avatar_photo',
})
return attrs
class UserAvatarEndpoint(UserEndpoint):
def get(self, request, user):
return Response(serialize(user, request.user))
def put(self, request, user):
if user != request.user:
return Response(status=status.HTTP_403_FORBIDDEN)
serializer = UserAvatarSerializer(
data=request.DATA,
context={'user': user},
)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
result = serializer.object
UserAvatar.save_avatar(
relation={'user': user},
type=result['avatar_type'],
avatar=result.get('avatar_photo'),
filename='{}.png'.format(user.id),
)
return Response(serialize(user, request.user))
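# --- usage sketch (illustrative; not part of the original module) ---
# A hypothetical PUT against this endpoint; per UserAvatarSerializer above,
# setting avatar_type to "upload" requires avatar_photo unless the user
# already has a file on record (the URL shown is an assumption):
#
#   PUT /api/0/users/me/avatar/
#   {"avatar_type": "gravatar"}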
|
bsd-3-clause
|
fsalamero/pilas
|
pilasengine/controles/__init__.py
|
6
|
2403
|
# -*- encoding: utf-8 -*-
# pilas engine: an engine for making video games
#
# Copyright 2010-2014 - Hugo Ruscitti
# License: LGPLv3 (see http://www.gnu.org/licenses/lgpl.html)
#
# Website - http://www.pilas-engine.com.ar
from PyQt4 import QtCore
from pilasengine.controles import simbolos
from pilasengine.controles import control
TECLAS = {
QtCore.Qt.Key_Left: simbolos.IZQUIERDA,
QtCore.Qt.Key_Right: simbolos.DERECHA,
QtCore.Qt.Key_Up: simbolos.ARRIBA,
QtCore.Qt.Key_Down: simbolos.ABAJO,
QtCore.Qt.Key_Space: simbolos.ESPACIO,
QtCore.Qt.Key_Return: simbolos.SELECCION,
QtCore.Qt.Key_Shift: simbolos.SHIFT,
QtCore.Qt.Key_Control: simbolos.CTRL,
QtCore.Qt.Key_AltGr: simbolos.ALTGR,
QtCore.Qt.Key_Alt: simbolos.ALT,
QtCore.Qt.Key_CapsLock: simbolos.CAPSLOCK,
QtCore.Qt.Key_F1: simbolos.F1,
QtCore.Qt.Key_F2: simbolos.F2,
QtCore.Qt.Key_F3: simbolos.F3,
QtCore.Qt.Key_F4: simbolos.F4,
QtCore.Qt.Key_F5: simbolos.F5,
QtCore.Qt.Key_F6: simbolos.F6,
QtCore.Qt.Key_F7: simbolos.F7,
QtCore.Qt.Key_F8: simbolos.F8,
QtCore.Qt.Key_F9: simbolos.F9,
QtCore.Qt.Key_F10: simbolos.F10,
QtCore.Qt.Key_F11: simbolos.F11,
QtCore.Qt.Key_F12: simbolos.F12,
QtCore.Qt.Key_A: simbolos.a,
QtCore.Qt.Key_B: simbolos.b,
QtCore.Qt.Key_C: simbolos.c,
QtCore.Qt.Key_D: simbolos.d,
QtCore.Qt.Key_E: simbolos.e,
QtCore.Qt.Key_F: simbolos.f,
QtCore.Qt.Key_G: simbolos.g,
QtCore.Qt.Key_H: simbolos.h,
QtCore.Qt.Key_I: simbolos.i,
QtCore.Qt.Key_J: simbolos.j,
QtCore.Qt.Key_K: simbolos.k,
QtCore.Qt.Key_L: simbolos.l,
QtCore.Qt.Key_M: simbolos.m,
QtCore.Qt.Key_N: simbolos.n,
QtCore.Qt.Key_O: simbolos.o,
QtCore.Qt.Key_P: simbolos.p,
QtCore.Qt.Key_Q: simbolos.q,
QtCore.Qt.Key_R: simbolos.r,
QtCore.Qt.Key_S: simbolos.s,
QtCore.Qt.Key_T: simbolos.t,
QtCore.Qt.Key_U: simbolos.u,
QtCore.Qt.Key_V: simbolos.v,
QtCore.Qt.Key_W: simbolos.w,
QtCore.Qt.Key_X: simbolos.x,
QtCore.Qt.Key_Y: simbolos.y,
QtCore.Qt.Key_Z: simbolos.z,
}
class Controles(object):
def __init__(self, pilas):
self.pilas = pilas
def Control(self, escena, mapa_teclado=None):
return control.Control(escena, mapa_teclado)
@staticmethod
def obtener_codigo_de_tecla_normalizado(tecla_qt):
return TECLAS.get(tecla_qt, tecla_qt)
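# --- usage sketch (illustrative; not part of the original module) ---
# Normalizing a Qt key code into a pilas symbol; unknown codes pass through
# unchanged thanks to TECLAS.get(tecla_qt, tecla_qt).
if __name__ == '__main__':
    assert Controles.obtener_codigo_de_tecla_normalizado(
        QtCore.Qt.Key_Left) == simbolos.IZQUIERDA
    assert Controles.obtener_codigo_de_tecla_normalizado(12345) == 12345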
|
lgpl-3.0
|
vSlipenchuk/linux-aufs
|
tools/perf/scripts/python/futex-contention.py
|
1997
|
1508
|
# futex contention
# (c) 2010, Arnaldo Carvalho de Melo <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
thread_thislock = {}
thread_blocktime = {}
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
nr, uaddr, op, val, utime, uaddr2, val3):
cmd = op & FUTEX_CMD_MASK
if cmd != FUTEX_WAIT:
return # we don't care about originators of WAKE events
process_names[tid] = comm
thread_thislock[tid] = uaddr
thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
nr, ret):
if thread_blocktime.has_key(tid):
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
print "%s[%d] lock %x contended %d times, %d avg ns" % \
(process_names[tid], tid, lock, count, avg)
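# --- usage sketch (illustrative; not part of the original script) ---
# Record the futex syscall tracepoints system-wide, then replay the data
# through this script (event/option names assumed from standard perf usage):
#   perf record -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex -a -- sleep 10
#   perf script -s futex-contention.py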
|
gpl-2.0
|
project-generator/project_generator
|
project_generator/settings.py
|
4
|
2317
|
# Copyright 2014-2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from os.path import normpath, join, pardir, sep
class ProjectSettings:
PROJECT_ROOT = os.environ.get('PROJECT_GENERATOR_ROOT') or join(pardir, pardir)
DEFAULT_TOOL = os.environ.get('PROJECT_GENERATOR_DEFAULT_TOOL') or 'uvision'
DEFAULT_EXPORT_LOCATION_FORMAT = join('generated_projects', '{tool}_{project_name}')
DEFAULT_ROOT = os.getcwd()
def __init__(self):
""" This are default enviroment settings for build tools. To override,
define them in the projects.yaml file. """
self.paths = {}
self.templates = {}
self.paths['uvision'] = os.environ.get('UV4') or join('C:', sep,
'Keil', 'UV4', 'UV4.exe')
self.paths['iar'] = os.environ.get('IARBUILD') or join(
'C:', sep, 'Program Files (x86)',
'IAR Systems', 'Embedded Workbench 7.0',
'common', 'bin')
self.paths['gcc'] = os.environ.get('ARM_GCC_PATH') or ''
self.export_location_format = self.DEFAULT_EXPORT_LOCATION_FORMAT
self.root = os.getcwd()
def update(self, settings):
if settings:
if 'tools' in settings:
for k, v in settings['tools'].items():
if k in self.paths:
if 'path' in v.keys():
self.paths[k] = v['path'][0]
if 'template' in v.keys():
self.templates[k] = v['template']
if 'export_dir' in settings:
self.export_location_format = normpath(settings['export_dir'][0])
if 'root' in settings:
self.root = normpath(settings['root'][0])
def get_env_settings(self, env_set):
return self.paths[env_set]
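# --- usage sketch (illustrative; not part of the original module) ---
# Overriding a tool path and the export directory the way a parsed
# projects.yaml would, per update() above; the path below is hypothetical.
if __name__ == '__main__':
    settings = ProjectSettings()
    settings.update({
        'tools': {'gcc': {'path': ['/opt/gcc-arm/bin']}},
        'export_dir': ['build/{tool}_{project_name}'],
    })
    print(settings.get_env_settings('gcc'))  # -> /opt/gcc-arm/bin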
|
apache-2.0
|
blade-vec-4g/android_kernel_zte_msm8226
|
tools/perf/scripts/python/syscall-counts.py
|
11181
|
1522
|
# system call counts
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
|
gpl-2.0
|
BlindHunter/django
|
tests/gis_tests/geoapp/models.py
|
83
|
2546
|
from django.utils.encoding import python_2_unicode_compatible
from ..models import models
from ..utils import gisfield_may_be_null
@python_2_unicode_compatible
class NamedModel(models.Model):
name = models.CharField(max_length=30)
objects = models.GeoManager()
class Meta:
abstract = True
required_db_features = ['gis_enabled']
def __str__(self):
return self.name
class Country(NamedModel):
mpoly = models.MultiPolygonField() # SRID, by default, is 4326
class City(NamedModel):
point = models.PointField()
class Meta:
app_label = 'geoapp'
required_db_features = ['gis_enabled']
# This is an inherited model from City
class PennsylvaniaCity(City):
county = models.CharField(max_length=30)
founded = models.DateTimeField(null=True)
# TODO: This should be implicitly inherited.
objects = models.GeoManager()
class Meta:
app_label = 'geoapp'
required_db_features = ['gis_enabled']
class State(NamedModel):
poly = models.PolygonField(null=gisfield_may_be_null) # Allowing NULL geometries here.
class Meta:
app_label = 'geoapp'
required_db_features = ['gis_enabled']
class Track(NamedModel):
line = models.LineStringField()
class MultiFields(NamedModel):
city = models.ForeignKey(City, models.CASCADE)
point = models.PointField()
poly = models.PolygonField()
class Meta:
required_db_features = ['gis_enabled']
class UniqueTogetherModel(models.Model):
city = models.CharField(max_length=30)
point = models.PointField()
class Meta:
unique_together = ('city', 'point')
required_db_features = ['gis_enabled', 'supports_geometry_field_unique_index']
class Truth(models.Model):
val = models.BooleanField(default=False)
objects = models.GeoManager()
class Meta:
required_db_features = ['gis_enabled']
class Feature(NamedModel):
geom = models.GeometryField()
class MinusOneSRID(models.Model):
geom = models.PointField(srid=-1) # Minus one SRID.
objects = models.GeoManager()
class Meta:
required_db_features = ['gis_enabled']
class NonConcreteField(models.IntegerField):
def db_type(self, connection):
return None
def get_attname_column(self):
attname, column = super(NonConcreteField, self).get_attname_column()
return attname, None
class NonConcreteModel(NamedModel):
non_concrete = NonConcreteField()
point = models.PointField(geography=True)
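# --- usage sketch (illustrative; not part of the original module) ---
# With a GIS-enabled backend these models support spatial lookups, e.g.
# (the country name below is hypothetical):
#   country = Country.objects.get(name='New Zealand')
#   City.objects.filter(point__within=country.mpoly)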
|
bsd-3-clause
|
firebitsbr/raspberry_pwn
|
src/pentest/voiper/sulley/sulley/legos/ber.py
|
8
|
2229
|
########################################################################################################################
### ASN.1 / BER TYPES (http://luca.ntop.org/Teaching/Appunti/asn1.html)
########################################################################################################################
import struct
from sulley import blocks, primitives, sex
########################################################################################################################
class string (blocks.block):
'''
[0x04][0x84][dword length][string]
Where:
0x04 = string
0x84 = length is 4 bytes
'''
def __init__ (self, name, request, value, options={}):
blocks.block.__init__(self, name, request, None, None, None, None)
self.value = value
self.options = options
self.prefix = options.get("prefix", "\x04")
if not self.value:
raise sex.error("MISSING LEGO.ber_string DEFAULT VALUE")
str_block = blocks.block(name + "_STR", request)
str_block.push(primitives.string(self.value))
self.push(blocks.size(name + "_STR", request, endian=">", fuzzable=True))
self.push(str_block)
def render (self):
# let the parent do the initial render.
blocks.block.render(self)
self.rendered = self.prefix + "\x84" + self.rendered
return self.rendered
########################################################################################################################
class integer (blocks.block):
'''
[0x02][0x04][dword]
Where:
0x02 = integer
0x04 = integer length is 4 bytes
'''
def __init__ (self, name, request, value, options={}):
blocks.block.__init__(self, name, request, None, None, None, None)
self.value = value
self.options = options
if not self.value:
raise sex.error("MISSING LEGO.ber_integer DEFAULT VALUE")
self.push(primitives.dword(self.value, endian=">"))
def render (self):
# let the parent do the initial render.
blocks.block.render(self)
self.rendered = "\x02\x04" + self.rendered
return self.rendered
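# --- usage sketch (illustrative; not part of the original module) ---
# Both legos prepend their BER type tag when rendered, so the wire layout is:
#   string:  \x04 \x84 <4-byte big-endian length> <string bytes>
#   integer: \x02 \x04 <4-byte big-endian value>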
|
gpl-3.0
|
poljeff/odoo
|
addons/account_analytic_plans/__init__.py
|
445
|
1104
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_analytic_plans
import wizard
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
GoogleCloudPlatform/python-compat-runtime
|
appengine-compat/exported_appengine_sdk/google/appengine/runtime/__init__.py
|
8
|
1610
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Define the DeadlineExceededError exception."""
try:
BaseException
except NameError:
BaseException = Exception
class DeadlineExceededError(BaseException):
"""Exception raised when the request reaches its overall time limit.
This exception will be thrown by the original thread handling the request,
shortly after the request reaches its deadline. Since the exception is
asynchronously set on the thread by the App Engine runtime, it can appear
to originate from any line of code that happens to be executing at that
time.
If the application catches this exception and does not generate a response
very quickly afterwards, an error will be returned to the user and
the application instance may be terminated.
Not to be confused with runtime.apiproxy_errors.DeadlineExceededError.
That one is raised when individual API calls take too long.
"""
def __str__(self):
return ('The overall deadline for responding to the HTTP request '
'was exceeded.')
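# --- usage sketch (illustrative; not part of the original module) ---
# Request handlers typically catch this to emit a fast fallback response
# before the instance is terminated (handler and helper are hypothetical):
#
#   from google.appengine.runtime import DeadlineExceededError
#
#   def get(self):
#       try:
#           do_expensive_work()
#       except DeadlineExceededError:
#           self.response.set_status(503)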
|
apache-2.0
|
INL/MBMP-morphological-parser
|
mbmp/__init__.py
|
1
|
2461
|
## Python implementation of MBMA (Van den Bosch & Daelemans 1999)
## Copyright (C) 2011 Institute for Dutch Lexicology (INL)
## Author: Folgert Karsdorp, INL
## E-mail: <[email protected]>
## URL: <http://www.inl.nl/>
## For licence information, see LICENCE.TXT
"""
Memory-Based Morphological Parsing
This package consists of an implementation and extensions of Memory-Based
Morphological Analysis (MBMA) as described by Van den Bosch & Daelemans
(1999). MBMA is extended with a specialized CKY Parser that returns all
possible derivations for a given analysis of MBMA. In addition to an
implementation of 'standard' MBMA, this package contains a class to perform
morphological segmentation (MBMS), i.e. the segmentation of words into
morphemes. Furthermore, the package provides a Memory-Based Morphological
Chunker (MBMC), which is used to analyze words into hierarchical
structures in an alternative way. Lastly, the package can be used to
lemmatize word forms (MBLEM) and to assign part-of-speech tags to lemmas
(MBPT).
All classes (MBMA, MBMS, MBMC, MBLEM, MBPT) extend the
abstract MBClassifier interface. The Memory-Based Classifier sets up an
instance of TimblServer and connects to this server via a client (see
TimblClient).
"""
#----------------------------------------------------------------------------
# METADATA
#----------------------------------------------------------------------------
__author__ = 'Folgert Karsdorp, INL'
__licence__ = 'see LICENCE.TXT'
__version__ = '0.4'
__maintainer__ = 'INL'
__maintainer_email__ = '[email protected]'
__copyright__ = 'Copyright (C) 2011 INL'
#----------------------------------------------------------------------------
# TOP-LEVEL MODULES
#----------------------------------------------------------------------------
# Import top-level functionality into top-level namespace
from mbmp.datatypes import Morpheme
from mbmp.config import *
from mbmp.util import xml
from mbmp.mbmp_exceptions import ConnectionError, ServerConnectionError
from mbmp.client import TimblClient
#----------------------------------------------------------------------------
# PACKAGES
#----------------------------------------------------------------------------
# Processing packages -- these define __all__ carefully.
import mbmp.classifiers
import mbmp.server
import mbmp.parse
import mbmp.train
from mbmp.classifiers import *
from mbmp.server import *
from mbmp.parse import *
from mbmp.train import *
|
apache-2.0
|
motion2015/a3
|
lms/djangoapps/certificates/migrations/0015_adding_mode_for_verified_certs.py
|
114
|
6231
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'GeneratedCertificate.mode'
db.add_column('certificates_generatedcertificate', 'mode',
self.gf('django.db.models.fields.CharField')(default='honor', max_length=32),
keep_default=False)
def backwards(self, orm):
# Deleting field 'GeneratedCertificate.mode'
db.delete_column('certificates_generatedcertificate', 'mode')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'certificates.certificatewhitelist': {
'Meta': {'object_name': 'CertificateWhitelist'},
'course_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'whitelist': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'certificates.generatedcertificate': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'GeneratedCertificate'},
'course_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
'distinction': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'download_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'download_uuid': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'error_reason': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'grade': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '32'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'unavailable'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'verify_uuid': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['certificates']
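# --- usage sketch (illustrative; not part of the original module) ---
# South applies this schema change with the standard management command,
# using the migration's numeric prefix:
#   python manage.py migrate certificates 0015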
|
agpl-3.0
|
sjfloat/youtube-dl
|
youtube_dl/extractor/mgoon.py
|
177
|
2695
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
qualities,
unified_strdate,
)
class MgoonIE(InfoExtractor):
_VALID_URL = r'''(?x)https?://(?:www\.)?
        (?:(?:m\.)?mgoon\.com/(?:ch/(?:.+)/v|play/view)|
video\.mgoon\.com)/(?P<id>[0-9]+)'''
_API_URL = 'http://mpos.mgoon.com/player/video?id={0:}'
_TESTS = [
{
'url': 'http://m.mgoon.com/ch/hi6618/v/5582148',
'md5': 'dd46bb66ab35cf6d51cc812fd82da79d',
'info_dict': {
'id': '5582148',
'uploader_id': 'hi6618',
'duration': 240.419,
'upload_date': '20131220',
'ext': 'mp4',
'title': 'md5:543aa4c27a4931d371c3f433e8cebebc',
'thumbnail': 're:^https?://.*\.jpg$',
}
},
{
'url': 'http://www.mgoon.com/play/view/5582148',
'only_matching': True,
},
{
'url': 'http://video.mgoon.com/5582148',
'only_matching': True,
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
data = self._download_json(self._API_URL.format(video_id), video_id)
if data.get('errorInfo', {}).get('code') != 'NONE':
raise ExtractorError('%s encountered an error: %s' % (
self.IE_NAME, data['errorInfo']['message']), expected=True)
v_info = data['videoInfo']
title = v_info.get('v_title')
thumbnail = v_info.get('v_thumbnail')
duration = v_info.get('v_duration')
upload_date = unified_strdate(v_info.get('v_reg_date'))
uploader_id = data.get('userInfo', {}).get('u_alias')
if duration:
duration /= 1000.0
age_limit = None
if data.get('accessInfo', {}).get('code') == 'VIDEO_STATUS_ADULT':
age_limit = 18
formats = []
get_quality = qualities(['360p', '480p', '720p', '1080p'])
for fmt in data['videoFiles']:
formats.append({
'format_id': fmt['label'],
'quality': get_quality(fmt['label']),
'url': fmt['url'],
'ext': fmt['format'],
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'formats': formats,
'thumbnail': thumbnail,
'duration': duration,
'upload_date': upload_date,
'uploader_id': uploader_id,
'age_limit': age_limit,
}
|
unlicense
|
jef-n/QGIS
|
tests/src/python/test_qgsmetadatawidget.py
|
30
|
12133
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsMetadataWidget.
Run with: ctest -V -R QgsMetadataWidget
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '20/03/2018'
__copyright__ = 'Copyright 2018, The QGIS Project'
import qgis # NOQA
from qgis.PyQt.QtXml import QDomDocument
from qgis.core import (QgsCoordinateReferenceSystem,
QgsAbstractMetadataBase,
QgsLayerMetadata,
QgsProjectMetadata,
QgsBox3d,
QgsDateTimeRange)
from qgis.gui import (QgsMetadataWidget)
from qgis.PyQt.QtCore import (QDate,
QTime,
QDateTime)
from qgis.testing import start_app, unittest
start_app()
class TestQgsMetadataWidget(unittest.TestCase):
def testLayerMode(self):
"""
Create a fully populated QgsLayerMetadata object, then set it to the widget and re-read back
the generated metadata to ensure that no content is lost.
"""
w = QgsMetadataWidget()
m = QgsLayerMetadata()
m.setIdentifier('1234')
m.setParentIdentifier('xyz')
m.setLanguage('en-CA')
m.setType('dataset')
m.setTitle('roads')
m.setAbstract('my roads')
m.setFees('None')
m.setConstraints([QgsLayerMetadata.Constraint('None', 'access')])
m.setRights(['Copyright foo 2017'])
m.setLicenses(['WTFPL'])
m.setHistory(['history a', 'history b'])
m.setKeywords({
'GEMET': ['kw1', 'kw2'],
'gmd:topicCategory': ['natural'],
})
# m.setEncoding('utf-8')
m.setCrs(QgsCoordinateReferenceSystem.fromOgcWmsCrs('EPSG:4326'))
e = QgsLayerMetadata.Extent()
se = QgsLayerMetadata.SpatialExtent()
se.extentCrs = QgsCoordinateReferenceSystem.fromOgcWmsCrs('EPSG:4326')
se.bounds = QgsBox3d(-180, -90, 0, 180, 90, 0)
e.setSpatialExtents([se])
dates = [
QgsDateTimeRange(
QDateTime(QDate(2001, 12, 17), QTime(9, 30, 47)),
QDateTime(QDate(2001, 12, 17), QTime(9, 30, 47)))
]
e.setTemporalExtents(dates)
m.setExtent(e)
c = QgsLayerMetadata.Contact()
c.name = 'John Smith'
c.organization = 'ACME'
c.position = 'staff'
c.voice = '1500 515 555'
c.fax = 'xx.xxx.xxx.xxxx'
c.email = '[email protected]'
c.role = 'pointOfContact'
address = QgsLayerMetadata.Address()
address.type = 'postal'
address.address = '123 Main Street'
address.city = 'anycity'
address.administrativeArea = 'anyprovince'
address.postalCode = '90210'
address.country = 'Canada'
c.addresses = [address]
m.setContacts([c])
l = QgsLayerMetadata.Link()
l.name = 'geonode:roads'
l.type = 'OGC:WMS'
l.description = 'my GeoNode road layer'
l.url = 'http://example.org/wms'
l2 = QgsLayerMetadata.Link()
l2.name = 'geonode:roads'
l2.type = 'OGC:WFS'
l2.description = 'my GeoNode road layer'
l2.url = 'http://example.org/wfs'
l3 = QgsLayerMetadata.Link()
l3.name = 'roads'
l3.type = 'WWW:LINK'
l3.description = 'full dataset download'
l3.url = 'http://example.org/roads.tgz'
l3.format = 'ESRI Shapefile'
l3.mimeType = 'application/gzip'
l3.size = '283676'
m.setLinks([l, l2, l3])
# set widget metadata
w.setMetadata(m)
self.assertEqual(w.mode(), QgsMetadataWidget.LayerMetadata)
m = w.metadata()
self.assertIsInstance(m, QgsLayerMetadata)
self.assertEqual(m.identifier(), '1234')
self.assertEqual(m.parentIdentifier(), 'xyz')
self.assertEqual(m.language(), 'en-CA')
self.assertEqual(m.type(), 'dataset')
self.assertEqual(m.title(), 'roads')
self.assertEqual(m.abstract(), 'my roads')
self.assertEqual(m.fees(), 'None')
self.assertEqual(m.constraints()[0].constraint, 'None')
self.assertEqual(m.constraints()[0].type, 'access')
self.assertEqual(m.rights(), ['Copyright foo 2017'])
self.assertEqual(m.licenses(), ['WTFPL'])
self.assertEqual(m.history(), ['history a', 'history b'])
# self.assertEqual(m.encoding(), 'utf-8')
self.assertEqual(
m.keywords(),
{'GEMET': ['kw1', 'kw2'], 'gmd:topicCategory': ['natural']})
self.assertEqual(m.crs().authid(), 'EPSG:4326')
extent = m.extent().spatialExtents()[0]
self.assertEqual(extent.extentCrs.authid(), 'EPSG:4326')
self.assertEqual(extent.bounds.xMinimum(), -180.0)
self.assertEqual(extent.bounds.yMinimum(), -90.0)
self.assertEqual(extent.bounds.xMaximum(), 180.0)
self.assertEqual(extent.bounds.yMaximum(), 90.0)
self.assertEqual(m.extent().temporalExtents()[0].begin(), QDateTime(QDate(2001, 12, 17), QTime(9, 30, 47)))
self.assertTrue(m.extent().temporalExtents()[0].isInstant())
self.assertEqual(m.contacts()[0].name, 'John Smith')
self.assertEqual(m.contacts()[0].organization, 'ACME')
self.assertEqual(m.contacts()[0].position, 'staff')
self.assertEqual(m.contacts()[0].voice, '1500 515 555')
self.assertEqual(m.contacts()[0].fax, 'xx.xxx.xxx.xxxx')
self.assertEqual(m.contacts()[0].email, '[email protected]')
self.assertEqual(m.contacts()[0].role, 'pointOfContact')
self.assertEqual(m.contacts()[0].addresses[0].type, 'postal')
self.assertEqual(m.contacts()[0].addresses[0].address, '123 Main Street')
self.assertEqual(m.contacts()[0].addresses[0].city, 'anycity')
self.assertEqual(m.contacts()[0].addresses[0].administrativeArea, 'anyprovince')
self.assertEqual(m.contacts()[0].addresses[0].postalCode, '90210')
self.assertEqual(m.contacts()[0].addresses[0].country, 'Canada')
self.assertEqual(m.links()[0].name, 'geonode:roads')
self.assertEqual(m.links()[0].type, 'OGC:WMS')
self.assertEqual(m.links()[0].description, 'my GeoNode road layer')
self.assertEqual(m.links()[0].url, 'http://example.org/wms')
self.assertEqual(m.links()[1].name, 'geonode:roads')
self.assertEqual(m.links()[1].type, 'OGC:WFS')
self.assertEqual(m.links()[1].description, 'my GeoNode road layer')
self.assertEqual(m.links()[1].url, 'http://example.org/wfs')
self.assertEqual(m.links()[2].name, 'roads')
self.assertEqual(m.links()[2].type, 'WWW:LINK')
self.assertEqual(m.links()[2].description, 'full dataset download')
self.assertEqual(m.links()[2].url, 'http://example.org/roads.tgz')
self.assertEqual(m.links()[2].format, 'ESRI Shapefile')
self.assertEqual(m.links()[2].mimeType, 'application/gzip')
self.assertEqual(m.links()[2].size, '283676')
def testProjectMode(self):
"""
Create a fully populated QgsProjectMetadata object, then set it to the widget and re-read back
the generated metadata to ensure that no content is lost.
"""
w = QgsMetadataWidget()
m = QgsProjectMetadata()
m.setIdentifier('1234')
m.setParentIdentifier('xyz')
m.setLanguage('en-CA')
m.setType('project')
m.setTitle('roads')
m.setAbstract('my roads')
m.setHistory(['history a', 'history b'])
m.setKeywords({
'GEMET': ['kw1', 'kw2'],
'gmd:topicCategory': ['natural'],
})
c = QgsAbstractMetadataBase.Contact()
c.name = 'John Smith'
c.organization = 'ACME'
c.position = 'staff'
c.voice = '1500 515 555'
c.fax = 'xx.xxx.xxx.xxxx'
c.email = '[email protected]'
c.role = 'pointOfContact'
address = QgsAbstractMetadataBase.Address()
address.type = 'postal'
address.address = '123 Main Street'
address.city = 'anycity'
address.administrativeArea = 'anyprovince'
address.postalCode = '90210'
address.country = 'Canada'
c.addresses = [address]
m.setContacts([c])
l = QgsAbstractMetadataBase.Link()
l.name = 'geonode:roads'
l.type = 'OGC:WMS'
l.description = 'my GeoNode road layer'
l.url = 'http://example.org/wms'
l2 = QgsAbstractMetadataBase.Link()
l2.name = 'geonode:roads'
l2.type = 'OGC:WFS'
l2.description = 'my GeoNode road layer'
l2.url = 'http://example.org/wfs'
l3 = QgsAbstractMetadataBase.Link()
l3.name = 'roads'
l3.type = 'WWW:LINK'
l3.description = 'full dataset download'
l3.url = 'http://example.org/roads.tgz'
l3.format = 'ESRI Shapefile'
l3.mimeType = 'application/gzip'
l3.size = '283676'
m.setLinks([l, l2, l3])
m.setAuthor('my author')
m.setCreationDateTime(QDateTime(QDate(2001, 12, 17), QTime(9, 30, 47)))
# set widget metadata
w.setMetadata(m)
self.assertEqual(w.mode(), QgsMetadataWidget.ProjectMetadata)
m = w.metadata()
self.assertIsInstance(m, QgsProjectMetadata)
self.assertEqual(m.identifier(), '1234')
self.assertEqual(m.parentIdentifier(), 'xyz')
self.assertEqual(m.language(), 'en-CA')
self.assertEqual(m.type(), 'project')
self.assertEqual(m.title(), 'roads')
self.assertEqual(m.abstract(), 'my roads')
self.assertEqual(m.history(), ['history a', 'history b'])
self.assertEqual(
m.keywords(),
{'GEMET': ['kw1', 'kw2'], 'gmd:topicCategory': ['natural']})
self.assertEqual(m.contacts()[0].name, 'John Smith')
self.assertEqual(m.contacts()[0].organization, 'ACME')
self.assertEqual(m.contacts()[0].position, 'staff')
self.assertEqual(m.contacts()[0].voice, '1500 515 555')
self.assertEqual(m.contacts()[0].fax, 'xx.xxx.xxx.xxxx')
self.assertEqual(m.contacts()[0].email, '[email protected]')
self.assertEqual(m.contacts()[0].role, 'pointOfContact')
self.assertEqual(m.contacts()[0].addresses[0].type, 'postal')
self.assertEqual(m.contacts()[0].addresses[0].address, '123 Main Street')
self.assertEqual(m.contacts()[0].addresses[0].city, 'anycity')
self.assertEqual(m.contacts()[0].addresses[0].administrativeArea, 'anyprovince')
self.assertEqual(m.contacts()[0].addresses[0].postalCode, '90210')
self.assertEqual(m.contacts()[0].addresses[0].country, 'Canada')
self.assertEqual(m.links()[0].name, 'geonode:roads')
self.assertEqual(m.links()[0].type, 'OGC:WMS')
self.assertEqual(m.links()[0].description, 'my GeoNode road layer')
self.assertEqual(m.links()[0].url, 'http://example.org/wms')
self.assertEqual(m.links()[1].name, 'geonode:roads')
self.assertEqual(m.links()[1].type, 'OGC:WFS')
self.assertEqual(m.links()[1].description, 'my GeoNode road layer')
self.assertEqual(m.links()[1].url, 'http://example.org/wfs')
self.assertEqual(m.links()[2].name, 'roads')
self.assertEqual(m.links()[2].type, 'WWW:LINK')
self.assertEqual(m.links()[2].description, 'full dataset download')
self.assertEqual(m.links()[2].url, 'http://example.org/roads.tgz')
self.assertEqual(m.links()[2].format, 'ESRI Shapefile')
self.assertEqual(m.links()[2].mimeType, 'application/gzip')
self.assertEqual(m.links()[2].size, '283676')
self.assertEqual(m.author(), 'my author')
self.assertEqual(m.creationDateTime(), QDateTime(QDate(2001, 12, 17), QTime(9, 30, 47)))
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
|
liampauling/betfair
|
betfairlightweight/resources/baseresource.py
|
2
|
1242
|
import functools
import datetime
from typing import Union, Optional
from ..compat import basestring, integer_types, json, parse_datetime
class BaseResource:
"""Lightweight data structure for resources."""
def __init__(self, **kwargs):
self.elapsed_time = kwargs.pop("elapsed_time", None)
now = datetime.datetime.utcnow()
self._datetime_created = now
self._datetime_updated = now
self._data = kwargs
def json(self) -> str:
return json.dumps(self._data)
@staticmethod
@functools.lru_cache()
def strip_datetime(value: Union[str, int]) -> Optional[datetime.datetime]:
"""
Converts value to datetime if string or int.
"""
if isinstance(value, basestring):
try:
return parse_datetime(value)
except ValueError:
return
elif isinstance(value, integer_types):
try:
return datetime.datetime.utcfromtimestamp(value / 1e3)
except (ValueError, OverflowError, OSError):
return
def __repr__(self) -> str:
return "<%s>" % self.__class__.__name__
def __str__(self) -> str:
return self.__class__.__name__
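if __name__ == "__main__":
    # Illustrative sketch, not part of the library: an integer is treated as
    # a millisecond epoch timestamp, so this prints 2020-01-01 12:00:00 (UTC);
    # string values are routed through the compat parse_datetime helper.
    print(BaseResource.strip_datetime(1577880000000))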
|
mit
|
MSchnei/py_pRF_motion
|
pyprf_feature/analysis/save_fit_tc_nii.py
|
1
|
12294
|
# -*- coding: utf-8 -*-
"""Saving empirical and fitted time courses to nii file format"""
# Part of pyprf_feature library
# Copyright (C) 2018 Marian Schneider, Ingo Marquardt
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
import os
import numpy as np
import nibabel as nb
from pyprf_feature.analysis.load_config import load_config
from pyprf_feature.analysis.utils_general import (cls_set_config, export_nii,
load_res_prm)
from pyprf_feature.analysis.prepare import prep_func, prep_models
from pyprf_feature.analysis.model_creation_utils import (crt_mdl_prms,
fnd_unq_rws)
###### DEBUGGING ###############
#strCsvCnfg = "/media/sf_D_DRIVE/MotDepPrf/Analysis/S02/04_motDepPrf/pRF_results/Avg/S02_config_motDepPrf_cntr_smooth_avg.csv"
#lgcTest = False
#lstRat = [1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.2, 2.4, 2.6, 2.8, 3.0]
#lgcMdlRsp = True
#strPathHrf = None
################################
def save_tc_to_nii(strCsvCnfg, lgcTest=False, lstRat=None, lgcMdlRsp=False,
strPathHrf=None, lgcSaveRam=False):
"""
Save empirical and fitted time courses to nii file format.
Parameters
----------
strCsvCnfg : str
Absolute file path of config file used for pRF fitting.
lgcTest : boolean
Whether this is a test (pytest). If yes, absolute path of pyprf libary
will be prepended to config file paths.
lstRat : None or list
Ratio of size of center to size of suppressive surround.
lgcMdlRsp : boolean
Should the aperture responses for the winner model also be saved?
strPathHrf : str or None:
Path to npy file with custom hrf parameters. If None, defaults
parameters were used.
lgcSaveRam : boolean
Whether to also save a nii file that uses little RAM.
Notes
-----
This function does not return any arguments but, instead, saves nii files
to disk.
"""
# %% Load configuration settings that were used for fitting
# Load config parameters from csv file into dictionary:
dicCnfg = load_config(strCsvCnfg, lgcTest=lgcTest)
# Load config parameters from dictionary into namespace:
cfg = cls_set_config(dicCnfg)
# if fitting was done with custom hrf, make sure to retrieve results with
# '_hrf' appendix
if strPathHrf is not None:
cfg.strPathOut = cfg.strPathOut + '_hrf'
# If suppressive surround flag is on, make sure to retrieve results with
# '_supsur' appendix
if lstRat is not None:
cfg.strPathOut = cfg.strPathOut + '_supsur'
cfg.strPathMdl = cfg.strPathMdl + '_supsur'
# Append 1.0 as the first entry, which is the key for fitting without
# surround (only centre)
lstRat.insert(0, 1.0)
# %% Load previous pRF fitting results
# Derive paths to the x, y, sigma winner parameters from pyprf_feature
lstWnrPrm = [cfg.strPathOut + '_x_pos.nii.gz',
cfg.strPathOut + '_y_pos.nii.gz',
cfg.strPathOut + '_SD.nii.gz']
# Check if fitting has been performed, i.e. whether parameter files exist
# Throw error message if they do not exist.
errorMsg = 'Files that should have resulted from fitting do not exist. \
\nPlease perform pRF fitting first, calling e.g.: \
\npyprf_feature -config /path/to/my_config_file.csv'
assert os.path.isfile(lstWnrPrm[0]), errorMsg
assert os.path.isfile(lstWnrPrm[1]), errorMsg
assert os.path.isfile(lstWnrPrm[2]), errorMsg
# Load the x, y, sigma winner parameters from pyprf_feature
aryIntGssPrm = load_res_prm(lstWnrPrm,
lstFlsMsk=[cfg.strPathNiiMask])[0][0]
    # Load beta parameter estimates, aka weights for time courses
    lstPathBeta = [cfg.strPathOut + '_Betas.nii.gz']
    assert os.path.isfile(lstPathBeta[0]), errorMsg
    aryBetas = load_res_prm(lstPathBeta, lstFlsMsk=[cfg.strPathNiiMask])[0][0]
    # Load ratio image, if fitting was obtained with suppressive surround
    if lstRat is not None:
        lstPathRatio = [cfg.strPathOut + '_Ratios.nii.gz']
        assert os.path.isfile(lstPathRatio[0]), errorMsg
        aryRatio = load_res_prm(lstPathRatio,
                                lstFlsMsk=[cfg.strPathNiiMask])[0][0]
# Some voxels were excluded because they did not have sufficient mean
# and/or variance - exclude their initial parameters, too
# Get inclusion mask and nii header
aryLgcMsk, aryLgcVar, hdrMsk, aryAff, aryFunc, tplNiiShp = prep_func(
cfg.strPathNiiMask, cfg.lstPathNiiFunc, varAvgThr=-100)
# Apply inclusion mask
aryIntGssPrm = aryIntGssPrm[aryLgcVar, :]
aryBetas = aryBetas[aryLgcVar, :]
if lstRat is not None:
aryRatio = aryRatio[aryLgcVar, :]
# Get array with model parameters that were fitted on a grid
# [x positions, y positions, sigmas]
aryMdlParams = crt_mdl_prms((int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)), cfg.varNum1,
cfg.varExtXmin, cfg.varExtXmax, cfg.varNum2,
cfg.varExtYmin, cfg.varExtYmax,
cfg.varNumPrfSizes, cfg.varPrfStdMin,
cfg.varPrfStdMax, kwUnt='deg',
kwCrd=cfg.strKwCrd)
# Load logical for parameter exclusion in unstimulated area
lgcMdlInc = np.load(cfg.strPathMdl + '_lgcMdlInc.npy')
# Apply logical
aryMdlParams = aryMdlParams[lgcMdlInc, :]
# Get corresponding pRF model time courses
aryPrfTc = np.load(cfg.strPathMdl + '.npy')
# The model time courses will be preprocessed such that they are smoothed
    # (temporally) with the same factor as the data and that they will be z-scored:
aryPrfTc = prep_models(aryPrfTc, varSdSmthTmp=cfg.varSdSmthTmp)
if lgcMdlRsp:
aryMdlRsp = np.load(cfg.strPathMdl + '_mdlRsp.npy')
# %% Derive fitted time course models for all voxels
# Initialize array that will collect the fitted time courses
aryFitTc = np.zeros((aryFunc.shape), dtype=np.float32)
    # If desired, initialize array that will collect model responses underlying
# the fitted time course
if lgcMdlRsp:
if lstRat is not None:
aryFitMdlRsp = np.zeros((aryIntGssPrm.shape[0], aryMdlRsp.shape[1],
aryMdlRsp.shape[3]),
dtype=np.float32)
else:
aryFitMdlRsp = np.zeros((aryIntGssPrm.shape[0],
aryMdlRsp.shape[1]), dtype=np.float32)
    # create vector that allows us to check whether every voxel is visited
# exactly once
vecVxlTst = np.zeros(aryIntGssPrm.shape[0])
# Find unique rows of fitted model parameters
aryUnqRows, aryUnqInd = fnd_unq_rws(aryIntGssPrm, return_index=False,
return_inverse=True)
# Loop over all best-fitting model parameter combinations found
print('---Assign models to voxels')
for indRow, vecPrm in enumerate(aryUnqRows):
# Get logical for voxels for which this prm combi was the best
lgcVxl = [aryUnqInd == indRow][0]
if np.all(np.invert(lgcVxl)):
print('---No voxel found')
# Mark those voxels that were visited
vecVxlTst[lgcVxl] += 1
        # Get the indices of matching models; at most one grid model should
        # match this parameter combination
        vecMdlInd = np.where(np.isclose(aryMdlParams, vecPrm,
                                        atol=0.01).all(axis=1))[0]
        # Tell user if no model was found
        if vecMdlInd.size == 0:
            print('---No model found')
        lgcMdl = vecMdlInd[0]
# Get model time courses
aryMdlTc = aryPrfTc[lgcMdl, ...]
# Get beta parameter estimates
aryWeights = aryBetas[lgcVxl, :]
# If fitting was done with surround suppression, find ratios for voxels
# and the indices of these ratios in lstRat
if lstRat is not None:
aryVxlRatio = aryRatio[lgcVxl, :]
indRat = [ind for ind, rat1 in enumerate(lstRat) for rat2 in
aryVxlRatio[:, 0] if np.isclose(rat1, rat2)]
indVxl = range(len(indRat))
# Combine model time courses and weights to yield fitted time course
if lstRat is not None:
aryFitTcTmp = np.tensordot(aryWeights, aryMdlTc, axes=([1], [0]))
aryFitTc[lgcVxl, :] = aryFitTcTmp[indVxl, indRat, :]
else:
aryFitTc[lgcVxl, :] = np.dot(aryWeights, aryMdlTc)
# If desired by user, also save the model responses per voxels
if lgcMdlRsp:
# If desired also save the model responses that won
if lstRat is not None:
aryFitMdlRsp[lgcVxl, :] = aryMdlRsp[lgcMdl, :, indRat, :]
else:
aryFitMdlRsp[lgcVxl, :] = aryMdlRsp[lgcMdl, :]
# check that every voxel was visited exactly once
errMsg = 'At least one voxel visited more than once for tc recreation'
assert len(vecVxlTst) == np.sum(vecVxlTst), errMsg
# %% Export preprocessed voxel time courses as nii
# List with name suffices of output images:
lstNiiNames = ['_EmpTc']
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# export aryFunc as a single 4D nii file
print('---Save empirical time courses')
export_nii(aryFunc, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
aryAff, hdrMsk, outFormat='4D')
print('------Done.')
# If desired by user, also save RAM-saving version of nii
if lgcSaveRam:
strPthRamOut = cfg.strPathOut + '_EmpTc_saveRAM' + '.nii.gz'
imgNii = nb.Nifti1Image(np.expand_dims(np.expand_dims(aryFunc, axis=1),
axis=1),
affine=np.eye(4))
nb.save(imgNii, strPthRamOut)
# %% Export fitted time courses and, if desired, model responses as nii
# List with name suffices of output images:
lstNiiNames = ['_FitTc']
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# export aryFitTc as a single 4D nii file
print('---Save fitted time courses')
export_nii(aryFitTc, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
aryAff, hdrMsk, outFormat='4D')
print('------Done.')
if lgcMdlRsp:
# Create full path name
strNpyName = cfg.strPathOut + '_FitMdlRsp' + '.npy'
# Save aryFitMdlRsp as npy file
print('---Save fitted model responses')
np.save(strNpyName, aryFitMdlRsp)
print('------Done.')
# Save the mask so we know which voxels these parameters belonged to
strNpyMskName = cfg.strPathOut + '_FitMdlRsp_Mask' + '.npy'
aryLgcMsk[aryLgcMsk] = aryLgcVar
print('---Save mask for fitted model responses')
np.save(strNpyMskName, aryLgcMsk)
print('------Done.')
# If desired by user, also save RAM-saving version of nii
if lgcSaveRam:
strPthRamOut = cfg.strPathOut + '_FitTc_saveRAM' + '.nii.gz'
imgNii = nb.Nifti1Image(np.expand_dims(np.expand_dims(aryFitTc,
axis=1),
axis=1),
affine=np.eye(4))
nb.save(imgNii, strPthRamOut)
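if __name__ == '__main__':
    # Illustrative sketch mirroring the commented debugging block above; the
    # config path is a placeholder and must point to the csv file that was
    # used for pRF fitting.
    save_tc_to_nii('/path/to/my_config_file.csv', lgcTest=False, lstRat=None,
                   lgcMdlRsp=False, strPathHrf=None, lgcSaveRam=False)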
|
gpl-3.0
|
akirk/youtube-dl
|
youtube_dl/extractor/twitter.py
|
17
|
8604
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
float_or_none,
xpath_text,
remove_end,
int_or_none,
ExtractorError,
sanitized_Request,
)
class TwitterCardIE(InfoExtractor):
IE_NAME = 'twitter:card'
_VALID_URL = r'https?://(?:www\.)?twitter\.com/i/cards/tfw/v1/(?P<id>\d+)'
_TESTS = [
{
'url': 'https://twitter.com/i/cards/tfw/v1/560070183650213889',
'md5': '4fa26a35f9d1bf4b646590ba8e84be19',
'info_dict': {
'id': '560070183650213889',
'ext': 'mp4',
'title': 'TwitterCard',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 30.033,
}
},
{
'url': 'https://twitter.com/i/cards/tfw/v1/623160978427936768',
'md5': '7ee2a553b63d1bccba97fbed97d9e1c8',
'info_dict': {
'id': '623160978427936768',
'ext': 'mp4',
'title': 'TwitterCard',
'thumbnail': 're:^https?://.*\.jpg',
'duration': 80.155,
},
},
{
'url': 'https://twitter.com/i/cards/tfw/v1/654001591733886977',
'md5': 'b6f35e8b08a0bec6c8af77a2f4b3a814',
'info_dict': {
'id': 'dq4Oj5quskI',
'ext': 'mp4',
'title': 'Ubuntu 11.10 Overview',
'description': 'Take a quick peek at what\'s new and improved in Ubuntu 11.10.\n\nOnce installed take a look at 10 Things to Do After Installing: http://www.omgubuntu.co.uk/2011/10/10-things-to-do-after-installing-ubuntu-11-10/',
'upload_date': '20111013',
'uploader': 'OMG! Ubuntu!',
'uploader_id': 'omgubuntu',
},
'add_ie': ['Youtube'],
},
{
'url': 'https://twitter.com/i/cards/tfw/v1/665289828897005568',
'md5': 'ab2745d0b0ce53319a534fccaa986439',
'info_dict': {
'id': 'iBb2x00UVlv',
'ext': 'mp4',
'upload_date': '20151113',
'uploader_id': '1189339351084113920',
'uploader': '@ArsenalTerje',
'title': 'Vine by @ArsenalTerje',
},
'add_ie': ['Vine'],
}
]
def _real_extract(self, url):
video_id = self._match_id(url)
# Different formats served for different User-Agents
USER_AGENTS = [
'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/20.0 (Chrome)', # mp4
'Mozilla/5.0 (Windows NT 5.2; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0', # webm
]
config = None
formats = []
for user_agent in USER_AGENTS:
request = sanitized_Request(url)
request.add_header('User-Agent', user_agent)
webpage = self._download_webpage(request, video_id)
iframe_url = self._html_search_regex(
                r'<iframe[^>]+src="((?:https?:)?//(?:www\.youtube\.com/embed/[^"]+|(?:www\.)?vine\.co/v/\w+/card))"',
webpage, 'video iframe', default=None)
if iframe_url:
return self.url_result(iframe_url)
config = self._parse_json(self._html_search_regex(
r'data-player-config="([^"]+)"', webpage, 'data player config'),
video_id)
if 'playlist' not in config:
if 'vmapUrl' in config:
vmap_data = self._download_xml(config['vmapUrl'], video_id)
video_url = xpath_text(vmap_data, './/MediaFile').strip()
formats.append({
'url': video_url,
})
break # same video regardless of UA
continue
video_url = config['playlist'][0]['source']
f = {
'url': video_url,
}
m = re.search(r'/(?P<width>\d+)x(?P<height>\d+)/', video_url)
if m:
f.update({
'width': int(m.group('width')),
'height': int(m.group('height')),
})
formats.append(f)
self._sort_formats(formats)
thumbnail = config.get('posterImageUrl')
duration = float_or_none(config.get('duration'))
return {
'id': video_id,
'title': 'TwitterCard',
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
}
class TwitterIE(InfoExtractor):
IE_NAME = 'twitter'
_VALID_URL = r'https?://(?:www\.|m\.|mobile\.)?twitter\.com/(?P<user_id>[^/]+)/status/(?P<id>\d+)'
_TEMPLATE_URL = 'https://twitter.com/%s/status/%s'
_TESTS = [{
'url': 'https://twitter.com/freethenipple/status/643211948184596480',
'md5': 'db6612ec5d03355953c3ca9250c97e5e',
'info_dict': {
'id': '643211948184596480',
'ext': 'mp4',
'title': 'FREE THE NIPPLE - FTN supporters on Hollywood Blvd today!',
'thumbnail': 're:^https?://.*\.jpg',
'duration': 12.922,
'description': 'FREE THE NIPPLE on Twitter: "FTN supporters on Hollywood Blvd today! http://t.co/c7jHH749xJ"',
'uploader': 'FREE THE NIPPLE',
'uploader_id': 'freethenipple',
},
}, {
'url': 'https://twitter.com/giphz/status/657991469417025536/photo/1',
'md5': 'f36dcd5fb92bf7057f155e7d927eeb42',
'info_dict': {
'id': '657991469417025536',
'ext': 'mp4',
'title': 'Gifs - tu vai cai tu vai cai tu nao eh capaz disso tu vai cai',
'description': 'Gifs on Twitter: "tu vai cai tu vai cai tu nao eh capaz disso tu vai cai https://t.co/tM46VHFlO5"',
'thumbnail': 're:^https?://.*\.png',
'uploader': 'Gifs',
'uploader_id': 'giphz',
},
}, {
'url': 'https://twitter.com/starwars/status/665052190608723968',
'md5': '39b7199856dee6cd4432e72c74bc69d4',
'info_dict': {
'id': '665052190608723968',
'ext': 'mp4',
'title': 'Star Wars - A new beginning is coming December 18. Watch the official 60 second #TV spot for #StarWars: #TheForceAwakens.',
'description': 'Star Wars on Twitter: "A new beginning is coming December 18. Watch the official 60 second #TV spot for #StarWars: #TheForceAwakens."',
'uploader_id': 'starwars',
'uploader': 'Star Wars',
},
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
user_id = mobj.group('user_id')
twid = mobj.group('id')
webpage = self._download_webpage(self._TEMPLATE_URL % (user_id, twid), twid)
username = remove_end(self._og_search_title(webpage), ' on Twitter')
        title = description = self._og_search_description(webpage).replace('\n', ' ').strip('“”')
# strip 'https -_t.co_BJYgOjSeGA' junk from filenames
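        # e.g. 'FTN supporters on Hollywood Blvd today! http://t.co/c7jHH749xJ'
        # becomes 'FTN supporters on Hollywood Blvd today!' (cf. _TESTS above)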
title = re.sub(r'\s+(https?://[^ ]+)', '', title)
info = {
'uploader_id': user_id,
'uploader': username,
'webpage_url': url,
'description': '%s on Twitter: "%s"' % (username, description),
'title': username + ' - ' + title,
}
card_id = self._search_regex(
r'["\']/i/cards/tfw/v1/(\d+)', webpage, 'twitter card url', default=None)
if card_id:
card_url = 'https://twitter.com/i/cards/tfw/v1/' + card_id
info.update({
'_type': 'url_transparent',
'ie_key': 'TwitterCard',
'url': card_url,
})
return info
mobj = re.search(r'''(?x)
<video[^>]+class="animated-gif"[^>]+
(?:data-height="(?P<height>\d+)")?[^>]+
(?:data-width="(?P<width>\d+)")?[^>]+
(?:poster="(?P<poster>[^"]+)")?[^>]*>\s*
<source[^>]+video-src="(?P<url>[^"]+)"
''', webpage)
if mobj:
info.update({
'id': twid,
'url': mobj.group('url'),
'height': int_or_none(mobj.group('height')),
'width': int_or_none(mobj.group('width')),
'thumbnail': mobj.group('poster'),
})
return info
        raise ExtractorError('There\'s no video in this tweet.')
|
unlicense
|
google/timesketch
|
importer_client/python/timesketch_import_client/data/__init__.py
|
1
|
1973
|
# Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Timesketch data import configuration."""
from __future__ import unicode_literals
import codecs
import logging
import os
import yaml
logger = logging.getLogger('timesketch_importer.config_loader')
DEFAULT_FILE = 'formatter.yaml'
def load_config(file_path=''):
"""Loads YAML config and returns a list of dict with the results.
Args:
file_path (str): path to the YAML config file. This is optional
and if not defined the default formatter.yaml file will be
used that comes with the tool.
Returns:
dict: a dict with the key being a config file identifier and the value
being another dict with the configuration items.
"""
if not file_path:
base_path = os.path.dirname(__file__)
file_path = os.path.join(base_path, DEFAULT_FILE)
if not file_path.endswith('.yaml'):
logger.error('Can\'t load a config that is not a YAML file.')
return {}
if not os.path.isfile(file_path):
logger.error('File path does not exist, unable to load YAML config.')
return {}
with codecs.open(file_path, 'r') as fh:
try:
data = yaml.safe_load(fh)
return data
except (AttributeError, yaml.parser.ParserError) as e:
logger.error('Unable to parse YAML file, with error: %s', e)
return {}
return {}
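if __name__ == '__main__':
    # Illustrative sketch, not part of the importer: load the bundled
    # formatter.yaml and list its configuration section identifiers.
    for identifier in load_config():
        print(identifier)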
|
apache-2.0
|
cocafe/i9070_Kernel_CoCore-E
|
tools/perf/scripts/python/check-perf-trace.py
|
11214
|
2503
|
# perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
|
gpl-2.0
|
petemounce/ansible-modules-core
|
database/postgresql/postgresql_user.py
|
89
|
25160
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: postgresql_user
short_description: Adds or removes a user (role) from a PostgreSQL database.
description:
- Add or remove PostgreSQL users (roles) from a remote host and, optionally,
grant the users access to an existing database or tables.
- The fundamental function of the module is to create, or delete, roles from
a PostgreSQL cluster. Privilege assignment, or removal, is an optional
step, which works on one database at a time. This allows for the module to
     be called several times in the same playbook to modify the permissions on
different databases, or to grant permissions to already existing users.
- A user cannot be removed until all the privileges have been stripped from
     the user. In such a situation, if the module tries to remove the user it
will fail. To avoid this from happening the fail_on_user option signals
the module to try to remove the user, but if not possible keep going; the
module will report if changes happened and separately if the user was
removed or not.
version_added: "0.6"
options:
name:
description:
- name of the user (role) to add or remove
required: true
default: null
password:
description:
- set the user's password, before 1.4 this was required.
- "When passing an encrypted password, the encrypted parameter must also be true, and it must be generated with the format C('str[\\"md5\\"] + md5[ password + username ]'), resulting in a total of 35 characters. An easy way to do this is: C(echo \\"md5`echo -n \\"verysecretpasswordJOE\\" | md5`\\"). Note that if encrypted is set, the stored password will be hashed whether or not it is pre-encrypted."
required: false
default: null
db:
description:
- name of database where permissions will be granted
required: false
default: null
fail_on_user:
description:
- if C(yes), fail when user can't be removed. Otherwise just log and continue
required: false
default: 'yes'
choices: [ "yes", "no" ]
port:
description:
- Database port to connect to.
required: false
default: 5432
login_user:
description:
- User (role) used to authenticate with PostgreSQL
required: false
default: postgres
login_password:
description:
- Password used to authenticate with PostgreSQL
required: false
default: null
login_host:
description:
- Host running PostgreSQL.
required: false
default: localhost
login_unix_socket:
description:
- Path to a Unix domain socket for local connections
required: false
default: null
priv:
description:
- "PostgreSQL privileges string in the format: C(table:priv1,priv2)"
required: false
default: null
role_attr_flags:
description:
- "PostgreSQL role attributes string in the format: CREATEDB,CREATEROLE,SUPERUSER"
required: false
default: ""
choices: [ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEUSER", "[NO]CREATEDB",
"[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION" ]
state:
description:
- The user (role) state
required: false
default: present
choices: [ "present", "absent" ]
encrypted:
description:
- whether the password is stored hashed in the database. boolean. Passwords can be passed already hashed or unhashed, and postgresql ensures the stored password is hashed when encrypted is set.
required: false
default: false
version_added: '1.4'
expires:
description:
- sets the user's password expiration.
required: false
default: null
version_added: '1.4'
no_password_changes:
description:
- if C(yes), don't inspect database for password changes. Effective when C(pg_authid) is not accessible (such as AWS RDS). Otherwise, make password changes as necessary.
required: false
default: 'no'
choices: [ "yes", "no" ]
version_added: '2.0'
notes:
- The default authentication assumes that you are either logging in as or
sudo'ing to the postgres account on the host.
- This module uses psycopg2, a Python PostgreSQL database adapter. You must
ensure that psycopg2 is installed on the host before using this module. If
the remote host is the PostgreSQL server (which is the default case), then
PostgreSQL must also be installed on the remote host. For Ubuntu-based
systems, install the postgresql, libpq-dev, and python-psycopg2 packages
on the remote host before using this module.
- If the passlib library is installed, then passwords that are encrypted
in the DB but not encrypted when passed as arguments can be checked for
changes. If the passlib library is not installed, unencrypted passwords
stored in the DB encrypted will be assumed to have changed.
- If you specify PUBLIC as the user, then the privilege changes will apply
to all users. You may not specify password or role_attr_flags when the
PUBLIC user is specified.
requirements: [ psycopg2 ]
author: "Lorin Hochstein (@lorin)"
'''
EXAMPLES = '''
# Create django user and grant access to database and products table
- postgresql_user: db=acme name=django password=ceec4eif7ya priv=CONNECT/products:ALL
# Create rails user, grant privilege to create other databases and demote rails from super user status
- postgresql_user: name=rails password=secret role_attr_flags=CREATEDB,NOSUPERUSER
# Remove test user privileges from acme
- postgresql_user: db=acme name=test priv=ALL/products:ALL state=absent fail_on_user=no
# Remove test user from test database and the cluster
- postgresql_user: db=test name=test priv=ALL state=absent
# Example privileges string format
INSERT,UPDATE/table:SELECT/anothertable:ALL
# Remove an existing user's password
- postgresql_user: db=test user=test password=NULL
'''
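# Illustrative sketch of pre-encrypting a password as described above; the
# username 'joe' and password 'verysecretpassword' are placeholder values:
#
#   import hashlib
#   encrypted = 'md5' + hashlib.md5('verysecretpassword' + 'joe').hexdigest()
#
# The result is 35 characters long and can be passed as password= together
# with encrypted=yes.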
import re
import itertools
try:
import psycopg2
import psycopg2.extras
except ImportError:
postgresqldb_found = False
else:
postgresqldb_found = True
_flags = ('SUPERUSER', 'CREATEROLE', 'CREATEUSER', 'CREATEDB', 'INHERIT', 'LOGIN', 'REPLICATION')
VALID_FLAGS = frozenset(itertools.chain(_flags, ('NO%s' % f for f in _flags)))
VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL')),
database=frozenset(('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL')),
)
# map to cope with idiosyncrasies of SUPERUSER and LOGIN
PRIV_TO_AUTHID_COLUMN = dict(SUPERUSER='rolsuper', CREATEROLE='rolcreaterole',
CREATEUSER='rolcreateuser', CREATEDB='rolcreatedb',
INHERIT='rolinherit', LOGIN='rolcanlogin',
REPLICATION='rolreplication')
class InvalidFlagsError(Exception):
pass
class InvalidPrivsError(Exception):
pass
# ===========================================
# PostgreSQL module specific support methods.
#
def user_exists(cursor, user):
# The PUBLIC user is a special case that is always there
if user == 'PUBLIC':
return True
query = "SELECT rolname FROM pg_roles WHERE rolname=%(user)s"
cursor.execute(query, {'user': user})
return cursor.rowcount > 0
def user_add(cursor, user, password, role_attr_flags, encrypted, expires):
"""Create a new database user (role)."""
# Note: role_attr_flags escaped by parse_role_attrs and encrypted is a literal
query_password_data = dict(password=password, expires=expires)
query = ['CREATE USER %(user)s' % { "user": pg_quote_identifier(user, 'role')}]
if password is not None:
query.append("WITH %(crypt)s" % { "crypt": encrypted })
query.append("PASSWORD %(password)s")
if expires is not None:
query.append("VALID UNTIL %(expires)s")
query.append(role_attr_flags)
query = ' '.join(query)
cursor.execute(query, query_password_data)
return True
def user_alter(cursor, module, user, password, role_attr_flags, encrypted, expires, no_password_changes):
"""Change user password and/or attributes. Return True if changed, False otherwise."""
changed = False
# Note: role_attr_flags escaped by parse_role_attrs and encrypted is a literal
if user == 'PUBLIC':
if password is not None:
module.fail_json(msg="cannot change the password for PUBLIC user")
elif role_attr_flags != '':
module.fail_json(msg="cannot change the role_attr_flags for PUBLIC user")
else:
return False
# Handle passwords.
if not no_password_changes and (password is not None or role_attr_flags != ''):
# Select password and all flag-like columns in order to verify changes.
query_password_data = dict(password=password, expires=expires)
select = "SELECT * FROM pg_authid where rolname=%(user)s"
cursor.execute(select, {"user": user})
# Grab current role attributes.
current_role_attrs = cursor.fetchone()
# Do we actually need to do anything?
pwchanging = False
if password is not None:
if encrypted:
if password.startswith('md5'):
if password != current_role_attrs['rolpassword']:
pwchanging = True
else:
try:
from passlib.hash import postgres_md5 as pm
if pm.encrypt(password, user) != current_role_attrs['rolpassword']:
pwchanging = True
except ImportError:
# Cannot check if passlib is not installed, so assume password is different
pwchanging = True
else:
if password != current_role_attrs['rolpassword']:
pwchanging = True
role_attr_flags_changing = False
if role_attr_flags:
role_attr_flags_dict = {}
for r in role_attr_flags.split(' '):
if r.startswith('NO'):
role_attr_flags_dict[r.replace('NO', '', 1)] = False
else:
role_attr_flags_dict[r] = True
for role_attr_name, role_attr_value in role_attr_flags_dict.items():
if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
role_attr_flags_changing = True
        expires_changing = (expires is not None and
                            expires != current_role_attrs['rolvaliduntil'])
if not pwchanging and not role_attr_flags_changing and not expires_changing:
return False
alter = ['ALTER USER %(user)s' % {"user": pg_quote_identifier(user, 'role')}]
if pwchanging:
alter.append("WITH %(crypt)s" % {"crypt": encrypted})
alter.append("PASSWORD %(password)s")
alter.append(role_attr_flags)
elif role_attr_flags:
alter.append('WITH %s' % role_attr_flags)
if expires is not None:
alter.append("VALID UNTIL %(expires)s")
try:
cursor.execute(' '.join(alter), query_password_data)
except psycopg2.InternalError, e:
if e.pgcode == '25006':
# Handle errors due to read-only transactions indicated by pgcode 25006
# ERROR: cannot execute ALTER ROLE in a read-only transaction
changed = False
module.fail_json(msg=e.pgerror)
return changed
else:
raise psycopg2.InternalError, e
# Grab new role attributes.
cursor.execute(select, {"user": user})
new_role_attrs = cursor.fetchone()
# Detect any differences between current_ and new_role_attrs.
for i in range(len(current_role_attrs)):
if current_role_attrs[i] != new_role_attrs[i]:
changed = True
return changed
def user_delete(cursor, user):
"""Try to remove a user. Returns True if successful otherwise False"""
cursor.execute("SAVEPOINT ansible_pgsql_user_delete")
try:
cursor.execute("DROP USER %s" % pg_quote_identifier(user, 'role'))
except:
cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_user_delete")
cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
return False
cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
return True
def has_table_privileges(cursor, user, table, privs):
"""
Return the difference between the privileges that a user already has and
the privileges that they desire to have.
:returns: tuple of:
* privileges that they have and were requested
* privileges they currently hold but were not requested
* privileges requested that they do not hold
"""
cur_privs = get_table_privileges(cursor, user, table)
have_currently = cur_privs.intersection(privs)
other_current = cur_privs.difference(privs)
desired = privs.difference(cur_privs)
return (have_currently, other_current, desired)
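# Illustrative sketch (assumed values): if the user currently holds
# {'SELECT', 'DELETE'} on the table and {'SELECT', 'INSERT'} is requested,
# the returned tuple is ({'SELECT'}, {'DELETE'}, {'INSERT'}).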
def get_table_privileges(cursor, user, table):
if '.' in table:
schema, table = table.split('.', 1)
else:
schema = 'public'
query = '''SELECT privilege_type FROM information_schema.role_table_grants
WHERE grantee=%s AND table_name=%s AND table_schema=%s'''
cursor.execute(query, (user, table, schema))
return frozenset([x[0] for x in cursor.fetchall()])
def grant_table_privileges(cursor, user, table, privs):
# Note: priv escaped by parse_privs
privs = ', '.join(privs)
query = 'GRANT %s ON TABLE %s TO %s' % (
privs, pg_quote_identifier(table, 'table'), pg_quote_identifier(user, 'role') )
cursor.execute(query)
def revoke_table_privileges(cursor, user, table, privs):
# Note: priv escaped by parse_privs
privs = ', '.join(privs)
query = 'REVOKE %s ON TABLE %s FROM %s' % (
privs, pg_quote_identifier(table, 'table'), pg_quote_identifier(user, 'role') )
cursor.execute(query)
def get_database_privileges(cursor, user, db):
priv_map = {
'C':'CREATE',
'T':'TEMPORARY',
'c':'CONNECT',
}
query = 'SELECT datacl FROM pg_database WHERE datname = %s'
cursor.execute(query, (db,))
datacl = cursor.fetchone()[0]
if datacl is None:
return set()
r = re.search('%s=(C?T?c?)/[a-z]+\,?' % user, datacl)
if r is None:
return set()
o = set()
for v in r.group(1):
o.add(priv_map[v])
return normalize_privileges(o, 'database')
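# Illustrative sketch (assumed values): for a pg_database datacl entry such
# as 'django=CTc/postgres', the regex above extracts the flag string 'CTc',
# which priv_map expands to CREATE, TEMPORARY and CONNECT before normalizing.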
def has_database_privileges(cursor, user, db, privs):
"""
Return the difference between the privileges that a user already has and
the privileges that they desire to have.
:returns: tuple of:
* privileges that they have and were requested
* privileges they currently hold but were not requested
* privileges requested that they do not hold
"""
cur_privs = get_database_privileges(cursor, user, db)
have_currently = cur_privs.intersection(privs)
other_current = cur_privs.difference(privs)
desired = privs.difference(cur_privs)
return (have_currently, other_current, desired)
def grant_database_privileges(cursor, user, db, privs):
# Note: priv escaped by parse_privs
privs =', '.join(privs)
if user == "PUBLIC":
query = 'GRANT %s ON DATABASE %s TO PUBLIC' % (
privs, pg_quote_identifier(db, 'database'))
else:
query = 'GRANT %s ON DATABASE %s TO %s' % (
privs, pg_quote_identifier(db, 'database'),
pg_quote_identifier(user, 'role'))
cursor.execute(query)
def revoke_database_privileges(cursor, user, db, privs):
# Note: priv escaped by parse_privs
privs = ', '.join(privs)
if user == "PUBLIC":
query = 'REVOKE %s ON DATABASE %s FROM PUBLIC' % (
privs, pg_quote_identifier(db, 'database'))
else:
query = 'REVOKE %s ON DATABASE %s FROM %s' % (
privs, pg_quote_identifier(db, 'database'),
pg_quote_identifier(user, 'role'))
cursor.execute(query)
def revoke_privileges(cursor, user, privs):
if privs is None:
return False
revoke_funcs = dict(table=revoke_table_privileges, database=revoke_database_privileges)
check_funcs = dict(table=has_table_privileges, database=has_database_privileges)
changed = False
for type_ in privs:
for name, privileges in privs[type_].iteritems():
# Check that any of the privileges requested to be removed are
# currently granted to the user
differences = check_funcs[type_](cursor, user, name, privileges)
if differences[0]:
revoke_funcs[type_](cursor, user, name, privileges)
changed = True
return changed
def grant_privileges(cursor, user, privs):
if privs is None:
return False
    grant_funcs = dict(table=grant_table_privileges, database=grant_database_privileges)
    check_funcs = dict(table=has_table_privileges, database=has_database_privileges)
changed = False
for type_ in privs:
for name, privileges in privs[type_].iteritems():
# Check that any of the privileges requested for the user are
# currently missing
differences = check_funcs[type_](cursor, user, name, privileges)
if differences[2]:
grant_funcs[type_](cursor, user, name, privileges)
changed = True
return changed
def parse_role_attrs(role_attr_flags):
"""
Parse role attributes string for user creation.
Format:
attributes[,attributes,...]
Where:
attributes := CREATEDB,CREATEROLE,NOSUPERUSER,...
[ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEUSER", "[NO]CREATEDB",
"[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION" ]
"""
if ',' in role_attr_flags:
flag_set = frozenset(r.upper() for r in role_attr_flags.split(","))
elif role_attr_flags:
flag_set = frozenset((role_attr_flags.upper(),))
else:
flag_set = frozenset()
if not flag_set.issubset(VALID_FLAGS):
raise InvalidFlagsError('Invalid role_attr_flags specified: %s' %
' '.join(flag_set.difference(VALID_FLAGS)))
o_flags = ' '.join(flag_set)
return o_flags
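# Illustrative sketch (assumed input): parse_role_attrs('CREATEDB,NOSUPERUSER')
# returns the validated flags joined by spaces, e.g. 'CREATEDB NOSUPERUSER'
# (frozenset order is not guaranteed); unknown flags raise InvalidFlagsError.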
def normalize_privileges(privs, type_):
new_privs = set(privs)
if 'ALL' in new_privs:
new_privs.update(VALID_PRIVS[type_])
new_privs.remove('ALL')
if 'TEMP' in new_privs:
new_privs.add('TEMPORARY')
new_privs.remove('TEMP')
return new_privs
def parse_privs(privs, db):
"""
Parse privilege string to determine permissions for database db.
Format:
privileges[/privileges/...]
Where:
privileges := DATABASE_PRIVILEGES[,DATABASE_PRIVILEGES,...] |
TABLE_NAME:TABLE_PRIVILEGES[,TABLE_PRIVILEGES,...]
"""
if privs is None:
return privs
o_privs = {
'database':{},
'table':{}
}
for token in privs.split('/'):
if ':' not in token:
type_ = 'database'
name = db
priv_set = frozenset(x.strip().upper() for x in token.split(',') if x.strip())
else:
type_ = 'table'
name, privileges = token.split(':', 1)
priv_set = frozenset(x.strip().upper() for x in privileges.split(',') if x.strip())
if not priv_set.issubset(VALID_PRIVS[type_]):
raise InvalidPrivsError('Invalid privs specified for %s: %s' %
(type_, ' '.join(priv_set.difference(VALID_PRIVS[type_]))))
priv_set = normalize_privileges(priv_set, type_)
o_privs[type_][name] = priv_set
return o_privs
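# Illustrative sketch (assumed input): with db='acme',
# parse_privs('CONNECT/products:SELECT,INSERT', 'acme') yields
# {'database': {'acme': set(['CONNECT'])},
#  'table': {'products': set(['SELECT', 'INSERT'])}}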
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default="postgres"),
login_password=dict(default=""),
login_host=dict(default=""),
login_unix_socket=dict(default=""),
user=dict(required=True, aliases=['name']),
password=dict(default=None),
state=dict(default="present", choices=["absent", "present"]),
priv=dict(default=None),
db=dict(default=''),
port=dict(default='5432'),
fail_on_user=dict(type='bool', default='yes'),
role_attr_flags=dict(default=''),
encrypted=dict(type='bool', default='no'),
no_password_changes=dict(type='bool', default='no'),
expires=dict(default=None)
),
supports_check_mode = True
)
user = module.params["user"]
password = module.params["password"]
state = module.params["state"]
fail_on_user = module.params["fail_on_user"]
db = module.params["db"]
if db == '' and module.params["priv"] is not None:
module.fail_json(msg="privileges require a database to be specified")
privs = parse_privs(module.params["priv"], db)
port = module.params["port"]
no_password_changes = module.params["no_password_changes"]
try:
role_attr_flags = parse_role_attrs(module.params["role_attr_flags"])
except InvalidFlagsError, e:
module.fail_json(msg=str(e))
if module.params["encrypted"]:
encrypted = "ENCRYPTED"
else:
encrypted = "UNENCRYPTED"
expires = module.params["expires"]
if not postgresqldb_found:
module.fail_json(msg="the python psycopg2 module is required")
    # To use default values, keyword arguments must be absent, so
# check which values are empty and don't include in the **kw
# dictionary
params_map = {
"login_host":"host",
"login_user":"user",
"login_password":"password",
"port":"port",
"db":"database"
}
kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems()
if k in params_map and v != "" )
# If a login_unix_socket is specified, incorporate it here.
is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
if is_localhost and module.params["login_unix_socket"] != "":
kw["host"] = module.params["login_unix_socket"]
try:
db_connection = psycopg2.connect(**kw)
cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
except Exception, e:
module.fail_json(msg="unable to connect to database: %s" % e)
kw = dict(user=user)
changed = False
user_removed = False
if state == "present":
if user_exists(cursor, user):
try:
changed = user_alter(cursor, module, user, password, role_attr_flags, encrypted, expires, no_password_changes)
except SQLParseError, e:
module.fail_json(msg=str(e))
else:
try:
changed = user_add(cursor, user, password, role_attr_flags, encrypted, expires)
except SQLParseError, e:
module.fail_json(msg=str(e))
try:
changed = grant_privileges(cursor, user, privs) or changed
except SQLParseError, e:
module.fail_json(msg=str(e))
else:
if user_exists(cursor, user):
if module.check_mode:
changed = True
kw['user_removed'] = True
else:
try:
changed = revoke_privileges(cursor, user, privs)
user_removed = user_delete(cursor, user)
except SQLParseError, e:
module.fail_json(msg=str(e))
changed = changed or user_removed
if fail_on_user and not user_removed:
msg = "unable to remove user"
module.fail_json(msg=msg)
kw['user_removed'] = user_removed
if changed:
if module.check_mode:
db_connection.rollback()
else:
db_connection.commit()
kw['changed'] = changed
module.exit_json(**kw)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.database import *
main()
|
gpl-3.0
|
KellyChan/python-examples
|
python/tools/barcode_query.py
|
3
|
1044
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'Kelly Chan'
__date__ = 'Oct 1 2014'
__version__ = '1.0.0'
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import json
import httplib
def ConnectDatabase(barcode):
url = 'http://setup.3533.com/ean/index?keyword=' + str(barcode)
connection = httplib.HTTPConnection("setup.3533.com")
connection.request(method='GET', url=url)
response = connection.getresponse().read()
info = json.loads(response)
return info
def PrintInfo(info):
print "code: " + info.get("ean","null")
print "name: " + info.get("name","null")
print "price: " + str(info.get("price","null"))
print "supplier: " + info.get("supplier","null")
print "factory: " + info.get("production","null")
print ""
if __name__ == '__main__':
    info = ConnectDatabase(6939354800469)
    PrintInfo(info)
    info = ConnectDatabase(6917878002972)
    PrintInfo(info)
    info = ConnectDatabase(6925785604585)
    PrintInfo(info)
|
mit
|
JulieWestfall/django-calaccess-raw-data
|
example/toolbox/management/commands/dumpuniquecontributors.py
|
32
|
3398
|
from __future__ import unicode_literals
from csvkit import CSVKitWriter
from calaccess_raw import models
from django.db import connection
from calaccess_raw.management.commands import CalAccessCommand
class Command(CalAccessCommand):
help = 'Dump all of the unique campaign contributor names'
def handle(self, *args, **options):
self.cursor = connection.cursor()
sql = """
SELECT
title,
first_name,
last_name,
suffix,
occupation,
employer,
address1,
address2,
city,
state,
zipcode,
committee_id,
COUNT(*)
FROM (
SELECT
ctrib_namt as title,
ctrib_namf as first_name,
ctrib_naml as last_name,
ctrib_nams as suffix,
ctrib_occ as occupation,
ctrib_emp as employer,
ctrib_adr1 as address1,
ctrib_adr2 as address2,
ctrib_city as city,
ctrib_st as state,
ctrib_zip4 as zipcode,
cmte_id as committee_id
FROM %(rcpt)s
UNION ALL
SELECT
lndr_namt as title,
lndr_namf as first_name,
lndr_naml as last_name,
lndr_nams as suffix,
loan_occ as occupation,
loan_emp as employer,
loan_adr1 as address1,
loan_adr2 as address2,
loan_city as city,
loan_st as state,
loan_zip4 as zipcode,
cmte_id as committee_id
FROM %(loan)s
UNION ALL
SELECT
enty_namt as title,
enty_namf as first_name,
enty_naml as last_name,
enty_nams as suffix,
ctrib_occ as occupation,
ctrib_emp as employer,
'' as address1,
'' as address2,
enty_city as city,
enty_st as state,
enty_zip4 as zipcode,
cmte_id as committee_id
FROM %(s497)s
) as t
GROUP BY
title,
first_name,
last_name,
suffix,
occupation,
employer,
address1,
address2,
city,
state,
zipcode,
committee_id
ORDER BY
last_name,
first_name,
suffix,
title,
city,
state,
occupation,
employer
""" % dict(
rcpt=models.RcptCd._meta.db_table,
loan=models.LoanCd._meta.db_table,
s497=models.S497Cd._meta.db_table,
)
self.cursor.execute(sql)
writer = CSVKitWriter(open("./contributors.csv", 'wb'))
writer.writerow([
'title',
'first_name',
'last_name',
'suffix',
'occupation',
'employer',
'address1',
'address2',
'city',
'state',
'zipcode',
'committee_id',
'count'
])
writer.writerows(self.cursor.fetchall())
|
mit
|
beiko-lab/gengis
|
bin/Lib/distutils/tests/test_upload.py
|
10
|
3719
|
# -*- encoding: utf8 -*-
"""Tests for distutils.command.upload."""
import os
import unittest
from test.test_support import run_unittest
from distutils.command import upload as upload_mod
from distutils.command.upload import upload
from distutils.core import Distribution
from distutils.tests.test_config import PYPIRC, PyPIRCCommandTestCase
PYPIRC_LONG_PASSWORD = """\
[distutils]
index-servers =
server1
server2
[server1]
username:me
password:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
[server2]
username:meagain
password: secret
realm:acme
repository:http://another.pypi/
"""
PYPIRC_NOPASSWORD = """\
[distutils]
index-servers =
server1
[server1]
username:me
"""
class FakeOpen(object):
def __init__(self, url):
self.url = url
if not isinstance(url, str):
self.req = url
else:
self.req = None
self.msg = 'OK'
def getcode(self):
return 200
class uploadTestCase(PyPIRCCommandTestCase):
def setUp(self):
super(uploadTestCase, self).setUp()
self.old_open = upload_mod.urlopen
upload_mod.urlopen = self._urlopen
self.last_open = None
def tearDown(self):
upload_mod.urlopen = self.old_open
super(uploadTestCase, self).tearDown()
def _urlopen(self, url):
self.last_open = FakeOpen(url)
return self.last_open
def test_finalize_options(self):
# new format
self.write_file(self.rc, PYPIRC)
dist = Distribution()
cmd = upload(dist)
cmd.finalize_options()
for attr, waited in (('username', 'me'), ('password', 'secret'),
('realm', 'pypi'),
('repository', 'http://pypi.python.org/pypi')):
self.assertEqual(getattr(cmd, attr), waited)
def test_saved_password(self):
# file with no password
self.write_file(self.rc, PYPIRC_NOPASSWORD)
# make sure it passes
dist = Distribution()
cmd = upload(dist)
cmd.finalize_options()
self.assertEqual(cmd.password, None)
# make sure we get it as well, if another command
# initialized it at the dist level
dist.password = 'xxx'
cmd = upload(dist)
cmd.finalize_options()
self.assertEqual(cmd.password, 'xxx')
def test_upload(self):
tmp = self.mkdtemp()
path = os.path.join(tmp, 'xxx')
self.write_file(path)
command, pyversion, filename = 'xxx', '2.6', path
dist_files = [(command, pyversion, filename)]
self.write_file(self.rc, PYPIRC_LONG_PASSWORD)
        # let's run it
pkg_dir, dist = self.create_dist(dist_files=dist_files, author=u'dédé')
cmd = upload(dist)
cmd.ensure_finalized()
cmd.run()
        # what did we send?
self.assertIn('dédé', self.last_open.req.data)
headers = dict(self.last_open.req.headers)
self.assertEqual(headers['Content-length'], '2085')
self.assertTrue(headers['Content-type'].startswith('multipart/form-data'))
self.assertEqual(self.last_open.req.get_method(), 'POST')
self.assertEqual(self.last_open.req.get_full_url(),
'http://pypi.python.org/pypi')
self.assertTrue('xxx' in self.last_open.req.data)
auth = self.last_open.req.headers['Authorization']
self.assertFalse('\n' in auth)
def test_suite():
return unittest.makeSuite(uploadTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
|
gpl-3.0
|
hzlf/openbroadcast
|
website/filer/tests/server_backends.py
|
25
|
4610
|
#-*- coding: utf-8 -*-
import time
import shutil
import os
from django.core.files.uploadedfile import SimpleUploadedFile
from django.http import HttpResponseNotModified, Http404
from django.test import TestCase
from django.utils.http import http_date
from filer import settings as filer_settings
from filer.models import File
from filer.server.backends.default import DefaultServer
from filer.server.backends.nginx import NginxXAccelRedirectServer
from filer.server.backends.xsendfile import ApacheXSendfileServer
from filer.tests.helpers import create_image
from filer.tests.utils import Mock
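# These tests cover the three file-serving backends: Django serving the file
# itself, nginx via X-Accel-Redirect and Apache via X-Sendfile.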
class BaseServerBackendTestCase(TestCase):
def setUp(self):
original_filename = 'testimage.jpg'
file_obj = SimpleUploadedFile(
name=original_filename,
content=create_image().tostring(),
content_type='image/jpeg')
self.filer_file = File.objects.create(
is_public=False,
file=file_obj,
original_filename=original_filename)
def tearDown(self):
self.filer_file.delete()
class DefaultServerTestCase(BaseServerBackendTestCase):
def test_normal(self):
server = DefaultServer()
request = Mock()
request.META = {}
response = server.serve(request, self.filer_file.file)
self.assertTrue(response.has_header('Last-Modified'))
def test_not_modified(self):
server = DefaultServer()
request = Mock()
request.META = {'HTTP_IF_MODIFIED_SINCE': http_date(time.time())}
response = server.serve(request, self.filer_file.file)
self.assertTrue(isinstance(response, HttpResponseNotModified))
def test_missing_file(self):
server = DefaultServer()
request = Mock()
request.META = {}
os.remove(self.filer_file.file.path)
self.assertRaises(Http404, server.serve, *(request, self.filer_file.file))
class NginxServerTestCase(BaseServerBackendTestCase):
def setUp(self):
super(NginxServerTestCase, self).setUp()
self.server = NginxXAccelRedirectServer(
location=filer_settings.FILER_PRIVATEMEDIA_STORAGE.location,
nginx_location='mylocation',
)
def test_normal(self):
request = Mock()
request.META = {}
response = self.server.serve(request, self.filer_file.file)
headers = dict(response.items())
self.assertTrue(response.has_header('X-Accel-Redirect'))
self.assertTrue(headers['X-Accel-Redirect'].startswith(self.server.nginx_location))
        # make sure the file object was never opened (otherwise delegating to
        # nginx would be pointless)
self.assertTrue(self.filer_file.file.closed)
def test_missing_file(self):
"""
this backend should not even notice if the file is missing.
"""
request = Mock()
request.META = {}
os.remove(self.filer_file.file.path)
response = self.server.serve(request, self.filer_file.file)
headers = dict(response.items())
self.assertTrue(response.has_header('X-Accel-Redirect'))
self.assertTrue(headers['X-Accel-Redirect'].startswith(self.server.nginx_location))
self.assertTrue(self.filer_file.file.closed)
class XSendfileServerTestCase(BaseServerBackendTestCase):
def setUp(self):
super(XSendfileServerTestCase, self).setUp()
self.server = ApacheXSendfileServer()
def test_normal(self):
request = Mock()
request.META = {}
response = self.server.serve(request, self.filer_file.file)
headers = dict(response.items())
self.assertTrue(response.has_header('X-Sendfile'))
self.assertEqual(headers['X-Sendfile'], self.filer_file.file.path)
        # make sure the file object was never opened (otherwise delegating to
        # Apache would be pointless)
self.assertTrue(self.filer_file.file.closed)
def test_missing_file(self):
"""
this backend should not even notice if the file is missing.
"""
request = Mock()
request.META = {}
os.remove(self.filer_file.file.path)
response = self.server.serve(request, self.filer_file.file)
headers = dict(response.items())
self.assertTrue(response.has_header('X-Sendfile'))
self.assertEqual(headers['X-Sendfile'], self.filer_file.file.path)
        # make sure the file object was never opened (otherwise delegating to
        # Apache would be pointless)
self.assertTrue(self.filer_file.file.closed)
|
gpl-3.0
|
aequitas/home-assistant
|
homeassistant/components/eliqonline/sensor.py
|
6
|
2917
|
"""Monitors home energy use for the ELIQ Online service."""
from datetime import timedelta
import logging
import asyncio
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (CONF_ACCESS_TOKEN, CONF_NAME, POWER_WATT)
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
_LOGGER = logging.getLogger(__name__)
CONF_CHANNEL_ID = 'channel_id'
DEFAULT_NAME = 'ELIQ Online'
ICON = 'mdi:gauge'
SCAN_INTERVAL = timedelta(seconds=60)
UNIT_OF_MEASUREMENT = POWER_WATT
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_ACCESS_TOKEN): cv.string,
vol.Required(CONF_CHANNEL_ID): cv.positive_int,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
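# Probe the API once at setup time so that a bad token or channel id aborts
# the platform setup instead of creating a broken entity.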
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up the ELIQ Online sensor."""
import eliqonline
access_token = config.get(CONF_ACCESS_TOKEN)
name = config.get(CONF_NAME, DEFAULT_NAME)
channel_id = config.get(CONF_CHANNEL_ID)
session = async_get_clientsession(hass)
api = eliqonline.API(session=session,
access_token=access_token)
try:
_LOGGER.debug("Probing for access to ELIQ Online API")
await api.get_data_now(channelid=channel_id)
except OSError as error:
_LOGGER.error("Could not access the ELIQ Online API: %s", error)
return False
async_add_entities([EliqSensor(api, channel_id, name)], True)
class EliqSensor(Entity):
"""Implementation of an ELIQ Online sensor."""
def __init__(self, api, channel_id, name):
"""Initialize the sensor."""
self._name = name
self._state = None
self._api = api
self._channel_id = channel_id
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Return icon."""
return ICON
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return UNIT_OF_MEASUREMENT
@property
def state(self):
"""Return the state of the device."""
return self._state
async def async_update(self):
"""Get the latest data."""
try:
response = await self._api.get_data_now(channelid=self._channel_id)
self._state = int(response["power"])
_LOGGER.debug("Updated power from server %d W", self._state)
except KeyError:
_LOGGER.warning("Invalid response from ELIQ Online API")
except (OSError, asyncio.TimeoutError) as error:
_LOGGER.warning("Could not connect to the ELIQ Online API: %s",
error)
|
apache-2.0
|
kutenai/django
|
tests/lookup/tests.py
|
27
|
38241
|
from __future__ import unicode_literals
import collections
from datetime import datetime
from operator import attrgetter
from unittest import skipUnless
from django.core.exceptions import FieldError
from django.db import connection
from django.test import (
TestCase, TransactionTestCase, ignore_warnings, skipUnlessDBFeature,
)
from django.utils.deprecation import RemovedInDjango20Warning
from .models import Article, Author, Game, MyISAMArticle, Player, Season, Tag
class LookupTests(TestCase):
def setUp(self):
# Create a few Authors.
self.au1 = Author.objects.create(name='Author 1')
self.au2 = Author.objects.create(name='Author 2')
# Create a couple of Articles.
self.a1 = Article.objects.create(headline='Article 1', pub_date=datetime(2005, 7, 26), author=self.au1)
self.a2 = Article.objects.create(headline='Article 2', pub_date=datetime(2005, 7, 27), author=self.au1)
self.a3 = Article.objects.create(headline='Article 3', pub_date=datetime(2005, 7, 27), author=self.au1)
self.a4 = Article.objects.create(headline='Article 4', pub_date=datetime(2005, 7, 28), author=self.au1)
self.a5 = Article.objects.create(headline='Article 5', pub_date=datetime(2005, 8, 1, 9, 0), author=self.au2)
self.a6 = Article.objects.create(headline='Article 6', pub_date=datetime(2005, 8, 1, 8, 0), author=self.au2)
self.a7 = Article.objects.create(headline='Article 7', pub_date=datetime(2005, 7, 27), author=self.au2)
# Create a few Tags.
self.t1 = Tag.objects.create(name='Tag 1')
self.t1.articles.add(self.a1, self.a2, self.a3)
self.t2 = Tag.objects.create(name='Tag 2')
self.t2.articles.add(self.a3, self.a4, self.a5)
self.t3 = Tag.objects.create(name='Tag 3')
self.t3.articles.add(self.a5, self.a6, self.a7)
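        # Article's default Meta ordering is newest pub_date first, then
        # headline, which is why 'Article 5' leads most expected results below.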
def test_exists(self):
        # We can use .exists() to check that there are some Articles in the database.
self.assertTrue(Article.objects.exists())
for a in Article.objects.all():
a.delete()
# There should be none now!
self.assertFalse(Article.objects.exists())
def test_lookup_int_as_str(self):
# Integer value can be queried using string
self.assertQuerysetEqual(Article.objects.filter(id__iexact=str(self.a1.id)),
['<Article: Article 1>'])
@skipUnlessDBFeature('supports_date_lookup_using_string')
def test_lookup_date_as_str(self):
# A date lookup can be performed using a string search
self.assertQuerysetEqual(
Article.objects.filter(pub_date__startswith='2005'),
[
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
]
)
def test_iterator(self):
# Each QuerySet gets iterator(), which is a generator that "lazily"
# returns results using database-level iteration.
self.assertIsInstance(Article.objects.iterator(), collections.Iterator)
self.assertQuerysetEqual(
Article.objects.iterator(),
[
'Article 5',
'Article 6',
'Article 4',
'Article 2',
'Article 3',
'Article 7',
'Article 1',
],
transform=attrgetter('headline')
)
# iterator() can be used on any QuerySet.
self.assertQuerysetEqual(
Article.objects.filter(headline__endswith='4').iterator(),
['Article 4'],
transform=attrgetter('headline'))
def test_count(self):
# count() returns the number of objects matching search criteria.
self.assertEqual(Article.objects.count(), 7)
self.assertEqual(Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).count(), 3)
self.assertEqual(Article.objects.filter(headline__startswith='Blah blah').count(), 0)
# count() should respect sliced query sets.
articles = Article.objects.all()
self.assertEqual(articles.count(), 7)
self.assertEqual(articles[:4].count(), 4)
self.assertEqual(articles[1:100].count(), 6)
self.assertEqual(articles[10:100].count(), 0)
# Date and date/time lookups can also be done with strings.
self.assertEqual(Article.objects.filter(pub_date__exact='2005-07-27 00:00:00').count(), 3)
def test_in_bulk(self):
# in_bulk() takes a list of IDs and returns a dictionary mapping IDs to objects.
arts = Article.objects.in_bulk([self.a1.id, self.a2.id])
self.assertEqual(arts[self.a1.id], self.a1)
self.assertEqual(arts[self.a2.id], self.a2)
self.assertEqual(
Article.objects.in_bulk(),
{
self.a1.id: self.a1,
self.a2.id: self.a2,
self.a3.id: self.a3,
self.a4.id: self.a4,
self.a5.id: self.a5,
self.a6.id: self.a6,
self.a7.id: self.a7,
}
)
self.assertEqual(Article.objects.in_bulk([self.a3.id]), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk({self.a3.id}), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk(frozenset([self.a3.id])), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk((self.a3.id,)), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk([1000]), {})
self.assertEqual(Article.objects.in_bulk([]), {})
self.assertEqual(Article.objects.in_bulk(iter([self.a1.id])), {self.a1.id: self.a1})
self.assertEqual(Article.objects.in_bulk(iter([])), {})
with self.assertRaises(TypeError):
Article.objects.in_bulk(headline__startswith='Blah')
def test_values(self):
# values() returns a list of dictionaries instead of object instances --
# and you can specify which fields you want to retrieve.
def identity(x):
return x
self.assertQuerysetEqual(
Article.objects.values('headline'),
[
{'headline': 'Article 5'},
{'headline': 'Article 6'},
{'headline': 'Article 4'},
{'headline': 'Article 2'},
{'headline': 'Article 3'},
{'headline': 'Article 7'},
{'headline': 'Article 1'},
],
transform=identity
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).values('id'),
[{'id': self.a2.id}, {'id': self.a3.id}, {'id': self.a7.id}],
transform=identity
)
self.assertQuerysetEqual(
Article.objects.values('id', 'headline'),
[
{'id': self.a5.id, 'headline': 'Article 5'},
{'id': self.a6.id, 'headline': 'Article 6'},
{'id': self.a4.id, 'headline': 'Article 4'},
{'id': self.a2.id, 'headline': 'Article 2'},
{'id': self.a3.id, 'headline': 'Article 3'},
{'id': self.a7.id, 'headline': 'Article 7'},
{'id': self.a1.id, 'headline': 'Article 1'},
],
transform=identity
)
# You can use values() with iterator() for memory savings,
# because iterator() uses database-level iteration.
self.assertQuerysetEqual(
Article.objects.values('id', 'headline').iterator(),
[
{'headline': 'Article 5', 'id': self.a5.id},
{'headline': 'Article 6', 'id': self.a6.id},
{'headline': 'Article 4', 'id': self.a4.id},
{'headline': 'Article 2', 'id': self.a2.id},
{'headline': 'Article 3', 'id': self.a3.id},
{'headline': 'Article 7', 'id': self.a7.id},
{'headline': 'Article 1', 'id': self.a1.id},
],
transform=identity
)
# The values() method works with "extra" fields specified in extra(select).
self.assertQuerysetEqual(
Article.objects.extra(select={'id_plus_one': 'id + 1'}).values('id', 'id_plus_one'),
[
{'id': self.a5.id, 'id_plus_one': self.a5.id + 1},
{'id': self.a6.id, 'id_plus_one': self.a6.id + 1},
{'id': self.a4.id, 'id_plus_one': self.a4.id + 1},
{'id': self.a2.id, 'id_plus_one': self.a2.id + 1},
{'id': self.a3.id, 'id_plus_one': self.a3.id + 1},
{'id': self.a7.id, 'id_plus_one': self.a7.id + 1},
{'id': self.a1.id, 'id_plus_one': self.a1.id + 1},
],
transform=identity
)
data = {
'id_plus_one': 'id+1',
'id_plus_two': 'id+2',
'id_plus_three': 'id+3',
'id_plus_four': 'id+4',
'id_plus_five': 'id+5',
'id_plus_six': 'id+6',
'id_plus_seven': 'id+7',
'id_plus_eight': 'id+8',
}
self.assertQuerysetEqual(
Article.objects.filter(id=self.a1.id).extra(select=data).values(*data.keys()),
[{
'id_plus_one': self.a1.id + 1,
'id_plus_two': self.a1.id + 2,
'id_plus_three': self.a1.id + 3,
'id_plus_four': self.a1.id + 4,
'id_plus_five': self.a1.id + 5,
'id_plus_six': self.a1.id + 6,
'id_plus_seven': self.a1.id + 7,
'id_plus_eight': self.a1.id + 8,
}], transform=identity
)
# You can specify fields from forward and reverse relations, just like filter().
self.assertQuerysetEqual(
Article.objects.values('headline', 'author__name'),
[
{'headline': self.a5.headline, 'author__name': self.au2.name},
{'headline': self.a6.headline, 'author__name': self.au2.name},
{'headline': self.a4.headline, 'author__name': self.au1.name},
{'headline': self.a2.headline, 'author__name': self.au1.name},
{'headline': self.a3.headline, 'author__name': self.au1.name},
{'headline': self.a7.headline, 'author__name': self.au2.name},
{'headline': self.a1.headline, 'author__name': self.au1.name},
], transform=identity
)
self.assertQuerysetEqual(
Author.objects.values('name', 'article__headline').order_by('name', 'article__headline'),
[
{'name': self.au1.name, 'article__headline': self.a1.headline},
{'name': self.au1.name, 'article__headline': self.a2.headline},
{'name': self.au1.name, 'article__headline': self.a3.headline},
{'name': self.au1.name, 'article__headline': self.a4.headline},
{'name': self.au2.name, 'article__headline': self.a5.headline},
{'name': self.au2.name, 'article__headline': self.a6.headline},
{'name': self.au2.name, 'article__headline': self.a7.headline},
], transform=identity
)
self.assertQuerysetEqual(
(
Author.objects
.values('name', 'article__headline', 'article__tag__name')
.order_by('name', 'article__headline', 'article__tag__name')
),
[
{'name': self.au1.name, 'article__headline': self.a1.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a2.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t2.name},
{'name': self.au1.name, 'article__headline': self.a4.headline, 'article__tag__name': self.t2.name},
{'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t2.name},
{'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t3.name},
{'name': self.au2.name, 'article__headline': self.a6.headline, 'article__tag__name': self.t3.name},
{'name': self.au2.name, 'article__headline': self.a7.headline, 'article__tag__name': self.t3.name},
], transform=identity
)
        # However, a FieldError will be raised if you specify a non-existent
        # field name in values() (a field that is neither in the model nor in
        # extra(select)).
with self.assertRaises(FieldError):
Article.objects.extra(select={'id_plus_one': 'id + 1'}).values('id', 'id_plus_two')
# If you don't specify field names to values(), all are returned.
self.assertQuerysetEqual(
Article.objects.filter(id=self.a5.id).values(),
[{
'id': self.a5.id,
'author_id': self.au2.id,
'headline': 'Article 5',
'pub_date': datetime(2005, 8, 1, 9, 0)
}],
transform=identity
)
def test_values_list(self):
# values_list() is similar to values(), except that the results are
# returned as a list of tuples, rather than a list of dictionaries.
# Within each tuple, the order of the elements is the same as the order
# of fields in the values_list() call.
def identity(x):
return x
self.assertQuerysetEqual(
Article.objects.values_list('headline'),
[
('Article 5',),
('Article 6',),
('Article 4',),
('Article 2',),
('Article 3',),
('Article 7',),
('Article 1',),
], transform=identity
)
self.assertQuerysetEqual(
Article.objects.values_list('id').order_by('id'),
[(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)],
transform=identity
)
self.assertQuerysetEqual(
Article.objects.values_list('id', flat=True).order_by('id'),
[self.a1.id, self.a2.id, self.a3.id, self.a4.id, self.a5.id, self.a6.id, self.a7.id],
transform=identity
)
self.assertQuerysetEqual(
Article.objects.extra(select={'id_plus_one': 'id+1'}).order_by('id').values_list('id'),
[(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)],
transform=identity
)
self.assertQuerysetEqual(
Article.objects.extra(select={'id_plus_one': 'id+1'}).order_by('id').values_list('id_plus_one', 'id'),
[
(self.a1.id + 1, self.a1.id),
(self.a2.id + 1, self.a2.id),
(self.a3.id + 1, self.a3.id),
(self.a4.id + 1, self.a4.id),
(self.a5.id + 1, self.a5.id),
(self.a6.id + 1, self.a6.id),
(self.a7.id + 1, self.a7.id)
],
transform=identity
)
self.assertQuerysetEqual(
Article.objects.extra(select={'id_plus_one': 'id+1'}).order_by('id').values_list('id', 'id_plus_one'),
[
(self.a1.id, self.a1.id + 1),
(self.a2.id, self.a2.id + 1),
(self.a3.id, self.a3.id + 1),
(self.a4.id, self.a4.id + 1),
(self.a5.id, self.a5.id + 1),
(self.a6.id, self.a6.id + 1),
(self.a7.id, self.a7.id + 1)
],
transform=identity
)
args = ('name', 'article__headline', 'article__tag__name')
self.assertQuerysetEqual(
Author.objects.values_list(*args).order_by(*args),
[
(self.au1.name, self.a1.headline, self.t1.name),
(self.au1.name, self.a2.headline, self.t1.name),
(self.au1.name, self.a3.headline, self.t1.name),
(self.au1.name, self.a3.headline, self.t2.name),
(self.au1.name, self.a4.headline, self.t2.name),
(self.au2.name, self.a5.headline, self.t2.name),
(self.au2.name, self.a5.headline, self.t3.name),
(self.au2.name, self.a6.headline, self.t3.name),
(self.au2.name, self.a7.headline, self.t3.name),
], transform=identity
)
with self.assertRaises(TypeError):
Article.objects.values_list('id', 'headline', flat=True)
def test_get_next_previous_by(self):
# Every DateField and DateTimeField creates get_next_by_FOO() and
# get_previous_by_FOO() methods. In the case of identical date values,
# these methods will use the ID as a fallback check. This guarantees
# that no records are skipped or duplicated.
self.assertEqual(repr(self.a1.get_next_by_pub_date()), '<Article: Article 2>')
self.assertEqual(repr(self.a2.get_next_by_pub_date()), '<Article: Article 3>')
self.assertEqual(repr(self.a2.get_next_by_pub_date(headline__endswith='6')), '<Article: Article 6>')
self.assertEqual(repr(self.a3.get_next_by_pub_date()), '<Article: Article 7>')
self.assertEqual(repr(self.a4.get_next_by_pub_date()), '<Article: Article 6>')
with self.assertRaises(Article.DoesNotExist):
self.a5.get_next_by_pub_date()
self.assertEqual(repr(self.a6.get_next_by_pub_date()), '<Article: Article 5>')
self.assertEqual(repr(self.a7.get_next_by_pub_date()), '<Article: Article 4>')
self.assertEqual(repr(self.a7.get_previous_by_pub_date()), '<Article: Article 3>')
self.assertEqual(repr(self.a6.get_previous_by_pub_date()), '<Article: Article 4>')
self.assertEqual(repr(self.a5.get_previous_by_pub_date()), '<Article: Article 6>')
self.assertEqual(repr(self.a4.get_previous_by_pub_date()), '<Article: Article 7>')
self.assertEqual(repr(self.a3.get_previous_by_pub_date()), '<Article: Article 2>')
self.assertEqual(repr(self.a2.get_previous_by_pub_date()), '<Article: Article 1>')
def test_escaping(self):
# Underscores, percent signs and backslashes have special meaning in the
# underlying SQL code, but Django handles the quoting of them automatically.
Article.objects.create(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20))
self.assertQuerysetEqual(
Article.objects.filter(headline__startswith='Article'),
[
'<Article: Article_ with underscore>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
]
)
self.assertQuerysetEqual(
Article.objects.filter(headline__startswith='Article_'),
['<Article: Article_ with underscore>']
)
Article.objects.create(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21))
self.assertQuerysetEqual(
Article.objects.filter(headline__startswith='Article'),
[
'<Article: Article% with percent sign>',
'<Article: Article_ with underscore>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
]
)
self.assertQuerysetEqual(
Article.objects.filter(headline__startswith='Article%'),
['<Article: Article% with percent sign>']
)
Article.objects.create(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22))
self.assertQuerysetEqual(
Article.objects.filter(headline__contains='\\'),
            ['<Article: Article with \\ backslash>']
)
def test_exclude(self):
Article.objects.create(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20))
Article.objects.create(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21))
Article.objects.create(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22))
# exclude() is the opposite of filter() when doing lookups:
self.assertQuerysetEqual(
Article.objects.filter(headline__contains='Article').exclude(headline__contains='with'),
[
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
]
)
self.assertQuerysetEqual(
Article.objects.exclude(headline__startswith="Article_"),
[
'<Article: Article with \\ backslash>',
'<Article: Article% with percent sign>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
]
)
self.assertQuerysetEqual(
Article.objects.exclude(headline="Article 7"),
[
'<Article: Article with \\ backslash>',
'<Article: Article% with percent sign>',
'<Article: Article_ with underscore>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 1>',
]
)
def test_none(self):
# none() returns a QuerySet that behaves like any other QuerySet object
self.assertQuerysetEqual(Article.objects.none(), [])
self.assertQuerysetEqual(Article.objects.none().filter(headline__startswith='Article'), [])
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article').none(), [])
self.assertEqual(Article.objects.none().count(), 0)
self.assertEqual(Article.objects.none().update(headline="This should not take effect"), 0)
self.assertQuerysetEqual([article for article in Article.objects.none().iterator()], [])
def test_in(self):
# using __in with an empty list should return an empty query set
self.assertQuerysetEqual(Article.objects.filter(id__in=[]), [])
self.assertQuerysetEqual(
Article.objects.exclude(id__in=[]),
[
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
]
)
def test_in_different_database(self):
with self.assertRaisesMessage(
ValueError,
"Subqueries aren't allowed across different databases. Force the "
"inner query to be evaluated using `list(inner_query)`."
):
list(Article.objects.filter(id__in=Article.objects.using('other').all()))
def test_error_messages(self):
# Programming errors are pointed out with nice error messages
with self.assertRaisesMessage(
FieldError,
"Cannot resolve keyword 'pub_date_year' into field. Choices are: "
"author, author_id, headline, id, pub_date, tag"
):
Article.objects.filter(pub_date_year='2005').count()
with self.assertRaisesMessage(
FieldError,
"Unsupported lookup 'starts' for CharField or join on the field "
"not permitted."
):
Article.objects.filter(headline__starts='Article')
def test_relation_nested_lookup_error(self):
# An invalid nested lookup on a related field raises a useful error.
msg = 'Related Field got invalid lookup: editor'
with self.assertRaisesMessage(FieldError, msg):
Article.objects.filter(author__editor__name='James')
def test_regex(self):
# Create some articles with a bit more interesting headlines for testing field lookups:
for a in Article.objects.all():
a.delete()
now = datetime.now()
Article.objects.create(pub_date=now, headline='f')
Article.objects.create(pub_date=now, headline='fo')
Article.objects.create(pub_date=now, headline='foo')
Article.objects.create(pub_date=now, headline='fooo')
Article.objects.create(pub_date=now, headline='hey-Foo')
Article.objects.create(pub_date=now, headline='bar')
Article.objects.create(pub_date=now, headline='AbBa')
Article.objects.create(pub_date=now, headline='baz')
Article.objects.create(pub_date=now, headline='baxZ')
# zero-or-more
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'fo*'),
['<Article: f>', '<Article: fo>', '<Article: foo>', '<Article: fooo>']
)
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r'fo*'),
[
'<Article: f>',
'<Article: fo>',
'<Article: foo>',
'<Article: fooo>',
'<Article: hey-Foo>',
]
)
# one-or-more
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'fo+'),
['<Article: fo>', '<Article: foo>', '<Article: fooo>']
)
# wildcard
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'fooo?'),
['<Article: foo>', '<Article: fooo>']
)
# leading anchor
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'^b'),
['<Article: bar>', '<Article: baxZ>', '<Article: baz>']
)
self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'^a'), ['<Article: AbBa>'])
# trailing anchor
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'z$'), ['<Article: baz>'])
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r'z$'),
['<Article: baxZ>', '<Article: baz>']
)
# character sets
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'ba[rz]'),
['<Article: bar>', '<Article: baz>']
)
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'ba.[RxZ]'), ['<Article: baxZ>'])
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r'ba[RxZ]'),
['<Article: bar>', '<Article: baxZ>', '<Article: baz>']
)
# and more articles:
Article.objects.create(pub_date=now, headline='foobar')
Article.objects.create(pub_date=now, headline='foobaz')
Article.objects.create(pub_date=now, headline='ooF')
Article.objects.create(pub_date=now, headline='foobarbaz')
Article.objects.create(pub_date=now, headline='zoocarfaz')
Article.objects.create(pub_date=now, headline='barfoobaz')
Article.objects.create(pub_date=now, headline='bazbaRFOO')
# alternation
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'oo(f|b)'),
[
'<Article: barfoobaz>',
'<Article: foobar>',
'<Article: foobarbaz>',
'<Article: foobaz>',
]
)
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r'oo(f|b)'),
[
'<Article: barfoobaz>',
'<Article: foobar>',
'<Article: foobarbaz>',
'<Article: foobaz>',
'<Article: ooF>',
]
)
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'^foo(f|b)'),
['<Article: foobar>', '<Article: foobarbaz>', '<Article: foobaz>']
)
# greedy matching
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'b.*az'),
[
'<Article: barfoobaz>',
'<Article: baz>',
'<Article: bazbaRFOO>',
'<Article: foobarbaz>',
'<Article: foobaz>',
]
)
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r'b.*ar'),
[
'<Article: bar>',
'<Article: barfoobaz>',
'<Article: bazbaRFOO>',
'<Article: foobar>',
'<Article: foobarbaz>',
]
)
@skipUnlessDBFeature('supports_regex_backreferencing')
def test_regex_backreferencing(self):
# grouping and backreferences
now = datetime.now()
Article.objects.create(pub_date=now, headline='foobar')
Article.objects.create(pub_date=now, headline='foobaz')
Article.objects.create(pub_date=now, headline='ooF')
Article.objects.create(pub_date=now, headline='foobarbaz')
Article.objects.create(pub_date=now, headline='zoocarfaz')
Article.objects.create(pub_date=now, headline='barfoobaz')
Article.objects.create(pub_date=now, headline='bazbaRFOO')
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'b(.).*b\1'),
['<Article: barfoobaz>', '<Article: bazbaRFOO>', '<Article: foobarbaz>']
)
def test_regex_null(self):
"""
Ensure that a regex lookup does not fail on null/None values
"""
Season.objects.create(year=2012, gt=None)
self.assertQuerysetEqual(Season.objects.filter(gt__regex=r'^$'), [])
def test_regex_non_string(self):
"""
Ensure that a regex lookup does not fail on non-string fields
"""
Season.objects.create(year=2013, gt=444)
self.assertQuerysetEqual(Season.objects.filter(gt__regex=r'^444$'), ['<Season: 2013>'])
def test_regex_non_ascii(self):
"""
Ensure that a regex lookup does not trip on non-ASCII characters.
"""
Player.objects.create(name='\u2660')
Player.objects.get(name__regex='\u2660')
def test_nonfield_lookups(self):
"""
Ensure that a lookup query containing non-fields raises the proper
exception.
"""
with self.assertRaises(FieldError):
Article.objects.filter(headline__blahblah=99)
with self.assertRaises(FieldError):
Article.objects.filter(headline__blahblah__exact=99)
with self.assertRaises(FieldError):
Article.objects.filter(blahblah=99)
def test_lookup_collision(self):
"""
Ensure that genuine field names don't collide with built-in lookup
types ('year', 'gt', 'range', 'in' etc.).
Refs #11670.
"""
# Here we're using 'gt' as a code number for the year, e.g. 111=>2009.
season_2009 = Season.objects.create(year=2009, gt=111)
season_2009.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2010 = Season.objects.create(year=2010, gt=222)
season_2010.games.create(home="Houston Astros", away="Chicago Cubs")
season_2010.games.create(home="Houston Astros", away="Milwaukee Brewers")
season_2010.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2011 = Season.objects.create(year=2011, gt=333)
season_2011.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2011.games.create(home="Houston Astros", away="Milwaukee Brewers")
hunter_pence = Player.objects.create(name="Hunter Pence")
hunter_pence.games.set(Game.objects.filter(season__year__in=[2009, 2010]))
pudge = Player.objects.create(name="Ivan Rodriquez")
pudge.games.set(Game.objects.filter(season__year=2009))
pedro_feliz = Player.objects.create(name="Pedro Feliz")
pedro_feliz.games.set(Game.objects.filter(season__year__in=[2011]))
johnson = Player.objects.create(name="Johnson")
johnson.games.set(Game.objects.filter(season__year__in=[2011]))
# Games in 2010
self.assertEqual(Game.objects.filter(season__year=2010).count(), 3)
self.assertEqual(Game.objects.filter(season__year__exact=2010).count(), 3)
self.assertEqual(Game.objects.filter(season__gt=222).count(), 3)
self.assertEqual(Game.objects.filter(season__gt__exact=222).count(), 3)
# Games in 2011
self.assertEqual(Game.objects.filter(season__year=2011).count(), 2)
self.assertEqual(Game.objects.filter(season__year__exact=2011).count(), 2)
self.assertEqual(Game.objects.filter(season__gt=333).count(), 2)
self.assertEqual(Game.objects.filter(season__gt__exact=333).count(), 2)
self.assertEqual(Game.objects.filter(season__year__gt=2010).count(), 2)
self.assertEqual(Game.objects.filter(season__gt__gt=222).count(), 2)
# Games played in 2010 and 2011
self.assertEqual(Game.objects.filter(season__year__in=[2010, 2011]).count(), 5)
self.assertEqual(Game.objects.filter(season__year__gt=2009).count(), 5)
self.assertEqual(Game.objects.filter(season__gt__in=[222, 333]).count(), 5)
self.assertEqual(Game.objects.filter(season__gt__gt=111).count(), 5)
# Players who played in 2009
self.assertEqual(Player.objects.filter(games__season__year=2009).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__exact=2009).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt=111).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt__exact=111).distinct().count(), 2)
# Players who played in 2010
self.assertEqual(Player.objects.filter(games__season__year=2010).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__year__exact=2010).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__gt=222).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__gt__exact=222).distinct().count(), 1)
# Players who played in 2011
self.assertEqual(Player.objects.filter(games__season__year=2011).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__exact=2011).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt=333).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__gt=2010).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt__gt=222).distinct().count(), 2)
def test_chain_date_time_lookups(self):
self.assertQuerysetEqual(
Article.objects.filter(pub_date__month__gt=7),
['<Article: Article 5>', '<Article: Article 6>'],
ordered=False
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__day__gte=27),
['<Article: Article 2>', '<Article: Article 3>',
'<Article: Article 4>', '<Article: Article 7>'],
ordered=False
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__hour__lt=8),
['<Article: Article 1>', '<Article: Article 2>',
'<Article: Article 3>', '<Article: Article 4>',
'<Article: Article 7>'],
ordered=False
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__minute__lte=0),
['<Article: Article 1>', '<Article: Article 2>',
'<Article: Article 3>', '<Article: Article 4>',
'<Article: Article 5>', '<Article: Article 6>',
'<Article: Article 7>'],
ordered=False
)
class LookupTransactionTests(TransactionTestCase):
available_apps = ['lookup']
@ignore_warnings(category=RemovedInDjango20Warning)
@skipUnless(connection.vendor == 'mysql', 'requires MySQL')
def test_mysql_lookup_search(self):
# To use fulltext indexes on MySQL either version 5.6 is needed, or one must use
# MyISAM tables. Neither of these combinations is currently available on CI, so
        # let's manually create a MyISAM table for the Article model.
with connection.cursor() as cursor:
cursor.execute(
"CREATE TEMPORARY TABLE myisam_article ("
" id INTEGER PRIMARY KEY AUTO_INCREMENT, "
" headline VARCHAR(100) NOT NULL "
") ENGINE MYISAM")
dr = MyISAMArticle.objects.create(headline='Django Reinhardt')
MyISAMArticle.objects.create(headline='Ringo Star')
# NOTE: Needs to be created after the article has been saved.
cursor.execute(
'CREATE FULLTEXT INDEX myisam_article_ft ON myisam_article (headline)')
self.assertQuerysetEqual(
MyISAMArticle.objects.filter(headline__search='Reinhardt'),
[dr], lambda x: x)
|
bsd-3-clause
|
fengxiaoiie/volatility
|
tools/mac/generate_profile_list.py
|
12
|
3305
|
# Volatility
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
'''
This helper script generates (kernel version, version address) pairs
to help build the list used by mac_get_profiles.
Run it from the Mac directory of the Volatility profiles repo
'''
import os, sys, re
import zipfile
def parse_dsymutil(data, module):
"""Parse the symbol file."""
sys_map = {}
sys_map[module] = {}
want_lower = ["_IdlePML4"]
type_map = {}
type_map[module] = {}
# get the system map
for line in data.splitlines():
ents = line.split()
        match = re.search(r"\[.*?\(([^\)]+)\)\s+[0-9A-Fa-z]+\s+\d+\s+([0-9A-Fa-f]+)\s'(\w+)'", line)
if match:
(sym_type, addr, name) = match.groups()
sym_type = sym_type.strip()
addr = int(addr, 16)
if addr == 0 or name == "":
continue
if not name in sys_map[module]:
sys_map[module][name] = [(addr, sym_type)]
# every symbol is in the symbol table twice
# except for the entries in 'want_lower', we need the higher address for all
oldaddr = sys_map[module][name][0][0]
if addr < oldaddr and name in want_lower:
sys_map[module][name] = [(addr, sym_type)]
if not addr in type_map[module]:
type_map[module][addr] = (name, [sym_type])
type_map[module][addr][1].append(sym_type)
return sys_map["kernel"]
print "profiles = ["
for path in set("."):
for path, _, files in os.walk(path):
for fn in files:
if zipfile.is_zipfile(os.path.join(path, fn)):
profpkg = zipfile.ZipFile(os.path.join(path, fn))
for f in profpkg.filelist:
if 'symbol.dsymutil' in f.filename.lower():
data = parse_dsymutil(profpkg.read(f.filename), "kernel")
if "_lowGlo" in data:
lg = data["_lowGlo"][0][0]
else:
lg = "0"
if "_BootPML4" in data:
aslr = 1
else:
aslr = 0
name = fn.replace(".zip", "")
name = 'Mac' + name.replace('.', '_')
if name.find("Intel") == -1:
name = name + "x64"
else:
name = name + "x86"
print "[\"%s\", %s, %s, %d]," % (name, data["_version"][0][0], lg, aslr)
print "]"
|
gpl-2.0
|
rickerc/nova_audit
|
tools/xenserver/vdi_chain_cleanup.py
|
139
|
3678
|
#!/usr/bin/env python
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is designed to clean up any VHDs (and their descendants) which
have a bad parent pointer.
The script needs to be run in the dom0 of the affected host.
The available actions are:
- print: display the filenames of the affected VHDs
- delete: remove the affected VHDs
- move: move the affected VHDs out of the SR into another directory
"""
import glob
import os
import subprocess
import sys
class ExecutionFailed(Exception):
def __init__(self, returncode, stdout, stderr, max_stream_length=32):
self.returncode = returncode
self.stdout = stdout[:max_stream_length]
self.stderr = stderr[:max_stream_length]
self.max_stream_length = max_stream_length
def __repr__(self):
return "<ExecutionFailed returncode=%s out='%s' stderr='%s'>" % (
self.returncode, self.stdout, self.stderr)
__str__ = __repr__
def execute(cmd, ok_exit_codes=None):
if ok_exit_codes is None:
ok_exit_codes = [0]
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
if proc.returncode not in ok_exit_codes:
raise ExecutionFailed(proc.returncode, stdout, stderr)
return proc.returncode, stdout, stderr
def usage():
print "usage: %s <SR PATH> <print|delete|move>" % sys.argv[0]
sys.exit(1)
def main():
if len(sys.argv) < 3:
usage()
sr_path = sys.argv[1]
action = sys.argv[2]
if action not in ('print', 'delete', 'move'):
usage()
if action == 'move':
if len(sys.argv) < 4:
print "error: must specify where to move bad VHDs"
sys.exit(1)
bad_vhd_path = sys.argv[3]
if not os.path.exists(bad_vhd_path):
os.makedirs(bad_vhd_path)
bad_leaves = []
descendents = {}
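    # descendents maps each parent VHD to the children that reference it;
    # bad_leaves collects VHDs whose parent pointer cannot be resolved.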
for fname in glob.glob(os.path.join(sr_path, "*.vhd")):
(returncode, stdout, stderr) = execute(
['vhd-util', 'query', '-n', fname, '-p'], ok_exit_codes=[0, 22])
stdout = stdout.strip()
if stdout.endswith('.vhd'):
try:
descendents[stdout].append(fname)
except KeyError:
descendents[stdout] = [fname]
elif 'query failed' in stdout:
bad_leaves.append(fname)
def walk_vhds(root):
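        # Depth-first: yield the root, then recurse into each child chain.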
yield root
if root in descendents:
for child in descendents[root]:
for vhd in walk_vhds(child):
yield vhd
for bad_leaf in bad_leaves:
for bad_vhd in walk_vhds(bad_leaf):
print bad_vhd
if action == "print":
pass
elif action == "delete":
os.unlink(bad_vhd)
elif action == "move":
new_path = os.path.join(bad_vhd_path,
os.path.basename(bad_vhd))
os.rename(bad_vhd, new_path)
else:
raise Exception("invalid action %s" % action)
if __name__ == '__main__':
main()
|
apache-2.0
|
DarkEnergyScienceCollaboration/ReprocessingTaskForce
|
config/w_2017_30/processCcdConfig.py
|
28
|
3389
|
# Apply the brighter fatter correction
config.isr.doBrighterFatter=False
config.charImage.repair.cosmicray.nCrPixelMax=1000000
# Useful to avoid deblending of satellite tracks
config.calibrate.deblend.maxFootprintSize=2000 #2200
# Use psfex instead of pca
import lsst.meas.extensions.psfex.psfexPsfDeterminer
config.charImage.measurePsf.psfDeterminer.name='psfex'
# The following should be included for u filter in order to lower the source detection threshold
#config.charImage.detection.includeThresholdMultiplier=1.0
# Run CModel
import lsst.meas.modelfit
config.charImage.measurement.plugins.names |= ["modelfit_DoubleShapeletPsfApprox", "modelfit_CModel"]
# Run astrometry using the new htm reference catalog format
# The following retargets are necessary until the new scheme becomes standard
from lsst.meas.algorithms import LoadIndexedReferenceObjectsTask
config.calibrate.astromRefObjLoader.retarget(LoadIndexedReferenceObjectsTask)
config.calibrate.photoRefObjLoader.retarget(LoadIndexedReferenceObjectsTask)
# Use new astrometry fitter
from lsst.meas.astrom import FitSipDistortionTask
config.calibrate.astrometry.wcsFitter.retarget(FitSipDistortionTask)
config.calibrate.astrometry.wcsFitter.order = 3
config.calibrate.astrometry.matcher.maxMatchDistArcSec=5
# Select external catalogs for Astrometry and Photometry
config.calibrate.photoRefObjLoader.ref_dataset_name='sdss'
#config.calibrate.astromRefObjLoader.ref_dataset_name='gaia'
config.calibrate.astromRefObjLoader.ref_dataset_name='pan-starrs'
#config.calibrate.astromRefObjLoader.ref_dataset_name='sdss'
# Astrometry with panstarrs
config.calibrate.astromRefObjLoader.filterMap = {
'u':'g',
'g':'g',
'r':'r',
'i':'i',
'i2': 'i',
'z':'z',
'y':'y',
}
# Astrometry with gaia
#config.calibrate.astromRefObjLoader.filterMap = {
# 'u':'phot_g_mean_mag',
# 'g':'phot_g_mean_mag',
# 'r':'phot_g_mean_mag',
# 'i':'phot_g_mean_mag',
# 'z':'phot_g_mean_mag',
# 'y':'phot_g_mean_mag',
#}
# Photometry with sdss
config.calibrate.photoRefObjLoader.filterMap = {
'u': 'U',
'g': 'G',
'r': 'R',
'i': 'I',
'i2': 'I',
'z': 'Z',
'y': 'Z',
}
#Astrometry with sdss
#config.calibrate.astromRefObjLoader.filterMap = {
# 'u': 'U',
# 'g': 'G',
# 'r': 'R',
# 'i': 'I',
# 'z': 'Z',
# 'y': 'Z',
#}
import lsst.pipe.tasks.colorterms
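# Color term for the i2 filter: calibrated against the i band with a small
# linear correction that uses r as the secondary band.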
config.calibrate.photoCal.colorterms.data['e2v'].data['i2']=lsst.pipe.tasks.colorterms.Colorterm()
config.calibrate.photoCal.colorterms.data['e2v'].data['i2'].c2=0.0
config.calibrate.photoCal.colorterms.data['e2v'].data['i2'].c1=0.003
config.calibrate.photoCal.colorterms.data['e2v'].data['i2'].c0=0.0
config.calibrate.photoCal.colorterms.data['e2v'].data['i2'].primary='i'
config.calibrate.photoCal.colorterms.data['e2v'].data['i2'].secondary='r'
# use Chebyshev background estimation
config.charImage.background.useApprox = True
config.charImage.background.binSize = 128
config.charImage.background.undersampleStyle = 'REDUCE_INTERP_ORDER'
config.charImage.detection.background.useApprox = True
config.charImage.detection.background.binSize = 128
config.charImage.detection.background.undersampleStyle = 'REDUCE_INTERP_ORDER'
|
gpl-2.0
|
shreyans800755/coala
|
coalib/bearlib/aspects/base.py
|
15
|
6123
|
import functools
import re
from coalib.bearlib.languages import Language
import coalib.bearlib.aspects
from .taste import TasteError
def get_subaspect(parent, subaspect):
"""
Get a subaspect from an aspectclass or aspectclass instance.
>>> import coalib.bearlib.aspects as coala_aspects
>>> metadata = coala_aspects['Metadata']
>>> commit_msg = coala_aspects['CommitMessage']
>>> shortlog = coala_aspects['Shortlog']
We can get direct children.
>>> get_subaspect(metadata, commit_msg)
<aspectclass 'Root.Metadata.CommitMessage'>
Or even a grandchildren.
>>> get_subaspect(metadata, shortlog)
<aspectclass 'Root.Metadata.CommitMessage.Shortlog'>
    Or with the string name of an aspect
>>> get_subaspect(metadata, 'shortlog')
<aspectclass 'Root.Metadata.CommitMessage.Shortlog'>
We can also get child instance of an aspect instance.
>>> get_subaspect(metadata('Python'), commit_msg)
<...CommitMessage object at 0x...>
    Passing a subaspect instance as the argument, however, is prohibited,
    because it doesn't really make sense.
>>> get_subaspect(metadata('Python'), commit_msg('Java'))
Traceback (most recent call last):
...
AttributeError: Cannot search an aspect instance using another ...
:param parent: The parent aspect that should be searched.
:param subaspect: An subaspect that we want to find in an
aspectclass.
:return: An aspectclass. Return None if not found.
"""
# Avoid circular import
from .meta import isaspect, issubaspect
if not isaspect(subaspect):
subaspect = coalib.bearlib.aspects[subaspect]
if not issubaspect(subaspect, parent):
return None
if isinstance(subaspect, aspectbase):
raise AttributeError('Cannot search an aspect instance using '
'another aspect instance as argument.')
parent_qualname = (type(parent).__qualname__ if isinstance(
parent, aspectbase) else parent.__qualname__)
if parent_qualname == subaspect.__qualname__:
return parent
# Trim common parent name
aspect_path = re.sub(r'^%s\.' % parent_qualname, '',
subaspect.__qualname__)
aspect_path = aspect_path.split('.')
child = parent
# Traverse through children until we got our subaspect
for path in aspect_path:
child = child.subaspects[path]
return child
def _get_leaf_aspects(aspect):
"""
Explode an aspect into list of its leaf aspects.
:param aspect: An aspect class or instance.
:return: List of leaf aspects.
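    Leaf aspects are the deepest nodes of the aspect tree, i.e. those with
    no subaspects of their own.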
"""
# Avoid circular import
from .collections import AspectList
leaf_aspects = AspectList()
def search_leaf(aspects):
for aspect in aspects:
if not aspect.subaspects:
nonlocal leaf_aspects
leaf_aspects.append(aspect)
else:
search_leaf(aspect.subaspects.values())
search_leaf([aspect])
return leaf_aspects
class SubaspectGetter:
"""
Special "getter" class to implement ``get()`` method in aspectbase that
could be accessed from the aspectclass or aspectclass instance.
"""
def __get__(self, obj, owner):
parent = obj if obj is not None else owner
return functools.partial(get_subaspect, parent)
class LeafAspectGetter:
"""
Descriptor class for ``get_leaf_aspects()`` method in aspectbase.
This class is required to make the ``get_leaf_aspects()`` accessible from
both aspectclass and aspectclass instance.
"""
def __get__(self, obj, owner):
parent = obj if obj is not None else owner
return functools.partial(_get_leaf_aspects, parent)
class aspectbase:
"""
Base class for aspectclasses with common features for their instances.
Derived classes must use
:class:`coalib.bearlib.aspects.meta.aspectclass` as metaclass.
This is automatically handled by
:meth:`coalib.bearlib.aspects.meta.aspectclass.subaspect` decorator.
"""
get = SubaspectGetter()
get_leaf_aspects = LeafAspectGetter()
def __init__(self, language, **taste_values):
"""
Instantiate an aspectclass with specific `taste_values`,
including parent tastes.
Given tastes must be available for the given `language`,
which must be a language identifier supported by
:class:`coalib.bearlib.languages.Language`.
All taste values will be casted to the related taste cast types.
Non-given available tastes will get their default values.
"""
# bypass self.__setattr__
self.__dict__['language'] = Language[language]
for name, taste in type(self).tastes.items():
if taste.languages and language not in taste.languages:
if name in taste_values:
raise TasteError('%s.%s is not available for %s.' % (
type(self).__qualname__, name, language))
else:
setattr(self, name, taste_values.get(name, taste.default))
# Recursively instance its subaspects too
instanced_child = {}
for name, child in self.subaspects.items():
instanced_child[name] = child(language, **taste_values)
self.__dict__['subaspects'] = instanced_child
def __eq__(self, other):
return type(self) is type(other) and self.tastes == other.tastes
@property
def tastes(self):
"""
Get a dictionary of all taste names mapped to their specific values,
including parent tastes.
"""
return {name: self.__dict__[name] for name in type(self).tastes
if name in self.__dict__}
def __setattr__(self, name, value):
"""
Don't allow attribute manipulations after instantiation of
aspectclasses.
"""
if name not in type(self).tastes:
raise AttributeError(
"can't set attributes of aspectclass instances")
super().__setattr__(name, value)
|
agpl-3.0
|
ianatpn/nupictest
|
nupic/support/lockattributes.py
|
15
|
6792
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
# This is the environment variable that controls the lock attributes
# enforcement.
#
# The lock attributes machinery is engaged by default. To deactivate it
# define this environment variable. The benefit is that there will be no runtime
# overhead (Except for a one-time check when classes that derive from
# LockAttributesMixin are defined or methods decorated with
# _canAddAttributes are defined).
deactivation_key = 'NTA_DONT_USE_LOCK_ATTRIBUTES'
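# Typical usage (sketch; LockAttributesMixin is the mixin referenced above):
#
#     class Point(LockAttributesMixin):
#         def __init__(self):
#             self.x = 0      # fine: set inside a decorated __init__()
#
#     p = Point()
#     p.y = 1                 # raises: new attribute outside __init__()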
def _allow_new_attributes(f):
"""A decorator that maintains the attribute lock state of an object
    It cooperates with the LockAttributesMetaclass (see below), which replaces
the __setattr__ method with a custom one that checks the _canAddAttributes
counter and allows setting new attributes only if _canAddAttributes > 0.
New attributes can be set only from methods decorated
with this decorator (should be only __init__ and __setstate__ normally)
The decorator is reentrant (e.g. if from inside a decorated function another
decorated function is invoked). Before invoking the target function it
increments the counter (or sets it to 1). After invoking the target function
it decrements the counter and if it's 0 it removes the counter.
"""
def decorated(self, *args, **kw):
"""The decorated function that replaces __init__() or __setstate__()
"""
# Allow setting new attributes while the target function runs
if not hasattr(self, '_canAddAttributes'):
self.__dict__['_canAddAttributes'] = 1
else:
self._canAddAttributes += 1
assert self._canAddAttributes >= 1
# Save the attribute counter (it may be wiped by __setstate__)
count = self._canAddAttributes
# Run the original function
f(self, *args, **kw)
# Restore _canAddAttributes if deleted from dict (can happen in __setstate__)
if hasattr(self, '_canAddAttributes'):
self._canAddAttributes -= 1
else:
self._canAddAttributes = count - 1
assert self._canAddAttributes >= 0
if self._canAddAttributes == 0:
del self._canAddAttributes
decorated.__doc__ = f.__doc__
decorated.__name__ = f.__name__
return decorated
def _simple_init(self, *args, **kw):
"""trivial init method that just calls base class's __init__()
This method is attached to classes that don't define __init__(). It is needed
because LockAttributesMetaclass must decorate the __init__() method of
its target class.
"""
type(self).__base__.__init__(self, *args, **kw)
class LockAttributesMetaclass(type):
"""This metaclass makes objects attribute-locked by decorating their
__init__() and __setstate__() methods with the _allow_new_attributes
decorator.
It doesn't do anything if the environment variable
'NTA_DONT_USE_LOCK_ATTRIBUTES' (the deactivation key) is defined.
That allows for verifying proper usage during testing and skipping
it in production code (that was verified during testing) to avoid the cost
of verifying every attribute setting.
It also replaces the __setattr__ magic method with a custom one that verifies
new attributes are set only in code that originates from a decorated method
(normally __init__() or __setstate__()).
If the target class has no __init__() method it adds a trivial __init__()
method to provide a hook for the decorator (the _simple_init()
function defined above)
"""
def __init__(cls, name, bases, dict):
"""
"""
def custom_setattr(self, name, value):
"""A custom replacement for __setattr__
Allows setting only existing attributes. It is designed to work
with the _allow_new_attributes decorator.
It works by checking if the requested attribute is already in the
__dict__ or if the _canAddAttributes counter > 0. Otherwise it raises an
exception.
If all is well it calls the original __setattr__. This means it can also
work with classes that already have a custom __setattr__
"""
if (name == '_canAddAttributes' or
(hasattr(self, '_canAddAttributes') and self._canAddAttributes > 0) or
hasattr(self, name)):
return self._original_setattr(name, value)
else:
raise Exception('Attempting to set a new attribute: ' + name)
# Bail out if not active. Zero overhead other than this one-time check
# at class definition time
if deactivation_key in os.environ:
return
# Initialize the super-class
super(LockAttributesMetaclass, cls).__init__(name, bases, dict)
# Store and replace the __setattr__ with the custom one (if needed)
if not hasattr(cls, '_original_setattr'):
cls._original_setattr = cls.__setattr__
cls.__setattr__ = custom_setattr
# Keep the original __init__ if it exists. This was needed for NuPIC 1. Remove?
if '__init__' in dict:
setattr(cls, '_original_init', dict['__init__'])
# Get the __init__ and __setstate__ from the target class's dict
# If there is no __init__ use _simple_init (it's Ok if there is no
#__setstate__)
methods = [('__init__', dict.get('__init__', _simple_init)),
('__setstate__', dict.get('__setstate__', None))]
# Wrap the methods with _allow_new_attributes decorator
for name, method in methods:
if method is not None:
setattr(cls, name, _allow_new_attributes(method))
class LockAttributesMixin(object):
"""This class serves as a base (or mixin) for classes that want to enforce
the locked attributes pattern (all attributes should be defined in __init__()
or __setstate__().
All the target class has to do add LockAttributesMixin as one of its bases
(inherit from it).
The metaclass will be activated when the application class is created
and the lock attributes machinery will be injected (unless the
deactivation_key is defined in the environment)
"""
__metaclass__ = LockAttributesMetaclass
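# --- Illustrative usage sketch (not part of the original module) ---
# A minimal demonstration of the locked-attributes pattern, assuming
# NTA_DONT_USE_LOCK_ATTRIBUTES is *not* set in the environment. The class
# name `Point` is hypothetical.
#
#     class Point(LockAttributesMixin):
#         def __init__(self, x, y):
#             self.x = x        # allowed: inside the decorated __init__()
#             self.y = y
#
#     p = Point(1, 2)
#     p.x = 10                  # allowed: 'x' already exists
#     p.z = 3                   # raises Exception: new attribute after init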
|
gpl-3.0
|
zhongpei/shadowsocks
|
shadowsocks/manager.py
|
925
|
9692
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import errno
import traceback
import socket
import logging
import json
import collections
from shadowsocks import common, eventloop, tcprelay, udprelay, asyncdns, shell
BUF_SIZE = 1506
STAT_SEND_LIMIT = 100
class Manager(object):
def __init__(self, config):
self._config = config
self._relays = {} # (tcprelay, udprelay)
self._loop = eventloop.EventLoop()
self._dns_resolver = asyncdns.DNSResolver()
self._dns_resolver.add_to_loop(self._loop)
self._statistics = collections.defaultdict(int)
self._control_client_addr = None
try:
manager_address = config['manager_address']
if ':' in manager_address:
addr = manager_address.rsplit(':', 1)
addr = addr[0], int(addr[1])
addrs = socket.getaddrinfo(addr[0], addr[1])
if addrs:
family = addrs[0][0]
else:
logging.error('invalid address: %s', manager_address)
exit(1)
else:
addr = manager_address
family = socket.AF_UNIX
self._control_socket = socket.socket(family,
socket.SOCK_DGRAM)
self._control_socket.bind(addr)
self._control_socket.setblocking(False)
except (OSError, IOError) as e:
logging.error(e)
logging.error('can not bind to manager address')
exit(1)
self._loop.add(self._control_socket,
eventloop.POLL_IN, self)
self._loop.add_periodic(self.handle_periodic)
port_password = config['port_password']
del config['port_password']
for port, password in port_password.items():
a_config = config.copy()
a_config['server_port'] = int(port)
a_config['password'] = password
self.add_port(a_config)
def add_port(self, config):
port = int(config['server_port'])
servers = self._relays.get(port, None)
if servers:
logging.error("server already exists at %s:%d" % (config['server'],
port))
return
logging.info("adding server at %s:%d" % (config['server'], port))
t = tcprelay.TCPRelay(config, self._dns_resolver, False,
self.stat_callback)
u = udprelay.UDPRelay(config, self._dns_resolver, False,
self.stat_callback)
t.add_to_loop(self._loop)
u.add_to_loop(self._loop)
self._relays[port] = (t, u)
def remove_port(self, config):
port = int(config['server_port'])
servers = self._relays.get(port, None)
if servers:
logging.info("removing server at %s:%d" % (config['server'], port))
t, u = servers
t.close(next_tick=False)
u.close(next_tick=False)
del self._relays[port]
else:
logging.error("server not exist at %s:%d" % (config['server'],
port))
def handle_event(self, sock, fd, event):
if sock == self._control_socket and event == eventloop.POLL_IN:
data, self._control_client_addr = sock.recvfrom(BUF_SIZE)
parsed = self._parse_command(data)
if parsed:
command, config = parsed
a_config = self._config.copy()
if config:
# let the command override the configuration file
a_config.update(config)
if 'server_port' not in a_config:
logging.error('can not find server_port in config')
else:
if command == 'add':
self.add_port(a_config)
self._send_control_data(b'ok')
elif command == 'remove':
self.remove_port(a_config)
self._send_control_data(b'ok')
elif command == 'ping':
self._send_control_data(b'pong')
else:
logging.error('unknown command %s', command)
def _parse_command(self, data):
# commands:
# add: {"server_port": 8000, "password": "foobar"}
# remove: {"server_port": 8000"}
data = common.to_str(data)
parts = data.split(':', 1)
if len(parts) < 2:
return data, None
command, config_json = parts
try:
config = shell.parse_json_in_str(config_json)
return command, config
except Exception as e:
logging.error(e)
return None
def stat_callback(self, port, data_len):
self._statistics[port] += data_len
def handle_periodic(self):
r = {}
i = 0
def send_data(data_dict):
if data_dict:
# use compact JSON format (without space)
data = common.to_bytes(json.dumps(data_dict,
separators=(',', ':')))
self._send_control_data(b'stat: ' + data)
for k, v in self._statistics.items():
r[k] = v
i += 1
# split the data into segments that fit in UDP packets
if i >= STAT_SEND_LIMIT:
send_data(r)
r.clear()
send_data(r)
self._statistics.clear()
def _send_control_data(self, data):
if self._control_client_addr:
try:
self._control_socket.sendto(data, self._control_client_addr)
except (socket.error, OSError, IOError) as e:
error_no = eventloop.errno_from_exception(e)
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
return
else:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
def run(self):
self._loop.run()
def run(config):
Manager(config).run()
def test():
import time
import threading
import struct
from shadowsocks import encrypt
logging.basicConfig(level=5,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
enc = []
eventloop.TIMEOUT_PRECISION = 1
def run_server():
config = {
'server': '127.0.0.1',
'local_port': 1081,
'port_password': {
'8381': 'foobar1',
'8382': 'foobar2'
},
'method': 'aes-256-cfb',
'manager_address': '127.0.0.1:6001',
'timeout': 60,
'fast_open': False,
'verbose': 2
}
manager = Manager(config)
enc.append(manager)
manager.run()
t = threading.Thread(target=run_server)
t.start()
time.sleep(1)
manager = enc[0]
cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
cli.connect(('127.0.0.1', 6001))
# test add and remove
time.sleep(1)
cli.send(b'add: {"server_port":7001, "password":"asdfadsfasdf"}')
time.sleep(1)
assert 7001 in manager._relays
data, addr = cli.recvfrom(1506)
assert b'ok' in data
cli.send(b'remove: {"server_port":8381}')
time.sleep(1)
assert 8381 not in manager._relays
data, addr = cli.recvfrom(1506)
assert b'ok' in data
logging.info('add and remove test passed')
# test statistics for TCP
header = common.pack_addr(b'google.com') + struct.pack('>H', 80)
data = encrypt.encrypt_all(b'asdfadsfasdf', 'aes-256-cfb', 1,
header + b'GET /\r\n\r\n')
tcp_cli = socket.socket()
tcp_cli.connect(('127.0.0.1', 7001))
tcp_cli.send(data)
tcp_cli.recv(4096)
tcp_cli.close()
data, addr = cli.recvfrom(1506)
data = common.to_str(data)
assert data.startswith('stat: ')
data = data.split('stat:')[1]
stats = shell.parse_json_in_str(data)
assert '7001' in stats
logging.info('TCP statistics test passed')
# test statistics for UDP
header = common.pack_addr(b'127.0.0.1') + struct.pack('>H', 80)
data = encrypt.encrypt_all(b'foobar2', 'aes-256-cfb', 1,
header + b'test')
udp_cli = socket.socket(type=socket.SOCK_DGRAM)
udp_cli.sendto(data, ('127.0.0.1', 8382))
udp_cli.close()
data, addr = cli.recvfrom(1506)
data = common.to_str(data)
assert data.startswith('stat: ')
data = data.split('stat:')[1]
stats = json.loads(data)
assert '8382' in stats
logging.info('UDP statistics test passed')
manager._loop.stop()
t.join()
if __name__ == '__main__':
test()
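# --- Illustrative client sketch (not part of the original module) ---
# A minimal standalone example of the manager's UDP wire protocol handled by
# _parse_command()/handle_event(): commands are '<name>: <json>' datagrams and
# replies are plain bytes such as b'ok', b'pong' or b'stat: {...}'. The
# address and ports below are assumptions matching the test() above.
#
#     import socket
#     cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#     cli.connect(('127.0.0.1', 6001))   # the configured manager_address
#     cli.send(b'ping')                  # manager replies b'pong'
#     cli.send(b'add: {"server_port": 8001, "password": "secret"}')
#     cli.send(b'remove: {"server_port": 8001}')
#     print(cli.recv(1506))              # e.g. b'pong'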
|
apache-2.0
|
xodus7/tensorflow
|
tensorflow/python/kernel_tests/variable_ops_test.py
|
8
|
10064
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.variable_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
_NP_TO_TF = {
np.float32: dtypes.float32,
np.float64: dtypes.float64,
np.int32: dtypes.int32,
np.int64: dtypes.int64,
}
class VariableOpTest(test.TestCase):
def _initFetch(self, x, tftype, use_gpu=None):
with self.test_session(use_gpu=use_gpu):
p = state_ops.variable_op(x.shape, tftype)
op = state_ops.assign(p, x)
op.op.run()
return p.eval()
def _testTypes(self, vals):
for dtype in [np.float32, np.float64, np.int32, np.int64]:
self.setUp()
x = vals.astype(dtype)
tftype = _NP_TO_TF[dtype]
self.assertAllEqual(x, self._initFetch(x, tftype, use_gpu=False))
# NOTE(touts): the GPU test should pass for all types, whether or not the
# Variable op has a GPU implementation for that type, as we expect
# Variable and Assign to have GPU implementations for the matching TF types.
self.assertAllEqual(x, self._initFetch(x, tftype, use_gpu=True))
def testBasic(self):
self._testTypes(np.arange(0, 20).reshape([4, 5]))
def testset_shape(self):
p = state_ops.variable_op([1, 2], dtypes.float32)
self.assertEqual([1, 2], p.get_shape())
p = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), p.get_shape())
def testAssign(self):
value = np.array([[42.0, 43.0]])
var = state_ops.variable_op(value.shape, dtypes.float32)
self.assertShapeEqual(value, var)
assigned = state_ops.assign(var, value)
self.assertShapeEqual(value, assigned)
def testAssignNoValidateShape(self):
value = np.array([[42.0, 43.0]])
var = state_ops.variable_op(value.shape, dtypes.float32)
self.assertShapeEqual(value, var)
assigned = state_ops.assign(var, value, validate_shape=False)
self.assertShapeEqual(value, assigned)
def testAssignNoVarShape(self):
value = np.array([[42.0, 43.0]])
var = state_ops.variable_op(value.shape, dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
assigned = state_ops.assign(var, value)
self.assertShapeEqual(value, assigned)
def testAssignNoVarShapeNoValidateShape(self):
value = np.array([[42.0, 43.0]])
var = state_ops.variable_op(value.shape, dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
assigned = state_ops.assign(var, value, validate_shape=False)
self.assertShapeEqual(value, assigned)
def _NewShapelessTensor(self):
tensor = array_ops.placeholder(dtypes.float32)
self.assertEqual(tensor_shape.unknown_shape(), tensor.get_shape())
return tensor
def testAssignNoValueShape(self):
value = self._NewShapelessTensor()
shape = [1, 2]
var = state_ops.variable_op(shape, dtypes.float32)
assigned = state_ops.assign(var, value)
self.assertEqual(shape, var.get_shape())
self.assertEqual(shape, assigned.get_shape())
def testAssignNoValueShapeNoValidateShape(self):
value = self._NewShapelessTensor()
shape = [1, 2]
var = state_ops.variable_op(shape, dtypes.float32)
self.assertEqual(shape, var.get_shape())
assigned = state_ops.assign(var, value, validate_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), assigned.get_shape())
def testAssignNoShape(self):
with self.cached_session():
value = self._NewShapelessTensor()
var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
self.assertEqual(tensor_shape.unknown_shape(),
state_ops.assign(var, value).get_shape())
def testAssignNoShapeNoValidateShape(self):
with self.cached_session():
value = self._NewShapelessTensor()
var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
self.assertEqual(
tensor_shape.unknown_shape(),
state_ops.assign(
var, value, validate_shape=False).get_shape())
def testAssignUpdate(self):
var = state_ops.variable_op([1, 2], dtypes.float32)
added = state_ops.assign_add(var, [[2.0, 3.0]])
self.assertEqual([1, 2], added.get_shape())
subbed = state_ops.assign_sub(var, [[12.0, 13.0]])
self.assertEqual([1, 2], subbed.get_shape())
def testAssignUpdateNoVarShape(self):
var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
added = state_ops.assign_add(var, [[2.0, 3.0]])
self.assertEqual([1, 2], added.get_shape())
subbed = state_ops.assign_sub(var, [[12.0, 13.0]])
self.assertEqual([1, 2], subbed.get_shape())
def testAssignUpdateNoValueShape(self):
var = state_ops.variable_op([1, 2], dtypes.float32)
added = state_ops.assign_add(var, self._NewShapelessTensor())
self.assertEqual([1, 2], added.get_shape())
subbed = state_ops.assign_sub(var, self._NewShapelessTensor())
self.assertEqual([1, 2], subbed.get_shape())
def testAssignUpdateNoShape(self):
var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
added = state_ops.assign_add(var, self._NewShapelessTensor())
self.assertEqual(tensor_shape.unknown_shape(), added.get_shape())
subbed = state_ops.assign_sub(var, self._NewShapelessTensor())
self.assertEqual(tensor_shape.unknown_shape(), subbed.get_shape())
def testTemporaryVariable(self):
with self.test_session(use_gpu=True):
var = gen_state_ops.temporary_variable(
[1, 2], dtypes.float32, var_name="foo")
var = state_ops.assign(var, [[4.0, 5.0]])
var = state_ops.assign_add(var, [[6.0, 7.0]])
final = gen_state_ops.destroy_temporary_variable(var, var_name="foo")
self.assertAllClose([[10.0, 12.0]], final.eval())
def testDestroyNonexistentTemporaryVariable(self):
with self.test_session(use_gpu=True):
var = gen_state_ops.temporary_variable([1, 2], dtypes.float32)
final = gen_state_ops.destroy_temporary_variable(var, var_name="bad")
with self.assertRaises(errors.NotFoundError):
final.eval()
def testDuplicateTemporaryVariable(self):
with self.test_session(use_gpu=True):
var1 = gen_state_ops.temporary_variable(
[1, 2], dtypes.float32, var_name="dup")
var1 = state_ops.assign(var1, [[1.0, 2.0]])
var2 = gen_state_ops.temporary_variable(
[1, 2], dtypes.float32, var_name="dup")
var2 = state_ops.assign(var2, [[3.0, 4.0]])
final = var1 + var2
with self.assertRaises(errors.AlreadyExistsError):
final.eval()
def testDestroyTemporaryVariableTwice(self):
with self.test_session(use_gpu=True):
var = gen_state_ops.temporary_variable([1, 2], dtypes.float32)
val1 = gen_state_ops.destroy_temporary_variable(var, var_name="dup")
val2 = gen_state_ops.destroy_temporary_variable(var, var_name="dup")
final = val1 + val2
with self.assertRaises(errors.NotFoundError):
final.eval()
def testTemporaryVariableNoLeak(self):
with self.test_session(use_gpu=True):
var = gen_state_ops.temporary_variable(
[1, 2], dtypes.float32, var_name="bar")
final = array_ops.identity(var)
final.eval()
def testTwoTemporaryVariablesNoLeaks(self):
with self.test_session(use_gpu=True):
var1 = gen_state_ops.temporary_variable(
[1, 2], dtypes.float32, var_name="var1")
var2 = gen_state_ops.temporary_variable(
[1, 2], dtypes.float32, var_name="var2")
final = var1 + var2
final.eval()
def testAssignDependencyAcrossDevices(self):
with self.test_session(use_gpu=True):
# The variable and an op to increment it are on the GPU.
var = state_ops.variable_op([1], dtypes.float32)
state_ops.assign(var, [1.0]).eval()
increment = state_ops.assign_add(var, [1.0])
with ops.control_dependencies([increment]):
with ops.device("/cpu:0"):
# This mul op is pinned to the CPU, but reads the variable from the
# GPU. The test ensures that the dependency on 'increment' is still
# honored, i.e., the Send and Recv from GPU to CPU should take place
# only after the increment.
result = math_ops.multiply(var, var)
self.assertAllClose([4.0], result.eval())
def testIsVariableInitialized(self):
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
v0 = state_ops.variable_op([1, 2], dtypes.float32)
self.assertEqual(False, variables.is_variable_initialized(v0).eval())
state_ops.assign(v0, [[2.0, 3.0]]).eval()
self.assertEqual(True, variables.is_variable_initialized(v0).eval())
if __name__ == "__main__":
test.main()
|
apache-2.0
|
atsolakid/edx-platform
|
lms/djangoapps/certificates/management/commands/fix_ungraded_certs.py
|
110
|
1538
|
from certificates.models import GeneratedCertificate
from courseware import grades, courses
from django.test.client import RequestFactory
from django.core.management.base import BaseCommand
from optparse import make_option
class Command(BaseCommand):
help = """
Find all students that need to be graded
and grade them.
"""
option_list = BaseCommand.option_list + (
make_option(
'-n',
'--noop',
action='store_true',
dest='noop',
default=False,
help="Print but do not update the GeneratedCertificate table"
),
make_option(
'-c',
'--course',
metavar='COURSE_ID',
dest='course',
default=False,
help='Grade ungraded users for this course'
),
)
def handle(self, *args, **options):
course_id = options['course']
print "Fetching ungraded students for {0}".format(course_id)
ungraded = GeneratedCertificate.objects.filter(
course_id__exact=course_id).filter(grade__exact='')
course = courses.get_course_by_id(course_id)
factory = RequestFactory()
request = factory.get('/')
for cert in ungraded:
# grade the student
grade = grades.grade(cert.user, request, course)
print "grading {0} - {1}".format(cert.user, grade['percent'])
cert.grade = grade['percent']
if not options['noop']:
cert.save()
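# --- Illustrative invocation sketch (not part of the original module) ---
# How a Django management command like this one would typically be invoked;
# the manage.py wrapper, the 'lms' settings argument and the course id are
# assumptions, not taken from this file.
#
#     # dry run: print recomputed grades without saving them
#     python manage.py lms fix_ungraded_certs --course <course_id> --noop
#
#     # persist the recomputed grades
#     python manage.py lms fix_ungraded_certs --course <course_id>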
|
agpl-3.0
|
souravsingh/sympy
|
sympy/sets/tests/test_sets.py
|
6
|
39180
|
from sympy import (Symbol, Set, Union, Interval, oo, S, sympify, nan,
GreaterThan, LessThan, Max, Min, And, Or, Eq, Ge, Le, Gt, Lt, Float,
FiniteSet, Intersection, imageset, I, true, false, ProductSet, E,
sqrt, Complement, EmptySet, sin, cos, Lambda, ImageSet, pi,
Eq, Pow, Contains, Sum, rootof, SymmetricDifference, Piecewise,
Matrix, signsimp, Range)
from mpmath import mpi
from sympy.core.compatibility import range
from sympy.utilities.pytest import raises, XFAIL
from sympy.abc import x, y, z, m, n
def test_imageset():
ints = S.Integers
raises(TypeError, lambda: imageset(x, ints))
raises(ValueError, lambda: imageset(x, y, z, ints))
raises(ValueError, lambda: imageset(Lambda(x, cos(x)), y))
assert imageset(cos, ints) == ImageSet(Lambda(x, cos(x)), ints)
def f(x):
return cos(x)
raises(TypeError, lambda: imageset(f, ints))
f = lambda x: cos(x)
assert imageset(f, ints) == ImageSet(Lambda(x, cos(x)), ints)
assert imageset(x, 1, ints) == FiniteSet(1)
assert imageset(x, y, ints) == FiniteSet(y)
assert (str(imageset(lambda y: x + y, Interval(-2, 1)).lamda.expr)
in ('_x + x', 'x + _x'))
def test_interval_arguments():
assert Interval(0, oo) == Interval(0, oo, False, True)
assert Interval(0, oo).right_open is true
assert Interval(-oo, 0) == Interval(-oo, 0, True, False)
assert Interval(-oo, 0).left_open is true
assert Interval(oo, -oo) == S.EmptySet
assert isinstance(Interval(1, 1), FiniteSet)
e = Sum(x, (x, 1, 3))
assert isinstance(Interval(e, e), FiniteSet)
assert Interval(1, 0) == S.EmptySet
assert Interval(1, 1).measure == 0
assert Interval(1, 1, False, True) == S.EmptySet
assert Interval(1, 1, True, False) == S.EmptySet
assert Interval(1, 1, True, True) == S.EmptySet
assert isinstance(Interval(0, Symbol('a')), Interval)
assert Interval(Symbol('a', real=True, positive=True), 0) == S.EmptySet
raises(ValueError, lambda: Interval(0, S.ImaginaryUnit))
raises(ValueError, lambda: Interval(0, Symbol('z', real=False)))
raises(NotImplementedError, lambda: Interval(0, 1, And(x, y)))
raises(NotImplementedError, lambda: Interval(0, 1, False, And(x, y)))
raises(NotImplementedError, lambda: Interval(0, 1, z, And(x, y)))
def test_interval_symbolic_end_points():
a = Symbol('a', real=True)
assert Union(Interval(0, a), Interval(0, 3)).sup == Max(a, 3)
assert Union(Interval(a, 0), Interval(-3, 0)).inf == Min(-3, a)
assert Interval(0, a).contains(1) == LessThan(1, a)
def test_union():
assert Union(Interval(1, 2), Interval(2, 3)) == Interval(1, 3)
assert Union(Interval(1, 2), Interval(2, 3, True)) == Interval(1, 3)
assert Union(Interval(1, 3), Interval(2, 4)) == Interval(1, 4)
assert Union(Interval(1, 2), Interval(1, 3)) == Interval(1, 3)
assert Union(Interval(1, 3), Interval(1, 2)) == Interval(1, 3)
assert Union(Interval(1, 3, False, True), Interval(1, 2)) == \
Interval(1, 3, False, True)
assert Union(Interval(1, 3), Interval(1, 2, False, True)) == Interval(1, 3)
assert Union(Interval(1, 2, True), Interval(1, 3)) == Interval(1, 3)
assert Union(Interval(1, 2, True), Interval(1, 3, True)) == \
Interval(1, 3, True)
assert Union(Interval(1, 2, True), Interval(1, 3, True, True)) == \
Interval(1, 3, True, True)
assert Union(Interval(1, 2, True, True), Interval(1, 3, True)) == \
Interval(1, 3, True)
assert Union(Interval(1, 3), Interval(2, 3)) == Interval(1, 3)
assert Union(Interval(1, 3, False, True), Interval(2, 3)) == \
Interval(1, 3)
assert Union(Interval(1, 2, False, True), Interval(2, 3, True)) != \
Interval(1, 3)
assert Union(Interval(1, 2), S.EmptySet) == Interval(1, 2)
assert Union(S.EmptySet) == S.EmptySet
assert Union(Interval(0, 1), [FiniteSet(1.0/n) for n in range(1, 10)]) == \
Interval(0, 1)
assert Interval(1, 2).union(Interval(2, 3)) == \
Interval(1, 2) + Interval(2, 3)
assert Interval(1, 2).union(Interval(2, 3)) == Interval(1, 3)
assert Union(Set()) == Set()
assert FiniteSet(1) + FiniteSet(2) + FiniteSet(3) == FiniteSet(1, 2, 3)
assert FiniteSet('ham') + FiniteSet('eggs') == FiniteSet('ham', 'eggs')
assert FiniteSet(1, 2, 3) + S.EmptySet == FiniteSet(1, 2, 3)
assert FiniteSet(1, 2, 3) & FiniteSet(2, 3, 4) == FiniteSet(2, 3)
assert FiniteSet(1, 2, 3) | FiniteSet(2, 3, 4) == FiniteSet(1, 2, 3, 4)
x = Symbol("x")
y = Symbol("y")
z = Symbol("z")
assert S.EmptySet | FiniteSet(x, FiniteSet(y, z)) == \
FiniteSet(x, FiniteSet(y, z))
# Test that Intervals and FiniteSets play nicely
assert Interval(1, 3) + FiniteSet(2) == Interval(1, 3)
assert Interval(1, 3, True, True) + FiniteSet(3) == \
Interval(1, 3, True, False)
X = Interval(1, 3) + FiniteSet(5)
Y = Interval(1, 2) + FiniteSet(3)
XandY = X.intersect(Y)
assert 2 in X and 3 in X and 3 in XandY
assert XandY.is_subset(X) and XandY.is_subset(Y)
raises(TypeError, lambda: Union(1, 2, 3))
assert X.is_iterable is False
# issue 7843
assert Union(S.EmptySet, FiniteSet(-sqrt(-I), sqrt(-I))) == \
FiniteSet(-sqrt(-I), sqrt(-I))
assert Union(S.Reals, S.Integers) == S.Reals
def test_union_iter():
# Use Range because it is ordered
u = Union(Range(3), Range(5), Range(3), evaluate=False)
# Round robin
assert list(u) == [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 4]
def test_difference():
assert Interval(1, 3) - Interval(1, 2) == Interval(2, 3, True)
assert Interval(1, 3) - Interval(2, 3) == Interval(1, 2, False, True)
assert Interval(1, 3, True) - Interval(2, 3) == Interval(1, 2, True, True)
assert Interval(1, 3, True) - Interval(2, 3, True) == \
Interval(1, 2, True, False)
assert Interval(0, 2) - FiniteSet(1) == \
Union(Interval(0, 1, False, True), Interval(1, 2, True, False))
assert FiniteSet(1, 2, 3) - FiniteSet(2) == FiniteSet(1, 3)
assert FiniteSet('ham', 'eggs') - FiniteSet('eggs') == FiniteSet('ham')
assert FiniteSet(1, 2, 3, 4) - Interval(2, 10, True, False) == \
FiniteSet(1, 2)
assert FiniteSet(1, 2, 3, 4) - S.EmptySet == FiniteSet(1, 2, 3, 4)
assert Union(Interval(0, 2), FiniteSet(2, 3, 4)) - Interval(1, 3) == \
Union(Interval(0, 1, False, True), FiniteSet(4))
assert -1 in S.Reals - S.Naturals
def test_Complement():
assert Complement(Interval(1, 3), Interval(1, 2)) == Interval(2, 3, True)
assert Complement(FiniteSet(1, 3, 4), FiniteSet(3, 4)) == FiniteSet(1)
assert Complement(Union(Interval(0, 2),
FiniteSet(2, 3, 4)), Interval(1, 3)) == \
Union(Interval(0, 1, False, True), FiniteSet(4))
assert not 3 in Complement(Interval(0, 5), Interval(1, 4), evaluate=False)
assert -1 in Complement(S.Reals, S.Naturals, evaluate=False)
assert not 1 in Complement(S.Reals, S.Naturals, evaluate=False)
assert Complement(S.Integers, S.UniversalSet) == EmptySet()
assert S.UniversalSet.complement(S.Integers) == EmptySet()
assert (not 0 in S.Reals.intersect(S.Integers - FiniteSet(0)))
assert S.EmptySet - S.Integers == S.EmptySet
assert (S.Integers - FiniteSet(0)) - FiniteSet(1) == S.Integers - FiniteSet(0, 1)
assert S.Reals - Union(S.Naturals, FiniteSet(pi)) == \
Intersection(S.Reals - S.Naturals, S.Reals - FiniteSet(pi))
def test_complement():
assert Interval(0, 1).complement(S.Reals) == \
Union(Interval(-oo, 0, True, True), Interval(1, oo, True, True))
assert Interval(0, 1, True, False).complement(S.Reals) == \
Union(Interval(-oo, 0, True, False), Interval(1, oo, True, True))
assert Interval(0, 1, False, True).complement(S.Reals) == \
Union(Interval(-oo, 0, True, True), Interval(1, oo, False, True))
assert Interval(0, 1, True, True).complement(S.Reals) == \
Union(Interval(-oo, 0, True, False), Interval(1, oo, False, True))
assert S.UniversalSet.complement(S.EmptySet) == S.EmptySet
assert S.UniversalSet.complement(S.Reals) == S.EmptySet
assert S.UniversalSet.complement(S.UniversalSet) == S.EmptySet
assert S.EmptySet.complement(S.Reals) == S.Reals
assert Union(Interval(0, 1), Interval(2, 3)).complement(S.Reals) == \
Union(Interval(-oo, 0, True, True), Interval(1, 2, True, True),
Interval(3, oo, True, True))
assert FiniteSet(0).complement(S.Reals) == \
Union(Interval(-oo, 0, True, True), Interval(0, oo, True, True))
assert (FiniteSet(5) + Interval(S.NegativeInfinity,
0)).complement(S.Reals) == \
Interval(0, 5, True, True) + Interval(5, S.Infinity, True, True)
assert FiniteSet(1, 2, 3).complement(S.Reals) == \
Interval(S.NegativeInfinity, 1, True, True) + \
Interval(1, 2, True, True) + Interval(2, 3, True, True) +\
Interval(3, S.Infinity, True, True)
assert FiniteSet(x).complement(S.Reals) == Complement(S.Reals, FiniteSet(x))
assert FiniteSet(0, x).complement(S.Reals) == Complement(
Interval(-oo, 0, True, True) + Interval(0, oo, True, True),
FiniteSet(x), evaluate=False)
square = Interval(0, 1) * Interval(0, 1)
notsquare = square.complement(S.Reals*S.Reals)
assert all(pt in square for pt in [(0, 0), (.5, .5), (1, 0), (1, 1)])
assert not any(
pt in notsquare for pt in [(0, 0), (.5, .5), (1, 0), (1, 1)])
assert not any(pt in square for pt in [(-1, 0), (1.5, .5), (10, 10)])
assert all(pt in notsquare for pt in [(-1, 0), (1.5, .5), (10, 10)])
def test_intersect():
x = Symbol('x')
assert Interval(0, 2).intersect(Interval(1, 2)) == Interval(1, 2)
assert Interval(0, 2).intersect(Interval(1, 2, True)) == \
Interval(1, 2, True)
assert Interval(0, 2, True).intersect(Interval(1, 2)) == \
Interval(1, 2, False, False)
assert Interval(0, 2, True, True).intersect(Interval(1, 2)) == \
Interval(1, 2, False, True)
assert Interval(0, 2).intersect(Union(Interval(0, 1), Interval(2, 3))) == \
Union(Interval(0, 1), Interval(2, 2))
assert FiniteSet(1, 2)._intersect((1, 2, 3)) == FiniteSet(1, 2)
assert FiniteSet(1, 2, x).intersect(FiniteSet(x)) == FiniteSet(x)
assert FiniteSet('ham', 'eggs').intersect(FiniteSet('ham')) == \
FiniteSet('ham')
assert FiniteSet(1, 2, 3, 4, 5).intersect(S.EmptySet) == S.EmptySet
assert Interval(0, 5).intersect(FiniteSet(1, 3)) == FiniteSet(1, 3)
assert Interval(0, 1, True, True).intersect(FiniteSet(1)) == S.EmptySet
assert Union(Interval(0, 1), Interval(2, 3)).intersect(Interval(1, 2)) == \
Union(Interval(1, 1), Interval(2, 2))
assert Union(Interval(0, 1), Interval(2, 3)).intersect(Interval(0, 2)) == \
Union(Interval(0, 1), Interval(2, 2))
assert Union(Interval(0, 1), Interval(2, 3)).intersect(Interval(1, 2, True, True)) == \
S.EmptySet
assert Union(Interval(0, 1), Interval(2, 3)).intersect(S.EmptySet) == \
S.EmptySet
assert Union(Interval(0, 5), FiniteSet('ham')).intersect(FiniteSet(2, 3, 4, 5, 6)) == \
Union(FiniteSet(2, 3, 4, 5), Intersection(FiniteSet(6), Union(Interval(0, 5), FiniteSet('ham'))))
# issue 8217
assert Intersection(FiniteSet(x), FiniteSet(y)) == \
Intersection(FiniteSet(x), FiniteSet(y), evaluate=False)
assert FiniteSet(x).intersect(S.Reals) == \
Intersection(S.Reals, FiniteSet(x), evaluate=False)
# tests for the intersection alias
assert Interval(0, 5).intersection(FiniteSet(1, 3)) == FiniteSet(1, 3)
assert Interval(0, 1, True, True).intersection(FiniteSet(1)) == S.EmptySet
assert Union(Interval(0, 1), Interval(2, 3)).intersection(Interval(1, 2)) == \
Union(Interval(1, 1), Interval(2, 2))
def test_intersection():
# iterable
i = Intersection(FiniteSet(1, 2, 3), Interval(2, 5), evaluate=False)
assert i.is_iterable
assert set(i) == {S(2), S(3)}
# challenging intervals
x = Symbol('x', real=True)
i = Intersection(Interval(0, 3), Interval(x, 6))
assert (5 in i) is False
raises(TypeError, lambda: 2 in i)
# Singleton special cases
assert Intersection(Interval(0, 1), S.EmptySet) == S.EmptySet
assert Intersection(Interval(-oo, oo), Interval(-oo, x)) == Interval(-oo, x)
# Products
line = Interval(0, 5)
i = Intersection(line**2, line**3, evaluate=False)
assert (2, 2) not in i
assert (2, 2, 2) not in i
raises(ValueError, lambda: list(i))
assert Intersection(Intersection(S.Integers, S.Naturals, evaluate=False),
S.Reals, evaluate=False) == \
Intersection(S.Integers, S.Naturals, S.Reals, evaluate=False)
assert Intersection(S.Complexes, FiniteSet(S.ComplexInfinity)) == S.EmptySet
def test_issue_9623():
n = Symbol('n')
a = S.Reals
b = Interval(0, oo)
c = FiniteSet(n)
assert Intersection(a, b, c) == Intersection(b, c)
assert Intersection(Interval(1, 2), Interval(3, 4), FiniteSet(n)) == EmptySet()
def test_is_disjoint():
assert Interval(0, 2).is_disjoint(Interval(1, 2)) == False
assert Interval(0, 2).is_disjoint(Interval(3, 4)) == True
def test_ProductSet_of_single_arg_is_arg():
assert ProductSet(Interval(0, 1)) == Interval(0, 1)
def test_interval_subs():
a = Symbol('a', real=True)
assert Interval(0, a).subs(a, 2) == Interval(0, 2)
assert Interval(a, 0).subs(a, 2) == S.EmptySet
def test_interval_to_mpi():
assert Interval(0, 1).to_mpi() == mpi(0, 1)
assert Interval(0, 1, True, False).to_mpi() == mpi(0, 1)
assert type(Interval(0, 1).to_mpi()) == type(mpi(0, 1))
def test_measure():
a = Symbol('a', real=True)
assert Interval(1, 3).measure == 2
assert Interval(0, a).measure == a
assert Interval(1, a).measure == a - 1
assert Union(Interval(1, 2), Interval(3, 4)).measure == 2
assert Union(Interval(1, 2), Interval(3, 4), FiniteSet(5, 6, 7)).measure \
== 2
assert FiniteSet(1, 2, oo, a, -oo, -5).measure == 0
assert S.EmptySet.measure == 0
square = Interval(0, 10) * Interval(0, 10)
offsetsquare = Interval(5, 15) * Interval(5, 15)
band = Interval(-oo, oo) * Interval(2, 4)
assert square.measure == offsetsquare.measure == 100
assert (square + offsetsquare).measure == 175 # there is some overlap
assert (square - offsetsquare).measure == 75
assert (square * FiniteSet(1, 2, 3)).measure == 0
assert (square.intersect(band)).measure == 20
assert (square + band).measure == oo
assert (band * FiniteSet(1, 2, 3)).measure == nan
def test_is_subset():
assert Interval(0, 1).is_subset(Interval(0, 2)) is True
assert Interval(0, 3).is_subset(Interval(0, 2)) is False
assert FiniteSet(1, 2).is_subset(FiniteSet(1, 2, 3, 4))
assert FiniteSet(4, 5).is_subset(FiniteSet(1, 2, 3, 4)) is False
assert FiniteSet(1).is_subset(Interval(0, 2))
assert FiniteSet(1, 2).is_subset(Interval(0, 2, True, True)) is False
assert (Interval(1, 2) + FiniteSet(3)).is_subset(
(Interval(0, 2, False, True) + FiniteSet(2, 3)))
assert Interval(3, 4).is_subset(Union(Interval(0, 1), Interval(2, 5))) is True
assert Interval(3, 6).is_subset(Union(Interval(0, 1), Interval(2, 5))) is False
assert FiniteSet(1, 2, 3, 4).is_subset(Interval(0, 5)) is True
assert S.EmptySet.is_subset(FiniteSet(1, 2, 3)) is True
assert Interval(0, 1).is_subset(S.EmptySet) is False
assert S.EmptySet.is_subset(S.EmptySet) is True
raises(ValueError, lambda: S.EmptySet.is_subset(1))
# tests for the issubset alias
assert FiniteSet(1, 2, 3, 4).issubset(Interval(0, 5)) is True
assert S.EmptySet.issubset(FiniteSet(1, 2, 3)) is True
def test_is_proper_subset():
assert Interval(0, 1).is_proper_subset(Interval(0, 2)) is True
assert Interval(0, 3).is_proper_subset(Interval(0, 2)) is False
assert S.EmptySet.is_proper_subset(FiniteSet(1, 2, 3)) is True
raises(ValueError, lambda: Interval(0, 1).is_proper_subset(0))
def test_is_superset():
assert Interval(0, 1).is_superset(Interval(0, 2)) == False
assert Interval(0, 3).is_superset(Interval(0, 2))
assert FiniteSet(1, 2).is_superset(FiniteSet(1, 2, 3, 4)) == False
assert FiniteSet(4, 5).is_superset(FiniteSet(1, 2, 3, 4)) == False
assert FiniteSet(1).is_superset(Interval(0, 2)) == False
assert FiniteSet(1, 2).is_superset(Interval(0, 2, True, True)) == False
assert (Interval(1, 2) + FiniteSet(3)).is_superset(
(Interval(0, 2, False, True) + FiniteSet(2, 3))) == False
assert Interval(3, 4).is_superset(Union(Interval(0, 1), Interval(2, 5))) == False
assert FiniteSet(1, 2, 3, 4).is_superset(Interval(0, 5)) == False
assert S.EmptySet.is_superset(FiniteSet(1, 2, 3)) == False
assert Interval(0, 1).is_superset(S.EmptySet) == True
assert S.EmptySet.is_superset(S.EmptySet) == True
raises(ValueError, lambda: S.EmptySet.is_superset(1))
# tests for the issuperset alias
assert Interval(0, 1).issuperset(S.EmptySet) == True
assert S.EmptySet.issuperset(S.EmptySet) == True
def test_is_proper_superset():
assert Interval(0, 1).is_proper_superset(Interval(0, 2)) is False
assert Interval(0, 3).is_proper_superset(Interval(0, 2)) is True
assert FiniteSet(1, 2, 3).is_proper_superset(S.EmptySet) is True
raises(ValueError, lambda: Interval(0, 1).is_proper_superset(0))
def test_contains():
assert Interval(0, 2).contains(1) is S.true
assert Interval(0, 2).contains(3) is S.false
assert Interval(0, 2, True, False).contains(0) is S.false
assert Interval(0, 2, True, False).contains(2) is S.true
assert Interval(0, 2, False, True).contains(0) is S.true
assert Interval(0, 2, False, True).contains(2) is S.false
assert Interval(0, 2, True, True).contains(0) is S.false
assert Interval(0, 2, True, True).contains(2) is S.false
assert (Interval(0, 2) in Interval(0, 2)) is False
assert FiniteSet(1, 2, 3).contains(2) is S.true
assert FiniteSet(1, 2, Symbol('x')).contains(Symbol('x')) is S.true
# issue 8197
from sympy.abc import a, b
assert isinstance(FiniteSet(b).contains(-a), Contains)
assert isinstance(FiniteSet(b).contains(a), Contains)
assert isinstance(FiniteSet(a).contains(1), Contains)
raises(TypeError, lambda: 1 in FiniteSet(a))
# issue 8209
rad1 = Pow(Pow(2, S(1)/3) - 1, S(1)/3)
rad2 = Pow(S(1)/9, S(1)/3) - Pow(S(2)/9, S(1)/3) + Pow(S(4)/9, S(1)/3)
s1 = FiniteSet(rad1)
s2 = FiniteSet(rad2)
assert s1 - s2 == S.EmptySet
items = [1, 2, S.Infinity, S('ham'), -1.1]
fset = FiniteSet(*items)
assert all(item in fset for item in items)
assert all(fset.contains(item) is S.true for item in items)
assert Union(Interval(0, 1), Interval(2, 5)).contains(3) is S.true
assert Union(Interval(0, 1), Interval(2, 5)).contains(6) is S.false
assert Union(Interval(0, 1), FiniteSet(2, 5)).contains(3) is S.false
assert S.EmptySet.contains(1) is S.false
assert FiniteSet(rootof(x**3 + x - 1, 0)).contains(S.Infinity) is S.false
assert rootof(x**5 + x**3 + 1, 0) in S.Reals
assert not rootof(x**5 + x**3 + 1, 1) in S.Reals
# non-bool results
assert Union(Interval(1, 2), Interval(3, 4)).contains(x) == \
Or(And(x <= 2, x >= 1), And(x <= 4, x >= 3))
assert Intersection(Interval(1, x), Interval(2, 3)).contains(y) == \
And(y <= 3, y <= x, y >= 1, y >= 2)
assert (S.Complexes).contains(S.ComplexInfinity) == S.false
def test_interval_symbolic():
x = Symbol('x')
e = Interval(0, 1)
assert e.contains(x) == And(0 <= x, x <= 1)
raises(TypeError, lambda: x in e)
e = Interval(0, 1, True, True)
assert e.contains(x) == And(0 < x, x < 1)
def test_union_contains():
x = Symbol('x')
i1 = Interval(0, 1)
i2 = Interval(2, 3)
i3 = Union(i1, i2)
raises(TypeError, lambda: x in i3)
e = i3.contains(x)
assert e == Or(And(0 <= x, x <= 1), And(2 <= x, x <= 3))
assert e.subs(x, -0.5) is false
assert e.subs(x, 0.5) is true
assert e.subs(x, 1.5) is false
assert e.subs(x, 2.5) is true
assert e.subs(x, 3.5) is false
U = Interval(0, 2, True, True) + Interval(10, oo) + FiniteSet(-1, 2, 5, 6)
assert all(el not in U for el in [0, 4, -oo])
assert all(el in U for el in [2, 5, 10])
def test_is_number():
assert Interval(0, 1).is_number is False
assert Set().is_number is False
def test_Interval_is_left_unbounded():
assert Interval(3, 4).is_left_unbounded is False
assert Interval(-oo, 3).is_left_unbounded is True
assert Interval(Float("-inf"), 3).is_left_unbounded is True
def test_Interval_is_right_unbounded():
assert Interval(3, 4).is_right_unbounded is False
assert Interval(3, oo).is_right_unbounded is True
assert Interval(3, Float("+inf")).is_right_unbounded is True
def test_Interval_as_relational():
x = Symbol('x')
assert Interval(-1, 2, False, False).as_relational(x) == \
And(Le(-1, x), Le(x, 2))
assert Interval(-1, 2, True, False).as_relational(x) == \
And(Lt(-1, x), Le(x, 2))
assert Interval(-1, 2, False, True).as_relational(x) == \
And(Le(-1, x), Lt(x, 2))
assert Interval(-1, 2, True, True).as_relational(x) == \
And(Lt(-1, x), Lt(x, 2))
assert Interval(-oo, 2, right_open=False).as_relational(x) == And(Lt(-oo, x), Le(x, 2))
assert Interval(-oo, 2, right_open=True).as_relational(x) == And(Lt(-oo, x), Lt(x, 2))
assert Interval(-2, oo, left_open=False).as_relational(x) == And(Le(-2, x), Lt(x, oo))
assert Interval(-2, oo, left_open=True).as_relational(x) == And(Lt(-2, x), Lt(x, oo))
assert Interval(-oo, oo).as_relational(x) == And(Lt(-oo, x), Lt(x, oo))
x = Symbol('x', real=True)
y = Symbol('y', real=True)
assert Interval(x, y).as_relational(x) == (x <= y)
assert Interval(y, x).as_relational(x) == (y <= x)
def test_Finite_as_relational():
x = Symbol('x')
y = Symbol('y')
assert FiniteSet(1, 2).as_relational(x) == Or(Eq(x, 1), Eq(x, 2))
assert FiniteSet(y, -5).as_relational(x) == Or(Eq(x, y), Eq(x, -5))
def test_Union_as_relational():
x = Symbol('x')
assert (Interval(0, 1) + FiniteSet(2)).as_relational(x) == \
Or(And(Le(0, x), Le(x, 1)), Eq(x, 2))
assert (Interval(0, 1, True, True) + FiniteSet(1)).as_relational(x) == \
And(Lt(0, x), Le(x, 1))
def test_Intersection_as_relational():
x = Symbol('x')
assert (Intersection(Interval(0, 1), FiniteSet(2),
evaluate=False).as_relational(x)
== And(And(Le(0, x), Le(x, 1)), Eq(x, 2)))
def test_EmptySet():
assert S.EmptySet.as_relational(Symbol('x')) is S.false
assert S.EmptySet.intersect(S.UniversalSet) == S.EmptySet
assert S.EmptySet.boundary == S.EmptySet
def test_finite_basic():
x = Symbol('x')
A = FiniteSet(1, 2, 3)
B = FiniteSet(3, 4, 5)
AorB = Union(A, B)
AandB = A.intersect(B)
assert A.is_subset(AorB) and B.is_subset(AorB)
assert AandB.is_subset(A)
assert AandB == FiniteSet(3)
assert A.inf == 1 and A.sup == 3
assert AorB.inf == 1 and AorB.sup == 5
assert FiniteSet(x, 1, 5).sup == Max(x, 5)
assert FiniteSet(x, 1, 5).inf == Min(x, 1)
# issue 7335
assert FiniteSet(S.EmptySet) != S.EmptySet
assert FiniteSet(FiniteSet(1, 2, 3)) != FiniteSet(1, 2, 3)
assert FiniteSet((1, 2, 3)) != FiniteSet(1, 2, 3)
# Ensure a variety of types can exist in a FiniteSet
s = FiniteSet((1, 2), Float, A, -5, x, 'eggs', x**2, Interval)
assert (A > B) is False
assert (A >= B) is False
assert (A < B) is False
assert (A <= B) is False
assert AorB > A and AorB > B
assert AorB >= A and AorB >= B
assert A >= A and A <= A
assert A >= AandB and B >= AandB
assert A > AandB and B > AandB
def test_powerset():
# EmptySet
A = FiniteSet()
pset = A.powerset()
assert len(pset) == 1
assert pset == FiniteSet(S.EmptySet)
# FiniteSets
A = FiniteSet(1, 2)
pset = A.powerset()
assert len(pset) == 2**len(A)
assert pset == FiniteSet(FiniteSet(), FiniteSet(1),
FiniteSet(2), A)
# Not finite sets
I = Interval(0, 1)
raises(NotImplementedError, I.powerset)
def test_product_basic():
H, T = 'H', 'T'
unit_line = Interval(0, 1)
d6 = FiniteSet(1, 2, 3, 4, 5, 6)
d4 = FiniteSet(1, 2, 3, 4)
coin = FiniteSet(H, T)
square = unit_line * unit_line
assert (0, 0) in square
assert 0 not in square
assert (H, T) in coin ** 2
assert (.5, .5, .5) in square * unit_line
assert (H, 3, 3) in coin * d6* d6
HH, TT = sympify(H), sympify(T)
assert set(coin**2) == set(((HH, HH), (HH, TT), (TT, HH), (TT, TT)))
assert (d4*d4).is_subset(d6*d6)
assert square.complement(Interval(-oo, oo)*Interval(-oo, oo)) == Union(
(Interval(-oo, 0, True, True) +
Interval(1, oo, True, True))*Interval(-oo, oo),
Interval(-oo, oo)*(Interval(-oo, 0, True, True) +
Interval(1, oo, True, True)))
assert (Interval(-5, 5)**3).is_subset(Interval(-10, 10)**3)
assert not (Interval(-10, 10)**3).is_subset(Interval(-5, 5)**3)
assert not (Interval(-5, 5)**2).is_subset(Interval(-10, 10)**3)
assert (Interval(.2, .5)*FiniteSet(.5)).is_subset(square) # segment in square
assert len(coin*coin*coin) == 8
assert len(S.EmptySet*S.EmptySet) == 0
assert len(S.EmptySet*coin) == 0
raises(TypeError, lambda: len(coin*Interval(0, 2)))
def test_real():
x = Symbol('x', real=True, finite=True)
I = Interval(0, 5)
J = Interval(10, 20)
A = FiniteSet(1, 2, 30, x, S.Pi)
B = FiniteSet(-4, 0)
C = FiniteSet(100)
D = FiniteSet('Ham', 'Eggs')
assert all(s.is_subset(S.Reals) for s in [I, J, A, B, C])
assert not D.is_subset(S.Reals)
assert all((a + b).is_subset(S.Reals) for a in [I, J, A, B, C] for b in [I, J, A, B, C])
assert not any((a + D).is_subset(S.Reals) for a in [I, J, A, B, C, D])
assert not (I + A + D).is_subset(S.Reals)
def test_supinf():
x = Symbol('x', real=True)
y = Symbol('y', real=True)
assert (Interval(0, 1) + FiniteSet(2)).sup == 2
assert (Interval(0, 1) + FiniteSet(2)).inf == 0
assert (Interval(0, 1) + FiniteSet(x)).sup == Max(1, x)
assert (Interval(0, 1) + FiniteSet(x)).inf == Min(0, x)
assert FiniteSet(5, 1, x).sup == Max(5, x)
assert FiniteSet(5, 1, x).inf == Min(1, x)
assert FiniteSet(5, 1, x, y).sup == Max(5, x, y)
assert FiniteSet(5, 1, x, y).inf == Min(1, x, y)
assert FiniteSet(5, 1, x, y, S.Infinity, S.NegativeInfinity).sup == \
S.Infinity
assert FiniteSet(5, 1, x, y, S.Infinity, S.NegativeInfinity).inf == \
S.NegativeInfinity
assert FiniteSet('Ham', 'Eggs').sup == Max('Ham', 'Eggs')
def test_universalset():
U = S.UniversalSet
x = Symbol('x')
assert U.as_relational(x) is S.true
assert U.union(Interval(2, 4)) == U
assert U.intersect(Interval(2, 4)) == Interval(2, 4)
assert U.measure == S.Infinity
assert U.boundary == S.EmptySet
assert U.contains(0) is S.true
def test_Union_of_ProductSets_shares():
line = Interval(0, 2)
points = FiniteSet(0, 1, 2)
assert Union(line * line, line * points) == line * line
def test_Interval_free_symbols():
# issue 6211
assert Interval(0, 1).free_symbols == set()
x = Symbol('x', real=True)
assert Interval(0, x).free_symbols == {x}
def test_image_interval():
from sympy.core.numbers import Rational
x = Symbol('x', real=True)
a = Symbol('a', real=True)
assert imageset(x, 2*x, Interval(-2, 1)) == Interval(-4, 2)
assert imageset(x, 2*x, Interval(-2, 1, True, False)) == \
Interval(-4, 2, True, False)
assert imageset(x, x**2, Interval(-2, 1, True, False)) == \
Interval(0, 4, False, True)
assert imageset(x, x**2, Interval(-2, 1)) == Interval(0, 4)
assert imageset(x, x**2, Interval(-2, 1, True, False)) == \
Interval(0, 4, False, True)
assert imageset(x, x**2, Interval(-2, 1, True, True)) == \
Interval(0, 4, False, True)
assert imageset(x, (x - 2)**2, Interval(1, 3)) == Interval(0, 1)
assert imageset(x, 3*x**4 - 26*x**3 + 78*x**2 - 90*x, Interval(0, 4)) == \
Interval(-35, 0) # Multiple Maxima
assert imageset(x, x + 1/x, Interval(-oo, oo)) == Interval(-oo, -2) \
+ Interval(2, oo) # Single Infinite discontinuity
assert imageset(x, 1/x + 1/(x-1)**2, Interval(0, 2, True, False)) == \
Interval(Rational(3, 2), oo, False) # Multiple Infinite discontinuities
# Test for Python lambda
assert imageset(lambda x: 2*x, Interval(-2, 1)) == Interval(-4, 2)
assert imageset(Lambda(x, a*x), Interval(0, 1)) == \
ImageSet(Lambda(x, a*x), Interval(0, 1))
assert imageset(Lambda(x, sin(cos(x))), Interval(0, 1)) == \
ImageSet(Lambda(x, sin(cos(x))), Interval(0, 1))
def test_image_piecewise():
f = Piecewise((x, x <= -1), (1/x**2, x <= 5), (x**3, True))
f1 = Piecewise((0, x <= 1), (1, x <= 2), (2, True))
assert imageset(x, f, Interval(-5, 5)) == Union(Interval(-5, -1), Interval(S(1)/25, oo))
assert imageset(x, f1, Interval(1, 2)) == FiniteSet(0, 1)
@XFAIL # See: https://github.com/sympy/sympy/pull/2723#discussion_r8659826
def test_image_Intersection():
x = Symbol('x', real=True)
y = Symbol('y', real=True)
assert imageset(x, x**2, Interval(-2, 0).intersect(Interval(x, y))) == \
Interval(0, 4).intersect(Interval(Min(x**2, y**2), Max(x**2, y**2)))
def test_image_FiniteSet():
x = Symbol('x', real=True)
assert imageset(x, 2*x, FiniteSet(1, 2, 3)) == FiniteSet(2, 4, 6)
def test_image_Union():
x = Symbol('x', real=True)
assert imageset(x, x**2, Interval(-2, 0) + FiniteSet(1, 2, 3)) == \
(Interval(0, 4) + FiniteSet(9))
def test_image_EmptySet():
x = Symbol('x', real=True)
assert imageset(x, 2*x, S.EmptySet) == S.EmptySet
def test_issue_5724_7680():
assert I not in S.Reals # issue 7680
assert Interval(-oo, oo).contains(I) is S.false
def test_boundary():
x = Symbol('x', real=True)
y = Symbol('y', real=True)
assert FiniteSet(1).boundary == FiniteSet(1)
assert all(Interval(0, 1, left_open, right_open).boundary == FiniteSet(0, 1)
for left_open in (true, false) for right_open in (true, false))
def test_boundary_Union():
assert (Interval(0, 1) + Interval(2, 3)).boundary == FiniteSet(0, 1, 2, 3)
assert ((Interval(0, 1, False, True)
+ Interval(1, 2, True, False)).boundary == FiniteSet(0, 1, 2))
assert (Interval(0, 1) + FiniteSet(2)).boundary == FiniteSet(0, 1, 2)
assert Union(Interval(0, 10), Interval(5, 15), evaluate=False).boundary \
== FiniteSet(0, 15)
assert Union(Interval(0, 10), Interval(0, 1), evaluate=False).boundary \
== FiniteSet(0, 10)
assert Union(Interval(0, 10, True, True),
Interval(10, 15, True, True), evaluate=False).boundary \
== FiniteSet(0, 10, 15)
@XFAIL
def test_union_boundary_of_joining_sets():
""" Testing the boundary of unions is a hard problem """
assert Union(Interval(0, 10), Interval(10, 15), evaluate=False).boundary \
== FiniteSet(0, 15)
def test_boundary_ProductSet():
open_square = Interval(0, 1, True, True) ** 2
assert open_square.boundary == (FiniteSet(0, 1) * Interval(0, 1)
+ Interval(0, 1) * FiniteSet(0, 1))
second_square = Interval(1, 2, True, True) * Interval(0, 1, True, True)
assert (open_square + second_square).boundary == (
FiniteSet(0, 1) * Interval(0, 1)
+ FiniteSet(1, 2) * Interval(0, 1)
+ Interval(0, 1) * FiniteSet(0, 1)
+ Interval(1, 2) * FiniteSet(0, 1))
def test_boundary_ProductSet_line():
line_in_r2 = Interval(0, 1) * FiniteSet(0)
assert line_in_r2.boundary == line_in_r2
def test_is_open():
assert not Interval(0, 1, False, False).is_open
assert not Interval(0, 1, True, False).is_open
assert Interval(0, 1, True, True).is_open
assert not FiniteSet(1, 2, 3).is_open
def test_is_closed():
assert Interval(0, 1, False, False).is_closed
assert not Interval(0, 1, True, False).is_closed
assert FiniteSet(1, 2, 3).is_closed
def test_closure():
assert Interval(0, 1, False, True).closure == Interval(0, 1, False, False)
def test_interior():
assert Interval(0, 1, False, True).interior == Interval(0, 1, True, True)
def test_issue_7841():
raises(TypeError, lambda: x in S.Reals)
def test_Eq():
assert Eq(Interval(0, 1), Interval(0, 1))
assert Eq(Interval(0, 1), Interval(0, 2)) == False
s1 = FiniteSet(0, 1)
s2 = FiniteSet(1, 2)
assert Eq(s1, s1)
assert Eq(s1, s2) == False
assert Eq(s1*s2, s1*s2)
assert Eq(s1*s2, s2*s1) == False
def test_SymmetricDifference():
assert SymmetricDifference(FiniteSet(0, 1, 2, 3, 4, 5), \
FiniteSet(2, 4, 6, 8, 10)) == FiniteSet(0, 1, 3, 5, 6, 8, 10)
assert SymmetricDifference(FiniteSet(2, 3, 4), FiniteSet(2, 3, 4, 5)) \
== FiniteSet(5)
assert FiniteSet(1, 2, 3, 4, 5) ^ FiniteSet(1, 2, 5, 6) == \
FiniteSet(3, 4, 6)
assert Set(1, 2, 3) ^ Set(2, 3, 4) == Union(Set(1, 2, 3) - Set(2, 3, 4), \
Set(2, 3, 4) - Set(1, 2, 3))
assert Interval(0, 4) ^ Interval(2, 5) == Union(Interval(0, 4) - \
Interval(2, 5), Interval(2, 5) - Interval(0, 4))
def test_issue_9536():
from sympy.functions.elementary.exponential import log
a = Symbol('a', real=True)
assert FiniteSet(log(a)).intersect(S.Reals) == Intersection(S.Reals, FiniteSet(log(a)))
def test_issue_9637():
n = Symbol('n')
a = FiniteSet(n)
b = FiniteSet(2, n)
assert Complement(S.Reals, a) == Complement(S.Reals, a, evaluate=False)
assert Complement(Interval(1, 3), a) == Complement(Interval(1, 3), a, evaluate=False)
assert Complement(Interval(1, 3), b) == \
Complement(Union(Interval(1, 2, False, True), Interval(2, 3, True, False)), a)
assert Complement(a, S.Reals) == Complement(a, S.Reals, evaluate=False)
assert Complement(a, Interval(1, 3)) == Complement(a, Interval(1, 3), evaluate=False)
def test_issue_9808():
assert Complement(FiniteSet(y), FiniteSet(1)) == Complement(FiniteSet(y), FiniteSet(1), evaluate=False)
assert Complement(FiniteSet(1, 2, x), FiniteSet(x, y, 2, 3)) == \
Complement(FiniteSet(1), FiniteSet(y), evaluate=False)
def test_issue_9956():
assert Union(Interval(-oo, oo), FiniteSet(1)) == Interval(-oo, oo)
assert Interval(-oo, oo).contains(1) is S.true
def test_issue_Symbol_inter():
i = Interval(0, oo)
r = S.Reals
mat = Matrix([0, 0, 0])
assert Intersection(r, i, FiniteSet(m), FiniteSet(m, n)) == \
Intersection(i, FiniteSet(m))
assert Intersection(FiniteSet(1, m, n), FiniteSet(m, n, 2), i) == \
Intersection(i, FiniteSet(m, n))
assert Intersection(FiniteSet(m, n, x), FiniteSet(m, z), r) == \
Intersection(r, FiniteSet(m, z), FiniteSet(n, x))
assert Intersection(FiniteSet(m, n, 3), FiniteSet(m, n, x), r) == \
Intersection(r, FiniteSet(3, m, n), evaluate=False)
assert Intersection(FiniteSet(m, n, 3), FiniteSet(m, n, 2, 3), r) == \
Union(FiniteSet(3), Intersection(r, FiniteSet(m, n)))
assert Intersection(r, FiniteSet(mat, 2, n), FiniteSet(0, mat, n)) == \
Intersection(r, FiniteSet(n))
assert Intersection(FiniteSet(sin(x), cos(x)), FiniteSet(sin(x), cos(x), 1), r) == \
Intersection(r, FiniteSet(sin(x), cos(x)))
assert Intersection(FiniteSet(x**2, 1, sin(x)), FiniteSet(x**2, 2, sin(x)), r) == \
Intersection(r, FiniteSet(x**2, sin(x)))
def test_issue_10113():
f = x**2/(x**2 - 4)
assert imageset(x, f, S.Reals) == Union(Interval(-oo, 0), Interval(1, oo, True, True))
assert imageset(x, f, Interval(-2, 2)) == Interval(-oo, 0)
assert imageset(x, f, Interval(-2, 3)) == Union(Interval(-oo, 0), Interval(S(9)/5, oo))
def test_issue_10248():
assert list(Intersection(S.Reals, FiniteSet(x))) == [
And(x < oo, x > -oo)]
def test_issue_9447():
a = Interval(0, 1) + Interval(2, 3)
assert Complement(S.UniversalSet, a) == Complement(
S.UniversalSet, Union(Interval(0, 1), Interval(2, 3)), evaluate=False)
assert Complement(S.Naturals, a) == Complement(
S.Naturals, Union(Interval(0, 1), Interval(2, 3)), evaluate=False)
def test_issue_10337():
assert (FiniteSet(2) == 3) is False
assert (FiniteSet(2) != 3) is True
raises(TypeError, lambda: FiniteSet(2) < 3)
raises(TypeError, lambda: FiniteSet(2) <= 3)
raises(TypeError, lambda: FiniteSet(2) > 3)
raises(TypeError, lambda: FiniteSet(2) >= 3)
def test_issue_10326():
bad = [
EmptySet(),
FiniteSet(1),
Interval(1, 2),
S.ComplexInfinity,
S.ImaginaryUnit,
S.Infinity,
S.NaN,
S.NegativeInfinity,
]
interval = Interval(0, 5)
for i in bad:
assert i not in interval
x = Symbol('x', real=True)
nr = Symbol('nr', real=False)
assert x + 1 in Interval(x, x + 4)
assert nr not in Interval(x, x + 4)
assert Interval(1, 2) in FiniteSet(Interval(0, 5), Interval(1, 2))
assert Interval(-oo, oo).contains(oo) is S.false
assert Interval(-oo, oo).contains(-oo) is S.false
def test_issue_2799():
U = S.UniversalSet
a = Symbol('a', real=True)
inf_interval = Interval(a, oo)
R = S.Reals
assert U + inf_interval == inf_interval + U
assert U + R == R + U
assert R + inf_interval == inf_interval + R
def test_issue_9706():
assert Interval(-oo, 0).closure == Interval(-oo, 0, True, False)
assert Interval(0, oo).closure == Interval(0, oo, False, True)
assert Interval(-oo, oo).closure == Interval(-oo, oo)
def test_issue_8257():
reals_plus_infinity = Union(Interval(-oo, oo), FiniteSet(oo))
reals_plus_negativeinfinity = Union(Interval(-oo, oo), FiniteSet(-oo))
assert Interval(-oo, oo) + FiniteSet(oo) == reals_plus_infinity
assert FiniteSet(oo) + Interval(-oo, oo) == reals_plus_infinity
assert Interval(-oo, oo) + FiniteSet(-oo) == reals_plus_negativeinfinity
assert FiniteSet(-oo) + Interval(-oo, oo) == reals_plus_negativeinfinity
def test_issue_10931():
assert S.Integers - S.Integers == EmptySet()
assert S.Integers - S.Reals == EmptySet()
def test_issue_11174():
soln = Intersection(Interval(-oo, oo), FiniteSet(-x), evaluate=False)
assert Intersection(FiniteSet(-x), S.Reals) == soln
soln = Intersection(S.Reals, FiniteSet(x), evaluate=False)
assert Intersection(FiniteSet(x), S.Reals) == soln
|
bsd-3-clause
|
songmonit/CTTMSONLINE
|
addons/account/wizard/account_fiscalyear_close.py
|
81
|
15499
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_fiscalyear_close(osv.osv_memory):
"""
    Closes the account fiscal year and generates opening entries for the new fiscal year
"""
_name = "account.fiscalyear.close"
_description = "Fiscalyear Close"
_columns = {
'fy_id': fields.many2one('account.fiscalyear', \
'Fiscal Year to close', required=True, help="Select a Fiscal year to close"),
'fy2_id': fields.many2one('account.fiscalyear', \
'New Fiscal Year', required=True),
'journal_id': fields.many2one('account.journal', 'Opening Entries Journal', domain="[('type','=','situation')]", required=True, help='The best practice here is to use a journal dedicated to contain the opening entries of all fiscal years. Note that you should define it with default debit/credit accounts, of type \'situation\' and with a centralized counterpart.'),
'period_id': fields.many2one('account.period', 'Opening Entries Period', required=True),
'report_name': fields.char('Name of new entries', required=True, help="Give name of the new entries"),
}
_defaults = {
'report_name': lambda self, cr, uid, context: _('End of Fiscal Year Entry'),
}
def data_save(self, cr, uid, ids, context=None):
"""
        This function closes the account fiscal year and creates entries in the new fiscal year.
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param ids: list of account fiscal year close wizard record IDs
"""
def _reconcile_fy_closing(cr, uid, ids, context=None):
"""
            This private function manually does the reconciliation on the account_move_line records given as `ids`,
            directly through raw SQL. It is done this way because the usual `reconcile()` method on the
            account.move.line object is very resource hungry (it is not meant to reconcile thousands of records
            at once) and performs a lot of extra computation that is useless in this particular case.
"""
            #check that the reconciliation concerns journal entries from only one company
cr.execute('select distinct(company_id) from account_move_line where id in %s',(tuple(ids),))
if len(cr.fetchall()) > 1:
raise osv.except_osv(_('Warning!'), _('The entries to reconcile should belong to the same company.'))
r_id = self.pool.get('account.move.reconcile').create(cr, uid, {'type': 'auto', 'opening_reconciliation': True})
cr.execute('update account_move_line set reconcile_id = %s where id in %s',(r_id, tuple(ids),))
obj_acc_move_line.invalidate_cache(cr, uid, ['reconcile_id'], ids, context=context)
return r_id
obj_acc_period = self.pool.get('account.period')
obj_acc_fiscalyear = self.pool.get('account.fiscalyear')
obj_acc_journal = self.pool.get('account.journal')
obj_acc_move = self.pool.get('account.move')
obj_acc_move_line = self.pool.get('account.move.line')
obj_acc_account = self.pool.get('account.account')
obj_acc_journal_period = self.pool.get('account.journal.period')
currency_obj = self.pool.get('res.currency')
data = self.browse(cr, uid, ids, context=context)
if context is None:
context = {}
fy_id = data[0].fy_id.id
cr.execute("SELECT id FROM account_period WHERE date_stop < (SELECT date_start FROM account_fiscalyear WHERE id = %s)", (str(data[0].fy2_id.id),))
fy_period_set = ','.join(map(lambda id: str(id[0]), cr.fetchall()))
cr.execute("SELECT id FROM account_period WHERE date_start > (SELECT date_stop FROM account_fiscalyear WHERE id = %s)", (str(fy_id),))
fy2_period_set = ','.join(map(lambda id: str(id[0]), cr.fetchall()))
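        #descriptive note (not in the original wizard): fy_period_set and
        #fy2_period_set are comma-separated period id strings that are spliced
        #verbatim into the SQL IN (...) clauses used further below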
if not fy_period_set or not fy2_period_set:
raise osv.except_osv(_('User Error!'), _('The periods to generate opening entries cannot be found.'))
period = obj_acc_period.browse(cr, uid, data[0].period_id.id, context=context)
new_fyear = obj_acc_fiscalyear.browse(cr, uid, data[0].fy2_id.id, context=context)
old_fyear = obj_acc_fiscalyear.browse(cr, uid, fy_id, context=context)
new_journal = data[0].journal_id.id
new_journal = obj_acc_journal.browse(cr, uid, new_journal, context=context)
company_id = new_journal.company_id.id
if not new_journal.default_credit_account_id or not new_journal.default_debit_account_id:
raise osv.except_osv(_('User Error!'),
_('The journal must have default credit and debit account.'))
if (not new_journal.centralisation) or new_journal.entry_posted:
raise osv.except_osv(_('User Error!'),
_('The journal must have centralized counterpart without the Skipping draft state option checked.'))
#delete existing move and move lines if any
move_ids = obj_acc_move.search(cr, uid, [
('journal_id', '=', new_journal.id), ('period_id', '=', period.id)])
if move_ids:
move_line_ids = obj_acc_move_line.search(cr, uid, [('move_id', 'in', move_ids)])
obj_acc_move_line._remove_move_reconcile(cr, uid, move_line_ids, opening_reconciliation=True, context=context)
obj_acc_move_line.unlink(cr, uid, move_line_ids, context=context)
obj_acc_move.unlink(cr, uid, move_ids, context=context)
cr.execute("SELECT id FROM account_fiscalyear WHERE date_stop < %s", (str(new_fyear.date_start),))
result = cr.dictfetchall()
fy_ids = ','.join([str(x['id']) for x in result])
query_line = obj_acc_move_line._query_get(cr, uid,
obj='account_move_line', context={'fiscalyear': fy_ids})
#create the opening move
vals = {
'name': '/',
'ref': '',
'period_id': period.id,
'date': period.date_start,
'journal_id': new_journal.id,
}
move_id = obj_acc_move.create(cr, uid, vals, context=context)
        #1. report of the accounts with deferral method == 'unreconciled'
cr.execute('''
SELECT a.id
FROM account_account a
LEFT JOIN account_account_type t ON (a.user_type = t.id)
WHERE a.active
AND a.type not in ('view', 'consolidation')
AND a.company_id = %s
AND t.close_method = %s''', (company_id, 'unreconciled', ))
account_ids = map(lambda x: x[0], cr.fetchall())
if account_ids:
cr.execute('''
INSERT INTO account_move_line (
name, create_uid, create_date, write_uid, write_date,
statement_id, journal_id, currency_id, date_maturity,
partner_id, blocked, credit, state, debit,
ref, account_id, period_id, date, move_id, amount_currency,
quantity, product_id, company_id)
(SELECT name, create_uid, create_date, write_uid, write_date,
statement_id, %s,currency_id, date_maturity, partner_id,
blocked, credit, 'draft', debit, ref, account_id,
%s, (%s) AS date, %s, amount_currency, quantity, product_id, company_id
FROM account_move_line
WHERE account_id IN %s
AND ''' + query_line + '''
AND reconcile_id IS NULL)''', (new_journal.id, period.id, period.date_start, move_id, tuple(account_ids),))
        #We also have to consider all move_lines that were reconciled
#on another fiscal year, and report them too
cr.execute('''
INSERT INTO account_move_line (
name, create_uid, create_date, write_uid, write_date,
statement_id, journal_id, currency_id, date_maturity,
partner_id, blocked, credit, state, debit,
ref, account_id, period_id, date, move_id, amount_currency,
quantity, product_id, company_id)
(SELECT
b.name, b.create_uid, b.create_date, b.write_uid, b.write_date,
b.statement_id, %s, b.currency_id, b.date_maturity,
b.partner_id, b.blocked, b.credit, 'draft', b.debit,
b.ref, b.account_id, %s, (%s) AS date, %s, b.amount_currency,
b.quantity, b.product_id, b.company_id
FROM account_move_line b
WHERE b.account_id IN %s
AND b.reconcile_id IS NOT NULL
AND b.period_id IN ('''+fy_period_set+''')
AND b.reconcile_id IN (SELECT DISTINCT(reconcile_id)
FROM account_move_line a
WHERE a.period_id IN ('''+fy2_period_set+''')))''', (new_journal.id, period.id, period.date_start, move_id, tuple(account_ids),))
self.invalidate_cache(cr, uid, context=context)
        #2. report of the accounts with deferral method == 'detail'
cr.execute('''
SELECT a.id
FROM account_account a
LEFT JOIN account_account_type t ON (a.user_type = t.id)
WHERE a.active
AND a.type not in ('view', 'consolidation')
AND a.company_id = %s
AND t.close_method = %s''', (company_id, 'detail', ))
account_ids = map(lambda x: x[0], cr.fetchall())
if account_ids:
cr.execute('''
INSERT INTO account_move_line (
name, create_uid, create_date, write_uid, write_date,
statement_id, journal_id, currency_id, date_maturity,
partner_id, blocked, credit, state, debit,
ref, account_id, period_id, date, move_id, amount_currency,
quantity, product_id, company_id)
(SELECT name, create_uid, create_date, write_uid, write_date,
statement_id, %s,currency_id, date_maturity, partner_id,
blocked, credit, 'draft', debit, ref, account_id,
%s, (%s) AS date, %s, amount_currency, quantity, product_id, company_id
FROM account_move_line
WHERE account_id IN %s
AND ''' + query_line + ''')
''', (new_journal.id, period.id, period.date_start, move_id, tuple(account_ids),))
self.invalidate_cache(cr, uid, context=context)
        #3. report of the accounts with deferral method == 'balance'
cr.execute('''
SELECT a.id
FROM account_account a
LEFT JOIN account_account_type t ON (a.user_type = t.id)
WHERE a.active
AND a.type not in ('view', 'consolidation')
AND a.company_id = %s
AND t.close_method = %s''', (company_id, 'balance', ))
account_ids = map(lambda x: x[0], cr.fetchall())
query_1st_part = """
INSERT INTO account_move_line (
debit, credit, name, date, move_id, journal_id, period_id,
account_id, currency_id, amount_currency, company_id, state) VALUES
"""
query_2nd_part = ""
query_2nd_part_args = []
for account in obj_acc_account.browse(cr, uid, account_ids, context={'fiscalyear': fy_id}):
company_currency_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.currency_id
if not currency_obj.is_zero(cr, uid, company_currency_id, abs(account.balance)):
if query_2nd_part:
query_2nd_part += ','
query_2nd_part += "(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
query_2nd_part_args += (account.balance > 0 and account.balance or 0.0,
account.balance < 0 and -account.balance or 0.0,
data[0].report_name,
period.date_start,
move_id,
new_journal.id,
period.id,
account.id,
account.currency_id and account.currency_id.id or None,
account.foreign_balance if account.currency_id else 0.0,
account.company_id.id,
'draft')
if query_2nd_part:
cr.execute(query_1st_part + query_2nd_part, tuple(query_2nd_part_args))
self.invalidate_cache(cr, uid, context=context)
#validate and centralize the opening move
obj_acc_move.validate(cr, uid, [move_id], context=context)
#reconcile all the move.line of the opening move
ids = obj_acc_move_line.search(cr, uid, [('journal_id', '=', new_journal.id),
('period_id.fiscalyear_id','=',new_fyear.id)])
if ids:
reconcile_id = _reconcile_fy_closing(cr, uid, ids, context=context)
            #set the creation date of the reconciliation to the first day of the new fiscal year, in order to get correct figures in the aged trial balance
self.pool.get('account.move.reconcile').write(cr, uid, [reconcile_id], {'create_date': new_fyear.date_start}, context=context)
#create the journal.period object and link it to the old fiscalyear
new_period = data[0].period_id.id
ids = obj_acc_journal_period.search(cr, uid, [('journal_id', '=', new_journal.id), ('period_id', '=', new_period)])
if not ids:
ids = [obj_acc_journal_period.create(cr, uid, {
'name': (new_journal.name or '') + ':' + (period.code or ''),
'journal_id': new_journal.id,
'period_id': period.id
})]
cr.execute('UPDATE account_fiscalyear ' \
'SET end_journal_period_id = %s ' \
'WHERE id = %s', (ids[0], old_fyear.id))
obj_acc_fiscalyear.invalidate_cache(cr, uid, ['end_journal_period_id'], [old_fyear.id], context=context)
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
yunw/nodejs
|
node_modules/grunt/node_modules/gzip-js/node_modules/deflate-js/test/runner.py
|
177
|
1688
|
#!/usr/bin/env python
import argparse
import deflate
import inflate
from colorama import Fore
testDir = 'test-files'
outDir = 'test-outs'
allPassed = True
parser = argparse.ArgumentParser(description='Process command-line arguments')
parser.add_argument('--test', metavar='deflate|inflate|both', type=str, default='both', nargs='?', help='Which test to run: deflate, inflate, or both')
parser.add_argument('--file', '-f', metavar='path/to/file', type=str, nargs='?', help='Path to file to use for test')
parser.add_argument('--level', '-l', metavar='#', type=int, nargs='?', help='Compression level')
parser.add_argument('--no-delete', const=True, default=False, nargs='?', help='Don\'t delete files produced for test')
args = parser.parse_args()
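# Illustrative invocations (not part of the original script):
#   python runner.py --test deflate -f test-files/sample.txt -l 6
#   python runner.py --test both --no-delete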
delete = not getattr(args, 'no_delete')
level = getattr(args, 'level')
inFile = getattr(args, 'file')
test = getattr(args, 'test')
if test == 'deflate' or test == 'both':
print Fore.CYAN + 'Running deflate tests' + Fore.RESET
passed = True
    if inFile is not None:
passed = deflate.runTest(inFile, level, delete, outDir)
else:
passed = deflate.runAll(level, delete, testDir, outDir)
    # if any single test fails, the whole run is marked as failed
allPassed = allPassed and passed
if test == 'inflate' or test == 'both':
print Fore.CYAN + 'Running inflate tests' + Fore.RESET
passed = True
    if inFile is not None:
passed = inflate.runTest(inFile, level, delete, outDir)
else:
passed = inflate.runAll(level, delete, testDir, outDir)
    # if any single test fails, the whole run is marked as failed
allPassed = allPassed and passed
if allPassed:
print Fore.GREEN + 'All tests passed!' + Fore.RESET
else:
print Fore.RED + 'Automated test failed' + Fore.RESET
|
mit
|
Nepherhotep/django
|
tests/postgres_tests/migrations/0002_create_test_models.py
|
231
|
8070
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from ..fields import * # NOQA
class Migration(migrations.Migration):
dependencies = [
('postgres_tests', '0001_setup_extensions'),
]
operations = [
migrations.CreateModel(
name='CharArrayModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('field', ArrayField(models.CharField(max_length=10), size=None)),
],
options={
'required_db_vendor': 'postgresql',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='DateTimeArrayModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('datetimes', ArrayField(models.DateTimeField(), size=None)),
('dates', ArrayField(models.DateField(), size=None)),
('times', ArrayField(models.TimeField(), size=None)),
],
options={
'required_db_vendor': 'postgresql',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='HStoreModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('field', HStoreField(blank=True, null=True)),
],
options={
'required_db_vendor': 'postgresql',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='OtherTypesArrayModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ips', ArrayField(models.GenericIPAddressField(), size=None)),
('uuids', ArrayField(models.UUIDField(), size=None)),
('decimals', ArrayField(models.DecimalField(max_digits=5, decimal_places=2), size=None)),
],
options={
'required_db_vendor': 'postgresql',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='IntegerArrayModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('field', ArrayField(models.IntegerField(), size=None)),
],
options={
'required_db_vendor': 'postgresql',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='NestedIntegerArrayModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('field', ArrayField(ArrayField(models.IntegerField(), size=None), size=None)),
],
options={
'required_db_vendor': 'postgresql',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='NullableIntegerArrayModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('field', ArrayField(models.IntegerField(), size=None, null=True, blank=True)),
],
options={
'required_db_vendor': 'postgresql',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='CharFieldModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('field', models.CharField(max_length=16)),
],
options=None,
bases=None,
),
migrations.CreateModel(
name='TextFieldModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('field', models.TextField()),
],
options=None,
bases=None,
),
migrations.CreateModel(
name='AggregateTestModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('boolean_field', models.NullBooleanField()),
('char_field', models.CharField(max_length=30, blank=True)),
('integer_field', models.IntegerField(null=True)),
]
),
migrations.CreateModel(
name='StatTestModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('int1', models.IntegerField()),
('int2', models.IntegerField()),
('related_field', models.ForeignKey(
'postgres_tests.AggregateTestModel',
models.SET_NULL,
null=True,
)),
]
),
migrations.CreateModel(
name='NowTestModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('when', models.DateTimeField(null=True, default=None)),
]
),
]
pg_92_operations = [
migrations.CreateModel(
name='RangesModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ints', IntegerRangeField(null=True, blank=True)),
('bigints', BigIntegerRangeField(null=True, blank=True)),
('floats', FloatRangeField(null=True, blank=True)),
('timestamps', DateTimeRangeField(null=True, blank=True)),
('dates', DateRangeField(null=True, blank=True)),
],
options={
'required_db_vendor': 'postgresql'
},
bases=(models.Model,)
),
migrations.CreateModel(
name='RangeLookupsModel',
fields=[
('parent', models.ForeignKey(
'postgres_tests.RangesModel',
models.SET_NULL,
blank=True, null=True,
)),
('integer', models.IntegerField(blank=True, null=True)),
('big_integer', models.BigIntegerField(blank=True, null=True)),
('float', models.FloatField(blank=True, null=True)),
('timestamp', models.DateTimeField(blank=True, null=True)),
('date', models.DateField(blank=True, null=True)),
],
options={
'required_db_vendor': 'postgresql',
},
bases=(models.Model,),
),
]
pg_94_operations = [
migrations.CreateModel(
name='JSONModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('field', JSONField(null=True, blank=True)),
],
options={
},
bases=(models.Model,),
),
]
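    # Descriptive note (not in the original migration): pg_version is an
    # integer such as 90200 for PostgreSQL 9.2.0 and 90400 for 9.4.0, so
    # apply() below adds the range models on 9.2+ and JSONModel on 9.4+.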
def apply(self, project_state, schema_editor, collect_sql=False):
try:
PG_VERSION = schema_editor.connection.pg_version
except AttributeError:
pass # We are probably not on PostgreSQL
else:
if PG_VERSION >= 90200:
self.operations = self.operations + self.pg_92_operations
if PG_VERSION >= 90400:
self.operations = self.operations + self.pg_94_operations
return super(Migration, self).apply(project_state, schema_editor, collect_sql)
|
bsd-3-clause
|
lexus24/w16b_test
|
static/Brython3.1.3-20150514-095342/Lib/site-packages/spur.py
|
291
|
5461
|
#coding: utf-8
import math
# After importing the math module, the circle constant is available as pi
# deg is the conversion factor from degrees to radians
deg = math.pi/180.
class Spur(object):
def __init__(self, ctx):
self.ctx = ctx
def create_line(self, x1, y1, x2, y2, width=3, fill="red"):
self.ctx.beginPath()
self.ctx.lineWidth = width
self.ctx.moveTo(x1, y1)
self.ctx.lineTo(x2, y2)
self.ctx.strokeStyle = fill
self.ctx.stroke()
    #
    # Below: the spur-gear drawing routine and the main tkinter canvas drawing
    #
    # Define a drawing function that draws a spur gear
    # midx is the x coordinate of the gear center
    # midy is the y coordinate of the gear center
    # rp is the pitch-circle radius, n the number of teeth
    # pa is the pressure angle (deg)
    # rot is the rotation angle (deg) (not present in this Gear signature)
    # Note: drawing fails when n is 52, because the relative sizes of the base
    # circle and the dedendum circle are not checked; this must be fixed
def Gear(self, midx, midy, rp, n=20, pa=20, color="black"):
        # The gear involute is drawn as 15 line segments
imax = 15
        # Draw a straight line on the given canvas, from the gear center to the top of the pitch circle on the y axis
self.create_line(midx, midy, midx, midy-rp)
        # Draw the rp circle (the circle-drawing function is not yet defined)
#create_oval(midx-rp, midy-rp, midx+rp, midy+rp, width=2)
        # a is the module (the metric measure of tooth size): the pitch diameter divided by the number of teeth
        # the module also equals the addendum (tooth-crown height)
a=2*rp/n
        # d is the dedendum, 1.157 or 1.25 times the module; 1.25 is used here
d=2.5*rp/n
        # ra is the outer (addendum-circle) radius of the gear
ra=rp+a
        # Draw the ra circle (the circle-drawing function is not yet defined)
#create_oval(midx-ra, midy-ra, midx+ra, midy+ra, width=1)
        # rb is the base-circle radius of the gear
        # the base circle is the reference circle from which the involute tooth is generated
rb=rp*math.cos(pa*deg)
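        # e.g. rp=100 and pa=20 give rb = 100*cos(20 deg) ~= 93.97 (illustrative)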
        # Draw the rb circle (base circle) (the circle-drawing function is not yet defined)
#create_oval(midx-rb, midy-rb, midx+rb, midy+rb, width=1)
        # rd is the dedendum-circle (root-circle) radius
rd=rp-d
        # when rd is greater than rb, the involute is drawn down to rd, not rb
        # Draw the rd circle (dedendum circle) (the circle-drawing function is not yet defined)
#create_oval(midx-rd, midy-rd, midx+rd, midy+rd, width=1)
        # dr is the radial increment per step when the span from the base circle
        # to the addendum circle is divided into imax segments
        # the involute is drawn by splitting that span into imax segments
        # when rd is greater than rb, the involute is drawn down to rd, not rb
if rd>rb:
dr = (ra-rd)/imax
else:
dr=(ra-rb)/imax
        # tan(pa*deg) - pa*deg is the involute function
sigma=math.pi/(2*n)+math.tan(pa*deg)-pa*deg
for j in range(n):
ang=-2.*j*math.pi/n+sigma
ang2=2.*j*math.pi/n+sigma
lxd=midx+rd*math.sin(ang2-2.*math.pi/n)
lyd=midy-rd*math.cos(ang2-2.*math.pi/n)
for i in range(imax+1):
                # when rd is greater than rb, the involute is drawn down to rd, not rb
if rd>rb:
r=rd+i*dr
else:
r=rb+i*dr
theta=math.sqrt((r*r)/(rb*rb)-1.)
alpha=theta-math.atan(theta)
xpt=r*math.sin(alpha-ang)
ypt=r*math.cos(alpha-ang)
xd=rd*math.sin(-ang)
yd=rd*math.cos(-ang)
                # when i=0, the line starts from the point on the dedendum circle
if(i==0):
last_x = midx+xd
last_y = midy-yd
                # start from the left side of the dedendum circle; apart from the first point (xd,yd) on the dedendum circle, the remaining (xpt,ypt) are segment points on the involute
self.create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=color)
                # the last point lies on the addendum circle
if(i==imax):
lfx=midx+xpt
lfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# the line from last end of dedendum point to the recent
# end of dedendum point
            # lxd is the left-side x coordinate on the dedendum circle, lyd the y coordinate
            # the following straight line approximates the arc on the dedendum circle
self.create_line((lxd),(lyd),(midx+xd),(midy-yd),fill=color)
for i in range(imax+1):
                # when rd is greater than rb, the involute is drawn down to rd, not rb
if rd>rb:
r=rd+i*dr
else:
r=rb+i*dr
theta=math.sqrt((r*r)/(rb*rb)-1.)
alpha=theta-math.atan(theta)
xpt=r*math.sin(ang2-alpha)
ypt=r*math.cos(ang2-alpha)
xd=rd*math.sin(ang2)
yd=rd*math.cos(ang2)
                # when i=0, the line starts from the point on the dedendum circle
if(i==0):
last_x = midx+xd
last_y = midy-yd
                # start from the right side of the dedendum circle; apart from the first point (xd,yd) on the dedendum circle, the remaining (xpt,ypt) are segment points on the involute
self.create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=color)
                # the last point lies on the addendum circle
if(i==imax):
rfx=midx+xpt
rfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
            # lfx is the left-side x coordinate on the addendum circle, lfy the y coordinate
            # the following straight line approximates the arc across the tooth tip on the addendum circle
self.create_line(lfx,lfy,rfx,rfy,fill=color)
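# A minimal usage sketch (hypothetical, not part of the original file): this
# module ships as a Brython site-package, so ctx would be an HTML canvas 2D
# context obtained in the browser, e.g.:
#     from browser import document
#     ctx = document['gear_canvas'].getContext('2d')
#     spur = Spur(ctx)
#     spur.Gear(300, 300, 100, n=24, pa=20, color="blue")
# The element id 'gear_canvas' is an assumption for illustration.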
|
agpl-3.0
|
kelvinongtoronto/numbers
|
bignumbers.py
|
1
|
1978
|
TENS = [None, None, "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]
SMALL = ["zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "eleven",
"twelve", "thirteen", "fourteen", "fifteen","sixteen", "seventeen", "eighteen", "nineteen"]
HUGE = [None, None] + [h + "illion"
for h in ("m", "b", "tr", "quadr", "quint", "sext", "sept", "oct", "non")]
for g in ("dec", "vigint", "trigint", "quadragint", "quinquagint", "sexagint", "septuagint", "octogint", "nonagint", "cent"):
for h in ("","un", "duo", "tre", "quattour", "quin", "sex", "septen", "octo", "novem"):
        HUGE.append(h + g + "illion")
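# Descriptive note (not in the original file): HUGE[e] names 1000**e, e.g.
# HUGE[2] == 'million', HUGE[3] == 'billion'; the loops above extend the table
# through 'novemcentillion'.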
import sys
def nonzero(c, n, connect=''):
return "" if n == 0 else connect + c + spell_integer(n)
def last_and(num):
if ',' in num:
pre, last = num.rsplit(',', 1)
if ' and ' not in last:
last = ' and' + last
num = ''.join([pre, ',', last])
return num
def big(e, n):
if e == 0:
return spell_integer(n)
elif e == 1:
return spell_integer(n) + " thousand"
elif e <= 110:
return spell_integer(n) + " " + HUGE[e]
else:
return spell_integer(n) + " " + str(e-1) + "-illion"
def base1000_rev(n):
# generates the value of the digits of n in base 1000
# (i.e. 3-digit chunks), in reverse.
while n != 0:
n, r = divmod(n, 1000)
yield r
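# Illustrative example (not in the original file):
#   list(base1000_rev(1234567)) == [567, 234, 1]
# so enumerate() in spell_integer pairs each 3-digit chunk with its
# power-of-1000 exponent.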
def spell_integer(n):
if n < 0:
return "minus " + spell_integer(-n)
elif n < 20:
return SMALL[n]
elif n < 100:
a, b = divmod(n, 10)
return TENS[a] + nonzero("-", b)
elif n < 1000:
a, b = divmod(n, 100)
return SMALL[a] + " hundred" + nonzero(" ", b, ' and')
else:
num = ", ".join([big(e, x) for e, x in
enumerate(base1000_rev(n)) if x][::-1])
return last_and(num)
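# Hand-checked examples (illustrative, not in the original file):
#   spell_integer(21)   -> 'twenty-one'
#   spell_integer(101)  -> 'one hundred and one'
#   spell_integer(1234) -> 'one thousand, two hundred and thirty-four'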
if __name__ == '__main__':
#n=int(sys.float_info.max)*16**20
n=16**276-1
#n=10**333-1
print str(n)
print('%x' % (n))
f = open('number.txt','w')
f.write(str(n)+'\n'+spell_integer(int(n)))
    f.close()
|
artistic-2.0
|
batiste/django-page-cms
|
pages/widgets.py
|
1
|
6835
|
# -*- coding: utf-8 -*-
"""Django CMS come with a set of ready to use widgets that you can enable
in the admin via a placeholder tag in your template."""
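# A hedged illustration (placeholder syntax as commonly documented for
# django-page-cms, not taken from this file): a template can request one of
# the registered widgets for a placeholder, e.g.
#     {% placeholder "body" with RichTextarea %}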
from pages.settings import PAGES_MEDIA_URL, PAGES_STATIC_URL
from pages.settings import PAGE_LANGUAGES
from pages.models import Page
from pages.widgets_registry import register_widget
from django import forms
from django.forms import TextInput, Textarea
from django.forms import MultiWidget
from django.forms import FileInput as DFileInput
from django.contrib.admin.widgets import AdminTextInputWidget
from django.contrib.admin.widgets import AdminTextareaWidget
from django.utils.safestring import mark_safe
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from os.path import join
register_widget(TextInput)
register_widget(Textarea)
register_widget(AdminTextInputWidget)
register_widget(AdminTextareaWidget)
class RichTextarea(Textarea):
"""A RichTextarea widget."""
class Media:
js = [join(PAGES_STATIC_URL, path) for path in (
'javascript/jquery.js',
'javascript/jquery.rte.js'
)]
css = {
'all': [join(PAGES_STATIC_URL, path) for path in (
'css/rte.css',
'css/font-awesome.min.css'
)]
}
def __init__(self, language=None, attrs=None, **kwargs):
attrs = {'class': 'rte'}
self.language = language
super(RichTextarea, self).__init__(attrs)
def render(self, name, value, attrs=None, **kwargs):
rendered = super(RichTextarea, self).render(name, value, attrs)
context = {
'name': name,
'PAGES_STATIC_URL': PAGES_STATIC_URL,
'PAGES_MEDIA_URL': PAGES_MEDIA_URL,
}
return rendered + mark_safe(render_to_string(
'pages/widgets/richtextarea.html', context))
register_widget(RichTextarea)
insert_image_link = u'''
<br>
<button title='insert image from the media library' class='image-lookup-{name}'>
From media library
</button>
<input name="{name}-selected" id="{name}-selected" type="hidden">
<span id="{name}-selected-value">
</span>
<br><label for="{name}-delete">
<input name="{name}-delete" style="display:inline-block" id="{name}-delete" type="checkbox" value="true"> {del_msg}
</label>
<br style="clear:both">
<script>
$(function(){{
function dismissRelatedLookupPopup(win, chosenId) {{
$.get('/admin/pages/page/' + chosenId + '/media-url/', function(response) {{
console.log(response);
$('#{name}-selected').val(response);
$('#{name}-selected-value').text(response);
}});
win.close();
window.dismissRelatedLookupPopup = oldDismissRelatedLookupPopup;
window.dismissAddRelatedObjectPopup = oldDismissAddRelatedObjectPopup;
}}
function showMediaAdminPopup() {{
var name = 'mediaWindowSelect';
var href = '/admin/pages/media/?_to_field=id&_popup=1';
window.dismissRelatedLookupPopup = dismissRelatedLookupPopup;
window.dismissAddRelatedObjectPopup = dismissRelatedLookupPopup;
var win = window.open(href, name, 'height=500,width=800,resizable=yes,scrollbars=yes');
win.focus();
return false;
}}
$('.image-lookup-{name}').click(function(e) {{
e.preventDefault();
showMediaAdminPopup();
return false;
}});
}});
</script>
'''
class FileInput(DFileInput):
def __init__(self, page=None, language=None, attrs=None, **kwargs):
self.language = language
self.page = page
super(FileInput, self).__init__(attrs)
please_save_msg = _('Please save the page to show the file field')
delete_msg = _('Delete file')
def render(self, name, value, attrs=None, **kwargs):
if not self.page:
field_content = self.please_save_msg
else:
field_content = '<span class="placeholder-fileinput">'
if value:
field_content += _('Current file: %s<br/>') % value
field_content += '<hr>'
            field_content += super(FileInput, self).render(name, value, attrs)
field_content += insert_image_link.format(
name=name,
del_msg=self.delete_msg,
value=value)
field_content += '</span>'
return mark_safe(field_content)
register_widget(FileInput)
class ImageInput(FileInput):
please_save_msg = _('Please save the page to show the image field')
delete_msg = _('Delete image')
register_widget(ImageInput)
class LanguageChoiceWidget(TextInput):
def __init__(self, language=None, attrs=None, **kwargs):
self.language = language
self.page = kwargs.get('page')
        # page may be None when it is not passed in kwargs
super(LanguageChoiceWidget, self).__init__(attrs)
def render(self, name, value, attrs=None, **kwargs):
context = {
'name': name,
'value': value,
'page': self.page,
'language': value,
'page_languages': PAGE_LANGUAGES
}
return mark_safe(render_to_string(
'pages/widgets/languages.html', context))
class PageLinkWidget(MultiWidget):
'''A page link `Widget` for the admin.'''
def __init__(
self, attrs=None, page=None, language=None,
video_url=None, linkedpage=None, text=None):
l = [('', '----')]
for p in Page.objects.all():
l.append((p.id, str(p)))
widgets = [
forms.Select(choices=l),
TextInput(attrs=attrs)
]
super(PageLinkWidget, self).__init__(widgets, attrs)
def decompress(self, value):
import json
try:
return json.loads(value)
        except (TypeError, ValueError):
pass
return []
def value_from_datadict(self, data, files, name):
import json
value = ['', '']
for da in [x for x in data if x.startswith(name)]:
index = int(da[len(name) + 1:])
value[index] = data[da]
if value[0] == value[1] == '':
return None
return json.dumps(value)
def _has_changed(self, initial, data):
"""Need to be reimplemented to be correct."""
if data == initial:
return False
return bool(initial) != bool(data)
def format_output(self, rendered_widgets):
"""
        Given a list of rendered widgets (as strings), lay them out as a
        two-row table holding the page selector and the link text.
        Returns a Unicode string representing the HTML for the whole lot.
"""
return """<table>
<tr><td>page</td><td>%s</td></tr>
<tr><td>text</td><td>%s</td></tr>
</table>""" % tuple(rendered_widgets)
register_widget(PageLinkWidget)
|
bsd-3-clause
|
alexmojaki/blaze
|
blaze/expr/tests/test_broadcast.py
|
11
|
3042
|
from blaze.expr import *
from blaze.expr.broadcast import *
from blaze.expr.broadcast import leaves_of_type, broadcast_collect
from blaze.compatibility import builtins
from toolz import isdistinct
x = symbol('x', '5 * 3 * int32')
xx = symbol('xx', 'int32')
y = symbol('y', '5 * 3 * int32')
yy = symbol('yy', 'int32')
a = symbol('a', 'int32')
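# Descriptive note (not in the original file): Broadcast((x, y), (xx, yy), expr)
# pairs the array leaves (x, y) with scalar stand-ins (xx, yy) so that an
# elementwise expression can be fused and evaluated per element rather than
# materializing every intermediate array.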
def test_broadcast_basic():
b = Broadcast((x, y), (xx, yy), xx + yy)
assert b.shape == x.shape
assert b.schema == (xx + yy).dshape
assert eval(str(b)).isidentical(b)
def test_scalar_symbols():
exprs = [x, y]
scalars = scalar_symbols(exprs)
assert len(scalars) == len(exprs)
assert isdistinct([s._name for s in scalars])
assert builtins.all(s.dshape == e.schema for s, e in zip(scalars, exprs))
def test_broadcast_function():
expr = Pow(Add(x, Mult(2, y)), 2) # (x + (2 * y)) ** 2
b = broadcast(expr, [x, y])
xx, yy = b._scalars
assert b._scalar_expr.isidentical((xx + (2 * yy)) ** 2)
# A different set of leaves
b = broadcast(expr, [x, Mult(2, y)])
xx, yy = b._scalars
assert b._scalar_expr.isidentical((xx + yy) ** 2)
t = symbol('t', 'var * {x: int, y: int, z: int}')
def test_tabular_case():
expr = t.x + t.y * 2
b = broadcast(expr, [t])
tt, = b._scalars
assert b._scalar_expr.isidentical(tt.x + tt.y * 2)
def test_optimize_broadcast():
expr = (t.distinct().x + 1).distinct()
expected = broadcast(t.distinct().x + 1, [t.distinct()]).distinct()
result = broadcast_collect(expr, Broadcastable=(Field, Arithmetic),
WantToBroadcast=(Field, Arithmetic))
assert result.isidentical(expected)
def test_leaves_of_type():
expr = Distinct(Distinct(Distinct(t.x)))
result = leaves_of_type((Distinct,), expr)
assert len(result) == 1
assert list(result)[0].isidentical(t.x)
def test_broadcast_collect_doesnt_collect_scalars():
expr = xx + yy * a
assert broadcast_collect(expr, Broadcastable=Arithmetic,
WantToBroadcast=Arithmetic).isidentical(expr)
def test_table_broadcast():
t = symbol('t', 'var * {x: int, y: int, z: int}')
expr = t.distinct()
expr = (2 * expr.x + expr.y + 1).distinct()
expected = t.distinct()
expected = broadcast(2 * expected.x + expected.y + 1, [expected]).distinct()
assert broadcast_collect(expr).isidentical(expected)
expr = (t.x + t.y).sum()
result = broadcast_collect(expr)
expected = broadcast(t.x + t.y, [t]).sum()
assert result.isidentical(expected)
def test_broadcast_doesnt_affect_scalars():
t = symbol('t', '{x: int, y: int, z: int}')
expr = (2 * t.x + t.y + 1)
assert broadcast_collect(expr).isidentical(expr)
def test_full_expr():
b = Broadcast((x, y), (xx, yy), xx + yy)
assert b._full_expr.isidentical(x + y)
def test_broadcast_naming():
t = symbol('t', 'var * {x: int, y: int, z: int}')
for expr in [t.x, t.x + 1]:
assert broadcast(expr, [t])._name == 'x'
|
bsd-3-clause
|
moijes12/oh-mainline
|
vendor/packages/Django/django/conf/locale/pl/formats.py
|
107
|
1089
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j E Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j E Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'd-m-Y H:i:s'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%y-%m-%d', # '06-10-25'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
NUMBER_GROUPING = 3
|
agpl-3.0
|