repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
---|---|---|---|---|---|---|---|---|---|---|
vinoth3v/In | In/html/object_lister.py | 1 | 5992 |
class ObjectLister(HTMLObject):
'''List objects'''
# value from this argument will be used to identify the current page index
current_arg = 'page'
# default object view mode
view_mode = 'default'
# if set this query will be used to load entities
entity_load_query = None
entity_type = None
limit = 1
current = None
__total__ = None
handler_get_total = None
handler_prepare_objects = None
list_object_class = None
list_object_wrapper = None
empty_text = None
# lister object itself will be added to this if set
# not to be confused with content panel
# container -> lister -> content_panel -> objects
container = None
url = None
pager = None
add_function = None
def __init__(self, data = None, items = None, **args):
super().__init__(data, items, **args)
content_panel = data.get('content_panel', {})
if not isinstance(content_panel, Object):
type_of_content_panel = type(content_panel)
if type_of_content_panel is str and content_panel: # content panel type
content_panel_type = content_panel
content_panel_data = {}
elif type_of_content_panel is dict:
content_panel_type = 'TextDiv'
content_panel_data = content_panel
content_panel_data.update({
'id' : 'content_panel',
'weight' : 0,
'default_children_view_mode' : self.view_mode
})
self.content_panel = self.add(content_panel_type, content_panel_data)
self.content_panel.css.append('content-panel')
self.pager_panel = self.add('TextDiv', {
'id' : 'pager_panel',
'css' : ['pager-panel'],
'weight' : 10,
})
try:
request_args = IN.context.request.args['query']
self.current = int(request_args.get(self.current_arg, 1))
except ValueError:
self.current = 1
#self.css.append('i-container')
def list(self):
'''list objects'''
# prepare and add the objects to self.content_panel
try:
self.__prepare_objects__()
except Exception as e:
IN.logger.debug()
context = IN.context
pager = self.pager
listed = self.current * self.limit
if len(self.content_panel) and pager: # and listed < self.total:
if 'data' not in pager:
pager['data'] = {}
if not self.url:
self.url = IN.context.request.path
pager['data'].update({
'current_page' : self.current,
'total_pages' : int(self.total / self.limit),
'url' : self.url,
'link_ajax_args' : {'lister' : 1},
})
pager = self.pager_panel.add(**pager)
# add empty_text
if len(self.content_panel) == 0 and self.current == 1 and self.empty_text:
self.content_panel.add('TextDiv', {
'value' : self.empty_text,
'css' : ['empty-text']
})
if not 'lister' in context.request.ajax_args:
# new list, # add self, theme self
if self.container:
self.container.add(self)
else:
context.response.output.add(self)
else:
# already in lister, # append or replace based on pager
if self.pager and 'append_type' in self.pager:
append_type = self.pager['append_type']
else:
append_type = 'replace'
context.response = In.core.response.CustomResponse()
output = IN.themer.theme(self)
if append_type == 'replace':
# replace self
IN.context.response.output = [{
'method' : append_type,
'args' : ['#' + self.id, output]
}]
else:
# append to content list and
output = self.content_panel.theme_output['html']['default']['output']['children']
context.response.output = [{
'method' : append_type,
'args' : ['#' + self.id + ' .content-panel', output]
}]
# replace pager
pager_output = self.pager_panel.theme_current_output['output']['final']
context.response.output.append({
'method' : 'replace',
'args' : [''.join(('#', self.id, ' .pager-panel')), pager_output]
})
# hack
# if grid is dynamic, reinit it
if 'i-grid-dynamic' in self['content_panel'].css:
script = '''
require(['uikit!grid'], function(uigrid) {
$('.i-grid-dynamic').each(function() {
$(this).trigger('display.uk.check');
});
});
'''
context.response.output.append({
'method' : 'script',
'args' : [script]
})
@property
def total(self):
if self.__total__ is not None:
return self.__total__
if self.entity_load_query:
cursor = self.entity_load_query.execute_count()
if cursor.rowcount > 0:
self.__total__ = cursor.fetchone()[0]
elif self.handler_get_total:
self.__total__ = self.handler_get_total()
else:
self.__total__ = 0
return self.__total__
def __prepare_objects__(self):
if self.handler_prepare_objects:
# call the handler to load objects
self.handler_prepare_objects(self)
elif self.entity_load_query is not None:
# load entities and add
limit = self.limit
if self.current > 1:
if self.current == 2:
limit = [limit, limit]
else:
limit = [(self.current -1) * limit, limit]
self.entity_load_query.limit = limit
cursor = self.entity_load_query.execute()
entitier = IN.entitier
content_panel = self.content_panel
if cursor.rowcount > 0:
result = cursor.fetchall()
entity_ids = []
for r in result:
if r[0] not in entity_ids:
entity_ids.append(r[0])
entities = entitier.load_multiple(self.entity_type, entity_ids)
weight = 1
for id in entity_ids: # keep order
if id in entities:
entity = entities[id]
entity.weight = weight
if self.list_object_class:
entity.css.append(self.list_object_class)
if self.add_function:
self.add_function(self, entity, content_panel, weight)
else:
if self.list_object_wrapper is None:
content_panel.add(entity)
else:
content_panel.add(self.list_object_wrapper).add(entity)
weight += 1
else:
pass
builtins.ObjectLister = ObjectLister
| apache-2.0 | 8,786,976,584,150,277,000 | 23.357724 | 85 | 0.613151 | false |
godfather1103/WeiboRobot | python27/1.0/lib/ctypes/test/test_pointers.py | 11 | 6550 |
import unittest, sys
from ctypes import *
import _ctypes_test
ctype_types = [c_byte, c_ubyte, c_short, c_ushort, c_int, c_uint,
c_long, c_ulong, c_longlong, c_ulonglong, c_double, c_float]
python_types = [int, int, int, int, int, long,
int, long, long, long, float, float]
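# Note: this is Python 2 test code (hence ``long`` above and the ``0x...L``
# literals further down); python_types lists the expected Python type for the
# corresponding entry in ctype_types.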
class PointersTestCase(unittest.TestCase):
def test_pointer_crash(self):
class A(POINTER(c_ulong)):
pass
POINTER(c_ulong)(c_ulong(22))
# Pointer can't set contents: has no _type_
self.assertRaises(TypeError, A, c_ulong(33))
def test_pass_pointers(self):
dll = CDLL(_ctypes_test.__file__)
func = dll._testfunc_p_p
func.restype = c_long
i = c_int(12345678)
## func.argtypes = (POINTER(c_int),)
address = func(byref(i))
self.assertEqual(c_int.from_address(address).value, 12345678)
func.restype = POINTER(c_int)
res = func(pointer(i))
self.assertEqual(res.contents.value, 12345678)
self.assertEqual(res[0], 12345678)
def test_change_pointers(self):
dll = CDLL(_ctypes_test.__file__)
func = dll._testfunc_p_p
i = c_int(87654)
func.restype = POINTER(c_int)
func.argtypes = (POINTER(c_int),)
res = func(pointer(i))
self.assertEqual(res[0], 87654)
self.assertEqual(res.contents.value, 87654)
# C code: *res = 54345
res[0] = 54345
self.assertEqual(i.value, 54345)
# C code:
# int x = 12321;
# res = &x
res.contents = c_int(12321)
self.assertEqual(i.value, 54345)
def test_callbacks_with_pointers(self):
# a function type receiving a pointer
PROTOTYPE = CFUNCTYPE(c_int, POINTER(c_int))
self.result = []
def func(arg):
for i in range(10):
## print arg[i],
self.result.append(arg[i])
## print
return 0
callback = PROTOTYPE(func)
dll = CDLL(_ctypes_test.__file__)
# This function expects a function pointer,
# and calls this with an integer pointer as parameter.
# The int pointer points to a table containing the numbers 1..10
doit = dll._testfunc_callback_with_pointer
## i = c_int(42)
## callback(byref(i))
## self.assertEqual(i.value, 84)
doit(callback)
## print self.result
doit(callback)
## print self.result
def test_basics(self):
from operator import delitem
for ct, pt in zip(ctype_types, python_types):
i = ct(42)
p = pointer(i)
## print type(p.contents), ct
self.assertIs(type(p.contents), ct)
# p.contents is the same as p[0]
## print p.contents
## self.assertEqual(p.contents, 42)
## self.assertEqual(p[0], 42)
self.assertRaises(TypeError, delitem, p, 0)
def test_from_address(self):
from array import array
a = array('i', [100, 200, 300, 400, 500])
addr = a.buffer_info()[0]
p = POINTER(POINTER(c_int))
## print dir(p)
## print p.from_address
## print p.from_address(addr)[0][0]
def test_other(self):
class Table(Structure):
_fields_ = [("a", c_int),
("b", c_int),
("c", c_int)]
pt = pointer(Table(1, 2, 3))
self.assertEqual(pt.contents.a, 1)
self.assertEqual(pt.contents.b, 2)
self.assertEqual(pt.contents.c, 3)
pt.contents.c = 33
from ctypes import _pointer_type_cache
del _pointer_type_cache[Table]
def test_basic(self):
p = pointer(c_int(42))
# Although a pointer can be indexed, it has no length
self.assertRaises(TypeError, len, p)
self.assertEqual(p[0], 42)
self.assertEqual(p.contents.value, 42)
def test_charpp(self):
"""Test that a character pointer-to-pointer is correctly passed"""
dll = CDLL(_ctypes_test.__file__)
func = dll._testfunc_c_p_p
func.restype = c_char_p
argv = (c_char_p * 2)()
argc = c_int( 2 )
argv[0] = 'hello'
argv[1] = 'world'
result = func( byref(argc), argv )
assert result == 'world', result
def test_bug_1467852(self):
# http://sourceforge.net/tracker/?func=detail&atid=532154&aid=1467852&group_id=71702
x = c_int(5)
dummy = []
for i in range(32000):
dummy.append(c_int(i))
y = c_int(6)
p = pointer(x)
pp = pointer(p)
q = pointer(y)
pp[0] = q # <==
self.assertEqual(p[0], 6)
def test_c_void_p(self):
# http://sourceforge.net/tracker/?func=detail&aid=1518190&group_id=5470&atid=105470
if sizeof(c_void_p) == 4:
self.assertEqual(c_void_p(0xFFFFFFFFL).value,
c_void_p(-1).value)
self.assertEqual(c_void_p(0xFFFFFFFFFFFFFFFFL).value,
c_void_p(-1).value)
elif sizeof(c_void_p) == 8:
self.assertEqual(c_void_p(0xFFFFFFFFL).value,
0xFFFFFFFFL)
self.assertEqual(c_void_p(0xFFFFFFFFFFFFFFFFL).value,
c_void_p(-1).value)
self.assertEqual(c_void_p(0xFFFFFFFFFFFFFFFFFFFFFFFFL).value,
c_void_p(-1).value)
self.assertRaises(TypeError, c_void_p, 3.14) # make sure floats are NOT accepted
self.assertRaises(TypeError, c_void_p, object()) # nor other objects
def test_pointers_bool(self):
# NULL pointers have a boolean False value, non-NULL pointers True.
self.assertEqual(bool(POINTER(c_int)()), False)
self.assertEqual(bool(pointer(c_int())), True)
self.assertEqual(bool(CFUNCTYPE(None)(0)), False)
self.assertEqual(bool(CFUNCTYPE(None)(42)), True)
# COM methods are boolean True:
if sys.platform == "win32":
mth = WINFUNCTYPE(None)(42, "name", (), None)
self.assertEqual(bool(mth), True)
def test_pointer_type_name(self):
LargeNamedType = type('T' * 2 ** 25, (Structure,), {})
self.assertTrue(POINTER(LargeNamedType))
def test_pointer_type_str_name(self):
large_string = 'T' * 2 ** 25
self.assertTrue(POINTER(large_string))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 4,798,600,423,060,353,000 | 31.75 | 92 | 0.549313 | false |
bennibaermann/maximetro | config.py | 1 | 2649 |
# -*- coding: utf-8 -*-
# set which part of the game you want to debug, set all for maximum output
# DEBUG = ('init', 'font', 'track', 'path', 'station', 'passenger', 'random' )
DEBUG = ('init','font' )
# these basic gamemodes change the gameplay drastical
ANIMALS = False # an alternative animals graphic set from erlehmann
FREE_PASSENGERS = True # new passengers not at stations
STATION_PASSENGERS = False # new passengers at stations
BUILD_STATIONS = True # player can build new stations
DOUBLE_TRACKS = False # more than one track between same stations allowed?
CROSSING = False # crossing tracks allowed? TODO: Crossing=True does not work
COLLISION = False # set to False if cars should stop when another car is in the way
MAXWAITING = 80
BLACK = ( 0, 0, 0)
VERYLIGHTGREY= (220, 220, 220)
LIGHTGREY= (200, 200, 200)
WHITE = (255, 255, 255)
BLUE = ( 0, 0, 255)
GREEN = ( 0, 255, 0)
RED = (255, 0, 0)
MAGENTA = (255, 0, 255)
CYAN = ( 0, 255, 255)
YELLOW = (255, 255, 0)
COLORS = [YELLOW,MAGENTA,CYAN,GREEN,BLUE,RED]
#COLORS = [CYAN,GREEN,BLUE,RED]
# LINES = list(COLORS)
COLORNAMES = ['red','blue','green','cyan','magenta','yellow']
SHAPES = ('circle','triangle','square','rhombus','semicircle')
OTHERSTATIONS = ('circle','triangle','rhombus','semicircle')
# use these two lines instead if you don't have the pygame patch from benni:
SHAPES = ('circle','triangle','square','rhombus')
OTHERSTATIONS = ('circle','triangle','rhombus')
MAINSTATION = 'square'
MAXSTATIONS = 0 # stations build (without mainstation) during game init
PASSENGERSIZE = 7
PASSENGERSPEED = 0.3 # speed of passengers by foot
PASSENGER_RANDOMNESS = 0.01 # rate at which passenger in random mode changes direction
CARCAPACITY = 3
CARWITH = PASSENGERSIZE + 3 # actually half of it
CARLENGTH = 13 + PASSENGERSIZE * CARCAPACITY # actually half of it
CARSPEED = 3
MAXTRACKLENGTH = CARLENGTH * 5 # set to 0 for no length restrictions
# MAXTRACKLENGTH = 0 # set to 0 for no length restrictions
STATIONSIZE = 17
STATIONTHICKNESS = 5
STATIONDISTANCE = CARLENGTH * 2
MAXSTATIONTRACKS = 5
STATIONTRACKDIST = 0 # TODO: minimal distance between tracks and center of station
PROBABILITY_START = .01
PROBABILITY_DIFF = .000005
# PROBABILITY_DIFF = 0
MAXWAITING_STATION = 9999
RIGHT_OFFSET = 200
#RIGHT_OFFSET = 200
MAX_Y = 500
MAX_X = MAX_Y + RIGHT_OFFSET
STATUSHEIGHT = 30 # height of status line at the bottom
MAX_DEPTH = 99999 # max distance for path finding (means no path)
FPS = 30
# money and prices
STARTMONEY = 30 # 30
STATIONCOST = 5
TRACKCOST = 1
DELETECOST = 1
# LINECOST = 5
FONTSIZE = 18 # size of the default font used
| agpl-3.0 | 1,604,158,864,433,164,500 | 30.176471 | 86 | 0.705172 | false |
bhavishyagopesh/hyper-h2 | examples/asyncio/wsgi-server.py | 1 | 31076 |
# -*- coding: utf-8 -*-
"""
asyncio-server.py
~~~~~~~~~~~~~~~~~
A fully-functional WSGI server, written using hyper-h2. Requires asyncio.
To test it, try installing httpbin from pip (``pip install httpbin``) and then
running the server (``python asyncio-server.py httpbin:app``).
This server does not support HTTP/1.1: it is an HTTP/2-only WSGI server. The
purpose of this code is to demonstrate how to integrate hyper-h2 into a more
complex application, and to demonstrate several principles of concurrent
programming.
The architecture looks like this:
+---------------------------------+
| 1x HTTP/2 Server Thread |
| (running asyncio) |
+---------------------------------+
+---------------------------------+
| N WSGI Application Threads |
| (no asyncio) |
+---------------------------------+
Essentially, we spin up an asyncio-based event loop in the main thread. This
launches one HTTP/2 Protocol instance for each inbound connection, all of which
will read and write data from within the main thread in an asynchronous manner.
When each HTTP request comes in, the server will build the WSGI environment
dictionary and create a ``Stream`` object. This object will hold the relevant
state for the request/response pair and will act as the WSGI side of the logic.
That object will then be passed to a background thread pool, and when a worker
is available the WSGI logic will begin to be executed. This model ensures that
the asyncio web server itself is never blocked by the WSGI application.
The WSGI application and the HTTP/2 server communicate via an asyncio queue,
together with locks and threading events. The locks themselves are implicit in
asyncio's "call_soon_threadsafe", which allows for a background thread to
register an action with the main asyncio thread. When the asyncio thread
eventually takes the action in question, it sets a threading event, signaling
to the background thread that it is free to continue its work.
To make the WSGI application work with flow control, there is a very important
invariant that must be observed. Any WSGI action that would cause data to be
emitted to the network MUST be accompanied by a threading Event that is not
set until that data has been written to the transport. This ensures that the
WSGI application *blocks* until the data is actually sent. The reason we
require this invariant is that the HTTP/2 server may choose to re-order some
data chunks for flow control reasons: that is, the application for stream X may
have actually written its data first, but the server may elect to send the data
for stream Y first. This means that it's vital that there not be *two* writes
for stream X active at any one point or they may get reordered, which would be
particularly terrible.
Thus, the server must cooperate to ensure that each threading event only fires
when the *complete* data for that event has been written to the asyncio
transport. Any earlier will cause untold craziness.
"""
import asyncio
import importlib
import queue
import ssl
import sys
import threading
from h2.connection import H2Connection
from h2.events import (
DataReceived, RequestReceived, WindowUpdated, StreamEnded, StreamReset
)
# Used to signal that a request has completed.
#
# This is a convenient way to do "in-band" signaling of stream completion
# without doing anything so heavyweight as using a class. Essentially, we can
# test identity against this empty object. In fact, this is so convenient that
# we use this object for all streams, for data in both directions: in and out.
END_DATA_SENTINEL = object()
# The WSGI callable. Stored here so that the protocol instances can get hold
# of the data.
APPLICATION = None
class H2Protocol(asyncio.Protocol):
def __init__(self):
# Our server-side state machine.
self.conn = H2Connection(client_side=False)
# The backing transport.
self.transport = None
# A dictionary of ``Stream`` objects, keyed by their stream ID. This
# makes it easy to route data to the correct WSGI application instance.
self.streams = {}
# A queue of data emitted by WSGI applications that has not yet been
# sent. Each stream may only have one chunk of data in either this
# queue or the flow_controlled_data dictionary at any one time.
self._stream_data = asyncio.Queue()
# Data that has been pulled off the queue that is for a stream blocked
# behind flow control limitations. This is used to avoid spinning on
# _stream_data queue when a stream cannot have its data sent. Data that
# cannot be sent on the connection when it is popped off the queue gets
# placed here until the stream flow control window opens up again.
self._flow_controlled_data = {}
# A reference to the loop in which this protocol runs. This is needed
# to synchronise up with background threads.
self._loop = asyncio.get_event_loop()
# Any streams that have been remotely reset. We keep track of these to
# ensure that we don't emit data from a WSGI application whose stream
# has been cancelled.
self._reset_streams = set()
# Keep track of the loop sending task so we can kill it when the
# connection goes away.
self._send_loop_task = None
def connection_made(self, transport):
"""
The connection has been made. Here we need to save off our transport,
do basic HTTP/2 connection setup, and then start our data writing
coroutine.
"""
self.transport = transport
self.conn.initiate_connection()
self.transport.write(self.conn.data_to_send())
self._send_loop_task = self._loop.create_task(self.sending_loop())
def connection_lost(self, exc):
"""
With the end of the connection, we just want to cancel our data sending
coroutine.
"""
self._send_loop_task.cancel()
def data_received(self, data):
"""
Process inbound data.
"""
events = self.conn.receive_data(data)
for event in events:
if isinstance(event, RequestReceived):
self.request_received(event)
elif isinstance(event, DataReceived):
self.data_frame_received(event)
elif isinstance(event, WindowUpdated):
self.window_opened(event)
elif isinstance(event, StreamEnded):
self.end_stream(event)
elif isinstance(event, StreamReset):
self.reset_stream(event)
outbound_data = self.conn.data_to_send()
if outbound_data:
self.transport.write(outbound_data)
def window_opened(self, event):
"""
The flow control window got opened.
This is important because it's possible that we were unable to send
some WSGI data because the flow control window was too small. If that
happens, the sending_loop coroutine starts buffering data.
As the window gets opened, we need to unbuffer the data. We do that by
placing the data chunks back on the back of the send queue and letting
the sending loop take another shot at sending them.
This system only works because we require that each stream only have
*one* data chunk in the sending queue at any time. The threading events
force this invariant to remain true.
"""
if event.stream_id:
# This is specific to a single stream.
if event.stream_id in self._flow_controlled_data:
self._stream_data.put_nowait(
self._flow_controlled_data.pop(event.stream_id)
)
else:
# This event is specific to the connection. Free up *all* the
# streams. This is a bit tricky, but we *must not* yield the flow
# of control here or it all goes wrong.
for data in self._flow_controlled_data.values():
self._stream_data.put_nowait(data)
self._flow_controlled_data = {}
@asyncio.coroutine
def sending_loop(self):
"""
A call that loops forever, attempting to send data. This sending loop
contains most of the flow-control smarts of this class: it pulls data
off of the asyncio queue and then attempts to send it.
The difficulties here are all around flow control. Specifically, a
chunk of data may be too large to send. In this case, what will happen
is that this coroutine will attempt to send what it can and will then
store the unsent data locally. When a flow control event comes in that
data will be freed up and placed back onto the asyncio queue, causing
it to pop back up into the sending logic of this coroutine.
This method explicitly *does not* handle HTTP/2 priority. That adds an
extra layer of complexity to what is already a fairly complex method,
and we'll look at how to do it another time.
This coroutine explicitly *does not end*.
"""
while True:
stream_id, data, event = yield from self._stream_data.get()
# If this stream got reset, just drop the data on the floor. Note
# that we need to reset the event here to make sure that
# application doesn't lock up.
if stream_id in self._reset_streams:
event.set()
continue
# Check if the body is done. If it is, this is really easy! Again,
# we *must* set the event here or the application will lock up.
if data is END_DATA_SENTINEL:
self.conn.end_stream(stream_id)
self.transport.write(self.conn.data_to_send())
event.set()
continue
# We need to send data, but not to exceed the flow control window.
# For that reason, grab only the data that fits: we'll buffer the
# rest.
window_size = self.conn.local_flow_control_window(stream_id)
chunk_size = min(window_size, len(data))
data_to_send = data[:chunk_size]
data_to_buffer = data[chunk_size:]
if data_to_send:
# There's a maximum frame size we have to respect. Because we
# aren't paying any attention to priority here, we can quite
# safely just split this string up into chunks of max frame
# size and blast them out.
#
# In a *real* application you'd want to consider priority here.
max_size = self.conn.max_outbound_frame_size
chunks = (
data_to_send[x:x+max_size]
for x in range(0, len(data_to_send), max_size)
)
for chunk in chunks:
self.conn.send_data(stream_id, chunk)
self.transport.write(self.conn.data_to_send())
# If there's data left to buffer, we should do that. Put it in a
# dictionary and *don't set the event*: the app must not generate
# any more data until we got rid of all of this data.
if data_to_buffer:
self._flow_controlled_data[stream_id] = (
stream_id, data_to_buffer, event
)
else:
# We sent everything. We can let the WSGI app progress.
event.set()
def request_received(self, event):
"""
A HTTP/2 request has been received. We need to invoke the WSGI
application in a background thread to handle it.
"""
# First, we are going to want an object to hold all the relevant state
# for this request/response. For that, we have a stream object. We
# need to store the stream object somewhere reachable for when data
# arrives later.
s = Stream(event.stream_id, self)
self.streams[event.stream_id] = s
# Next, we need to build the WSGI environ dictionary.
environ = _build_environ_dict(event.headers, s)
# Finally, we want to throw these arguments out to a threadpool and
# let it run.
self._loop.run_in_executor(
None,
s.run_in_threadpool,
APPLICATION,
environ,
)
def data_frame_received(self, event):
"""
Data has been received by WSGI server and needs to be dispatched to a
running application.
Note that the flow control window is not modified here. That's
deliberate: see Stream.__next__ for a longer discussion of why.
"""
# Grab the stream in question from our dictionary and pass it on.
stream = self.streams[event.stream_id]
stream.receive_data(event.data, event.flow_controlled_length)
def end_stream(self, event):
"""
The stream data is complete.
"""
stream = self.streams[event.stream_id]
stream.request_complete()
def reset_stream(self, event):
"""
A stream got forcefully reset.
This is a tricky thing to deal with because WSGI doesn't really have a
good notion for it. Essentially, you have to let the application run
until completion, but not actually let it send any data.
We do that by discarding any data we currently have for it, and then
marking the stream as reset to allow us to spot when that stream is
trying to send data and drop that data on the floor.
We then *also* signal the WSGI application that no more data is
incoming, to ensure that it does not attempt to do further reads of the
data.
"""
if event.stream_id in self._flow_controlled_data:
del self._flow_controlled_data[event.stream_id]
self._reset_streams.add(event.stream_id)
self.end_stream(event)
def data_for_stream(self, stream_id, data):
"""
Thread-safe method called from outside the main asyncio thread in order
to send data on behalf of a WSGI application.
Places data being written by a stream on an asyncio queue. Returns a
threading event that will fire when that data is sent.
"""
event = threading.Event()
self._loop.call_soon_threadsafe(
self._stream_data.put_nowait,
(stream_id, data, event)
)
return event
def send_response(self, stream_id, headers):
"""
Thread-safe method called from outside the main asyncio thread in order
to send the HTTP response headers on behalf of a WSGI application.
Returns a threading event that will fire when the headers have been
emitted to the network.
"""
event = threading.Event()
def _inner_send(stream_id, headers, event):
self.conn.send_headers(stream_id, headers, end_stream=False)
self.transport.write(self.conn.data_to_send())
event.set()
self._loop.call_soon_threadsafe(
_inner_send,
stream_id,
headers,
event
)
return event
def open_flow_control_window(self, stream_id, increment):
"""
Opens a flow control window for the given stream by the given amount.
Called from a WSGI thread. Does not return an event because there's no
need to block on this action, it may take place at any time.
"""
def _inner_open(stream_id, increment):
self.conn.increment_flow_control_window(increment, stream_id)
self.conn.increment_flow_control_window(increment, None)
self.transport.write(self.conn.data_to_send())
self._loop.call_soon_threadsafe(
_inner_open,
stream_id,
increment,
)
class Stream:
"""
This class holds all of the state for a single stream. It also provides
several of the callables used by the WSGI application. Finally, it provides
the logic for actually interfacing with the WSGI application.
For these reasons, the object has *strict* requirements on thread-safety.
While the object can be initialized in the main WSGI thread, the
``run_in_threadpool`` method *must* be called from outside that thread. At
that point, the main WSGI thread may only call specific methods.
"""
def __init__(self, stream_id, protocol):
self.stream_id = stream_id
self._protocol = protocol
# Queue for data that has been received from the network. This is a
# thread-safe queue, to allow both the WSGI application to block on
# receiving more data and to allow the asyncio server to keep sending
# more data.
#
# This queue is unbounded in size, but in practice it cannot contain
# too much data because the flow control window doesn't get adjusted
# unless data is removed from it.
self._received_data = queue.Queue()
# This buffer is used to hold partial chunks of data from
# _received_data that were not returned out of ``read`` and friends.
self._temp_buffer = b''
# Temporary variables that allow us to keep hold of the headers and
# response status until such time as the application needs us to send
# them.
self._response_status = b''
self._response_headers = []
self._headers_emitted = False
# Whether the application has received all the data from the network
# or not. This allows us to short-circuit some reads.
self._complete = False
def receive_data(self, data, flow_controlled_size):
"""
Called by the H2Protocol when more data has been received from the
network.
Places the data directly on the queue in a thread-safe manner without
blocking. Does not introspect or process the data.
"""
self._received_data.put_nowait((data, flow_controlled_size))
def request_complete(self):
"""
Called by the H2Protocol when all the request data has been received.
This works by placing the ``END_DATA_SENTINEL`` on the queue. The
reading code knows, when it sees the ``END_DATA_SENTINEL``, to expect
no more data from the network. This ensures that the state of the
application only changes when it has finished processing the data from
the network, even though the server may have long-since finished
receiving all the data for this request.
"""
self._received_data.put_nowait((END_DATA_SENTINEL, None))
def run_in_threadpool(self, wsgi_application, environ):
"""
This method should be invoked in a threadpool. At the point this method
is invoked, the only safe methods to call from the original thread are
``receive_data`` and ``request_complete``: any other method is unsafe.
This method handles the WSGI logic. It invokes the application callable
in this thread, passing control over to the WSGI application. It then
ensures that the data makes it back to the HTTP/2 connection via
the thread-safe APIs provided below.
"""
result = wsgi_application(environ, self.start_response)
try:
for data in result:
self.write(data)
finally:
# This signals that we're done with data. The server will know that
# this allows it to clean up its state: we're done here.
self.write(END_DATA_SENTINEL)
# The next few methods are called by the WSGI application. Firstly, the
# three methods provided by the input stream.
def read(self, size=None):
"""
Called by the WSGI application to read data.
This method is the one of two that explicitly pumps the input data
queue, which means it deals with the ``_complete`` flag and the
``END_DATA_SENTINEL``.
"""
# If we've already seen the END_DATA_SENTINEL, return immediately.
if self._complete:
return b''
# If we've been asked to read everything, just iterate over ourselves.
if size is None:
return b''.join(self)
# Otherwise, as long as we don't have enough data, spin looking for
# another data chunk.
data = b''
while len(data) < size:
try:
chunk = next(self)
except StopIteration:
break
# Concatenating strings this way is slow, but that's ok, this is
# just a demo.
data += chunk
# We have *at least* enough data to return, but we may have too much.
# If we do, throw it on a buffer: we'll use it later.
to_return = data[:size]
self._temp_buffer = data[size:]
return to_return
def readline(self, hint=None):
"""
Called by the WSGI application to read a single line of data.
This method rigorously observes the ``hint`` parameter: it will only
ever read that much data. It then splits the data on a newline
character and throws everything it doesn't need into a buffer.
"""
data = self.read(hint)
first_newline = data.find(b'\n')
if first_newline == -1:
# No newline, return all the data
return data
# We want to slice the data so that the head *includes* the first
# newline. Then, any data left in this line we don't care about should
# be prepended to the internal buffer.
head, tail = data[:first_newline + 1], data[first_newline + 1:]
self._temp_buffer = tail + self._temp_buffer
return head
def readlines(self, hint=None):
"""
Called by the WSGI application to read several lines of data.
This method is really pretty stupid. It rigorously observes the
``hint`` parameter, and quite happily returns the input split into
lines.
"""
# This method is *crazy inefficient*, but it's also a pretty stupid
# method to call.
data = self.read(hint)
lines = data.split(b'\n')
# Split removes the newline character, but we want it, so put it back.
lines = [line + b'\n' for line in lines]
# Except if the last character was a newline character we now have an
# extra line that is just a newline: pull that out.
if lines[-1] == b'\n':
lines = lines[:-1]
return lines
def start_response(self, status, response_headers, exc_info=None):
"""
This is the PEP-3333 mandated start_response callable.
All it does is store the headers for later sending, and return our
``write`` callable.
"""
if self._headers_emitted and exc_info is not None:
raise exc_info[1].with_traceback(exc_info[2])
assert not self._response_status or exc_info is not None
self._response_status = status
self._response_headers = response_headers
return self.write
def write(self, data):
"""
Provides some data to write.
This function *blocks* until such time as the data is allowed by
HTTP/2 flow control. This allows a client to slow or pause the response
as needed.
This function is not supposed to be used, according to PEP-3333, but
once we have it it becomes quite convenient to use it, so this app
actually runs all writes through this function.
"""
if not self._headers_emitted:
self._emit_headers()
event = self._protocol.data_for_stream(self.stream_id, data)
event.wait()
return
def _emit_headers(self):
"""
Sends the response headers.
This is only called from the write callable and should only ever be
called once. It does some minor processing (converts the status line
into a status code because reason phrases are evil) and then passes
the headers on to the server. This call explicitly blocks until the
server notifies us that the headers have reached the network.
"""
assert self._response_status and self._response_headers
assert not self._headers_emitted
self._headers_emitted = True
# We only need the status code
status = self._response_status.split(" ", 1)[0]
headers = [(":status", status)]
headers.extend(self._response_headers)
event = self._protocol.send_response(self.stream_id, headers)
event.wait()
return
# These two methods implement the iterator protocol. This allows a WSGI
# application to iterate over this Stream object to get the data.
def __iter__(self):
return self
def __next__(self):
# If the complete request has been read, abort immediately.
if self._complete:
raise StopIteration()
# If we have data stored in a temporary buffer for any reason, return
# that and clear the buffer.
#
# This can actually only happen when the application uses one of the
# read* callables, but that's fine.
if self._temp_buffer:
buffered_data = self._temp_buffer
self._temp_buffer = b''
return buffered_data
# Otherwise, pull data off the queue (blocking as needed). If this is
# the end of the request, we're done here: mark ourselves as complete
# and call it time. Otherwise, open the flow control window an
# appropriate amount and hand the chunk off.
chunk, chunk_size = self._received_data.get()
if chunk is END_DATA_SENTINEL:
self._complete = True
raise StopIteration()
# Let's talk a little bit about why we're opening the flow control
# window *here*, and not in the server thread.
#
# The purpose of HTTP/2 flow control is to allow for servers and
# clients to avoid needing to buffer data indefinitely because their
# peer is producing data faster than they can consume it. As a result,
# it's important that the flow control window be opened as late in the
# processing as possible. In this case, we open the flow control window
# exactly when the server hands the data to the application. This means
# that the flow control window essentially signals to the remote peer
# how much data hasn't even been *seen* by the application yet.
#
# If you wanted to be really clever you could consider not opening the
# flow control window until the application asks for the *next* chunk
# of data. That means that any buffers at the application level are now
# included in the flow control window processing. In my opinion, the
# advantage of that process does not outweigh the extra logical
# complexity involved in doing it, so we don't bother here.
#
# Another note: you'll notice that we don't include the _temp_buffer in
# our flow control considerations. This means you could in principle
# lead us to buffer slightly more than one connection flow control
# window's worth of data. That risk is considered acceptable for the
# much simpler logic available here.
#
# Finally, this is a pretty dumb flow control window management scheme:
# it causes us to emit a *lot* of window updates. A smarter server
# would want to use the content-length header to determine whether
# flow control window updates need to be emitted at all, and then to be
# more efficient about emitting them to avoid firing them off really
# frequently. For an example like this, there's very little gained by
# worrying about that.
self._protocol.open_flow_control_window(self.stream_id, chunk_size)
return chunk
def _build_environ_dict(headers, stream):
"""
Build the WSGI environ dictionary for a given request. To do that, we'll
temporarily create a dictionary for the headers. While this isn't actually
a valid way to represent headers, we know that the special headers we need
can only have one appearance in the block.
This code is arguably somewhat incautious: the conversion to dictionary
should only happen in a way that allows us to correctly join headers that
appear multiple times. That's acceptable in a demo app: in a productised
version you'd want to fix it.
"""
header_dict = dict(headers)
path = header_dict.pop(u':path')
try:
path, query = path.split(u'?', 1)
except ValueError:
query = u""
server_name = header_dict.pop(u':authority')
try:
server_name, port = server_name.split(u':', 1)
except ValueError as e:
port = "8443"
environ = {
u'REQUEST_METHOD': header_dict.pop(u':method'),
u'SCRIPT_NAME': u'',
u'PATH_INFO': path,
u'QUERY_STRING': query,
u'SERVER_NAME': server_name,
u'SERVER_PORT': port,
u'SERVER_PROTOCOL': u'HTTP/2',
u'HTTPS': u"on",
u'SSL_PROTOCOL': u'TLSv1.2',
u'wsgi.version': (1, 0),
u'wsgi.url_scheme': header_dict.pop(u':scheme'),
u'wsgi.input': stream,
u'wsgi.errors': sys.stderr,
u'wsgi.multithread': True,
u'wsgi.multiprocess': False,
u'wsgi.run_once': False,
}
if u'content-type' in header_dict:
environ[u'CONTENT_TYPE'] = header_dict[u'content-type']
if u'content-length' in header_dict:
environ[u'CONTENT_LENGTH'] = header_dict[u'content-length']
for name, value in header_dict.items():
environ[u'HTTP_' + name.upper()] = value
return environ
# Set up the WSGI app.
application_string = sys.argv[1]
path, func = application_string.split(':', 1)
module = importlib.import_module(path)
APPLICATION = getattr(module, func)
# Set up TLS
ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_context.options |= (
ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_COMPRESSION
)
ssl_context.set_ciphers("ECDHE+AESGCM")
ssl_context.load_cert_chain(certfile="cert.crt", keyfile="cert.key")
ssl_context.set_alpn_protocols(["h2"])
# Do the asyncio bits
loop = asyncio.get_event_loop()
# Each client connection will create a new protocol instance
coro = loop.create_server(H2Protocol, '127.0.0.1', 8443, ssl=ssl_context)
server = loop.run_until_complete(coro)
# Serve requests until Ctrl+C is pressed
print('Serving on {}'.format(server.sockets[0].getsockname()))
try:
loop.run_forever()
except KeyboardInterrupt:
pass
# Close the server
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
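# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original example): a minimal WSGI app that
# this server could serve instead of httpbin. The name ``example_app`` is an
# illustrative assumption; in practice you would save it in its own module
# (e.g. demo.py) and pass ``demo:example_app`` as the module:callable argument
# parsed from sys.argv above.
def example_app(environ, start_response):
    # Echo the request method and path back as plain text.
    body = ('%s %s served over HTTP/2\n' % (
        environ['REQUEST_METHOD'], environ['PATH_INFO'])).encode('utf-8')
    start_response('200 OK', [
        ('content-type', 'text/plain; charset=utf-8'),
        ('content-length', str(len(body))),
    ])
    return [body]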
| mit | 4,342,187,864,877,386,000 | 40.050198 | 79 | 0.639517 | false |
YYWen0o0/python-frame-django | django/contrib/postgres/fields/array.py | 10 | 8527 |
import json
from django.contrib.postgres.forms import SimpleArrayField
from django.contrib.postgres.validators import ArrayMaxLengthValidator
from django.core import checks, exceptions
from django.db.models import Field, Lookup, Transform, IntegerField
from django.utils import six
from django.utils.translation import string_concat, ugettext_lazy as _
__all__ = ['ArrayField']
class AttributeSetter(object):
def __init__(self, name, value):
setattr(self, name, value)
class ArrayField(Field):
empty_strings_allowed = False
default_error_messages = {
'item_invalid': _('Item %(nth)s in the array did not validate: '),
'nested_array_mismatch': _('Nested arrays must have the same length.'),
}
def __init__(self, base_field, size=None, **kwargs):
self.base_field = base_field
self.size = size
if self.size:
self.default_validators = self.default_validators[:]
self.default_validators.append(ArrayMaxLengthValidator(self.size))
super(ArrayField, self).__init__(**kwargs)
def check(self, **kwargs):
errors = super(ArrayField, self).check(**kwargs)
if self.base_field.rel:
errors.append(
checks.Error(
'Base field for array cannot be a related field.',
hint=None,
obj=self,
id='postgres.E002'
)
)
else:
# Remove the field name checks as they are not needed here.
base_errors = self.base_field.check()
if base_errors:
messages = '\n '.join('%s (%s)' % (error.msg, error.id) for error in base_errors)
errors.append(
checks.Error(
'Base field for array has errors:\n %s' % messages,
hint=None,
obj=self,
id='postgres.E001'
)
)
return errors
def set_attributes_from_name(self, name):
super(ArrayField, self).set_attributes_from_name(name)
self.base_field.set_attributes_from_name(name)
@property
def description(self):
return 'Array of %s' % self.base_field.description
def db_type(self, connection):
size = self.size or ''
return '%s[%s]' % (self.base_field.db_type(connection), size)
def get_prep_value(self, value):
if isinstance(value, list) or isinstance(value, tuple):
return [self.base_field.get_prep_value(i) for i in value]
return value
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
if lookup_type == 'contains':
return [self.get_prep_value(value)]
return super(ArrayField, self).get_db_prep_lookup(lookup_type, value,
connection, prepared=False)
def deconstruct(self):
name, path, args, kwargs = super(ArrayField, self).deconstruct()
path = 'django.contrib.postgres.fields.ArrayField'
args.insert(0, self.base_field)
kwargs['size'] = self.size
return name, path, args, kwargs
def to_python(self, value):
if isinstance(value, six.string_types):
# Assume we're deserializing
vals = json.loads(value)
value = [self.base_field.to_python(val) for val in vals]
return value
def get_default(self):
"""Overridden from the default to prevent string-mangling."""
if self.has_default():
if callable(self.default):
return self.default()
return self.default
return ''
def value_to_string(self, obj):
values = []
vals = self._get_val_from_obj(obj)
base_field = self.base_field
for val in vals:
obj = AttributeSetter(base_field.attname, val)
values.append(base_field.value_to_string(obj))
return json.dumps(values)
def get_transform(self, name):
transform = super(ArrayField, self).get_transform(name)
if transform:
return transform
try:
index = int(name)
except ValueError:
pass
else:
index += 1 # postgres uses 1-indexing
return IndexTransformFactory(index, self.base_field)
try:
start, end = name.split('_')
start = int(start) + 1
end = int(end) # don't add one here because postgres slices are weird
except ValueError:
pass
else:
return SliceTransformFactory(start, end)
def validate(self, value, model_instance):
super(ArrayField, self).validate(value, model_instance)
for i, part in enumerate(value):
try:
self.base_field.validate(part, model_instance)
except exceptions.ValidationError as e:
raise exceptions.ValidationError(
string_concat(self.error_messages['item_invalid'], e.message),
code='item_invalid',
params={'nth': i},
)
if isinstance(self.base_field, ArrayField):
if len({len(i) for i in value}) > 1:
raise exceptions.ValidationError(
self.error_messages['nested_array_mismatch'],
code='nested_array_mismatch',
)
def formfield(self, **kwargs):
defaults = {
'form_class': SimpleArrayField,
'base_field': self.base_field.formfield(),
'max_length': self.size,
}
defaults.update(kwargs)
return super(ArrayField, self).formfield(**defaults)
class ArrayContainsLookup(Lookup):
lookup_name = 'contains'
def as_sql(self, qn, connection):
lhs, lhs_params = self.process_lhs(qn, connection)
rhs, rhs_params = self.process_rhs(qn, connection)
params = lhs_params + rhs_params
type_cast = self.lhs.source.db_type(connection)
return '%s @> %s::%s' % (lhs, rhs, type_cast), params
ArrayField.register_lookup(ArrayContainsLookup)
class ArrayContainedByLookup(Lookup):
lookup_name = 'contained_by'
def as_sql(self, qn, connection):
lhs, lhs_params = self.process_lhs(qn, connection)
rhs, rhs_params = self.process_rhs(qn, connection)
params = lhs_params + rhs_params
return '%s <@ %s' % (lhs, rhs), params
ArrayField.register_lookup(ArrayContainedByLookup)
class ArrayOverlapLookup(Lookup):
lookup_name = 'overlap'
def as_sql(self, qn, connection):
lhs, lhs_params = self.process_lhs(qn, connection)
rhs, rhs_params = self.process_rhs(qn, connection)
params = lhs_params + rhs_params
return '%s && %s' % (lhs, rhs), params
ArrayField.register_lookup(ArrayOverlapLookup)
class ArrayLenTransform(Transform):
lookup_name = 'len'
@property
def output_field(self):
return IntegerField()
def as_sql(self, qn, connection):
lhs, params = qn.compile(self.lhs)
return 'array_length(%s, 1)' % lhs, params
ArrayField.register_lookup(ArrayLenTransform)
class IndexTransform(Transform):
def __init__(self, index, base_field, *args, **kwargs):
super(IndexTransform, self).__init__(*args, **kwargs)
self.index = index
self.base_field = base_field
def as_sql(self, qn, connection):
lhs, params = qn.compile(self.lhs)
return '%s[%s]' % (lhs, self.index), params
@property
def output_field(self):
return self.base_field
class IndexTransformFactory(object):
def __init__(self, index, base_field):
self.index = index
self.base_field = base_field
def __call__(self, *args, **kwargs):
return IndexTransform(self.index, self.base_field, *args, **kwargs)
class SliceTransform(Transform):
def __init__(self, start, end, *args, **kwargs):
super(SliceTransform, self).__init__(*args, **kwargs)
self.start = start
self.end = end
def as_sql(self, qn, connection):
lhs, params = qn.compile(self.lhs)
return '%s[%s:%s]' % (lhs, self.start, self.end), params
class SliceTransformFactory(object):
def __init__(self, start, end):
self.start = start
self.end = end
def __call__(self, *args, **kwargs):
return SliceTransform(self.start, self.end, *args, **kwargs)
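# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): how the lookups and
# transforms registered above are used from the ORM. Shown as a comment because
# a model class can only be declared inside an installed app; the model and
# field names are illustrative assumptions.
#
#     from django.contrib.postgres.fields import ArrayField
#
#     class Chart(models.Model):
#         tags = ArrayField(models.CharField(max_length=32), size=4)
#
#     Chart.objects.filter(tags__contains=['rock'])       # @>  array contains these values
#     Chart.objects.filter(tags__contained_by=['a', 'b']) # <@  array is a subset of the given values
#     Chart.objects.filter(tags__overlap=['a', 'b'])      # &&  arrays share at least one element
#     Chart.objects.filter(tags__len=2)                    # array_length(tags, 1) = 2
#     Chart.objects.filter(tags__0='rock')                 # IndexTransform: tags[1] (1-indexed in SQL)
#     Chart.objects.filter(tags__0_1=['rock'])             # SliceTransform: tags[1:1]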
| bsd-3-clause | 6,668,538,543,405,886,000 | 31.422053 | 100 | 0.588249 | false |
lorenzogil/mandelbrot | python/mandelbrot.py | 1 | 4044 |
from math import floor, log, sqrt
import sdl2
import sdl2.ext
SCREEN_WIDTH = 600
SCREEN_HEIGHT = 600
MAX_ITERATIONS = 16
def gradient(pos):
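# Map a position in [0, 1) to an RGB color along a piecewise-linear gradient,
# interpolating between fixed stops at 0.16, 0.42, 0.6425 and 0.8575
# (dark blue -> light blue -> near-white -> orange -> near-black -> dark blue).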
if (pos < 0.16):
ratio = (pos - 0.0) / (0.16 - 0.0)
red = 0 + ratio * (32 - 0)
green = 7 + ratio * (107 - 7)
blue = 100 + ratio * (203 - 100)
elif (pos < 0.42):
ratio = (pos - 0.16) / (0.42 - 0.16)
red = 32 + ratio * (237 - 32)
green = 107 + ratio * (255 - 107)
blue = 203 + ratio * (255 - 203)
elif (pos < 0.6425):
ratio = (pos - 0.42) / (0.6425 - 0.42)
red = 237 + ratio * (255 - 237)
green = 255 + ratio * (170 - 255)
blue = 255 + ratio * (0 - 255)
elif (pos < 0.8575):
ratio = (pos - 0.6425) / (0.8575 - 0.6425)
red = 255 + ratio * (0 - 255)
green = 170 + ratio * (2 - 170)
blue = 0
else:
ratio = (pos - 0.8575) / (1.0 - 0.8575)
red = 0
green = 2 + ratio * (7 - 2)
blue = 0 + ratio * (100 - 0)
return sdl2.ext.Color(red, green, blue, 255)
def render_mandelbrot(ren, colors, viewport_size, center_x, center_y):
ren.clear(0)
black = sdl2.ext.Color(0, 0, 0, 255)
for j in xrange(SCREEN_HEIGHT):
for i in xrange(SCREEN_WIDTH):
# transform pixel coordinates to viewport coordinates
x = (center_x - 0.5 * viewport_size +
(float(i) / SCREEN_WIDTH) * viewport_size)
y = (center_y - 0.5 * viewport_size +
(float(j) / SCREEN_HEIGHT) * viewport_size)
# compute the Mandelbrot formula
index = 0
z = 0.0
zi = 0.0
while index < MAX_ITERATIONS:
newz = (z * z) - (zi * zi) + x
newzi = 2 * z * zi + y
z = newz
zi = newzi
if ((z * z) + (zi * zi)) > 4:
break
index += 1
if index == 0 or index >= MAX_ITERATIONS:
ren.draw_point([i, j], black)
else:
smooth = index + 1 - log(log(sqrt(z * z + zi * zi))) / log(2)
color = colors[int(floor(smooth)) - 1]
ren.draw_point([i, j], color)
ren.present()
def main():
try:
sdl2.ext.init()
except sdl2.ext.SDLError as e:
print("SDL init error: %s" % e.msg)
try:
win = sdl2.ext.Window(
"Mandelbrot set",
size=(SCREEN_WIDTH, SCREEN_HEIGHT)
)
win.show()
except sdl2.ext.SDLError as e:
print("SDL Window init error: %s" % e.msg)
sdl2.ext.quit()
return 1
try:
flags = (
sdl2.SDL_RENDERER_ACCELERATED |
sdl2.SDL_RENDERER_PRESENTVSYNC
)
ren = sdl2.ext.Renderer(win, flags=flags)
except sdl2.ext.SDLError as e:
print("SDL Renderer init error: %s" % e.msg)
sdl2.ext.quit()
return 1
colors = [gradient(float(i) / MAX_ITERATIONS)
for i in xrange(MAX_ITERATIONS)]
viewport_size = 4.0
x = 0.0
y = 0.0
running = True
while running:
for event in sdl2.ext.get_events():
if event.type == sdl2.SDL_QUIT:
running = False
break
elif event.type == sdl2.SDL_KEYDOWN:
key = event.key.keysym.sym
if key == sdl2.SDLK_ESCAPE:
running = False
elif key == sdl2.SDLK_UP:
y -= 0.1 * viewport_size
elif key == sdl2.SDLK_DOWN:
y += 0.1 * viewport_size
elif key == sdl2.SDLK_LEFT:
x -= 0.1 * viewport_size
elif key == sdl2.SDLK_RIGHT:
x += 0.1 * viewport_size
break
render_mandelbrot(ren, colors, viewport_size, x, y)
viewport_size = 0.9 * viewport_size
win.refresh()
sdl2.ext.quit()
if __name__ == '__main__':
main()
| gpl-3.0 | -7,877,150,443,130,833,000 | 27.27972 | 77 | 0.465875 | false |
gauravbose/digital-menu | digimenu2/tests/cache/tests.py | 12 | 84938 |
# -*- coding: utf-8 -*-
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
from __future__ import unicode_literals
import copy
import os
import re
import shutil
import tempfile
import threading
import time
import unittest
import warnings
from django.conf import settings
from django.core import management, signals
from django.core.cache import (
DEFAULT_CACHE_ALIAS, CacheKeyWarning, InvalidCacheBackendError, cache,
caches, close_caches, get_cache,
)
from django.core.cache.utils import make_template_fragment_key
from django.db import connection, connections, transaction
from django.http import HttpRequest, HttpResponse, StreamingHttpResponse
from django.middleware.cache import (
CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware,
)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import engines
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.test import (
RequestFactory, TestCase, TransactionTestCase, ignore_warnings,
override_settings,
)
from django.test.signals import setting_changed
from django.utils import six, timezone, translation
from django.utils.cache import (
get_cache_key, learn_cache_key, patch_cache_control,
patch_response_headers, patch_vary_headers,
)
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import force_text
from django.views.decorators.cache import cache_page
from .models import Poll, expensive_calculation
try: # Use the same idiom as in cache backends
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class Unpickable(object):
def __getstate__(self):
raise pickle.PickleError()
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
})
class DummyCacheTests(TestCase):
# The Dummy cache backend doesn't really behave like a test backend,
# so it has its own test case.
def test_simple(self):
"Dummy cache backend ignores cache set calls"
cache.set("key", "value")
self.assertIsNone(cache.get("key"))
def test_add(self):
"Add doesn't do anything in dummy cache backend"
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertTrue(result)
self.assertIsNone(cache.get("addkey1"))
def test_non_existent(self):
"Non-existent keys aren't found in the dummy cache backend"
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
"get_many returns nothing for the dummy cache backend"
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertEqual(cache.get_many(['a', 'c', 'd']), {})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {})
def test_delete(self):
"Cache deletion is transparently ignored on the dummy cache backend"
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertIsNone(cache.get("key1"))
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_has_key(self):
"The has_key method doesn't ever return True for the dummy cache backend"
cache.set("hello1", "goodbye1")
self.assertFalse(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
def test_in(self):
"The in operator doesn't ever return True for the dummy cache backend"
cache.set("hello2", "goodbye2")
self.assertNotIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
"Dummy cache values can't be incremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.incr, 'answer')
self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
"Dummy cache values can't be decremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.decr, 'answer')
self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_data_types(self):
"All data types are ignored equally by the dummy cache"
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertIsNone(cache.get("stuff"))
def test_expiration(self):
"Expiration has no effect on the dummy cache"
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertIsNone(cache.get("expire2"))
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
"Unicode values are ignored by the dummy cache"
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
for (key, value) in stuff.items():
cache.set(key, value)
self.assertIsNone(cache.get(key))
def test_set_many(self):
"set_many does nothing for the dummy cache backend"
cache.set_many({'a': 1, 'b': 2})
cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1')
def test_delete_many(self):
"delete_many does nothing for the dummy cache backend"
cache.delete_many(['a', 'b'])
def test_clear(self):
"clear does nothing for the dummy cache backend"
cache.clear()
def test_incr_version(self):
"Dummy cache versions can't be incremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.incr_version, 'answer')
self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')
def test_decr_version(self):
"Dummy cache versions can't be decremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.decr_version, 'answer')
self.assertRaises(ValueError, cache.decr_version, 'does_not_exist')
def custom_key_func(key, key_prefix, version):
"A customized cache key function"
return 'CUSTOM-' + '-'.join([key_prefix, str(version), key])
_caches_setting_base = {
'default': {},
'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
'v2': {'VERSION': 2},
'custom_key': {'KEY_FUNCTION': custom_key_func},
'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'},
'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, **params):
# `base` is used to pull in the memcached config from the original settings,
# `params` are test specific overrides and `_caches_settings_base` is the
# base config for the tests.
# This results in the following search order:
# params -> _caches_setting_base -> base
base = base or {}
setting = {k: base.copy() for k in _caches_setting_base.keys()}
for key, cache_params in setting.items():
cache_params.update(_caches_setting_base[key])
cache_params.update(params)
return setting
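# For example, with params BACKEND='<some backend path>', the resulting 'cull'
# alias would be {'BACKEND': '<some backend path>', 'OPTIONS': {'MAX_ENTRIES': 30}}:
# params override _caches_setting_base, which in turn overrides `base`.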
class BaseCacheTests(object):
# A common set of tests to apply to all cache backends
def setUp(self):
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_add(self):
# A key can be added to a cache
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertFalse(result)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
# Test for cache key conflicts between caches that share the same backend
cache.set('somekey', 'value')
# should not be set in the prefixed cache
self.assertFalse(caches['prefix'].has_key('somekey'))
caches['prefix'].set('somekey', 'value2')
self.assertEqual(cache.get('somekey'), 'value')
self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
# Non-existent cache keys return as None/default
# get with non-existent keys
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertDictEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
self.assertDictEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
def test_delete(self):
# Cache keys can be deleted
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertEqual(cache.get("key1"), "spam")
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertEqual(cache.get("key2"), "eggs")
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertTrue(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
cache.set("no_expiry", "here", None)
self.assertTrue(cache.has_key("no_expiry"))
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
# Cache values can be incremented
cache.set('answer', 41)
self.assertEqual(cache.incr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.incr('answer', 10), 52)
self.assertEqual(cache.get('answer'), 52)
self.assertEqual(cache.incr('answer', -10), 42)
self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
# Cache values can be decremented
cache.set('answer', 43)
self.assertEqual(cache.decr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.decr('answer', 10), 32)
self.assertEqual(cache.get('answer'), 32)
self.assertEqual(cache.decr('answer', -10), 42)
self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_close(self):
self.assertTrue(hasattr(cache, 'close'))
cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
# Fields with a callable default shouldn't be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set('question', my_poll)
cached_poll = cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# Fields with a callable default shouldn't be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# Fields with a callable default shouldn't be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
# Test `set`
for (key, value) in stuff.items():
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
cache.delete(key)
cache.add(key, value)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
cache.delete(key)
cache.set_many(stuff)
for (key, value) in stuff.items():
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
cache.set('binary1', compressed_value)
compressed_result = cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
cache.add('binary1-add', compressed_value)
compressed_result = cache.get('binary1-add')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({'binary1-set_many': compressed_value})
compressed_result = cache.get('binary1-set_many')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.set("key3", "ham")
cache.delete_many(["key1", "key2"])
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.clear()
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_long_timeout(self):
'''
Using a timeout greater than 30 days makes memcached think
it is an absolute expiration timestamp instead of a relative
offset. Test that we honour this convention. Refs #12399.
'''
cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key2'), 'ham')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
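# A sketch of the convention being honoured (not the backend's literal code):
# for timeouts beyond 30 days the memcached backend is expected to send an
# absolute Unix timestamp, roughly int(time.time()) + timeout, rather than a
# relative offset, so the entries above still expire about 30 days from now.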
def test_forever_timeout(self):
'''
Passing in None into timeout results in a value that is cached forever
'''
cache.set('key1', 'eggs', None)
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', None)
self.assertEqual(cache.get('key2'), 'ham')
added = cache.add('key1', 'new eggs', None)
self.assertEqual(added, False)
self.assertEqual(cache.get('key1'), 'eggs')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_zero_timeout(self):
'''
Passing in zero into timeout results in a value that is not cached
'''
cache.set('key1', 'eggs', 0)
self.assertIsNone(cache.get('key1'))
cache.add('key2', 'ham', 0)
self.assertIsNone(cache.get('key2'))
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
self.assertIsNone(cache.get('key3'))
self.assertIsNone(cache.get('key4'))
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2)
self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache, initial_count, final_count):
# Create initial cache key entries. This will overflow the cache,
# causing a cull.
for i in range(1, initial_count):
cull_cache.set('cull%d' % i, 'value', 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if cull_cache.has_key('cull%d' % i):
count = count + 1
self.assertEqual(count, final_count)
def test_cull(self):
self._perform_cull_test(caches['cull'], 50, 29)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 19)
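# A sketch of the arithmetic behind the expected counts above: with
# MAX_ENTRIES=30 and the backend's default CULL_FREQUENCY of 3, an overflow
# culls roughly a third of the entries, which is how 29 of the 49 keys
# survive; with CULL_FREQUENCY=0 the whole cache is flushed instead, so only
# keys written after the flush remain.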
def test_invalid_keys(self):
"""
All the builtin backends (except memcached, see below) should warn on
keys that would be refused by memcached. This encourages portable
caching code without making it too difficult to use production backends
with more liberal key rules. Refs #6447.
"""
# Mimic a custom ``make_key`` method being defined, since the default key
# function never triggers the warnings below
def func(key, *args):
return key
old_func = cache.key_func
cache.key_func = func
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# memcached does not allow whitespace or control characters in keys
cache.set('key with spaces', 'value')
self.assertEqual(len(w), 2)
self.assertIsInstance(w[0].message, CacheKeyWarning)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# memcached limits key length to 250
cache.set('a' * 251, 'value')
self.assertEqual(len(w), 1)
self.assertIsInstance(w[0].message, CacheKeyWarning)
finally:
cache.key_func = old_func
def test_cache_versioning_get_set(self):
# set, using default version = 1
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertEqual(cache.get('answer1', version=1), 42)
self.assertIsNone(cache.get('answer1', version=2))
self.assertIsNone(caches['v2'].get('answer1'))
self.assertEqual(caches['v2'].get('answer1', version=1), 42)
self.assertIsNone(caches['v2'].get('answer1', version=2))
# set, default version = 1, but manually override version = 2
cache.set('answer2', 42, version=2)
self.assertIsNone(cache.get('answer2'))
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
# v2 set, using default version = 2
caches['v2'].set('answer3', 42)
self.assertIsNone(cache.get('answer3'))
self.assertIsNone(cache.get('answer3', version=1))
self.assertEqual(cache.get('answer3', version=2), 42)
self.assertEqual(caches['v2'].get('answer3'), 42)
self.assertIsNone(caches['v2'].get('answer3', version=1))
self.assertEqual(caches['v2'].get('answer3', version=2), 42)
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set('answer4', 42, version=1)
self.assertEqual(cache.get('answer4'), 42)
self.assertEqual(cache.get('answer4', version=1), 42)
self.assertIsNone(cache.get('answer4', version=2))
self.assertIsNone(caches['v2'].get('answer4'))
self.assertEqual(caches['v2'].get('answer4', version=1), 42)
self.assertIsNone(caches['v2'].get('answer4', version=2))
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
cache.add('answer1', 42, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=1)
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
# v2 add, using default version = 2
caches['v2'].add('answer2', 42)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37, version=1)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
# v2 add, default version = 2, but manually override version = 1
caches['v2'].add('answer3', 42, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
cache.set('answer1', 42)
# has_key
self.assertTrue(cache.has_key('answer1'))
self.assertTrue(cache.has_key('answer1', version=1))
self.assertFalse(cache.has_key('answer1', version=2))
self.assertFalse(caches['v2'].has_key('answer1'))
self.assertTrue(caches['v2'].has_key('answer1', version=1))
self.assertFalse(caches['v2'].has_key('answer1', version=2))
def test_cache_versioning_delete(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.delete('answer1')
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.delete('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertIsNone(cache.get('answer2', version=2))
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].delete('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertIsNone(cache.get('answer3', version=2))
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].delete('answer4', version=1)
self.assertIsNone(cache.get('answer4', version=1))
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.incr('answer1')
self.assertEqual(cache.get('answer1', version=1), 38)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.decr('answer1')
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.incr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 43)
cache.decr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].incr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 43)
caches['v2'].decr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 42)
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].incr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 38)
self.assertEqual(cache.get('answer4', version=2), 42)
caches['v2'].decr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 37)
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
cache.set_many({'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1']),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=1),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1']), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {})
# set, default version = 1, but manually override version = 2
cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
self.assertDictEqual(cache.get_many(['ford2', 'arthur2']), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=2),
{'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2']),
{'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2),
{'ford2': 37, 'arthur2': 42})
# v2 set, using default version = 2
caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3']), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=2),
{'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3']),
{'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2),
{'ford3': 37, 'arthur3': 42})
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
self.assertDictEqual(cache.get_many(['ford4', 'arthur4']),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=1),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4']), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {})
def test_incr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertIsNone(cache.get('answer', version=3))
self.assertEqual(cache.incr_version('answer', version=2), 3)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertIsNone(cache.get('answer', version=2))
self.assertEqual(cache.get('answer', version=3), 42)
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertIsNone(caches['v2'].get('answer2', version=3))
self.assertEqual(caches['v2'].incr_version('answer2'), 3)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertEqual(caches['v2'].get('answer2', version=3), 42)
self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')
def test_decr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertEqual(cache.decr_version('answer', version=2), 1)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.get('answer', version=1), 42)
self.assertIsNone(cache.get('answer', version=2))
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertEqual(caches['v2'].decr_version('answer2'), 1)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertEqual(caches['v2'].get('answer2', version=1), 42)
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertRaises(ValueError, cache.decr_version, 'does_not_exist', version=2)
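# Both version helpers follow the same pattern (a sketch, not the backend's
# exact code): fetch the value at the old version, store it under the new
# version, then delete the old entry -- which is why the value is visible
# only under the bumped (or decremented) version afterwards.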
def test_custom_key_func(self):
# Two caches with different key functions aren't visible to each other
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertIsNone(caches['custom_key'].get('answer1'))
self.assertIsNone(caches['custom_key2'].get('answer1'))
caches['custom_key'].set('answer2', 42)
self.assertIsNone(cache.get('answer2'))
self.assertEqual(caches['custom_key'].get('answer2'), 42)
self.assertEqual(caches['custom_key2'].get('answer2'), 42)
def test_cache_write_unpickable_object(self):
update_middleware = UpdateCacheMiddleware()
update_middleware.cache = cache
fetch_middleware = FetchFromCacheMiddleware()
fetch_middleware.cache = cache
request = self.factory.get('/cache/test')
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Testing cookie serialization.'
response.content = content
response.set_cookie('foo', 'bar')
update_middleware.process_response(request, response)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
update_middleware.process_response(request, get_cache_data)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
"See https://code.djangoproject.com/ticket/21200"
with self.assertRaises(pickle.PickleError):
cache.add('unpickable', Unpickable())
def test_set_fail_on_pickleerror(self):
"See https://code.djangoproject.com/ticket/21200"
with self.assertRaises(pickle.PickleError):
cache.set('unpickable', Unpickable())
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Spaces are used in the table name to ensure quoting/escaping is working
LOCATION='test cache table'
))
class DBCacheTests(BaseCacheTests, TransactionTestCase):
available_apps = ['cache']
def setUp(self):
# The super call needs to happen first for the settings override.
super(DBCacheTests, self).setUp()
self.create_table()
def tearDown(self):
# The super call needs to happen first because it uses the database.
super(DBCacheTests, self).tearDown()
self.drop_table()
def create_table(self):
management.call_command('createcachetable', verbosity=0, interactive=False)
def drop_table(self):
with connection.cursor() as cursor:
table_name = connection.ops.quote_name('test cache table')
cursor.execute('DROP TABLE %s' % table_name)
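# quote_name() matters here because the test table name deliberately contains
# spaces (see the LOCATION in the settings above), so it must be quoted
# before being interpolated into the DROP TABLE statement.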
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 18)
def test_second_call_doesnt_crash(self):
out = six.StringIO()
management.call_command('createcachetable', stdout=out)
self.assertEqual(out.getvalue(),
"Cache table 'test cache table' already exists.\n" * len(settings.CACHES))
def test_createcachetable_with_table_argument(self):
"""
Delete and recreate cache table with legacy behavior (explicitly
specifying the table name).
"""
self.drop_table()
out = six.StringIO()
management.call_command(
'createcachetable',
'test cache table',
verbosity=2,
stdout=out,
)
self.assertEqual(out.getvalue(),
"Cache table 'test cache table' created.\n")
def test_clear_commits_transaction(self):
# Ensure the database transaction is committed (#19896)
cache.set("key1", "spam")
cache.clear()
transaction.rollback()
self.assertIsNone(cache.get("key1"))
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
pass
class DBCacheRouter(object):
"""A router that puts the cache table on the 'other' database."""
def db_for_read(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def db_for_write(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def allow_migrate(self, db, app_label, **hints):
if app_label == 'django_cache':
return db == 'other'
return None
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'my_cache_table',
},
},
)
class CreateCacheTableForDBCacheTests(TestCase):
multi_db = True
@override_settings(DATABASE_ROUTERS=[DBCacheRouter()])
def test_createcachetable_observes_database_router(self):
# cache table should not be created on 'default'
with self.assertNumQueries(0, using='default'):
management.call_command('createcachetable',
database='default',
verbosity=0, interactive=False)
# cache table should be created on 'other'
# Queries:
# 1: check table doesn't already exist
# 2: create savepoint (if transactional DDL is supported)
# 3: create the table
# 4: create the index
# 5: release savepoint (if transactional DDL is supported)
num = 5 if connections['other'].features.can_rollback_ddl else 3
with self.assertNumQueries(num, using='other'):
management.call_command('createcachetable',
database='other',
verbosity=0, interactive=False)
class PicklingSideEffect(object):
def __init__(self, cache):
self.cache = cache
self.locked = False
def __getstate__(self):
if self.cache._lock.active_writers:
self.locked = True
return {}
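# PicklingSideEffect works because pickle calls __getstate__ while the value
# is being serialized; if the locmem backend were still holding its write
# lock at that moment, active_writers would be non-zero and the assertions
# below would flag it. (This peeks at locmem internals via the _lock attribute.)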
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.locmem.LocMemCache',
))
class LocMemCacheTests(BaseCacheTests, TestCase):
def setUp(self):
super(LocMemCacheTests, self).setUp()
# LocMem requires a hack to make the other caches
# share a data store with the 'normal' cache.
caches['prefix']._cache = cache._cache
caches['prefix']._expire_info = cache._expire_info
caches['v2']._cache = cache._cache
caches['v2']._expire_info = cache._expire_info
caches['custom_key']._cache = cache._cache
caches['custom_key']._expire_info = cache._expire_info
caches['custom_key2']._cache = cache._cache
caches['custom_key2']._expire_info = cache._expire_info
@override_settings(CACHES={
'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other'
},
})
def test_multiple_caches(self):
"Check that multiple locmem caches are isolated"
cache.set('value', 42)
self.assertEqual(caches['default'].get('value'), 42)
self.assertIsNone(caches['other'].get('value'))
def test_locking_on_pickle(self):
"""#20613/#18541 -- Ensures pickling is done outside of the lock."""
bad_obj = PicklingSideEffect(cache)
cache.set('set', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
cache.add('add', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
def test_incr_decr_timeout(self):
"""incr/decr does not modify expiry time (matches memcached behavior)"""
key = 'value'
_key = cache.make_key(key)
cache.set(key, 1, timeout=cache.default_timeout * 10)
expire = cache._expire_info[_key]
cache.incr(key)
self.assertEqual(expire, cache._expire_info[_key])
cache.decr(key)
self.assertEqual(expire, cache._expire_info[_key])
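# The assertions above work because locmem tracks expiry separately in
# _expire_info, keyed by make_key(key); incr/decr rewrite the stored value
# but, matching memcached, leave that expiry entry untouched.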
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain at least one cache backend setting that points at
# your memcache server.
memcached_params = {}
for _cache_params in settings.CACHES.values():
if _cache_params['BACKEND'].startswith('django.core.cache.backends.memcached.'):
memcached_params = _cache_params
memcached_never_expiring_params = memcached_params.copy()
memcached_never_expiring_params['TIMEOUT'] = None
memcached_far_future_params = memcached_params.copy()
memcached_far_future_params['TIMEOUT'] = 31536000 # 60*60*24*365, 1 year
@unittest.skipUnless(memcached_params, "memcached not available")
@override_settings(CACHES=caches_setting_for_tests(base=memcached_params))
class MemcachedCacheTests(BaseCacheTests, TestCase):
def test_invalid_keys(self):
"""
On memcached, we don't introduce a duplicate key validation
step (for speed reasons); we just let the memcached API
library raise its own exception on bad keys. Refs #6447.
In order to be memcached-API-library agnostic, we only assert
that a generic exception of some kind is raised.
"""
# memcached does not allow whitespace or control characters in keys
self.assertRaises(Exception, cache.set, 'key with spaces', 'value')
# memcached limits key length to 250
self.assertRaises(Exception, cache.set, 'a' * 251, 'value')
# Explicitly display a skipped test if no configured cache uses MemcachedCache
@unittest.skipUnless(
memcached_params.get('BACKEND') == 'django.core.cache.backends.memcached.MemcachedCache',
"cache with python-memcached library not available")
def test_memcached_uses_highest_pickle_version(self):
# Regression test for #19810
for cache_key, cache_config in settings.CACHES.items():
if cache_config['BACKEND'] == 'django.core.cache.backends.memcached.MemcachedCache':
self.assertEqual(caches[cache_key]._cache.pickleProtocol,
pickle.HIGHEST_PROTOCOL)
@override_settings(CACHES=caches_setting_for_tests(base=memcached_never_expiring_params))
def test_default_never_expiring_timeout(self):
# Regression test for #22845
cache.set('infinite_foo', 'bar')
self.assertEqual(cache.get('infinite_foo'), 'bar')
@override_settings(CACHES=caches_setting_for_tests(base=memcached_far_future_params))
def test_default_far_future_timeout(self):
# Regression test for #22845
cache.set('future_foo', 'bar')
self.assertEqual(cache.get('future_foo'), 'bar')
def test_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_zero_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_memcached_deletes_key_on_failed_set(self):
# By default memcached allows objects up to 1MB. For the cache_db session
# backend to always use the current session, memcached needs to delete
# the old key if it fails to set.
# pylibmc doesn't seem to have SERVER_MAX_VALUE_LENGTH as far as I can
# tell from a quick check of its source code. This is falling back to
# the default value exposed by python-memcached on my system.
max_value_length = getattr(cache._lib, 'SERVER_MAX_VALUE_LENGTH', 1048576)
cache.set('small_value', 'a')
self.assertEqual(cache.get('small_value'), 'a')
large_value = 'a' * (max_value_length + 1)
cache.set('small_value', large_value)
# small_value should be deleted, or set if configured to accept larger values
value = cache.get('small_value')
self.assertTrue(value is None or value == large_value)
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.filebased.FileBasedCache',
))
class FileBasedCacheTests(BaseCacheTests, TestCase):
"""
Specific test cases for the file-based cache.
"""
def setUp(self):
super(FileBasedCacheTests, self).setUp()
self.dirname = tempfile.mkdtemp()
# The cache LOCATION cannot be modified through override_settings / modify_settings,
# hence settings are manipulated directly here and the setting_changed signal
# is triggered manually.
for cache_params in settings.CACHES.values():
cache_params.update({'LOCATION': self.dirname})
setting_changed.send(self.__class__, setting='CACHES', enter=False)
def tearDown(self):
super(FileBasedCacheTests, self).tearDown()
# Call parent first, as cache.clear() may recreate cache base directory
shutil.rmtree(self.dirname)
def test_ignores_non_cache_files(self):
fname = os.path.join(self.dirname, 'not-a-cache-file')
with open(fname, 'w'):
os.utime(fname, None)
cache.clear()
self.assertTrue(os.path.exists(fname),
'Expected cache.clear to ignore non cache files')
os.remove(fname)
def test_clear_does_not_remove_cache_dir(self):
cache.clear()
self.assertTrue(os.path.exists(self.dirname),
'Expected cache.clear to keep the cache dir')
def test_creates_cache_dir_if_nonexistent(self):
os.rmdir(self.dirname)
cache.set('foo', 'bar')
self.assertTrue(os.path.exists(self.dirname))
@override_settings(CACHES={
'default': {
'BACKEND': 'cache.liberal_backend.CacheClass',
},
})
class CustomCacheKeyValidationTests(TestCase):
"""
Tests for the ability to mixin a custom ``validate_key`` method to
a custom cache backend that otherwise inherits from a builtin
backend, and override the default key validation. Refs #6447.
"""
def test_custom_key_validation(self):
# this key is both longer than 250 characters, and has spaces
key = 'some key with spaces' * 15
val = 'a value'
cache.set(key, val)
self.assertEqual(cache.get(key), val)
@override_settings(
CACHES={
'default': {
'BACKEND': 'cache.closeable_cache.CacheClass',
}
}
)
class GetCacheTests(TestCase):
@ignore_warnings(category=RemovedInDjango19Warning)
def test_simple(self):
self.assertIsInstance(
caches[DEFAULT_CACHE_ALIAS],
get_cache('default').__class__
)
cache = get_cache(
'django.core.cache.backends.dummy.DummyCache',
**{'TIMEOUT': 120}
)
self.assertEqual(cache.default_timeout, 120)
self.assertRaises(InvalidCacheBackendError, get_cache, 'does_not_exist')
def test_close(self):
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
@ignore_warnings(category=RemovedInDjango19Warning)
def test_close_deprecated(self):
cache = get_cache('cache.closeable_cache.CacheClass')
self.assertFalse(cache.closed)
# Ensure that we don't close the global cache instances.
signals.request_finished.disconnect(close_caches)
try:
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
finally:
signals.request_finished.connect(close_caches)
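# The non-deprecated test_close path relies on the close_caches receiver that
# is connected to request_finished, which is why test_close_deprecated has to
# disconnect it before exercising the legacy get_cache() instance.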
DEFAULT_MEMORY_CACHES_SETTINGS = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None
class DefaultNonExpiringCacheKeyTests(TestCase):
"""Tests that verify that settings having Cache arguments with a TIMEOUT
set to `None` will create Caches that will set non-expiring keys.
This fixes ticket #22085.
"""
def setUp(self):
# The 5 minute (300 seconds) default expiration time for keys is
# defined in the implementation of the initializer method of the
# BaseCache type.
self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout
def tearDown(self):
del self.DEFAULT_TIMEOUT
def test_default_expiration_time_for_keys_is_5_minutes(self):
"""The default expiration time of a cache key is 5 minutes.
This value is defined inside the __init__() method of the
:class:`django.core.cache.backends.base.BaseCache` type.
"""
self.assertEqual(300, self.DEFAULT_TIMEOUT)
def test_caches_with_unset_timeout_has_correct_default_timeout(self):
"""Caches that have the TIMEOUT parameter undefined in the default
settings will use the default 5 minute timeout.
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout)
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
"""Memory caches that have the TIMEOUT parameter set to `None` in the
default settings will have `None` as the default timeout.
This means "no timeout".
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertIsNone(cache.default_timeout)
self.assertIsNone(cache.get_backend_timeout())
@override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS)
def test_caches_with_unset_timeout_set_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter unset will set cache
keys having the default 5 minute timeout.
"""
key = "my-key"
value = "my-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNotNone(cache._expire_info[cache_key])
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_set_non_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter set to `None` will set
a non-expiring key by default.
"""
key = "another-key"
value = "another-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNone(cache._expire_info[cache_key])
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class CacheUtils(TestCase):
"""TestCase for django.utils.cache functions."""
def setUp(self):
self.host = 'www.example.com'
self.path = '/cache/test/'
self.factory = RequestFactory(HTTP_HOST=self.host)
def tearDown(self):
cache.clear()
def _get_request_cache(self, method='GET', query_string=None, update_cache=None):
request = self._get_request(self.host, self.path,
method, query_string=query_string)
request._cache_update_cache = True if not update_cache else update_cache
return request
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
response = HttpResponse()
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
# Verify that a specified key_prefix is taken into account.
key_prefix = 'localprefix'
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
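# A sketch of how those keys are composed: prefix and request method, then an
# MD5 of the fully-qualified request URL (so the query string and host change
# it, as the following tests show) and an MD5 of the values of any headers
# named in the response's Vary header -- d41d8cd9... is simply the MD5 of the
# empty string, since no Vary headers are involved here.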
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e'
)
def test_cache_key_varies_by_url(self):
"""
get_cache_key keys differ by fully-qualified URL instead of path
"""
request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com')
learn_cache_key(request1, HttpResponse())
request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com')
learn_cache_key(request2, HttpResponse())
self.assertNotEqual(get_cache_key(request1), get_cache_key(request2))
def test_learn_cache_key(self):
request = self.factory.head(self.path)
response = HttpResponse()
response['Vary'] = 'Pony'
# Make sure that the Vary header is added to the key hash
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_patch_cache_control(self):
tests = (
# Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
(None, {'private': True}, {'private'}),
# Test whether private/public attributes are mutually exclusive
('private', {'private': True}, {'private'}),
('private', {'public': True}, {'public'}),
('public', {'public': True}, {'public'}),
('public', {'private': True}, {'private'}),
('must-revalidate,max-age=60,private', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
('must-revalidate,max-age=60,public', {'private': True}, {'must-revalidate', 'max-age=60', 'private'}),
('must-revalidate,max-age=60', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
)
cc_delim_re = re.compile(r'\s*,\s*')
for initial_cc, newheaders, expected_cc in tests:
response = HttpResponse()
if initial_cc is not None:
response['Cache-Control'] = initial_cc
patch_cache_control(response, **newheaders)
parts = set(cc_delim_re.split(response['Cache-Control']))
self.assertEqual(parts, expected_cc)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix',
},
},
)
class PrefixedCacheUtils(CacheUtils):
pass
@override_settings(
CACHE_MIDDLEWARE_SECONDS=60,
CACHE_MIDDLEWARE_KEY_PREFIX='test',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
)
class CacheHEADTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_head_caches_correctly(self):
test_content = 'test content'
request = self.factory.head(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
def test_head_with_cached_get(self):
test_content = 'test content'
request = self.factory.get(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
LANGUAGES=(
('en', 'English'),
('es', 'Spanish'),
),
)
class CacheI18nTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
def check_accept_language_vary(self, accept_language, vary, reference_key):
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = vary
key = learn_cache_key(request, response)
key2 = get_cache_key(request)
self.assertEqual(key, reference_key)
self.assertEqual(key2, reference_key)
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation_accept_language(self):
lang = translation.get_language()
self.assertEqual(lang, 'en')
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = 'accept-encoding'
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
self.check_accept_language_vary(
'en-us',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'en-US',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8',
'accept-encoding, accept-language, cookie',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8,ko;q=0.6',
'accept-language, cookie, accept-encoding',
key
)
self.check_accept_language_vary(
'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
'accept-encoding, cookie, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
'accept-language, accept-encoding, cookie',
key
)
self.check_accept_language_vary(
'ko;q=1.0,en;q=0.5',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'ko, en',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR, en-US',
'accept-encoding, accept-language, cookie',
key
)
@override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False)
def test_cache_key_i18n_formatting(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when formatting is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_i18n_timezone(self):
request = self.factory.get(self.path)
# This is tightly coupled to the implementation,
# but it's the most straightforward way to test the key.
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False)
def test_cache_key_no_i18n(self):
request = self.factory.get(self.path)
lang = translation.get_language()
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_with_non_ascii_tzname(self):
# Regression test for #17476
class CustomTzName(timezone.UTC):
name = ''
def tzname(self, dt):
return self.name
request = self.factory.get(self.path)
response = HttpResponse()
with timezone.override(CustomTzName()):
CustomTzName.name = 'Hora estándar de Argentina'.encode('UTF-8') # UTF-8 string
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
CustomTzName.name = 'Hora estándar de Argentina' # unicode
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
USE_I18N=True,
)
def test_middleware(self):
def set_cache(request, lang, msg):
translation.activate(lang)
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
# cache with non-empty request.GET
request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# first access, cache must return None
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Check for cache with QUERY_STRING'
response.content = content
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# cache must return content
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
# different QUERY_STRING, cache must be empty
request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# i18n tests
en_message = "Hello world!"
es_message = "Hola mundo!"
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# Check that we can recover the cache
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, en_message.encode())
# Check that we use etags
self.assertTrue(get_cache_data.has_header('ETag'))
# Check that we can disable etags
with self.settings(USE_ETAGS=False):
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertFalse(get_cache_data.has_header('ETag'))
# change the session language and set content
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'es', es_message)
# change the language again
translation.activate('en')
# retrieve the content from cache
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, en_message.encode())
# change the language again
translation.activate('es')
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, es_message.encode())
# reset the language
translation.deactivate()
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
)
def test_middleware_doesnt_cache_streaming_response(self):
request = self.factory.get(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# This test passes on Python < 3.3 even without the corresponding code
# in UpdateCacheMiddleware, because pickling a StreamingHttpResponse
# fails (http://bugs.python.org/issue14288). LocMemCache silently
# swallows the exception and doesn't store the response in cache.
content = ['Check for cache with streaming content.']
response = StreamingHttpResponse(content)
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix'
},
},
)
class PrefixedCacheI18nTest(CacheI18nTest):
pass
def hello_world_view(request, value):
return HttpResponse('Hello World %s' % value)
def csrf_view(request):
return HttpResponse(csrf(request)['csrf_token'])
@override_settings(
CACHE_MIDDLEWARE_ALIAS='other',
CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
CACHE_MIDDLEWARE_SECONDS=30,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other',
'TIMEOUT': '1',
},
},
)
class CacheMiddlewareTest(TestCase):
def setUp(self):
super(CacheMiddlewareTest, self).setUp()
self.factory = RequestFactory()
self.default_cache = caches['default']
self.other_cache = caches['other']
def tearDown(self):
self.default_cache.clear()
self.other_cache.clear()
super(CacheMiddlewareTest, self).tearDown()
def test_constructor(self):
"""
Ensure the constructor correctly distinguishes between CacheMiddleware being
used as middleware and as a view decorator, and sets attributes appropriately.
"""
# If no arguments are passed in construction, it's being used as middleware.
middleware = CacheMiddleware()
# Now test object attributes against values defined in setUp above
self.assertEqual(middleware.cache_timeout, 30)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
# If arguments are being passed in construction, it's being used as a decorator.
# First, test with "defaults":
as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None)
self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 30
self.assertEqual(as_view_decorator.key_prefix, '')
self.assertEqual(as_view_decorator.cache_alias, 'default') # Value of DEFAULT_CACHE_ALIAS from django.core.cache
# Next, test with custom values:
as_view_decorator_with_custom = CacheMiddleware(cache_timeout=60, cache_alias='other', key_prefix='foo')
self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
def test_middleware(self):
middleware = CacheMiddleware()
prefix_middleware = CacheMiddleware(key_prefix='prefix1')
timeout_middleware = CacheMiddleware(cache_timeout=1)
request = self.factory.get('/view/')
# Put the request through the request middleware
result = middleware.process_request(request)
self.assertIsNone(result)
response = hello_world_view(request, '1')
# Now put the response through the response middleware
response = middleware.process_response(request, response)
# Repeating the request should result in a cache hit
result = middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
# The same request through a different middleware won't hit
result = prefix_middleware.process_request(request)
self.assertIsNone(result)
# The same request with a timeout _will_ hit
result = timeout_middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
def test_view_decorator(self):
# decorate the same view with different cache decorators
default_view = cache_page(3)(hello_world_view)
default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
explicit_default_view = cache_page(3, cache='default')(hello_world_view)
explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
other_view = cache_page(1, cache='other')(hello_world_view)
other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)
request = self.factory.get('/view/')
# Request the view once
response = default_view(request, '1')
self.assertEqual(response.content, b'Hello World 1')
# Request again -- hit the cache
response = default_view(request, '2')
self.assertEqual(response.content, b'Hello World 1')
# Requesting the same view with the explicit cache should yield the same result
response = explicit_default_view(request, '3')
self.assertEqual(response.content, b'Hello World 1')
# Requesting with a prefix will hit a different cache key
response = explicit_default_with_prefix_view(request, '4')
self.assertEqual(response.content, b'Hello World 4')
# Hitting the same view again gives a cache hit
response = explicit_default_with_prefix_view(request, '5')
self.assertEqual(response.content, b'Hello World 4')
# And going back to the implicit cache will hit the same cache
response = default_with_prefix_view(request, '6')
self.assertEqual(response.content, b'Hello World 4')
# Requesting from an alternate cache won't hit cache
response = other_view(request, '7')
self.assertEqual(response.content, b'Hello World 7')
# But a repeated hit will hit cache
response = other_view(request, '8')
self.assertEqual(response.content, b'Hello World 7')
# And prefixing the alternate cache yields yet another cache entry
response = other_with_prefix_view(request, '9')
self.assertEqual(response.content, b'Hello World 9')
# But if we wait a couple of seconds...
time.sleep(2)
# ... the default cache will still hit
caches['default']
response = default_view(request, '11')
self.assertEqual(response.content, b'Hello World 1')
# ... the default cache with a prefix will still hit
response = default_with_prefix_view(request, '12')
self.assertEqual(response.content, b'Hello World 4')
# ... the explicit default cache will still hit
response = explicit_default_view(request, '13')
self.assertEqual(response.content, b'Hello World 1')
# ... the explicit default cache with a prefix will still hit
response = explicit_default_with_prefix_view(request, '14')
self.assertEqual(response.content, b'Hello World 4')
# .. but a rapidly expiring cache won't hit
response = other_view(request, '15')
self.assertEqual(response.content, b'Hello World 15')
# .. even if it has a prefix
response = other_with_prefix_view(request, '16')
self.assertEqual(response.content, b'Hello World 16')
def test_sensitive_cookie_not_cached(self):
"""
Django must prevent caching of responses that set a user-specific (and
maybe security sensitive) cookie in response to a cookie-less request.
"""
csrf_middleware = CsrfViewMiddleware()
cache_middleware = CacheMiddleware()
request = self.factory.get('/view/')
self.assertIsNone(cache_middleware.process_request(request))
csrf_middleware.process_view(request, csrf_view, (), {})
response = csrf_view(request)
response = csrf_middleware.process_response(request, response)
response = cache_middleware.process_response(request, response)
# Inserting a CSRF cookie in a cookie-less request prevented caching.
self.assertIsNone(cache_middleware.process_request(request))
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class TestWithTemplateResponse(TestCase):
"""
Tests various headers w/ TemplateResponse.
Most are probably redundant since they manipulate the same object
anyway but the Etag header is 'special' because it relies on the
content being complete (which is not necessarily always the case
with a TemplateResponse)
"""
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
# Verify that a specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
)
@override_settings(USE_ETAGS=False)
def test_without_etag(self):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertFalse(response.has_header('ETag'))
@override_settings(USE_ETAGS=True)
def test_with_etag(self):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertTrue(response.has_header('ETag'))
class TestMakeTemplateFragmentKey(TestCase):
def test_without_vary_on(self):
key = make_template_fragment_key('a.fragment')
self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')
def test_with_one_vary_on(self):
key = make_template_fragment_key('foo', ['abc'])
self.assertEqual(key,
'template.cache.foo.900150983cd24fb0d6963f7d28e17f72')
def test_with_many_vary_on(self):
key = make_template_fragment_key('bar', ['abc', 'def'])
self.assertEqual(key,
'template.cache.bar.4b35f12ab03cec09beec4c21b2d2fa88')
def test_proper_escaping(self):
key = make_template_fragment_key('spam', ['abc:def%'])
self.assertEqual(key,
'template.cache.spam.f27688177baec990cdf3fbd9d9c3f469')
class CacheHandlerTest(TestCase):
def test_same_instance(self):
"""
Attempting to retrieve the same alias should yield the same instance.
"""
cache1 = caches['default']
cache2 = caches['default']
self.assertIs(cache1, cache2)
def test_per_thread(self):
"""
Requesting the same alias from separate threads should yield separate
instances.
"""
c = []
def runner():
c.append(caches['default'])
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertIsNot(c[0], c[1])
| bsd-3-clause | 2,151,760,029,988,173,000 | 38.614559 | 121 | 0.62664 | false |
LabConnect/Signalgenerator | firmware/HostTestApp/test_generic_hid_libusb.py | 1 | 3211 | #!/usr/bin/env python
"""
LUFA Library
Copyright (C) Dean Camera, 2014.
dean [at] fourwalledcubicle [dot] com
www.lufa-lib.org
"""
"""
LUFA Generic HID device demo host test script. This script will send a
continuous stream of generic reports to the device, to show a variable LED
pattern on the target board. Send and received report data is printed to
the terminal.
Requires the PyUSB library (http://sourceforge.net/apps/trac/pyusb/).
"""
import sys
from time import sleep
import usb.core
import usb.util
# Generic HID device VID, PID and report payload length (length is increased
# by one to account for the Report ID byte that must be pre-pended)
device_vid = 0x03EB
device_pid = 0x204F
def get_and_init_hid_device():
device = usb.core.find(idVendor=device_vid, idProduct=device_pid)
if device is None:
sys.exit("Could not find USB device.")
if device.is_kernel_driver_active(0):
try:
device.detach_kernel_driver(0)
except usb.core.USBError as exception:
            sys.exit("Could not detach kernel driver: %s" % str(exception))
try:
device.set_configuration()
except usb.core.USBError as exception:
sys.exit("Could not set configuration: %s" % str(exception))
return device
def send_led_pattern(device, led1, led2, led3, led4):
    # Report data sent to the device (a fixed payload in this demo; the
    # led1-led4 arguments are not used to build it)
report_data = [0x20, 0x00, 0x40, 0x00, 0x69, 0xf1, 0x00, 0x00, 0x00, 0x00, 0x38]
# Send the generated report to the device
number_of_bytes_written = device.ctrl_transfer( # Set Report control request
0b00100001, # bmRequestType (constant for this control request)
0x09, # bmRequest (constant for this control request)
0, # wValue (MSB is report type, LSB is report number)
0, # wIndex (interface number)
report_data # report data to be sent
);
assert number_of_bytes_written == len(report_data)
print("Sent LED Pattern: {0}".format(report_data))
def receive_led_pattern(hid_device):
endpoint = hid_device[0][(0,0)][0]
report_data = hid_device.read(endpoint.bEndpointAddress, endpoint.wMaxPacketSize)
return list(report_data)
def main():
hid_device = get_and_init_hid_device()
print("Connected to device 0x%04X/0x%04X - %s [%s]" %
(hid_device.idVendor, hid_device.idProduct,
usb.util.get_string(hid_device, 256, hid_device.iProduct),
usb.util.get_string(hid_device, 256, hid_device.iManufacturer)))
p = 0
while (True):
# Convert the current pattern index to a bit-mask and send
send_led_pattern(hid_device,
(p >> 3) & 1,
(p >> 2) & 1,
(p >> 1) & 1,
(p >> 0) & 1)
# Receive and print the current LED pattern
led_pattern = receive_led_pattern(hid_device)[0:4]
print("Received LED Pattern: {0}".format(led_pattern))
# Compute next LED pattern in sequence
p = (p + 1) % 16
# Delay a bit for visual effect
sleep(.2)
if __name__ == '__main__':
main()
| gpl-2.0 | 9,159,657,997,410,438,000 | 31.765306 | 85 | 0.621925 | false |
bilabon/django-s3direct | runtests.py | 5 | 2028 | import sys
import os
from os import environ
import django
from django.conf import settings
settings.configure(DEBUG=True,
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
ROOT_URLCONF='s3direct.urls',
INSTALLED_APPS=('django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
's3direct',),
MIDDLEWARE_CLASSES=('django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',),
AWS_ACCESS_KEY_ID=environ.get('AWS_ACCESS_KEY_ID', ''),
AWS_SECRET_ACCESS_KEY=environ.get(
'AWS_SECRET_ACCESS_KEY', ''),
AWS_STORAGE_BUCKET_NAME=environ.get(
'AWS_STORAGE_BUCKET_NAME',
'test-bucket'),
S3DIRECT_REGION='us-east-1',
S3DIRECT_DESTINATIONS={
'misc': (lambda original_filename: 'images/unique.jpg',),
'files': ('uploads/files', lambda u: u.is_staff,),
'imgs': ('uploads/imgs', lambda u: True, ['image/jpeg', 'image/png'],),
'vids': ('uploads/vids', lambda u: u.is_authenticated(), ['video/mp4'],)
})
if hasattr(django, 'setup'):
django.setup()
if django.get_version() < '1.6':
from django.test.simple import DjangoTestSuiteRunner
test_runner = DjangoTestSuiteRunner(verbosity=1)
else:
from django.test.runner import DiscoverRunner
test_runner = DiscoverRunner(verbosity=1)
failures = test_runner.run_tests(['s3direct', ])
if failures:
sys.exit(failures)
| mit | 2,137,180,755,506,505,700 | 40.387755 | 99 | 0.498521 | false |
on-c-e/oncepy | caltext.py | 1 | 37073 | #! python
import codecs
import datetime
import sys
import os
import pprint as ppr
from numpy import *
import numpy.linalg as LA
from sympy import *
from sympy import var as varsym
from sympy import printing
from once.calunit import *
from once.calcheck import ModCheck
from once import config as cfg
__version__ = "0.9.0"
__author__ = '[email protected]'
class CalcUTF(object):
"""Return UTF-8 calcs
::
Arguments:
odict (ordered dict) : model dictionary
Files written:
cfile : UTF-8 calc file name
cfilepy : Python model file name
Operation keys, number of parameters and associated tags:
_r + line number - 6 - [r] run
_i + line number - 6 - [i] insert
_v + line number - 4 - [v] value
_e + line number - 7 - [e] equation
_t + line number - 9 - [t] table
_s + line number - 3 - [s] sections
_~ + line number - 1 - blank line
_x + line number - 2 - pass-through text
_y + line number - 2 - value heading
_# - 1 - control line
_lt - 2 - license text [licensetext]
[r] p0 | p1 | p2 | p3 | p4 | p5
'os' command arg1 arg2 arg3 arg4
'py' script arg1 arg2 arg3 arg4
[i] p0 | p1 | p2 | p4 | p5
'fig' fig file caption size location
'tex' text file descrip (text)
'mod' model file descrip (text)
'fun' funct file var name descrip (text)
'rea' file var name descrip (text)
'wri' file var name descrip (text)
'app' file var name descrip (text)
[v] p0 | p1 | p2 | p3
var expr statemnt descrip
[e] p0 | p1 | p2 | p3 | p4 | p5 | p6
var expr statemnt descrip dec1 dec2 units
[t] p0 | p1 | p2 | p3 | p4 | p5 | p6 | p7 | p8
var expr state1 desc range1 range2 dec1 un1 un2
[s] p0 | p1 | p2 | p3
left string calc number sect num toc flag
"""
def __init__(self, odict1):
"""Initialize parameters for UTF calc.
::
Arguments:
odict1 (dictionary): model dictionary
"""
# dicts
self.vbos = cfg.verboseflag
self.el = ModCheck()
self.odict = odict1
# paths and files
self.mpath = cfg.ppath
self.mfile = cfg.mfile
self.cfile = cfg.cfileutf
self.ppath = cfg.ppath
self.cpath = cfg.cpath
self.calfile = os.path.join(self.cpath, self.cfile)
#print("calcfile path", self.calfile)
self.cfile = codecs.open(self.calfile, 'w', encoding='utf8')
self.cfilepy = cfg.cfilepy
self.cfilepypath = os.path.join(cfg.spath,cfg.cfilepy)
# parameters
self.fignum = 0
self.widthc = cfg.calcwidth
self.xtraline = False
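    # A minimal sketch of the entries this class expects in the model
    # dictionary (an ordered dict in practice). The keys and values below
    # are illustrative assumptions following the tag table in the class
    # docstring, not data from a real model:
    #
    #   odict1 = {
    #       '_s01': ['Beam Design', '01', '1', 1],                 # [s] section
    #       '_v02': ['b', '12*IN', 'b = 12*IN', 'beam width'],     # [v] value
    #       '_e03': ['M', 'w*L**2/8', 'M = w*L**2/8', 'max moment',
    #                '2', '2', 'KN*M', '[01.1]'],                  # [e] equation
    #   }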
def _gen_utf(self):
"""Generate utf calc from model dictionary.
"""
_dt = datetime.datetime
self._write_utf(str(_dt.now()) + " on-c-e version: " +
__version__, 0, 0)
if cfg.stocflag:
_sects1 = []
for _dkey in self.odict:
if _dkey[0:2] == '_s':
_sval1 = self.odict[_dkey]
_sects1.append(str(_sval1[2]) + ' ' + str(_sval1[0]))
self._write_utf('\n Calc Sections', 0, 1)
self._write_utf(' =============', 0, 1)
for _sect1 in _sects1:
self._write_utf(' '+ _sect1, 0, 1)
self._write_utf('\n', 0, 0)
for _i in self.odict:
mtag = _i[0:2]
mvals = self.odict[_i]
#print('rmtag', mtag, _i, self.odict[_i])
if mvals[0].strip() == '#- stop':
sys.exit(1)
if mtag == '_#':
self.xtraline = False
continue
if mtag == '_~':
self._prt_blnk()
self.xtraline = False
continue
if mtag == '_r': # execute dictionary line by line
self._prt_run(self.odict[_i])
self.xtraline = False
elif mtag == '_i':
self._prt_ins(self.odict[_i])
self.xtraline = False
elif mtag == '_v':
self._prt_val2(self.odict[_i])
self.xtraline = False
elif mtag == '_e':
#print('_e', self.odict[_i])
self._prt_eq(self.odict[_i])
self.xtraline = True
elif mtag == '_t':
self._prt_table(self.odict[_i])
self.xtraline = True
elif mtag == '_s':
self._prt_sect(self.odict[_i])
self.xtraline = False
elif mtag == '_x':
self._prt_txt(self.odict[_i])
self.xtraline = False
elif mtag == '_y':
self._prt_val1(self.odict[_i])
self.xtraline = False
else:
                pass
if self.xtraline:
self._prt_blnk()
for _i2 in self.odict: # add calc license
if _i2 == '_lt':
self._write_utf(self.odict[_i2],0)
self._write_utf('\n[end of calc]', 0, 0) # end calc
self.cfile.close() # close calc file
self.el.logwrite("< UTF calc written >", self.vbos)
#for _i in self.odict: print(i, self.odict[i])
def get_odict(self):
"""Return model dictionary
"""
return self.odict
def _prt_blnk(self):
"""Insert blank line.
"""
self._write_utf(' ', 0, 0)
    def _prt_txt(self, txt):
        """Print or strip pass-through text.
        txt (string): text that is not part of an operation
        """
if txt[0].strip() == '|' : return
elif txt[0].strip() == "::" : return
elif txt[0].strip() == '`' : return
elif txt[0][2:4] == ".." : return
elif txt[0][0] == '|' : self._write_utf(txt[0], 1, 0)
elif txt[0][2] == "\\" : self._write_utf(txt[0], 1, 0)
else: self._write_utf(txt[0], 1, 0)
    def _write_utf(self, mentry, pp, indent=0):
"""Write model text to utf-8 encoded file.
mentry (string): text
pp (int): pretty print flag
indent (int): indent flag
"""
if pp: mentry = pretty(mentry, use_unicode=True, num_columns=92)
if indent: mentry = " "*4 + mentry
print(mentry, file=self.cfile)
def _prt_run(self, dval):
"""Process run operations.
::
options: script, os
scripts are stored in resource subfolder
"""
        #print('dval', dval)
        option = dval[0].strip()
        fpath = dval[1].strip()
        fp = os.path.abspath(fpath)
        var1 = dval[2].strip()
        var2 = dval[3].strip()
        var3 = dval[4]                          # variable with edit lines
        if option == 'script':                  # execute script in model namespace
            with open(fp, 'r') as f1:
                fr = f1.read()
            exec(fr, globals())
            link1 = "< ran python script : " + str(fpath) + " >"
            self.el.logwrite(link1, self.vbos)
            self.el.logwrite("file: " + str(fp) + " executed", self.vbos)
        elif option == 'os':                    # execute operating system command
            os.system(fpath)
            link1 = "< execute command: " + str(fp) + " >"
            self.el.logwrite(link1, self.vbos)
        else:
            pass
def _prt_ins(self, dval):
"""Insert file data into or from UTF calc
::
[i] p0 | p1 | p2 | p3 | p4
'fig' file caption size location
'text' file reference
'lit' file reference
'csv' file
'mod' file
'func' file var name reference
'read' file var name vector or table
'write' file var name
'app' file var name
only the first three letters of p0 are read
"""
option = dval[0].strip()[0:3]
fname1 = dval[1].strip()
caption1 = dval[2].strip()
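        # For example (file name and caption are made-up values), a dval of
        #   ['fig', 'beam_elev.png', 'Beam elevation', '', '']
        # produces the calc line "figure | beam_elev.png | Beam elevation".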
if option == 'fig':
self._write_utf("figure | " + fname1 + " | " + caption1, 0, 1)
"""
if option == 'text':
# insert file from text into model, do not process
with open(fpath, 'r') as f1:
txstrng = f1.readlines()
if var1.strip() != '':
instxt = eval('txstrng[' + var1.strip() + ']')
instxt = ''.join(instxt)
else:
instxt = ''.join(txstrng)
self._write_utf(instxt, 0)
link1 = "< text inserted from file: " + str(fpath) + " >"
self.ew.errwrite(link1, 1)
self.ew.errwrite('', 0)
elif option == 'write':
# write data to file, replace if exists
sep1 = var2
if sep1 == '':
sep1 = ','
elif sep1 == '*':
sep1 = " "
format1 = var3
if format1 == '':
format1 = '%s'
file1 = open(fp, 'w')
var11 = array(var1)
var11.tofile(file1, sep=sep1, format=format1)
file1.close()
link1 = "< write variable " + var1 + " to file: " \
+ str(fp) + ">"
self.ew.errwrite(link1, 0)
elif option == 'w+':
# append data to file
sep1 = var2
if sep1 == '':
sep1 = ','
elif sep1 == '*':
sep1 = " "
format1 = var3
if format1 == '':
format1 = '%s'
file1 = open(fp, 'a')
var11 = array(var1)
var11.tofile(file1, sep=sep1, format=format1)
file1.close()
link1 = "< append variable " + var1 + " to file: " \
+ str(fp) + ">"
self.ew.errwrite(link1, 0)
elif option == 'figure':
# insert figure reference in utf-8 document
self.fignum += 1
link1 = "< insert figure " + str(self.fignum) + '. ' \
+ " file: " + str(fpath) + '>'
self.ew.errwrite(link1, 0)
link2 = "Figure " + str(self.fignum) + '. ' + var1 \
+ " <file: " + str(fpath) + " >"
self._write_utf(link2, 1)
self._write_utf(" ", 0)
elif option == 'model':
# this option is handled in ModDicts.__init__
# comodels are always processed first, regardless of location
pass
# absolute path specified for the following options
elif option == 'read':
self._prt_read(dval)
else:
pass
"""
def _prt_val1(self, dval):
"""Print term description.
::
key: _y + line number
_y : p0 | p1
description eqnum
"""
#print('dval_v1', dval)
self._write_utf((dval[0] + " " + dval[1]).rjust(self.widthc-1), 0, 0)
self._write_utf(" ", 0, 0)
def _prt_val2(self, dval):
"""Print terms.
::
key: _v + line number
_v : p0 | p1 | p2 | p3
var expr statemnt descrip
"""
#print('dval_v2', dval)
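        # Illustrative dval (values are assumptions, not from a real model):
        #   ['w', '2.4*KN/M', 'w = 2.4*KN/M', 'uniform load']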
exec(dval[2])
val1 = eval(dval[1])
var1 = dval[2].split('=')[0].strip()
state = var1 + ' = ' + str(val1)
ptype = type(val1)
if ptype == ndarray or ptype == list or ptype == tuple:
state = var1 + ' = ' + '\n' + str(val1)
shift = int(self.widthc / 2.5)
ref = dval[3].strip().ljust(shift)
if ptype == ndarray or ptype == list or ptype == tuple:
self._write_utf(" "*2 + ref + " | " + state, 0, 1)
else:
self._write_utf(" "*2 + ref + " | " + state, 1, 1)
def _prt_eq(self, dval):
""" print equations.
key : _e + line number
value: p0 | p1 | p2 | p3 | p4 | p5 | p6 | p7
var expr state descrip dec1 dec2 unit eqnum
"""
try: # set decimal format
eformat, rformat = str(dval[4]).strip(), str(dval[5]).strip()
exec("set_printoptions(precision=" + eformat.strip() + ")")
exec("Unum.VALUE_FORMAT = '%." + eformat.strip() + "f'")
except:
rformat = '3'
eformat = '3'
set_printoptions(precision=3)
Unum.VALUE_FORMAT = "%.3f"
cunit = dval[6].strip()
var0 = dval[0].strip()
#print('dval_e', dval
for k1 in self.odict: # evaluate
if k1[0:2] in ['_v','_e']:
try: exec(self.odict[k1][2].strip())
except: pass
tmp = int(self.widthc-2) * '-' # print line
self._write_utf(" ", 0, 0)
self._write_utf((u'\u250C' + tmp + u'\u2510').rjust(self.widthc), 1, 0)
self._write_utf((dval[3] + " " + dval[7]).rjust(self.widthc-1), 0, 0)
self._write_utf(" ", 0, 0)
for _j in self.odict: # symbolic form
if _j[0:2] in ['_v','_e']:
#print(str(self.odict[_j][0]))
varsym(str(self.odict[_j][0]))
try:
symeq = sympify(dval[1].strip()) # sympy form
self._write_utf(symeq, 1, 0)
self._write_utf(" ", 0, 0)
self._write_utf(" ", 0, 0)
except:
self._write_utf(dval[1], 1, 0) # ASCII form
self._write_utf(" ", 0, 0)
try: # substitute
symat = symeq.atoms(Symbol)
for _n2 in symat:
evlen = len((eval(_n2.__str__())).__str__()) # get var length
new_var = str(_n2).rjust(evlen, '~')
new_var = new_var.replace('_','|')
symeq1 = symeq.subs(_n2, symbols(new_var))
out2 = pretty(symeq1, wrap_line=False)
#print('out2a\n', out2)
symat1 = symeq1.atoms(Symbol) # adjust character length
for _n1 in symat1:
orig_var = str(_n1).replace('~', '')
orig_var = orig_var.replace('|', '_')
try:
expr = eval((self.odict[orig_var][1]).split("=")[1])
if type(expr) == float:
form = '{:.' + eformat +'f}'
symeval1 = form.format(eval(str(expr)))
else:
symeval1 = eval(orig_var.__str__()).__str__()
except:
symeval1 = eval(orig_var.__str__()).__str__()
out2 = out2.replace(_n1.__str__(), symeval1)
#print('out2b\n', out2)
out3 = out2 # clean up unicode
                out3 = out3.replace('*', u'\u22C5')
#print('out3a\n', out3)
_cnt = 0
for _m in out3:
if _m == '-':
_cnt += 1
continue
else:
if _cnt > 1:
out3 = out3.replace('-'*_cnt, u'\u2014'*_cnt)
_cnt = 0
#print('out3b \n', out3)
self._write_utf(out3, 1, 0) # print substituted form
self._write_utf(" ", 0, 0)
except:
pass
for j2 in self.odict: # restore units
try:
statex = self.odict[j2][2].strip()
exec(statex)
except:
pass
typev = type(eval(var0)) # print result right justified
if typev == ndarray:
tmp1 = eval(var0)
self._write_utf((var0 + " = "), 1, 0)
self._write_utf(' ', 0, 0)
self._write_utf(tmp1, 0, 0)
elif typev == list or typev == tuple:
tmp1 = eval(var0)
self._write_utf((var0 + " = "), 1)
self._write_utf(' ', 0)
plist1 = ppr.pformat(tmp1, width=40)
self._write_utf(plist1, 0, 0)
elif typev == Unum:
exec("Unum.VALUE_FORMAT = '%." + rformat.strip() + "f'")
if len(cunit) > 0:
tmp = eval(var0).au(eval(cunit))
else:
tmp = eval(var0)
tmp1 = tmp.strUnit()
tmp2 = tmp.asNumber()
chkunit = str(tmp).split()
#print('chkunit', tmp, chkunit)
if len(chkunit) < 2: tmp1 = ''
resultform = "{:,."+ rformat + "f}"
result1 = resultform.format(tmp2)
tmp3 = result1 + ' ' + tmp1
self._write_utf((var0 + " = " + tmp3).rjust(self.widthc-1), 1, 0)
else:
if type(eval(var0)) == float or type(eval(var0)) == float64:
resultform = "{:,."+rformat + "f}"
result1 = resultform.format(eval(var0))
self._write_utf((var0 +"="+
str(result1)).rjust(self.widthc-1), 1, 0)
else:
self._write_utf((var0 +"="+
str(eval(var0))).rjust(self.widthc-1), 1, 0)
tmp = int(self.widthc-2) * '-' # print horizontal line
self._write_utf((u'\u2514' + tmp + u'\u2518').rjust(self.widthc), 1, 0)
self._write_utf(" ", 0, 0)
def _prt_table(self, dval):
"""print arrays
Dictionary:
arrays: [[a], statement, expr, range1, range2,
ref, decimals, unit1, unit2, modnum, eqnum]
"""
try:
eformat, rformat = dval[6].split(',')
exec("set_printoptions(precision=" + eformat.strip() + ")")
exec("Unum.VALUE_FORMAT = '%." + eformat.strip() + "f'")
except:
rformat = '3'
eformat = '3'
set_printoptions(precision=3)
Unum.VALUE_FORMAT = "%.3f"
#print('array dval', dval)
# table heading
tmp = int(self.widthc-2) * '-'
self._write_utf((u'\u250C' + tmp + u'\u2510').rjust(self.widthc), 0, 0)
tleft = 'Table'
self._write_utf((tleft + ' ' + dval[10]).rjust(self.widthc), 0, 0)
self._write_utf(dval[5].strip().rjust(self.widthc-1), 0, 0)
self._write_utf(' ', 0, 0)
vect = dval[1:]
# symbolic forms
for _j in self.symb:
if str(_j)[0] != '_':
varsym(str(_j))
# range variables
try:
var1 = vect[2].strip()
var2 = vect[3].strip()
except:
pass
# equation
try:
var0 = vect[0].split('=')[0].strip()
var0s = varsym(var0)
symeq = vect[0].split('=')[1].strip()
symeq1 = sympify(symeq)
except:
pass
# evaluate equation and array variables - keep units
for k1 in self.odict:
if k1[0] != '_' or k1[0:2] == '_a':
try: exec(self.odict[k1][3].strip())
except: pass
try: exec(self.odict[k1][4].strip())
except: pass
try: exec(self.odict[k1][1].strip())
except: pass
#print(k1, eval(k1))
# write explicit table
if len(str(vect[2])) == 0 and len(str(vect[3])) == 0:
ops = [' - ',' + ',' * ',' / ']
_z1 = vect[0].split('=')[0].strip()
cmd_str1 = _z1 + ' = array(' + vect[1] +')'
exec(cmd_str1)
cunit = dval[7]
print('cunit', cunit)
_rc = eval(_z1).tolist()
# evaluate variables with units
for inx in ndindex(eval(_z1).shape):
print(21, type(_rc[inx[0]][inx[1]]),_rc[inx[0]][inx[1]] )
try:
_fltn2a = _rc[inx[0]][inx[1]]
_fltn2b = _fltn2a.au(eval(cunit))
_fltn2c = _fltn2b.asNumber()
_rc[inx[0]][inx[1]] = str(_fltn2c)
except:
pass
# evaluate numbers
for inx in ndindex(eval(_z1).shape):
try:
_fltn1 = float(_rc[inx[0]][inx[1]])
_rc[inx[0]][inx[1]] = _fltn1
except:
pass
# evaluate expressions
for inx in ndindex(eval(_z1).shape):
for _k in ops:
if _k in str(_rc[inx[0]][inx[1]]) :
_fltn2 = _rc[inx[0]][inx[1]]
_rc[inx[0]][inx[1]] = eval(_fltn2)
break
# print table
table2 = tabulate
fltf = "." + eformat.strip() + "f"
ptable = table2.tabulate(_rc[1:], _rc[0], 'rst', floatfmt=fltf)
nstr = pretty(ptable, use_unicode=True, num_columns=92)
self._write_utf(nstr, 1, 0)
tmp = int(self.widthc-1) * '-'
self._write_utf((u'\u2514' + tmp + u'\u2518').rjust(self.widthc), 0, 0)
return
# evaluate equation and array variables - strip units
for k1 in self.odict:
if k1[0] != '_' or k1[0:2] == '_a':
try: exec(self.odict[k1][3].strip())
except: pass
try: exec(self.odict[k1][4].strip())
except: pass
try: exec(self.odict[k1][1].strip())
except: pass
try:
state = self.odict[k1][1].strip()
varx = state.split('=')
state2 = varx[0].strip()+'='+varx[0].strip() + '.asNumber()'
exec(state2)
#print('j1', k1)
except:
pass
#print(k1, eval(k1))
# imported table
if len(str(vect[1])) == 0:
_a = eval(vect[0])
# print table
table2 = tabulate
flt1 = "." + eformat.strip() + "f"
ptable = table2.tabulate(_a[1:], _a[0], 'rst', floatfmt=flt1)
nstr = pretty(ptable, use_unicode=True, num_columns=92)
self._write_utf(nstr, 1)
tmp = int(self.widthc-1) * '-'
self._write_utf((u'\u2514' + tmp + u'\u2518').rjust(self.widthc), 0)
# single row vector - 1D table
elif len(str(vect[3])) == 0 and len(str(vect[0])) != 0:
out1 = var0s
out1a = symeq1
out2 = var1
self._write_utf(" ", 0)
self._write_utf("Variables: ", 0)
self._write_utf("----------", 0)
self._write_utf(' ', 0)
self._write_utf(out2, 1)
self._write_utf(' ', 0)
self._write_utf(out1a, 1)
self._write_utf(' ', 0)
# process range variable 1
rnge1 = vect[2]
exec(rnge1.strip())
rnge1a = rnge1.split('=')
rlist = [vect[6].strip() + ' = ' +
str(_r) for _r in eval(rnge1a[1])]
#print('rlist', rlist)
#process equation
equa1 = vect[0].strip()
#print('equa1', equa1)
exec(equa1)
var2 = equa1.split('=')[0]
etype = equa1.split('=')[1]
elist1 = eval(var2)
if etype.strip()[:1] == '[':
# data is in list form
elist2 = []
alist1 = eval(equa1.split('=')[1])
for _v in alist1:
try: elist2.append(list(_v))
except: elist2.append(_v)
else:
try: elist2 = elist1.tolist()
except: elist2 = elist1
elist2 = [elist2]
#print('elist', elist2)
# create table
table1 = tabulate
ptable = table1.tabulate(elist2, rlist, 'rst',
floatfmt="."+ eformat +"f")
self._write_utf(ptable, 1)
tmp = int(self.widthc-2) * '-'
self._write_utf((u'\u2514' + tmp + u'\u2518').rjust(self.widthc), 0)
# 2D table
elif len(str(vect[3])) != 0 and len(str(vect[0])) != 0:
out1 = var0s
out1a = symeq1
out2 = var1
out3 = var2
self._write_utf(" ", 0)
self._write_utf("Variables: ", 0)
self._write_utf("----------", 0)
self._write_utf(" ", 0)
self._write_utf(out2, 1)
self._write_utf(' ', 0)
self._write_utf(out3, 1)
self._write_utf(" ", 0)
self._write_utf(out1a, 1)
self._write_utf(' ', 0)
rnge1 = vect[2] # process range variable 1
exec(rnge1.strip())
rnge1a = rnge1.split('=')
rlist = [vect[6].strip() + ' = ' +
str(_r) for _r in eval(rnge1a[1])]
rnge2 = vect[3] # process range variable 2
exec(rnge2.strip())
rnge2a = rnge2.split('=')
clist = [str(_r).strip() for _r in eval(rnge2a[1])]
rlist.insert(0, vect[7].strip())
equa1 = vect[0].strip() # process equation
exec(equa1)
etype = equa1.split('=')[1]
if etype.strip()[:1] == '[':
# data is in list form
#alist = []
alist = eval(equa1.split('=')[1])
#for _v in alist1:
# for _x in _v:
# print('_x', type(_x), _x)
# alist.append(list(_x))
else:
# data is in equation form
equa1a = vect[0].strip().split('=')
equa2 = equa1a[1]
rngx = rnge1a[1]
rngy = rnge2a[1]
ascii1 = rnge1a[0].strip()
ascii2 = rnge2a[0].strip()
# format table
alist = []
for _y12 in eval(rngy):
alistr = []
for _x12 in eval(rngx):
eq2a = equa2.replace(ascii1, str(_x12))
eq2b = eq2a.replace(ascii2, str(_y12))
el = eval(eq2b)
alistr.append(el)
alist.append(alistr)
for _n, _p in enumerate(alist):
_p.insert(0, clist[_n])
# print table
table2 = tabulate
flt1 = "." + eformat.strip() + "f"
ptable = table2.tabulate(alist, rlist, 'rst', floatfmt=flt1)
nstr = pretty(ptable, use_unicode=True, num_columns=92)
self._write_utf(nstr, 1)
tmp = int(self.widthc-1) * '-'
self._write_utf((u'\u2514' + tmp + u'\u2518').rjust(self.widthc), 0)
def _prt_sect(self, dval):
"""Print sections to UTF-8.
::
key : value
_s : p0 | p1 | p2
left string calc number sect num
"""
self._write_utf('='*self.widthc, 0, 0)
self._write_utf(" " + dval[0] + ((dval[1])+dval[2]).rjust(self.widthc -
len(dval[0])-2), 1, 0)
self._write_utf('='*self.widthc, 0, 0)
def _prt_func(self, dval):
"""Print functions to UTF-8.
::
arguments:
        dval (dictionary value): [[f], function call, var, ref, eqn number]
"""
# convert symbols to numbers - retain units
for k1 in self.odict:
if k1[0] != '_':
try:
exec(self.odict[k1][1].strip())
except:
pass
if k1[0:2] == '_a':
#print('ek1-2', k1, self.odict[k1])
try:
exec(self.odict[k1][3].strip())
exec(self.odict[k1][4].strip())
exec(self.odict[k1][1].strip())
except:
pass
# print reference line
tmp = int(self.widthc-2) * '-'
self._write_utf((u'\u250C' + tmp + u'\u2510').rjust(self.widthc-1), 0)
funcdescrip = dval[3].split(']')[1]
strend = funcdescrip.strip() + ' ' + dval[4].strip()
self._write_utf(strend.rjust(self.widthc - 1), 0)
# evaluate function
self._write_utf(" ", 0)
self._write_utf('return variable: ' + dval[2].strip(), 0)
self._write_utf(" ", 1)
self._write_utf('function call: ' + dval[1].strip(), 0)
funcname = dval[1].split('(')[0]
docs1 = eval(funcname + '.__doc__')
self._write_utf(' ', 0)
self._write_utf('function doc:', 0)
self._write_utf(docs1, 0)
self._write_utf(' ', 0)
#print(dval[1].strip())
return1 = eval(dval[1].strip())
if return1 is None:
self._write_utf('function evaluates to None', 0)
else:
self._write_utf('function return: ', 0)
self._write_utf(return1, 0)
# add function variable to dict
return2 = (return1.__repr__()).replace('\n', '')
self.odict[dval[2]] = ['[z]', str(dval[2])+'='+return2]
#print(self.odict[dval[2]])
tmp = int(self.widthc-2) * '-'
self._write_utf((u'\u2514' + tmp + u'\u2518').rjust(self.widthc), 0)
self._write_utf(" ", 0)
def _write_py(self):
"""write python code to file from dictionary
::
write imports, terms, equations to Python importable file
the following libraries are imported when the file is imported:
os, sys
sympy
numpy and numpy.linalg
unum and units
[v] p0 | p1 | p2 | p3
var expr statemnt descrip
[e] p0 | p1 | p2 | p3 | p4 | p5 | p6
var expr statemnt descrip dec1 dec2 units
[t] p0 | p1 | p2 | p3 | p4 | p5 | p6 | p7 | p8
var expr state1 desc range1 range2 dec1 un1 un2
[s] p0 | p1 | p2 | p3
left string calc number sect num toc flag
"""
pyfile1 = open(self.cfilepypath, 'w')
str1 = ('"""\nThis file contains Python equations from the '
'on-c-e model \n\n '+ self.mfile + '\n'
'\nFor interactive analysis open the file\n'
'in an IDE executable shell e.g. Pyzo,\n'
'Spyder, Jupyter Notebook or Komodo IDE \n'
'""" \n')
str2 = ('import os\n'
'import sys\n'
'from sympy import *\n'
'from numpy import *\n'
'import numpy.linalg as LA\n'
'import importlib.util\n'
'import once.config as cfg\n')
# str2a = ('pypath = os.path.dirname(sys.executable)\n'
# 'oncedir = os.path.join(pypath,"Lib","site-packages","once")\n'
# 'cfg.opath = oncedir\n'
# 'from once.calunit import *\n')
vlist1 = []
vlist2 = []
str3a = str(os.getcwd()).replace("\\", "\\\\")
str3 = "sys.path.append('" + str3a + "')"
importstr = str1 + str2 + str3
pyfile1.write(importstr + 2*"\n")
_vardef =[]
for k1 in self.odict: # write values and equations
if k1[0:2] == '_e' or k1[0:2] == '_v':
try:
exec(self.odict[k1][2].strip())
_vardef.append(self.odict[k1][2].strip() + " #- " +
self.odict[k1][3].strip())
vlist1.append(self.odict[k1][0].strip())
vlist2.append(self.odict[k1][3].strip())
except:
pass
if k1[0:2] == '_s': # write section headings
_vardef.append('#')
_vardef.append('# section: '+self.odict[k1][0].strip())
for il in _vardef:
pyfile1.write(str(il) + '\n\n')
vlen = len(vlist1)
str4 = '[ '
for ix in range(0,vlen):
try:
for jx in range(3*ix, 3*ix+3):
str4 += "'" + str(vlist1[jx]) + "', "
str4 += '\n'
except:
pass
str4 += ']'
str4a = '[ '
for ix in range(0,vlen):
try:
for jx in range(3*ix, 3*ix+3):
str4a += "'" + str(vlist2[jx]) + "', "
str4a += '\n'
except:
pass
str4a += ']'
pyfile1.write('#\n# variables\nvlist1 = ' + str4 + '\n')
pyfile1.write('#\n# variable definitions\nvlist2 = ' + str4a + '\n')
str5 = ('\ndef vlist(vlistx = vlist1, vlisty= vlist2):\n'
' """Utility function for interactively listing once\n'
' variable values. Variables are stored in vlist1 and'
' definitions in vlist2. Type vlist()\n'
' to list updated variable summary after executing\n'
' calc Python script\n'
' """\n\n'
' for lsti in zip(vlistx, vlisty):\n'
' item1 = eval(str(lsti[0]))\n'
' def1 = lsti[1]\n'
' cncat1 = str(lsti[0]) + " = " + str(item1) + " "*30\n'
' print(cncat1[:25] + "# " + def1)\n\n'
'if __name__ == "__main__":\n'
' vlist()'
' \n\n')
pyfile1.write(str5 + "\n")
pyfile1.close()
for lsti in zip(vlist1, vlist2):
#print(lsti)
item1 = eval(str(lsti[0]))
def1 = lsti[1]
cncat1 = str(lsti[0]) + " = " + str(item1) + " "*40
cfg.varevaled += cncat1[:30] + "# " + def1 + "\n"
| mit | -1,956,707,070,761,840,000 | 37.565635 | 100 | 0.402827 | false |
data-exp-lab/girder | test/test_cache.py | 4 | 2574 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2014 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import mock
import pytest
from girder import _setupCache
from girder.constants import SettingKey
from girder.models.setting import Setting
from girder.utility import config
from girder.utility._cache import cache, requestCache
@pytest.fixture
def enabledCache():
"""
Side effect fixture which enables and sets up predefined caches.
"""
cfg = config.getConfig()
cfg['cache']['enabled'] = True
_setupCache()
yield
cfg['cache']['enabled'] = False
_setupCache()
def testCachesAreAlwaysConfigured():
assert cache.is_configured is True
assert requestCache.is_configured is True
def testSettingsCache(db, enabledCache):
setting = Setting()
# 'foo' should be cached as the brand name
setting.set(SettingKey.BRAND_NAME, 'foo')
# change the brand name bypassing the cache via mongo
returnedSetting = setting.findOne({'key': SettingKey.BRAND_NAME})
returnedSetting['value'] = 'bar'
# verify the cache still gives us the old brand name
assert setting.get(SettingKey.BRAND_NAME) == 'foo'
# change the brand name through .set (which updates the cache)
setting.set(SettingKey.BRAND_NAME, 'bar')
# verify retrieving gives us the new value
with mock.patch.object(setting, 'findOne') as findOneMock:
assert setting.get(SettingKey.BRAND_NAME) == 'bar'
# findOne shouldn't be called since the cache is returning the setting
findOneMock.assert_not_called()
# unset the setting, invalidating the cache
setting.unset(SettingKey.BRAND_NAME)
# verify the database needs to be accessed to retrieve the setting now
with mock.patch.object(setting, 'findOne') as findOneMock:
setting.get(SettingKey.BRAND_NAME)
findOneMock.assert_called_once()
| apache-2.0 | -3,508,126,955,801,532,400 | 31.582278 | 79 | 0.672494 | false |
tomwire/AutobahnPython | autobahn/websocket/test/test_websocket_url.py | 11 | 5487 | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import
import unittest2 as unittest
from autobahn.websocket.protocol import createWsUrl, parseWsUrl
class TestCreateWsUrl(unittest.TestCase):
def test_create_url01(self):
self.assertEqual(createWsUrl("localhost"), "ws://localhost:80/")
def test_create_url02(self):
self.assertEqual(createWsUrl("localhost", port=8090), "ws://localhost:8090/")
def test_create_url03(self):
self.assertEqual(createWsUrl("localhost", path="ws"), "ws://localhost:80/ws")
def test_create_url04(self):
self.assertEqual(createWsUrl("localhost", path="/ws"), "ws://localhost:80/ws")
def test_create_url05(self):
self.assertEqual(createWsUrl("localhost", path="/ws/foobar"), "ws://localhost:80/ws/foobar")
def test_create_url06(self):
self.assertEqual(createWsUrl("localhost", isSecure=True), "wss://localhost:443/")
def test_create_url07(self):
self.assertEqual(createWsUrl("localhost", isSecure=True, port=443), "wss://localhost:443/")
def test_create_url08(self):
self.assertEqual(createWsUrl("localhost", isSecure=True, port=80), "wss://localhost:80/")
def test_create_url09(self):
self.assertEqual(createWsUrl("localhost", isSecure=True, port=9090, path="ws", params={'foo': 'bar'}), "wss://localhost:9090/ws?foo=bar")
def test_create_url10(self):
wsurl = createWsUrl("localhost", isSecure=True, port=9090, path="ws", params={'foo': 'bar', 'moo': 23})
self.assertTrue(wsurl == "wss://localhost:9090/ws?foo=bar&moo=23" or wsurl == "wss://localhost:9090/ws?moo=23&foo=bar")
def test_create_url11(self):
self.assertEqual(createWsUrl("127.0.0.1", path="ws"), "ws://127.0.0.1:80/ws")
def test_create_url12(self):
self.assertEqual(createWsUrl("62.146.25.34", path="ws"), "ws://62.146.25.34:80/ws")
def test_create_url13(self):
self.assertEqual(createWsUrl("subsub1.sub1.something.com", path="ws"), "ws://subsub1.sub1.something.com:80/ws")
def test_create_url14(self):
self.assertEqual(createWsUrl("::1", path="ws"), "ws://::1:80/ws")
def test_create_url15(self):
self.assertEqual(createWsUrl("0:0:0:0:0:0:0:1", path="ws"), "ws://0:0:0:0:0:0:0:1:80/ws")
class TestParseWsUrl(unittest.TestCase):
# parseWsUrl -> (isSecure, host, port, resource, path, params)
def test_parse_url01(self):
self.assertEqual(parseWsUrl("ws://localhost"), (False, 'localhost', 80, '/', '/', {}))
def test_parse_url02(self):
self.assertEqual(parseWsUrl("ws://localhost:80"), (False, 'localhost', 80, '/', '/', {}))
def test_parse_url03(self):
self.assertEqual(parseWsUrl("wss://localhost"), (True, 'localhost', 443, '/', '/', {}))
def test_parse_url04(self):
self.assertEqual(parseWsUrl("wss://localhost:443"), (True, 'localhost', 443, '/', '/', {}))
def test_parse_url05(self):
self.assertEqual(parseWsUrl("wss://localhost/ws"), (True, 'localhost', 443, '/ws', '/ws', {}))
def test_parse_url06(self):
self.assertEqual(parseWsUrl("wss://localhost/ws?foo=bar"), (True, 'localhost', 443, '/ws?foo=bar', '/ws', {'foo': ['bar']}))
def test_parse_url07(self):
self.assertEqual(parseWsUrl("wss://localhost/ws?foo=bar&moo=23"), (True, 'localhost', 443, '/ws?foo=bar&moo=23', '/ws', {'moo': ['23'], 'foo': ['bar']}))
def test_parse_url08(self):
self.assertEqual(parseWsUrl("wss://localhost/ws?foo=bar&moo=23&moo=44"), (True, 'localhost', 443, '/ws?foo=bar&moo=23&moo=44', '/ws', {'moo': ['23', '44'], 'foo': ['bar']}))
def test_parse_url09(self):
self.assertRaises(Exception, parseWsUrl, "http://localhost")
def test_parse_url10(self):
self.assertRaises(Exception, parseWsUrl, "https://localhost")
def test_parse_url11(self):
self.assertRaises(Exception, parseWsUrl, "http://localhost:80")
def test_parse_url12(self):
self.assertRaises(Exception, parseWsUrl, "http://localhost#frag1")
def test_parse_url13(self):
self.assertRaises(Exception, parseWsUrl, "wss://")
def test_parse_url14(self):
self.assertRaises(Exception, parseWsUrl, "ws://")
| mit | 4,034,182,700,942,526,000 | 42.204724 | 181 | 0.647348 | false |
Theer108/invenio | invenio/legacy/weblinkback/db_layer.py | 13 | 13392 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebLinkback - Database Layer"""
from invenio.legacy.dbquery import run_sql
from invenio.legacy.weblinkback.config import CFG_WEBLINKBACK_STATUS, \
CFG_WEBLINKBACK_ORDER_BY_INSERTION_TIME, \
CFG_WEBLINKBACK_DEFAULT_USER, \
CFG_WEBLINKBACK_PAGE_TITLE_STATUS
from invenio.utils.text import xml_entities_to_utf8
def get_all_linkbacks(recid=None, status=None, order=CFG_WEBLINKBACK_ORDER_BY_INSERTION_TIME["ASC"], linkback_type=None):
"""
Get all linkbacks
@param recid: of one record, of all if None
@param status: with a certain status, of all if None
@param order: order by insertion time either "ASC" or "DESC"
@param linkback_type: of a certain type, of all if None
@return [(linkback_id,
origin_url,
recid,
additional_properties,
linkback_type,
linkback_status,
insert_time)]
in order by id
"""
header_sql = """SELECT id,
origin_url,
id_bibrec,
additional_properties,
type,
status,
insert_time
FROM lnkENTRY"""
conditions = []
order_sql = "ORDER by id %s" % order
params = []
def add_condition(column, value):
if value:
if not conditions:
conditions.append('WHERE %s=%%s' % column)
else:
conditions.append('AND %s=%%s' % column)
params.append(value)
add_condition('id_bibrec', recid)
add_condition('status', status)
add_condition('type', linkback_type)
return run_sql(header_sql + ' ' + ' '.join(conditions) + ' ' + order_sql, tuple(params))
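# Illustrative usage (the record id is made up): all approved linkbacks of
# record 42, oldest first:
#   get_all_linkbacks(recid=42, status=CFG_WEBLINKBACK_STATUS['APPROVED'])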
def approve_linkback(linkbackid, user_info):
"""
Approve linkback
@param linkbackid: linkback id
@param user_info: user info
"""
update_linkback_status(linkbackid, CFG_WEBLINKBACK_STATUS['APPROVED'], user_info)
def reject_linkback(linkbackid, user_info):
"""
Reject linkback
@param linkbackid: linkback id
@param user_info: user info
"""
update_linkback_status(linkbackid, CFG_WEBLINKBACK_STATUS['REJECTED'], user_info)
def update_linkback_status(linkbackid, new_status, user_info = None):
"""
Update status of a linkback
@param linkbackid: linkback id
@param new_status: new status
@param user_info: user info
"""
if user_info == None:
user_info = {}
user_info['uid'] = CFG_WEBLINKBACK_DEFAULT_USER
run_sql("""UPDATE lnkENTRY
SET status=%s
WHERE id=%s
""", (new_status, linkbackid))
logid = run_sql("""INSERT INTO lnkLOG (id_user, action, log_time)
VALUES
(%s, %s, NOW());
SELECT LAST_INSERT_ID();
""", (user_info['uid'], new_status))
run_sql("""INSERT INTO lnkENTRYLOG (id_lnkENTRY , id_lnkLOG)
VALUES
(%s, %s);
""", (linkbackid, logid))
def create_linkback(origin_url, recid, additional_properties, linkback_type, user_info):
"""
Create linkback
@param origin_url: origin URL,
@param recid: recid
@param additional_properties: additional properties
@param linkback_type: linkback type
@param user_info: user info
@return id of the created linkback
"""
linkbackid = run_sql("""INSERT INTO lnkENTRY (origin_url, id_bibrec, additional_properties, type, status, insert_time)
VALUES
(%s, %s, %s, %s, %s, NOW());
SELECT LAST_INSERT_ID();
""", (origin_url, recid, str(additional_properties), linkback_type, CFG_WEBLINKBACK_STATUS['PENDING']))
logid = run_sql("""INSERT INTO lnkLOG (id_user, action, log_time)
VALUES
(%s, %s, NOW());
SELECT LAST_INSERT_ID();
""", (user_info['uid'], CFG_WEBLINKBACK_STATUS['INSERTED']))
run_sql("""INSERT INTO lnkENTRYLOG (id_lnkENTRY, id_lnkLOG)
VALUES
(%s, %s);
""", (linkbackid, logid))
# add url title entry if necessary
if len(run_sql("""SELECT url
FROM lnkENTRYURLTITLE
WHERE url=%s
""", (origin_url, ))) == 0:
manual_set_title = 0
title = ""
if additional_properties != "" and 'title' in additional_properties.keys():
manual_set_title = 1
title = additional_properties['title']
run_sql("""INSERT INTO lnkENTRYURLTITLE (url, title, manual_set)
VALUES
(%s, %s, %s)
""", (origin_url, title, manual_set_title))
return linkbackid
def get_approved_latest_added_linkbacks(count):
"""
Get approved latest added linkbacks
@param count: count of the linkbacks
@return [(linkback_id,
origin_url,
recid,
additional_properties,
type,
status,
insert_time)]
in descending order by insert_time
"""
return run_sql("""SELECT id,
origin_url,
id_bibrec,
additional_properties,
type,
status,
insert_time
FROM lnkENTRY
WHERE status=%s
ORDER BY insert_time DESC
LIMIT %s
""", (CFG_WEBLINKBACK_STATUS['APPROVED'], count))
def get_url_list(list_type):
"""
@param list_type: of CFG_WEBLINKBACK_LIST_TYPE
@return (url0, ..., urln) in ascending order by url
"""
result = run_sql("""SELECT url
FROM lnkADMINURL
WHERE list=%s
ORDER by url ASC
""", (list_type, ))
return tuple(url[0] for (url) in result)
def get_urls():
"""
Get all URLs and the corresponding listType
@return ((url, CFG_WEBLINKBACK_LIST_TYPE), ..., (url, CFG_WEBLINKBACK_LIST_TYPE)) in ascending order by url
"""
return run_sql("""SELECT url, list
FROM lnkADMINURL
ORDER by url ASC
""")
def url_exists(url, list_type=None):
"""
Check if url exists
    @param url: URL to check
@param list_type: specific list of CFG_WEBLINKBACK_LIST_TYPE, all if None
@return True or False
"""
header_sql = """SELECT url
FROM lnkADMINURL
WHERE url=%s
"""
optional_sql = " AND list=%s"
result = None
if list_type:
result = run_sql(header_sql + optional_sql, (url, list_type))
else:
result = run_sql(header_sql, (url, ))
if result != ():
return True
else:
return False
def add_url_to_list(url, list_type, user_info):
"""
Add a URL to a list
@param url: unique URL string for all lists
@param list_type: of CFG_WEBLINKBACK_LIST_TYPE
@param user_info: user info
@return id of the created url
"""
urlid = run_sql("""INSERT INTO lnkADMINURL (url, list)
VALUES
(%s, %s);
SELECT LAST_INSERT_ID();
""", (url, list_type))
logid = run_sql("""INSERT INTO lnkLOG (id_user, action, log_time)
VALUES
(%s, %s, NOW());
SELECT LAST_INSERT_ID();
""", (user_info['uid'], CFG_WEBLINKBACK_STATUS['INSERTED']))
run_sql("""INSERT INTO lnkADMINURLLOG (id_lnkADMINURL, id_lnkLOG)
VALUES
(%s, %s);
""", (urlid, logid))
return urlid
def remove_url(url):
"""
Remove a URL from list
@param url: unique URL string for all lists
"""
# get ids
urlid = run_sql("""SELECT id
FROM lnkADMINURL
WHERE url=%s
""", (url, ))[0][0]
logids = run_sql("""SELECT log.id
FROM lnkLOG log
JOIN lnkADMINURLLOG url_log
ON log.id=url_log.id_lnkLOG
WHERE url_log.id_lnkADMINURL=%s
""", (urlid, ))
# delete url and url log
run_sql("""DELETE FROM lnkADMINURL
WHERE id=%s;
DELETE FROM lnkADMINURLLOG
WHERE id_lnkADMINURL=%s
""", (urlid, urlid))
# delete log
for logid in logids:
run_sql("""DELETE FROM lnkLOG
WHERE id=%s
""", (logid[0], ))
def get_urls_and_titles(title_status=None):
"""
Get URLs and their corresponding title
    @param title_status: one of CFG_WEBLINKBACK_PAGE_TITLE_STATUS or None
@return ((url, title, manual_set),...), all rows of the table if None
"""
top_query = """SELECT url, title, manual_set, broken_count
FROM lnkENTRYURLTITLE
WHERE
"""
where_sql = ""
if title_status == CFG_WEBLINKBACK_PAGE_TITLE_STATUS['NEW']:
where_sql = " title='' AND manual_set=0 AND"
elif title_status == CFG_WEBLINKBACK_PAGE_TITLE_STATUS['OLD']:
where_sql = " title<>'' AND manual_set=0 AND"
elif title_status == CFG_WEBLINKBACK_PAGE_TITLE_STATUS['MANUALLY_SET']:
where_sql = " manual_set=1 AND"
where_sql += " broken=0"
return run_sql(top_query + where_sql)
def update_url_title(url, title):
"""
Update the corresponding title of a URL
@param url: URL
@param title: new title
"""
run_sql("""UPDATE lnkENTRYURLTITLE
SET title=%s,
manual_set=0,
broken_count=0,
broken=0
WHERE url=%s
""", (title, url))
def remove_url_title(url):
"""
Remove URL title
@param url: URL
"""
run_sql("""DELETE FROM lnkENTRYURLTITLE
WHERE url=%s
""", (url, ))
def set_url_broken(url):
"""
Set URL broken
@param url: URL
"""
linkbackids = run_sql("""SELECT id
FROM lnkENTRY
WHERE origin_url=%s
""", (url, ))
run_sql("""UPDATE lnkENTRYURLTITLE
SET title=%s,
broken=1
WHERE url=%s
""", (CFG_WEBLINKBACK_STATUS['BROKEN'], url))
# update all linkbacks
for linkbackid in linkbackids:
update_linkback_status(linkbackid[0], CFG_WEBLINKBACK_STATUS['BROKEN'])
def get_url_title(url):
"""
Get URL title or URL if title does not exist (empty string)
@param url: URL
@return title or URL if titles does not exist (empty string)
"""
title = run_sql("""SELECT title
FROM lnkENTRYURLTITLE
WHERE url=%s and title<>"" and broken=0
""", (url, ))
res = url
if len(title) != 0:
res = title[0][0]
return xml_entities_to_utf8(res)
def increment_broken_count(url):
"""
Increment broken count a URL
@param url: URL
"""
run_sql("""UPDATE lnkENTRYURLTITLE
SET broken_count=broken_count+1
WHERE url=%s
""", (url, ))
def remove_linkback(linkbackid):
"""
Remove a linkback database
@param linkbackid: unique URL string for all lists
"""
# get ids
logids = run_sql("""SELECT log.id
FROM lnkLOG log
JOIN lnkENTRYLOG entry_log
ON log.id=entry_log.id_lnkLOG
WHERE entry_log.id_lnkENTRY=%s
""", (linkbackid, ))
# delete linkback entry and entry log
run_sql("""DELETE FROM lnkENTRY
WHERE id=%s;
DELETE FROM lnkENTRYLOG
WHERE id_lnkENTRY=%s
""", (linkbackid, linkbackid))
# delete log
for logid in logids:
run_sql("""DELETE FROM lnkLOG
WHERE id=%s
""", (logid[0], ))
| gpl-2.0 | 6,760,731,266,523,626,000 | 30.961814 | 128 | 0.51822 | false |
petervanderdoes/wger | wger/nutrition/tests/test_ingredient_overview.py | 1 | 4670 | # This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Workout Manager. If not, see <http://www.gnu.org/licenses/>.
# Third Party
from django.core.urlresolvers import reverse
# wger
from wger.core.tests.base_testcase import WorkoutManagerTestCase
from wger.utils.constants import PAGINATION_OBJECTS_PER_PAGE
class OverviewPlanTestCase(WorkoutManagerTestCase):
'''
Tests the ingredient overview
'''
def test_overview(self):
# Add more ingredients so we can test the pagination
self.user_login('admin')
data = {
"name": "Test ingredient",
"language": 2,
"sodium": 10.54,
"energy": 176,
"fat": 8.19,
"carbohydrates_sugar": 0.0,
"fat_saturated": 3.24,
"fibres": 0.0,
"protein": 25.63,
"carbohydrates": 0.0,
'license': 1,
'license_author': 'internet'
}
for i in range(0, 50):
self.client.post(reverse('nutrition:ingredient:add'), data)
# Page exists
self.user_logout()
response = self.client.get(reverse('nutrition:ingredient:list'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['ingredients_list']), PAGINATION_OBJECTS_PER_PAGE)
response = self.client.get(reverse('nutrition:ingredient:list'), {'page': 2})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['ingredients_list']), PAGINATION_OBJECTS_PER_PAGE)
rest_ingredients = 13
response = self.client.get(reverse('nutrition:ingredient:list'), {'page': 3})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['ingredients_list']), rest_ingredients)
# 'last' is a special case
response = self.client.get(reverse('nutrition:ingredient:list'), {'page': 'last'})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['ingredients_list']), rest_ingredients)
# Page does not exist
response = self.client.get(reverse('nutrition:ingredient:list'), {'page': 100})
self.assertEqual(response.status_code, 404)
response = self.client.get(reverse('nutrition:ingredient:list'), {'page': 'foobar'})
self.assertEqual(response.status_code, 404)
def ingredient_overview(self, logged_in=True, demo=False, admin=False):
'''
Helper function to test the ingredient overview page
'''
# Page exists
response = self.client.get(reverse('nutrition:ingredient:list'))
self.assertEqual(response.status_code, 200)
# No ingredients pending review
if admin:
self.assertContains(response, 'Ingredients pending review')
else:
self.assertNotContains(response, 'Ingredients pending review')
# Only authorized users see the edit links
if logged_in and not demo:
self.assertNotContains(response, 'Only registered users can do this')
if logged_in and demo:
self.assertContains(response, 'Only registered users can do this')
def test_ingredient_index_editor(self):
'''
Tests the ingredient overview page as a logged in user with editor rights
'''
self.user_login('admin')
self.ingredient_overview(admin=True)
def test_ingredient_index_non_editor(self):
'''
        Tests the ingredient overview page as a logged in user without editor rights
'''
self.user_login('test')
self.ingredient_overview()
def test_ingredient_index_demo_user(self):
'''
        Tests the ingredient overview page as a logged in demo user
'''
self.user_login('demo')
self.ingredient_overview(demo=True)
def test_ingredient_index_logged_out(self):
'''
        Tests the ingredient overview page as an anonymous (logged out) user
'''
self.ingredient_overview(logged_in=False)
| agpl-3.0 | -658,755,332,976,260,400 | 35.484375 | 96 | 0.648822 | false |
catapult-project/catapult | telemetry/telemetry/internal/backends/browser_backend.py | 3 | 16982 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import division
from __future__ import absolute_import
import logging
import os
import posixpath
import uuid
import sys
import tempfile
import threading
import time
from datetime import datetime
from py_utils import cloud_storage # pylint: disable=import-error
from telemetry import decorators
from telemetry.core import debug_data
from telemetry.core import exceptions
from telemetry.internal.backends import app_backend
from telemetry.internal.browser import web_contents
from telemetry.internal.results import artifact_logger
from telemetry.util import screenshot
class ExtensionsNotSupportedException(Exception):
pass
class BrowserBackend(app_backend.AppBackend):
"""A base class for browser backends."""
def __init__(self, platform_backend, browser_options,
supports_extensions, tab_list_backend):
assert browser_options.browser_type
super(BrowserBackend, self).__init__(browser_options.browser_type,
platform_backend)
self.browser_options = browser_options
self._supports_extensions = supports_extensions
self._tab_list_backend_class = tab_list_backend
self._dump_finder = None
self._tmp_minidump_dir = tempfile.mkdtemp()
self._symbolized_minidump_paths = set([])
self._periodic_screenshot_timer = None
self._collect_periodic_screenshots = False
def SetBrowser(self, browser):
super(BrowserBackend, self).SetApp(app=browser)
@property
def log_file_path(self):
# Specific browser backend is responsible for overriding this properly.
raise NotImplementedError
def GetLogFileContents(self):
if not self.log_file_path:
return 'No log file'
with open(self.log_file_path) as f:
return f.read()
def UploadLogsToCloudStorage(self):
""" Uploading log files produce by this browser instance to cloud storage.
Check supports_uploading_logs before calling this method.
"""
assert self.supports_uploading_logs
remote_path = (self.browser_options.logs_cloud_remote_path or
'log_%s' % uuid.uuid4())
cloud_url = cloud_storage.Insert(
bucket=self.browser_options.logs_cloud_bucket,
remote_path=remote_path,
local_path=self.log_file_path)
sys.stderr.write('Uploading browser log to %s\n' % cloud_url)
@property
def browser(self):
return self.app
@property
def browser_type(self):
return self.app_type
@property
def screenshot_timeout(self):
return None
@property
def supports_uploading_logs(self):
# Specific browser backend is responsible for overriding this properly.
return False
@property
def supports_extensions(self):
"""True if this browser backend supports extensions."""
return self._supports_extensions
@property
def supports_tab_control(self):
raise NotImplementedError()
@property
@decorators.Cache
def tab_list_backend(self):
return self._tab_list_backend_class(self)
@property
def supports_app_ui_interactions(self):
return False
def Start(self, startup_args):
raise NotImplementedError()
def IsBrowserRunning(self):
raise NotImplementedError()
def IsAppRunning(self):
return self.IsBrowserRunning()
def GetStandardOutput(self):
raise NotImplementedError()
def PullMinidumps(self):
"""Pulls any minidumps off a test device if necessary."""
pass
def CollectDebugData(self, log_level):
"""Collects various information that may be useful for debugging.
Specifically:
1. Captures a screenshot.
2. Collects stdout and system logs.
3. Attempts to symbolize all currently unsymbolized minidumps.
All collected information is stored as artifacts, and everything but the
screenshot is also included in the return value.
Platforms may override this to provide other debug information in addition
to the above set of information.
Args:
log_level: The logging level to use from the logging module, e.g.
logging.ERROR.
Returns:
A debug_data.DebugData object containing the collected data.
"""
suffix = artifact_logger.GetTimestampSuffix()
data = debug_data.DebugData()
self._CollectScreenshot(log_level, suffix + '.png')
self._CollectSystemLog(log_level, suffix + '.txt', data)
self._CollectStdout(log_level, suffix + '.txt', data)
self._SymbolizeAndLogMinidumps(log_level, data)
return data
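  # Illustrative caller (hypothetical; not part of this module). CollectDebugData
  # returns a debug_data.DebugData object whose fields can be logged directly:
  #   data = backend.CollectDebugData(logging.ERROR)
  #   for symbolized in data.symbolized_minidumps:
  #     logging.error('Symbolized crash dump:\n%s', symbolized)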
def StartCollectingPeriodicScreenshots(self, frequency_ms):
self._collect_periodic_screenshots = True
self._CollectPeriodicScreenshots(datetime.now(), frequency_ms)
def StopCollectingPeriodicScreenshots(self):
self._collect_periodic_screenshots = False
self._periodic_screenshot_timer.cancel()
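  # Sketch of the intended start/stop pairing (hypothetical harness code):
  #   backend.StartCollectingPeriodicScreenshots(frequency_ms=1000)
  #   ...  # run the test story
  #   backend.StopCollectingPeriodicScreenshots()
  # Each capture is stored as a 'debug_screenshots/screenshot-<elapsed>-periodic.png'
  # artifact by _CollectPeriodicScreenshots below.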
def _CollectPeriodicScreenshots(self, start_time, frequency_ms):
self._CollectScreenshot(logging.INFO, "periodic.png", start_time)
    # 2To3-division: this line is unchanged as the result is expected to be a float.
self._periodic_screenshot_timer = threading.Timer(
frequency_ms / 1000.0,
self._CollectPeriodicScreenshots,
[start_time, frequency_ms])
if self._collect_periodic_screenshots:
self._periodic_screenshot_timer.start()
def _CollectScreenshot(self, log_level, suffix, start_time=None):
"""Helper function to handle the screenshot portion of CollectDebugData.
Attempts to take a screenshot at the OS level and save it as an artifact.
Args:
log_level: The logging level to use from the logging module, e.g.
logging.ERROR.
      suffix: The suffix to append to the names of any created artifacts.
      start_time: If set, prepend the elapsed time to the screenshot path.
          Should be the time at which the test started, as a datetime.
This is done here because it may take a nonzero amount of time
to take a screenshot.
"""
screenshot_handle = screenshot.TryCaptureScreenShot(
self.browser.platform, timeout=self.screenshot_timeout)
if screenshot_handle:
with open(screenshot_handle.GetAbsPath(), 'rb') as infile:
if start_time:
# Prepend time since test started to path
test_time = datetime.now() - start_time
suffix = str(test_time.total_seconds()).replace(
'.', '_') + '-' + suffix
artifact_name = posixpath.join(
'debug_screenshots', 'screenshot-%s' % suffix)
logging.log(
log_level, 'Saving screenshot as artifact %s', artifact_name)
artifact_logger.CreateArtifact(artifact_name, infile.read())
else:
logging.log(log_level, 'Failed to capture screenshot')
def _CollectSystemLog(self, log_level, suffix, data):
"""Helper function to handle the system log part of CollectDebugData.
Attempts to retrieve the system log, save it as an artifact, and add it to
the given DebugData object.
Args:
log_level: The logging level to use from the logging module, e.g.
logging.ERROR.
suffix: The suffix to append to the names of any created artifacts.
data: The debug_data.DebugData object to add collected data to.
"""
system_log = self.browser.platform.GetSystemLog()
if system_log is None:
logging.log(log_level, 'Platform did not provide a system log')
return
artifact_name = posixpath.join('system_logs', 'system_log-%s' % suffix)
logging.log(log_level, 'Saving system log as artifact %s', artifact_name)
artifact_logger.CreateArtifact(artifact_name, system_log)
data.system_log = system_log
def _CollectStdout(self, log_level, suffix, data):
"""Helper function to handle the stdout part of CollectDebugData.
Attempts to retrieve stdout, save it as an artifact, and add it to the given
DebugData object.
Args:
log_level: The logging level to use from the logging module, e.g.
logging.ERROR.
suffix: The suffix to append to the names of any created artifacts.
data: The debug_data.DebugData object to add collected data to.
"""
stdout = self.browser.GetStandardOutput()
if stdout is None:
logging.log(log_level, 'Browser did not provide stdout')
return
artifact_name = posixpath.join('stdout', 'stdout-%s' % suffix)
logging.log(log_level, 'Saving stdout as artifact %s', artifact_name)
artifact_logger.CreateArtifact(artifact_name, stdout)
data.stdout = stdout
def _SymbolizeAndLogMinidumps(self, log_level, data):
"""Helper function to handle the minidump portion of CollectDebugData.
Attempts to find all unsymbolized minidumps, symbolize them, save the
results as artifacts, add them to the given DebugData object, and log the
results.
Args:
log_level: The logging level to use from the logging module, e.g.
logging.ERROR.
data: The debug_data.DebugData object to add collected data to.
"""
paths = self.GetAllUnsymbolizedMinidumpPaths()
# It's probable that CollectDebugData() is being called in response to a
# crash. Minidumps are usually written to disk in time, but there's no
# guarantee that is the case. So, if we don't find any minidumps, poll for
# a bit to ensure we don't miss them.
if not paths:
self.browser.GetRecentMinidumpPathWithTimeout(5)
paths = self.GetAllUnsymbolizedMinidumpPaths()
if not paths:
logging.log(log_level, 'No unsymbolized minidump paths')
return
logging.log(log_level, 'Unsymbolized minidump paths: ' + str(paths))
for unsymbolized_path in paths:
minidump_name = os.path.basename(unsymbolized_path)
artifact_name = posixpath.join('unsymbolized_minidumps', minidump_name)
logging.log(log_level, 'Saving minidump as artifact %s', artifact_name)
with open(unsymbolized_path, 'rb') as infile:
artifact_logger.CreateArtifact(artifact_name, infile.read())
valid, output = self.SymbolizeMinidump(unsymbolized_path)
# Store the symbolization attempt as an artifact.
artifact_name = posixpath.join('symbolize_attempts', minidump_name)
logging.log(log_level, 'Saving symbolization attempt as artifact %s',
artifact_name)
artifact_logger.CreateArtifact(artifact_name, output)
if valid:
logging.log(log_level, 'Symbolized minidump:\n%s', output)
data.symbolized_minidumps.append(output)
else:
logging.log(
log_level,
'Minidump symbolization failed, check artifact %s for output',
artifact_name)
def CleanupUnsymbolizedMinidumps(self, fatal=False):
"""Cleans up any unsymbolized minidumps so they aren't found later.
Args:
      fatal: Whether the presence of unsymbolized minidumps should be considered
          a fatal error or not. Typically this should be non-fatal before a test
          runs and fatal after it finishes.
"""
log_level = logging.ERROR if fatal else logging.WARNING
unsymbolized_paths = self.GetAllUnsymbolizedMinidumpPaths(log=False)
if not unsymbolized_paths:
return
culprit_test = 'current test' if fatal else 'a previous test'
logging.log(log_level,
'Found %d unsymbolized minidumps leftover from %s. Outputting '
'below: ', len(unsymbolized_paths), culprit_test)
self._SymbolizeAndLogMinidumps(log_level, debug_data.DebugData())
if fatal:
raise RuntimeError(
'Test left unsymbolized minidumps around after finishing.')
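  # Typical call pattern (hypothetical; actual call sites live in the harness):
  #   backend.CleanupUnsymbolizedMinidumps()            # before a story: warn only
  #   backend.CleanupUnsymbolizedMinidumps(fatal=True)  # after a story: raise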
def IgnoreMinidump(self, path):
"""Ignores the given minidump, treating it as already symbolized.
Args:
path: The path to the minidump to ignore.
"""
self._symbolized_minidump_paths.add(path)
def GetMostRecentMinidumpPath(self):
"""Gets the most recent minidump that has been written to disk.
Returns:
The path to the most recent minidump on disk, or None if no minidumps are
found.
"""
self.PullMinidumps()
dump_path, explanation = self._dump_finder.GetMostRecentMinidump(
self._tmp_minidump_dir)
logging.info('\n'.join(explanation))
return dump_path
def GetRecentMinidumpPathWithTimeout(self, timeout_s, oldest_ts):
"""Get a path to a recent minidump, blocking until one is available.
Similar to GetMostRecentMinidumpPath, but does not assume that any pending
dumps have been written to disk yet. Instead, waits until a suitably fresh
minidump is found or the timeout is reached.
Args:
timeout_s: The timeout in seconds.
oldest_ts: The oldest allowable timestamp (in seconds since epoch) that a
minidump was created at for it to be considered fresh enough to
return. Defaults to a minute from the current time if not set.
Returns:
None if the timeout is hit or a str containing the path to the found
minidump if a suitable one is found.
"""
assert timeout_s > 0
assert oldest_ts >= 0
explanation = ['No explanation returned.']
start_time = time.time()
try:
while time.time() - start_time < timeout_s:
self.PullMinidumps()
dump_path, explanation = self._dump_finder.GetMostRecentMinidump(
self._tmp_minidump_dir)
if not dump_path or os.path.getmtime(dump_path) < oldest_ts:
continue
return dump_path
return None
finally:
logging.info('\n'.join(explanation))
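  # Example (hypothetical values): wait up to 30 seconds for a dump newer than
  # the moment the crash was observed, then symbolize it.
  #   dump = backend.GetRecentMinidumpPathWithTimeout(30, crash_time_s)
  #   if dump:
  #     valid, output = backend.SymbolizeMinidump(dump)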
def GetAllMinidumpPaths(self, log=True):
"""Get all paths to minidumps currently written to disk.
Args:
log: Whether to log the output from looking for minidumps or not.
Returns:
A list of paths to all found minidumps.
"""
self.PullMinidumps()
paths, explanation = self._dump_finder.GetAllMinidumpPaths(
self._tmp_minidump_dir)
if log:
logging.info('\n'.join(explanation))
return paths
def GetAllUnsymbolizedMinidumpPaths(self, log=True):
"""Get all paths to minidumps have have not yet been symbolized.
Args:
log: Whether to log the output from looking for minidumps or not.
Returns:
A list of paths to all found minidumps that have not been symbolized yet.
"""
minidump_paths = set(self.GetAllMinidumpPaths(log=log))
# If we have already symbolized paths remove them from the list
unsymbolized_paths = (
minidump_paths - self._symbolized_minidump_paths)
return list(unsymbolized_paths)
def SymbolizeMinidump(self, minidump_path):
"""Symbolizes the given minidump.
Args:
minidump_path: The path to the minidump to symbolize.
Returns:
A tuple (valid, output). |valid| is True if the minidump was symbolized,
otherwise False. |output| contains an error message if |valid| is False,
otherwise it contains the symbolized minidump.
"""
raise NotImplementedError()
def GetSystemInfo(self):
return None
@property
def supports_memory_dumping(self):
return False
def DumpMemory(self, timeout=None, detail_level=None):
raise NotImplementedError()
# pylint: disable=invalid-name
@property
def supports_overriding_memory_pressure_notifications(self):
return False
def SetMemoryPressureNotificationsSuppressed(
self, suppressed, timeout=web_contents.DEFAULT_WEB_CONTENTS_TIMEOUT):
raise NotImplementedError()
def SimulateMemoryPressureNotification(
self, pressure_level, timeout=web_contents.DEFAULT_WEB_CONTENTS_TIMEOUT):
raise NotImplementedError()
@property
def supports_cpu_metrics(self):
raise NotImplementedError()
@property
def supports_memory_metrics(self):
raise NotImplementedError()
@property
def supports_overview_mode(self): # pylint: disable=invalid-name
return False
def EnterOverviewMode(self, timeout): # pylint: disable=unused-argument
raise exceptions.StoryActionError('Overview mode is not supported')
def ExitOverviewMode(self, timeout): # pylint: disable=unused-argument
raise exceptions.StoryActionError('Overview mode is not supported')
def ExecuteBrowserCommand(
self, command_id, timeout): # pylint: disable=unused-argument
raise exceptions.StoryActionError('Execute browser command not supported')
def SetDownloadBehavior(
self, behavior, downloadPath, timeout): # pylint: disable=unused-argument
raise exceptions.StoryActionError('Set download behavior not supported')
def GetUIDevtoolsBackend(self):
raise exceptions.StoryActionError('UI Devtools not supported')
def GetWindowForTarget(self, target_id): # pylint: disable=unused-argument
raise exceptions.StoryActionError('Get Window For Target not supported')
def SetWindowBounds(
self, window_id, bounds): # pylint: disable=unused-argument
raise exceptions.StoryActionError('Set Window Bounds not supported')
| bsd-3-clause | 9,061,864,184,293,802,000 | 35.055202 | 80 | 0.704216 | false |
ClovisIRex/Snake-django | env/lib/python3.6/site-packages/pylint/test/functional/redefine_in_handler.py | 6 | 1585 | """Test for W0623, overwriting names in exception handlers."""
# pylint: disable=broad-except,bare-except,print-statement,no-absolute-import,duplicate-except
# pylint: disable=invalid-name, unused-variable
import exceptions
__revision__ = ''
class MyError(Exception):
"""Special exception class."""
pass
def some_function():
"""A function."""
exc = None
try:
{}["a"]
except KeyError, exceptions.RuntimeError: # [redefine-in-handler]
pass
except KeyError, OSError: # [redefine-in-handler]
pass
except KeyError, MyError: # [redefine-in-handler]
pass
except KeyError, exc: # this is fine
print exc
except KeyError, exc1: # this is fine
print exc1
except KeyError, FOO: # C0103
print FOO
try:
pass
except KeyError, exc1: # this is fine
print exc1
class MyOtherError(Exception):
"""Special exception class."""
pass
exc3 = None
try:
pass
except KeyError, exceptions.RuntimeError: # [redefine-in-handler]
pass
except KeyError, exceptions.RuntimeError.args: # [redefine-in-handler]
pass
except KeyError, OSError: # [redefine-in-handler]
pass
except KeyError, MyOtherError: # [redefine-in-handler]
pass
except KeyError, exc3: # this is fine
print exc3
except KeyError, exc4: # this is fine
print exc4
except KeyError, OOPS: # C0103
print OOPS
try:
pass
except KeyError, exc4: # this is fine
print exc4
except IOError, exc5: # this is fine
print exc5
except MyOtherError, exc5: # this is fine
print exc5
| mit | -2,941,373,177,925,864,400 | 22.308824 | 94 | 0.666246 | false |
nwjs/chromium.src | chrome/test/chromedriver/server/server.py | 2 | 3626 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import atexit
import os
import socket
import subprocess
import threading
import time
import urllib2
def terminate_process(proc):
"""Terminates the process.
If an error occurs ignore it, just print out a message.
Args:
proc: A subprocess.
"""
try:
proc.terminate()
except OSError as ex:
print 'Error while killing a process: %s' % ex
class Server(object):
"""A running ChromeDriver server."""
def __init__(self, exe_path, log_path=None, verbose=True,
replayable=False, devtools_replay_path=None):
"""Starts the ChromeDriver server and waits for it to be ready.
Args:
exe_path: path to the ChromeDriver executable
log_path: path to the log file
verbose: make the logged data verbose
replayable: don't truncate strings in log to make the session replayable
devtools_replay_path: replay devtools events from the log at this path
Raises:
RuntimeError: if ChromeDriver fails to start
"""
if not os.path.exists(exe_path):
raise RuntimeError('ChromeDriver exe not found at: ' + exe_path)
port = self._FindOpenPort()
chromedriver_args = [exe_path, '--port=%d' % port]
if log_path:
chromedriver_args.extend(['--log-path=%s' % log_path])
chromedriver_args.extend(['--append-log'])
chromedriver_args.extend(['--readable-timestamp'])
if verbose:
chromedriver_args.extend(['--verbose',
'--vmodule=*/chrome/test/chromedriver/*=3'])
if replayable:
chromedriver_args.extend(['--replayable'])
if devtools_replay_path:
chromedriver_args.extend(['--devtools-replay=%s' % devtools_replay_path])
self._process = subprocess.Popen(chromedriver_args)
self._host = '127.0.0.1'
self._port = port
self._url = 'http://%s:%d' % (self._host, port)
if self._process is None:
raise RuntimeError('ChromeDriver server cannot be started')
max_time = time.time() + 20
while not self.IsRunning():
if time.time() > max_time:
self._process.poll()
if self._process.returncode is None:
print 'ChromeDriver process still running, but not responding'
else:
print ('ChromeDriver process exited with return code %d'
% self._process.returncode)
self._process.terminate()
raise RuntimeError('ChromeDriver server did not start')
time.sleep(0.1)
atexit.register(self.Kill)
def _FindOpenPort(self):
for port in range(9500, 10000):
try:
socket.create_connection(('127.0.0.1', port), 0.2).close()
except socket.error:
return port
raise RuntimeError('Cannot find open port to launch ChromeDriver')
def GetUrl(self):
return self._url
def GetHost(self):
return self._host
def GetPort(self):
return self._port
def IsRunning(self):
"""Returns whether the server is up and running."""
try:
urllib2.urlopen(self.GetUrl() + '/status')
return True
except urllib2.URLError:
return False
def Kill(self):
"""Kills the ChromeDriver server, if it is running."""
if self._process is None:
return
try:
urllib2.urlopen(self.GetUrl() + '/shutdown', timeout=10).close()
except:
self._process.terminate()
timer = threading.Timer(5, terminate_process, [self._process])
timer.start()
self._process.wait()
timer.cancel()
self._process = None
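# Example usage (hypothetical paths; for illustration only):
#   server = Server('/path/to/chromedriver', log_path='/tmp/chromedriver.log')
#   print server.GetUrl()  # e.g. http://127.0.0.1:9514
#   server.Kill()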
| bsd-3-clause | -6,857,719,779,066,672,000 | 28.966942 | 79 | 0.646167 | false |
cherokee/webserver | qa/112-NiceError-CGI.py | 8 | 1832 | from base import *
ERROR = 403
ERROR_MSG = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" lang="en" xml:lang="en">
<head>
<title>Permission Denied - Cherokee Web Server</title>
</head>
<body>
<!-- Poem by Thomas Thurman <[email protected]> -->
<h1>403 Access Denied</h1>
<p>So many years have passed since first you sought
the lands beyond the edges of the sky,
so many moons reflected in your eye,
(familiar newness, fear of leaving port),
since first you sought, and failed, and learned to fall,
(first hope, then cynicism, silent dread,
the countless stars, still counting overhead
the seconds to your final voyage of all...)
and last, in glory gold and red around
your greatest search, your final quest to know!
yet... ashes drift, the embers cease to glow,
and darkened life in frozen death is drowned;
and ashes on the swell are seen no more.
The silence surges.
<p><b>Error 403</b>.
</body>
</html>"""
CONF = """
vserver!1!rule!1120!match = directory
vserver!1!rule!1120!match!directory = /cgi_error_403_1
vserver!1!rule!1120!handler = cgi
vserver!1!rule!1120!handler!error_handler = 1
"""
CGI_BASE = """#!/bin/sh
echo "Content-type: text/html"
echo "Status: %s"
echo ""
cat << EOF
%s
EOF
"""
class Test (TestBase):
def __init__ (self):
TestBase.__init__ (self, __file__)
self.name = "CGI error message"
self.request = "GET /cgi_error_403_1/exec.cgi HTTP/1.0\r\n"
self.expected_error = ERROR
self.expected_content = ERROR_MSG
self.conf = CONF
def Prepare (self, www):
d = self.Mkdir (www, "cgi_error_403_1")
f = self.WriteFile (d, "exec.cgi", 0555, CGI_BASE % (ERROR, ERROR_MSG))
| gpl-2.0 | -2,774,066,265,116,544,000 | 27.625 | 124 | 0.656659 | false |
Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/third_party/webdriver/pylib/test/selenium/webdriver/common/webdriverwait_tests.py | 17 | 15629 | #!/usr/bin/python
# Copyright 2011 WebDriver committers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import unittest
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoSuchFrameException
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import WebDriverException
from selenium.common.exceptions import InvalidElementStateException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
def not_available_on_remote(func):
def testMethod(self):
print self.driver
if type(self.driver) == 'remote':
return lambda x: None
else:
return func(self)
return testMethod
def throwSERE(driver):
raise StaleElementReferenceException("test")
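# The canonical wait pattern these tests exercise (sketch; assumes a live
# driver and a loaded page):
#   element = WebDriverWait(driver, 3).until(
#       EC.presence_of_element_located((By.ID, "box0")))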
class WebDriverWaitTest(unittest.TestCase):
def testShouldExplicitlyWaitForASingleElement(self):
self._loadPage("dynamic")
add = self.driver.find_element_by_id("adder")
add.click();
WebDriverWait(self.driver, 3).until(EC.presence_of_element_located((By.ID, "box0"))) # All is well if this doesn't throw.
def testShouldStillFailToFindAnElementWithExplicitWait(self):
self._loadPage("dynamic")
try:
WebDriverWait(self.driver, 0.7).until(EC.presence_of_element_located((By.ID, "box0")))
self.fail("Expected TimeoutException to have been thrown")
except TimeoutException, e:
pass
except Exception, e:
self.fail("Expected TimeoutException but got " + str(e))
def testShouldExplicitlyWaituntilAtLeastOneElementIsFoundWhenSearchingForMany(self):
self._loadPage("dynamic")
add = self.driver.find_element_by_id("adder")
add.click();
add.click();
elements = WebDriverWait(self.driver, 2).until(EC.presence_of_all_elements_located((By.CLASS_NAME, "redbox")))
self.assertTrue(len(elements) >= 1)
def testShouldFailToFindElementsWhenExplicitWaiting(self):
self._loadPage("dynamic")
try:
elements = WebDriverWait(self.driver, 0.7).until(EC.presence_of_all_elements_located((By.CLASS_NAME, "redbox")))
except TimeoutException, e:
pass # we should get a timeout
except Exception, e:
self.fail("Expected TimeoutException but got " + str(e))
def testShouldWaitOnlyAsLongAsTimeoutSpecifiedWhenImplicitWaitsAreSet(self):
self._loadPage("dynamic")
self.driver.implicitly_wait(0.5)
try:
start = time.time()
try:
WebDriverWait(self.driver, 1).until(EC.presence_of_element_located((By.ID, "box0")))
self.fail("Expected TimeoutException to have been thrown")
except TimeoutException, e:
pass
self.assertTrue(time.time() - start < 1.5,
"Expected to take just over 1 second to execute, but took %f" %
(time.time() - start))
finally:
self.driver.implicitly_wait(0)
def testShouldWaitAtLeastOnce(self):
self._loadPage("simpleTest")
elements_exists = lambda driver: driver.find_elements_by_tag_name('h1')
elements = WebDriverWait(self.driver, 0).until(elements_exists)
self.assertTrue(len(elements) >= 1)
def testWaitUntilNotReturnsIfEvaluatesToFalse(self):
falsum = lambda driver: False
self.assertFalse(WebDriverWait(self.driver, 1).until_not(falsum))
def testWaitShouldStillFailIfProduceIgnoredException(self):
ignored = (InvalidElementStateException, StaleElementReferenceException)
try:
WebDriverWait(self.driver, 1, 0.7, ignored_exceptions=ignored).until(throwSERE)
self.fail("Expected TimeoutException to have been thrown")
except TimeoutException, e:
pass
def testWaitShouldStillFailIfProduceChildOfIgnoredException(self):
ignored = (WebDriverException)
try:
WebDriverWait(self.driver, 1, 0.7, ignored_exceptions=ignored).until(throwSERE)
self.fail("Expected TimeoutException to have been thrown")
except TimeoutException, e:
pass
def testWaitUntilNotShouldNotFailIfProduceIgnoredException(self):
ignored = (InvalidElementStateException, StaleElementReferenceException)
self.assertTrue(WebDriverWait(self.driver, 1, 0.7, ignored_exceptions=ignored).until_not(throwSERE))
def testExpectedConditionTitleIs(self):
self._loadPage("blank")
WebDriverWait(self.driver, 1).until(EC.title_is("blank"))
self.driver.execute_script("setTimeout(function(){document.title='not blank'}, 200)")
WebDriverWait(self.driver, 1).until(EC.title_is("not blank"))
self.assertEqual(self.driver.title, 'not blank')
try:
WebDriverWait(self.driver, 0.7).until(EC.title_is("blank"))
self.fail("Expected TimeoutException to have been thrown")
except TimeoutException, e:
pass
def testExpectedConditionTitleContains(self):
self._loadPage("blank")
self.driver.execute_script("setTimeout(function(){document.title='not blank'}, 200)")
WebDriverWait(self.driver, 1).until(EC.title_contains("not"))
self.assertEqual(self.driver.title, 'not blank')
try:
WebDriverWait(self.driver, 0.7).until(EC.title_contains("blanket"))
self.fail("Expected TimeoutException to have been thrown")
except TimeoutException, e:
pass
def testExpectedConditionVisibilityOfElementLocated(self):
self._loadPage("javascriptPage")
try:
WebDriverWait(self.driver, 0.7).until(EC.visibility_of_element_located((By.ID, 'clickToHide')))
self.fail("Expected TimeoutException to have been thrown")
except TimeoutException, e:
pass
self.driver.find_element_by_id('clickToShow').click()
element = WebDriverWait(self.driver, 5).until(EC.visibility_of_element_located((By.ID, 'clickToHide')))
self.assertTrue(element.is_displayed())
def testExpectedConditionVisibilityOf(self):
self._loadPage("javascriptPage")
hidden = self.driver.find_element_by_id('clickToHide')
try:
WebDriverWait(self.driver, 0.7).until(EC.visibility_of(hidden))
self.fail("Expected TimeoutException to have been thrown")
except TimeoutException, e:
pass
self.driver.find_element_by_id('clickToShow').click()
element = WebDriverWait(self.driver, 5).until(EC.visibility_of(hidden))
self.assertTrue(element.is_displayed())
def testExpectedConditionTextToBePresentInElement(self):
self._loadPage('booleanAttributes')
try:
WebDriverWait(self.driver, 0.7).until(EC.text_to_be_present_in_element((By.ID, 'unwrappable'), 'Expected'))
self.fail("Expected TimeoutException to have been thrown")
except TimeoutException, e:
pass
self.driver.execute_script("setTimeout(function(){var el = document.getElementById('unwrappable'); el.textContent = el.innerText = 'Unwrappable Expected text'}, 200)")
WebDriverWait(self.driver, 1).until(EC.text_to_be_present_in_element((By.ID, 'unwrappable'), 'Expected'))
self.assertEqual('Unwrappable Expected text', self.driver.find_element_by_id('unwrappable').text)
def testExpectedConditionTextToBePresentInElementValue(self):
self._loadPage('booleanAttributes')
try:
WebDriverWait(self.driver, 1).until(EC.text_to_be_present_in_element_value((By.ID, 'inputRequired'), 'Expected'))
self.fail("Expected TimeoutException to have been thrown")
except TimeoutException, e:
pass
self.driver.execute_script("setTimeout(function(){document.getElementById('inputRequired').value = 'Example Expected text'}, 200)")
WebDriverWait(self.driver, 1).until(EC.text_to_be_present_in_element_value((By.ID, 'inputRequired'), 'Expected'))
self.assertEqual('Example Expected text', self.driver.find_element_by_id('inputRequired').get_attribute('value'))
def testExpectedConditionFrameToBeAvailableAndSwitchTo(self):
self._loadPage("blank")
try:
WebDriverWait(self.driver, 1).until(EC.frame_to_be_available_and_switch_to_it('myFrame'))
self.fail("Expected TimeoutException to have been thrown")
except TimeoutException, e:
pass
self.driver.execute_script("setTimeout(function(){var f = document.createElement('iframe'); f.id='myFrame'; f.src = '"+self._pageURL('iframeWithAlert')+"'; document.body.appendChild(f)}, 200)")
WebDriverWait(self.driver, 1).until(EC.frame_to_be_available_and_switch_to_it('myFrame'))
self.assertEqual('click me', self.driver.find_element_by_id('alertInFrame').text)
def testExpectedConditionInvisiblityOfElementLocated(self):
self._loadPage("javascriptPage")
self.driver.execute_script("delayedShowHide(0, true)")
try:
WebDriverWait(self.driver, 0.7).until(EC.invisibility_of_element_located((By.ID, 'clickToHide')))
self.fail("Expected TimeoutException to have been thrown")
except TimeoutException, e:
pass
self.driver.execute_script("delayedShowHide(200, false)")
WebDriverWait(self.driver, 0.7).until(EC.invisibility_of_element_located((By.ID, 'clickToHide')))
self.assertFalse(self.driver.find_element_by_id('clickToHide').is_displayed())
def testExpectedConditionElementToBeClickable(self):
self._loadPage("javascriptPage")
try:
WebDriverWait(self.driver, 0.7).until(EC.element_to_be_clickable((By.ID, 'clickToHide')))
self.fail("Expected TimeoutException to have been thrown")
except TimeoutException, e:
pass
self.driver.execute_script("delayedShowHide(200, true)")
WebDriverWait(self.driver, 0.7).until(EC.element_to_be_clickable((By.ID, 'clickToHide')))
element = self.driver.find_element_by_id('clickToHide')
element.click()
WebDriverWait(self.driver, 3.5).until(EC.invisibility_of_element_located((By.ID, 'clickToHide')))
self.assertFalse(element.is_displayed())
def testExpectedConditionStalenessOf(self):
self._loadPage('dynamicallyModifiedPage')
element = self.driver.find_element_by_id('element-to-remove')
try:
WebDriverWait(self.driver, 0.7).until(EC.staleness_of(element))
self.fail("Expected TimeoutException to have been thrown")
except TimeoutException, e:
pass
self.driver.find_element_by_id('buttonDelete').click()
self.assertEqual('element', element.text)
WebDriverWait(self.driver, 0.7).until(EC.staleness_of(element))
try:
element.text
self.fail("Expected StaleReferenceException to have been thrown")
except StaleElementReferenceException, e:
pass
def testExpectedConditionElementToBeSelected(self):
self._loadPage("formPage")
element = self.driver.find_element_by_id('checky')
try:
WebDriverWait(self.driver, 0.7).until(EC.element_to_be_selected(element))
self.fail("Expected TimeoutException to have been thrown")
except TimeoutException, e:
pass
self.driver.execute_script("setTimeout(function(){document.getElementById('checky').checked = true}, 200)")
WebDriverWait(self.driver, 0.7).until(EC.element_to_be_selected(element))
self.assertTrue(element.is_selected())
def testExpectedConditionElementLocatedToBeSelected(self):
self._loadPage("formPage")
element = self.driver.find_element_by_id('checky')
try:
WebDriverWait(self.driver, 0.7).until(EC.element_located_to_be_selected((By.ID, 'checky')))
self.fail("Expected TimeoutException to have been thrown")
except TimeoutException, e:
pass
self.driver.execute_script("setTimeout(function(){document.getElementById('checky').checked = true}, 200)")
WebDriverWait(self.driver, 0.7).until(EC.element_located_to_be_selected((By.ID, 'checky')))
self.assertTrue(element.is_selected())
def testExpectedConditionElementSelectionStateToBe(self):
self._loadPage("formPage")
element = self.driver.find_element_by_id('checky')
WebDriverWait(self.driver, 0.7).until(EC.element_selection_state_to_be(element, False))
self.assertFalse(element.is_selected())
try:
WebDriverWait(self.driver, 0.7).until(EC.element_selection_state_to_be(element, True))
self.fail("Expected TimeoutException to have been thrown")
except TimeoutException, e:
pass
self.driver.execute_script("setTimeout(function(){document.getElementById('checky').checked = true}, 200)")
WebDriverWait(self.driver, 0.7).until(EC.element_selection_state_to_be(element, True))
self.assertTrue(element.is_selected())
def testExpectedConditionElementLocatedSelectionStateToBe(self):
self._loadPage("formPage")
element = self.driver.find_element_by_id('checky')
WebDriverWait(self.driver, 0.7).until(EC.element_located_selection_state_to_be((By.ID, 'checky'), False))
self.assertFalse(element.is_selected())
try:
WebDriverWait(self.driver, 0.7).until(EC.element_located_selection_state_to_be((By.ID, 'checky'), True))
self.fail("Expected TimeoutException to have been thrown")
except TimeoutException, e:
pass
self.driver.execute_script("setTimeout(function(){document.getElementById('checky').checked = true}, 200)")
WebDriverWait(self.driver, 0.7).until(EC.element_located_selection_state_to_be((By.ID, 'checky'), True))
self.assertTrue(element.is_selected())
def testExpectedConditionAlertIsPresent(self):
self._loadPage('blank')
try:
WebDriverWait(self.driver, 0.7).until(EC.alert_is_present())
self.fail("Expected TimeoutException to have been thrown")
except TimeoutException, e:
pass
self.driver.execute_script("setTimeout(function(){alert('alerty')}, 200)")
WebDriverWait(self.driver, 0.7).until(EC.alert_is_present())
alert = self.driver.switch_to_alert()
self.assertEqual('alerty', alert.text)
alert.dismiss()
def _pageURL(self, name):
return "http://localhost:%d/%s.html" % (self.webserver.port, name)
def _loadSimplePage(self):
self._loadPage("simpleTest")
def _loadPage(self, name):
self.driver.get(self._pageURL(name))
| mit | -8,254,959,777,622,366,000 | 47.237654 | 201 | 0.673428 | false |
EthanBlackburn/sync-engine | migrations/versions/115_eas_twodevices_turn.py | 1 | 3173 | """EAS two-devices turn
Revision ID: 17dc9c049f8b
Revises: ad7b856bcc0
Create Date: 2014-10-21 20:38:14.311747
"""
# revision identifiers, used by Alembic.
revision = '17dc9c049f8b'
down_revision = 'ad7b856bcc0'
from datetime import datetime
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import text
def upgrade():
from inbox.ignition import main_engine
engine = main_engine()
if not engine.has_table('easaccount'):
return
from inbox.models.session import session_scope
Base = sa.ext.declarative.declarative_base()
Base.metadata.reflect(engine)
class EASAccount(Base):
__table__ = Base.metadata.tables['easaccount']
primary_device = sa.orm.relationship(
'EASDevice', primaryjoin='and_(EASAccount.primary_device_id == EASDevice.id, '
'EASDevice.deleted_at.is_(None))', uselist=False)
secondary_device = sa.orm.relationship(
'EASDevice', primaryjoin='and_(EASAccount.secondary_device_id == EASDevice.id, '
'EASDevice.deleted_at.is_(None))', uselist=False)
class EASDevice(Base):
__table__ = Base.metadata.tables['easdevice']
with session_scope(ignore_soft_deletes=False, versioned=False) as \
db_session:
accts = db_session.query(EASAccount).all()
for a in accts:
            # Set both to filtered=False; this is needed for a correct deploy.
primary = EASDevice(created_at=datetime.utcnow(),
updated_at=datetime.utcnow(),
filtered=False,
eas_device_id=a._eas_device_id,
eas_device_type=a._eas_device_type,
eas_policy_key=a.eas_policy_key,
eas_sync_key=a.eas_account_sync_key)
secondary = EASDevice(created_at=datetime.utcnow(),
updated_at=datetime.utcnow(),
filtered=False,
eas_device_id=a._eas_device_id,
eas_device_type=a._eas_device_type,
eas_policy_key=a.eas_policy_key,
eas_sync_key=a.eas_account_sync_key)
a.primary_device = primary
a.secondary_device = secondary
db_session.add(a)
db_session.commit()
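    # Re-point existing folder-sync state and EAS UIDs at each account's new
    # secondary device so the two-device scheme continues from the existing
    # sync position.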
conn = op.get_bind()
acct_device_map = dict(
(id_, device_id) for id_, device_id in conn.execute(text(
"""SELECT id, secondary_device_id from easaccount""")))
print 'acct_device_map: ', acct_device_map
for acct_id, device_id in acct_device_map.iteritems():
conn.execute(text("""
UPDATE easfoldersyncstatus
SET device_id=:device_id
WHERE account_id=:acct_id
"""), device_id=device_id, acct_id=acct_id)
conn.execute(text("""
UPDATE easuid
SET device_id=:device_id
WHERE easaccount_id=:acct_id
"""), device_id=device_id, acct_id=acct_id)
def downgrade():
raise Exception('!')
| agpl-3.0 | -1,912,877,296,050,088,000 | 31.71134 | 88 | 0.566341 | false |
danieljaouen/ansible | lib/ansible/modules/cloud/amazon/aws_waf_condition.py | 31 | 27277 | #!/usr/bin/python
# Copyright (c) 2017 Will Thames
# Copyright (c) 2015 Mike Mochan
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: aws_waf_condition
short_description: create and delete WAF Conditions
description:
- Read the AWS documentation for WAF
U(https://aws.amazon.com/documentation/waf/)
version_added: "2.5"
author:
- Will Thames (@willthames)
- Mike Mochan (@mmochan)
extends_documentation_fragment:
- aws
- ec2
options:
name:
description: Name of the Web Application Firewall condition to manage
required: yes
type:
description: the type of matching to perform
choices:
- byte
- geo
- ip
- regex
- size
- sql
- xss
filters:
description:
- A list of the filters against which to match
- For I(type)=C(byte), valid keys are C(field_to_match), C(position), C(header), C(transformation)
- For I(type)=C(geo), the only valid key is C(country)
- For I(type)=C(ip), the only valid key is C(ip_address)
- For I(type)=C(regex), valid keys are C(field_to_match), C(transformation) and C(regex_pattern)
- For I(type)=C(size), valid keys are C(field_to_match), C(transformation), C(comparison) and C(size)
- For I(type)=C(sql), valid keys are C(field_to_match) and C(transformation)
- For I(type)=C(xss), valid keys are C(field_to_match) and C(transformation)
- I(field_to_match) can be one of C(uri), C(query_string), C(header) C(method) and C(body)
- If I(field_to_match) is C(header), then C(header) must also be specified
- I(transformation) can be one of C(none), C(compress_white_space), C(html_entity_decode), C(lowercase), C(cmd_line), C(url_decode)
- I(position), can be one of C(exactly), C(starts_with), C(ends_with), C(contains), C(contains_word),
- I(comparison) can be one of C(EQ), C(NE), C(LE), C(LT), C(GE), C(GT),
- I(target_string) is a maximum of 50 bytes
- I(regex_pattern) is a dict with a C(name) key and C(regex_strings) list of strings to match
purge_filters:
description: Whether to remove existing filters from a condition if not passed in I(filters). Defaults to false
state:
description: Whether the condition should be C(present) or C(absent)
choices:
- present
- absent
default: present
'''
EXAMPLES = '''
- name: create WAF byte condition
aws_waf_condition:
name: my_byte_condition
filters:
- field_to_match: header
position: STARTS_WITH
target_string: Hello
header: Content-type
type: byte
- name: create WAF geo condition
aws_waf_condition:
name: my_geo_condition
filters:
- country: US
- country: AU
- country: AT
type: geo
- name: create IP address condition
aws_waf_condition:
name: "{{ resource_prefix }}_ip_condition"
filters:
- ip_address: "10.0.0.0/8"
- ip_address: "192.168.0.0/24"
type: ip
- name: create WAF regex condition
aws_waf_condition:
name: my_regex_condition
filters:
- field_to_match: query_string
regex_pattern:
name: greetings
regex_strings:
- '[hH]ello'
- '^Hi there'
- '.*Good Day to You'
type: regex
- name: create WAF size condition
aws_waf_condition:
name: my_size_condition
filters:
- field_to_match: query_string
size: 300
comparison: GT
type: size
- name: create WAF sql injection condition
aws_waf_condition:
name: my_sql_condition
filters:
- field_to_match: query_string
transformation: url_decode
type: sql
- name: create WAF xss condition
aws_waf_condition:
name: my_xss_condition
filters:
- field_to_match: query_string
transformation: url_decode
type: xss
'''
RETURN = '''
condition:
description: condition returned by operation
returned: always
type: complex
contains:
condition_id:
description: type-agnostic ID for the condition
returned: when state is present
type: string
sample: dd74b1ff-8c06-4a4f-897a-6b23605de413
byte_match_set_id:
description: ID for byte match set
returned: always
type: string
sample: c4882c96-837b-44a2-a762-4ea87dbf812b
byte_match_tuples:
description: list of byte match tuples
returned: always
type: complex
contains:
field_to_match:
description: Field to match
returned: always
type: complex
contains:
data:
description: Which specific header (if type is header)
type: string
sample: content-type
type:
description: Type of field
type: string
sample: HEADER
positional_constraint:
description: Position in the field to match
type: string
sample: STARTS_WITH
target_string:
description: String to look for
type: string
sample: Hello
text_transformation:
description: Transformation to apply to the field before matching
type: string
sample: NONE
geo_match_constraints:
description: List of geographical constraints
returned: when type is geo and state is present
type: complex
contains:
type:
description: Type of geo constraint
type: string
sample: Country
value:
description: Value of geo constraint (typically a country code)
type: string
sample: AT
geo_match_set_id:
description: ID of the geo match set
returned: when type is geo and state is present
type: string
sample: dd74b1ff-8c06-4a4f-897a-6b23605de413
ip_set_descriptors:
description: list of IP address filters
returned: when type is ip and state is present
type: complex
contains:
type:
description: Type of IP address (IPV4 or IPV6)
returned: always
type: string
sample: IPV4
value:
description: IP address
returned: always
type: string
sample: 10.0.0.0/8
ip_set_id:
description: ID of condition
returned: when type is ip and state is present
type: string
sample: 78ad334a-3535-4036-85e6-8e11e745217b
name:
description: Name of condition
returned: when state is present
type: string
sample: my_waf_condition
regex_match_set_id:
description: ID of the regex match set
returned: when type is regex and state is present
type: string
sample: 5ea3f6a8-3cd3-488b-b637-17b79ce7089c
regex_match_tuples:
description: List of regex matches
returned: when type is regex and state is present
type: complex
contains:
field_to_match:
description: Field on which the regex match is applied
type: complex
contains:
type:
description: The field name
returned: when type is regex and state is present
type: string
sample: QUERY_STRING
regex_pattern_set_id:
description: ID of the regex pattern
type: string
sample: 6fdf7f2d-9091-445c-aef2-98f3c051ac9e
text_transformation:
description: transformation applied to the text before matching
type: string
sample: NONE
size_constraint_set_id:
description: ID of the size constraint set
returned: when type is size and state is present
type: string
sample: de84b4b3-578b-447e-a9a0-0db35c995656
size_constraints:
description: List of size constraints to apply
returned: when type is size and state is present
type: complex
contains:
comparison_operator:
description: Comparison operator to apply
type: string
sample: GT
field_to_match:
description: Field on which the size constraint is applied
type: complex
contains:
type:
description: Field name
type: string
sample: QUERY_STRING
size:
description: size to compare against the field
type: int
sample: 300
text_transformation:
description: transformation applied to the text before matching
type: string
sample: NONE
sql_injection_match_set_id:
description: ID of the SQL injection match set
returned: when type is sql and state is present
type: string
sample: de84b4b3-578b-447e-a9a0-0db35c995656
sql_injection_match_tuples:
description: List of SQL injection match sets
returned: when type is sql and state is present
type: complex
contains:
field_to_match:
description: Field on which the SQL injection match is applied
type: complex
contains:
type:
description: Field name
type: string
sample: QUERY_STRING
text_transformation:
description: transformation applied to the text before matching
type: string
sample: URL_DECODE
xss_match_set_id:
description: ID of the XSS match set
returned: when type is xss and state is present
type: string
sample: de84b4b3-578b-447e-a9a0-0db35c995656
xss_match_tuples:
description: List of XSS match sets
returned: when type is xss and state is present
type: complex
contains:
field_to_match:
description: Field on which the XSS match is applied
type: complex
contains:
type:
description: Field name
type: string
sample: QUERY_STRING
text_transformation:
description: transformation applied to the text before matching
type: string
sample: URL_DECODE
'''
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info, ec2_argument_spec
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry, compare_policies
from ansible.module_utils.aws.waf import run_func_with_change_token_backoff, MATCH_LOOKUP
from ansible.module_utils.aws.waf import get_rule_with_backoff, list_rules_with_backoff
class Condition(object):
def __init__(self, client, module):
self.client = client
self.module = module
self.type = module.params['type']
self.method_suffix = MATCH_LOOKUP[self.type]['method']
self.conditionset = MATCH_LOOKUP[self.type]['conditionset']
self.conditionsets = MATCH_LOOKUP[self.type]['conditionset'] + 's'
self.conditionsetid = MATCH_LOOKUP[self.type]['conditionset'] + 'Id'
self.conditiontuple = MATCH_LOOKUP[self.type]['conditiontuple']
self.conditiontuples = MATCH_LOOKUP[self.type]['conditiontuple'] + 's'
self.conditiontype = MATCH_LOOKUP[self.type]['type']
def format_for_update(self, condition_set_id):
# Prep kwargs
kwargs = dict()
kwargs['Updates'] = list()
for filtr in self.module.params.get('filters'):
# Only for ip_set
if self.type == 'ip':
# there might be a better way of detecting an IPv6 address
if ':' in filtr.get('ip_address'):
ip_type = 'IPV6'
else:
ip_type = 'IPV4'
condition_insert = {'Type': ip_type, 'Value': filtr.get('ip_address')}
# Specific for geo_match_set
if self.type == 'geo':
condition_insert = dict(Type='Country', Value=filtr.get('country'))
# Common For everything but ip_set and geo_match_set
if self.type not in ('ip', 'geo'):
condition_insert = dict(FieldToMatch=dict(Type=filtr.get('field_to_match').upper()),
TextTransformation=filtr.get('transformation', 'none').upper())
if filtr.get('field_to_match').upper() == "HEADER":
if filtr.get('header'):
condition_insert['FieldToMatch']['Data'] = filtr.get('header').lower()
else:
self.module.fail_json(msg=str("DATA required when HEADER requested"))
# Specific for byte_match_set
if self.type == 'byte':
condition_insert['TargetString'] = filtr.get('target_string')
condition_insert['PositionalConstraint'] = filtr.get('position')
# Specific for size_constraint_set
if self.type == 'size':
condition_insert['ComparisonOperator'] = filtr.get('comparison')
condition_insert['Size'] = filtr.get('size')
# Specific for regex_match_set
if self.type == 'regex':
condition_insert['RegexPatternSetId'] = self.ensure_regex_pattern_present(filtr.get('regex_pattern'))['RegexPatternSetId']
kwargs['Updates'].append({'Action': 'INSERT', self.conditiontuple: condition_insert})
kwargs[self.conditionsetid] = condition_set_id
return kwargs
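    # Illustrative shape of the update kwargs built above (hypothetical IDs;
    # key names vary by condition type, e.g. 'IPSetId' for type=ip):
    #   {'IPSetId': 'de84b4b3-...',
    #    'Updates': [{'Action': 'INSERT',
    #                 'IPSetDescriptor': {'Type': 'IPV4', 'Value': '10.0.0.0/8'}}]}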
def format_for_deletion(self, condition):
return {'Updates': [{'Action': 'DELETE', self.conditiontuple: current_condition_tuple}
for current_condition_tuple in condition[self.conditiontuples]],
self.conditionsetid: condition[self.conditionsetid]}
@AWSRetry.exponential_backoff()
def list_regex_patterns_with_backoff(self, **params):
return self.client.list_regex_pattern_sets(**params)
@AWSRetry.exponential_backoff()
def get_regex_pattern_set_with_backoff(self, regex_pattern_set_id):
return self.client.get_regex_pattern_set(RegexPatternSetId=regex_pattern_set_id)
def list_regex_patterns(self):
        # at time of writing (2017-11-20) no regex pattern paginator exists
regex_patterns = []
params = {}
while True:
try:
response = self.list_regex_patterns_with_backoff(**params)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg='Could not list regex patterns')
regex_patterns.extend(response['RegexPatternSets'])
if 'NextMarker' in response:
params['NextMarker'] = response['NextMarker']
else:
break
return regex_patterns
def get_regex_pattern_by_name(self, name):
existing_regex_patterns = self.list_regex_patterns()
regex_lookup = dict((item['Name'], item['RegexPatternSetId']) for item in existing_regex_patterns)
if name in regex_lookup:
return self.get_regex_pattern_set_with_backoff(regex_lookup[name])['RegexPatternSet']
else:
return None
def ensure_regex_pattern_present(self, regex_pattern):
name = regex_pattern['name']
pattern_set = self.get_regex_pattern_by_name(name)
if not pattern_set:
pattern_set = run_func_with_change_token_backoff(self.client, self.module, {'Name': name},
self.client.create_regex_pattern_set)['RegexPatternSet']
missing = set(regex_pattern['regex_strings']) - set(pattern_set['RegexPatternStrings'])
extra = set(pattern_set['RegexPatternStrings']) - set(regex_pattern['regex_strings'])
if not missing and not extra:
return pattern_set
updates = [{'Action': 'INSERT', 'RegexPatternString': pattern} for pattern in missing]
updates.extend([{'Action': 'DELETE', 'RegexPatternString': pattern} for pattern in extra])
run_func_with_change_token_backoff(self.client, self.module,
{'RegexPatternSetId': pattern_set['RegexPatternSetId'], 'Updates': updates},
self.client.update_regex_pattern_set, wait=True)
return self.get_regex_pattern_set_with_backoff(pattern_set['RegexPatternSetId'])['RegexPatternSet']
def delete_unused_regex_pattern(self, regex_pattern_set_id):
try:
regex_pattern_set = self.client.get_regex_pattern_set(RegexPatternSetId=regex_pattern_set_id)['RegexPatternSet']
updates = list()
for regex_pattern_string in regex_pattern_set['RegexPatternStrings']:
updates.append({'Action': 'DELETE', 'RegexPatternString': regex_pattern_string})
run_func_with_change_token_backoff(self.client, self.module,
{'RegexPatternSetId': regex_pattern_set_id, 'Updates': updates},
self.client.update_regex_pattern_set)
run_func_with_change_token_backoff(self.client, self.module,
{'RegexPatternSetId': regex_pattern_set_id},
self.client.delete_regex_pattern_set, wait=True)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
if e.response['Error']['Code'] == 'WAFNonexistentItemException':
return
self.module.fail_json_aws(e, msg='Could not delete regex pattern')
def get_condition_by_name(self, name):
all_conditions = [d for d in self.list_conditions() if d['Name'] == name]
if all_conditions:
return all_conditions[0][self.conditionsetid]
@AWSRetry.exponential_backoff()
def get_condition_by_id_with_backoff(self, condition_set_id):
params = dict()
params[self.conditionsetid] = condition_set_id
func = getattr(self.client, 'get_' + self.method_suffix)
return func(**params)[self.conditionset]
def get_condition_by_id(self, condition_set_id):
try:
return self.get_condition_by_id_with_backoff(condition_set_id)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg='Could not get condition')
def list_conditions(self):
method = 'list_' + self.method_suffix + 's'
try:
paginator = self.client.get_paginator(method)
func = paginator.paginate().build_full_result
except botocore.exceptions.OperationNotPageableError:
# list_geo_match_sets and list_regex_match_sets do not have a paginator
func = getattr(self.client, method)
try:
return func()[self.conditionsets]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg='Could not list %s conditions' % self.type)
def tidy_up_regex_patterns(self, regex_match_set):
all_regex_match_sets = self.list_conditions()
all_match_set_patterns = list()
for rms in all_regex_match_sets:
all_match_set_patterns.extend(conditiontuple['RegexPatternSetId']
for conditiontuple in self.get_condition_by_id(rms[self.conditionsetid])[self.conditiontuples])
for filtr in regex_match_set[self.conditiontuples]:
if filtr['RegexPatternSetId'] not in all_match_set_patterns:
self.delete_unused_regex_pattern(filtr['RegexPatternSetId'])
def find_condition_in_rules(self, condition_set_id):
rules_in_use = []
try:
all_rules = list_rules_with_backoff(self.client)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg='Could not list rules')
for rule in all_rules:
try:
rule_details = get_rule_with_backoff(self.client, rule['RuleId'])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg='Could not get rule details')
if condition_set_id in [predicate['DataId'] for predicate in rule_details['Predicates']]:
rules_in_use.append(rule_details['Name'])
return rules_in_use
def find_and_delete_condition(self, condition_set_id):
current_condition = self.get_condition_by_id(condition_set_id)
in_use_rules = self.find_condition_in_rules(condition_set_id)
if in_use_rules:
rulenames = ', '.join(in_use_rules)
self.module.fail_json(msg="Condition %s is in use by %s" % (current_condition['Name'], rulenames))
if current_condition[self.conditiontuples]:
# Filters are deleted using update with the DELETE action
func = getattr(self.client, 'update_' + self.method_suffix)
params = self.format_for_deletion(current_condition)
try:
# We do not need to wait for the conditiontuple delete because we wait later for the delete_* call
run_func_with_change_token_backoff(self.client, self.module, params, func)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg='Could not delete filters from condition')
func = getattr(self.client, 'delete_' + self.method_suffix)
params = dict()
params[self.conditionsetid] = condition_set_id
try:
run_func_with_change_token_backoff(self.client, self.module, params, func, wait=True)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg='Could not delete condition')
# tidy up regex patterns
if self.type == 'regex':
self.tidy_up_regex_patterns(current_condition)
return True, {}
def find_missing(self, update, current_condition):
missing = []
for desired in update['Updates']:
found = False
desired_condition = desired[self.conditiontuple]
current_conditions = current_condition[self.conditiontuples]
for condition in current_conditions:
if not compare_policies(condition, desired_condition):
found = True
if not found:
missing.append(desired)
return missing
def find_and_update_condition(self, condition_set_id):
current_condition = self.get_condition_by_id(condition_set_id)
update = self.format_for_update(condition_set_id)
missing = self.find_missing(update, current_condition)
if self.module.params.get('purge_filters'):
extra = [{'Action': 'DELETE', self.conditiontuple: current_tuple}
for current_tuple in current_condition[self.conditiontuples]
if current_tuple not in [desired[self.conditiontuple] for desired in update['Updates']]]
else:
extra = []
changed = bool(missing or extra)
if changed:
update['Updates'] = missing + extra
func = getattr(self.client, 'update_' + self.method_suffix)
try:
result = run_func_with_change_token_backoff(self.client, self.module, update, func, wait=True)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg='Could not update condition')
return changed, self.get_condition_by_id(condition_set_id)
def ensure_condition_present(self):
name = self.module.params['name']
condition_set_id = self.get_condition_by_name(name)
if condition_set_id:
return self.find_and_update_condition(condition_set_id)
else:
params = dict()
params['Name'] = name
func = getattr(self.client, 'create_' + self.method_suffix)
try:
condition = run_func_with_change_token_backoff(self.client, self.module, params, func)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg='Could not create condition')
return self.find_and_update_condition(condition[self.conditionset][self.conditionsetid])
def ensure_condition_absent(self):
condition_set_id = self.get_condition_by_name(self.module.params['name'])
if condition_set_id:
return self.find_and_delete_condition(condition_set_id)
return False, {}
def main():
filters_subspec = dict(
country=dict(),
field_to_match=dict(choices=['uri', 'query_string', 'header', 'method', 'body']),
header=dict(),
transformation=dict(choices=['none', 'compress_white_space',
'html_entity_decode', 'lowercase',
'cmd_line', 'url_decode']),
position=dict(choices=['exactly', 'starts_with', 'ends_with',
'contains', 'contains_word']),
comparison=dict(choices=['EQ', 'NE', 'LE', 'LT', 'GE', 'GT']),
target_string=dict(), # Bytes
size=dict(type='int'),
ip_address=dict(),
regex_pattern=dict(),
)
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True),
type=dict(required=True, choices=['byte', 'geo', 'ip', 'regex', 'size', 'sql', 'xss']),
filters=dict(type='list'),
purge_filters=dict(type='bool', default=False),
state=dict(default='present', choices=['present', 'absent']),
),
)
module = AnsibleAWSModule(argument_spec=argument_spec,
required_if=[['state', 'present', ['filters']]])
state = module.params.get('state')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
client = boto3_conn(module, conn_type='client', resource='waf', region=region, endpoint=ec2_url, **aws_connect_kwargs)
condition = Condition(client, module)
if state == 'present':
(changed, results) = condition.ensure_condition_present()
# return a condition agnostic ID for use by aws_waf_rule
results['ConditionId'] = results[condition.conditionsetid]
else:
(changed, results) = condition.ensure_condition_absent()
module.exit_json(changed=changed, condition=camel_dict_to_snake_dict(results))
if __name__ == '__main__':
main()
| gpl-3.0 | -4,156,373,584,535,031,000 | 40.391502 | 139 | 0.609268 | false |
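The update path above reconciles desired filter tuples against the current ones: tuples not yet present become INSERT updates and, when purge_filters is set, present-but-undesired tuples become DELETE updates. A minimal self-contained sketch of that reconciliation, with a plain dict comparison standing in for Ansible's compare_policies helper (names and sample values are invented for illustration):

def policies_differ(a, b):
    # Stand-in for ansible.module_utils.ec2.compare_policies: True when the two dicts differ.
    return a != b

def reconcile_filters(desired_tuples, current_tuples, purge=False):
    # Tuples that are desired but not present yet -> INSERT updates.
    missing = [{'Action': 'INSERT', 'Tuple': desired}
               for desired in desired_tuples
               if all(policies_differ(current, desired) for current in current_tuples)]
    # Tuples that exist but are no longer desired -> DELETE updates (only when purging).
    extra = []
    if purge:
        extra = [{'Action': 'DELETE', 'Tuple': current}
                 for current in current_tuples
                 if current not in desired_tuples]
    return missing + extra

desired = [{'FieldToMatch': {'Type': 'URI'}, 'TargetString': '/admin'}]
current = [{'FieldToMatch': {'Type': 'URI'}, 'TargetString': '/login'}]
print(reconcile_filters(desired, current, purge=True))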
Azure/azure-sdk-for-python | sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2020_11_01_preview/aio/operations/_export_pipelines_operations.py | 1 | 22808 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExportPipelinesOperations:
"""ExportPipelinesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerregistry.v2020_11_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
registry_name: str,
export_pipeline_name: str,
**kwargs: Any
) -> "_models.ExportPipeline":
"""Gets the properties of the export pipeline.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param export_pipeline_name: The name of the export pipeline.
:type export_pipeline_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExportPipeline, or the result of cls(response)
:rtype: ~azure.mgmt.containerregistry.v2020_11_01_preview.models.ExportPipeline
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExportPipeline"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'exportPipelineName': self._serialize.url("export_pipeline_name", export_pipeline_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExportPipeline', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/exportPipelines/{exportPipelineName}'} # type: ignore
async def _create_initial(
self,
resource_group_name: str,
registry_name: str,
export_pipeline_name: str,
export_pipeline_create_parameters: "_models.ExportPipeline",
**kwargs: Any
) -> "_models.ExportPipeline":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExportPipeline"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'exportPipelineName': self._serialize.url("export_pipeline_name", export_pipeline_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(export_pipeline_create_parameters, 'ExportPipeline')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExportPipeline', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExportPipeline', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/exportPipelines/{exportPipelineName}'} # type: ignore
async def begin_create(
self,
resource_group_name: str,
registry_name: str,
export_pipeline_name: str,
export_pipeline_create_parameters: "_models.ExportPipeline",
**kwargs: Any
) -> AsyncLROPoller["_models.ExportPipeline"]:
"""Creates an export pipeline for a container registry with the specified parameters.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param export_pipeline_name: The name of the export pipeline.
:type export_pipeline_name: str
:param export_pipeline_create_parameters: The parameters for creating an export pipeline.
:type export_pipeline_create_parameters: ~azure.mgmt.containerregistry.v2020_11_01_preview.models.ExportPipeline
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExportPipeline or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2020_11_01_preview.models.ExportPipeline]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExportPipeline"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
export_pipeline_name=export_pipeline_name,
export_pipeline_create_parameters=export_pipeline_create_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExportPipeline', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'exportPipelineName': self._serialize.url("export_pipeline_name", export_pipeline_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/exportPipelines/{exportPipelineName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
registry_name: str,
export_pipeline_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01-preview"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'exportPipelineName': self._serialize.url("export_pipeline_name", export_pipeline_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/exportPipelines/{exportPipelineName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
registry_name: str,
export_pipeline_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an export pipeline from a container registry.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param export_pipeline_name: The name of the export pipeline.
:type export_pipeline_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
export_pipeline_name=export_pipeline_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'exportPipelineName': self._serialize.url("export_pipeline_name", export_pipeline_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/exportPipelines/{exportPipelineName}'} # type: ignore
def list(
self,
resource_group_name: str,
registry_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ExportPipelineListResult"]:
"""Lists all export pipelines for the specified container registry.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExportPipelineListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerregistry.v2020_11_01_preview.models.ExportPipelineListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExportPipelineListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ExportPipelineListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/exportPipelines'} # type: ignore
| mit | 4,316,208,419,973,742,000 | 51.796296 | 225 | 0.652973 | false |
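An illustrative async usage sketch for the generated operations class above. It assumes the surrounding ContainerRegistryManagementClient (aio variant) exposes this group as client.export_pipelines and that azure-identity is installed; the subscription, resource group, registry and pipeline names are placeholders, not values taken from this file:

import asyncio
from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.containerregistry.aio import ContainerRegistryManagementClient

async def main():
    async with DefaultAzureCredential() as credential:
        async with ContainerRegistryManagementClient(credential, "<subscription-id>") as client:
            # Paged listing: AsyncItemPaged supports async iteration.
            async for pipeline in client.export_pipelines.list("my-rg", "myregistry"):
                print(pipeline.name)
            # Long-running operation: begin_delete returns an AsyncLROPoller.
            poller = await client.export_pipelines.begin_delete("my-rg", "myregistry", "myexportpipeline")
            await poller.result()

asyncio.run(main())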
fatzebra/fatzebra-python | fatzebra/data.py | 1 | 1346 | class Purchase(object):
def __init__(self, data):
self.id = data["id"]
self.authorization = data["authorization"]
self.successful = data["successful"]
self.card_holder = data["card_holder"]
self.card_number = data["card_number"]
self.card_expiry = data["card_expiry"]
self.card_token = data["card_token"]
self.message = data["message"]
self.amount = data["amount"]
self.reference = data["reference"]
self.settlement_date = data["settlement_date"]
self.transaction_date = data["transaction_date"]
self.response_code = data["response_code"]
self.captured = data["captured"]
self.currency = data["currency"]
class CreditCard(object):
def __init__(self, data):
self.token = data["token"]
self.card_number = data["card_number"]
self.card_holder = data["card_holder"]
self.expiry = data["card_expiry"]
class Refund(object):
def __init__(self, data):
self.id = data["id"]
self.authorization = data["authorization"]
self.amount = data["amount"]
self.successful = data["successful"]
self.message = data["message"]
| mit | -7,850,893,359,303,738,000 | 38.588235 | 56 | 0.536404 | false |
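These classes are thin wrappers over gateway response dictionaries. A hypothetical, abbreviated response payload is enough to exercise Purchase; the field values below are invented for illustration only:

sample_response = {
    "id": "001-P-12345AA",
    "authorization": "55355",
    "successful": True,
    "card_holder": "Jane Citizen",
    "card_number": "512345XXXXXX2346",
    "card_expiry": "05/2023",
    "card_token": "a1bhj98j",
    "message": "Approved",
    "amount": 1000,  # amount in cents
    "reference": "ORDER-1",
    "settlement_date": "2013-05-02",
    "transaction_date": "2013-05-01",
    "response_code": "00",
    "captured": True,
    "currency": "AUD",
}
purchase = Purchase(sample_response)
print(purchase.reference, purchase.amount, purchase.successful)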
Rastii/pydev_docker | pydev_docker/cli/parser.py | 1 | 12905 | from typing import Optional, Sequence, Mapping, List, NamedTuple
import enum
import argparse
import yaml
from pydev_docker import utils
from pydev_docker import options
from pydev_docker import models
from pydev_docker import container
class ParseError(Exception):
"""
Generic exception related to parsing options
"""
class InvalidOption(ParseError):
"""
Specified option was invalid
"""
class InvalidVolume(InvalidOption):
"""
Specified volume was invalid
"""
class InvalidPort(InvalidOption):
"""
Specified port(s) was invalid
"""
class Command(enum.IntEnum):
RUN = 1
RUN_PTY = 2
def __str__(self):
return self.name.lower()
class Verbosity(enum.IntEnum):
DEBUG = 1
INFO = 2
WARN = 3
Arguments = NamedTuple(
"Arguments",
[
("command", Command),
("verbosity", Verbosity),
("container_options", options.ContainerOptions),
]
)
class YamlParserAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
try:
with open(values) as fp:
config = yaml.load(fp)
except (IOError, yaml.YAMLError) as e:
raise argparse.ArgumentError(self, "Unable to parse YML config: {}".format(e))
setattr(namespace, self.dest, config)
class DirectoryAction(argparse.Action):
"""
Action will expand a specified path and ensure that it is a valid directory
"""
def __call__(self, parser, namespace, values, option_string=None):
if not values:
values = self.default
try:
full_path = utils.get_full_directory_path(values)
except utils.InvalidDirectory:
raise argparse.ArgumentError(self, "Path '{}' is not a valid directory".format(values))
setattr(namespace, self.dest, full_path)
def volume_from_str(volume_str: str) -> models.Volume:
split = volume_str.split(":", 2)
if len(split) < 2 or len(split) > 3:
raise InvalidVolume(
"Specified volume: {} was invalid, must be in the "
"form of HOST:CONTAINER[:MODE]".format(volume_str)
)
if len(split) == 2:
# Default volume mode is RW
# Note: to keep mypy happy, I had to do this instead of re-assigning mode
split.append("RW")
try:
mode = models.VolumeMode[split[2].upper()] # type: models.VolumeMode
except KeyError:
raise InvalidVolume(
"Specified mode: {} is invalid, "
"must be one of {}".format(split[2], models.available_volume_modes())
)
return models.Volume(
host_location=utils.get_full_directory_path(split[0]),
container_location=split[1],
mode=mode,
)
def port_from_str(port_str: str) -> models.Port:
try:
split = [int(i) for i in port_str.split(":", 2)]
except ValueError:
raise InvalidPort("Port(s) must be valid integers")
if any(i < 0 or i > 65535 for i in split):
raise InvalidPort("Port range must be from 1-65535")
if len(split) == 1:
split.extend(split)
return models.Port(*split)
def environments_from_dict(environment_dict: Mapping) -> List[models.Environment]:
return [models.Environment(k, v) for k, v in environment_dict.items()]
def _expand_py_paths(path_list: List[str]) -> List[str]:
expanded_paths = []
for path in path_list:
# Attempt to convert the path into a full absolute path
try:
full_path = utils.get_full_directory_path(path)
except utils.InvalidDirectory as e:
raise InvalidOption(e)
# Ensure the specified path is a python package
if not utils.is_python_package_dir(full_path):
raise InvalidOption(
"Path: {} is not a valid python package. "
"Missing expected __init__.py file".format(full_path))
expanded_paths.append(full_path)
return expanded_paths
def parse_yml_file(yml_data: Mapping) -> dict:
# TODO: It would be nice to just re-use the docker-compose parsing methods for this
parsed_options = {}
if "python_packages" in yml_data:
yml_data_py_modules = yml_data["python_packages"]
# Load any options, if they exist
if "container_directory" in yml_data_py_modules:
parsed_options["pypath_directory"] = yml_data_py_modules["container_directory"]
# Load any paths if they exist
if "paths" in yml_data_py_modules:
parsed_options["py_volumes"] = _expand_py_paths(yml_data_py_modules["paths"])
if "docker_options" in yml_data:
yml_data_docker_opts = yml_data["docker_options"]
if "environment" in yml_data_docker_opts:
parsed_options["environment_variables"] = (
environments_from_dict(yml_data_docker_opts["environment"])
)
if "network" in yml_data_docker_opts:
parsed_options["network"] = yml_data_docker_opts["network"]
if "volumes" in yml_data_docker_opts:
parsed_options["ext_volumes"] = [
volume_from_str(v) for v in yml_data_docker_opts["volumes"]
]
if "ports" in yml_data_docker_opts:
parsed_options["ports"] = [
port_from_str(str(p)) for p in yml_data_docker_opts["ports"]
]
return parsed_options
def options_from_args_namespace(args: argparse.Namespace) -> options.ContainerOptions:
kwargs_options = {
"image": args.image,
"source_directory": args.directory,
"command": getattr(args, "command", None),
"remove_container": not args.keep,
}
if args.config:
kwargs_options.update(parse_yml_file(args.config))
# The following CLI args overwrite YML config args
if args.py_packages:
kwargs_options["py_volumes"] = _expand_py_paths(args.py_packages)
if args.network:
kwargs_options["network"] = args.network
if args.ports:
kwargs_options["ports"] = [port_from_str(p) for p in args.ports]
return options.ContainerOptions(**kwargs_options)
def verbosity_from_int(verbosity_int: int) -> Verbosity:
return {
0: Verbosity.WARN,
1: Verbosity.INFO,
}.get(verbosity_int, Verbosity.DEBUG)
def add_run_command_args(sub_parser: argparse.ArgumentParser):
sub_parser.add_argument("image", type=str,
help="The docker image to use")
sub_parser.add_argument("command", type=str,
help="The command to run on the container")
sub_parser.add_argument("directory", nargs="?", type=str, default=".",
action=DirectoryAction,
help="Specify the directory in which the main python package "
"that is being developed is located in. Defaults to the "
"current directory")
def add_run_pty_command_args(sub_parser: argparse.ArgumentParser):
default_pty_cmd = container.PyDevContainer.DEFAULT_PTY_COMMAND
sub_parser.add_argument("-c", "--command", type=str,
default=default_pty_cmd,
help="The command to run that spawns a shell, "
"defaults to {}".format(default_pty_cmd))
sub_parser.add_argument("image", type=str,
help="The docker image to use")
sub_parser.add_argument("directory", nargs="?", type=str, default=".",
action=DirectoryAction,
help="Specify the directory in which the main python package "
"that is being developed is located in. Defaults to the "
"current directory")
_EPILOG = """
EXAMPLES
--------
Run a command by mounting the current directory as the source, using the "py3_dev"
docker image, and mounting an additional "NetworkPackage" python package:
%(prog)s run -g ~/Projects/NetworkPackage py3_dev "python3 setup.py test"
Spawn an interactive shell using the "py3_dev" image on the current directory:
%(prog)s run_pty py3_dev
CONFIG DOCUMENTATION
--------------------
The following describes the documentation for the configurations expected in the YML file
when using the "--config" command:
The **python_packages** section supports the following settings:
- **container_directory** (*string*): The directory in which the additional python packages
will be mounted to in the docker container. Defaults to ``/pypath``.
- **paths** (*list*): A list of paths that are python packages. Note that this *must* be
a path of a python package (contains __init__.py).
The **docker_options** section attempts to closely mimic the syntax of docker-compose files.
It contains the following settings:
- **environment** (*dictionary*): Specifies the environment variables that will be configured
on the docker container. Note that ``PYTHONPATH`` will be automatically configured
and should **not** be used here.
- **network** (*string*): Specifies a network to connect the container to. Defaults
to the default bridge network.
- **ports** (*list*): Specifies a list of HOST_PORT[:CONTAINER_PORT] port mappings where
HOST_PORT is the port that will be opened on the host and the CONTAINER_PORT is the port
that will be opened on the container.
- **volumes** (*list*): List of ``HOST_LOCATION:CONTAINER_LOCATION[:MODE]`` strings where
HOST_LOCATION is the location of the volume on the host, CONTAINER_LOCATION is where
to mount the volume on the container and MODE specifies the mount mode of the volume
ro (Read-Only) or rw (Read-Write) -- defaults to "rw".
""".strip()
def parse_args(args: Optional[Sequence]=None) -> Arguments:
"""
Parses arguments from the `args` specified, or `sys.argv` if not specified, and returns
the parsed command, verbosity level and container options.
Args:
args: Optional list of arguments to parse in the format of command line arguments
Returns:
An `Arguments` named tuple of the command that was specified, the verbosity level and the container options
"""
parser = argparse.ArgumentParser(
epilog=_EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter,
allow_abbrev=False,
)
parser.add_argument("-v", "--verbose", action="count", default=0,
help="When specified, additional logging messages will be displayed to"
"STDOUT. When specified twice, debugging logs will be included")
parser.add_argument("--config", type=str,
default=None, action=YamlParserAction)
parser.add_argument("--keep", action="store_true", default=False,
help="Keep the container after running a command. "
"The default behavior is to remove the container "
"after the command has been ran")
parser.add_argument("-g", "--py-packages", dest="py_packages", action="append",
help="Specify directories to mount on the container and append "
"the said directory to the $PYTHONPATH environment variable of "
"the container")
parser.add_argument("--network", dest="network", type=str,
help="Specify the network of the container")
parser.add_argument("-p", "--publish", dest="ports", type=str, action="append",
help="Publish a container's port(s) to the host using the following "
"syntax: PORT[:CONTAINER_PORT] where PORT is the port that will "
"be published on the host and CONTAINER_PORT is the optional port "
"of the container")
subparsers = parser.add_subparsers(dest="docker_command")
# Cannot set this attribute in the above function :-(
subparsers.required = True # type: ignore
# Parser for the "run" command
run_parser = subparsers.add_parser(
str(Command.RUN),
help="Create a container and run a command",
)
add_run_command_args(run_parser)
# Parser for the "run pty" command
run_pty_parser = subparsers.add_parser(
str(Command.RUN_PTY),
help="Create a container and spawn an interactive shell on the container",
)
add_run_pty_command_args(run_pty_parser)
args_namespace = parser.parse_args(args=args)
command_type = Command[args_namespace.docker_command.upper()] # type: Command
return Arguments(
command=command_type,
verbosity=verbosity_from_int(args_namespace.verbose),
container_options=options_from_args_namespace(args_namespace),
)
| mit | -7,013,473,686,265,655,000 | 35.148459 | 99 | 0.620767 | false |
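A hedged usage sketch of parse_args mirroring the CLI examples in the epilog above; "py3_dev" is a placeholder image name, and the attributes shown come from the Arguments named tuple defined in this module:

# Equivalent of:  %(prog)s run py3_dev "python3 setup.py test"
arguments = parse_args(["run", "py3_dev", "python3 setup.py test"])
assert arguments.command is Command.RUN
print(arguments.verbosity)          # Verbosity.WARN unless -v/-vv was supplied
print(arguments.container_options)  # options.ContainerOptions built from the CLI arguments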
dermoth/gramps | gramps/gui/widgets/grampletbar.py | 2 | 27421 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2011 Nick Hall
# Copyright (C) 2011 Gary Burton
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Module that implements the gramplet bar functionality.
"""
#-------------------------------------------------------------------------
#
# Set up logging
#
#-------------------------------------------------------------------------
import logging
LOG = logging.getLogger('.grampletbar')
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import time
import os
import configparser
#-------------------------------------------------------------------------
#
# GNOME modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.const import URL_MANUAL_PAGE, VERSION_DIR
from gramps.gen.config import config
from gramps.gen.constfunc import win
from ..managedwindow import ManagedWindow
from ..display import display_help, display_url
from .grampletpane import (AVAILABLE_GRAMPLETS,
GET_AVAILABLE_GRAMPLETS,
GET_GRAMPLET_LIST,
get_gramplet_opts,
get_gramplet_options_by_name,
make_requested_gramplet,
GuiGramplet)
from .undoablebuffer import UndoableBuffer
from ..utils import is_right_click
from ..dialog import QuestionDialog
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
WIKI_HELP_PAGE = URL_MANUAL_PAGE + '_-_Gramplets'
NL = "\n"
#-------------------------------------------------------------------------
#
# GrampletBar class
#
#-------------------------------------------------------------------------
class GrampletBar(Gtk.Notebook):
"""
A class which defines the graphical representation of the GrampletBar.
"""
def __init__(self, dbstate, uistate, pageview, configfile, defaults):
Gtk.Notebook.__init__(self)
self.dbstate = dbstate
self.uistate = uistate
self.pageview = pageview
self.configfile = os.path.join(VERSION_DIR, "%s.ini" % configfile)
self.defaults = defaults
self.detached_gramplets = []
self.empty = False
self.close_buttons = []
self.set_group_name("grampletbar")
self.set_show_border(False)
self.set_scrollable(True)
book_button = Gtk.Button()
# Arrow is too small unless in a box
box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
arrow = Gtk.Arrow(arrow_type=Gtk.ArrowType.DOWN,
shadow_type=Gtk.ShadowType.NONE)
arrow.show()
box.add(arrow)
box.show()
book_button.add(box)
book_button.set_relief(Gtk.ReliefStyle.NONE)
book_button.connect('clicked', self.__button_clicked)
book_button.show()
self.set_action_widget(book_button, Gtk.PackType.END)
self.connect('page-added', self.__page_added)
self.connect('page-removed', self.__page_removed)
self.connect('create-window', self.__create_window)
config_settings, opts_list = self.__load(defaults)
opts_list.sort(key=lambda opt: opt["page"])
for opts in opts_list:
if opts["name"] in AVAILABLE_GRAMPLETS():
all_opts = get_gramplet_opts(opts["name"], opts)
gramplet = make_requested_gramplet(TabGramplet, self, all_opts,
self.dbstate, self.uistate)
if gramplet:
self.__add_tab(gramplet)
if len(opts_list) == 0:
self.empty = True
self.__create_empty_tab()
if config_settings[0]:
self.show()
self.set_current_page(config_settings[1])
uistate.connect('grampletbar-close-changed', self.cb_close_changed)
# Connect after gramplets added to prevent making them active
self.connect('switch-page', self.__switch_page)
def _get_config_setting(self, configparser, section, setting, fn=None):
"""
Get a section.setting value from the config parser.
Takes a configparser instance, a section, a setting, and
optionally a post-processing function (typically int).
Always returns a value of the appropriate type.
"""
value = ""
try:
value = configparser.get(section, setting)
value = value.strip()
if fn:
value = fn(value)
except:
if fn:
value = fn()
else:
value = ""
return value
def __load(self, defaults):
"""
Load the gramplets from the configuration file.
"""
retval = []
visible = True
default_page = 0
filename = self.configfile
if filename and os.path.exists(filename):
cp = configparser.ConfigParser()
try:
cp.read(filename, encoding='utf-8')
except:
pass
for sec in cp.sections():
if sec == "Bar Options":
if "visible" in cp.options(sec):
visible = self._get_config_setting(cp, sec, "visible") == "True"
if "page" in cp.options(sec):
default_page = self._get_config_setting(cp, sec, "page", int)
else:
data = {}
for opt in cp.options(sec):
if opt.startswith("data["):
temp = data.get("data", {})
#temp.append(self._get_config_setting(cp, sec, opt))
pos = int(opt[5:-1])
temp[pos] = self._get_config_setting(cp, sec, opt)
data["data"] = temp
else:
data[opt] = self._get_config_setting(cp, sec, opt)
if "data" in data:
data["data"] = [data["data"][key]
for key in sorted(data["data"].keys())]
if "name" not in data:
data["name"] = "Unnamed Gramplet"
data["tname"] = _("Unnamed Gramplet")
retval.append(data)
else:
# give defaults as currently known
for name in defaults:
if name in AVAILABLE_GRAMPLETS():
retval.append(GET_AVAILABLE_GRAMPLETS(name))
return ((visible, default_page), retval)
def __save(self):
"""
Save the gramplet configuration.
"""
filename = self.configfile
try:
with open(filename, "w", encoding='utf-8') as fp:
fp.write(";; Gramplet bar configuration file" + NL)
fp.write((";; Automatically created at %s" %
time.strftime("%Y/%m/%d %H:%M:%S")) + NL + NL)
fp.write("[Bar Options]" + NL)
fp.write(("visible=%s" + NL) % self.get_property('visible'))
fp.write(("page=%d" + NL) % self.get_current_page())
fp.write(NL)
if self.empty:
gramplet_list = []
else:
gramplet_list = [self.get_nth_page(page_num)
for page_num in range(self.get_n_pages())]
for page_num, gramplet in enumerate(gramplet_list):
opts = get_gramplet_options_by_name(gramplet.gname)
if opts is not None:
base_opts = opts.copy()
for key in base_opts:
if key in gramplet.__dict__:
base_opts[key] = gramplet.__dict__[key]
fp.write(("[%s]" + NL) % gramplet.gname)
for key in base_opts:
if key in ["content", "title", "tname", "row", "column",
"page", "version", "gramps"]: # don't save
continue
elif key == "data":
if not isinstance(base_opts["data"], (list, tuple)):
fp.write(("data[0]=%s" + NL) % base_opts["data"])
else:
cnt = 0
for item in base_opts["data"]:
fp.write(("data[%d]=%s" + NL) % (cnt, item))
cnt += 1
else:
fp.write(("%s=%s" + NL)% (key, base_opts[key]))
fp.write(("page=%d" + NL) % page_num)
fp.write(NL)
except IOError:
LOG.warning("Failed writing '%s'; gramplets not saved" % filename)
return
def set_active(self):
"""
Called with the view is set as active.
"""
if not self.empty:
gramplet = self.get_nth_page(self.get_current_page())
if gramplet and gramplet.pui:
gramplet.pui.active = True
if gramplet.pui.dirty:
gramplet.pui.update()
def set_inactive(self):
"""
Called with the view is set as inactive.
"""
if not self.empty:
gramplet = self.get_nth_page(self.get_current_page())
if gramplet and gramplet.pui:
gramplet.pui.active = False
def on_delete(self):
"""
Called when the view is closed.
"""
list(map(self.__dock_gramplet, self.detached_gramplets))
if not self.empty:
for page_num in range(self.get_n_pages()):
gramplet = self.get_nth_page(page_num)
# this is the only place where the gui runs user code directly
if gramplet.pui:
gramplet.pui.on_save()
self.__save()
def add_gramplet(self, gname):
"""
Add a gramplet by name.
"""
if self.has_gramplet(gname):
return
all_opts = get_gramplet_options_by_name(gname)
gramplet = make_requested_gramplet(TabGramplet, self, all_opts,
self.dbstate, self.uistate)
if not gramplet:
LOG.warning("Problem creating '%s'", gname)
return
page_num = self.__add_tab(gramplet)
self.set_current_page(page_num)
def remove_gramplet(self, gname):
"""
Remove a gramplet by name.
"""
for gramplet in self.detached_gramplets:
if gramplet.gname == gname:
self.__dock_gramplet(gramplet)
self.remove_page(self.page_num(gramplet))
return
for page_num in range(self.get_n_pages()):
gramplet = self.get_nth_page(page_num)
if gramplet.gname == gname:
self.remove_page(page_num)
return
def has_gramplet(self, gname):
"""
Return True if the GrampletBar contains the gramplet, else False.
"""
return gname in self.all_gramplets()
def all_gramplets(self):
"""
Return a list of names of all the gramplets in the GrampletBar.
"""
if self.empty:
return self.detached_gramplets
else:
return [gramplet.gname for gramplet in self.get_children() +
self.detached_gramplets]
def restore(self):
"""
Restore the GrampletBar to its default gramplets.
"""
list(map(self.remove_gramplet, self.all_gramplets()))
list(map(self.add_gramplet, self.defaults))
self.set_current_page(0)
def __create_empty_tab(self):
"""
Create an empty tab to be displayed when the GrampletBar is empty.
"""
tab_label = Gtk.Label(label=_('Gramplet Bar'))
tab_label.show()
msg = _('Select the down arrow on the right corner for adding, removing or restoring gramplets.')
content = Gtk.Label(label=msg)
content.set_halign(Gtk.Align.START)
content.set_line_wrap(True)
content.set_size_request(150, -1)
content.show()
self.append_page(content, tab_label)
return content
def __add_tab(self, gramplet):
"""
Add a tab to the notebook for the given gramplet.
"""
width = -1 # Allow tab width to adjust (smaller) to sidebar
height = min(int(self.uistate.screen_height() * 0.20), 400)
gramplet.set_size_request(width, height)
label = self.__create_tab_label(gramplet)
page_num = self.append_page(gramplet, label)
return page_num
def __create_tab_label(self, gramplet):
"""
Create a tab label consisting of a label and a close button.
"""
tablabel = TabLabel(gramplet, self.__delete_clicked)
if hasattr(gramplet.pui, "has_data"):
tablabel.set_has_data(gramplet.pui.has_data)
else: # just a function; always show yes it has data
tablabel.set_has_data(True)
if config.get('interface.grampletbar-close'):
tablabel.use_close(True)
else:
tablabel.use_close(False)
return tablabel
def cb_close_changed(self):
"""
Close button preference changed.
"""
for gramplet in self.get_children():
tablabel = self.get_tab_label(gramplet)
tablabel.use_close(config.get('interface.grampletbar-close'))
def __delete_clicked(self, button, gramplet):
"""
Called when the delete button is clicked.
"""
page_num = self.page_num(gramplet)
self.remove_page(page_num)
def __switch_page(self, notebook, unused, new_page):
"""
Called when the user has switched to a new GrampletBar page.
"""
old_page = notebook.get_current_page()
if old_page >= 0:
gramplet = self.get_nth_page(old_page)
if gramplet and gramplet.pui:
gramplet.pui.active = False
gramplet = self.get_nth_page(new_page)
if not self.empty:
if gramplet and gramplet.pui:
gramplet.pui.active = True
if gramplet.pui.dirty:
gramplet.pui.update()
def __page_added(self, notebook, unused, new_page):
"""
Called when a new page is added to the GrampletBar.
"""
gramplet = self.get_nth_page(new_page)
if self.empty:
if isinstance(gramplet, TabGramplet):
self.empty = False
if new_page == 0:
self.remove_page(1)
else:
self.remove_page(0)
else:
return
gramplet.pane = self
label = self.__create_tab_label(gramplet)
self.set_tab_label(gramplet, label)
self.set_tab_reorderable(gramplet, True)
self.set_tab_detachable(gramplet, True)
if gramplet in self.detached_gramplets:
self.detached_gramplets.remove(gramplet)
self.reorder_child(gramplet, gramplet.page)
def __page_removed(self, notebook, unused, page_num):
"""
Called when a page is removed to the GrampletBar.
"""
if self.get_n_pages() == 0:
self.empty = True
self.__create_empty_tab()
def __create_window(self, grampletbar, gramplet, x_pos, y_pos):
"""
Called when the user has switched to a new GrampletBar page.
"""
gramplet.page = self.page_num(gramplet)
self.detached_gramplets.append(gramplet)
win = DetachedWindow(grampletbar, gramplet, x_pos, y_pos)
gramplet.detached_window = win
return win.get_notebook()
def __dock_gramplet(self, gramplet):
"""
Dock a detached gramplet.
"""
gramplet.detached_window.close()
gramplet.detached_window = None
def __button_clicked(self, button):
"""
Called when the drop-down button is clicked.
"""
self.menu = Gtk.Menu()
menu = self.menu
ag_menu = Gtk.MenuItem(label=_('Add a gramplet'))
nav_type = self.pageview.navigation_type()
skip = self.all_gramplets()
gramplet_list = GET_GRAMPLET_LIST(nav_type, skip)
gramplet_list.sort()
self.__create_submenu(ag_menu, gramplet_list, self.__add_clicked)
ag_menu.show()
menu.append(ag_menu)
if not (self.empty or config.get('interface.grampletbar-close')):
rg_menu = Gtk.MenuItem(label=_('Remove a gramplet'))
gramplet_list = [(gramplet.title, gramplet.gname)
for gramplet in self.get_children() +
self.detached_gramplets]
gramplet_list.sort()
self.__create_submenu(rg_menu, gramplet_list,
self.__remove_clicked)
rg_menu.show()
menu.append(rg_menu)
rd_menu = Gtk.MenuItem(label=_('Restore default gramplets'))
rd_menu.connect("activate", self.__restore_clicked)
rd_menu.show()
menu.append(rd_menu)
menu.show_all()
menu.popup(None, None, cb_menu_position, button, 0, 0)
def __create_submenu(self, main_menu, gramplet_list, callback_func):
"""
Create a submenu of the context menu.
"""
if main_menu:
submenu = main_menu.get_submenu()
submenu = Gtk.Menu()
for entry in gramplet_list:
item = Gtk.MenuItem(label=entry[0])
item.connect("activate", callback_func, entry[1])
item.show()
submenu.append(item)
main_menu.set_submenu(submenu)
def __add_clicked(self, menu, gname):
"""
Called when a gramplet is added from the context menu.
"""
self.add_gramplet(gname)
def __remove_clicked(self, menu, gname):
"""
Called when a gramplet is removed from the context menu.
"""
self.remove_gramplet(gname)
def __restore_clicked(self, menu):
"""
Called when restore defaults is clicked from the context menu.
"""
QuestionDialog(
_("Restore to defaults?"),
_("The gramplet bar will be restored to contain its default "
"gramplets. This action cannot be undone."),
_("OK"),
self.restore,
parent=self.uistate.window)
def get_config_funcs(self):
"""
Return a list of configuration functions.
"""
funcs = []
if self.empty:
gramplets = []
else:
gramplets = self.get_children()
for gramplet in gramplets + self.detached_gramplets:
gui_options = gramplet.make_gui_options()
if gui_options:
funcs.append(self.__build_panel(gramplet.title, gui_options))
return funcs
def __build_panel(self, title, gui_options):
"""
Return a configuration function that returns the title of a page in
the Configure View dialog and a gtk container defining the page.
"""
def gramplet_panel(configdialog):
return title, gui_options
return gramplet_panel
#-------------------------------------------------------------------------
#
# TabGramplet class
#
#-------------------------------------------------------------------------
class TabGramplet(Gtk.ScrolledWindow, GuiGramplet):
"""
Class that handles the plugin interfaces for the GrampletBar.
"""
def __init__(self, pane, dbstate, uistate, title, **kwargs):
"""
Internal constructor for GUI portion of a gramplet.
"""
Gtk.ScrolledWindow.__init__(self)
GuiGramplet.__init__(self, pane, dbstate, uistate, title, **kwargs)
self.scrolledwindow = self
self.textview = Gtk.TextView()
self.textview.set_editable(False)
self.textview.set_wrap_mode(Gtk.WrapMode.WORD)
self.buffer = UndoableBuffer()
self.text_length = 0
self.textview.set_buffer(self.buffer)
self.textview.connect("key-press-event", self.on_key_press_event)
self.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
self.add(self.textview)
self.show_all()
self.track = []
def get_title(self):
return self.title
def get_container_widget(self):
"""
Return the top level container widget.
"""
return self
#-------------------------------------------------------------------------
#
# DetachedWindow class
#
#-------------------------------------------------------------------------
class DetachedWindow(ManagedWindow):
"""
Class for showing a detached gramplet.
"""
def __init__(self, grampletbar, gramplet, x_pos, y_pos):
"""
Construct the window.
"""
self.title = gramplet.title + " " + _("Gramplet")
self.grampletbar = grampletbar
self.gramplet = gramplet
ManagedWindow.__init__(self, gramplet.uistate, [],
self.title)
self.set_window(Gtk.Dialog("", gramplet.uistate.window,
Gtk.DialogFlags.DESTROY_WITH_PARENT,
(_('_Close'), Gtk.ResponseType.CLOSE)),
None,
self.title)
self.window.move(x_pos, y_pos)
self.window.set_default_size(gramplet.detached_width,
gramplet.detached_height)
self.window.add_button(_('_Help'), Gtk.ResponseType.HELP)
self.window.connect('response', self.handle_response)
self.notebook = Gtk.Notebook()
self.notebook.set_show_tabs(False)
self.notebook.set_show_border(False)
self.notebook.connect('page-added', self.page_added)
self.notebook.show()
self.window.vbox.pack_start(self.notebook, True, True, 0)
self.show()
def page_added(self, notebook, gramplet, page_num):
"""
Called when the gramplet is added to the notebook. This takes the
focus from the help button (bug #6306).
"""
gramplet.grab_focus()
def handle_response(self, object, response):
"""
Callback for taking care of button clicks.
"""
if response == Gtk.ResponseType.CLOSE:
self.close()
elif response == Gtk.ResponseType.HELP:
# translated name:
if self.gramplet.help_url:
if self.gramplet.help_url.startswith("http://"):
display_url(self.gramplet.help_url)
else:
display_help(self.gramplet.help_url)
else:
display_help(WIKI_HELP_PAGE,
self.gramplet.tname.replace(" ", "_"))
def get_notebook(self):
"""
Return the notebook.
"""
return self.notebook
def build_menu_names(self, obj):
"""
Part of the Gramps window interface.
"""
return (self.title, 'Gramplet')
def get_title(self):
"""
Returns the window title.
"""
return self.title
def close(self, *args):
"""
Dock the detached gramplet back in the GrampletBar from where it came.
"""
size = self.window.get_size()
self.gramplet.detached_width = size[0]
self.gramplet.detached_height = size[1]
self.gramplet.detached_window = None
self.gramplet.reparent(self.grampletbar)
ManagedWindow.close(self, *args)
#-------------------------------------------------------------------------
#
# TabLabel class
#
#-------------------------------------------------------------------------
class TabLabel(Gtk.Box):
"""
Create a tab label consisting of a label and a close button.
"""
def __init__(self, gramplet, callback):
Gtk.Box.__init__(self)
self.text = gramplet.title
self.set_spacing(4)
self.label = Gtk.Label()
self.label.set_tooltip_text(gramplet.tname)
self.label.show()
self.closebtn = Gtk.Button()
image = Gtk.Image()
image.set_from_icon_name('window-close', Gtk.IconSize.MENU)
self.closebtn.connect("clicked", callback, gramplet)
self.closebtn.set_image(image)
self.closebtn.set_relief(Gtk.ReliefStyle.NONE)
self.pack_start(self.label, True, True, 0)
self.pack_end(self.closebtn, False, False, 0)
def set_has_data(self, has_data):
"""
Set the label to indicate if the gramplet has data.
"""
if has_data:
self.label.set_text("<b>%s</b>" % self.text)
self.label.set_use_markup(True)
else:
self.label.set_text(self.text)
def use_close(self, use_close):
"""
Display the close button according to user preference.
"""
if use_close:
self.closebtn.show()
else:
self.closebtn.hide()
def cb_menu_position(*args):
"""
Determine the position of the popup menu.
"""
# takes two argument: menu, button
if len(args) == 2:
menu = args[0]
button = args[1]
# broken introspection can't handle MenuPositionFunc annotations correctly
else:
menu = args[0]
button = args[3]
ret_val, x_pos, y_pos = button.get_window().get_origin()
x_pos += button.get_allocation().x
y_pos += button.get_allocation().y + button.get_allocation().height
return (x_pos, y_pos, False)
| gpl-2.0 | 7,874,076,515,296,342,000 | 34.657997 | 105 | 0.51559 | false |
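The bar persists its layout through configparser in the format produced by __save() above. A small round-trip sketch of that file layout; the section and option names come from the code, while the gramplet name and data value are hypothetical:

import configparser

sample_ini = """
[Bar Options]
visible=True
page=0
[Age on Date]
data[0]=2000-01-01
page=0
"""
cp = configparser.ConfigParser()
cp.read_string(sample_ini)
print(cp.get("Bar Options", "page"))                       # -> "0"
print([s for s in cp.sections() if s != "Bar Options"])    # -> per-gramplet sections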
vfulco/scalpel | lib/gravity/tae/stemmer/stemmer.py | 2 | 1299 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
from gravity.common.internal.misc import list_module_implementations, instantiate_module_implementation
import gravity.tae.stemmer.stemmer
class Stemmer(object):
def __init__(self, lang = "en"):
assert lang
self.lang = lang
def info(self): raise NotImplementedError()
@classmethod
def list(cls):
return list_module_implementations(cls)
@classmethod
def stemmer(cls, name, *args):
return instantiate_module_implementation(cls, name, *args)
| lgpl-3.0 | 9,037,986,856,933,213,000 | 37.235294 | 103 | 0.685912 | false |
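Concrete stemmers are discovered and created through the two classmethods. A hedged usage sketch: the implementation name "porter" is a placeholder (Stemmer.list() reports what is actually registered), and it assumes the chosen implementation keeps the base-class constructor defaults:

from gravity.tae.stemmer.stemmer import Stemmer

print(Stemmer.list())                # names of the registered implementations
stemmer = Stemmer.stemmer("porter")  # build one by name; extra args go to its constructor
print(stemmer.lang)                  # "en" by default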
mcgachey/edx-platform | pavelib/docs.py | 10 | 1942 | """
Open edX Documentation Builder
Ties into Sphinx to generate files at the specified location(s)
"""
from __future__ import print_function
import sys
from paver.easy import *
DOC_PATHS = {
"dev": "docs/en_us/developers",
"author": "docs/en_us/course_authors",
"data": "docs/en_us/data",
"default": "docs/en_us"
}
def valid_doc_types():
"""
Return a comma-separated string of valid doc types.
"""
return ", ".join(DOC_PATHS.keys())
def doc_path(options, allow_default=True):
"""
Parse `options` (from the Paver task args) to determine the path
to the documentation directory.
If the specified path is not one of the valid options, print an error
message and exit.
If `allow_default` is False, then require that a type is specified,
and exit with an error message if it isn't.
"""
doc_type = getattr(options, 'type', 'default')
path = DOC_PATHS.get(doc_type)
if doc_type == 'default' and not allow_default:
print(
"You must specify a documentation type using '--type'. "
"Valid options are: {options}".format(
options=valid_doc_types()
)
)
sys.exit(1)
if path is None:
print(
"Invalid documentation type '{doc_type}'. "
"Valid options are: {options}".format(
doc_type=doc_type, options=valid_doc_types()
)
)
sys.exit(1)
else:
return path
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
("type=", "t", "Type of docs to compile"),
("verbose", "v", "Display verbose output"),
])
def build_docs(options):
"""
Invoke sphinx 'make build' to generate docs.
"""
verbose = getattr(options, 'verbose', False)
cmd = "cd {dir}; make html quiet={quiet}".format(
dir=doc_path(options),
quiet="false" if verbose else "true"
)
sh(cmd)
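# Example invocation (a sketch; assumes paver is installed and the command is
# run from the repository root where pavelib is importable):
#
#   paver build_docs --type=dev --verbose
#
# Omitting --type compiles the default docs under docs/en_us.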
| agpl-3.0 | 5,751,067,414,969,878,000 | 24.220779 | 73 | 0.593718 | false |
ktan2020/legacy-automation | win/Lib/site-packages/wx-3.0-msw/wx/lib/plot.py | 1 | 98831 | #-----------------------------------------------------------------------------
# Name: wx.lib.plot.py
# Purpose: Line, Bar and Scatter Graphs
#
# Author: Gordon Williams
#
# Created: 2003/11/03
# RCS-ID: $Id$
# Copyright: (c) 2002
# Licence: Use as you wish.
#-----------------------------------------------------------------------------
# 12/15/2003 - Jeff Grimmett ([email protected])
#
# o 2.5 compatibility update.
# o Renamed to plot.py in the wx.lib directory.
# o Reworked test frame to work with wx demo framework. This saves a bit
# of tedious cut and paste, and the test app is excellent.
#
# 12/18/2003 - Jeff Grimmett ([email protected])
#
# o wxScrolledMessageDialog -> ScrolledMessageDialog
#
# Oct 6, 2004 Gordon Williams ([email protected])
# - Added bar graph demo
# - Modified line end shape from round to square.
# - Removed FloatDCWrapper for conversion to ints and ints in arguments
#
# Oct 15, 2004 Gordon Williams ([email protected])
# - Imported modules given leading underscore to name.
# - Added Cursor Line Tracking and User Point Labels.
# - Demo for Cursor Line Tracking and Point Labels.
# - Size of plot preview frame adjusted to show page better.
# - Added helper functions PositionUserToScreen and PositionScreenToUser in PlotCanvas.
# - Added functions GetClosestPoints (all curves) and GetClosestPoint (only closest curve)
# can be in either user coords or screen coords.
#
# Jun 22, 2009 Florian Hoech ([email protected])
# - Fixed exception when drawing empty plots on Mac OS X
# - Fixed exception when trying to draw point labels on Mac OS X (Mac OS X
# point label drawing code is still slow and only supports wx.COPY)
# - Moved label positions away from axis lines a bit
# - Added PolySpline class and modified demo 1 and 2 to use it
# - Added center and diagonal lines option (Set/GetEnableCenterLines,
# Set/GetEnableDiagonals)
# - Added anti-aliasing option with optional high-resolution mode
# (Set/GetEnableAntiAliasing, Set/GetEnableHiRes) and demo
# - Added option to specify exact number of tick marks to use for each axis
# (SetXSpec(<number>, SetYSpec(<number>) -- work like 'min', but with
# <number> tick marks)
# - Added support for background and foreground colours (enabled via
# SetBackgroundColour/SetForegroundColour on a PlotCanvas instance)
# - Changed PlotCanvas printing initialization from occurring in __init__ to
# occur on access. This will postpone any IPP and / or CUPS warnings
# which appear on stderr on some Linux systems until printing functionality
# is actually used.
#
#
"""
This is a simple lightweight plotting module that can be used with
Boa or easily integrated into your own wxPython application. The
emphasis is on small size and fast plotting for large data sets. It
has a reasonable number of features to do line and scatter graphs
easily as well as simple bar graphs. It is not as sophisticated or
as powerful as SciPy Plt or Chaco. Both of these are great packages
but consume huge amounts of computer resources for simple plots.
They can be found at http://scipy.com
This file contains two parts; first the re-usable library stuff, then,
after a "if __name__=='__main__'" test, a simple frame and a few default
plots for examples and testing.
Based on wxPlotCanvas
Written by K.Hinsen, R. Srinivasan;
Ported to wxPython Harm van der Heijden, feb 1999
Major Additions Gordon Williams Feb. 2003 ([email protected])
-More style options
-Zooming using mouse "rubber band"
-Scroll left, right
-Grid(graticule)
-Printing, preview, and page set up (margins)
-Axis and title labels
-Cursor xy axis values
-Doc strings and lots of comments
-Optimizations for large number of points
-Legends
Did a lot of work here to speed markers up. Only a factor of 4
improvement though. Lines are much faster than markers, especially
filled markers. Stay away from circles and triangles unless you
only have a few thousand points.
Times for 25,000 points
Line - 0.078 sec
Markers
Square - 0.22 sec
dot - 0.10
circle - 0.87
cross,plus - 0.28
triangle, triangle_down - 0.90
Thanks to Chris Barker for getting this version working on Linux.
Zooming controls with mouse (when enabled):
Left mouse drag - Zoom box.
Left mouse double click - reset zoom.
Right mouse click - zoom out centred on click location.
"""
import string as _string
import time as _time
import sys
import wx
# Needs NumPy
try:
import numpy.oldnumeric as _Numeric
except:
msg= """
This module requires the NumPy module, which could not be
imported. It probably is not installed (it's not part of the
standard Python distribution). See the Numeric Python site
(http://numpy.scipy.org) for information on downloading source or
binaries."""
raise ImportError, "NumPy not found.\n" + msg
#
# Plotting classes...
#
class PolyPoints:
"""Base Class for lines and markers
- All methods are private.
"""
def __init__(self, points, attr):
self._points = _Numeric.array(points).astype(_Numeric.Float64)
self._logscale = (False, False)
self._pointSize = (1.0, 1.0)
self.currentScale= (1,1)
self.currentShift= (0,0)
self.scaled = self.points
self.attributes = {}
self.attributes.update(self._attributes)
for name, value in attr.items():
if name not in self._attributes.keys():
raise KeyError, "Style attribute incorrect. Should be one of %s" % self._attributes.keys()
self.attributes[name] = value
def setLogScale(self, logscale):
self._logscale = logscale
def __getattr__(self, name):
if name == 'points':
if len(self._points)>0:
data = _Numeric.array(self._points,copy=True)
if self._logscale[0]:
data = self.log10(data, 0)
if self._logscale[1]:
data = self.log10(data, 1)
return data
else:
return self._points
else:
raise AttributeError, name
def log10(self, data, ind):
data = _Numeric.compress(data[:,ind]>0,data,0)
data[:,ind] = _Numeric.log10(data[:,ind])
return data
def boundingBox(self):
if len(self.points) == 0:
# no curves to draw
# defaults to (-1,-1) and (1,1) but axis can be set in Draw
minXY= _Numeric.array([-1.0,-1.0])
maxXY= _Numeric.array([ 1.0, 1.0])
else:
minXY= _Numeric.minimum.reduce(self.points)
maxXY= _Numeric.maximum.reduce(self.points)
return minXY, maxXY
def scaleAndShift(self, scale=(1,1), shift=(0,0)):
if len(self.points) == 0:
# no curves to draw
return
if (scale is not self.currentScale) or (shift is not self.currentShift):
# update point scaling
self.scaled = scale*self.points+shift
self.currentScale= scale
self.currentShift= shift
# else unchanged use the current scaling
def getLegend(self):
return self.attributes['legend']
def getClosestPoint(self, pntXY, pointScaled= True):
"""Returns the index of closest point on the curve, pointXY, scaledXY, distance
x, y in user coords
if pointScaled == True based on screen coords
if pointScaled == False based on user coords
"""
if pointScaled == True:
#Using screen coords
p = self.scaled
pxy = self.currentScale * _Numeric.array(pntXY)+ self.currentShift
else:
#Using user coords
p = self.points
pxy = _Numeric.array(pntXY)
#determine distance for each point
d= _Numeric.sqrt(_Numeric.add.reduce((p-pxy)**2,1)) #sqrt(dx^2+dy^2)
pntIndex = _Numeric.argmin(d)
dist = d[pntIndex]
return [pntIndex, self.points[pntIndex], self.scaled[pntIndex] / self._pointSize, dist]
class PolyLine(PolyPoints):
"""Class to define line type and style
- All methods except __init__ are private.
"""
_attributes = {'colour': 'black',
'width': 1,
'style': wx.SOLID,
'legend': ''}
def __init__(self, points, **attr):
"""
Creates PolyLine object
:param `points`: sequence (array, tuple or list) of (x,y) points making up line
:keyword `attr`: keyword attributes, default to:
========================== ================================
'colour'= 'black' wx.Pen Colour any wx.NamedColour
'width'= 1 Pen width
'style'= wx.SOLID wx.Pen style
'legend'= '' Line Legend to display
========================== ================================
"""
PolyPoints.__init__(self, points, attr)
def draw(self, dc, printerScale, coord= None):
colour = self.attributes['colour']
width = self.attributes['width'] * printerScale * self._pointSize[0]
style= self.attributes['style']
if not isinstance(colour, wx.Colour):
colour = wx.NamedColour(colour)
pen = wx.Pen(colour, width, style)
pen.SetCap(wx.CAP_BUTT)
dc.SetPen(pen)
if coord == None:
if len(self.scaled): # bugfix for Mac OS X
dc.DrawLines(self.scaled)
else:
dc.DrawLines(coord) # draw legend line
def getSymExtent(self, printerScale):
"""Width and Height of Marker"""
h= self.attributes['width'] * printerScale * self._pointSize[0]
w= 5 * h
return (w,h)
class PolySpline(PolyLine):
"""Class to define line type and style
- All methods except __init__ are private.
"""
_attributes = {'colour': 'black',
'width': 1,
'style': wx.SOLID,
'legend': ''}
def __init__(self, points, **attr):
"""
        Creates PolySpline object
:param `points`: sequence (array, tuple or list) of (x,y) points making up spline
:keyword `attr`: keyword attributes, default to:
========================== ================================
'colour'= 'black' wx.Pen Colour any wx.NamedColour
'width'= 1 Pen width
'style'= wx.SOLID wx.Pen style
'legend'= '' Line Legend to display
========================== ================================
"""
PolyLine.__init__(self, points, **attr)
def draw(self, dc, printerScale, coord= None):
colour = self.attributes['colour']
width = self.attributes['width'] * printerScale * self._pointSize[0]
style= self.attributes['style']
if not isinstance(colour, wx.Colour):
colour = wx.NamedColour(colour)
pen = wx.Pen(colour, width, style)
pen.SetCap(wx.CAP_ROUND)
dc.SetPen(pen)
if coord == None:
if len(self.scaled): # bugfix for Mac OS X
dc.DrawSpline(self.scaled)
else:
dc.DrawLines(coord) # draw legend line
class PolyMarker(PolyPoints):
"""Class to define marker type and style
- All methods except __init__ are private.
"""
_attributes = {'colour': 'black',
'width': 1,
'size': 2,
'fillcolour': None,
'fillstyle': wx.SOLID,
'marker': 'circle',
'legend': ''}
def __init__(self, points, **attr):
"""
Creates PolyMarker object
:param `points`: sequence (array, tuple or list) of (x,y) points
:keyword `attr`: keyword attributes, default to:
============================ ================================
'colour'= 'black' wx.Pen Colour any wx.NamedColour
'width'= 1 Pen width
'size'= 2 Marker size
'fillcolour'= same as colour wx.Brush Colour any wx.NamedColour
'fillstyle'= wx.SOLID wx.Brush fill style (use wx.TRANSPARENT for no fill)
'style'= wx.SOLID wx.Pen style
'marker'= 'circle' Marker shape
'legend'= '' Line Legend to display
============================ ================================
Marker Shapes:
- 'circle'
- 'dot'
- 'square'
- 'triangle'
- 'triangle_down'
- 'cross'
- 'plus'
"""
PolyPoints.__init__(self, points, attr)
def draw(self, dc, printerScale, coord= None):
colour = self.attributes['colour']
width = self.attributes['width'] * printerScale * self._pointSize[0]
size = self.attributes['size'] * printerScale * self._pointSize[0]
fillcolour = self.attributes['fillcolour']
fillstyle = self.attributes['fillstyle']
marker = self.attributes['marker']
if colour and not isinstance(colour, wx.Colour):
colour = wx.NamedColour(colour)
if fillcolour and not isinstance(fillcolour, wx.Colour):
fillcolour = wx.NamedColour(fillcolour)
dc.SetPen(wx.Pen(colour, width))
if fillcolour:
dc.SetBrush(wx.Brush(fillcolour,fillstyle))
else:
dc.SetBrush(wx.Brush(colour, fillstyle))
if coord == None:
if len(self.scaled): # bugfix for Mac OS X
self._drawmarkers(dc, self.scaled, marker, size)
else:
self._drawmarkers(dc, coord, marker, size) # draw legend marker
def getSymExtent(self, printerScale):
"""Width and Height of Marker"""
s= 5*self.attributes['size'] * printerScale * self._pointSize[0]
return (s,s)
def _drawmarkers(self, dc, coords, marker,size=1):
        # look up the marker-drawing method (_circle, _square, ...) by name
        f = getattr(self, '_' + marker)
        f(dc, coords, size)
def _circle(self, dc, coords, size=1):
fact= 2.5*size
wh= 5.0*size
rect= _Numeric.zeros((len(coords),4),_Numeric.Float)+[0.0,0.0,wh,wh]
rect[:,0:2]= coords-[fact,fact]
dc.DrawEllipseList(rect.astype(_Numeric.Int32))
def _dot(self, dc, coords, size=1):
dc.DrawPointList(coords)
def _square(self, dc, coords, size=1):
fact= 2.5*size
wh= 5.0*size
rect= _Numeric.zeros((len(coords),4),_Numeric.Float)+[0.0,0.0,wh,wh]
rect[:,0:2]= coords-[fact,fact]
dc.DrawRectangleList(rect.astype(_Numeric.Int32))
def _triangle(self, dc, coords, size=1):
shape= [(-2.5*size,1.44*size), (2.5*size,1.44*size), (0.0,-2.88*size)]
poly= _Numeric.repeat(coords,3)
poly.shape= (len(coords),3,2)
poly += shape
dc.DrawPolygonList(poly.astype(_Numeric.Int32))
def _triangle_down(self, dc, coords, size=1):
shape= [(-2.5*size,-1.44*size), (2.5*size,-1.44*size), (0.0,2.88*size)]
poly= _Numeric.repeat(coords,3)
poly.shape= (len(coords),3,2)
poly += shape
dc.DrawPolygonList(poly.astype(_Numeric.Int32))
def _cross(self, dc, coords, size=1):
fact= 2.5*size
for f in [[-fact,-fact,fact,fact],[-fact,fact,fact,-fact]]:
lines= _Numeric.concatenate((coords,coords),axis=1)+f
dc.DrawLineList(lines.astype(_Numeric.Int32))
def _plus(self, dc, coords, size=1):
fact= 2.5*size
for f in [[-fact,0,fact,0],[0,-fact,0,fact]]:
lines= _Numeric.concatenate((coords,coords),axis=1)+f
dc.DrawLineList(lines.astype(_Numeric.Int32))
class PlotGraphics:
"""Container to hold PolyXXX objects and graph labels
- All methods except __init__ are private.
"""
def __init__(self, objects, title='', xLabel='', yLabel= ''):
"""Creates PlotGraphics object
objects - list of PolyXXX objects to make graph
title - title shown at top of graph
xLabel - label shown on x-axis
yLabel - label shown on y-axis
"""
if type(objects) not in [list,tuple]:
raise TypeError, "objects argument should be list or tuple"
self.objects = objects
self.title= title
self.xLabel= xLabel
self.yLabel= yLabel
self._pointSize = (1.0, 1.0)
def setLogScale(self, logscale):
if type(logscale) != tuple:
raise TypeError, 'logscale must be a tuple of bools, e.g. (False, False)'
if len(self.objects) == 0:
return
for o in self.objects:
o.setLogScale(logscale)
def boundingBox(self):
p1, p2 = self.objects[0].boundingBox()
for o in self.objects[1:]:
p1o, p2o = o.boundingBox()
p1 = _Numeric.minimum(p1, p1o)
p2 = _Numeric.maximum(p2, p2o)
return p1, p2
def scaleAndShift(self, scale=(1,1), shift=(0,0)):
for o in self.objects:
o.scaleAndShift(scale, shift)
def setPrinterScale(self, scale):
"""Thickens up lines and markers only for printing"""
self.printerScale= scale
def setXLabel(self, xLabel= ''):
"""Set the X axis label on the graph"""
self.xLabel= xLabel
def setYLabel(self, yLabel= ''):
"""Set the Y axis label on the graph"""
self.yLabel= yLabel
def setTitle(self, title= ''):
"""Set the title at the top of graph"""
self.title= title
def getXLabel(self):
"""Get x axis label string"""
return self.xLabel
def getYLabel(self):
"""Get y axis label string"""
return self.yLabel
def getTitle(self, title= ''):
"""Get the title at the top of graph"""
return self.title
def draw(self, dc):
for o in self.objects:
#t=_time.clock() # profile info
o._pointSize = self._pointSize
o.draw(dc, self.printerScale)
#dt= _time.clock()-t
#print o, "time=", dt
def getSymExtent(self, printerScale):
"""Get max width and height of lines and markers symbols for legend"""
self.objects[0]._pointSize = self._pointSize
symExt = self.objects[0].getSymExtent(printerScale)
for o in self.objects[1:]:
o._pointSize = self._pointSize
oSymExt = o.getSymExtent(printerScale)
symExt = _Numeric.maximum(symExt, oSymExt)
return symExt
def getLegendNames(self):
"""Returns list of legend names"""
lst = [None]*len(self)
for i in range(len(self)):
lst[i]= self.objects[i].getLegend()
return lst
def __len__(self):
return len(self.objects)
def __getitem__(self, item):
return self.objects[item]
#-------------------------------------------------------------------------------
# Main window that you will want to import into your application.
class PlotCanvas(wx.Panel):
"""
Subclass of a wx.Panel which holds two scrollbars and the actual
plotting canvas (self.canvas). It allows for simple general plotting
of data with zoom, labels, and automatic axis scaling."""
def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=0, name="plotCanvas"):
"""Constructs a panel, which can be a child of a frame or
any other non-control window"""
wx.Panel.__init__(self, parent, id, pos, size, style, name)
sizer = wx.FlexGridSizer(2,2,0,0)
self.canvas = wx.Window(self, -1)
self.sb_vert = wx.ScrollBar(self, -1, style=wx.SB_VERTICAL)
self.sb_vert.SetScrollbar(0,1000,1000,1000)
self.sb_hor = wx.ScrollBar(self, -1, style=wx.SB_HORIZONTAL)
self.sb_hor.SetScrollbar(0,1000,1000,1000)
sizer.Add(self.canvas, 1, wx.EXPAND)
sizer.Add(self.sb_vert, 0, wx.EXPAND)
sizer.Add(self.sb_hor, 0, wx.EXPAND)
sizer.Add((0,0))
sizer.AddGrowableRow(0, 1)
sizer.AddGrowableCol(0, 1)
self.sb_vert.Show(False)
self.sb_hor.Show(False)
self.SetSizer(sizer)
self.Fit()
self.border = (1,1)
self.SetBackgroundColour("white")
# Create some mouse events for zooming
self.canvas.Bind(wx.EVT_LEFT_DOWN, self.OnMouseLeftDown)
self.canvas.Bind(wx.EVT_LEFT_UP, self.OnMouseLeftUp)
self.canvas.Bind(wx.EVT_MOTION, self.OnMotion)
self.canvas.Bind(wx.EVT_LEFT_DCLICK, self.OnMouseDoubleClick)
self.canvas.Bind(wx.EVT_RIGHT_DOWN, self.OnMouseRightDown)
# scrollbar events
self.Bind(wx.EVT_SCROLL_THUMBTRACK, self.OnScroll)
self.Bind(wx.EVT_SCROLL_PAGEUP, self.OnScroll)
self.Bind(wx.EVT_SCROLL_PAGEDOWN, self.OnScroll)
self.Bind(wx.EVT_SCROLL_LINEUP, self.OnScroll)
self.Bind(wx.EVT_SCROLL_LINEDOWN, self.OnScroll)
        # set cursor as cross-hairs
self.canvas.SetCursor(wx.CROSS_CURSOR)
self.HandCursor = wx.CursorFromImage(Hand.GetImage())
self.GrabHandCursor = wx.CursorFromImage(GrabHand.GetImage())
self.MagCursor = wx.CursorFromImage(MagPlus.GetImage())
# Things for printing
self._print_data = None
self._pageSetupData= None
self.printerScale = 1
self.parent= parent
# scrollbar variables
self._sb_ignore = False
self._adjustingSB = False
self._sb_xfullrange = 0
self._sb_yfullrange = 0
self._sb_xunit = 0
self._sb_yunit = 0
self._dragEnabled = False
self._screenCoordinates = _Numeric.array([0.0, 0.0])
self._logscale = (False, False)
# Zooming variables
self._zoomInFactor = 0.5
self._zoomOutFactor = 2
self._zoomCorner1= _Numeric.array([0.0, 0.0]) # left mouse down corner
self._zoomCorner2= _Numeric.array([0.0, 0.0]) # left mouse up corner
self._zoomEnabled= False
self._hasDragged= False
# Drawing Variables
self.last_draw = None
self._pointScale= 1
self._pointShift= 0
self._xSpec= 'auto'
self._ySpec= 'auto'
self._gridEnabled= False
self._legendEnabled= False
self._titleEnabled= True
self._centerLinesEnabled = False
self._diagonalsEnabled = False
# Fonts
self._fontCache = {}
self._fontSizeAxis= 10
self._fontSizeTitle= 15
self._fontSizeLegend= 7
# pointLabels
self._pointLabelEnabled= False
self.last_PointLabel= None
self._pointLabelFunc= None
self.canvas.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeave)
if sys.platform != "darwin":
self._logicalFunction = wx.EQUIV # (NOT src) XOR dst
else:
self._logicalFunction = wx.COPY # wx.EQUIV not supported on Mac OS X
self._useScientificNotation = False
self._antiAliasingEnabled = False
self._hiResEnabled = False
self._pointSize = (1.0, 1.0)
self._fontScale = 1.0
self.canvas.Bind(wx.EVT_PAINT, self.OnPaint)
self.canvas.Bind(wx.EVT_SIZE, self.OnSize)
# OnSize called to make sure the buffer is initialized.
# This might result in OnSize getting called twice on some
# platforms at initialization, but little harm done.
self.OnSize(None) # sets the initial size based on client size
self._gridColour = wx.NamedColour('black')
def SetCursor(self, cursor):
self.canvas.SetCursor(cursor)
def GetGridColour(self):
return self._gridColour
def SetGridColour(self, colour):
if isinstance(colour, wx.Colour):
self._gridColour = colour
else:
self._gridColour = wx.NamedColour(colour)
# SaveFile
def SaveFile(self, fileName= ''):
"""Saves the file to the type specified in the extension. If no file
        name is specified, a dialog box is provided.  Returns True if successful,
otherwise False.
.bmp Save a Windows bitmap file.
.xbm Save an X bitmap file.
.xpm Save an XPM bitmap file.
.png Save a Portable Network Graphics file.
.jpg Save a Joint Photographic Experts Group file.
"""
extensions = {
"bmp": wx.BITMAP_TYPE_BMP, # Save a Windows bitmap file.
"xbm": wx.BITMAP_TYPE_XBM, # Save an X bitmap file.
"xpm": wx.BITMAP_TYPE_XPM, # Save an XPM bitmap file.
"jpg": wx.BITMAP_TYPE_JPEG, # Save a JPG file.
"png": wx.BITMAP_TYPE_PNG, # Save a PNG file.
}
fType = _string.lower(fileName[-3:])
dlg1 = None
while fType not in extensions:
if dlg1: # FileDialog exists: Check for extension
dlg2 = wx.MessageDialog(self, 'File name extension\n'
'must be one of\nbmp, xbm, xpm, png, or jpg',
'File Name Error', wx.OK | wx.ICON_ERROR)
try:
dlg2.ShowModal()
finally:
dlg2.Destroy()
else: # FileDialog doesn't exist: just check one
dlg1 = wx.FileDialog(
self,
"Choose a file with extension bmp, gif, xbm, xpm, png, or jpg", ".", "",
"BMP files (*.bmp)|*.bmp|XBM files (*.xbm)|*.xbm|XPM file (*.xpm)|*.xpm|PNG files (*.png)|*.png|JPG files (*.jpg)|*.jpg",
wx.SAVE|wx.OVERWRITE_PROMPT
)
if dlg1.ShowModal() == wx.ID_OK:
fileName = dlg1.GetPath()
fType = _string.lower(fileName[-3:])
else: # exit without saving
dlg1.Destroy()
return False
if dlg1:
dlg1.Destroy()
# Save Bitmap
res= self._Buffer.SaveFile(fileName, extensions[fType])
return res
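    # e.g. canvas.SaveFile("myplot.png")  # sketch: saves the current buffer;
    # falls back to a file dialog when the extension is missing or unknown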
@property
def print_data(self):
if not self._print_data:
self._print_data = wx.PrintData()
self._print_data.SetPaperId(wx.PAPER_LETTER)
self._print_data.SetOrientation(wx.LANDSCAPE)
return self._print_data
@property
def pageSetupData(self):
if not self._pageSetupData:
self._pageSetupData= wx.PageSetupDialogData()
self._pageSetupData.SetMarginBottomRight((25,25))
self._pageSetupData.SetMarginTopLeft((25,25))
self._pageSetupData.SetPrintData(self.print_data)
return self._pageSetupData
def PageSetup(self):
"""Brings up the page setup dialog"""
data = self.pageSetupData
data.SetPrintData(self.print_data)
dlg = wx.PageSetupDialog(self.parent, data)
try:
if dlg.ShowModal() == wx.ID_OK:
data = dlg.GetPageSetupData() # returns wx.PageSetupDialogData
# updates page parameters from dialog
self.pageSetupData.SetMarginBottomRight(data.GetMarginBottomRight())
self.pageSetupData.SetMarginTopLeft(data.GetMarginTopLeft())
self.pageSetupData.SetPrintData(data.GetPrintData())
self._print_data=wx.PrintData(data.GetPrintData()) # updates print_data
finally:
dlg.Destroy()
def Printout(self, paper=None):
"""Print current plot."""
if paper != None:
self.print_data.SetPaperId(paper)
pdd = wx.PrintDialogData(self.print_data)
printer = wx.Printer(pdd)
out = PlotPrintout(self)
print_ok = printer.Print(self.parent, out)
if print_ok:
self._print_data = wx.PrintData(printer.GetPrintDialogData().GetPrintData())
out.Destroy()
def PrintPreview(self):
"""Print-preview current plot."""
printout = PlotPrintout(self)
printout2 = PlotPrintout(self)
self.preview = wx.PrintPreview(printout, printout2, self.print_data)
if not self.preview.Ok():
wx.MessageDialog(self, "Print Preview failed.\n" \
"Check that default printer is configured\n", \
"Print error", wx.OK|wx.CENTRE).ShowModal()
self.preview.SetZoom(40)
# search up tree to find frame instance
frameInst= self
while not isinstance(frameInst, wx.Frame):
frameInst= frameInst.GetParent()
frame = wx.PreviewFrame(self.preview, frameInst, "Preview")
frame.Initialize()
frame.SetPosition(self.GetPosition())
frame.SetSize((600,550))
frame.Centre(wx.BOTH)
frame.Show(True)
def setLogScale(self, logscale):
if type(logscale) != tuple:
raise TypeError, 'logscale must be a tuple of bools, e.g. (False, False)'
if self.last_draw is not None:
graphics, xAxis, yAxis= self.last_draw
graphics.setLogScale(logscale)
self.last_draw = (graphics, None, None)
self.SetXSpec('min')
self.SetYSpec('min')
self._logscale = logscale
def getLogScale(self):
return self._logscale
def SetFontSizeAxis(self, point= 10):
"""Set the tick and axis label font size (default is 10 point)"""
self._fontSizeAxis= point
def GetFontSizeAxis(self):
"""Get current tick and axis label font size in points"""
return self._fontSizeAxis
def SetFontSizeTitle(self, point= 15):
"""Set Title font size (default is 15 point)"""
self._fontSizeTitle= point
def GetFontSizeTitle(self):
"""Get current Title font size in points"""
return self._fontSizeTitle
def SetFontSizeLegend(self, point= 7):
"""Set Legend font size (default is 7 point)"""
self._fontSizeLegend= point
def GetFontSizeLegend(self):
"""Get current Legend font size in points"""
return self._fontSizeLegend
def SetShowScrollbars(self, value):
"""Set True to show scrollbars"""
if value not in [True,False]:
raise TypeError, "Value should be True or False"
if value == self.GetShowScrollbars():
return
self.sb_vert.Show(value)
self.sb_hor.Show(value)
wx.CallAfter(self.Layout)
def GetShowScrollbars(self):
"""Set True to show scrollbars"""
return self.sb_vert.IsShown()
def SetUseScientificNotation(self, useScientificNotation):
self._useScientificNotation = useScientificNotation
def GetUseScientificNotation(self):
return self._useScientificNotation
def SetEnableAntiAliasing(self, enableAntiAliasing):
"""Set True to enable anti-aliasing."""
self._antiAliasingEnabled = enableAntiAliasing
self.Redraw()
def GetEnableAntiAliasing(self):
return self._antiAliasingEnabled
def SetEnableHiRes(self, enableHiRes):
"""Set True to enable high-resolution mode when using anti-aliasing."""
self._hiResEnabled = enableHiRes
self.Redraw()
def GetEnableHiRes(self):
return self._hiResEnabled
def SetEnableDrag(self, value):
"""Set True to enable drag."""
if value not in [True,False]:
raise TypeError, "Value should be True or False"
if value:
if self.GetEnableZoom():
self.SetEnableZoom(False)
self.SetCursor(self.HandCursor)
else:
self.SetCursor(wx.CROSS_CURSOR)
self._dragEnabled = value
def GetEnableDrag(self):
return self._dragEnabled
def SetEnableZoom(self, value):
"""Set True to enable zooming."""
if value not in [True,False]:
raise TypeError, "Value should be True or False"
if value:
if self.GetEnableDrag():
self.SetEnableDrag(False)
self.SetCursor(self.MagCursor)
else:
self.SetCursor(wx.CROSS_CURSOR)
self._zoomEnabled= value
def GetEnableZoom(self):
"""True if zooming enabled."""
return self._zoomEnabled
def SetEnableGrid(self, value):
"""Set True, 'Horizontal' or 'Vertical' to enable grid."""
if value not in [True,False,'Horizontal','Vertical']:
raise TypeError, "Value should be True, False, Horizontal or Vertical"
self._gridEnabled= value
self.Redraw()
def GetEnableGrid(self):
"""True if grid enabled."""
return self._gridEnabled
def SetEnableCenterLines(self, value):
"""Set True, 'Horizontal' or 'Vertical' to enable center line(s)."""
if value not in [True,False,'Horizontal','Vertical']:
raise TypeError, "Value should be True, False, Horizontal or Vertical"
self._centerLinesEnabled= value
self.Redraw()
def GetEnableCenterLines(self):
"""True if grid enabled."""
return self._centerLinesEnabled
def SetEnableDiagonals(self, value):
"""Set True, 'Bottomleft-Topright' or 'Bottomright-Topleft' to enable
        diagonal line(s)."""
if value not in [True,False,'Bottomleft-Topright','Bottomright-Topleft']:
raise TypeError, "Value should be True, False, Bottomleft-Topright or Bottomright-Topleft"
self._diagonalsEnabled= value
self.Redraw()
def GetEnableDiagonals(self):
"""True if grid enabled."""
return self._diagonalsEnabled
def SetEnableLegend(self, value):
"""Set True to enable legend."""
if value not in [True,False]:
raise TypeError, "Value should be True or False"
self._legendEnabled= value
self.Redraw()
def GetEnableLegend(self):
"""True if Legend enabled."""
return self._legendEnabled
def SetEnableTitle(self, value):
"""Set True to enable title."""
if value not in [True,False]:
raise TypeError, "Value should be True or False"
self._titleEnabled= value
self.Redraw()
def GetEnableTitle(self):
"""True if title enabled."""
return self._titleEnabled
def SetEnablePointLabel(self, value):
"""Set True to enable pointLabel."""
if value not in [True,False]:
raise TypeError, "Value should be True or False"
self._pointLabelEnabled= value
self.Redraw() #will erase existing pointLabel if present
self.last_PointLabel = None
def GetEnablePointLabel(self):
"""True if pointLabel enabled."""
return self._pointLabelEnabled
def SetPointLabelFunc(self, func):
"""Sets the function with custom code for pointLabel drawing
        The function is called as func(dc, mDataDict), where dc is the device
        context to draw with and mDataDict is the dictionary passed to
        UpdatePointLabel (see also _drawPointLabel).
"""
self._pointLabelFunc= func
def GetPointLabelFunc(self):
"""Returns pointLabel Drawing Function"""
return self._pointLabelFunc
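    # Sketch of a pointLabel function (illustrative only; every key except
    # "pointXY" in mDataDict is caller-defined and simply passed through):
    #
    #   def drawPointLabel(dc, mDataDict):
    #       dc.SetPen(wx.Pen(wx.BLACK))
    #       dc.SetBrush(wx.Brush(wx.WHITE, wx.TRANSPARENT))
    #       sx, sy = mDataDict["scaledXY"]              # screen coords chosen by caller
    #       dc.DrawRectangle(sx - 5, sy - 5, 10, 10)    # box around the point
    #       dc.DrawText(str(mDataDict.get("legend", "")), sx + 8, sy)
    #
    #   canvas.SetPointLabelFunc(drawPointLabel)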
def Reset(self):
"""Unzoom the plot."""
self.last_PointLabel = None #reset pointLabel
if self.last_draw is not None:
self._Draw(self.last_draw[0])
def ScrollRight(self, units):
"""Move view right number of axis units."""
self.last_PointLabel = None #reset pointLabel
if self.last_draw is not None:
graphics, xAxis, yAxis= self.last_draw
xAxis= (xAxis[0]+units, xAxis[1]+units)
self._Draw(graphics,xAxis,yAxis)
def ScrollUp(self, units):
"""Move view up number of axis units."""
self.last_PointLabel = None #reset pointLabel
if self.last_draw is not None:
graphics, xAxis, yAxis= self.last_draw
yAxis= (yAxis[0]+units, yAxis[1]+units)
self._Draw(graphics,xAxis,yAxis)
def GetXY(self, event):
"""Wrapper around _getXY, which handles log scales"""
x,y = self._getXY(event)
if self.getLogScale()[0]:
x = _Numeric.power(10,x)
if self.getLogScale()[1]:
y = _Numeric.power(10,y)
return x,y
def _getXY(self,event):
"""Takes a mouse event and returns the XY user axis values."""
x,y= self.PositionScreenToUser(event.GetPosition())
return x,y
def PositionUserToScreen(self, pntXY):
"""Converts User position to Screen Coordinates"""
userPos= _Numeric.array(pntXY)
x,y= userPos * self._pointScale + self._pointShift
return x,y
def PositionScreenToUser(self, pntXY):
"""Converts Screen position to User Coordinates"""
screenPos= _Numeric.array(pntXY)
x,y= (screenPos-self._pointShift)/self._pointScale
return x,y
def SetXSpec(self, type= 'auto'):
"""xSpec- defines x axis type. Can be 'none', 'min' or 'auto'
where:
* 'none' - shows no axis or tick mark values
* 'min' - shows min bounding box values
* 'auto' - rounds axis range to sensible values
* <number> - like 'min', but with <number> tick marks
"""
self._xSpec= type
def SetYSpec(self, type= 'auto'):
"""ySpec- defines x axis type. Can be 'none', 'min' or 'auto'
where:
* 'none' - shows no axis or tick mark values
* 'min' - shows min bounding box values
* 'auto' - rounds axis range to sensible values
* <number> - like 'min', but with <number> tick marks
"""
self._ySpec= type
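    # e.g. canvas.SetXSpec('auto'); canvas.SetYSpec(5)
    # sketch: rounded x range, roughly five tick marks on the y axis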
def GetXSpec(self):
"""Returns current XSpec for axis"""
return self._xSpec
def GetYSpec(self):
"""Returns current YSpec for axis"""
return self._ySpec
def GetXMaxRange(self):
xAxis = self._getXMaxRange()
if self.getLogScale()[0]:
xAxis = _Numeric.power(10,xAxis)
return xAxis
def _getXMaxRange(self):
"""Returns (minX, maxX) x-axis range for displayed graph"""
graphics= self.last_draw[0]
p1, p2 = graphics.boundingBox() # min, max points of graphics
xAxis = self._axisInterval(self._xSpec, p1[0], p2[0]) # in user units
return xAxis
def GetYMaxRange(self):
yAxis = self._getYMaxRange()
if self.getLogScale()[1]:
yAxis = _Numeric.power(10,yAxis)
return yAxis
def _getYMaxRange(self):
"""Returns (minY, maxY) y-axis range for displayed graph"""
graphics= self.last_draw[0]
p1, p2 = graphics.boundingBox() # min, max points of graphics
yAxis = self._axisInterval(self._ySpec, p1[1], p2[1])
return yAxis
def GetXCurrentRange(self):
xAxis = self._getXCurrentRange()
if self.getLogScale()[0]:
xAxis = _Numeric.power(10,xAxis)
return xAxis
def _getXCurrentRange(self):
"""Returns (minX, maxX) x-axis for currently displayed portion of graph"""
return self.last_draw[1]
def GetYCurrentRange(self):
yAxis = self._getYCurrentRange()
if self.getLogScale()[1]:
yAxis = _Numeric.power(10,yAxis)
return yAxis
def _getYCurrentRange(self):
"""Returns (minY, maxY) y-axis for currently displayed portion of graph"""
return self.last_draw[2]
def Draw(self, graphics, xAxis = None, yAxis = None, dc = None):
"""Wrapper around _Draw, which handles log axes"""
graphics.setLogScale(self.getLogScale())
# check Axis is either tuple or none
if type(xAxis) not in [type(None),tuple]:
raise TypeError, "xAxis should be None or (minX,maxX)"+str(type(xAxis))
if type(yAxis) not in [type(None),tuple]:
raise TypeError, "yAxis should be None or (minY,maxY)"+str(type(xAxis))
# check case for axis = (a,b) where a==b caused by improper zooms
if xAxis != None:
if xAxis[0] == xAxis[1]:
return
if self.getLogScale()[0]:
xAxis = _Numeric.log10(xAxis)
if yAxis != None:
if yAxis[0] == yAxis[1]:
return
if self.getLogScale()[1]:
yAxis = _Numeric.log10(yAxis)
self._Draw(graphics, xAxis, yAxis, dc)
def _Draw(self, graphics, xAxis = None, yAxis = None, dc = None):
"""\
Draw objects in graphics with specified x and y axis.
graphics- instance of PlotGraphics with list of PolyXXX objects
xAxis - tuple with (min, max) axis range to view
yAxis - same as xAxis
dc - drawing context - doesn't have to be specified.
If it's not, the offscreen buffer is used
"""
if dc == None:
# sets new dc and clears it
dc = wx.BufferedDC(wx.ClientDC(self.canvas), self._Buffer)
bbr = wx.Brush(self.GetBackgroundColour(), wx.SOLID)
dc.SetBackground(bbr)
dc.SetBackgroundMode(wx.SOLID)
dc.Clear()
if self._antiAliasingEnabled:
if not isinstance(dc, wx.GCDC):
try:
dc = wx.GCDC(dc)
except Exception, exception:
pass
else:
if self._hiResEnabled:
dc.SetMapMode(wx.MM_TWIPS) # high precision - each logical unit is 1/20 of a point
self._pointSize = tuple(1.0 / lscale for lscale in dc.GetLogicalScale())
self._setSize()
elif self._pointSize != (1.0, 1.0):
self._pointSize = (1.0, 1.0)
self._setSize()
if sys.platform in ("darwin", "win32") or not isinstance(dc, wx.GCDC):
self._fontScale = sum(self._pointSize) / 2.0
else:
# on Linux, we need to correct the font size by a certain factor if wx.GCDC is used,
# to make text the same size as if wx.GCDC weren't used
ppi = dc.GetPPI()
self._fontScale = (96.0 / ppi[0] * self._pointSize[0] + 96.0 / ppi[1] * self._pointSize[1]) / 2.0
graphics._pointSize = self._pointSize
dc.SetTextForeground(self.GetForegroundColour())
dc.SetTextBackground(self.GetBackgroundColour())
dc.BeginDrawing()
# dc.Clear()
# set font size for every thing but title and legend
dc.SetFont(self._getFont(self._fontSizeAxis))
# sizes axis to axis type, create lower left and upper right corners of plot
if xAxis == None or yAxis == None:
# One or both axis not specified in Draw
p1, p2 = graphics.boundingBox() # min, max points of graphics
if xAxis == None:
xAxis = self._axisInterval(self._xSpec, p1[0], p2[0]) # in user units
if yAxis == None:
yAxis = self._axisInterval(self._ySpec, p1[1], p2[1])
# Adjust bounding box for axis spec
p1[0],p1[1] = xAxis[0], yAxis[0] # lower left corner user scale (xmin,ymin)
p2[0],p2[1] = xAxis[1], yAxis[1] # upper right corner user scale (xmax,ymax)
else:
# Both axis specified in Draw
p1= _Numeric.array([xAxis[0], yAxis[0]]) # lower left corner user scale (xmin,ymin)
p2= _Numeric.array([xAxis[1], yAxis[1]]) # upper right corner user scale (xmax,ymax)
        self.last_draw = (graphics, _Numeric.array(xAxis), _Numeric.array(yAxis))       # saves most recent values
# Get ticks and textExtents for axis if required
        if self._xSpec != 'none':
xticks = self._xticks(xAxis[0], xAxis[1])
xTextExtent = dc.GetTextExtent(xticks[-1][1])# w h of x axis text last number on axis
else:
xticks = None
xTextExtent= (0,0) # No text for ticks
        if self._ySpec != 'none':
yticks = self._yticks(yAxis[0], yAxis[1])
if self.getLogScale()[1]:
yTextExtent = dc.GetTextExtent('-2e-2')
else:
yTextExtentBottom = dc.GetTextExtent(yticks[0][1])
yTextExtentTop = dc.GetTextExtent(yticks[-1][1])
yTextExtent= (max(yTextExtentBottom[0],yTextExtentTop[0]),
max(yTextExtentBottom[1],yTextExtentTop[1]))
else:
yticks = None
yTextExtent= (0,0) # No text for ticks
# TextExtents for Title and Axis Labels
titleWH, xLabelWH, yLabelWH= self._titleLablesWH(dc, graphics)
# TextExtents for Legend
legendBoxWH, legendSymExt, legendTextExt = self._legendWH(dc, graphics)
# room around graph area
rhsW= max(xTextExtent[0], legendBoxWH[0])+5*self._pointSize[0] # use larger of number width or legend width
lhsW= yTextExtent[0]+ yLabelWH[1] + 3*self._pointSize[0]
bottomH= max(xTextExtent[1], yTextExtent[1]/2.)+ xLabelWH[1] + 2*self._pointSize[1]
topH= yTextExtent[1]/2. + titleWH[1]
textSize_scale= _Numeric.array([rhsW+lhsW,bottomH+topH]) # make plot area smaller by text size
textSize_shift= _Numeric.array([lhsW, bottomH]) # shift plot area by this amount
# draw title if requested
if self._titleEnabled:
dc.SetFont(self._getFont(self._fontSizeTitle))
titlePos= (self.plotbox_origin[0]+ lhsW + (self.plotbox_size[0]-lhsW-rhsW)/2.- titleWH[0]/2.,
self.plotbox_origin[1]- self.plotbox_size[1])
dc.DrawText(graphics.getTitle(),titlePos[0],titlePos[1])
# draw label text
dc.SetFont(self._getFont(self._fontSizeAxis))
xLabelPos= (self.plotbox_origin[0]+ lhsW + (self.plotbox_size[0]-lhsW-rhsW)/2.- xLabelWH[0]/2.,
self.plotbox_origin[1]- xLabelWH[1])
dc.DrawText(graphics.getXLabel(),xLabelPos[0],xLabelPos[1])
yLabelPos= (self.plotbox_origin[0] - 3*self._pointSize[0],
self.plotbox_origin[1]- bottomH- (self.plotbox_size[1]-bottomH-topH)/2.+ yLabelWH[0]/2.)
if graphics.getYLabel(): # bug fix for Linux
dc.DrawRotatedText(graphics.getYLabel(),yLabelPos[0],yLabelPos[1],90)
# drawing legend makers and text
if self._legendEnabled:
self._drawLegend(dc,graphics,rhsW,topH,legendBoxWH, legendSymExt, legendTextExt)
# allow for scaling and shifting plotted points
scale = (self.plotbox_size-textSize_scale) / (p2-p1)* _Numeric.array((1,-1))
shift = -p1*scale + self.plotbox_origin + textSize_shift * _Numeric.array((1,-1))
self._pointScale= scale / self._pointSize # make available for mouse events
self._pointShift= shift / self._pointSize
self._drawAxes(dc, p1, p2, scale, shift, xticks, yticks)
graphics.scaleAndShift(scale, shift)
graphics.setPrinterScale(self.printerScale) # thicken up lines and markers if printing
# set clipping area so drawing does not occur outside axis box
ptx,pty,rectWidth,rectHeight= self._point2ClientCoord(p1, p2)
# allow graph to overlap axis lines by adding units to width and height
dc.SetClippingRegion(ptx*self._pointSize[0],pty*self._pointSize[1],rectWidth*self._pointSize[0]+2,rectHeight*self._pointSize[1]+1)
# Draw the lines and markers
#start = _time.clock()
graphics.draw(dc)
# print "entire graphics drawing took: %f second"%(_time.clock() - start)
# remove the clipping region
dc.DestroyClippingRegion()
dc.EndDrawing()
self._adjustScrollbars()
def Redraw(self, dc=None):
"""Redraw the existing plot."""
if self.last_draw is not None:
graphics, xAxis, yAxis= self.last_draw
self._Draw(graphics,xAxis,yAxis,dc)
def Clear(self):
"""Erase the window."""
self.last_PointLabel = None #reset pointLabel
dc = wx.BufferedDC(wx.ClientDC(self.canvas), self._Buffer)
bbr = wx.Brush(self.GetBackgroundColour(), wx.SOLID)
dc.SetBackground(bbr)
dc.SetBackgroundMode(wx.SOLID)
dc.Clear()
if self._antiAliasingEnabled:
try:
dc = wx.GCDC(dc)
except Exception, exception:
pass
dc.SetTextForeground(self.GetForegroundColour())
dc.SetTextBackground(self.GetBackgroundColour())
self.last_draw = None
def Zoom(self, Center, Ratio):
""" Zoom on the plot
Centers on the X,Y coords given in Center
Zooms by the Ratio = (Xratio, Yratio) given
"""
        self.last_PointLabel = None        #reset pointLabel
x,y = Center
if self.last_draw != None:
(graphics, xAxis, yAxis) = self.last_draw
w = (xAxis[1] - xAxis[0]) * Ratio[0]
h = (yAxis[1] - yAxis[0]) * Ratio[1]
xAxis = ( x - w/2, x + w/2 )
yAxis = ( y - h/2, y + h/2 )
self._Draw(graphics, xAxis, yAxis)
def GetClosestPoints(self, pntXY, pointScaled= True):
"""Returns list with
[curveNumber, legend, index of closest point, pointXY, scaledXY, distance]
list for each curve.
Returns [] if no curves are being plotted.
x, y in user coords
if pointScaled == True based on screen coords
if pointScaled == False based on user coords
"""
if self.last_draw == None:
#no graph available
return []
graphics, xAxis, yAxis= self.last_draw
l = []
for curveNum,obj in enumerate(graphics):
#check there are points in the curve
if len(obj.points) == 0:
continue #go to next obj
#[curveNumber, legend, index of closest point, pointXY, scaledXY, distance]
cn = [curveNum]+ [obj.getLegend()]+ obj.getClosestPoint( pntXY, pointScaled)
l.append(cn)
return l
def GetClosestPoint(self, pntXY, pointScaled= True):
"""Returns list with
[curveNumber, legend, index of closest point, pointXY, scaledXY, distance]
list for only the closest curve.
Returns [] if no curves are being plotted.
x, y in user coords
if pointScaled == True based on screen coords
if pointScaled == False based on user coords
"""
#closest points on screen based on screen scaling (pointScaled= True)
#list [curveNumber, index, pointXY, scaledXY, distance] for each curve
closestPts= self.GetClosestPoints(pntXY, pointScaled)
if closestPts == []:
return [] #no graph present
#find one with least distance
dists = [c[-1] for c in closestPts]
mdist = min(dists) #Min dist
i = dists.index(mdist) #index for min dist
return closestPts[i] #this is the closest point on closest curve
    GetClosetPoint = GetClosestPoint    # misspelled alias retained (presumably for backward compatibility)
def UpdatePointLabel(self, mDataDict):
"""Updates the pointLabel point on screen with data contained in
mDataDict.
mDataDict will be passed to your function set by
SetPointLabelFunc. It can contain anything you
want to display on the screen at the scaledXY point
you specify.
This function can be called from parent window with onClick,
onMotion events etc.
"""
if self.last_PointLabel != None:
#compare pointXY
if _Numeric.sometrue(mDataDict["pointXY"] != self.last_PointLabel["pointXY"]):
#closest changed
self._drawPointLabel(self.last_PointLabel) #erase old
self._drawPointLabel(mDataDict) #plot new
else:
#just plot new with no erase
self._drawPointLabel(mDataDict) #plot new
#save for next erase
self.last_PointLabel = mDataDict
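    # Sketch of driving the pointLabel from a parent window's mouse-motion
    # handler (illustrative only; dict keys besides "pointXY" are free-form
    # and are just handed to the function set with SetPointLabelFunc):
    #
    #   def OnMotion(self, event):
    #       if self.canvas.GetEnablePointLabel() and self.canvas.last_draw:
    #           dlst = self.canvas.GetClosestPoint(self.canvas.GetXY(event),
    #                                              pointScaled=True)
    #           if dlst:
    #               curveNum, legend, pIndex, pointXY, scaledXY, dist = dlst
    #               self.canvas.UpdatePointLabel({"pointXY": pointXY,
    #                                             "scaledXY": scaledXY,
    #                                             "legend": legend,
    #                                             "curveNum": curveNum})
    #       event.Skip()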
# event handlers **********************************
def OnMotion(self, event):
if self._zoomEnabled and event.LeftIsDown():
if self._hasDragged:
self._drawRubberBand(self._zoomCorner1, self._zoomCorner2) # remove old
else:
self._hasDragged= True
self._zoomCorner2[0], self._zoomCorner2[1] = self._getXY(event)
self._drawRubberBand(self._zoomCorner1, self._zoomCorner2) # add new
elif self._dragEnabled and event.LeftIsDown():
coordinates = event.GetPosition()
newpos, oldpos = map(_Numeric.array, map(self.PositionScreenToUser, [coordinates, self._screenCoordinates]))
dist = newpos-oldpos
self._screenCoordinates = coordinates
if self.last_draw is not None:
graphics, xAxis, yAxis= self.last_draw
yAxis -= dist[1]
xAxis -= dist[0]
self._Draw(graphics,xAxis,yAxis)
def OnMouseLeftDown(self,event):
self._zoomCorner1[0], self._zoomCorner1[1]= self._getXY(event)
self._screenCoordinates = _Numeric.array(event.GetPosition())
if self._dragEnabled:
self.SetCursor(self.GrabHandCursor)
self.canvas.CaptureMouse()
def OnMouseLeftUp(self, event):
if self._zoomEnabled:
if self._hasDragged == True:
self._drawRubberBand(self._zoomCorner1, self._zoomCorner2) # remove old
self._zoomCorner2[0], self._zoomCorner2[1]= self._getXY(event)
self._hasDragged = False # reset flag
minX, minY= _Numeric.minimum( self._zoomCorner1, self._zoomCorner2)
maxX, maxY= _Numeric.maximum( self._zoomCorner1, self._zoomCorner2)
self.last_PointLabel = None #reset pointLabel
if self.last_draw != None:
self._Draw(self.last_draw[0], xAxis = (minX,maxX), yAxis = (minY,maxY), dc = None)
#else: # A box has not been drawn, zoom in on a point
            ## this interfered with the double click, so I've disabled it.
# X,Y = self._getXY(event)
# self.Zoom( (X,Y), (self._zoomInFactor,self._zoomInFactor) )
if self._dragEnabled:
self.SetCursor(self.HandCursor)
if self.canvas.HasCapture():
self.canvas.ReleaseMouse()
def OnMouseDoubleClick(self,event):
if self._zoomEnabled:
# Give a little time for the click to be totally finished
            # before (possibly) removing the scrollbars and triggering
# size events, etc.
wx.FutureCall(200,self.Reset)
def OnMouseRightDown(self,event):
if self._zoomEnabled:
X,Y = self._getXY(event)
self.Zoom( (X,Y), (self._zoomOutFactor, self._zoomOutFactor) )
def OnPaint(self, event):
# All that is needed here is to draw the buffer to screen
if self.last_PointLabel != None:
self._drawPointLabel(self.last_PointLabel) #erase old
self.last_PointLabel = None
dc = wx.BufferedPaintDC(self.canvas, self._Buffer)
if self._antiAliasingEnabled:
try:
dc = wx.GCDC(dc)
except Exception, exception:
pass
def OnSize(self,event):
# The Buffer init is done here, to make sure the buffer is always
# the same size as the Window
Size = self.canvas.GetClientSize()
Size.width = max(1, Size.width)
Size.height = max(1, Size.height)
# Make new offscreen bitmap: this bitmap will always have the
# current drawing in it, so it can be used to save the image to
# a file, or whatever.
self._Buffer = wx.EmptyBitmap(Size.width, Size.height)
self._setSize()
self.last_PointLabel = None #reset pointLabel
if self.last_draw is None:
self.Clear()
else:
graphics, xSpec, ySpec = self.last_draw
self._Draw(graphics,xSpec,ySpec)
def OnLeave(self, event):
"""Used to erase pointLabel when mouse outside window"""
if self.last_PointLabel != None:
self._drawPointLabel(self.last_PointLabel) #erase old
self.last_PointLabel = None
def OnScroll(self, evt):
if not self._adjustingSB:
self._sb_ignore = True
sbpos = evt.GetPosition()
if evt.GetOrientation() == wx.VERTICAL:
fullrange,pagesize = self.sb_vert.GetRange(),self.sb_vert.GetPageSize()
sbpos = fullrange-pagesize-sbpos
dist = sbpos*self._sb_yunit-(self._getYCurrentRange()[0]-self._sb_yfullrange[0])
self.ScrollUp(dist)
if evt.GetOrientation() == wx.HORIZONTAL:
dist = sbpos*self._sb_xunit-(self._getXCurrentRange()[0]-self._sb_xfullrange[0])
self.ScrollRight(dist)
# Private Methods **************************************************
def _setSize(self, width=None, height=None):
"""DC width and height."""
if width == None:
(self.width,self.height) = self.canvas.GetClientSize()
else:
self.width, self.height= width,height
self.width *= self._pointSize[0] # high precision
self.height *= self._pointSize[1] # high precision
self.plotbox_size = 0.97*_Numeric.array([self.width, self.height])
xo = 0.5*(self.width-self.plotbox_size[0])
yo = self.height-0.5*(self.height-self.plotbox_size[1])
self.plotbox_origin = _Numeric.array([xo, yo])
def _setPrinterScale(self, scale):
"""Used to thicken lines and increase marker size for print out."""
# line thickness on printer is very thin at 600 dot/in. Markers small
self.printerScale= scale
def _printDraw(self, printDC):
"""Used for printing."""
if self.last_draw != None:
graphics, xSpec, ySpec= self.last_draw
self._Draw(graphics,xSpec,ySpec,printDC)
def _drawPointLabel(self, mDataDict):
"""Draws and erases pointLabels"""
width = self._Buffer.GetWidth()
height = self._Buffer.GetHeight()
if sys.platform != "darwin":
tmp_Buffer = wx.EmptyBitmap(width,height)
dcs = wx.MemoryDC()
dcs.SelectObject(tmp_Buffer)
dcs.Clear()
else:
tmp_Buffer = self._Buffer.GetSubBitmap((0, 0, width, height))
dcs = wx.MemoryDC(self._Buffer)
dcs.BeginDrawing()
self._pointLabelFunc(dcs,mDataDict) #custom user pointLabel function
dcs.EndDrawing()
dc = wx.ClientDC( self.canvas )
#this will erase if called twice
dc.Blit(0, 0, width, height, dcs, 0, 0, self._logicalFunction)
if sys.platform == "darwin":
self._Buffer = tmp_Buffer
def _drawLegend(self,dc,graphics,rhsW,topH,legendBoxWH, legendSymExt, legendTextExt):
"""Draws legend symbols and text"""
# top right hand corner of graph box is ref corner
trhc= self.plotbox_origin+ (self.plotbox_size-[rhsW,topH])*[1,-1]
legendLHS= .091* legendBoxWH[0] # border space between legend sym and graph box
lineHeight= max(legendSymExt[1], legendTextExt[1]) * 1.1 #1.1 used as space between lines
dc.SetFont(self._getFont(self._fontSizeLegend))
for i in range(len(graphics)):
o = graphics[i]
s= i*lineHeight
if isinstance(o,PolyMarker):
# draw marker with legend
pnt= (trhc[0]+legendLHS+legendSymExt[0]/2., trhc[1]+s+lineHeight/2.)
o.draw(dc, self.printerScale, coord= _Numeric.array([pnt]))
elif isinstance(o,PolyLine):
# draw line with legend
pnt1= (trhc[0]+legendLHS, trhc[1]+s+lineHeight/2.)
pnt2= (trhc[0]+legendLHS+legendSymExt[0], trhc[1]+s+lineHeight/2.)
o.draw(dc, self.printerScale, coord= _Numeric.array([pnt1,pnt2]))
else:
raise TypeError, "object is neither PolyMarker or PolyLine instance"
# draw legend txt
pnt= (trhc[0]+legendLHS+legendSymExt[0]+5*self._pointSize[0], trhc[1]+s+lineHeight/2.-legendTextExt[1]/2)
dc.DrawText(o.getLegend(),pnt[0],pnt[1])
dc.SetFont(self._getFont(self._fontSizeAxis)) # reset
def _titleLablesWH(self, dc, graphics):
"""Draws Title and labels and returns width and height for each"""
# TextExtents for Title and Axis Labels
dc.SetFont(self._getFont(self._fontSizeTitle))
if self._titleEnabled:
title= graphics.getTitle()
titleWH= dc.GetTextExtent(title)
else:
titleWH= (0,0)
dc.SetFont(self._getFont(self._fontSizeAxis))
xLabel, yLabel= graphics.getXLabel(),graphics.getYLabel()
xLabelWH= dc.GetTextExtent(xLabel)
yLabelWH= dc.GetTextExtent(yLabel)
return titleWH, xLabelWH, yLabelWH
def _legendWH(self, dc, graphics):
"""Returns the size in screen units for legend box"""
if self._legendEnabled != True:
legendBoxWH= symExt= txtExt= (0,0)
else:
# find max symbol size
symExt= graphics.getSymExtent(self.printerScale)
# find max legend text extent
dc.SetFont(self._getFont(self._fontSizeLegend))
txtList= graphics.getLegendNames()
txtExt= dc.GetTextExtent(txtList[0])
for txt in graphics.getLegendNames()[1:]:
txtExt= _Numeric.maximum(txtExt,dc.GetTextExtent(txt))
maxW= symExt[0]+txtExt[0]
maxH= max(symExt[1],txtExt[1])
# padding .1 for lhs of legend box and space between lines
maxW= maxW* 1.1
maxH= maxH* 1.1 * len(txtList)
dc.SetFont(self._getFont(self._fontSizeAxis))
legendBoxWH= (maxW,maxH)
return (legendBoxWH, symExt, txtExt)
def _drawRubberBand(self, corner1, corner2):
"""Draws/erases rect box from corner1 to corner2"""
ptx,pty,rectWidth,rectHeight= self._point2ClientCoord(corner1, corner2)
# draw rectangle
dc = wx.ClientDC( self.canvas )
dc.BeginDrawing()
dc.SetPen(wx.Pen(wx.BLACK))
dc.SetBrush(wx.Brush( wx.WHITE, wx.TRANSPARENT ) )
dc.SetLogicalFunction(wx.INVERT)
dc.DrawRectangle( ptx,pty, rectWidth,rectHeight)
dc.SetLogicalFunction(wx.COPY)
dc.EndDrawing()
def _getFont(self,size):
"""Take font size, adjusts if printing and returns wx.Font"""
s = size*self.printerScale*self._fontScale
of = self.GetFont()
# Linux speed up to get font from cache rather than X font server
key = (int(s), of.GetFamily (), of.GetStyle (), of.GetWeight ())
font = self._fontCache.get (key, None)
if font:
return font # yeah! cache hit
else:
font = wx.Font(int(s), of.GetFamily(), of.GetStyle(), of.GetWeight())
self._fontCache[key] = font
return font
def _point2ClientCoord(self, corner1, corner2):
"""Converts user point coords to client screen int coords x,y,width,height"""
c1= _Numeric.array(corner1)
c2= _Numeric.array(corner2)
# convert to screen coords
pt1= c1*self._pointScale+self._pointShift
pt2= c2*self._pointScale+self._pointShift
# make height and width positive
pul= _Numeric.minimum(pt1,pt2) # Upper left corner
plr= _Numeric.maximum(pt1,pt2) # Lower right corner
rectWidth, rectHeight= plr-pul
ptx,pty= pul
return ptx, pty, rectWidth, rectHeight
def _axisInterval(self, spec, lower, upper):
"""Returns sensible axis range for given spec"""
if spec == 'none' or spec == 'min' or isinstance(spec, (float, int)):
if lower == upper:
return lower-0.5, upper+0.5
else:
return lower, upper
elif spec == 'auto':
range = upper-lower
if range == 0.:
return lower-0.5, upper+0.5
log = _Numeric.log10(range)
power = _Numeric.floor(log)
fraction = log-power
if fraction <= 0.05:
power = power-1
grid = 10.**power
lower = lower - lower % grid
mod = upper % grid
if mod != 0:
upper = upper - mod + grid
return lower, upper
elif type(spec) == type(()):
lower, upper = spec
if lower <= upper:
return lower, upper
else:
return upper, lower
else:
raise ValueError, str(spec) + ': illegal axis specification'
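    # Worked example for the 'auto' branch above: lower=0.3, upper=9.7 gives
    # range=9.4, power=floor(log10(9.4))=0, grid=1.0, so the interval is
    # widened to (0.0, 10.0).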
def _drawAxes(self, dc, p1, p2, scale, shift, xticks, yticks):
penWidth= self.printerScale * self._pointSize[0] # increases thickness for printing only
dc.SetPen(wx.Pen(self._gridColour, penWidth))
# set length of tick marks--long ones make grid
if self._gridEnabled:
x,y,width,height= self._point2ClientCoord(p1,p2)
if self._gridEnabled == 'Horizontal':
yTickLength= (width/2.0 +1) * self._pointSize[1]
xTickLength= 3 * self.printerScale * self._pointSize[0]
elif self._gridEnabled == 'Vertical':
yTickLength= 3 * self.printerScale * self._pointSize[1]
xTickLength= (height/2.0 +1) * self._pointSize[0]
else:
yTickLength= (width/2.0 +1) * self._pointSize[1]
xTickLength= (height/2.0 +1) * self._pointSize[0]
else:
yTickLength= 3 * self.printerScale * self._pointSize[1] # lengthens lines for printing
xTickLength= 3 * self.printerScale * self._pointSize[0]
        if self._xSpec != 'none':
lower, upper = p1[0],p2[0]
text = 1
for y, d in [(p1[1], -xTickLength), (p2[1], xTickLength)]: # miny, maxy and tick lengths
for x, label in xticks:
pt = scale*_Numeric.array([x, y])+shift
dc.DrawLine(pt[0],pt[1],pt[0],pt[1] + d) # draws tick mark d units
if text:
dc.DrawText(label,pt[0],pt[1]+2*self._pointSize[1])
a1 = scale*_Numeric.array([lower, y])+shift
a2 = scale*_Numeric.array([upper, y])+shift
dc.DrawLine(a1[0],a1[1],a2[0],a2[1]) # draws upper and lower axis line
text = 0 # axis values not drawn on top side
        if self._ySpec != 'none':
lower, upper = p1[1],p2[1]
text = 1
h = dc.GetCharHeight()
for x, d in [(p1[0], -yTickLength), (p2[0], yTickLength)]:
for y, label in yticks:
pt = scale*_Numeric.array([x, y])+shift
dc.DrawLine(pt[0],pt[1],pt[0]-d,pt[1])
if text:
dc.DrawText(label,pt[0]-dc.GetTextExtent(label)[0]-3*self._pointSize[0],
pt[1]-0.75*h)
a1 = scale*_Numeric.array([x, lower])+shift
a2 = scale*_Numeric.array([x, upper])+shift
dc.DrawLine(a1[0],a1[1],a2[0],a2[1])
text = 0 # axis values not drawn on right side
if self._centerLinesEnabled:
if self._centerLinesEnabled in ('Horizontal', True):
y1 = scale[1]*p1[1]+shift[1]
y2 = scale[1]*p2[1]+shift[1]
y = (y1 - y2) / 2.0 + y2
dc.DrawLine(scale[0] * p1[0] + shift[0], y, scale[0] * p2[0] + shift[0], y)
if self._centerLinesEnabled in ('Vertical', True):
x1 = scale[0]*p1[0]+shift[0]
x2 = scale[0]*p2[0]+shift[0]
x = (x1 - x2) / 2.0 + x2
dc.DrawLine(x, scale[1] * p1[1] + shift[1], x, scale[1] * p2[1] + shift[1])
if self._diagonalsEnabled:
if self._diagonalsEnabled in ('Bottomleft-Topright', True):
dc.DrawLine(scale[0] * p1[0] + shift[0], scale[1] * p1[1] + shift[1], scale[0] * p2[0] + shift[0], scale[1] * p2[1] + shift[1])
if self._diagonalsEnabled in ('Bottomright-Topleft', True):
dc.DrawLine(scale[0] * p1[0] + shift[0], scale[1] * p2[1] + shift[1], scale[0] * p2[0] + shift[0], scale[1] * p1[1] + shift[1])
def _xticks(self, *args):
if self._logscale[0]:
return self._logticks(*args)
else:
attr = {'numticks': self._xSpec}
return self._ticks(*args, **attr)
def _yticks(self, *args):
if self._logscale[1]:
return self._logticks(*args)
else:
attr = {'numticks': self._ySpec}
return self._ticks(*args, **attr)
def _logticks(self, lower, upper):
#lower,upper = map(_Numeric.log10,[lower,upper])
#print 'logticks',lower,upper
ticks = []
mag = _Numeric.power(10,_Numeric.floor(lower))
if upper-lower > 6:
t = _Numeric.power(10,_Numeric.ceil(lower))
base = _Numeric.power(10,_Numeric.floor((upper-lower)/6))
def inc(t):
return t*base-t
else:
t = _Numeric.ceil(_Numeric.power(10,lower)/mag)*mag
def inc(t):
return 10**int(_Numeric.floor(_Numeric.log10(t)+1e-16))
majortick = int(_Numeric.log10(mag))
while t <= pow(10,upper):
if majortick != int(_Numeric.floor(_Numeric.log10(t)+1e-16)):
majortick = int(_Numeric.floor(_Numeric.log10(t)+1e-16))
ticklabel = '1e%d'%majortick
else:
if upper-lower < 2:
minortick = int(t/pow(10,majortick)+.5)
ticklabel = '%de%d'%(minortick,majortick)
else:
ticklabel = ''
ticks.append((_Numeric.log10(t), ticklabel))
t += inc(t)
if len(ticks) == 0:
ticks = [(0,'')]
return ticks
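    # Editor's note (illustrative sketch, not part of the original module):
    # _logticks() works in log10 space.  It emits a '1eN' label whenever the
    # decade changes and, for ranges narrower than two decades, also labels the
    # intermediate ticks, e.g. (assuming a PlotCanvas instance):
    #   self._logticks(0.0, 1.0)
    #   # -> [(0.0, '1e0'), (0.301..., '2e0'), ..., (1.0, '1e1')]
    # Wider ranges leave the minor ticks unlabelled.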
def _ticks(self, lower, upper, numticks=None):
if isinstance(numticks, (float, int)):
ideal = (upper-lower)/float(numticks)
else:
ideal = (upper-lower)/7.
log = _Numeric.log10(ideal)
power = _Numeric.floor(log)
if isinstance(numticks, (float, int)):
grid = ideal
else:
fraction = log-power
factor = 1.
error = fraction
for f, lf in self._multiples:
e = _Numeric.fabs(fraction-lf)
if e < error:
error = e
factor = f
grid = factor * 10.**power
if self._useScientificNotation and (power > 4 or power < -4):
format = '%+7.1e'
elif power >= 0:
digits = max(1, int(power))
            format = '%' + str(digits) + '.0f'
else:
digits = -int(power)
            format = '%' + str(digits + 2) + '.' + str(digits) + 'f'
ticks = []
t = -grid*_Numeric.floor(-lower/grid)
while t <= upper:
if t == -0:
t = 0
ticks.append( (t, format % (t,)) )
t = t + grid
return ticks
_multiples = [(2., _Numeric.log10(2.)), (5., _Numeric.log10(5.))]
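    # Editor's note (illustrative sketch, not part of the original module):
    # _ticks() picks a grid step that is a 1, 2 or 5 multiple of a power of
    # ten, namely the one whose log10 is closest to log10((upper - lower) / 7).
    # Roughly (assuming a PlotCanvas instance):
    #   self._ticks(0.0, 12.0)   # ideal step 12/7 ~ 1.7 -> grid 2.0
    #   # -> [(0.0, '0'), (2.0, '2'), (4.0, '4'), ..., (12.0, '12')]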
def _adjustScrollbars(self):
if self._sb_ignore:
self._sb_ignore = False
return
if not self.GetShowScrollbars():
return
self._adjustingSB = True
needScrollbars = False
# horizontal scrollbar
r_current = self._getXCurrentRange()
r_max = list(self._getXMaxRange())
sbfullrange = float(self.sb_hor.GetRange())
r_max[0] = min(r_max[0],r_current[0])
r_max[1] = max(r_max[1],r_current[1])
self._sb_xfullrange = r_max
unit = (r_max[1]-r_max[0])/float(self.sb_hor.GetRange())
pos = int((r_current[0]-r_max[0])/unit)
if pos >= 0:
pagesize = int((r_current[1]-r_current[0])/unit)
self.sb_hor.SetScrollbar(pos, pagesize, sbfullrange, pagesize)
self._sb_xunit = unit
needScrollbars = needScrollbars or (pagesize != sbfullrange)
else:
self.sb_hor.SetScrollbar(0, 1000, 1000, 1000)
# vertical scrollbar
r_current = self._getYCurrentRange()
r_max = list(self._getYMaxRange())
sbfullrange = float(self.sb_vert.GetRange())
r_max[0] = min(r_max[0],r_current[0])
r_max[1] = max(r_max[1],r_current[1])
self._sb_yfullrange = r_max
unit = (r_max[1]-r_max[0])/sbfullrange
pos = int((r_current[0]-r_max[0])/unit)
if pos >= 0:
pagesize = int((r_current[1]-r_current[0])/unit)
pos = (sbfullrange-1-pos-pagesize)
self.sb_vert.SetScrollbar(pos, pagesize, sbfullrange, pagesize)
self._sb_yunit = unit
needScrollbars = needScrollbars or (pagesize != sbfullrange)
else:
self.sb_vert.SetScrollbar(0, 1000, 1000, 1000)
self.SetShowScrollbars(needScrollbars)
self._adjustingSB = False
#-------------------------------------------------------------------------------
# Used to layout the printer page
class PlotPrintout(wx.Printout):
"""Controls how the plot is made in printing and previewing"""
# Do not change method names in this class,
# we have to override wx.Printout methods here!
def __init__(self, graph):
"""graph is instance of plotCanvas to be printed or previewed"""
wx.Printout.__init__(self)
self.graph = graph
def HasPage(self, page):
if page == 1:
return True
else:
return False
def GetPageInfo(self):
return (1, 1, 1, 1) # disable page numbers
def OnPrintPage(self, page):
dc = self.GetDC() # allows using floats for certain functions
## print "PPI Printer",self.GetPPIPrinter()
## print "PPI Screen", self.GetPPIScreen()
## print "DC GetSize", dc.GetSize()
## print "GetPageSizePixels", self.GetPageSizePixels()
# Note PPIScreen does not give the correct number
        # Calculate everything for printer and then scale for preview
PPIPrinter= self.GetPPIPrinter() # printer dots/inch (w,h)
#PPIScreen= self.GetPPIScreen() # screen dots/inch (w,h)
dcSize= dc.GetSize() # DC size
if self.graph._antiAliasingEnabled and not isinstance(dc, wx.GCDC):
try:
dc = wx.GCDC(dc)
            except Exception:
pass
else:
if self.graph._hiResEnabled:
dc.SetMapMode(wx.MM_TWIPS) # high precision - each logical unit is 1/20 of a point
        pageSize= self.GetPageSizePixels() # page size in terms of pixels
clientDcSize= self.graph.GetClientSize()
# find what the margins are (mm)
margLeftSize,margTopSize= self.graph.pageSetupData.GetMarginTopLeft()
margRightSize, margBottomSize= self.graph.pageSetupData.GetMarginBottomRight()
# calculate offset and scale for dc
pixLeft= margLeftSize*PPIPrinter[0]/25.4 # mm*(dots/in)/(mm/in)
pixRight= margRightSize*PPIPrinter[0]/25.4
pixTop= margTopSize*PPIPrinter[1]/25.4
pixBottom= margBottomSize*PPIPrinter[1]/25.4
plotAreaW= pageSize[0]-(pixLeft+pixRight)
plotAreaH= pageSize[1]-(pixTop+pixBottom)
# ratio offset and scale to screen size if preview
if self.IsPreview():
ratioW= float(dcSize[0])/pageSize[0]
ratioH= float(dcSize[1])/pageSize[1]
pixLeft *= ratioW
pixTop *= ratioH
plotAreaW *= ratioW
plotAreaH *= ratioH
# rescale plot to page or preview plot area
self.graph._setSize(plotAreaW,plotAreaH)
# Set offset and scale
dc.SetDeviceOrigin(pixLeft,pixTop)
# Thicken up pens and increase marker size for printing
ratioW= float(plotAreaW)/clientDcSize[0]
ratioH= float(plotAreaH)/clientDcSize[1]
aveScale= (ratioW+ratioH)/2
if self.graph._antiAliasingEnabled and not self.IsPreview():
scale = dc.GetUserScale()
dc.SetUserScale(scale[0] / self.graph._pointSize[0], scale[1] / self.graph._pointSize[1])
        self.graph._setPrinterScale(aveScale) # thickens up pens for printing
self.graph._printDraw(dc)
# rescale back to original
self.graph._setSize()
self.graph._setPrinterScale(1)
self.graph.Redraw() #to get point label scale and shift correct
return True
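# Editor's usage sketch (assumption, not part of the original file): a
# PlotPrintout is normally driven through the PlotCanvas print helpers rather
# than used directly, roughly like this:
#
#   printout = PlotPrintout(canvas)                        # canvas: a PlotCanvas
#   printer = wx.Printer(wx.PrintDialogData(canvas.print_data))
#   printer.Print(canvas, printout, True)                  # True -> show dialog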
#----------------------------------------------------------------------
from wx.lib.embeddedimage import PyEmbeddedImage
MagPlus = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAABHNCSVQICAgIfAhkiAAAAOFJ"
"REFUeJy1VdEOxCAIo27//8XbuKfuPASGZ0Zisoi2FJABbZM3bY8c13lo5GvbjioBPAUEB0Yc"
"VZ0iGRRc56Ee8DcikEgrJD8EFpzRegQASiRtBtzuA0hrdRPYQxaEKyJPG6IHyiK3xnNZvUSS"
"NvUuzgYh0il4y14nCFPk5XgmNbRbQbVotGo9msj47G3UXJ7fuz8Q8FAGEu0/PbZh2D3NoshU"
"1VUydBGVZKMimlGeErdNGUmf/x7YpjMjcf8HVYvS2adr6aFVlCy/5Ijk9q8SeCR9isJR8SeJ"
"8pv7S0Wu2Acr0qdj3w7DRAAAAABJRU5ErkJggg==")
GrabHand = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAABHNCSVQICAgIfAhkiAAAARFJ"
"REFUeJy1VdESgzAIS2j//4s3s5fRQ6Rad5M7H0oxCZhWSpK1TjwUBCBJAIBItL1fijlfe1yJ"
"8noCGC9KgrXO7f0SyZEDAF/H2opsAHv9V/548nplT5Jo7YAFQKQ1RMWzmHUS96suqdBrHkuV"
"uxpdJjCS8CfGXWdJ2glzcquKSR5c46QOtCpgNyIHj6oieAXg3282QvMX45hy8a8H0VonJZUO"
"clesjOPg/dhBTq64o1Kacz4Ri2x5RKsf8+wcWQaJJL+A+xRcZHeQeBKjK+5EFiVJ4xy4x2Mn"
"1Vk4U5/DWmfPieiqbye7a3tV/cCsWKu76K76KUFFchVnhigJ/hmktelm/m3e3b8k+Ec8PqLH"
"CT4JRfyK9o1xYwAAAABJRU5ErkJggg==")
Hand = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAABHNCSVQICAgIfAhkiAAAARBJ"
"REFUeJytluECwiAIhDn1/Z942/UnjCGoq+6XNeWDC1xAqbKr6zyo61Ibds60J8GBT0yS3IEM"
"ABuIpJTa4IOLiAAQksuKyixLH1ShHgTgZl8KiALxOsODPoEMkgJ25Su6zoO3ZrjRnI96OLIq"
"k7dsqOCboDa4XV/nwQEQVeFtmMnvbSJja+oagKBUaLn9hzd7VipRa9ostIv0O1uhzzaqNJxk"
"hViwDVxqg51kksMg9r2rDDIFwHCap130FBhdMzeAfWg//6Ki5WWQrHSv6EIUeVs0g3wT3J7r"
"FmWQp/JJDXeRh2TXcJa91zAH2uN2mvXFsrIrsjS8rnftWmWfAiLIStuD9m9h9belvzgS/1fP"
"X7075IwDENteAAAAAElFTkSuQmCC")
#---------------------------------------------------------------------------
# if running standalone...
#
# ...a sample implementation using the above
#
def _draw1Objects():
# 100 points sin function, plotted as green circles
data1 = 2.*_Numeric.pi*_Numeric.arange(200)/200.
data1.shape = (100, 2)
data1[:,1] = _Numeric.sin(data1[:,0])
markers1 = PolyMarker(data1, legend='Green Markers', colour='green', marker='circle',size=1)
# 50 points cos function, plotted as red line
data1 = 2.*_Numeric.pi*_Numeric.arange(100)/100.
data1.shape = (50,2)
data1[:,1] = _Numeric.cos(data1[:,0])
lines = PolySpline(data1, legend= 'Red Line', colour='red')
# A few more points...
pi = _Numeric.pi
markers2 = PolyMarker([(0., 0.), (pi/4., 1.), (pi/2, 0.),
(3.*pi/4., -1)], legend='Cross Legend', colour='blue',
marker='cross')
return PlotGraphics([markers1, lines, markers2],"Graph Title", "X Axis", "Y Axis")
def _draw2Objects():
# 100 points sin function, plotted as green dots
data1 = 2.*_Numeric.pi*_Numeric.arange(200)/200.
data1.shape = (100, 2)
data1[:,1] = _Numeric.sin(data1[:,0])
line1 = PolySpline(data1, legend='Green Line', colour='green', width=6, style=wx.DOT)
# 50 points cos function, plotted as red dot-dash
data1 = 2.*_Numeric.pi*_Numeric.arange(100)/100.
data1.shape = (50,2)
data1[:,1] = _Numeric.cos(data1[:,0])
line2 = PolySpline(data1, legend='Red Line', colour='red', width=3, style= wx.DOT_DASH)
# A few more points...
pi = _Numeric.pi
markers1 = PolyMarker([(0., 0.), (pi/4., 1.), (pi/2, 0.),
(3.*pi/4., -1)], legend='Cross Hatch Square', colour='blue', width= 3, size= 6,
fillcolour= 'red', fillstyle= wx.CROSSDIAG_HATCH,
marker='square')
return PlotGraphics([markers1, line1, line2], "Big Markers with Different Line Styles")
def _draw3Objects():
markerList= ['circle', 'dot', 'square', 'triangle', 'triangle_down',
'cross', 'plus', 'circle']
m=[]
for i in range(len(markerList)):
m.append(PolyMarker([(2*i+.5,i+.5)], legend=markerList[i], colour='blue',
marker=markerList[i]))
return PlotGraphics(m, "Selection of Markers", "Minimal Axis", "No Axis")
def _draw4Objects():
# 25,000 point line
data1 = _Numeric.arange(5e5,1e6,10)
data1.shape = (25000, 2)
line1 = PolyLine(data1, legend='Wide Line', colour='green', width=5)
# A few more points...
markers2 = PolyMarker(data1, legend='Square', colour='blue',
marker='square')
return PlotGraphics([line1, markers2], "25,000 Points", "Value X", "")
def _draw5Objects():
# Empty graph with axis defined but no points/lines
points=[]
line1 = PolyLine(points, legend='Wide Line', colour='green', width=5)
return PlotGraphics([line1], "Empty Plot With Just Axes", "Value X", "Value Y")
def _draw6Objects():
# Bar graph
points1=[(1,0), (1,10)]
line1 = PolyLine(points1, colour='green', legend='Feb.', width=10)
points1g=[(2,0), (2,4)]
line1g = PolyLine(points1g, colour='red', legend='Mar.', width=10)
points1b=[(3,0), (3,6)]
line1b = PolyLine(points1b, colour='blue', legend='Apr.', width=10)
points2=[(4,0), (4,12)]
line2 = PolyLine(points2, colour='Yellow', legend='May', width=10)
points2g=[(5,0), (5,8)]
line2g = PolyLine(points2g, colour='orange', legend='June', width=10)
points2b=[(6,0), (6,4)]
line2b = PolyLine(points2b, colour='brown', legend='July', width=10)
return PlotGraphics([line1, line1g, line1b, line2, line2g, line2b],
"Bar Graph - (Turn on Grid, Legend)", "Months", "Number of Students")
def _draw7Objects():
    # Double log plot with two power-law curves
x = _Numeric.arange(1,1000,1)
y1 = 4.5*x**2
y2 = 2.2*x**3
points1 = _Numeric.transpose([x,y1])
points2 = _Numeric.transpose([x,y2])
line1 = PolyLine(points1, legend='quadratic', colour='blue', width=1)
line2 = PolyLine(points2, legend='cubic', colour='red', width=1)
return PlotGraphics([line1,line2], "double log plot", "Value X", "Value Y")
class TestFrame(wx.Frame):
def __init__(self, parent, id, title):
wx.Frame.__init__(self, parent, id, title,
wx.DefaultPosition, (600, 400))
# Now Create the menu bar and items
self.mainmenu = wx.MenuBar()
menu = wx.Menu()
menu.Append(200, 'Page Setup...', 'Setup the printer page')
self.Bind(wx.EVT_MENU, self.OnFilePageSetup, id=200)
menu.Append(201, 'Print Preview...', 'Show the current plot on page')
self.Bind(wx.EVT_MENU, self.OnFilePrintPreview, id=201)
menu.Append(202, 'Print...', 'Print the current plot')
self.Bind(wx.EVT_MENU, self.OnFilePrint, id=202)
menu.Append(203, 'Save Plot...', 'Save current plot')
self.Bind(wx.EVT_MENU, self.OnSaveFile, id=203)
menu.Append(205, 'E&xit', 'Enough of this already!')
self.Bind(wx.EVT_MENU, self.OnFileExit, id=205)
self.mainmenu.Append(menu, '&File')
menu = wx.Menu()
menu.Append(206, 'Draw1', 'Draw plots1')
self.Bind(wx.EVT_MENU,self.OnPlotDraw1, id=206)
menu.Append(207, 'Draw2', 'Draw plots2')
self.Bind(wx.EVT_MENU,self.OnPlotDraw2, id=207)
menu.Append(208, 'Draw3', 'Draw plots3')
self.Bind(wx.EVT_MENU,self.OnPlotDraw3, id=208)
menu.Append(209, 'Draw4', 'Draw plots4')
self.Bind(wx.EVT_MENU,self.OnPlotDraw4, id=209)
menu.Append(210, 'Draw5', 'Draw plots5')
self.Bind(wx.EVT_MENU,self.OnPlotDraw5, id=210)
menu.Append(260, 'Draw6', 'Draw plots6')
self.Bind(wx.EVT_MENU,self.OnPlotDraw6, id=260)
menu.Append(261, 'Draw7', 'Draw plots7')
self.Bind(wx.EVT_MENU,self.OnPlotDraw7, id=261)
menu.Append(211, '&Redraw', 'Redraw plots')
self.Bind(wx.EVT_MENU,self.OnPlotRedraw, id=211)
menu.Append(212, '&Clear', 'Clear canvas')
self.Bind(wx.EVT_MENU,self.OnPlotClear, id=212)
menu.Append(213, '&Scale', 'Scale canvas')
self.Bind(wx.EVT_MENU,self.OnPlotScale, id=213)
menu.Append(214, 'Enable &Zoom', 'Enable Mouse Zoom', kind=wx.ITEM_CHECK)
self.Bind(wx.EVT_MENU,self.OnEnableZoom, id=214)
menu.Append(215, 'Enable &Grid', 'Turn on Grid', kind=wx.ITEM_CHECK)
self.Bind(wx.EVT_MENU,self.OnEnableGrid, id=215)
menu.Append(217, 'Enable &Drag', 'Activates dragging mode', kind=wx.ITEM_CHECK)
self.Bind(wx.EVT_MENU,self.OnEnableDrag, id=217)
menu.Append(220, 'Enable &Legend', 'Turn on Legend', kind=wx.ITEM_CHECK)
self.Bind(wx.EVT_MENU,self.OnEnableLegend, id=220)
menu.Append(222, 'Enable &Point Label', 'Show Closest Point', kind=wx.ITEM_CHECK)
self.Bind(wx.EVT_MENU,self.OnEnablePointLabel, id=222)
menu.Append(223, 'Enable &Anti-Aliasing', 'Smooth output', kind=wx.ITEM_CHECK)
self.Bind(wx.EVT_MENU,self.OnEnableAntiAliasing, id=223)
menu.Append(224, 'Enable &High-Resolution AA', 'Draw in higher resolution', kind=wx.ITEM_CHECK)
self.Bind(wx.EVT_MENU,self.OnEnableHiRes, id=224)
menu.Append(226, 'Enable Center Lines', 'Draw center lines', kind=wx.ITEM_CHECK)
self.Bind(wx.EVT_MENU,self.OnEnableCenterLines, id=226)
menu.Append(227, 'Enable Diagonal Lines', 'Draw diagonal lines', kind=wx.ITEM_CHECK)
self.Bind(wx.EVT_MENU,self.OnEnableDiagonals, id=227)
menu.Append(231, 'Set Gray Background', 'Change background colour to gray')
self.Bind(wx.EVT_MENU,self.OnBackgroundGray, id=231)
menu.Append(232, 'Set &White Background', 'Change background colour to white')
self.Bind(wx.EVT_MENU,self.OnBackgroundWhite, id=232)
menu.Append(233, 'Set Red Label Text', 'Change label text colour to red')
self.Bind(wx.EVT_MENU,self.OnForegroundRed, id=233)
menu.Append(234, 'Set &Black Label Text', 'Change label text colour to black')
self.Bind(wx.EVT_MENU,self.OnForegroundBlack, id=234)
menu.Append(225, 'Scroll Up 1', 'Move View Up 1 Unit')
self.Bind(wx.EVT_MENU,self.OnScrUp, id=225)
menu.Append(230, 'Scroll Rt 2', 'Move View Right 2 Units')
self.Bind(wx.EVT_MENU,self.OnScrRt, id=230)
menu.Append(235, '&Plot Reset', 'Reset to original plot')
self.Bind(wx.EVT_MENU,self.OnReset, id=235)
self.mainmenu.Append(menu, '&Plot')
menu = wx.Menu()
menu.Append(300, '&About', 'About this thing...')
self.Bind(wx.EVT_MENU, self.OnHelpAbout, id=300)
self.mainmenu.Append(menu, '&Help')
self.SetMenuBar(self.mainmenu)
# A status bar to tell people what's happening
self.CreateStatusBar(1)
self.client = PlotCanvas(self)
#define the function for drawing pointLabels
self.client.SetPointLabelFunc(self.DrawPointLabel)
# Create mouse event for showing cursor coords in status bar
self.client.canvas.Bind(wx.EVT_LEFT_DOWN, self.OnMouseLeftDown)
# Show closest point when enabled
self.client.canvas.Bind(wx.EVT_MOTION, self.OnMotion)
self.Show(True)
def DrawPointLabel(self, dc, mDataDict):
"""This is the fuction that defines how the pointLabels are plotted
dc - DC that will be passed
mDataDict - Dictionary of data that you want to use for the pointLabel
As an example I have decided I want a box at the curve point
with some text information about the curve plotted below.
Any wxDC method can be used.
"""
# ----------
dc.SetPen(wx.Pen(wx.BLACK))
dc.SetBrush(wx.Brush( wx.BLACK, wx.SOLID ) )
sx, sy = mDataDict["scaledXY"] #scaled x,y of closest point
dc.DrawRectangle( sx-5,sy-5, 10, 10) #10by10 square centered on point
px,py = mDataDict["pointXY"]
cNum = mDataDict["curveNum"]
pntIn = mDataDict["pIndex"]
legend = mDataDict["legend"]
#make a string to display
s = "Crv# %i, '%s', Pt. (%.2f,%.2f), PtInd %i" %(cNum, legend, px, py, pntIn)
dc.DrawText(s, sx , sy+1)
# -----------
def OnMouseLeftDown(self,event):
s= "Left Mouse Down at Point: (%.4f, %.4f)" % self.client._getXY(event)
self.SetStatusText(s)
event.Skip() #allows plotCanvas OnMouseLeftDown to be called
def OnMotion(self, event):
        #show closest point (when enabled)
if self.client.GetEnablePointLabel() == True:
#make up dict with info for the pointLabel
#I've decided to mark the closest point on the closest curve
dlst= self.client.GetClosestPoint( self.client._getXY(event), pointScaled= True)
if dlst != []: #returns [] if none
curveNum, legend, pIndex, pointXY, scaledXY, distance = dlst
#make up dictionary to pass to my user function (see DrawPointLabel)
mDataDict= {"curveNum":curveNum, "legend":legend, "pIndex":pIndex,\
"pointXY":pointXY, "scaledXY":scaledXY}
#pass dict to update the pointLabel
self.client.UpdatePointLabel(mDataDict)
event.Skip() #go to next handler
def OnFilePageSetup(self, event):
self.client.PageSetup()
def OnFilePrintPreview(self, event):
self.client.PrintPreview()
def OnFilePrint(self, event):
self.client.Printout()
def OnSaveFile(self, event):
self.client.SaveFile()
def OnFileExit(self, event):
self.Close()
def OnPlotDraw1(self, event):
self.resetDefaults()
self.client.Draw(_draw1Objects())
def OnPlotDraw2(self, event):
self.resetDefaults()
self.client.Draw(_draw2Objects())
def OnPlotDraw3(self, event):
self.resetDefaults()
self.client.SetFont(wx.Font(10,wx.SCRIPT,wx.NORMAL,wx.NORMAL))
self.client.SetFontSizeAxis(20)
self.client.SetFontSizeLegend(12)
self.client.SetXSpec('min')
self.client.SetYSpec('none')
self.client.Draw(_draw3Objects())
def OnPlotDraw4(self, event):
self.resetDefaults()
drawObj= _draw4Objects()
self.client.Draw(drawObj)
## # profile
## start = _time.clock()
## for x in range(10):
## self.client.Draw(drawObj)
## print "10 plots of Draw4 took: %f sec."%(_time.clock() - start)
## # profile end
def OnPlotDraw5(self, event):
# Empty plot with just axes
self.resetDefaults()
drawObj= _draw5Objects()
# make the axis X= (0,5), Y=(0,10)
# (default with None is X= (-1,1), Y= (-1,1))
self.client.Draw(drawObj, xAxis= (0,5), yAxis= (0,10))
def OnPlotDraw6(self, event):
#Bar Graph Example
self.resetDefaults()
#self.client.SetEnableLegend(True) #turn on Legend
#self.client.SetEnableGrid(True) #turn on Grid
self.client.SetXSpec('none') #turns off x-axis scale
self.client.SetYSpec('auto')
self.client.Draw(_draw6Objects(), xAxis= (0,7))
def OnPlotDraw7(self, event):
#log scale example
self.resetDefaults()
self.client.setLogScale((True,True))
self.client.Draw(_draw7Objects())
def OnPlotRedraw(self,event):
self.client.Redraw()
def OnPlotClear(self,event):
self.client.Clear()
def OnPlotScale(self, event):
if self.client.last_draw != None:
graphics, xAxis, yAxis= self.client.last_draw
self.client.Draw(graphics,(1,3.05),(0,1))
def OnEnableZoom(self, event):
self.client.SetEnableZoom(event.IsChecked())
self.mainmenu.Check(217, not event.IsChecked())
def OnEnableGrid(self, event):
self.client.SetEnableGrid(event.IsChecked())
def OnEnableDrag(self, event):
self.client.SetEnableDrag(event.IsChecked())
self.mainmenu.Check(214, not event.IsChecked())
def OnEnableLegend(self, event):
self.client.SetEnableLegend(event.IsChecked())
def OnEnablePointLabel(self, event):
self.client.SetEnablePointLabel(event.IsChecked())
def OnEnableAntiAliasing(self, event):
self.client.SetEnableAntiAliasing(event.IsChecked())
def OnEnableHiRes(self, event):
self.client.SetEnableHiRes(event.IsChecked())
def OnEnableCenterLines(self, event):
self.client.SetEnableCenterLines(event.IsChecked())
def OnEnableDiagonals(self, event):
self.client.SetEnableDiagonals(event.IsChecked())
def OnBackgroundGray(self, event):
self.client.SetBackgroundColour("#CCCCCC")
self.client.Redraw()
def OnBackgroundWhite(self, event):
self.client.SetBackgroundColour("white")
self.client.Redraw()
def OnForegroundRed(self, event):
self.client.SetForegroundColour("red")
self.client.Redraw()
def OnForegroundBlack(self, event):
self.client.SetForegroundColour("black")
self.client.Redraw()
def OnScrUp(self, event):
self.client.ScrollUp(1)
def OnScrRt(self,event):
self.client.ScrollRight(2)
def OnReset(self,event):
self.client.Reset()
def OnHelpAbout(self, event):
from wx.lib.dialogs import ScrolledMessageDialog
about = ScrolledMessageDialog(self, __doc__, "About...")
about.ShowModal()
def resetDefaults(self):
"""Just to reset the fonts back to the PlotCanvas defaults"""
self.client.SetFont(wx.Font(10,wx.SWISS,wx.NORMAL,wx.NORMAL))
self.client.SetFontSizeAxis(10)
self.client.SetFontSizeLegend(7)
self.client.setLogScale((False,False))
self.client.SetXSpec('auto')
self.client.SetYSpec('auto')
def __test():
class MyApp(wx.App):
def OnInit(self):
wx.InitAllImageHandlers()
frame = TestFrame(None, -1, "PlotCanvas")
#frame.Show(True)
self.SetTopWindow(frame)
return True
app = MyApp(0)
app.MainLoop()
if __name__ == '__main__':
__test()
| mit | 3,829,642,802,606,390,000 | 38.771865 | 143 | 0.566765 | false |
supriyantomaftuh/python_api | python_api/marklogic/connection.py | 1 | 7675 | #
# Copyright 2015 MarkLogic Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File History
# ------------
#
# Paul Hoehne 03/01/2015 Initial development
#
import json, logging, requests, time
from http.client import BadStatusLine
from marklogic.exceptions import UnexpectedManagementAPIResponse
from marklogic.exceptions import UnauthorizedAPIRequest
from requests.auth import HTTPDigestAuth
from requests.exceptions import ConnectionError
from requests.packages.urllib3.exceptions import ProtocolError
"""
Connection related classes and methods to connect to MarkLogic.
"""
class Connection:
"""
The connection class encapsulates the information to connect to
a MarkLogic server.
"""
def __init__(self, host, auth,
protocol="http", port=8000, management_port=8002,
root="manage", version="v2"):
self.host = host
self.auth = auth
self.protocol = protocol
self.port = port
self.management_port = management_port
self.root = root
self.version = version
self.logger = logging.getLogger("marklogic")
# You'd expect parameters to be a dictionary, but then it couldn't
# have repeated keys, so it's an array.
def uri(self, relation, name=None,
protocol=None, host=None, port=None, root=None, version=None,
properties="/properties", parameters=None):
if protocol is None:
protocol = self.protocol
if host is None:
host = self.host
if port is None:
port = self.management_port
if root is None:
root = self.root
if version is None:
version = self.version
if name is None:
name = ""
else:
name = "/" + name
if properties is not None:
name = name + properties
uri = "{0}://{1}:{2}/{3}/{4}/{5}{6}" \
.format(protocol, host, port, root, version, relation, name)
if parameters is not None:
uri = uri + "?" + "&".join(parameters)
return uri
def head(self, uri, accept="application/json"):
self.logger.debug("Getting {0}...".format(uri))
self.response = requests.head(uri, auth=self.auth)
return self._response()
def get(self, uri, accept="application/json"):
headers = {'accept': accept}
self.logger.debug("Getting {0}...".format(uri))
self.response = requests.get(uri, auth=self.auth, headers=headers)
return self._response()
def post(self, uri, payload=None, etag=None,
content_type="application/json", accept="application/json"):
headers = {'content-type': content_type,
'accept': accept}
if etag is not None:
headers['if-match'] = etag
self.logger.debug("Posting to {0}...".format(uri))
if payload is None:
self.response = requests.post(uri, auth=self.auth, headers=headers)
else:
if content_type == "application/json":
self.response = requests.post(uri, json=payload,
auth=self.auth, headers=headers)
else:
self.response = requests.post(uri, data=payload,
auth=self.auth, headers=headers)
return self._response()
def put(self, uri, payload=None, etag=None,
content_type="application/json", accept="application/json"):
headers = {'content-type': content_type,
'accept': accept}
if etag is not None:
headers['if-match'] = etag
self.logger.debug("Putting to {0}...".format(uri))
if payload is None:
self.response = requests.put(uri, auth=self.auth, headers=headers)
else:
self.response = requests.put(uri, json=payload,
auth=self.auth, headers=headers)
return self._response()
def delete(self, uri, payload=None, etag=None,
content_type="application/json", accept="application/json"):
headers = {'content-type': content_type,
'accept': accept}
if etag is not None:
headers['if-match'] = etag
self.logger.debug("Deleting {0}...".format(uri))
if payload is None:
self.response = requests.delete(uri, auth=self.auth, headers=headers)
else:
self.response = requests.delete(uri, json=payload,
auth=self.auth, headers=headers)
return self._response()
def _response(self):
response = self.response
if response.status_code < 300:
pass
elif response.status_code == 404:
pass
elif response.status_code == 401:
raise UnauthorizedAPIRequest(response.text)
else:
raise UnexpectedManagementAPIResponse(response.text)
if response.status_code == 202:
data = json.loads(response.text)
# restart isn't in data, for example, if you execute a shutdown
if "restart" in data:
self.wait_for_restart(data["restart"]["last-startup"][0]["value"])
return response
def wait_for_restart(self, last_startup, timestamp_uri="/admin/v1/timestamp"):
"""
Wait for the host to restart.
:param last_startup: The last startup time reported in the restart message
"""
uri = "{0}://{1}:8001{2}".format(self.protocol, self.host,
timestamp_uri)
done = False
count = 24
while not done:
try:
self.logger.debug("Waiting for restart of {0}".format(self.host))
response = requests.get(uri, auth=self.auth,
headers={'accept': 'application/json'})
done = response.status_code == 200 and response.text != last_startup
except TypeError:
self.logger.debug("{0}: {1}".format(response.status_code,
response.text))
pass
except BadStatusLine:
self.logger.debug("{0}: {1}".format(response.status_code,
response.text))
pass
except ProtocolError:
self.logger.debug("{0}: {1}".format(response.status_code,
response.text))
pass
except ConnectionError:
self.logger.debug("Connection error...")
pass
time.sleep(4) # Sleep one more time even after success...
count -= 1
if count <= 0:
raise UnexpectedManagementAPIResponse("Restart hung?")
self.logger.debug("{0} restarted".format(self.host))
@classmethod
def make_connection(cls, host, username, password):
return Connection(host, HTTPDigestAuth(username, password))
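# Editor's usage sketch (assumption, not part of the original module):
#
#   conn = Connection.make_connection("localhost", "admin", "admin")
#   uri = conn.uri("databases", name="Documents")
#   # -> http://localhost:8002/manage/v2/databases/Documents/properties
#   response = conn.get(uri)
#   print(response.json())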
| apache-2.0 | 8,772,053,235,185,808,000 | 35.374408 | 84 | 0.569251 | false |
Juanvulcano/zulip | zerver/tests/test_realm_filters.py | 3 | 3104 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from zerver.lib.actions import get_realm_by_string_id, do_add_realm_filter
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import RealmFilter
import ujson
class RealmFilterTest(ZulipTestCase):
def test_list(self):
# type: () -> None
self.login("[email protected]")
realm = get_realm_by_string_id('zulip')
do_add_realm_filter(
realm,
"#(?P<id>[123])",
"https://realm.com/my_realm_filter/%(id)s")
result = self.client_get("/json/realm/filters")
self.assert_json_success(result)
self.assertEqual(200, result.status_code)
content = ujson.loads(result.content)
self.assertEqual(len(content["filters"]), 1)
def test_create(self):
# type: () -> None
self.login("[email protected]")
data = {"pattern": "", "url_format_string": "https://realm.com/my_realm_filter/%(id)s"}
result = self.client_post("/json/realm/filters", info=data)
self.assert_json_error(result, 'This field cannot be blank.')
data['pattern'] = '$a'
result = self.client_post("/json/realm/filters", info=data)
self.assert_json_error(result, 'Invalid filter pattern, you must use the following format OPTIONAL_PREFIX(?P<id>.+)')
data['pattern'] = 'ZUL-(?P<id>\d++)'
result = self.client_post("/json/realm/filters", info=data)
self.assert_json_error(result, 'Invalid filter pattern, you must use the following format OPTIONAL_PREFIX(?P<id>.+)')
data['pattern'] = 'ZUL-(?P<id>\d+)'
data['url_format_string'] = '$fgfg'
result = self.client_post("/json/realm/filters", info=data)
self.assert_json_error(result, 'URL format string must be in the following format: `https://example.com/%(\\w+)s`')
data['url_format_string'] = 'https://realm.com/my_realm_filter/%(id)s'
result = self.client_post("/json/realm/filters", info=data)
self.assert_json_success(result)
def test_not_realm_admin(self):
# type: () -> None
self.login("[email protected]")
result = self.client_post("/json/realm/filters")
self.assert_json_error(result, 'Must be a realm administrator')
result = self.client_delete("/json/realm/filters/15")
self.assert_json_error(result, 'Must be a realm administrator')
def test_delete(self):
# type: () -> None
self.login("[email protected]")
realm = get_realm_by_string_id('zulip')
filter_id = do_add_realm_filter(
realm,
"#(?P<id>[123])",
"https://realm.com/my_realm_filter/%(id)s")
filters_count = RealmFilter.objects.count()
result = self.client_delete("/json/realm/filters/{0}".format(filter_id + 1))
self.assert_json_error(result, 'Filter not found')
result = self.client_delete("/json/realm/filters/{0}".format(filter_id))
self.assert_json_success(result)
self.assertEqual(RealmFilter.objects.count(), filters_count - 1)
| apache-2.0 | 8,295,215,289,874,825,000 | 42.111111 | 125 | 0.615979 | false |
scampion/pimpy | pimpy/video/__init__.py | 1 | 5165 | """
pimpy.video : video objects class
.. module:: video
:synopsis: Tools for video
:platform: Unix, Mac, Windows
.. moduleauthor:: Sebastien Campion <[email protected]>
"""
# pimpy
# Copyright (C) 2010 Sebastien Campion <[email protected]>
#
# pimpy is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# pimpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pimpy; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
#import shotdetector as sd
import logging
import tempfile
import numpy
from urlparse import urlparse
from pimpy.video.factory import FeatureFactory
from pimpy.video.goodshotdetector import ShotDetector
from pimpy.video.frameextractor import FrameExtractor
from pimpy.discover import GstFile
import pimpy.hdf5
class Video:
log = logging.getLogger('pimpy.video')
framenumber = 0
signature = None
histogram = None
cuts = None
def __init__(self,uri,begin=0,end=None,hdf5file=None):
"""
Video object
:param uri: input uri like file:///home/...
:type: string
:param begin: in second
:type: float
:param end: in second
:type: float
"""
self.uri = uri
self.begin = begin
self.end = end
self.log.debug("Video object created : %s" % self.uri)
if hdf5file == None :
hdf5file = tempfile.NamedTemporaryFile(suffix='.hdf5').name
#elif os.path.exists(hdf5file):
# self.log.debug("Load data from hdf5file %s "% hdf5file)
# self.load(hdf5file)
mp = uri.replace("file://","")
self.hdf5 = pimpy.hdf5.File(hdf5file,mediafile_path=mp)
self.log.debug("hdf5 file %s" % hdf5file)
if begin is not None :
self.hdf5.attrs['begin'] = begin
if end is not None :
self.hdf5.attrs['end'] = end
#discovering metadata
mp = urlparse(uri).path
GstFile([mp],self.hdf5.attrs).run()
def list_features(self):
u"""
Return list of available features
:rtype: list of string
"""
return FeatureFactory.list()
def get_feature(self,name,**kwargs):
u"""
compute feature
:param name: feature name
:type string:
:param kwargs : optional pyofile
(string) to produce pynocchio file (hdf5 file format
"""
f = FeatureFactory.get_feature(name,**kwargs)
return f.get(self)
def getcuts(self):
u"""
        video cut detection using the ShotDetector class.
:param smoothbyhisto: if true, we remove cut if histogram difference isn't sufficient
:type: boolean
:rtype: list of frame number where cut was detected
"""
if 'goodshotdetection' not in self.hdf5['visual'].keys():
sd = ShotDetector()
cuts, diss = sd.process(self.uri)
cuts.sort()
diss.sort()
gsd = self.hdf5['visual'].create_group('goodshotdetection')
gsd.create_dataset('cuts', data=cuts)
gsd.create_dataset('dissolves', data=diss)
gsd = self.hdf5['visual']['goodshotdetection']
return map(numpy.array, (gsd['cuts'],gsd['dissolves']))
def getkeyframesid(self,cuts=None):
u"""
        key frames are defined as the middle frame of each cut segment
:rtype: list of key frame number
"""
if cuts == None :
cuts,diss = self.getcuts()
#middle keyframe : start_frame + duration / 2
keys = []
prev_cut= cuts[0]
for i in cuts[1:] :
key_frame = prev_cut+(i-prev_cut)/2
if key_frame not in keys :
keys.append(key_frame)
prev_cut = i
return keys
def getkeyframes(self):
u"""
Useful to debug or save frame
:rtype: list of key frame object
"""
fe = FrameExtractor(self)
return fe.getframes(self.getkeyframesid())
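# Editor's usage sketch (assumption, not part of the original module):
#
#   video = Video('file:///tmp/movie.avi')
#   cuts, dissolves = video.getcuts()     # frame numbers of cuts / dissolves
#   keyframes = video.getkeyframesid()    # middle frame of each detected shot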
# Frame Class
from pimpy.image import Image
class Frame(Image):
u"""
This class describes an video frame based on Image class
:class attributes:
* .. attribute:: video
The video instance (pimpy.video.video)where this frame come from
* .. attribute:: framenumber
The raw data of the image
* .. attribute:: width
The width of the image
* .. attribute:: height
The width of the image
"""
video = None
framenumber = -1
start = -1
duration = -1
| agpl-3.0 | 6,681,638,386,241,560,000 | 27.535912 | 94 | 0.598064 | false |
dmohankudo/APIFuzzing | FuzzAPI_git.py | 1 | 4405 | # -*- coding: utf-8 -*-
'''
Set Up:
::python 3 (preferred) flavor of (miniconda) installed
Usage:
::mal_file -> a file name containing malicious strings: 'xss.txt'
OR
-> a list of values having malicious strings : ["alert}>'><script>alert(<fin2000>)</script>", "<script>alert(<fin2000>)</script>", ...]
Each malicious string is set for all the keys in the json structure and posted
Dependency: UtilsLibFuzzing.py in the same directory as that of script
'''
import concurrent.futures as cf
import json
import logging
import requests
import time
from UtilsLibFuzzing import Utils, detection, logParsing
from requests.exceptions import ConnectionError
requests.packages.urllib3.disable_warnings() # supress https warnings
######################################################################
######################################################################
u = Utils() # create instance
dt = detection() # create instance
lg = logParsing()
master_log_file = 'log.txt'
logging.basicConfig(filename=master_log_file, filemode='w+', level=logging.INFO)
#######################################################################
def post_request(url, postdata):
    '''this method is passed into the async function
    e.g.
    executor.submit(post_request, #: function that makes the request
                    target_url,   #: end point url (argument to the function post_request)
                    postdata)     #: post data (argument to the function post_request)
    if the arguments of this function change, that needs to be reflected here.
    '''
try:
print(time.ctime())
with requests.session() as s:
            resp = s.post(url=url,
json=postdata,
verify=False)
return resp
except ConnectionError:
print('-' * 20)
print('----Connection Issues---')
print('-' * 20)
return None
def process_resp(resp):
print(resp)
    if resp:
        try:
            req = json.dumps(json.loads(resp.request.body), ensure_ascii=False)
        except Exception:
            req = resp.request.body
        code = resp.status_code
        log_prefix = dt.detect_in_response(resp, http_status_codes=[200, 201], result_prefix='PASS')
    else:
        resp, code, req = 'NA', 'NA', 'NA'
        log_prefix = 'network_issue-UNKNOWN'
    logging.info(time.ctime())
    logging.info('{l}-resp:{resp}-request body:{req}-status:{code}'.format(resp=resp,
                                                                           req=req,
                                                                           code=code,
                                                                           l=log_prefix))
def execute_async(no_of_parallel_req,
target_url,
mal_source,
orig_json):
'''this can fire multiple simultaneous requests using async '''
set_all_requests = set()
with cf.ThreadPoolExecutor(max_workers=no_of_parallel_req) as executor:
for postdata, val, key in u.postdata_generator_with_insecure_values_ee(orig_json, mal_source):
set_all_requests.add(executor.submit(post_request, #: function that makes the request
target_url, #: End point url
postdata)) #: post data with malicious data
for future in set_all_requests:
resp = future.result()
process_resp(resp)
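# Editor's note (assumption about UtilsLibFuzzing, not verified here):
# postdata_generator_with_insecure_values_ee() is expected to yield one
# (postdata, value, key) tuple per key/payload combination, e.g. for
# orig_json = {'name': 'x'} and a single payload '<script>1</script>' it would
# yield ({'name': '<script>1</script>'}, '<script>1</script>', 'name').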
def process_log(log_file):
lg.parse(log_file)
logging.info('log parsing finished')
logging.disable = True
## execution starts here ############
if __name__ == '__main__':
print('hi')
a = {1:'hi'}
api_url_enroll = 'https://digiXXX.com'
mal_file = 'all-attacks-unix.txt'
execute_async(no_of_parallel_req=6,
target_url=api_url_enroll,
mal_source=mal_file,
orig_json=a)
process_log(master_log_file)
print('bye')
| apache-2.0 | -7,637,848,054,684,910,000 | 35.649573 | 149 | 0.503065 | false |
dsemi/Flexget | flexget/plugins/metainfo/trakt_lookup.py | 1 | 16011 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, absolute_import, print_function
from builtins import * # pylint: disable=unused-import, redefined-builtin
import logging
import functools
from flexget import plugin
from flexget.event import event
from flexget.manager import Session
try:
from flexget.plugins.internal.api_trakt import ApiTrakt, list_actors, get_translations_dict
lookup_series = ApiTrakt.lookup_series
lookup_movie = ApiTrakt.lookup_movie
except ImportError:
raise plugin.DependencyError(issued_by='trakt_lookup', missing='api_trakt',
message='trakt_lookup requires the `api_trakt` plugin')
log = logging.getLogger('trakt_lookup')
class PluginTraktLookup(object):
"""Retrieves trakt information for entries. Uses series_name,
series_season, series_episode from series plugin.
Example:
trakt_lookup: yes
Primarily used for passing trakt information to other plugins.
Among these is the IMDB url for the series.
This information is provided (via entry):
series info:
trakt_series_name
trakt_series_runtime
trakt_series_first_aired_epoch
trakt_series_first_aired_iso
trakt_series_air_time
trakt_series_content_rating
trakt_series_genres
trakt_series_banner_url
trakt_series_fanart_url
trakt_series_imdb_url
trakt_series_trakt_url
trakt_series_images
imdb_id
tvdb_id
trakt_series_actors
trakt_series_country
trakt_series_year
trakt_series_tvrage_id
trakt_series_status
trakt_series_overview
trakt_ep_name
trakt_ep_season
trakt_ep_number
trakt_ep_overview
trakt_ep_first_aired_epoch
trakt_ep_first_aired_iso
trakt_ep_image_url
trakt_ep_id
trakt_ep_tvdb_id
trakt_ep_images
"""
# Series info
series_map = {
'trakt_series_name': 'title',
'trakt_series_year': 'year',
'imdb_id': 'imdb_id',
'tvdb_id': 'tvdb_id',
'tmdb_id': 'tmdb_id',
'trakt_show_id': 'id',
'trakt_slug': 'slug',
'tvrage_id': 'tvrage_id',
'trakt_trailer': 'trailer',
'trakt_homepage': 'homepage',
'trakt_series_runtime': 'runtime',
'trakt_series_first_aired': 'first_aired',
'trakt_series_air_time': 'air_time',
'trakt_series_air_day': 'air_day',
'trakt_series_content_rating': 'certification',
'trakt_genres': lambda i: [db_genre.name for db_genre in i.genres],
'trakt_series_network': 'network',
'imdb_url': lambda series: series.imdb_id and 'http://www.imdb.com/title/%s' % series.imdb_id,
'trakt_series_url': lambda series: series.slug and 'http://trakt.tv/shows/%s' % series.slug,
'trakt_series_country': 'country',
'trakt_series_status': 'status',
'trakt_series_overview': 'overview',
'trakt_series_rating': 'rating',
'trakt_series_votes': 'votes',
'trakt_series_language': 'language',
'trakt_series_aired_episodes': 'aired_episodes',
'trakt_series_episodes': lambda show: [episodes.title for episodes in show.episodes],
'trakt_series_poster_full': 'image_poster_full',
'trakt_series_poster_medium': 'image_poster_medium',
'trakt_series_poster_thumb': 'image_poster_thumb',
'trakt_series_thumb': 'image_thumb_full',
'trakt_languages': 'translation_languages',
}
series_actor_map = {
'trakt_actors': lambda show: list_actors(show.actors),
}
show_translate_map = {
'trakt_translations': lambda show: get_translations_dict(show.translations, 'show'),
}
# Episode info
episode_map = {
'trakt_ep_name': 'title',
'trakt_ep_imdb_id': 'imdb_id',
'trakt_ep_tvdb_id': 'tvdb_id',
'trakt_ep_tmdb_id': 'tmdb_id',
'trakt_ep_tvrage': 'tvrage_id',
'trakt_episode_id': 'id',
'trakt_ep_first_aired': 'first_aired',
'trakt_ep_overview': 'overview',
'trakt_ep_abs_number': 'number_abs',
'trakt_season': 'season',
'trakt_episode': 'number',
'trakt_ep_id': lambda ep: 'S%02dE%02d' % (ep.season, ep.number),
'trakt_ep_screenshot_full': 'image_screenshot_full',
'trakt_ep_screenshot_medium': 'image_screenshot_medium',
'trakt_ep_screenshot_thumb': 'image_screenshot_thumb',
}
# Movie info
movie_map = {
'movie_name': 'title',
'movie_year': 'year',
'trakt_movie_name': 'title',
'trakt_movie_year': 'year',
'trakt_movie_id': 'id',
'trakt_slug': 'slug',
'imdb_id': 'imdb_id',
'tmdb_id': 'tmdb_id',
'trakt_tagline': 'tagline',
'trakt_overview': 'overview',
'trakt_released': 'released',
'trakt_runtime': 'runtime',
'trakt_rating': 'rating',
'trakt_votes': 'votes',
'trakt_homepage': 'homepage',
'trakt_trailer': 'trailer',
'trakt_language': 'language',
'trakt_genres': lambda i: [db_genre.name for db_genre in i.genres],
'trakt_languages': 'translation_languages',
'trakt_fanart_full': 'image_fanart_full',
'trakt_fanart_medium': 'image_fanart_medium',
'trakt_fanart_thumb': 'image_fanart_thumb',
'trakt_poster_full': 'image_poster_full',
'trakt_poster_medium': 'image_poster_medium',
'trakt_poster_thumb': 'image_poster_thumb',
'trakt_logo': 'image_logo_full',
'trakt_clearart': 'image_clearart_full',
'trakt_banner': 'image_banner_full',
'trakt_thumb': 'image_thumb_full'
}
movie_translate_map = {
'trakt_translations': lambda movie: get_translations_dict(movie.translations, 'movie'),
}
movie_actor_map = {
'trakt_actors': lambda movie: list_actors(movie.actors),
}
schema = {'oneOf': [
{
'type': 'object',
'properties': {
'account': {'type': 'string'},
'username': {'type': 'string'},
},
'anyOf': [{'required': ['username']}, {'required': ['account']}],
'error_anyOf': 'At least one of `username` or `account` options are needed.',
'additionalProperties': False
},
{
'type': 'boolean'
}
]}
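    # Editor's config sketch (assumption, not part of the original plugin):
    # with the schema above a task can enable the plugin with a bare boolean,
    # or pass account details so the collected/watched lookups work, e.g.:
    #
    #   trakt_lookup: yes
    #
    #   trakt_lookup:
    #     account: my_trakt_account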
def lazy_series_lookup(self, entry):
"""Does the lookup for this entry and populates the entry fields."""
with Session() as session:
lookupargs = {'title': entry.get('series_name', eval_lazy=False),
'year': entry.get('year', eval_lazy=False),
'trakt_id': entry.get('trakt_show_id', eval_lazy=False),
'tvdb_id': entry.get('tvdb_id', eval_lazy=False),
'tmdb_id': entry.get('tmdb_id', eval_lazy=False),
'session': session}
try:
series = lookup_series(**lookupargs)
except LookupError as e:
log.debug(e.args[0])
else:
entry.update_using_map(self.series_map, series)
return entry
def lazy_series_actor_lookup(self, entry):
"""Does the lookup for this entry and populates the entry fields."""
with Session() as session:
lookupargs = {'trakt_id': entry.get('trakt_show_id', eval_lazy=False),
'title': entry.get('series_name', eval_lazy=False),
'session': session}
try:
series = lookup_series(**lookupargs)
except LookupError as e:
log.debug(e.args[0])
else:
entry.update_using_map(self.series_actor_map, series)
return entry
def lazy_series_translate_lookup(self, entry):
"""Does the lookup for this entry and populates the entry fields."""
with Session() as session:
lookupargs = {'trakt_id': entry.get('trakt_movie_id', eval_lazy=False),
'title': entry.get('series_name', eval_lazy=False),
'session': session}
try:
series = lookup_series(**lookupargs)
except LookupError as e:
log.debug(e.args[0])
else:
entry.update_using_map(self.show_translate_map, series)
return entry
def lazy_episode_lookup(self, entry):
with Session(expire_on_commit=False) as session:
lookupargs = {'title': entry.get('series_name', eval_lazy=False),
'trakt_id': entry.get('trakt_show_id', eval_lazy=False),
'session': session}
try:
series = lookup_series(**lookupargs)
episode = series.get_episode(entry['series_season'], entry['series_episode'], session)
except LookupError as e:
log.debug('Error looking up trakt episode information for %s: %s', entry['title'], e.args[0])
else:
entry.update_using_map(self.episode_map, episode)
return entry
def lazy_movie_lookup(self, entry):
"""Does the lookup for this entry and populates the entry fields."""
with Session() as session:
lookupargs = {'title': entry.get('title', eval_lazy=False),
'year': entry.get('year', eval_lazy=False),
'trakt_id': entry.get('trakt_movie_id', eval_lazy=False),
'trakt_slug': entry.get('trakt_movie_slug', eval_lazy=False),
'tmdb_id': entry.get('tmdb_id', eval_lazy=False),
'imdb_id': entry.get('imdb_id', eval_lazy=False),
'session': session}
try:
movie = lookup_movie(**lookupargs)
except LookupError as e:
log.debug(e.args[0])
else:
entry.update_using_map(self.movie_map, movie)
return entry
def lazy_movie_actor_lookup(self, entry):
"""Does the lookup for this entry and populates the entry fields."""
with Session() as session:
lookupargs = {'trakt_id': entry.get('trakt_movie_id', eval_lazy=False),
'title': entry.get('title', eval_lazy=False),
'session': session}
try:
movie = lookup_movie(**lookupargs)
except LookupError as e:
log.debug(e.args[0])
else:
entry.update_using_map(self.movie_actor_map, movie)
return entry
def lazy_movie_translate_lookup(self, entry):
"""Does the lookup for this entry and populates the entry fields."""
with Session() as session:
lookupargs = {'trakt_id': entry.get('trakt_movie_id', eval_lazy=False),
'title': entry.get('movie_name', eval_lazy=False),
'session': session}
try:
movie = lookup_movie(**lookupargs)
except LookupError as e:
log.debug(e.args[0])
else:
entry.update_using_map(self.movie_translate_map, movie)
return entry
def lazy_collected_lookup(self, config, style, entry):
"""Does the lookup for this entry and populates the entry fields."""
if style == 'show' or style == 'episode':
lookup = lookup_series
trakt_id = entry.get('trakt_show_id', eval_lazy=True)
else:
lookup = lookup_movie
trakt_id = entry.get('trakt_movie_id', eval_lazy=True)
with Session() as session:
lookupargs = {'trakt_id': trakt_id,
'session': session}
try:
item = lookup(**lookupargs)
if style == 'episode':
item = item.get_episode(entry['series_season'], entry['series_episode'], session)
collected = ApiTrakt.collected(style, item, entry.get('title'), username=config.get('username'),
account=config.get('account'))
except LookupError as e:
log.debug(e.args[0])
else:
entry['trakt_collected'] = collected
return entry
def lazy_watched_lookup(self, config, style, entry):
"""Does the lookup for this entry and populates the entry fields."""
if style == 'show' or style == 'episode':
lookup = lookup_series
trakt_id = entry.get('trakt_show_id', eval_lazy=True)
else:
lookup = lookup_movie
trakt_id = entry.get('trakt_movie_id', eval_lazy=True)
with Session() as session:
lookupargs = {'trakt_id': trakt_id,
'session': session}
try:
item = lookup(**lookupargs)
if style == 'episode':
item = item.get_episode(entry['series_season'], entry['series_episode'], session)
watched = ApiTrakt.watched(style, item, entry.get('title'), username=config.get('username'),
account=config.get('account'))
except LookupError as e:
log.debug(e.args[0])
else:
entry['trakt_watched'] = watched
return entry
# Run after series and metainfo series
@plugin.priority(110)
def on_task_metainfo(self, task, config):
if not config:
return
if isinstance(config, bool):
config = dict()
for entry in task.entries:
if entry.get('series_name') or entry.get('tvdb_id', eval_lazy=False):
style = 'show'
entry.register_lazy_func(self.lazy_series_lookup, self.series_map)
# TODO cleaner way to do this?
entry.register_lazy_func(self.lazy_series_actor_lookup, self.series_actor_map)
entry.register_lazy_func(self.lazy_series_translate_lookup, self.show_translate_map)
if 'series_season' in entry and 'series_episode' in entry:
entry.register_lazy_func(self.lazy_episode_lookup, self.episode_map)
style = 'episode'
if config.get('username') or config.get('account'):
collected_lookup = functools.partial(self.lazy_collected_lookup, config, style)
watched_lookup = functools.partial(self.lazy_watched_lookup, config, style)
entry.register_lazy_func(collected_lookup, ['trakt_collected'])
entry.register_lazy_func(watched_lookup, ['trakt_watched'])
else:
entry.register_lazy_func(self.lazy_movie_lookup, self.movie_map)
# TODO cleaner way to do this?
entry.register_lazy_func(self.lazy_movie_actor_lookup, self.movie_actor_map)
entry.register_lazy_func(self.lazy_movie_translate_lookup, self.movie_translate_map)
if config.get('username') or config.get('account'):
collected_lookup = functools.partial(self.lazy_collected_lookup, config, 'movie')
watched_lookup = functools.partial(self.lazy_watched_lookup, config, 'movie')
entry.register_lazy_func(collected_lookup, ['trakt_collected'])
entry.register_lazy_func(watched_lookup, ['trakt_watched'])
@property
def series_identifier(self):
"""Returns the plugin main identifier type"""
return 'trakt_show_id'
@property
def movie_identifier(self):
"""Returns the plugin main identifier type"""
return 'trakt_movie_id'
@event('plugin.register')
def register_plugin():
plugin.register(PluginTraktLookup, 'trakt_lookup', api_ver=3, groups=['series_metainfo', 'movie_metainfo'])
| mit | -4,050,092,070,684,412,000 | 39.948849 | 112 | 0.564112 | false |
tonyseek/rsocks | rsocks/server.py | 1 | 3849 | from __future__ import unicode_literals
from .green import socket, socks, listen, serve, wrap_ssl, GreenPool
from .utils import parse_proxy_uri, printable_uri, get_logger
__all__ = ['ReverseProxyServer']
class Server(object):
"""The template class for writting custom server."""
def __init__(self, concurrency=1000):
self.logger = get_logger().getChild('servers')
self.server = None
self.concurrency = concurrency
def listen(self, address):
"""Listens to a host and port.
        :param address: A ``('127.0.0.1', 2222)``-like tuple.
"""
self.server = listen(address)
self.logger.info('Listening %s:%d' % address)
def loop(self):
"""Runs the server loop.
To stop the running server, you can call ``sys.exit()`` in
:meth:`.handle` or press `CTRL - C`.
"""
if self.server is None:
            raise RuntimeError('Server should listen to an address')
self.logger.info('Starting server...')
try:
serve(self.server, self.handle_incoming, self.concurrency)
except (SystemExit, KeyboardInterrupt):
            self.logger.info('Stopping server...')
def handle_incoming(self, client_sock, client_addr):
raise NotImplementedError
class ReverseProxyServer(Server):
"""The reverse proxy server which has SOCKS 4/5 support.
:param upstream: The address (2-tuple) of upstream address.
"""
def __init__(self, upstream, use_ssl=False, chunk_size=32384):
super(ReverseProxyServer, self).__init__()
self.upstream = upstream
self.use_ssl = use_ssl
self.chunk_size = chunk_size
self.proxy_server = None
self.proxy_timeout = 15.0
def set_proxy(self, uri, timeout=None):
if timeout:
self.proxy_timeout = timeout
self.proxy_server = parse_proxy_uri(uri)
self.logger.info('Using proxy server %s' % printable_uri(uri))
def handle_incoming(self, client_sock, client_addr):
self.logger.info('Connection from %s:%d' % client_addr)
try:
upstream_sock = self._connect_to_upstream()
except (socket.error, socks.ProxyError) as e:
self.logger.exception(e)
return
pool = GreenPool()
pool.spawn_n(self._forward, client_sock, upstream_sock, 'w')
pool.spawn_n(self._forward, upstream_sock, client_sock, 'r')
pool.waitall()
drop_socket(upstream_sock)
drop_socket(client_sock)
def _connect_to_upstream(self):
if self.proxy_server:
upstream_sock = socks.socksocket()
upstream_sock.set_proxy(**self.proxy_server)
else:
upstream_sock = socket.socket()
try:
upstream_sock.settimeout(self.proxy_timeout)
upstream_sock.connect(self.upstream)
if self.use_ssl:
upstream_sock = wrap_ssl(upstream_sock)
except:
drop_socket(upstream_sock)
raise
self.logger.info(
'Connected to upstream %s:%d' % self.upstream)
return upstream_sock
def _forward(self, src, dst, direction):
assert direction in ('w', 'r')
while True:
try:
data = src.recv(self.chunk_size)
except socket.error as e:
if e.args and e.args[0] == 'timed out':
self.logger.debug('%s TIMEOUT' % direction)
return
if not data:
self.logger.debug('%s EOF' % direction)
return
self.logger.debug('%s %r bytes' % (direction, len(data)))
dst.sendall(data)
def drop_socket(sock):
try:
sock.shutdown(socket.SHUT_RDWR)
except socket.error:
pass
sock.close()
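# Editor's usage sketch (assumption, not part of the original module): expose a
# remote IMAPS server locally through a SOCKS proxy, roughly:
#
#   server = ReverseProxyServer(('imap.example.org', 993), use_ssl=True)
#   server.set_proxy('socks5://127.0.0.1:1080')
#   server.listen(('127.0.0.1', 1993))
#   server.loop()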
| mit | -2,348,855,042,301,571,000 | 30.040323 | 70 | 0.58171 | false |
arista-eosplus/ansible | lib/ansible/playbook/play.py | 7 | 12269 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import constants as C
from ansible.errors import AnsibleParserError
from ansible.module_utils.six import string_types
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.become import Become
from ansible.playbook.block import Block
from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles
from ansible.playbook.role import Role
from ansible.playbook.taggable import Taggable
from ansible.vars.manager import preprocess_vars
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['Play']
class Play(Base, Taggable, Become):
"""
A play is a language feature that represents a list of roles and/or
task/handler blocks to execute on a given set of hosts.
Usage:
Play.load(datastructure) -> Play
Play.something(...)
"""
# =================================================================================
_name = FieldAttribute(isa='string', default='', always_post_validate=True)
# TODO: generalize connection
_accelerate = FieldAttribute(isa='bool', default=False, always_post_validate=True)
_accelerate_ipv6 = FieldAttribute(isa='bool', default=False, always_post_validate=True)
_accelerate_port = FieldAttribute(isa='int', default=5099, always_post_validate=True)
# Connection
_fact_path = FieldAttribute(isa='string', default=None)
_gather_facts = FieldAttribute(isa='bool', default=None, always_post_validate=True)
_gather_subset = FieldAttribute(isa='barelist', default=None, always_post_validate=True)
_gather_timeout = FieldAttribute(isa='int', default=None, always_post_validate=True)
_hosts = FieldAttribute(isa='list', required=True, listof=string_types, always_post_validate=True)
# Variable Attributes
_vars_files = FieldAttribute(isa='list', default=[], priority=99)
_vars_prompt = FieldAttribute(isa='list', default=[], always_post_validate=True)
_vault_password = FieldAttribute(isa='string', always_post_validate=True)
# Role Attributes
_roles = FieldAttribute(isa='list', default=[], priority=90)
# Block (Task) Lists Attributes
_handlers = FieldAttribute(isa='list', default=[])
_pre_tasks = FieldAttribute(isa='list', default=[])
_post_tasks = FieldAttribute(isa='list', default=[])
_tasks = FieldAttribute(isa='list', default=[])
# Flag/Setting Attributes
_force_handlers = FieldAttribute(isa='bool', always_post_validate=True)
_max_fail_percentage = FieldAttribute(isa='percent', always_post_validate=True)
_serial = FieldAttribute(isa='list', default=[], always_post_validate=True)
_strategy = FieldAttribute(isa='string', default=C.DEFAULT_STRATEGY, always_post_validate=True)
_order = FieldAttribute(isa='string', always_post_validate=True)
# =================================================================================
def __init__(self):
super(Play, self).__init__()
self._included_conditional = None
self._included_path = None
self._removed_hosts = []
self.ROLE_CACHE = {}
def __repr__(self):
return self.get_name()
def get_name(self):
''' return the name of the Play '''
return self._attributes.get('name')
@staticmethod
def load(data, variable_manager=None, loader=None):
if ('name' not in data or data['name'] is None) and 'hosts' in data:
if isinstance(data['hosts'], list):
data['name'] = ','.join(data['hosts'])
else:
data['name'] = data['hosts']
p = Play()
return p.load_data(data, variable_manager=variable_manager, loader=loader)
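    # Illustrative usage sketch (hypothetical objects): a play is normally
    # built from one parsed playbook entry, e.g.
    #   play = Play.load({'hosts': 'webservers', 'tasks': []},
    #                    variable_manager=vm, loader=loader)
    # where `vm` and `loader` stand for VariableManager/DataLoader instances
    # created elsewhere; the missing 'name' is derived from 'hosts' above.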
def preprocess_data(self, ds):
'''
        Adjusts the play datastructure to clean up old/legacy items
'''
assert isinstance(ds, dict)
# The use of 'user' in the Play datastructure was deprecated to
# line up with the same change for Tasks, due to the fact that
# 'user' conflicted with the user module.
if 'user' in ds:
# this should never happen, but error out with a helpful message
# to the user if it does...
if 'remote_user' in ds:
raise AnsibleParserError("both 'user' and 'remote_user' are set for %s. "
"The use of 'user' is deprecated, and should be removed" % self.get_name(), obj=ds)
ds['remote_user'] = ds['user']
del ds['user']
return super(Play, self).preprocess_data(ds)
def _load_tasks(self, attr, ds):
'''
Loads a list of blocks from a list which may be mixed tasks/blocks.
Bare tasks outside of a block are given an implicit block.
'''
try:
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError:
raise AnsibleParserError("A malformed block was encountered.", obj=self._ds)
def _load_pre_tasks(self, attr, ds):
'''
Loads a list of blocks from a list which may be mixed tasks/blocks.
Bare tasks outside of a block are given an implicit block.
'''
try:
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError:
raise AnsibleParserError("A malformed block was encountered.", obj=self._ds)
def _load_post_tasks(self, attr, ds):
'''
Loads a list of blocks from a list which may be mixed tasks/blocks.
Bare tasks outside of a block are given an implicit block.
'''
try:
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError:
raise AnsibleParserError("A malformed block was encountered.", obj=self._ds)
def _load_handlers(self, attr, ds):
'''
Loads a list of blocks from a list which may be mixed handlers/blocks.
Bare handlers outside of a block are given an implicit block.
'''
try:
return load_list_of_blocks(ds=ds, play=self, use_handlers=True, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError:
raise AnsibleParserError("A malformed block was encountered.", obj=self._ds)
def _load_roles(self, attr, ds):
'''
Loads and returns a list of RoleInclude objects from the datastructure
list of role definitions and creates the Role from those objects
'''
if ds is None:
ds = []
try:
role_includes = load_list_of_roles(ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError:
raise AnsibleParserError("A malformed role declaration was encountered.", obj=self._ds)
roles = []
for ri in role_includes:
roles.append(Role.load(ri, play=self))
return roles
def _load_vars_prompt(self, attr, ds):
new_ds = preprocess_vars(ds)
vars_prompts = []
for prompt_data in new_ds:
if 'name' not in prompt_data:
display.deprecated("Using the 'short form' for vars_prompt has been deprecated", version="2.7")
for vname, prompt in prompt_data.items():
vars_prompts.append(dict(
name=vname,
prompt=prompt,
default=None,
private=None,
confirm=None,
encrypt=None,
salt_size=None,
salt=None,
))
else:
vars_prompts.append(prompt_data)
return vars_prompts
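    # Illustrative sketch of the two accepted vars_prompt entry shapes handled
    # above (hypothetical variable names):
    #   {'myvar': 'Enter a value'}                                       # deprecated short form
    #   {'name': 'myvar', 'prompt': 'Enter a value', 'private': True}    # long form
    # The short form is expanded into the long form with the remaining fields
    # defaulted to None.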
def _compile_roles(self):
'''
Handles the role compilation step, returning a flat list of tasks
with the lowest level dependencies first. For example, if a role R
has a dependency D1, which also has a dependency D2, the tasks from
        D2 are merged first, followed by those from D1, and finally by the
        tasks from the parent role R itself. This is done for all roles in the Play.
'''
block_list = []
if len(self.roles) > 0:
for r in self.roles:
block_list.extend(r.compile(play=self))
return block_list
def compile_roles_handlers(self):
'''
Handles the role handler compilation step, returning a flat list of Handlers
This is done for all roles in the Play.
'''
block_list = []
if len(self.roles) > 0:
for r in self.roles:
block_list.extend(r.get_handler_blocks(play=self))
return block_list
def compile(self):
'''
Compiles and returns the task list for this play, compiled from the
roles (which are themselves compiled recursively) and/or the list of
tasks specified in the play.
'''
# create a block containing a single flush handlers meta
# task, so we can be sure to run handlers at certain points
# of the playbook execution
flush_block = Block.load(
data={'meta': 'flush_handlers'},
play=self,
variable_manager=self._variable_manager,
loader=self._loader
)
block_list = []
block_list.extend(self.pre_tasks)
block_list.append(flush_block)
block_list.extend(self._compile_roles())
block_list.extend(self.tasks)
block_list.append(flush_block)
block_list.extend(self.post_tasks)
block_list.append(flush_block)
return block_list
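    # Illustrative note: for a play using all sections, the compiled order is
    # roughly
    #   [pre_tasks..., flush_handlers,
    #    role blocks..., tasks..., flush_handlers,
    #    post_tasks..., flush_handlers]
    # so handlers notified in one phase run before the next phase starts.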
def get_vars(self):
return self.vars.copy()
def get_vars_files(self):
return self.vars_files
def get_handlers(self):
return self.handlers[:]
def get_roles(self):
return self.roles[:]
def get_tasks(self):
tasklist = []
for task in self.pre_tasks + self.tasks + self.post_tasks:
if isinstance(task, Block):
tasklist.append(task.block + task.rescue + task.always)
else:
tasklist.append(task)
return tasklist
def serialize(self):
data = super(Play, self).serialize()
roles = []
for role in self.get_roles():
roles.append(role.serialize())
data['roles'] = roles
data['included_path'] = self._included_path
return data
def deserialize(self, data):
super(Play, self).deserialize(data)
self._included_path = data.get('included_path', None)
if 'roles' in data:
role_data = data.get('roles', [])
roles = []
for role in role_data:
r = Role()
r.deserialize(role)
roles.append(r)
setattr(self, 'roles', roles)
del data['roles']
def copy(self):
new_me = super(Play, self).copy()
new_me.ROLE_CACHE = self.ROLE_CACHE.copy()
new_me._included_conditional = self._included_conditional
new_me._included_path = self._included_path
return new_me
| gpl-3.0 | -8,869,523,384,699,161,000 | 35.843844 | 137 | 0.610645 | false |
vbannai/neutron | neutron/openstack/common/rpc/service.py | 5 | 2774 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.openstack.common import log as logging
from neutron.openstack.common import rpc
from neutron.openstack.common.rpc import dispatcher as rpc_dispatcher
from neutron.openstack.common import service
LOG = logging.getLogger(__name__)
class Service(service.Service):
"""Service object for binaries running on hosts.
A service enables rpc by listening to queues based on topic and host.
"""
def __init__(self, host, topic, manager=None, serializer=None):
super(Service, self).__init__()
self.host = host
self.topic = topic
self.serializer = serializer
if manager is None:
self.manager = self
else:
self.manager = manager
def start(self):
super(Service, self).start()
self.conn = rpc.create_connection(new=True)
LOG.debug("Creating Consumer connection for Service %s" %
self.topic)
dispatcher = rpc_dispatcher.RpcDispatcher([self.manager],
self.serializer)
# Share this same connection for these Consumers
self.conn.create_consumer(self.topic, dispatcher, fanout=False)
node_topic = '%s.%s' % (self.topic, self.host)
self.conn.create_consumer(node_topic, dispatcher, fanout=False)
self.conn.create_consumer(self.topic, dispatcher, fanout=True)
# Hook to allow the manager to do other initializations after
# the rpc connection is created.
if callable(getattr(self.manager, 'initialize_service_hook', None)):
self.manager.initialize_service_hook(self)
# Consume from all consumers in a thread
self.conn.consume_in_thread()
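    # Illustrative usage sketch (hypothetical names): the manager's public
    # methods become the RPC endpoints, e.g.
    #   svc = Service(host='node-1', topic='my-plugin', manager=MyManager())
    #   svc.start()   # consumes 'my-plugin', 'my-plugin.node-1' and a fanout queue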
def stop(self):
# Try to shut the connection down, but if we get any sort of
# errors, go ahead and ignore them.. as we're shutting down anyway
try:
self.conn.close()
except Exception:
pass
super(Service, self).stop()
| apache-2.0 | 3,255,709,238,133,598,700 | 35.986667 | 78 | 0.663302 | false |
eneldoserrata/marcos_openerp | addons/report_geraldo/lib/geraldo/site/newsite/django_1_0/tests/regressiontests/model_regress/models.py | 12 | 2290 | # coding: utf-8
from django.db import models
CHOICES = (
(1, 'first'),
(2, 'second'),
)
class Article(models.Model):
headline = models.CharField(max_length=100, default='Default headline')
pub_date = models.DateTimeField()
status = models.IntegerField(blank=True, null=True, choices=CHOICES)
misc_data = models.CharField(max_length=100, blank=True)
article_text = models.TextField()
class Meta:
ordering = ('pub_date','headline')
# A utf-8 verbose name (Ångström's Articles) to test they are valid.
verbose_name = "\xc3\x85ngstr\xc3\xb6m's Articles"
def __unicode__(self):
return self.headline
class Movie(models.Model):
#5218: Test models with non-default primary keys / AutoFields
movie_id = models.AutoField(primary_key=True)
name = models.CharField(max_length=60)
class Party(models.Model):
when = models.DateField()
__test__ = {'API_TESTS': """
(NOTE: Part of the regression test here is merely parsing the model
declaration. The verbose_name, in particular, did not always work.)
An empty choice field should return None for the display name.
>>> from datetime import datetime
>>> a = Article(headline="Look at me!", pub_date=datetime.now())
>>> a.save()
>>> a.get_status_display() is None
True
Empty strings should be returned as Unicode
>>> a2 = Article.objects.get(pk=a.id)
>>> a2.misc_data
u''
# TextFields can hold more than 4000 characters (this was broken in Oracle).
>>> a3 = Article(headline="Really, really big", pub_date=datetime.now())
>>> a3.article_text = "ABCDE" * 1000
>>> a3.save()
>>> a4 = Article.objects.get(pk=a3.id)
>>> len(a4.article_text)
5000
# #659 regression test
>>> import datetime
>>> p = Party.objects.create(when = datetime.datetime(1999, 12, 31))
>>> p = Party.objects.create(when = datetime.datetime(1998, 12, 31))
>>> p = Party.objects.create(when = datetime.datetime(1999, 1, 1))
>>> [p.when for p in Party.objects.filter(when__month = 2)]
[]
>>> [p.when for p in Party.objects.filter(when__month = 1)]
[datetime.date(1999, 1, 1)]
>>> [p.when for p in Party.objects.filter(when__month = 12)]
[datetime.date(1999, 12, 31), datetime.date(1998, 12, 31)]
>>> [p.when for p in Party.objects.filter(when__year = 1998)]
[datetime.date(1998, 12, 31)]
"""
}
| agpl-3.0 | 7,384,850,044,108,882,000 | 30.777778 | 76 | 0.675699 | false |
tfrayner/vcf_walker | utils/extract_sample_loci.py | 1 | 2225 | #!/usr/bin/env python
'''
Script to extract variant loci known to be mutated in one of the
indicated samples, and omit all other loci.
'''
import re
import vcf
from vcf_walker.vcf_meta import VcfMeta, LOGGER
class SampleExtractor(VcfMeta):
def filter(self, outfile, samppatt):
sampre = re.compile(samppatt)
with open(outfile, 'w') as out_fh:
vcf_writer = vcf.Writer(out_fh, self.reader)
for record in self.reader:
if any([ sampre.search(call.sample)
and call.data.GT not in ('.', None)
for call in record.samples ]):
vcf_writer.write_record(record)
if __name__ == '__main__':
from argparse import ArgumentParser
PARSER = ArgumentParser(description=\
"Extract variants from a VCF based on sample names.")
PARSER.add_argument('-i', '--input-file', dest='infile', type=str,
required=True, help="The name of the input VCF file.")
PARSER.add_argument('-o', '--output-file', dest='outfile', type=str,
required=True, help="The name of the output VCF file.")
PARSER.add_argument('-s', '--sample-pattern-file', dest='sampfile', type=str,
required=True, help="A file containing sample ID patterns,"
+ " one per line (commented lines will be ignored).")
ARGS = PARSER.parse_args()
with open(ARGS.sampfile) as sfh:
sample_ids = [ x.strip() for x in sfh if not re.match(r'#', x) ]
# Build a general sample ID regex with field delimiters based on
# what's in the supplied IDs.
omit = ''
if any([re.search(r'[a-zA-Z]', x) for x in sample_ids]):
omit += 'a-zA-Z'
if any([re.search(r'\d', x) for x in sample_ids]):
omit += '0-9'
for test in (r'_', r'-'):
if any([re.search(test, x) for x in sample_ids]):
omit += test
# Wrap each sample ID in negative look-ahead and -behind assertions
# to avoid things like do888 matching do8888.
samppatt = r'(' + r'|'.join([ r'(?<![' + omit + r'])' + idstr + r'(?![' + omit + r'])'
for idstr in sample_ids ]) + r')'
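  # Illustrative sketch: for sample_ids = ['do888', 'do999'] (letters and digits,
  # no '_' or '-'), omit becomes 'a-zA-Z0-9' and samppatt is roughly
  #   r'((?<![a-zA-Z0-9])do888(?![a-zA-Z0-9])|(?<![a-zA-Z0-9])do999(?![a-zA-Z0-9]))'
  # so 'do888' matches as a whole token but not inside 'do8888'.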
FILTER = SampleExtractor(infile=ARGS.infile)
FILTER.filter(ARGS.outfile, samppatt=samppatt)
| gpl-3.0 | 851,339,215,786,639,200 | 32.208955 | 88 | 0.602247 | false |
alexhayes/python-eway | eway/managed_service_types.py | 1 | 35109 | import ZSI
import ZSI.TCcompound
from ZSI.schema import LocalElementDeclaration, ElementDeclaration, TypeDefinition, GTD, GED
################################################
# targetNamespace
# https://www.eway.com.au/gateway/managedpayment
################################################
class Eway:
targetNamespace = "https://www.eway.com.au/gateway/managedpayment"
class EwayHeader(ZSI.TCcompound.ComplexType, TypeDefinition):
schema = "https://www.eway.com.au/gateway/managedpayment"
type = (schema, "eWAYHeader")
def __init__(self, pname=None, ofwhat=(), attributes=None, extend=False, restrict=False, **kw):
            ns = Eway.EwayHeader.schema
TClist = [ZSI.TC.String(pname=(ns,"eWAYCustomerID"), aname="customer_id", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Username"), aname="username", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Password"), aname="password", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
self.attribute_typecode_dict = attributes or {}
if extend: TClist += ofwhat
if restrict: TClist = ofwhat
else:
# attribute handling code
self.attribute_typecode_dict[("http://www.w3.org/2001/XMLSchema","anyAttribute")] = ZSI.TC.AnyElement()
if not pname:
pname = ("https://www.eway.com.au/gateway/managedpayment","eWAYHeader")
ZSI.TCcompound.ComplexType.__init__(self, None, TClist, pname=pname, inorder=0, **kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
                    self.customer_id = None
                    self.username = None
                    self.password = None
return
Holder.__name__ = "eWAYHeader_Holder"
self.pyclass = Holder
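    # Illustrative usage sketch (hypothetical credential values): the managed
    # payments SOAP header carries only the merchant credentials, roughly
    #   header = Eway.EwayHeader().pyclass()
    #   header.customer_id = '87654321'
    #   header.username = '[email protected]'
    #   header.password = 'xxxxxx'
    # and is attached to every service call.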
class CreditCard(TypeDefinition):
#complexType/complexContent extension
schema = "https://www.eway.com.au/gateway/managedpayment"
type = (schema, "CreditCard")
def __init__(self, pname, ofwhat=(), extend=False, restrict=False, attributes=None, **kw):
ns = Eway.CreditCard.schema
TClist = [ZSI.TC.String(pname=(ns,"CCName"), aname="name", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CCNumber"), aname="number", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CCExpiryMonth"), aname="expiry_month", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CCExpiryYear"), aname="expiry_year", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
attributes = self.attribute_typecode_dict = attributes or {}
if extend: TClist += ofwhat
if restrict: TClist = ofwhat
if Eway.ManagedCustomer not in Eway.CreditCard.__bases__:
bases = list(Eway.CreditCard.__bases__)
bases.insert(0, Eway.ManagedCustomer)
Eway.CreditCard.__bases__ = tuple(bases)
Eway.ManagedCustomer.__init__(self, pname, ofwhat=TClist, extend=True, attributes=attributes, **kw)
class ManagedCustomer(ZSI.TCcompound.ComplexType, TypeDefinition):
schema = "https://www.eway.com.au/gateway/managedpayment"
type = (schema, "ManagedCustomer")
def __init__(self, pname, ofwhat=(), attributes=None, extend=False, restrict=False, **kw):
ns = Eway.ManagedCustomer.schema
TClist = [ZSI.TCnumbers.Ilong(pname=(ns,"ManagedCustomerID"), aname="id", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerRef"), aname="reference", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerTitle"), aname="title", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerFirstName"), aname="first_name", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerLastName"), aname="last_name", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerCompany"), aname="company", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerJobDesc"), aname="job_description", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerEmail"), aname="email", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerAddress"), aname="address", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerSuburb"), aname="suburb", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerState"), aname="state", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerPostCode"), aname="postcode", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerCountry"), aname="country", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerPhone1"), aname="phone1", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerPhone2"), aname="phone2", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerFax"), aname="fax", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerURL"), aname="url", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerComments"), aname="comments", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
self.attribute_typecode_dict = attributes or {}
if extend: TClist += ofwhat
if restrict: TClist = ofwhat
ZSI.TCcompound.ComplexType.__init__(self, None, TClist, pname=pname, inorder=0, **kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
self.id = None
self.reference = None
self.title = None
self.first_name = None
self.last_name = None
self.company = None
self.job_description = None
self.email = None
self.address = None
self.suburb = None
self.state = None
self.postcode = None
self.country = None
self.phone1 = None
self.phone2 = None
self.fax = None
self.url = None
self.comments = None
return
Holder.__name__ = "ManagedCustomer_Holder"
self.pyclass = Holder
class CCPaymentResponse(ZSI.TCcompound.ComplexType, TypeDefinition):
schema = "https://www.eway.com.au/gateway/managedpayment"
type = (schema, "CCPaymentResponse")
def __init__(self, pname, ofwhat=(), attributes=None, extend=False, restrict=False, **kw):
ns = Eway.CCPaymentResponse.schema
TClist = [ZSI.TC.String(pname=(ns,"ewayTrxnError"), aname="transaction_error", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"ewayTrxnStatus"), aname="transaction_status", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"ewayTrxnNumber"), aname="transaction_number", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"ewayReturnAmount"), aname="return_amount", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"ewayAuthCode"), aname="auth_code", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
self.attribute_typecode_dict = attributes or {}
if extend: TClist += ofwhat
if restrict: TClist = ofwhat
ZSI.TCcompound.ComplexType.__init__(self, None, TClist, pname=pname, inorder=0, **kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
                    self.transaction_error = None
self.transaction_status = None
self.transaction_number = None
self.return_amount = None
self.auth_code = None
return
Holder.__name__ = "CCPaymentResponse_Holder"
self.pyclass = Holder
class ArrayOfManagedTransaction(ZSI.TCcompound.ComplexType, TypeDefinition):
schema = "https://www.eway.com.au/gateway/managedpayment"
type = (schema, "ArrayOfManagedTransaction")
def __init__(self, pname, ofwhat=(), attributes=None, extend=False, restrict=False, **kw):
ns = Eway.ArrayOfManagedTransaction.schema
TClist = [GTD("https://www.eway.com.au/gateway/managedpayment","ManagedTransaction",lazy=False)(pname=(ns,"ManagedTransaction"), aname="managed_transaction", minOccurs=0, maxOccurs="unbounded", nillable=True, typed=False, encoded=kw.get("encoded"))]
self.attribute_typecode_dict = attributes or {}
if extend: TClist += ofwhat
if restrict: TClist = ofwhat
ZSI.TCcompound.ComplexType.__init__(self, None, TClist, pname=pname, inorder=0, **kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
self.managed_transaction = []
return
Holder.__name__ = "ArrayOfManagedTransaction_Holder"
self.pyclass = Holder
class ManagedTransaction(ZSI.TCcompound.ComplexType, TypeDefinition):
schema = "https://www.eway.com.au/gateway/managedpayment"
type = (schema, "ManagedTransaction")
def __init__(self, pname, ofwhat=(), attributes=None, extend=False, restrict=False, **kw):
ns = Eway.ManagedTransaction.schema
TClist = [ZSI.TCnumbers.Iint(pname=(ns,"TotalAmount"), aname="total_amount", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TCnumbers.Iint(pname=(ns,"Result"), aname="result", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"ResponseText"), aname="response_text", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TCtimes.gDateTime(pname=(ns,"TransactionDate"), aname="transaction_date", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TCnumbers.Iint(pname=(ns,"ewayTrxnNumber"), aname="transaction_number", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
self.attribute_typecode_dict = attributes or {}
if extend: TClist += ofwhat
if restrict: TClist = ofwhat
ZSI.TCcompound.ComplexType.__init__(self, None, TClist, pname=pname, inorder=0, **kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
self.total_amount = None
self.result = None
self.response_text = None
self.transaction_date = None
self.transaction_number = None
return
Holder.__name__ = "ManagedTransaction_Holder"
self.pyclass = Holder
class CreateCustomer(ZSI.TCcompound.ComplexType, ElementDeclaration):
literal = "CreateCustomer"
schema = "https://www.eway.com.au/gateway/managedpayment"
def __init__(self, **kw):
ns = Eway.CreateCustomer.schema
TClist = [ZSI.TC.String(pname=(ns,"Title"), aname="title", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"FirstName"), aname="first_name", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"LastName"), aname="last_name", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Address"), aname="address", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Suburb"), aname="suburb", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"State"), aname="state", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Company"), aname="company", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"PostCode"), aname="postcode", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Country"), aname="country", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Email"), aname="email", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Fax"), aname="fax", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Phone"), aname="phone", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Mobile"), aname="mobile", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerRef"), aname="customer_reference", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"JobDesc"), aname="job_description", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Comments"), aname="comments", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"URL"), aname="url", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CCNumber"), aname="card_number", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CCNameOnCard"), aname="card_holder_name", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TCnumbers.Iint(pname=(ns,"CCExpiryMonth"), aname="card_expiry_month", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TCnumbers.Iint(pname=(ns,"CCExpiryYear"), aname="card_expiry_year", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
kw["pname"] = ("https://www.eway.com.au/gateway/managedpayment","CreateCustomer")
kw["aname"] = "_CreateCustomer"
self.attribute_typecode_dict = {}
ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
class Holder:
typecode = self
def __init__(self):
self.title = None
self.first_name = None
self.last_name = None
self.address = None
self.suburb = None
self.state = None
self.company = None
self.postcode = None
self.company = None
self.email = None
self.fax = None
self.phone = None
self.mobile = None
self.customer_reference = None
self.job_description = None
self.comments = None
self.url = None
self.card_number = None
self.card_holder_name = None
self.card_expiry_month = None
self.card_expiry_year = None
return
Holder.__name__ = "CreateCustomer_Holder"
self.pyclass = Holder
class CreateCustomerResponse(ZSI.TCcompound.ComplexType, ElementDeclaration):
literal = "CreateCustomerResponse"
schema = "https://www.eway.com.au/gateway/managedpayment"
def __init__(self, **kw):
ns = Eway.CreateCustomerResponse.schema
TClist = [ZSI.TC.String(pname=(ns,"CreateCustomerResult"), aname="create_customer_result", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
kw["pname"] = ("https://www.eway.com.au/gateway/managedpayment","CreateCustomerResponse")
kw["aname"] = "_CreateCustomerResponse"
self.attribute_typecode_dict = {}
ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
class Holder:
typecode = self
def __init__(self):
self.create_customer_result = None
return
Holder.__name__ = "CreateCustomerResponse_Holder"
self.pyclass = Holder
class eWAYHeader(ElementDeclaration):
literal = "eWAYHeader"
schema = "https://www.eway.com.au/gateway/managedpayment"
def __init__(self, **kw):
kw["pname"] = ("https://www.eway.com.au/gateway/managedpayment","eWAYHeader")
kw["aname"] = "_eWAYHeader"
            if Eway.EwayHeader not in Eway.eWAYHeader.__bases__:
                bases = list(Eway.eWAYHeader.__bases__)
                bases.insert(0, Eway.EwayHeader)
                Eway.eWAYHeader.__bases__ = tuple(bases)
            Eway.EwayHeader.__init__(self, **kw)
if self.pyclass is not None: self.pyclass.__name__ = "eWAYHeader_Holder"
class UpdateCustomer(ZSI.TCcompound.ComplexType, ElementDeclaration):
literal = "UpdateCustomer"
schema = "https://www.eway.com.au/gateway/managedpayment"
def __init__(self, **kw):
ns = Eway.UpdateCustomer.schema
TClist = [ZSI.TCnumbers.Ilong(pname=(ns,"managedCustomerID"), aname="id", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Title"), aname="title", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"FirstName"), aname="first_name", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"LastName"), aname="last_name", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Address"), aname="address", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Suburb"), aname="suburb", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"State"), aname="state", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Company"), aname="company", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"PostCode"), aname="postcode", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Country"), aname="country", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Email"), aname="email", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Fax"), aname="fax", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Phone"), aname="phone", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Mobile"), aname="mobile", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CustomerRef"), aname="customer_reference", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"JobDesc"), aname="job_description", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"Comments"), aname="comments", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"URL"), aname="url", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CCNumber"), aname="card_number", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"CCNameOnCard"), aname="card_holder_name", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TCnumbers.Iint(pname=(ns,"CCExpiryMonth"), aname="card_expiry_month", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TCnumbers.Iint(pname=(ns,"CCExpiryYear"), aname="card_expiry_year", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
kw["pname"] = ("https://www.eway.com.au/gateway/managedpayment","UpdateCustomer")
kw["aname"] = "_UpdateCustomer"
self.attribute_typecode_dict = {}
ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
self.id = None
self.title = None
self.first_name = None
self.last_name = None
self.address = None
self.suburb = None
self.state = None
self.company = None
self.postcode = None
self.country = None
self.email = None
self.fax = None
self.phone = None
self.mobile = None
self.customer_reference = None
self.job_description = None
self.comments = None
self.url = None
self.card_number = None
self.card_holder_name = None
self.card_expiry_month = None
self.card_expiry_year = None
return
Holder.__name__ = "UpdateCustomer_Holder"
self.pyclass = Holder
class UpdateCustomerResponse(ZSI.TCcompound.ComplexType, ElementDeclaration):
literal = "UpdateCustomerResponse"
schema = "https://www.eway.com.au/gateway/managedpayment"
def __init__(self, **kw):
ns = Eway.UpdateCustomerResponse.schema
TClist = [ZSI.TC.Boolean(pname=(ns,"UpdateCustomerResult"), aname="update_customer_result", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
kw["pname"] = ("https://www.eway.com.au/gateway/managedpayment","UpdateCustomerResponse")
kw["aname"] = "update_customer_response"
self.attribute_typecode_dict = {}
ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
self.update_customer_result = None
return
Holder.__name__ = "UpdateCustomerResponse_Holder"
self.pyclass = Holder
class QueryCustomer(ZSI.TCcompound.ComplexType, ElementDeclaration):
literal = "QueryCustomer"
schema = "https://www.eway.com.au/gateway/managedpayment"
def __init__(self, **kw):
ns = Eway.QueryCustomer.schema
TClist = [ZSI.TCnumbers.Ilong(pname=(ns,"managedCustomerID"), aname="id", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
kw["pname"] = ("https://www.eway.com.au/gateway/managedpayment","QueryCustomer")
kw["aname"] = "_QueryCustomer"
self.attribute_typecode_dict = {}
ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
self.id = None
return
Holder.__name__ = "QueryCustomer_Holder"
self.pyclass = Holder
class QueryCustomerResponse(ZSI.TCcompound.ComplexType, ElementDeclaration):
literal = "QueryCustomerResponse"
schema = "https://www.eway.com.au/gateway/managedpayment"
def __init__(self, **kw):
ns = Eway.QueryCustomerResponse.schema
TClist = [GTD("https://www.eway.com.au/gateway/managedpayment","CreditCard",lazy=False)(pname=(ns,"QueryCustomerResult"), aname="query_customer_result", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
kw["pname"] = ("https://www.eway.com.au/gateway/managedpayment","QueryCustomerResponse")
kw["aname"] = "_QueryCustomerResponse"
self.attribute_typecode_dict = {}
ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
self.query_customer_result = None
return
Holder.__name__ = "QueryCustomerResponse_Holder"
self.pyclass = Holder
class QueryCustomerByReference(ZSI.TCcompound.ComplexType, ElementDeclaration):
literal = "QueryCustomerByReference"
schema = "https://www.eway.com.au/gateway/managedpayment"
def __init__(self, **kw):
ns = Eway.QueryCustomerByReference.schema
TClist = [ZSI.TC.String(pname=(ns,"CustomerReference"), aname="customer_reference", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
kw["pname"] = ("https://www.eway.com.au/gateway/managedpayment","QueryCustomerByReference")
kw["aname"] = "_QueryCustomerByReference"
self.attribute_typecode_dict = {}
ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
self.customer_reference = None
return
Holder.__name__ = "QueryCustomerByReference_Holder"
self.pyclass = Holder
class QueryCustomerByReferenceResponse(ZSI.TCcompound.ComplexType, ElementDeclaration):
literal = "QueryCustomerByReferenceResponse"
schema = "https://www.eway.com.au/gateway/managedpayment"
def __init__(self, **kw):
ns = Eway.QueryCustomerByReferenceResponse.schema
TClist = [GTD("https://www.eway.com.au/gateway/managedpayment","CreditCard",lazy=False)(pname=(ns,"QueryCustomerByReferenceResult"), aname="query_customer_by_reference_result", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
kw["pname"] = ("https://www.eway.com.au/gateway/managedpayment","QueryCustomerByReferenceResponse")
kw["aname"] = "_QueryCustomerByReferenceResponse"
self.attribute_typecode_dict = {}
ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
self.query_customer_by_reference_result = None
return
Holder.__name__ = "QueryCustomerByReferenceResponse_Holder"
self.pyclass = Holder
class ProcessPayment(ZSI.TCcompound.ComplexType, ElementDeclaration):
literal = "ProcessPayment"
schema = "https://www.eway.com.au/gateway/managedpayment"
def __init__(self, **kw):
ns = Eway.ProcessPayment.schema
TClist = [ZSI.TCnumbers.Ilong(pname=(ns,"managedCustomerID"), aname="id", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TCnumbers.Iint(pname=(ns,"amount"), aname="amount", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"invoiceReference"), aname="invoice_reference", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")),
ZSI.TC.String(pname=(ns,"invoiceDescription"), aname="invoice_description", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
kw["pname"] = ("https://www.eway.com.au/gateway/managedpayment","ProcessPayment")
kw["aname"] = "_ProcessPayment"
self.attribute_typecode_dict = {}
ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
self.id = None
self.amount = None
self.invoice_reference = None
self.invoice_description = None
return
Holder.__name__ = "ProcessPayment_Holder"
self.pyclass = Holder
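    # Illustrative usage sketch (hypothetical values): a payment request holder
    # is filled in roughly as below; eWAY amounts are expressed in cents, so
    # 1000 corresponds to $10.00.
    #   req = Eway.ProcessPayment().pyclass()
    #   req.id = 9876543211000
    #   req.amount = 1000
    #   req.invoice_reference = 'INV-001'
    #   req.invoice_description = 'Monthly subscription'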
class ProcessPaymentResponse(ZSI.TCcompound.ComplexType, ElementDeclaration):
literal = "ProcessPaymentResponse"
schema = "https://www.eway.com.au/gateway/managedpayment"
def __init__(self, **kw):
ns = Eway.ProcessPaymentResponse.schema
TClist = [GTD("https://www.eway.com.au/gateway/managedpayment","CCPaymentResponse",lazy=False)(pname=(ns,"ewayResponse"), aname="response", minOccurs=1, maxOccurs=1, nillable=True, typed=False, encoded=kw.get("encoded"))]
kw["pname"] = ("https://www.eway.com.au/gateway/managedpayment","ProcessPaymentResponse")
kw["aname"] = "_ProcessPaymentResponse"
self.attribute_typecode_dict = {}
ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
self.response = None
return
Holder.__name__ = "ProcessPaymentResponse_Holder"
self.pyclass = Holder
class QueryPayment(ZSI.TCcompound.ComplexType, ElementDeclaration):
literal = "QueryPayment"
schema = "https://www.eway.com.au/gateway/managedpayment"
def __init__(self, **kw):
ns = Eway.QueryPayment.schema
TClist = [ZSI.TCnumbers.Ilong(pname=(ns,"managedCustomerID"), aname="id", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
kw["pname"] = ("https://www.eway.com.au/gateway/managedpayment","QueryPayment")
kw["aname"] = "_QueryPayment"
self.attribute_typecode_dict = {}
ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
self.id = None
return
Holder.__name__ = "QueryPayment_Holder"
self.pyclass = Holder
class QueryPaymentResponse(ZSI.TCcompound.ComplexType, ElementDeclaration):
literal = "QueryPaymentResponse"
schema = "https://www.eway.com.au/gateway/managedpayment"
def __init__(self, **kw):
ns = Eway.QueryPaymentResponse.schema
TClist = [GTD("https://www.eway.com.au/gateway/managedpayment","ArrayOfManagedTransaction",lazy=False)(pname=(ns,"QueryPaymentResult"), aname="query_payment_result", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
kw["pname"] = ("https://www.eway.com.au/gateway/managedpayment","QueryPaymentResponse")
kw["aname"] = "_QueryPaymentResponse"
self.attribute_typecode_dict = {}
ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
class Holder:
typecode = self
def __init__(self):
# pyclass
self.query_payment_result = None
return
Holder.__name__ = "QueryPaymentResponse_Holder"
self.pyclass = Holder
| apache-2.0 | -963,339,123,505,914,900 | 67.305447 | 271 | 0.587485 | false |
Shougo/python-client | pynvim/msgpack_rpc/msgpack_stream.py | 2 | 2006 | """Msgpack handling in the event loop pipeline."""
import logging
from msgpack import Packer, Unpacker
from pynvim.compat import unicode_errors_default
logger = logging.getLogger(__name__)
debug, info, warn = (logger.debug, logger.info, logger.warning,)
class MsgpackStream(object):
"""Two-way msgpack stream that wraps a event loop byte stream.
This wraps the event loop interface for reading/writing bytes and
exposes an interface for reading/writing msgpack documents.
"""
def __init__(self, event_loop):
"""Wrap `event_loop` on a msgpack-aware interface."""
self.loop = event_loop
self._packer = Packer(unicode_errors=unicode_errors_default)
self._unpacker = Unpacker(unicode_errors=unicode_errors_default)
self._message_cb = None
def threadsafe_call(self, fn):
"""Wrapper around `BaseEventLoop.threadsafe_call`."""
self.loop.threadsafe_call(fn)
def send(self, msg):
"""Queue `msg` for sending to Nvim."""
debug('sent %s', msg)
self.loop.send(self._packer.pack(msg))
def run(self, message_cb):
"""Run the event loop to receive messages from Nvim.
While the event loop is running, `message_cb` will be called whenever
a message has been successfully parsed from the input stream.
"""
self._message_cb = message_cb
self.loop.run(self._on_data)
self._message_cb = None
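    # Illustrative usage sketch (hypothetical names): a consumer typically wires
    # the stream to a dispatcher that receives each unpacked msgpack-rpc message
    # (an array such as [type, ...]), e.g.
    #   stream = MsgpackStream(event_loop)     # event_loop: a BaseEventLoop instance
    #   stream.run(handle_message)             # blocks until stop() is called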
def stop(self):
"""Stop the event loop."""
self.loop.stop()
def close(self):
"""Close the event loop."""
self.loop.close()
def _on_data(self, data):
self._unpacker.feed(data)
while True:
try:
debug('waiting for message...')
msg = next(self._unpacker)
debug('received message: %s', msg)
self._message_cb(msg)
except StopIteration:
debug('unpacker needs more data...')
break
| apache-2.0 | -4,655,828,085,260,947,000 | 30.34375 | 77 | 0.608175 | false |
bobsummerwill/solidity | docs/conf.py | 2 | 8535 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Solidity documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 7 12:32:57 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import re
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
def setup(sphinx):
thisdir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, thisdir + '/utils')
from pygments_lexer_solidity import SolidityLexer
sphinx.add_lexer('Solidity', SolidityLexer())
sphinx.add_stylesheet('css/custom.css')
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Solidity'
copyright = '2016-2019, Ethereum'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
with open('../CMakeLists.txt', 'r') as f:
version = re.search('PROJECT_VERSION "([^"]+)"', f.read()).group(1)
# The full version, including alpha/beta/rc tags.
if os.path.isfile('../prerelease.txt') != True or os.path.getsize('../prerelease.txt') == 0:
release = version
else:
# This is a prerelease version
release = version + '-develop'
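# Illustrative note: the regex above picks the quoted version out of a CMake
# line such as (hypothetical value)
#   set(PROJECT_VERSION "0.5.11")
# giving version == '0.5.11'; a non-empty ../prerelease.txt turns the release
# string into '0.5.11-develop'.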
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'contracts', 'types', 'examples']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'Solidity'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Soliditydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'solidity.tex', 'Solidity Documentation', 'Ethereum', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| gpl-3.0 | -8,138,761,273,294,447,000 | 30.847015 | 92 | 0.705917 | false |
jonnylamb/debexpo | debexpo/tests/functional/test_login.py | 3 | 2526 | from debexpo.tests import *
from pylons import session
class TestLoginController(TestController):
def setUp(self):
self._setup_models()
self._setup_example_user()
def tearDown(self):
self._remove_example_user()
def test_index(self):
response = self.app.get(url(controller='login', action='index'))
self.assertEquals(response.status_int, 200)
self.assertEquals(len(response.lxml.xpath(
'//input[@type="text" and @name="email"]')), 1)
self.assertEquals(len(response.lxml.xpath(
'//input[@type="password" and @name="password"]')), 1)
def test__login(self):
response = self.app.post(url(controller='login', action='index'),
{'email': '[email protected]',
'password': 'wrongpassword',
'commit': 'submit'})
        self.assertEquals(response.status_int, 200)
self.assertEquals(len(response.lxml.xpath(
'//span[@class="error-message"]')), 1)
response = self.app.post(url(controller='login', action='index'),
self._AUTHDATA)
self.assertEquals(response.status_int, 302)
self.assertTrue(response.location.endswith(url('my')))
def test__login_path_before_login(self):
response = self.app.get(url(controller='packages', action='my'))
self.assertEquals(response.status_int, 302)
self.assertTrue(response.location.endswith(url('login')))
response = self.app.post(url(controller='login', action='index'),
self._AUTHDATA)
self.assertEquals(response.status_int, 302)
self.assertTrue(response.location.endswith(url(controller='packages', action='my')))
def test_logout_loggedout(self):
response = self.app.get(url(controller='login', action='logout'))
self.assertEquals(response.status_int, 302)
self.assertTrue(response.location.endswith(url('index')))
def test_logout_loggedin(self):
response = self.app.post(url(controller='login', action='index'),
self._AUTHDATA)
self.assertEquals(response.status_int, 302)
self.assertTrue(response.location.endswith(url('my')))
response = self.app.get(url(controller='login', action='logout'))
self.assertEquals(response.status_int, 302)
self.assertTrue(response.location.endswith(url('index')))
| mit | -4,887,738,753,579,944,000 | 45.777778 | 92 | 0.600554 | false |
MechCoder/sympy | sympy/concrete/tests/test_gosper.py | 100 | 7423 | """Tests for Gosper's algorithm for hypergeometric summation. """
from sympy import binomial, factorial, gamma, Poly, S, simplify, sqrt, exp, log, Symbol, pi
from sympy.abc import a, b, j, k, m, n, r, x
from sympy.concrete.gosper import gosper_normal, gosper_sum, gosper_term
def test_gosper_normal():
assert gosper_normal(4*n + 5, 2*(4*n + 1)*(2*n + 3), n) == \
(Poly(S(1)/4, n), Poly(n + S(3)/2), Poly(n + S(1)/4))
def test_gosper_term():
assert gosper_term((4*k + 1)*factorial(
k)/factorial(2*k + 1), k) == (-k - S(1)/2)/(k + S(1)/4)
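# Background note (illustrative): here the returned rational function acts as
# Gosper's certificate R(k): taking g(k) = R(k)*f(k) with f the summand above
# gives g(k + 1) - g(k) == f(k), i.e. g(k) = -2*factorial(k)/factorial(2*k),
# which telescopes back to f(k).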
def test_gosper_sum():
assert gosper_sum(1, (k, 0, n)) == 1 + n
assert gosper_sum(k, (k, 0, n)) == n*(1 + n)/2
assert gosper_sum(k**2, (k, 0, n)) == n*(1 + n)*(1 + 2*n)/6
assert gosper_sum(k**3, (k, 0, n)) == n**2*(1 + n)**2/4
assert gosper_sum(2**k, (k, 0, n)) == 2*2**n - 1
assert gosper_sum(factorial(k), (k, 0, n)) is None
assert gosper_sum(binomial(n, k), (k, 0, n)) is None
assert gosper_sum(factorial(k)/k**2, (k, 0, n)) is None
assert gosper_sum((k - 3)*factorial(k), (k, 0, n)) is None
assert gosper_sum(k*factorial(k), k) == factorial(k)
assert gosper_sum(
k*factorial(k), (k, 0, n)) == n*factorial(n) + factorial(n) - 1
assert gosper_sum((-1)**k*binomial(n, k), (k, 0, n)) == 0
assert gosper_sum((
-1)**k*binomial(n, k), (k, 0, m)) == -(-1)**m*(m - n)*binomial(n, m)/n
assert gosper_sum((4*k + 1)*factorial(k)/factorial(2*k + 1), (k, 0, n)) == \
(2*factorial(2*n + 1) - factorial(n))/factorial(2*n + 1)
# issue 6033:
assert gosper_sum(
n*(n + a + b)*a**n*b**n/(factorial(n + a)*factorial(n + b)), \
(n, 0, m)) == -a*b*(exp(m*log(a))*exp(m*log(b))*factorial(a)* \
factorial(b) - factorial(a + m)*factorial(b + m))/(factorial(a)* \
factorial(b)*factorial(a + m)*factorial(b + m))
def test_gosper_sum_indefinite():
assert gosper_sum(k, k) == k*(k - 1)/2
assert gosper_sum(k**2, k) == k*(k - 1)*(2*k - 1)/6
assert gosper_sum(1/(k*(k + 1)), k) == -1/k
assert gosper_sum(-(27*k**4 + 158*k**3 + 430*k**2 + 678*k + 445)*gamma(2*k + 4)/(3*(3*k + 7)*gamma(3*k + 6)), k) == \
(3*k + 5)*(k**2 + 2*k + 5)*gamma(2*k + 4)/gamma(3*k + 6)
def test_gosper_sum_parametric():
assert gosper_sum(binomial(S(1)/2, m - j + 1)*binomial(S(1)/2, m + j), (j, 1, n)) == \
n*(1 + m - n)*(-1 + 2*m + 2*n)*binomial(S(1)/2, 1 + m - n)* \
binomial(S(1)/2, m + n)/(m*(1 + 2*m))
def test_gosper_sum_algebraic():
assert gosper_sum(
n**2 + sqrt(2), (n, 0, m)) == (m + 1)*(2*m**2 + m + 6*sqrt(2))/6
def test_gosper_sum_iterated():
f1 = binomial(2*k, k)/4**k
f2 = (1 + 2*n)*binomial(2*n, n)/4**n
f3 = (1 + 2*n)*(3 + 2*n)*binomial(2*n, n)/(3*4**n)
f4 = (1 + 2*n)*(3 + 2*n)*(5 + 2*n)*binomial(2*n, n)/(15*4**n)
f5 = (1 + 2*n)*(3 + 2*n)*(5 + 2*n)*(7 + 2*n)*binomial(2*n, n)/(105*4**n)
assert gosper_sum(f1, (k, 0, n)) == f2
assert gosper_sum(f2, (n, 0, n)) == f3
assert gosper_sum(f3, (n, 0, n)) == f4
assert gosper_sum(f4, (n, 0, n)) == f5
# the AeqB tests test expressions given in
# www.math.upenn.edu/~wilf/AeqB.pdf
def test_gosper_sum_AeqB_part1():
f1a = n**4
f1b = n**3*2**n
f1c = 1/(n**2 + sqrt(5)*n - 1)
f1d = n**4*4**n/binomial(2*n, n)
f1e = factorial(3*n)/(factorial(n)*factorial(n + 1)*factorial(n + 2)*27**n)
f1f = binomial(2*n, n)**2/((n + 1)*4**(2*n))
f1g = (4*n - 1)*binomial(2*n, n)**2/((2*n - 1)**2*4**(2*n))
f1h = n*factorial(n - S(1)/2)**2/factorial(n + 1)**2
g1a = m*(m + 1)*(2*m + 1)*(3*m**2 + 3*m - 1)/30
g1b = 26 + 2**(m + 1)*(m**3 - 3*m**2 + 9*m - 13)
g1c = (m + 1)*(m*(m**2 - 7*m + 3)*sqrt(5) - (
3*m**3 - 7*m**2 + 19*m - 6))/(2*m**3*sqrt(5) + m**4 + 5*m**2 - 1)/6
g1d = -S(2)/231 + 2*4**m*(m + 1)*(63*m**4 + 112*m**3 + 18*m**2 -
22*m + 3)/(693*binomial(2*m, m))
g1e = -S(9)/2 + (81*m**2 + 261*m + 200)*factorial(
3*m + 2)/(40*27**m*factorial(m)*factorial(m + 1)*factorial(m + 2))
g1f = (2*m + 1)**2*binomial(2*m, m)**2/(4**(2*m)*(m + 1))
g1g = -binomial(2*m, m)**2/4**(2*m)
g1h = 4*pi -(2*m + 1)**2*(3*m + 4)*factorial(m - S(1)/2)**2/factorial(m + 1)**2
g = gosper_sum(f1a, (n, 0, m))
assert g is not None and simplify(g - g1a) == 0
g = gosper_sum(f1b, (n, 0, m))
assert g is not None and simplify(g - g1b) == 0
g = gosper_sum(f1c, (n, 0, m))
assert g is not None and simplify(g - g1c) == 0
g = gosper_sum(f1d, (n, 0, m))
assert g is not None and simplify(g - g1d) == 0
g = gosper_sum(f1e, (n, 0, m))
assert g is not None and simplify(g - g1e) == 0
g = gosper_sum(f1f, (n, 0, m))
assert g is not None and simplify(g - g1f) == 0
g = gosper_sum(f1g, (n, 0, m))
assert g is not None and simplify(g - g1g) == 0
g = gosper_sum(f1h, (n, 0, m))
# need to call rewrite(gamma) here because we have terms involving
# factorial(1/2)
assert g is not None and simplify(g - g1h).rewrite(gamma) == 0
def test_gosper_sum_AeqB_part2():
f2a = n**2*a**n
f2b = (n - r/2)*binomial(r, n)
f2c = factorial(n - 1)**2/(factorial(n - x)*factorial(n + x))
g2a = -a*(a + 1)/(a - 1)**3 + a**(
m + 1)*(a**2*m**2 - 2*a*m**2 + m**2 - 2*a*m + 2*m + a + 1)/(a - 1)**3
g2b = (m - r)*binomial(r, m)/2
ff = factorial(1 - x)*factorial(1 + x)
g2c = 1/ff*(
1 - 1/x**2) + factorial(m)**2/(x**2*factorial(m - x)*factorial(m + x))
g = gosper_sum(f2a, (n, 0, m))
assert g is not None and simplify(g - g2a) == 0
g = gosper_sum(f2b, (n, 0, m))
assert g is not None and simplify(g - g2b) == 0
g = gosper_sum(f2c, (n, 1, m))
assert g is not None and simplify(g - g2c) == 0
def test_gosper_nan():
a = Symbol('a', positive=True)
b = Symbol('b', positive=True)
n = Symbol('n', integer=True)
m = Symbol('m', integer=True)
f2d = n*(n + a + b)*a**n*b**n/(factorial(n + a)*factorial(n + b))
g2d = 1/(factorial(a - 1)*factorial(
b - 1)) - a**(m + 1)*b**(m + 1)/(factorial(a + m)*factorial(b + m))
g = gosper_sum(f2d, (n, 0, m))
assert simplify(g - g2d) == 0
def test_gosper_sum_AeqB_part3():
f3a = 1/n**4
f3b = (6*n + 3)/(4*n**4 + 8*n**3 + 8*n**2 + 4*n + 3)
f3c = 2**n*(n**2 - 2*n - 1)/(n**2*(n + 1)**2)
f3d = n**2*4**n/((n + 1)*(n + 2))
f3e = 2**n/(n + 1)
f3f = 4*(n - 1)*(n**2 - 2*n - 1)/(n**2*(n + 1)**2*(n - 2)**2*(n - 3)**2)
f3g = (n**4 - 14*n**2 - 24*n - 9)*2**n/(n**2*(n + 1)**2*(n + 2)**2*
(n + 3)**2)
# g3a -> no closed form
g3b = m*(m + 2)/(2*m**2 + 4*m + 3)
g3c = 2**m/m**2 - 2
g3d = S(2)/3 + 4**(m + 1)*(m - 1)/(m + 2)/3
# g3e -> no closed form
g3f = -(-S(1)/16 + 1/((m - 2)**2*(m + 1)**2)) # the AeqB key is wrong
g3g = -S(2)/9 + 2**(m + 1)/((m + 1)**2*(m + 3)**2)
g = gosper_sum(f3a, (n, 1, m))
assert g is None
g = gosper_sum(f3b, (n, 1, m))
assert g is not None and simplify(g - g3b) == 0
g = gosper_sum(f3c, (n, 1, m - 1))
assert g is not None and simplify(g - g3c) == 0
g = gosper_sum(f3d, (n, 1, m))
assert g is not None and simplify(g - g3d) == 0
g = gosper_sum(f3e, (n, 0, m - 1))
assert g is None
g = gosper_sum(f3f, (n, 4, m))
assert g is not None and simplify(g - g3f) == 0
g = gosper_sum(f3g, (n, 1, m))
assert g is not None and simplify(g - g3g) == 0
| bsd-3-clause | 2,186,736,378,180,230,000 | 37.661458 | 121 | 0.494813 | false |
muk-it/muk_dms | muk_dms_thumbnails/__manifest__.py | 1 | 1714 | ###################################################################################
#
# Copyright (c) 2017-2019 MuK IT GmbH.
#
# This file is part of MuK Documents Thumbnails
# (see https://mukit.at).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###################################################################################
{
"name": 'MuK Documents Thumbnails',
"summary": """Automatic File Thumbnails""",
"version": '12.0.2.0.0',
"category": 'Document Management',
"license": "LGPL-3",
"website": "http://www.mukit.at",
'live_test_url': 'https://mukit.at/r/SgN',
"author": "MuK IT",
"contributors": [
"Mathias Markl <[email protected]>",
],
"depends": [
'muk_dms',
],
"data": [
"views/storage.xml",
"views/res_config_settings.xml",
"data/ir_cron.xml",
],
"demo": [
],
"qweb": [
"static/src/xml/*.xml",
],
"images": [
'static/description/banner.png'
],
"application": False,
"installable": True,
}
| lgpl-3.0 | 124,497,207,057,240,900 | 31.339623 | 83 | 0.553676 | false |
tectronics/arsenalsuite | python/pythondotnet/pythonnet/src/tests/test_constructors.py | 10 | 2189 | # ===========================================================================
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
# ===========================================================================
import sys, os, string, unittest, types
import Python.Test as Test
import System
class ConstructorTests(unittest.TestCase):
"""Test CLR class constructor support."""
def testEnumConstructor(self):
"""Test enum constructor args"""
from System import TypeCode
from Python.Test import EnumConstructorTest
ob = EnumConstructorTest(TypeCode.Int32)
self.failUnless(ob.value == TypeCode.Int32)
def testFlagsConstructor(self):
"""Test flags constructor args"""
from Python.Test import FlagsConstructorTest
from System.IO import FileAccess
flags = FileAccess.Read | FileAccess.Write
ob = FlagsConstructorTest(flags)
self.failUnless(ob.value == flags)
def testStructConstructor(self):
"""Test struct constructor args"""
from System import Guid
from Python.Test import StructConstructorTest
guid = Guid.NewGuid()
ob = StructConstructorTest(guid)
self.failUnless(ob.value == guid)
def testSubclassConstructor(self):
"""Test subclass constructor args"""
from Python.Test import SubclassConstructorTest
from System.Windows.Forms import Form, Control
class sub(Form):
pass
form = sub()
ob = SubclassConstructorTest(form)
self.failUnless(isinstance(ob.value, Control))
def test_suite():
return unittest.makeSuite(ConstructorTests)
def main():
unittest.TextTestRunner().run(test_suite())
if __name__ == '__main__':
main()
| gpl-2.0 | 7,870,192,741,875,254,000 | 29.724638 | 77 | 0.612152 | false |
dbbhattacharya/kitsune | kitsune/offline/tests/test__utils.py | 1 | 9768 | # -*- coding: utf-8 -*-
import time
from nose.tools import eq_
from kitsune.offline import utils
from kitsune.products.tests import product, topic
from kitsune.sumo.tests import TestCase
from kitsune.wiki.tests import document, revision
def _create_doc(title='', product=None, topic=None, is_archived=False):
title = 'test ' + title if title else 'test'
doc = document(title=title, save=True, is_archived=is_archived)
revision(summary='summary', is_approved=True, document=doc, save=True)
if is_archived:
expected = {
'key': 'en-US~' + doc.slug,
'title': doc.title,
'archived': True,
'slug': doc.slug
}
else:
updated = time.mktime(doc.current_revision.created.timetuple())
expected = {
'key': 'en-US~' + doc.slug,
'title': title,
'html': doc.html,
'updated': updated,
'slug': doc.slug,
'id': doc.id,
'archived': False
}
if product:
doc.products.add(product)
if topic:
doc.topics.add(topic)
return doc, expected
def _create_product_bundle(prefix='moo'):
p = product(title=prefix + 'firefox', save=True)
t1 = topic(title=prefix + 'topic1', product=p, save=True)
t2 = topic(title=prefix + 'topic2', product=p, save=True)
doc1, expected_doc1 = _create_doc(title=prefix + 'doc1',
product=p, topic=t1)
doc2, expected_doc2 = _create_doc(title=prefix + 'doc2',
product=p, topic=t2)
expected_locale_doc = {
'key': u'en-US',
'name': u'English',
'products': [{
'slug': p.slug,
'name': p.title
}]
}
expected_topic1 = {
'key': 'en-US~' + p.slug + '~' + t1.slug,
'name': t1.title,
'docs': [doc1.slug],
'product': p.slug,
'slug': t1.slug,
'children': []
}
expected_topic2 = {
'key': 'en-US~' + p.slug + '~' + t2.slug,
'name': t2.title,
'docs': [doc2.slug],
'product': p.slug,
'slug': t2.slug,
'children': []
}
return p, {
'doc1': expected_doc1,
'doc2': expected_doc2,
'locale': expected_locale_doc,
'topic1': expected_topic1,
'topic2': expected_topic2
}
class OfflineWikiDataGenerationTest(TestCase):
def test_serialize_document(self):
doc, expected = _create_doc()
serialized = utils.serialize_document_for_offline(doc)
eq_(expected, serialized)
def test_serialized_archived_document(self):
doc, expected = _create_doc(is_archived=True)
serialized = utils.serialize_document_for_offline(doc)
eq_(expected, serialized)
def test_bundle_for_product(self):
p, expected_bundle = _create_product_bundle()
bundle = utils.bundle_for_product(p, 'en-US')
assert 'locales' in bundle
eq_(1, len(bundle['locales']))
eq_(expected_bundle['locale'], bundle['locales'].values()[0])
assert 'topics' in bundle
eq_(2, len(bundle['topics']))
topics = sorted(bundle['topics'].values(), key=lambda t: t['slug'])
eq_(expected_bundle['topic1'], topics[0])
eq_(expected_bundle['topic2'], topics[1])
assert 'docs' in bundle
docs = sorted(bundle['docs'].values(), key=lambda d: d['title'])
eq_(expected_bundle['doc1'], docs[0])
eq_(expected_bundle['doc2'], docs[1])
assert 'indexes' in bundle
eq_(1, len(bundle['indexes']))
assert 'en-US~moofirefox' in bundle['indexes']
assert 'index' in bundle['indexes']['en-US~moofirefox']
eq_(u'en-US~moofirefox', bundle['indexes']['en-US~moofirefox']['key'])
def test_merge_bundles(self):
p1, expected_bundle1 = _create_product_bundle()
p2, expected_bundle2 = _create_product_bundle('yay')
bundle1 = utils.bundle_for_product(p1, 'en-US')
bundle2 = utils.bundle_for_product(p2, 'en-US')
merged = utils.merge_bundles(bundle1, bundle2)
assert 'locales' in merged
eq_(1, len(merged['locales']))
expected_locale = expected_bundle1['locale']
expected_locale['products'] += expected_bundle2['locale']['products']
eq_(expected_locale, merged['locales'][0])
assert 'topics' in merged
eq_(4, len(merged['topics']))
merged['topics'].sort(key=lambda t: t['slug'])
eq_(expected_bundle1['topic1'], merged['topics'][0])
eq_(expected_bundle1['topic2'], merged['topics'][1])
eq_(expected_bundle2['topic1'], merged['topics'][2])
eq_(expected_bundle2['topic2'], merged['topics'][3])
assert 'docs' in merged
eq_(4, len(merged['docs']))
merged['docs'].sort(key=lambda d: d['title'])
eq_(expected_bundle1['doc1'], merged['docs'][0])
eq_(expected_bundle1['doc2'], merged['docs'][1])
eq_(expected_bundle2['doc1'], merged['docs'][2])
eq_(expected_bundle2['doc2'], merged['docs'][3])
eq_(2, len(merged['indexes']))
merged['indexes'].sort(key=lambda i: i['key'])
eq_('en-US~moofirefox', merged['indexes'][0]['key'])
eq_('en-US~yayfirefox', merged['indexes'][1]['key'])
def test_index_generation(self):
p = product(title='firefox', save=True)
t = topic(title='topic1', product=p, save=True)
doc = document(title='firefox bookmarks',
locale='en-US', save=True)
revision(is_approved=True,
summary='this is an article about firefox bookmarks',
document=doc, save=True)
doc.products.add(p)
doc.topics.add(t)
doc2 = document(title='private browsing',
locale='en-US', save=True)
revision(is_approved=True,
summary='this is an article about private browsing',
document=doc2, save=True)
doc2.products.add(p)
doc2.topics.add(t)
bundle = utils.bundle_for_product(p, 'en-US')
index = bundle['indexes']['en-US~firefox']['index']
words_in_both = ('this', 'is', 'an', 'article', 'about')
for word in words_in_both:
assert word in index
eq_(2, len(index[word]))
eq_(2, len(index[word][0]))
eq_(2, len(index[word][1]))
assert 'firefox' in index
eq_(1, len(index['firefox']))
# Yeah. 'firefox' in this corpus _better_ score higher than 'this'.
assert index['firefox'][0][1] > index['this'][0][1]
assert 'bookmarks' in index
eq_(1, len(index['bookmarks']))
assert index['bookmarks'][0][1] > index['this'][0][1]
assert 'private' in index
eq_(1, len(index['private']))
assert index['private'][0][1] > index['this'][0][1]
assert 'browsing' in index
eq_(1, len(index['browsing']))
assert index['browsing'][0][1] > index['this'][0][1]
def test_archived_articles_in_bundle(self):
p = product(title='firefox', save=True)
t1 = topic(title='topic1', product=p, save=True)
doc = document(title='test', is_archived=True,
locale='en-US', save=True)
revision(is_approved=True, document=doc, save=True)
doc.products.add(p)
doc.topics.add(t1)
bundle = utils.bundle_for_product(p, 'en-US')
eq_(1, len(bundle['docs']))
doc = bundle['docs'].values()[0]
eq_(True, doc['archived'])
assert 'html' not in doc
eq_(1, len(bundle['topics']))
def test_redirect_articles_in_bundle(self):
p = product(title='firefox', save=True)
t1 = topic(title='topic1', product=p, save=True)
doc = document(title='test2', locale='en-US', save=True)
revision(is_approved=True,
document=doc,
save=True)
doc.products.add(p)
doc.topics.add(t1)
doc = document(title='test', locale='en-US', save=True)
revision(is_approved=True, document=doc, content=u'REDIRECT [[doc2]]',
save=True)
doc.products.add(p)
doc.topics.add(t1)
bundle = utils.bundle_for_product(p, 'en-US')
eq_(1, len(bundle['docs']))
doc = bundle['docs'].values()[0]
eq_('test2', doc['title'])
def test_bogus_articles_in_bundle(self):
p = product(title='firefox', save=True)
topic(title='topic1', product=p, save=True)
# Document with no revision should be fun
doc = document(title='test2', locale='en-US', save=True)
bundle = utils.bundle_for_product(p, 'en-US')
eq_(0, len(bundle['docs']))
eq_(0, len(bundle['topics']))
# article with no html.
revision(document=doc, content='', save=True)
bundle = utils.bundle_for_product(p, 'en-US')
eq_(0, len(bundle['docs']))
eq_(0, len(bundle['topics']))
def test_other_languages(self):
p = product(title='firefox', save=True)
t1 = topic(title='topic1', product=p, save=True)
doc = document(title='test', locale='en-US', save=True)
revision(is_approved=True, document=doc, save=True)
doc.products.add(p)
doc.topics.add(t1)
translated_doc = document(title=u'测试', locale='zh-CN', parent=doc,
save=True)
revision(is_approved=True, document=translated_doc, save=True)
bundle = utils.bundle_for_product(p, 'zh-CN')
eq_(1, len(bundle['docs']))
doc = bundle['docs'].values()[0]
eq_(u'测试', doc['title'])
| bsd-3-clause | -2,715,970,311,140,869,600 | 31.751678 | 78 | 0.55543 | false |
anotherjesse/nova | nova/objectstore/stored.py | 1 | 1869 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Properties of an object stored within a bucket.
"""
import os
import nova.crypto
from nova import exception
class Object(object):
def __init__(self, bucket, key):
""" wrapper class of an existing key """
self.bucket = bucket
self.key = key
self.path = bucket._object_path(key)
if not os.path.isfile(self.path):
raise exception.NotFound
def __repr__(self):
return "<Object %s/%s>" % (self.bucket, self.key)
@property
def md5(self):
""" computes the MD5 of the contents of file """
with open(self.path, "r") as f:
return nova.crypto.compute_md5(f)
@property
def mtime(self):
""" mtime of file """
return os.path.getmtime(self.path)
def read(self):
""" read all contents of key into memory and return """
return self.file.read()
@property
def file(self):
""" return a file object for the key """
return open(self.path, 'rb')
def delete(self):
""" deletes the file """
os.unlink(self.path)
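def _example_object_lifecycle(bucket, key):
    """Editor-added usage sketch (hypothetical caller, not part of the module).
    Assumes ``bucket`` is a bucket wrapper exposing ``_object_path`` as
    ``Object.__init__`` expects; the key name is purely illustrative."""
    obj = Object(bucket, key)     # raises exception.NotFound if the file is missing
    digest = obj.md5              # MD5 checksum of the stored contents
    payload = obj.read()          # whole object read into memory
    obj.delete()                  # unlink the backing file
    return digest, payload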
| apache-2.0 | -4,742,385,313,724,289,000 | 28.666667 | 78 | 0.644195 | false |
NathanW2/QGIS | tests/src/python/test_qgscoordinateformatter.py | 8 | 32256 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsCoordinateFormatter.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '25/07/2014'
__copyright__ = 'Copyright 2015, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis
from qgis.testing import unittest
from qgis.core import QgsCoordinateFormatter
class TestQgsCoordinateFormatter(unittest.TestCase):
def testFormatXPair(self):
"""Test formatting x as pair"""
self.assertEqual(QgsCoordinateFormatter.formatX(20, QgsCoordinateFormatter.FormatPair, 0), '20')
self.assertEqual(QgsCoordinateFormatter.formatX(-20, QgsCoordinateFormatter.FormatPair, 0), '-20')
self.assertEqual(QgsCoordinateFormatter.formatX(20.11111111111111111, QgsCoordinateFormatter.FormatPair, 3), '20.111')
self.assertEqual(QgsCoordinateFormatter.formatX(20.11161111111111111, QgsCoordinateFormatter.FormatPair, 3), '20.112')
self.assertEqual(QgsCoordinateFormatter.formatX(20, QgsCoordinateFormatter.FormatPair, 3), '20.000')
self.assertEqual(QgsCoordinateFormatter.formatX(float('inf'), QgsCoordinateFormatter.FormatPair, 3), 'infinite')
def testFormatYPair(self):
"""Test formatting y as pair"""
self.assertEqual(QgsCoordinateFormatter.formatY(20, QgsCoordinateFormatter.FormatPair, 0), '20')
self.assertEqual(QgsCoordinateFormatter.formatY(-20, QgsCoordinateFormatter.FormatPair, 0), '-20')
self.assertEqual(QgsCoordinateFormatter.formatY(20.11111111111111111, QgsCoordinateFormatter.FormatPair, 3), '20.111')
self.assertEqual(QgsCoordinateFormatter.formatY(20.11161111111111111, QgsCoordinateFormatter.FormatPair, 3), '20.112')
self.assertEqual(QgsCoordinateFormatter.formatY(20, QgsCoordinateFormatter.FormatPair, 3), '20.000')
self.assertEqual(QgsCoordinateFormatter.formatY(float('inf'), QgsCoordinateFormatter.FormatPair, 3), 'infinite')
def testAsPair(self):
"""Test formatting x/y as pair"""
self.assertEqual(QgsCoordinateFormatter.asPair(20, 30, 0), '20,30')
self.assertEqual(QgsCoordinateFormatter.asPair(20, -30, 0), '20,-30')
self.assertEqual(QgsCoordinateFormatter.asPair(20.111, 10.999, 0), '20,11')
self.assertEqual(QgsCoordinateFormatter.asPair(20.111, 10.999, 2), '20.11,11.00')
self.assertEqual(QgsCoordinateFormatter.asPair(20, 10, 2), '20.00,10.00')
self.assertEqual(QgsCoordinateFormatter.asPair(20, -10, 2), '20.00,-10.00')
def testFormatXFormatDegreesMinutesSeconds(self):
"""Test formatting x as DMS"""
self.assertEqual(QgsCoordinateFormatter.formatX(80, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"80°0′0.00″E")
# check precision
self.assertEqual(QgsCoordinateFormatter.formatX(80, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 4), u"80°0′0.0000″E")
self.assertEqual(QgsCoordinateFormatter.formatX(80.12345678, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 4), u"80°7′24.4444″E")
self.assertEqual(QgsCoordinateFormatter.formatX(80.12345678, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 0), u"80°7′24″E")
# check if longitudes > 180 or <-180 wrap around
self.assertEqual(QgsCoordinateFormatter.formatX(370, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"10°0′0.00″E")
self.assertEqual(QgsCoordinateFormatter.formatX(-370, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"10°0′0.00″W")
self.assertEqual(QgsCoordinateFormatter.formatX(181, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"179°0′0.00″W")
self.assertEqual(QgsCoordinateFormatter.formatX(-181, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"179°0′0.00″E")
self.assertEqual(QgsCoordinateFormatter.formatX(359, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"1°0′0.00″W")
self.assertEqual(QgsCoordinateFormatter.formatX(-359, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"1°0′0.00″E")
# should be no directional suffixes for 0 degree coordinates
self.assertEqual(QgsCoordinateFormatter.formatX(0, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"0°0′0.00″")
# should also be no directional suffix for 0 degree coordinates within specified precision
self.assertEqual(QgsCoordinateFormatter.formatX(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"0°0′0.00″")
self.assertEqual(QgsCoordinateFormatter.formatX(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 5), u"0°0′0.00360″W")
self.assertEqual(QgsCoordinateFormatter.formatX(0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"0°0′0.00″")
self.assertEqual(QgsCoordinateFormatter.formatX(0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 5), u"0°0′0.00360″E")
# should be no directional suffixes for 180 degree longitudes
self.assertEqual(QgsCoordinateFormatter.formatX(180, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"180°0′0.00″")
self.assertEqual(QgsCoordinateFormatter.formatX(179.999999, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"180°0′0.00″")
self.assertEqual(QgsCoordinateFormatter.formatX(179.999999, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 5), u"179°59′59.99640″E")
self.assertEqual(QgsCoordinateFormatter.formatX(180.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"180°0′0.00″")
self.assertEqual(QgsCoordinateFormatter.formatX(180.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 5), u"179°59′59.99640″W")
# test rounding does not create seconds >= 60
self.assertEqual(QgsCoordinateFormatter.formatX(99.999999, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"100°0′0.00″E")
self.assertEqual(QgsCoordinateFormatter.formatX(89.999999, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"90°0′0.00″E")
# test without direction suffix
self.assertEqual(QgsCoordinateFormatter.formatX(80, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, QgsCoordinateFormatter.FormatFlags()), u"80°0′0.00″")
# test 0 longitude
self.assertEqual(QgsCoordinateFormatter.formatX(0, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, QgsCoordinateFormatter.FormatFlags()), u"0°0′0.00″")
# test near zero longitude
self.assertEqual(QgsCoordinateFormatter.formatX(0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, QgsCoordinateFormatter.FormatFlags()), u"0°0′0.00″")
# should be no "-" prefix for near-zero longitude when rounding to 2 decimal places
self.assertEqual(QgsCoordinateFormatter.formatX(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, QgsCoordinateFormatter.FormatFlags()), u"0°0′0.00″")
self.assertEqual(QgsCoordinateFormatter.formatX(0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 5, QgsCoordinateFormatter.FormatFlags()), u"0°0′0.00360″")
self.assertEqual(QgsCoordinateFormatter.formatX(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 5, QgsCoordinateFormatter.FormatFlags()), u"-0°0′0.00360″")
# test with padding
padding_and_suffix = QgsCoordinateFormatter.FormatFlags(QgsCoordinateFormatter.FlagDegreesPadMinutesSeconds | QgsCoordinateFormatter.FlagDegreesUseStringSuffix)
self.assertEqual(QgsCoordinateFormatter.formatX(80, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, padding_and_suffix), u"80°00′00.00″E")
self.assertEqual(QgsCoordinateFormatter.formatX(85.44, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, padding_and_suffix), u"85°26′24.00″E")
self.assertEqual(QgsCoordinateFormatter.formatX(0, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, padding_and_suffix), u"0°00′00.00″")
self.assertEqual(QgsCoordinateFormatter.formatX(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, padding_and_suffix), u"0°00′00.00″")
self.assertEqual(QgsCoordinateFormatter.formatX(0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, padding_and_suffix), u"0°00′00.00″")
self.assertEqual(QgsCoordinateFormatter.formatX(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 5, padding_and_suffix), u"0°00′00.00360″W")
self.assertEqual(QgsCoordinateFormatter.formatX(0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 5, padding_and_suffix), u"0°00′00.00360″E")
def testFormatYFormatDegreesMinutesSeconds(self):
"""Test formatting y as DMS"""
self.assertEqual(QgsCoordinateFormatter.formatY(20, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"20°0′0.00″N")
# check precision
self.assertEqual(QgsCoordinateFormatter.formatY(20, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 4), u"20°0′0.0000″N")
self.assertEqual(QgsCoordinateFormatter.formatY(20.12345678, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 4), u"20°7′24.4444″N")
self.assertEqual(QgsCoordinateFormatter.formatY(20.12345678, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 0), u"20°7′24″N")
# check if latitudes > 90 or <-90 wrap around
self.assertEqual(QgsCoordinateFormatter.formatY(190, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"10°0′0.00″N")
self.assertEqual(QgsCoordinateFormatter.formatY(-190, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"10°0′0.00″S")
self.assertEqual(QgsCoordinateFormatter.formatY(91, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"89°0′0.00″S")
self.assertEqual(QgsCoordinateFormatter.formatY(-91, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"89°0′0.00″N")
self.assertEqual(QgsCoordinateFormatter.formatY(179, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"1°0′0.00″S")
self.assertEqual(QgsCoordinateFormatter.formatY(-179, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"1°0′0.00″N")
# should be no directional suffixes for 0 degree coordinates
self.assertEqual(QgsCoordinateFormatter.formatY(0, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"0°0′0.00″")
# should also be no directional suffix for 0 degree coordinates within specified precision
self.assertEqual(QgsCoordinateFormatter.formatY(0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"0°0′0.00″")
self.assertEqual(QgsCoordinateFormatter.formatY(0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 5), u"0°0′0.00360″N")
self.assertEqual(QgsCoordinateFormatter.formatY(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"0°0′0.00″")
self.assertEqual(QgsCoordinateFormatter.formatY(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 5), u"0°0′0.00360″S")
# test rounding does not create seconds >= 60
self.assertEqual(QgsCoordinateFormatter.formatY(89.999999, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"90°0′0.00″N")
# test without direction suffix
self.assertEqual(QgsCoordinateFormatter.formatY(20, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, QgsCoordinateFormatter.FormatFlags()), u"20°0′0.00″")
# test 0 latitude
self.assertEqual(QgsCoordinateFormatter.formatY(0, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, QgsCoordinateFormatter.FormatFlags()), u"0°0′0.00″")
# test near zero lat/long
self.assertEqual(QgsCoordinateFormatter.formatY(0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, QgsCoordinateFormatter.FormatFlags()), u"0°0′0.00″")
# should be no "-" prefix for near-zero latitude when rounding to 2 decimal places
self.assertEqual(QgsCoordinateFormatter.formatY(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, QgsCoordinateFormatter.FormatFlags()), u"0°0′0.00″")
self.assertEqual(QgsCoordinateFormatter.formatY(0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 5, QgsCoordinateFormatter.FormatFlags()), u"0°0′0.00360″")
self.assertEqual(QgsCoordinateFormatter.formatY(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 5, QgsCoordinateFormatter.FormatFlags()), u"-0°0′0.00360″")
# test with padding
padding_and_suffix = QgsCoordinateFormatter.FormatFlags(QgsCoordinateFormatter.FlagDegreesPadMinutesSeconds | QgsCoordinateFormatter.FlagDegreesUseStringSuffix)
self.assertEqual(QgsCoordinateFormatter.formatY(20, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, padding_and_suffix), u"20°00′00.00″N")
self.assertEqual(QgsCoordinateFormatter.formatY(85.44, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, padding_and_suffix), u"85°26′24.00″N")
self.assertEqual(QgsCoordinateFormatter.formatY(0, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, padding_and_suffix), u"0°00′00.00″")
self.assertEqual(QgsCoordinateFormatter.formatY(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, padding_and_suffix), u"0°00′00.00″")
self.assertEqual(QgsCoordinateFormatter.formatY(0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, padding_and_suffix), u"0°00′00.00″")
self.assertEqual(QgsCoordinateFormatter.formatY(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 5, padding_and_suffix), u"0°00′00.00360″S")
self.assertEqual(QgsCoordinateFormatter.formatY(0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 5, padding_and_suffix), u"0°00′00.00360″N")
def testFormatXDegreesMinutes(self):
"""Test formatting x as DM"""
self.assertEqual(QgsCoordinateFormatter.formatX(80, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"80°0.00′E")
# check precision
self.assertEqual(QgsCoordinateFormatter.formatX(80, QgsCoordinateFormatter.FormatDegreesMinutes, 4), u"80°0.0000′E")
self.assertEqual(QgsCoordinateFormatter.formatX(80.12345678, QgsCoordinateFormatter.FormatDegreesMinutes, 4), u"80°7.4074′E")
self.assertEqual(QgsCoordinateFormatter.formatX(80.12345678, QgsCoordinateFormatter.FormatDegreesMinutes, 0), u"80°7′E")
# check if longitudes > 180 or <-180 wrap around
self.assertEqual(QgsCoordinateFormatter.formatX(370, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"10°0.00′E")
self.assertEqual(QgsCoordinateFormatter.formatX(-370, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"10°0.00′W")
self.assertEqual(QgsCoordinateFormatter.formatX(181, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"179°0.00′W")
self.assertEqual(QgsCoordinateFormatter.formatX(-181, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"179°0.00′E")
self.assertEqual(QgsCoordinateFormatter.formatX(359, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"1°0.00′W")
self.assertEqual(QgsCoordinateFormatter.formatX(-359, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"1°0.00′E")
# should be no directional suffixes for 0 degree coordinates
self.assertEqual(QgsCoordinateFormatter.formatX(0, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"0°0.00′")
# should also be no directional suffix for 0 degree coordinates within specified precision
self.assertEqual(QgsCoordinateFormatter.formatX(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"0°0.00′")
self.assertEqual(QgsCoordinateFormatter.formatX(0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"0°0.00′")
self.assertEqual(QgsCoordinateFormatter.formatX(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 5), u"0°0.00006′W")
self.assertEqual(QgsCoordinateFormatter.formatX(0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 5), u"0°0.00006′E")
# test rounding does not create minutes >= 60
self.assertEqual(QgsCoordinateFormatter.formatX(99.999999, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"100°0.00′E")
# should be no directional suffixes for 180 degree longitudes
self.assertEqual(QgsCoordinateFormatter.formatX(180, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"180°0.00′")
# should also be no directional suffix for 180 degree longitudes within specified precision
self.assertEqual(QgsCoordinateFormatter.formatX(180.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"180°0.00′")
self.assertEqual(QgsCoordinateFormatter.formatX(179.999999, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"180°0.00′")
self.assertEqual(QgsCoordinateFormatter.formatX(180.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 5), u"179°59.99994′W")
self.assertEqual(QgsCoordinateFormatter.formatX(179.999999, QgsCoordinateFormatter.FormatDegreesMinutes, 5), u"179°59.99994′E")
# test without direction suffix
self.assertEqual(QgsCoordinateFormatter.formatX(80, QgsCoordinateFormatter.FormatDegreesMinutes, 2, QgsCoordinateFormatter.FormatFlags()), u"80°0.00′")
# test 0 longitude
self.assertEqual(QgsCoordinateFormatter.formatX(0, QgsCoordinateFormatter.FormatDegreesMinutes, 2, QgsCoordinateFormatter.FormatFlags()), u"0°0.00′")
# test near zero longitude
self.assertEqual(QgsCoordinateFormatter.formatX(0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 2, QgsCoordinateFormatter.FormatFlags()), u"0°0.00′")
# should be no "-" prefix for near-zero longitude when rounding to 2 decimal places
self.assertEqual(QgsCoordinateFormatter.formatX(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 2, QgsCoordinateFormatter.FormatFlags()), u"0°0.00′")
self.assertEqual(QgsCoordinateFormatter.formatX(0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 5, QgsCoordinateFormatter.FormatFlags()), u"0°0.00006′")
self.assertEqual(QgsCoordinateFormatter.formatX(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 5, QgsCoordinateFormatter.FormatFlags()), u"-0°0.00006′")
# test with padding
padding_and_suffix = QgsCoordinateFormatter.FormatFlags(QgsCoordinateFormatter.FlagDegreesPadMinutesSeconds | QgsCoordinateFormatter.FlagDegreesUseStringSuffix)
self.assertEqual(QgsCoordinateFormatter.formatX(80, QgsCoordinateFormatter.FormatDegreesMinutes, 2, padding_and_suffix), u"80°00.00′E")
self.assertEqual(QgsCoordinateFormatter.formatX(0, QgsCoordinateFormatter.FormatDegreesMinutes, 2, padding_and_suffix), u"0°00.00′")
self.assertEqual(QgsCoordinateFormatter.formatX(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 2, padding_and_suffix), u"0°00.00′")
self.assertEqual(QgsCoordinateFormatter.formatX(0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 2, padding_and_suffix), u"0°00.00′")
self.assertEqual(QgsCoordinateFormatter.formatX(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 5, padding_and_suffix), u"0°00.00006′W")
self.assertEqual(QgsCoordinateFormatter.formatX(0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 5, padding_and_suffix), u"0°00.00006′E")
def testFormatYDegreesMinutes(self):
"""Test formatting y as DM"""
self.assertEqual(QgsCoordinateFormatter.formatY(20, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"20°0.00′N")
# check precision
self.assertEqual(QgsCoordinateFormatter.formatY(20, QgsCoordinateFormatter.FormatDegreesMinutes, 4), u"20°0.0000′N")
self.assertEqual(QgsCoordinateFormatter.formatY(20.12345678, QgsCoordinateFormatter.FormatDegreesMinutes, 4), u"20°7.4074′N")
self.assertEqual(QgsCoordinateFormatter.formatY(20.12345678, QgsCoordinateFormatter.FormatDegreesMinutes, 0), u"20°7′N")
# check if latitudes > 90 or <-90 wrap around
self.assertEqual(QgsCoordinateFormatter.formatY(190, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"10°0.00′N")
self.assertEqual(QgsCoordinateFormatter.formatY(-190, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"10°0.00′S")
self.assertEqual(QgsCoordinateFormatter.formatY(91, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"89°0.00′S")
self.assertEqual(QgsCoordinateFormatter.formatY(-91, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"89°0.00′N")
self.assertEqual(QgsCoordinateFormatter.formatY(179, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"1°0.00′S")
self.assertEqual(QgsCoordinateFormatter.formatY(-179, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"1°0.00′N")
# should be no directional suffixes for 0 degree coordinates
self.assertEqual(QgsCoordinateFormatter.formatY(0, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"0°0.00′")
# should also be no directional suffix for 0 degree coordinates within specified precision
self.assertEqual(QgsCoordinateFormatter.formatY(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"0°0.00′")
self.assertEqual(QgsCoordinateFormatter.formatY(0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"0°0.00′")
self.assertEqual(QgsCoordinateFormatter.formatY(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 5), u"0°0.00006′S")
self.assertEqual(QgsCoordinateFormatter.formatY(0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 5), u"0°0.00006′N")
# test rounding does not create minutes >= 60
self.assertEqual(QgsCoordinateFormatter.formatY(79.999999, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"80°0.00′N")
# test without direction suffix
self.assertEqual(QgsCoordinateFormatter.formatY(20, QgsCoordinateFormatter.FormatDegreesMinutes, 2, QgsCoordinateFormatter.FormatFlags()), u"20°0.00′")
# test 0 latitude
self.assertEqual(QgsCoordinateFormatter.formatY(0, QgsCoordinateFormatter.FormatDegreesMinutes, 2, QgsCoordinateFormatter.FormatFlags()), u"0°0.00′")
# test near zero latitude
self.assertEqual(QgsCoordinateFormatter.formatY(0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 2, QgsCoordinateFormatter.FormatFlags()), u"0°0.00′")
# should be no "-" prefix for near-zero latitude when rounding to 2 decimal places
self.assertEqual(QgsCoordinateFormatter.formatY(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 2, QgsCoordinateFormatter.FormatFlags()), u"0°0.00′")
self.assertEqual(QgsCoordinateFormatter.formatY(0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 5, QgsCoordinateFormatter.FormatFlags()), u"0°0.00006′")
self.assertEqual(QgsCoordinateFormatter.formatY(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 5, QgsCoordinateFormatter.FormatFlags()), u"-0°0.00006′")
# test with padding
padding_and_suffix = QgsCoordinateFormatter.FormatFlags(QgsCoordinateFormatter.FlagDegreesPadMinutesSeconds | QgsCoordinateFormatter.FlagDegreesUseStringSuffix)
self.assertEqual(QgsCoordinateFormatter.formatY(20, QgsCoordinateFormatter.FormatDegreesMinutes, 2, padding_and_suffix), u"20°00.00′N")
self.assertEqual(QgsCoordinateFormatter.formatY(0, QgsCoordinateFormatter.FormatDegreesMinutes, 2, padding_and_suffix), u"0°00.00′")
self.assertEqual(QgsCoordinateFormatter.formatY(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 2, padding_and_suffix), u"0°00.00′")
self.assertEqual(QgsCoordinateFormatter.formatY(0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 2, padding_and_suffix), u"0°00.00′")
self.assertEqual(QgsCoordinateFormatter.formatY(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 5, padding_and_suffix), u"0°00.00006′S")
self.assertEqual(QgsCoordinateFormatter.formatY(0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 5, padding_and_suffix), u"0°00.00006′N")
def testFormatXDegrees(self):
"""Test formatting x as decimal degrees"""
self.assertEqual(QgsCoordinateFormatter.formatX(80, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"80.00°E")
# check precision
self.assertEqual(QgsCoordinateFormatter.formatX(80, QgsCoordinateFormatter.FormatDecimalDegrees, 4), u"80.0000°E")
self.assertEqual(QgsCoordinateFormatter.formatX(80.12345678, QgsCoordinateFormatter.FormatDecimalDegrees, 4), u"80.1235°E")
self.assertEqual(QgsCoordinateFormatter.formatX(80.12345678, QgsCoordinateFormatter.FormatDecimalDegrees, 0), u"80°E")
# check if longitudes > 180 or <-180 wrap around
self.assertEqual(QgsCoordinateFormatter.formatX(370, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"10.00°E")
self.assertEqual(QgsCoordinateFormatter.formatX(-370, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"10.00°W")
self.assertEqual(QgsCoordinateFormatter.formatX(181, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"179.00°W")
self.assertEqual(QgsCoordinateFormatter.formatX(-181, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"179.00°E")
self.assertEqual(QgsCoordinateFormatter.formatX(359, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"1.00°W")
self.assertEqual(QgsCoordinateFormatter.formatX(-359, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"1.00°E")
# should be no directional suffixes for 0 degree coordinates
self.assertEqual(QgsCoordinateFormatter.formatX(0, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"0.00°")
# should also be no directional suffix for 0 degree coordinates within specified precision
self.assertEqual(QgsCoordinateFormatter.formatX(-0.00001, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"0.00°")
self.assertEqual(QgsCoordinateFormatter.formatX(0.00001, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"0.00°")
self.assertEqual(QgsCoordinateFormatter.formatX(-0.00001, QgsCoordinateFormatter.FormatDecimalDegrees, 5), u"0.00001°W")
self.assertEqual(QgsCoordinateFormatter.formatX(0.00001, QgsCoordinateFormatter.FormatDecimalDegrees, 5), u"0.00001°E")
# should be no directional suffixes for 180 degree longitudes
self.assertEqual(QgsCoordinateFormatter.formatX(180, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"180.00°")
# should also be no directional suffix for 180 degree longitudes within specified precision
self.assertEqual(QgsCoordinateFormatter.formatX(180.000001, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"180.00°")
self.assertEqual(QgsCoordinateFormatter.formatX(179.999999, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"180.00°")
self.assertEqual(QgsCoordinateFormatter.formatX(180.000001, QgsCoordinateFormatter.FormatDecimalDegrees, 6), u"179.999999°W")
self.assertEqual(QgsCoordinateFormatter.formatX(179.999999, QgsCoordinateFormatter.FormatDecimalDegrees, 6), u"179.999999°E")
# test without direction suffix
self.assertEqual(QgsCoordinateFormatter.formatX(80, QgsCoordinateFormatter.FormatDecimalDegrees, 2, QgsCoordinateFormatter.FormatFlags()), u"80.00°")
# test 0 longitude
self.assertEqual(QgsCoordinateFormatter.formatX(0, QgsCoordinateFormatter.FormatDecimalDegrees, 2, QgsCoordinateFormatter.FormatFlags()), u"0.00°")
# test near zero longitude
self.assertEqual(QgsCoordinateFormatter.formatX(0.000001, QgsCoordinateFormatter.FormatDecimalDegrees, 2, QgsCoordinateFormatter.FormatFlags()), u"0.00°")
# should be no "-" prefix for near-zero longitude when rounding to 2 decimal places
self.assertEqual(QgsCoordinateFormatter.formatX(-0.000001, QgsCoordinateFormatter.FormatDecimalDegrees, 2, QgsCoordinateFormatter.FormatFlags()), u"0.00°")
self.assertEqual(QgsCoordinateFormatter.formatX(0.000001, QgsCoordinateFormatter.FormatDecimalDegrees, 6, QgsCoordinateFormatter.FormatFlags()), u"0.000001°")
self.assertEqual(QgsCoordinateFormatter.formatX(-0.000001, QgsCoordinateFormatter.FormatDecimalDegrees, 6, QgsCoordinateFormatter.FormatFlags()), u"-0.000001°")
def testFormatYDegrees(self):
"""Test formatting y as decimal degrees"""
self.assertEqual(QgsCoordinateFormatter.formatY(20, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"20.00°N")
# check precision
self.assertEqual(QgsCoordinateFormatter.formatY(20, QgsCoordinateFormatter.FormatDecimalDegrees, 4), u"20.0000°N")
self.assertEqual(QgsCoordinateFormatter.formatY(20.12345678, QgsCoordinateFormatter.FormatDecimalDegrees, 4), u"20.1235°N")
self.assertEqual(QgsCoordinateFormatter.formatY(20.12345678, QgsCoordinateFormatter.FormatDecimalDegrees, 0), u"20°N")
# check if latitudes > 90 or <-90 wrap around
self.assertEqual(QgsCoordinateFormatter.formatY(190, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"10.00°N")
self.assertEqual(QgsCoordinateFormatter.formatY(-190, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"10.00°S")
self.assertEqual(QgsCoordinateFormatter.formatY(91, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"89.00°S")
self.assertEqual(QgsCoordinateFormatter.formatY(-91, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"89.00°N")
self.assertEqual(QgsCoordinateFormatter.formatY(179, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"1.00°S")
self.assertEqual(QgsCoordinateFormatter.formatY(-179, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"1.00°N")
# should be no directional suffixes for 0 degree coordinates
self.assertEqual(QgsCoordinateFormatter.formatY(0, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"0.00°")
# should also be no directional suffix for 0 degree coordinates within specified precision
self.assertEqual(QgsCoordinateFormatter.formatY(-0.00001, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"0.00°")
self.assertEqual(QgsCoordinateFormatter.formatY(0.00001, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"0.00°")
self.assertEqual(QgsCoordinateFormatter.formatY(-0.00001, QgsCoordinateFormatter.FormatDecimalDegrees, 5), u"0.00001°S")
self.assertEqual(QgsCoordinateFormatter.formatY(0.00001, QgsCoordinateFormatter.FormatDecimalDegrees, 5), u"0.00001°N")
# test without direction suffix
self.assertEqual(QgsCoordinateFormatter.formatY(80, QgsCoordinateFormatter.FormatDecimalDegrees, 2, QgsCoordinateFormatter.FormatFlags()), u"80.00°")
# test 0 longitude
self.assertEqual(QgsCoordinateFormatter.formatY(0, QgsCoordinateFormatter.FormatDecimalDegrees, 2, QgsCoordinateFormatter.FormatFlags()), u"0.00°")
# test near zero latitude
self.assertEqual(QgsCoordinateFormatter.formatY(0.000001, QgsCoordinateFormatter.FormatDecimalDegrees, 2, QgsCoordinateFormatter.FormatFlags()), u"0.00°")
# should be no "-" prefix for near-zero latitude when rounding to 2 decimal places
self.assertEqual(QgsCoordinateFormatter.formatY(-0.000001, QgsCoordinateFormatter.FormatDecimalDegrees, 2, QgsCoordinateFormatter.FormatFlags()), u"0.00°")
self.assertEqual(QgsCoordinateFormatter.formatY(0.000001, QgsCoordinateFormatter.FormatDecimalDegrees, 6, QgsCoordinateFormatter.FormatFlags()), u"0.000001°")
self.assertEqual(QgsCoordinateFormatter.formatY(-0.000001, QgsCoordinateFormatter.FormatDecimalDegrees, 6, QgsCoordinateFormatter.FormatFlags()), u"-0.000001°")
if __name__ == "__main__":
unittest.main()
| gpl-2.0 | 2,592,582,362,035,366,000 | 88.818697 | 178 | 0.77099 | false |
mooncoindev/mooncoin | share/seeds/generate-seeds.py | 1 | 4298 | #!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
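# Editor's note (illustrative, hedged): given a nodes_main.txt line such as
#     1.2.3.4:8333
# the script emits one SeedSpec6 entry holding 16 address bytes plus a port, e.g.
#     {{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0xff,0x01,0x02,0x03,0x04}, 8333}
# IPv4 addresses are embedded behind the IPv4-mapped prefix defined below as pchIPv4.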
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
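def _example_conversions():
    """Editor-added sketch (never called): illustrates the helpers above.
    The sample addresses and port are hypothetical."""
    v4 = name_to_ipv6('1.2.3.4')                 # pchIPv4 prefix + the four octets
    v6 = name_to_ipv6('2001:db8::1')             # full 16-byte IPv6 address
    host, port = parse_spec('1.2.3.4:8333', 0)   # explicit port overrides the default
    return v4, v6, host, port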
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by share/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
    g.write(' * IPv4 as well as onion addresses are wrapped inside an IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 44664)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 55664)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| mit | 4,856,943,736,252,471,000 | 30.837037 | 98 | 0.576082 | false |
kubostech/kubos-cli | kubos/utils/status_spinner.py | 1 | 2295 | # Kubos SDK
# Copyright (C) 2016 Kubos Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import threading
import time
class StatusSpinner(threading.Thread):
def __init__(self, interval):
threading.Thread.__init__(self)
self.interval = interval
self.stop_lock = threading.Lock()
self.stop_signal = False
def stop(self):
with self.stop_lock:
self.stop_signal = True
def run(self):
spinner = self.get_spinner()
while True:
sys.stdout.write("%s" % spinner.next())
sys.stdout.flush()
sys.stdout.write('\b')
with self.stop_lock:
if self.stop_signal:
return
time.sleep(self.interval)
def get_spinner(self):
while True:
for symbol in '|/-\\':
yield symbol
def start_spinner():
""" Start a status spinner that prints a spinning character to stdout.
This method starts a thread, and writes to stdout from that thread, so
using this method introduces concerns of thread safe access to stdout.
The spinner will lock stdout_lock when writing to stdout, and all
other writers to stdout should do the same to prevent interleaving
stdout output.
Returns the StatusSpinner object, to be later passed to
stop_spinner(spinner) when the spinner should stop.
"""
spinner = StatusSpinner(0.1)
spinner.daemon = True
spinner.start()
return spinner
def stop_spinner(spinner):
""" Stops the provided StatusSpinner.
This method blocks on the status spinner thread exiting, and the caller
can be guaranteed that the thread is terminated once this method returns.
"""
spinner.stop()
spinner.join()
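def _example_spinner_usage():
    """Editor-added usage sketch (hypothetical): spin while work runs, then stop.
    Note that this module exposes no explicit stdout lock, so other code should
    avoid writing to stdout while the spinner is active."""
    spinner = start_spinner()
    try:
        time.sleep(2)             # stand-in for the real long-running work
    finally:
        stop_spinner(spinner)     # blocks until the spinner thread has exited
    sys.stdout.write('done\n')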
| apache-2.0 | -2,279,987,537,932,540,400 | 29.197368 | 77 | 0.664052 | false |
andrei-karalionak/ggrc-core | test/selenium/src/lib/constants/element.py | 1 | 4723 | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Module containing element labels or properties e.g. date formatting"""
# pylint: disable=too-few-public-methods
from lib.constants import objects
# size of the header in px
SIZE_HEADER = 50
class LandingPage(object):
BUTTON_LOGIN = "Login"
PROGRAM_INFO_TAB = "Program Info"
class PageHeader(object):
# dropdown toggle
PROPLE_LIST_WIDGET = "Admin Dashboard"
class Lhn(object):
"""Labels in the LHN menu"""
class __metaclass__(type):
def __init__(self, *args):
for object_ in objects.ALL_PLURAL:
setattr(self, object_, object_.lower())
self.DIRECTIVES_MEMBERS = (
self.REGULATIONS,
self.POLICIES,
self.STANDARDS,
self.CONTRACTS,
self.CLAUSES,
self.SECTIONS)
self.CONTROLS_OR_OBJECTIVES_MEMBERS = (
self.CONTROLS,
self.OBJECTIVES)
self.PEOPLE_OR_GROUPS_MEMBERS = (
self.PEOPLE,
self.ORG_GROUPS,
self.VENDORS,
self.ACCESS_GROUPS)
self.ASSETS_OR_BUSINESS_MEMBERS = (
self.SYSTEMS,
self.PROCESSES,
self.DATA_ASSETS,
self.PRODUCTS,
self.PROJECTS,
self.FACILITIES,
self.MARKETS)
self.RISKS_OR_THREATS_MEMBERS = (
self.RISKS,
self.THREATS)
CONTROLS_OR_OBJECTIVES = "controls_or_objectives"
PEOPLE_OR_GROUPS = "people_or_groups"
ASSETS_OR_BUSINESS = "assets_or_business"
RISKS_OR_THREATS = "risks_or_threats"
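# Editor's note (hedged): the nested ``__metaclass__`` above is a Python 2 idiom.
# When the Lhn class object is created, the metaclass __init__ runs once and stamps
# a lowercase attribute onto the class for every entry of objects.ALL_PLURAL, so
# (assuming ALL_PLURAL contains names such as "CONTROLS" and "REGULATIONS"):
#     Lhn.CONTROLS == "controls"
#     Lhn.DIRECTIVES_MEMBERS[0] == Lhn.REGULATIONS == "regulations"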
class ModalLhnCreateProgram(object):
"""Modal for creating a Program."""
# create new program
DATE_FORMATTING = "%d/%m/%Y"
OBJECT_REVIEW = "Object Review"
PRIVATE_PROGRAM = "Private Program"
DESCRIPTION = "Description"
NOTES = "Notes"
MANAGER = "Manager"
PROGRAM_URL = "Program URL"
STATE = "State"
PRIMARY_CONTACT = "Primary Contact"
SECONDARY_CONTACT = "Secondary Contact"
REFERENCE_URL = "Reference URL"
CODE = "Code"
EFFECTIVE_DATE = "Effective Date"
STOP_DATE = "Stop Date"
class WidgetBar(object):
"""Labels specific for a generic widget bar"""
# identifier for the object's info page
INFO = "Info"
class __metaclass__(type):
def __init__(self, *args):
for object_ in objects.ALL_PLURAL:
setattr(self, object_, object_.lower())
class WidgetProgramInfo(object):
"""Labels specific to program info widget"""
SUBMIT_FOR_REVIEW = "Submit For Review"
# state in lhn_modal create new page
DRAFT = "Draft"
FINAL = "Final"
EFFECTIVE = "Effective"
INEFFECTIVE = "Ineffective"
LAUNCHED = "Launched"
NOT_LAUNCHED = "Not Launched"
IN_SCOPE = "In Scope"
NOT_IN_SCOPE = "Not in Scope"
DEPRECATED = "Deprecated"
# button settings dropdown elements
EDIT_PROGRAM = "Edit Program"
GET_PERMALINK = "Get permalink"
DELETE = "Delete"
BUTTON_SETTINGS_DROPDOWN_ITEMS = [EDIT_PROGRAM, GET_PERMALINK, DELETE]
ALERT_LINK_COPIED = "Link has been copied to your clipboard."
class AdminRolesWidget(object):
"""Role scopes for Roles widget at Admin dashboard"""
_SYS_SCOPE = "SYSTEM"
_PRG_SCOPE = "PRIVATE PROGRAM"
_WF_SCOPE = "WORKFLOW"
ROLE_SCOPE_ADMINISTRATOR = ("Administrator", "ADMIN")
ROLE_SCOPE_CREATOR = ("Creator", _SYS_SCOPE)
ROLE_SCOPE_EDITOR = ("Editor", _SYS_SCOPE)
ROLE_SCOPE_READER = ("Reader", _SYS_SCOPE)
ROLE_SCOPE_PROGRAM_EDITOR = ("Program Editor", _PRG_SCOPE)
ROLE_SCOPE_PROGRAM_MANAGER = ("Program Manager", _PRG_SCOPE)
ROLE_SCOPE_PROGRAM_READER = ("Program Reader", _PRG_SCOPE)
ROLE_SCOPE_WORKFLOW_MEMBER = ("Workflow Member", _WF_SCOPE)
ROLE_SCOPE_WORKFLOW_MANAGER = ("Workflow Manager", _WF_SCOPE)
ROLE_SCOPES_LIST = [ROLE_SCOPE_ADMINISTRATOR,
ROLE_SCOPE_CREATOR,
ROLE_SCOPE_EDITOR,
ROLE_SCOPE_PROGRAM_EDITOR,
ROLE_SCOPE_PROGRAM_MANAGER,
ROLE_SCOPE_PROGRAM_READER,
ROLE_SCOPE_READER,
ROLE_SCOPE_WORKFLOW_MEMBER,
ROLE_SCOPE_WORKFLOW_MANAGER]
ROLE_SCOPES_DICT = dict(ROLE_SCOPES_LIST)
class AdminEventsWidget(object):
"""Label and regular expression for Event widget at Admin dashboard"""
TREE_VIEW_HEADER = "Events"
TREE_VIEW_ROW_REGEXP = r"^.+\s(by.+)\son\s" + \
r"(\d{2}/\d{2}/\d{4}\s\d{2}:\d{2}:\d{2}\s[A,P]M)"
class AttributesTypes(object):
"""Possible types of Custom Attributes."""
TEXT = "Text"
RICH_TEXT = "Rich Text"
DATE = "Date"
CHECKBOX = "Checkbox"
DROPDOWN = "Dropdown"
PERSON = "Map:Person"
ALL_TYPES = (TEXT, RICH_TEXT, DATE, CHECKBOX, DROPDOWN, PERSON)
| apache-2.0 | -2,136,699,178,413,161,200 | 27.97546 | 78 | 0.638154 | false |
aio-libs/aiohttp-debugtoolbar | aiohttp_debugtoolbar/panels/traceback.py | 1 | 2027 | import re
from .base import DebugPanel
from ..tbtools.tbtools import Traceback
from ..utils import escape, APP_KEY, STATIC_ROUTE_NAME
from ..utils import ROOT_ROUTE_NAME
__all__ = ['TracebackPanel']
class TracebackPanel(DebugPanel):
name = 'Traceback'
template = 'traceback.jinja2'
title = 'Traceback'
nav_title = title
def __init__(self, request):
super().__init__(request)
self.exc_history = request.app[APP_KEY]['exc_history']
@property
def has_content(self):
if self._request.get('pdbt_tb'):
return True
return False
async def process_response(self, response):
if not self.has_content:
return
traceback = self._request['pdbt_tb']
exc = escape(traceback.exception)
summary = Traceback.render_summary(traceback, self._request.app,
include_title=False)
token = self.request.app[APP_KEY]['pdtb_token']
url = '' # self.request.route_url(EXC_ROUTE_NAME, _query=qs)
evalex = self.exc_history.eval_exc
self.data = {
'evalex': evalex and 'true' or 'false',
'console': 'false',
'lodgeit_url': None,
'title': exc,
'exception': exc,
'exception_type': escape(traceback.exception_type),
'summary': summary,
'plaintext': traceback.plaintext,
'plaintext_cs': re.sub('-{2,}', '-', traceback.plaintext),
'traceback_id': traceback.id,
'token': token,
'url': url,
}
def render_content(self, request):
return super(TracebackPanel, self).render_content(request)
def render_vars(self, request):
static_path = self._request.app.router[STATIC_ROUTE_NAME] \
.url_for(filename='')
root_path = self.request.app.router[ROOT_ROUTE_NAME].url_for()
return {
'static_path': static_path,
'root_path': root_path
}
| apache-2.0 | -2,017,070,609,646,823,700 | 30.184615 | 72 | 0.568821 | false |
n0trax/ansible | lib/ansible/plugins/action/win_copy.py | 10 | 21791 | # This file is part of Ansible
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import json
import os
import os.path
import tempfile
import traceback
import zipfile
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
from ansible.utils.hashing import checksum
def _walk_dirs(topdir, base_path=None, local_follow=False, trailing_slash_detector=None, checksum_check=False):
"""
Walk a filesystem tree returning enough information to copy the files.
This is similar to the _walk_dirs function in ``copy.py`` but returns
a dict instead of a tuple for each entry and includes the checksum of
a local file if wanted.
:arg topdir: The directory that the filesystem tree is rooted at
:kwarg base_path: The initial directory structure to strip off of the
files for the destination directory. If this is None (the default),
the base_path is set to ``top_dir``.
:kwarg local_follow: Whether to follow symlinks on the source. When set
to False, no symlinks are dereferenced. When set to True (the
default), the code will dereference most symlinks. However, symlinks
can still be present if needed to break a circular link.
:kwarg trailing_slash_detector: Function to determine if a path has
a trailing directory separator. Only needed when dealing with paths on
a remote machine (in which case, pass in a function that is aware of the
directory separator conventions on the remote machine).
    :kwarg checksum_check: Whether to get the checksum of each local file and add it to the dict
    :returns: dictionary of dictionaries. All of the path elements in the structure are text strings.
        This separates all the files, directories, and symlinks along with
        important information about each::
{
'files'; [{
src: '/absolute/path/to/copy/from',
dest: 'relative/path/to/copy/to',
checksum: 'b54ba7f5621240d403f06815f7246006ef8c7d43'
}, ...],
'directories'; [{
src: '/absolute/path/to/copy/from',
dest: 'relative/path/to/copy/to'
}, ...],
'symlinks'; [{
src: '/symlink/target/path',
dest: 'relative/path/to/copy/to'
}, ...],
}
The ``symlinks`` field is only populated if ``local_follow`` is set to False
*or* a circular symlink cannot be dereferenced. The ``checksum`` entry is set
to None if checksum_check=False.
"""
# Convert the path segments into byte strings
r_files = {'files': [], 'directories': [], 'symlinks': []}
def _recurse(topdir, rel_offset, parent_dirs, rel_base=u'', checksum_check=False):
"""
        This is a closure (function utilizing variables from its parent
function's scope) so that we only need one copy of all the containers.
Note that this function uses side effects (See the Variables used from
outer scope).
:arg topdir: The directory we are walking for files
:arg rel_offset: Integer defining how many characters to strip off of
the beginning of a path
        :arg parent_dirs: Directories already being copied that contain this directory.
:kwarg rel_base: String to prepend to the path after ``rel_offset`` is
applied to form the relative path.
Variables used from the outer scope
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:r_files: Dictionary of files in the hierarchy. See the return value
for :func:`walk` for the structure of this dictionary.
:local_follow: Read-only inside of :func:`_recurse`. Whether to follow symlinks
"""
for base_path, sub_folders, files in os.walk(topdir):
for filename in files:
filepath = os.path.join(base_path, filename)
dest_filepath = os.path.join(rel_base, filepath[rel_offset:])
if os.path.islink(filepath):
                    # Dereference the symlink
real_file = os.path.realpath(filepath)
if local_follow and os.path.isfile(real_file):
# Add the file pointed to by the symlink
r_files['files'].append(
{
"src": real_file,
"dest": dest_filepath,
"checksum": _get_local_checksum(checksum_check, real_file)
}
)
else:
# Mark this file as a symlink to copy
r_files['symlinks'].append({"src": os.readlink(filepath), "dest": dest_filepath})
else:
# Just a normal file
r_files['files'].append(
{
"src": filepath,
"dest": dest_filepath,
"checksum": _get_local_checksum(checksum_check, filepath)
}
)
for dirname in sub_folders:
dirpath = os.path.join(base_path, dirname)
dest_dirpath = os.path.join(rel_base, dirpath[rel_offset:])
real_dir = os.path.realpath(dirpath)
dir_stats = os.stat(real_dir)
if os.path.islink(dirpath):
if local_follow:
if (dir_stats.st_dev, dir_stats.st_ino) in parent_dirs:
# Just insert the symlink if the target directory
# exists inside of the copy already
r_files['symlinks'].append({"src": os.readlink(dirpath), "dest": dest_dirpath})
else:
# Walk the dirpath to find all parent directories.
new_parents = set()
parent_dir_list = os.path.dirname(dirpath).split(os.path.sep)
for parent in range(len(parent_dir_list), 0, -1):
parent_stat = os.stat(u'/'.join(parent_dir_list[:parent]))
if (parent_stat.st_dev, parent_stat.st_ino) in parent_dirs:
# Reached the point at which the directory
# tree is already known. Don't add any
# more or we might go to an ancestor that
# isn't being copied.
break
new_parents.add((parent_stat.st_dev, parent_stat.st_ino))
if (dir_stats.st_dev, dir_stats.st_ino) in new_parents:
                                # This was a circular symlink, so add it as
# a symlink
r_files['symlinks'].append({"src": os.readlink(dirpath), "dest": dest_dirpath})
else:
# Walk the directory pointed to by the symlink
r_files['directories'].append({"src": real_dir, "dest": dest_dirpath})
offset = len(real_dir) + 1
_recurse(real_dir, offset, parent_dirs.union(new_parents),
rel_base=dest_dirpath,
checksum_check=checksum_check)
else:
# Add the symlink to the destination
r_files['symlinks'].append({"src": os.readlink(dirpath), "dest": dest_dirpath})
else:
# Just a normal directory
r_files['directories'].append({"src": dirpath, "dest": dest_dirpath})
# Check if the source ends with a "/" so that we know which directory
# level to work at (similar to rsync)
source_trailing_slash = False
if trailing_slash_detector:
source_trailing_slash = trailing_slash_detector(topdir)
else:
source_trailing_slash = topdir.endswith(os.path.sep)
# Calculate the offset needed to strip the base_path to make relative
# paths
if base_path is None:
base_path = topdir
if not source_trailing_slash:
base_path = os.path.dirname(base_path)
if topdir.startswith(base_path):
offset = len(base_path)
# Make sure we're making the new paths relative
if trailing_slash_detector and not trailing_slash_detector(base_path):
offset += 1
elif not base_path.endswith(os.path.sep):
offset += 1
if os.path.islink(topdir) and not local_follow:
r_files['symlinks'] = {"src": os.readlink(topdir), "dest": os.path.basename(topdir)}
return r_files
dir_stats = os.stat(topdir)
parents = frozenset(((dir_stats.st_dev, dir_stats.st_ino),))
# Actually walk the directory hierarchy
_recurse(topdir, offset, parents, checksum_check=checksum_check)
return r_files
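# Illustrative example (added for clarity, not part of the original plugin;
# the directory layout shown is hypothetical). For a tree such as
#   /src/dir/a.txt and /src/dir/sub/b.txt
# a call like
#   _walk_dirs('/src/dir/', local_follow=True, checksum_check=False)
# would return roughly
#   {'files': [{'src': '/src/dir/a.txt', 'dest': 'a.txt', 'checksum': None},
#              {'src': '/src/dir/sub/b.txt', 'dest': 'sub/b.txt', 'checksum': None}],
#    'directories': [{'src': '/src/dir/sub', 'dest': 'sub'}],
#    'symlinks': []}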
def _get_local_checksum(get_checksum, local_path):
if get_checksum:
return checksum(local_path)
else:
return None
class ActionModule(ActionBase):
WIN_PATH_SEPARATOR = "\\"
def _create_content_tempfile(self, content):
''' Create a tempfile containing defined content '''
fd, content_tempfile = tempfile.mkstemp()
f = os.fdopen(fd, 'wb')
content = to_bytes(content)
try:
f.write(content)
except Exception as err:
os.remove(content_tempfile)
raise Exception(err)
finally:
f.close()
return content_tempfile
def _create_zip_tempfile(self, files, directories):
tmpdir = tempfile.mkdtemp()
zip_file_path = os.path.join(tmpdir, "win_copy.zip")
zip_file = zipfile.ZipFile(zip_file_path, "w")
# encoding the file/dir name with base64 so Windows can unzip a unicode
# filename and get the right name, Windows doesn't handle unicode names
# very well
for directory in directories:
directory_path = to_bytes(directory['src'], errors='surrogate_or_strict')
archive_path = to_bytes(directory['dest'], errors='surrogate_or_strict')
encoded_path = to_text(base64.b64encode(archive_path), errors='surrogate_or_strict')
zip_file.write(directory_path, encoded_path, zipfile.ZIP_DEFLATED)
for file in files:
file_path = to_bytes(file['src'], errors='surrogate_or_strict')
archive_path = to_bytes(file['dest'], errors='surrogate_or_strict')
encoded_path = to_text(base64.b64encode(archive_path), errors='surrogate_or_strict')
zip_file.write(file_path, encoded_path, zipfile.ZIP_DEFLATED)
return zip_file_path
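    # Illustrative note (added for clarity, not part of the original plugin):
    # the archive member names above are base64-encoded so that non-ASCII
    # paths survive the zip round trip, e.g. a hypothetical entry
    #   to_text(base64.b64encode(to_bytes(u'sub\\caf\xe9.txt')))
    # is stored under an ASCII-only name and is expected to be decoded again
    # on the Windows side when the archive is exploded.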
def _remove_tempfile_if_content_defined(self, content, content_tempfile):
if content is not None:
os.remove(content_tempfile)
def _copy_single_file(self, local_file, dest, source_rel, task_vars):
if self._play_context.check_mode:
module_return = dict(changed=True)
return module_return
# copy the file across to the server
tmp_path = self._make_tmp_path()
tmp_src = self._connection._shell.join_path(tmp_path, 'source')
self._transfer_file(local_file, tmp_src)
copy_args = self._task.args.copy()
copy_args.update(
dict(
dest=dest,
src=tmp_src,
original_basename=source_rel,
mode="single"
)
)
copy_args.pop('content', None)
copy_result = self._execute_module(module_name="copy", module_args=copy_args, task_vars=task_vars)
self._remove_tmp_path(tmp_path)
return copy_result
def _copy_zip_file(self, dest, files, directories, task_vars):
# create local zip file containing all the files and directories that
# need to be copied to the server
try:
zip_file = self._create_zip_tempfile(files, directories)
except Exception as e:
module_return = dict(
changed=False,
failed=True,
msg="failed to create tmp zip file: %s" % to_text(e),
exception=traceback.format_exc()
)
return module_return
zip_path = self._loader.get_real_file(zip_file)
if self._play_context.check_mode:
module_return = dict(changed=True)
os.remove(zip_path)
os.removedirs(os.path.dirname(zip_path))
return module_return
        # send zip file to remote, file must end in .zip so COM Shell.Application works
tmp_path = self._make_tmp_path()
tmp_src = self._connection._shell.join_path(tmp_path, 'source.zip')
self._transfer_file(zip_path, tmp_src)
# run the explode operation of win_copy on remote
copy_args = self._task.args.copy()
copy_args.update(
dict(
src=tmp_src,
dest=dest,
mode="explode"
)
)
copy_args.pop('content', None)
os.remove(zip_path)
os.removedirs(os.path.dirname(zip_path))
module_return = self._execute_module(module_args=copy_args, task_vars=task_vars)
self._remove_tmp_path(tmp_path)
return module_return
def run(self, tmp=None, task_vars=None):
''' handler for file transfer operations '''
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
source = self._task.args.get('src', None)
content = self._task.args.get('content', None)
dest = self._task.args.get('dest', None)
remote_src = boolean(self._task.args.get('remote_src', False), strict=False)
follow = boolean(self._task.args.get('follow', False), strict=False)
force = boolean(self._task.args.get('force', True), strict=False)
result['src'] = source
result['dest'] = dest
result['failed'] = True
if (source is None and content is None) or dest is None:
result['msg'] = "src (or content) and dest are required"
elif source is not None and content is not None:
result['msg'] = "src and content are mutually exclusive"
elif content is not None and dest is not None and (
dest.endswith(os.path.sep) or dest.endswith(self.WIN_PATH_SEPARATOR)):
result['msg'] = "dest must be a file if content is defined"
else:
del result['failed']
if result.get('failed'):
return result
# If content is defined make a temp file and write the content into it
content_tempfile = None
if content is not None:
try:
# if content comes to us as a dict it should be decoded json.
# We need to encode it back into a string and write it out
if isinstance(content, dict) or isinstance(content, list):
content_tempfile = self._create_content_tempfile(json.dumps(content))
else:
content_tempfile = self._create_content_tempfile(content)
source = content_tempfile
except Exception as err:
result['failed'] = True
result['msg'] = "could not write content temp file: %s" % to_native(err)
return result
# all actions should occur on the remote server, run win_copy module
elif remote_src:
new_module_args = self._task.args.copy()
new_module_args.update(
dict(
mode="remote",
dest=dest,
src=source,
force=force
)
)
new_module_args.pop('content', None)
result.update(self._execute_module(module_args=new_module_args, task_vars=task_vars))
return result
# find_needle returns a path that may not have a trailing slash on a
# directory so we need to find that out first and append at the end
else:
trailing_slash = source.endswith(os.path.sep)
try:
# find in expected paths
source = self._find_needle('files', source)
except AnsibleError as e:
result['failed'] = True
result['msg'] = to_text(e)
result['exception'] = traceback.format_exc()
return result
if trailing_slash != source.endswith(os.path.sep):
if source[-1] == os.path.sep:
source = source[:-1]
else:
source = source + os.path.sep
        # A list of source file tuples (full_path, relative_path) which we will try to copy to the destination
source_files = {'files': [], 'directories': [], 'symlinks': []}
# If source is a directory populate our list else source is a file and translate it to a tuple.
if os.path.isdir(to_bytes(source, errors='surrogate_or_strict')):
result['operation'] = 'folder_copy'
# Get a list of the files we want to replicate on the remote side
source_files = _walk_dirs(source, local_follow=follow,
trailing_slash_detector=self._connection._shell.path_has_trailing_slash,
checksum_check=force)
# If it's recursive copy, destination is always a dir,
# explicitly mark it so (note - win_copy module relies on this).
if not self._connection._shell.path_has_trailing_slash(dest):
dest = "%s%s" % (dest, self.WIN_PATH_SEPARATOR)
check_dest = dest
# Source is a file, add details to source_files dict
else:
result['operation'] = 'file_copy'
original_basename = os.path.basename(source)
result['original_basename'] = original_basename
# check if dest ends with / or \ and append source filename to dest
if self._connection._shell.path_has_trailing_slash(dest):
check_dest = dest
filename = original_basename
result['dest'] = self._connection._shell.join_path(dest, filename)
else:
# replace \\ with / so we can use os.path to get the filename or dirname
unix_path = dest.replace(self.WIN_PATH_SEPARATOR, os.path.sep)
filename = os.path.basename(unix_path)
check_dest = os.path.dirname(unix_path)
file_checksum = _get_local_checksum(force, source)
source_files['files'].append(
dict(
src=source,
dest=filename,
checksum=file_checksum
)
)
result['checksum'] = file_checksum
result['size'] = os.path.getsize(to_bytes(source, errors='surrogate_or_strict'))
# find out the files/directories/symlinks that we need to copy to the server
query_args = self._task.args.copy()
query_args.update(
dict(
mode="query",
dest=check_dest,
force=force,
files=source_files['files'],
directories=source_files['directories'],
symlinks=source_files['symlinks']
)
)
query_args.pop('content', None)
query_return = self._execute_module(module_args=query_args, task_vars=task_vars)
if query_return.get('failed') is True:
result.update(query_return)
return result
if len(query_return['files']) == 1 and len(query_return['directories']) == 0:
# we only need to copy 1 file, don't mess around with zips
file_src = query_return['files'][0]['src']
file_dest = query_return['files'][0]['dest']
copy_result = self._copy_single_file(file_src, dest, file_dest, task_vars)
result['changed'] = True
if copy_result.get('failed') is True:
result['failed'] = True
result['msg'] = "failed to copy file %s: %s" % (file_src, copy_result['msg'])
elif len(query_return['files']) > 0 or len(query_return['directories']) > 0:
# either multiple files or directories need to be copied, compress
# to a zip and 'explode' the zip on the server
# TODO: handle symlinks
result.update(self._copy_zip_file(dest, source_files['files'], source_files['directories'], task_vars))
result['changed'] = True
else:
# no operations need to occur
result['failed'] = False
result['changed'] = False
# remove the content temp file if it was created
self._remove_tempfile_if_content_defined(content, content_tempfile)
return result
| gpl-3.0 | 147,487,347,866,603,360 | 42.84507 | 115 | 0.558258 | false |
hagabbar/pycbc_copy | pycbc/events/stat.py | 1 | 19147 | # Copyright (C) 2016 Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
""" This modules contains functions for calculating coincident ranking
statistic values
"""
import numpy
from . import events
def get_newsnr(trigs):
"""
Calculate newsnr ('reweighted SNR') for a trigs object
Parameters
----------
trigs: dict of numpy.ndarrays
Dictionary holding single detector trigger information.
'chisq_dof', 'snr', and 'chisq' are required keys
Returns
-------
numpy.ndarray
Array of newsnr values
"""
dof = 2. * trigs['chisq_dof'] - 2.
newsnr = events.newsnr(trigs['snr'], trigs['chisq'] / dof)
return numpy.array(newsnr, ndmin=1, dtype=numpy.float32)
def get_newsnr_sgveto(trigs):
"""
    Calculate newsnr re-weighted by the sine-Gaussian veto
Parameters
----------
trigs: dict of numpy.ndarrays
Dictionary holding single detector trigger information.
'chisq_dof', 'snr', and 'chisq' are required keys
Returns
-------
numpy.ndarray
Array of newsnr values
"""
dof = 2. * trigs['chisq_dof'] - 2.
nsnr_sg = events.newsnr_sgveto(trigs['snr'], trigs['chisq'] / dof, trigs['sg_chisq'])
return numpy.array(nsnr_sg, ndmin=1, dtype=numpy.float32)
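# Illustrative usage (added for clarity, not part of the original module;
# the trigger values below are invented):
#
#   trigs = {'snr': numpy.array([8.]), 'chisq': numpy.array([18.]),
#            'chisq_dof': numpy.array([10]), 'sg_chisq': numpy.array([1.])}
#   get_newsnr(trigs)         # plain reweighted SNR
#   get_newsnr_sgveto(trigs)  # additionally penalised by the sine-Gaussian chisq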
class Stat(object):
""" Base class which should be extended to provide a coincident statistic"""
def __init__(self, files):
"""Create a statistic class instance
Parameters
----------
files: list of strs
A list containing the filenames of hdf format files used to help
construct the coincident statistics. The files must have a 'stat'
attribute which is used to associate them with the appropriate
statistic class.
"""
import h5py
self.files = {}
for filename in files:
f = h5py.File(filename, 'r')
stat = f.attrs['stat']
self.files[stat] = f
# Provide the dtype of the single detector method's output
# This is used by background estimation codes that need to maintain
# a buffer of such values.
self.single_dtype = numpy.float32
class NewSNRStatistic(Stat):
""" Calculate the NewSNR coincident detection statistic """
def single(self, trigs):
"""Calculate the single detector statistic, here equal to newsnr
Parameters
----------
trigs: dict of numpy.ndarrays
Returns
-------
numpy.ndarray
The array of single detector values
"""
return get_newsnr(trigs)
def coinc(self, s0, s1, slide, step): # pylint:disable=unused-argument
"""Calculate the coincident detection statistic.
Parameters
----------
s0: numpy.ndarray
Single detector ranking statistic for the first detector.
s1: numpy.ndarray
Single detector ranking statistic for the second detector.
slide: (unused in this statistic)
step: (unused in this statistic)
Returns
-------
numpy.ndarray
Array of coincident ranking statistic values
"""
return (s0**2. + s1**2.) ** 0.5
class NewSNRSGStatistic(NewSNRStatistic):
""" Calculate the NewSNRSG coincident detection statistic """
def single(self, trigs):
"""Calculate the single detector statistic, here equal to newsnr
Parameters
----------
trigs: dict of numpy.ndarrays
Returns
-------
numpy.ndarray
The array of single detector values
"""
return get_newsnr_sgveto(trigs)
class NetworkSNRStatistic(NewSNRStatistic):
"""Same as the NewSNR statistic, but just sum of squares of SNRs"""
def single(self, trigs):
return trigs['snr']
class NewSNRCutStatistic(NewSNRStatistic):
"""Same as the NewSNR statistic, but demonstrates a cut of the triggers"""
def single(self, trigs):
"""Calculate the single detector statistic.
Parameters
----------
trigs: dict of numpy.ndarrays
Dictionary of the single detector trigger information. 'chisq_dof',
'snr', and 'chisq' are required keys
Returns
-------
newsnr: numpy.ndarray
Array of single detector values
"""
newsnr = get_newsnr(trigs)
rchisq = trigs['chisq'] / (2. * trigs['chisq_dof'] - 2.)
newsnr[numpy.logical_and(newsnr < 10, rchisq > 2)] = -1
return newsnr
def coinc(self, s0, s1, slide, step): # pylint:disable=unused-argument
"""Calculate the coincident detection statistic.
Parameters
----------
s0: numpy.ndarray
Single detector ranking statistic for the first detector.
s1: numpy.ndarray
Single detector ranking statistic for the second detector.
slide: (unused in this statistic)
step: (unused in this statistic)
Returns
-------
cstat: numpy.ndarray
Array of coincident ranking statistic values
"""
cstat = (s0**2. + s1**2.) ** 0.5
cstat[s0==-1] = 0
cstat[s1==-1] = 0
return cstat
class PhaseTDStatistic(NewSNRStatistic):
"""Statistic that re-weights combined newsnr using coinc parameters.
The weighting is based on the PDF of time delays, phase differences and
amplitude ratios between triggers in different ifos.
"""
def __init__(self, files):
NewSNRStatistic.__init__(self, files)
self.hist = self.files['phasetd_newsnr']['map'][:]
#normalize so that peak has no effect on newsnr
self.hist = self.hist / float(self.hist.max())
self.hist = numpy.log(self.hist)
# Bin boundaries are stored in the hdf file
self.tbins = self.files['phasetd_newsnr']['tbins'][:]
self.pbins = self.files['phasetd_newsnr']['pbins'][:]
self.sbins = self.files['phasetd_newsnr']['sbins'][:]
self.rbins = self.files['phasetd_newsnr']['rbins'][:]
self.single_dtype = [('snglstat', numpy.float32),
('coa_phase', numpy.float32),
('end_time', numpy.float64),
('sigmasq', numpy.float32),
('snr', numpy.float32)]
def single(self, trigs):
"""
Calculate the single detector statistic and assemble other parameters
Parameters
----------
trigs: dict of numpy.ndarrays
Dictionary holding single detector trigger information.
'chisq_dof', 'snr', 'chisq', 'coa_phase', 'end_time', and 'sigmasq'
are required keys.
Returns
-------
numpy.ndarray
Array of single detector parameter values
"""
sngl_stat = get_newsnr(trigs)
singles = numpy.zeros(len(sngl_stat), dtype=self.single_dtype)
singles['snglstat'] = sngl_stat
singles['coa_phase'] = trigs['coa_phase']
singles['end_time'] = trigs['end_time']
singles['sigmasq'] = trigs['sigmasq']
singles['snr'] = trigs['snr']
return numpy.array(singles, ndmin=1)
def logsignalrate(self, s0, s1, slide, step):
"""Calculate the normalized log rate density of signals via lookup"""
td = numpy.array(s0['end_time'] - s1['end_time'] - slide*step, ndmin=1)
pd = numpy.array((s0['coa_phase'] - s1['coa_phase']) % \
(2. * numpy.pi), ndmin=1)
rd = numpy.array((s0['sigmasq'] / s1['sigmasq']) ** 0.5, ndmin=1)
sn0 = numpy.array(s0['snr'], ndmin=1)
sn1 = numpy.array(s1['snr'], ndmin=1)
snr0 = sn0 * 1
snr1 = sn1 * 1
snr0[rd > 1] = sn1[rd > 1]
snr1[rd > 1] = sn0[rd > 1]
rd[rd > 1] = 1. / rd[rd > 1]
# Find which bin each coinc falls into
tv = numpy.searchsorted(self.tbins, td) - 1
pv = numpy.searchsorted(self.pbins, pd) - 1
s0v = numpy.searchsorted(self.sbins, snr0) - 1
s1v = numpy.searchsorted(self.sbins, snr1) - 1
rv = numpy.searchsorted(self.rbins, rd) - 1
        # Enforce that points fit into the bin boundaries: if a point lies
# outside the boundaries it is pushed back to the nearest bin.
tv[tv < 0] = 0
tv[tv >= len(self.tbins) - 1] = len(self.tbins) - 2
pv[pv < 0] = 0
pv[pv >= len(self.pbins) - 1] = len(self.pbins) - 2
s0v[s0v < 0] = 0
s0v[s0v >= len(self.sbins) - 1] = len(self.sbins) - 2
s1v[s1v < 0] = 0
s1v[s1v >= len(self.sbins) - 1] = len(self.sbins) - 2
rv[rv < 0] = 0
rv[rv >= len(self.rbins) - 1] = len(self.rbins) - 2
return self.hist[tv, pv, s0v, s1v, rv]
def coinc(self, s0, s1, slide, step):
"""
Calculate the coincident detection statistic.
Parameters
----------
s0: numpy.ndarray
Single detector ranking statistic for the first detector.
s1: numpy.ndarray
Single detector ranking statistic for the second detector.
slide: numpy.ndarray
Array of ints. These represent the multiple of the timeslide
interval to bring a pair of single detector triggers into coincidence.
step: float
The timeslide interval in seconds.
Returns
-------
coinc_stat: numpy.ndarray
An array of the coincident ranking statistic values
"""
rstat = s0['snglstat']**2. + s1['snglstat']**2.
cstat = rstat + 2. * self.logsignalrate(s0, s1, slide, step)
cstat[cstat < 0] = 0
return cstat ** 0.5
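# Note added for clarity (not part of the original module): for the
# PhaseTDStatistic above, the coincident ranking is effectively
#   sqrt(newsnr_0**2 + newsnr_1**2 + 2 * log p_signal(dt, dphi, snrs, sigma ratio))
# i.e. the quadrature-summed single-detector newsnr re-weighted by the
# normalised log signal histogram looked up in logsignalrate().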
class ExpFitStatistic(NewSNRStatistic):
"""Detection statistic using an exponential falloff noise model.
Statistic approximates the negative log noise coinc rate density per
template over single-ifo newsnr values.
"""
def __init__(self, files):
if not len(files):
raise RuntimeError("Can't find any statistic files !")
NewSNRStatistic.__init__(self, files)
# the stat file attributes are hard-coded as '%{ifo}-fit_coeffs'
parsed_attrs = [f.split('-') for f in self.files.keys()]
self.ifos = [at[0] for at in parsed_attrs if
(len(at) == 2 and at[1] == 'fit_coeffs')]
if not len(self.ifos):
raise RuntimeError("None of the statistic files has the required "
"attribute called {ifo}-fit_coeffs !")
self.fits_by_tid = {}
self.alphamax = {}
for i in self.ifos:
self.fits_by_tid[i] = self.assign_fits(i)
self.get_ref_vals(i)
self.get_newsnr = get_newsnr
def assign_fits(self, ifo):
coeff_file = self.files[ifo+'-fit_coeffs']
template_id = coeff_file['template_id'][:]
alphas = coeff_file['fit_coeff'][:]
lambdas = coeff_file['count_above_thresh'][:]
# the template_ids and fit coeffs are stored in an arbitrary order
# create new arrays in template_id order for easier recall
tid_sort = numpy.argsort(template_id)
return {'alpha':alphas[tid_sort], 'lambda':lambdas[tid_sort],
'thresh':coeff_file.attrs['stat_threshold']}
def get_ref_vals(self, ifo):
self.alphamax[ifo] = self.fits_by_tid[ifo]['alpha'].max()
def find_fits(self, trigs):
"""Get fit coeffs for a specific ifo and template id"""
tnum = trigs.template_num
# fits_by_tid is a dictionary of dictionaries of arrays
# indexed by ifo / coefficient name / template_id
alphai = self.fits_by_tid[trigs.ifo]['alpha'][tnum]
lambdai = self.fits_by_tid[trigs.ifo]['lambda'][tnum]
thresh = self.fits_by_tid[trigs.ifo]['thresh']
return alphai, lambdai, thresh
def lognoiserate(self, trigs):
"""
Calculate the log noise rate density over single-ifo newsnr
Read in single trigger information, make the newsnr statistic
and rescale by the fitted coefficients alpha and lambda
"""
alphai, lambdai, thresh = self.find_fits(trigs)
newsnr = self.get_newsnr(trigs)
# alphai is constant of proportionality between single-ifo newsnr and
# negative log noise likelihood in given template
# lambdai is rate of trigs in given template compared to average
# thresh is stat threshold used in given ifo
lognoisel = - alphai * (newsnr - thresh) + numpy.log(alphai) + \
numpy.log(lambdai)
return numpy.array(lognoisel, ndmin=1, dtype=numpy.float32)
def single(self, trigs):
"""Single-detector statistic, here just equal to the log noise rate"""
return self.lognoiserate(trigs)
def coinc(self, s0, s1, slide, step): # pylint:disable=unused-argument
"""Calculate the final coinc ranking statistic"""
# Approximate log likelihood ratio by summing single-ifo negative
# log noise likelihoods
loglr = - s0 - s1
# add squares of threshold stat values via idealized Gaussian formula
threshes = [self.fits_by_tid[i]['thresh'] for i in self.ifos]
loglr += sum([t**2. / 2. for t in threshes])
# convert back to a coinc-SNR-like statistic
# via log likelihood ratio \propto rho_c^2 / 2
return (2. * loglr) ** 0.5
class ExpFitCombinedSNR(ExpFitStatistic):
"""Reworking of ExpFitStatistic designed to resemble network SNR
Use a monotonic function of the negative log noise rate density which
approximates combined (new)snr for coincs with similar newsnr in each ifo
"""
def __init__(self, files):
ExpFitStatistic.__init__(self, files)
# for low-mass templates the exponential slope alpha \approx 6
self.alpharef = 6.
def use_alphamax(self):
# take reference slope as the harmonic mean of individual ifo slopes
inv_alphas = [1./self.alphamax[i] for i in self.ifos]
self.alpharef = (sum(inv_alphas)/len(inv_alphas))**-1
        print(self.alpharef)
def single(self, trigs):
logr_n = self.lognoiserate(trigs)
_, _, thresh = self.find_fits(trigs)
# shift by log of reference slope alpha
logr_n += -1. * numpy.log(self.alpharef)
# add threshold and rescale by reference slope
stat = thresh - (logr_n / self.alpharef)
return numpy.array(stat, ndmin=1, dtype=numpy.float32)
def coinc(self, s0, s1, slide, step): # pylint:disable=unused-argument
# scale by 1/sqrt(2) to resemble network SNR
return (s0 + s1) / (2.**0.5)
class PhaseTDExpFitStatistic(PhaseTDStatistic, ExpFitCombinedSNR):
"""Statistic combining exponential noise model with signal histogram PDF"""
def __init__(self, files):
# read in both foreground PDF and background fit info
ExpFitCombinedSNR.__init__(self, files)
# need the self.single_dtype value from PhaseTDStatistic
PhaseTDStatistic.__init__(self, files)
def single(self, trigs):
# same single-ifo stat as ExpFitCombinedSNR
sngl_stat = ExpFitCombinedSNR.single(self, trigs)
singles = numpy.zeros(len(sngl_stat), dtype=self.single_dtype)
singles['snglstat'] = sngl_stat
singles['coa_phase'] = trigs['coa_phase']
singles['end_time'] = trigs['end_time']
singles['sigmasq'] = trigs['sigmasq']
singles['snr'] = trigs['snr']
return numpy.array(singles, ndmin=1)
def coinc(self, s0, s1, slide, step):
# logsignalrate function inherited from PhaseTDStatistic
logr_s = self.logsignalrate(s0, s1, slide, step)
# rescale by ExpFitCombinedSNR reference slope as for sngl stat
cstat = s0['snglstat'] + s1['snglstat'] + logr_s / self.alpharef
# cut off underflowing and very small values
cstat[cstat < 8.] = 8.
# scale to resemble network SNR
return cstat / (2.**0.5)
class PhaseTDExpFitSGStatistic(PhaseTDExpFitStatistic):
"""Statistic combining exponential noise model with signal histogram PDF
and adding the sine-Gaussian veto to the single detector ranking
"""
def __init__(self, files):
PhaseTDExpFitStatistic.__init__(self, files)
self.get_newsnr = get_newsnr_sgveto
class MaxContTradNewSNRStatistic(NewSNRStatistic):
"""Combination of NewSNR with the power chisq and auto chisq"""
def single(self, trigs):
""" Calculate the single detector statistic.
Parameters
----------
trigs: dict of numpy.ndarrays
Dictionary of the single detector trigger information. 'chisq_dof',
'snr', 'cont_chisq', 'cont_chisq_dof', and 'chisq' are required arrays
for this statistic.
Returns
-------
stat: numpy.ndarray
The array of single detector values
"""
chisq_newsnr = get_newsnr(trigs)
rautochisq = trigs['cont_chisq'] / trigs['cont_chisq_dof']
autochisq_newsnr = events.newsnr(trigs['snr'], rautochisq)
return numpy.array(numpy.minimum(chisq_newsnr, autochisq_newsnr,
dtype=numpy.float32), ndmin=1, copy=False)
statistic_dict = {
'newsnr': NewSNRStatistic,
'network_snr': NetworkSNRStatistic,
'newsnr_cut': NewSNRCutStatistic,
'phasetd_newsnr': PhaseTDStatistic,
'exp_fit_stat': ExpFitStatistic,
'exp_fit_csnr': ExpFitCombinedSNR,
'phasetd_exp_fit_stat': PhaseTDExpFitStatistic,
'max_cont_trad_newsnr': MaxContTradNewSNRStatistic,
'phasetd_exp_fit_stat_sgveto': PhaseTDExpFitSGStatistic,
'newsnr_sgveto': NewSNRSGStatistic
}
def get_statistic(stat):
"""
Error-handling sugar around dict lookup
Parameters
----------
stat : string
Name of the statistic
Returns
-------
class
Subclass of Stat base class
Raises
------
RuntimeError
If the string is not recognized as corresponding to a Stat subclass
"""
try:
return statistic_dict[stat]
except KeyError:
raise RuntimeError('%s is not an available detection statistic' % stat)
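if __name__ == '__main__':
    # Minimal usage sketch added for illustration only (not part of the
    # original module); the trigger values are invented and h5py must be
    # importable for Stat.__init__ even with an empty file list.
    example_trigs = {'snr': numpy.array([8.]),
                     'chisq': numpy.array([18.]),
                     'chisq_dof': numpy.array([10])}
    example_stat = get_statistic('newsnr')([])
    sngl = example_stat.single(example_trigs)
    # slide/step are unused by this statistic, so None is acceptable here
    print(example_stat.coinc(sngl, sngl, None, None))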
| gpl-3.0 | -6,008,263,459,739,687,000 | 33.876138 | 89 | 0.604429 | false |
brad-kaiser/spark | python/pyspark/sql/context.py | 10 | 21640 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
if sys.version >= '3':
basestring = unicode = str
from pyspark import since, _NoValue
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.session import _monkey_patch_RDD, SparkSession
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import IntegerType, Row, StringType
from pyspark.sql.udf import UDFRegistration
from pyspark.sql.utils import install_exception_handler
__all__ = ["SQLContext", "HiveContext"]
class SQLContext(object):
"""The entry point for working with structured data (rows and columns) in Spark, in Spark 1.x.
As of Spark 2.0, this is replaced by :class:`SparkSession`. However, we are keeping the class
here for backward compatibility.
    A SQLContext can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
:param sparkContext: The :class:`SparkContext` backing this SQLContext.
:param sparkSession: The :class:`SparkSession` around which this SQLContext wraps.
:param jsqlContext: An optional JVM Scala SQLContext. If set, we do not instantiate a new
SQLContext in the JVM, instead we make all calls to this object.
"""
_instantiatedContext = None
@ignore_unicode_prefix
def __init__(self, sparkContext, sparkSession=None, jsqlContext=None):
"""Creates a new SQLContext.
>>> from datetime import datetime
>>> sqlContext = SQLContext(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> sqlContext.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if sparkSession is None:
sparkSession = SparkSession.builder.getOrCreate()
if jsqlContext is None:
jsqlContext = sparkSession._jwrapped
self.sparkSession = sparkSession
self._jsqlContext = jsqlContext
_monkey_patch_RDD(self.sparkSession)
install_exception_handler()
if SQLContext._instantiatedContext is None:
SQLContext._instantiatedContext = self
@property
def _ssql_ctx(self):
"""Accessor for the JVM Spark SQL context.
Subclasses can override this property to provide their own
JVM Contexts.
"""
return self._jsqlContext
@classmethod
@since(1.6)
def getOrCreate(cls, sc):
"""
Get the existing SQLContext or create a new one with given SparkContext.
:param sc: SparkContext
"""
if cls._instantiatedContext is None:
jsqlContext = sc._jvm.SQLContext.getOrCreate(sc._jsc.sc())
sparkSession = SparkSession(sc, jsqlContext.sparkSession())
cls(sc, sparkSession, jsqlContext)
return cls._instantiatedContext
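    # Illustrative usage (added note, not part of the original docstring;
    # assumes an existing SparkContext named ``sc``):
    #
    #   ctx = SQLContext.getOrCreate(sc)
    #   ctx is SQLContext.getOrCreate(sc)   # True -- the same instance is reused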
@since(1.6)
def newSession(self):
"""
Returns a new SQLContext as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self.sparkSession.newSession())
@since(1.3)
def setConf(self, key, value):
"""Sets the given Spark SQL configuration property.
"""
self.sparkSession.conf.set(key, value)
@ignore_unicode_prefix
@since(1.3)
def getConf(self, key, defaultValue=_NoValue):
"""Returns the value of Spark SQL configuration property for the given key.
If the key is not set and defaultValue is set, return
defaultValue. If the key is not set and defaultValue is not set, return
the system default value.
>>> sqlContext.getConf("spark.sql.shuffle.partitions")
u'200'
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'10'
>>> sqlContext.setConf("spark.sql.shuffle.partitions", u"50")
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'50'
"""
return self.sparkSession.conf.get(key, defaultValue)
@property
@since("1.3.1")
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
return self.sparkSession.udf
@since(1.4)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> sqlContext.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> sqlContext.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
return self.sparkSession.range(start, end, step, numPartitions)
@since(1.2)
def registerFunction(self, name, f, returnType=None):
"""An alias for :func:`spark.udf.register`.
See :meth:`pyspark.sql.UDFRegistration.register`.
.. note:: Deprecated in 2.3.0. Use :func:`spark.udf.register` instead.
"""
warnings.warn(
"Deprecated in 2.3.0. Use spark.udf.register instead.",
DeprecationWarning)
return self.sparkSession.udf.register(name, f, returnType)
@since(2.1)
def registerJavaFunction(self, name, javaClassName, returnType=None):
"""An alias for :func:`spark.udf.registerJavaFunction`.
See :meth:`pyspark.sql.UDFRegistration.registerJavaFunction`.
.. note:: Deprecated in 2.3.0. Use :func:`spark.udf.registerJavaFunction` instead.
"""
warnings.warn(
"Deprecated in 2.3.0. Use spark.udf.registerJavaFunction instead.",
DeprecationWarning)
return self.sparkSession.udf.registerJavaFunction(name, javaClassName, returnType)
# TODO(andrew): delete this once we refactor things to take in SparkSession
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
return self.sparkSession._inferSchema(rdd, samplingRatio)
@since(1.3)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
        If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation(e.g. :class:`Row`,
:class:`tuple`, ``int``, ``boolean``, etc.), or :class:`list`, or
:class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is None. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`.
We can also use ``int`` as a short name for :class:`pyspark.sql.types.IntegerType`.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.0
The ``schema`` parameter can be a :class:`pyspark.sql.types.DataType` or a
datatype string after 2.0.
If it's not a :class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` and each record will also be wrapped into a tuple.
.. versionchanged:: 2.1
Added verifySchema.
>>> l = [('Alice', 1)]
>>> sqlContext.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> sqlContext.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> sqlContext.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> sqlContext.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = sqlContext.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = sqlContext.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = sqlContext.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> sqlContext.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> sqlContext.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> sqlContext.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
return self.sparkSession.createDataFrame(data, schema, samplingRatio, verifySchema)
@since(1.3)
def registerDataFrameAsTable(self, df, tableName):
"""Registers the given :class:`DataFrame` as a temporary table in the catalog.
Temporary tables exist only during the lifetime of this instance of :class:`SQLContext`.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
"""
df.createOrReplaceTempView(tableName)
@since(1.6)
def dropTempTable(self, tableName):
""" Remove the temp table from catalog.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> sqlContext.dropTempTable("table1")
"""
self.sparkSession.catalog.dropTempView(tableName)
@since(1.3)
def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates an external table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
created external table.
:return: :class:`DataFrame`
"""
return self.sparkSession.catalog.createExternalTable(
tableName, path, source, schema, **options)
@ignore_unicode_prefix
@since(1.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return self.sparkSession.sql(sqlQuery)
@since(1.0)
def table(self, tableName):
"""Returns the specified table or view as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return self.sparkSession.table(tableName)
@ignore_unicode_prefix
@since(1.3)
def tables(self, dbName=None):
"""Returns a :class:`DataFrame` containing names of tables in the given database.
If ``dbName`` is not specified, the current database will be used.
The returned DataFrame has two columns: ``tableName`` and ``isTemporary``
(a column with :class:`BooleanType` indicating if a table is a temporary one or not).
:param dbName: string, name of the database to use.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.tables()
>>> df2.filter("tableName = 'table1'").first()
Row(database=u'', tableName=u'table1', isTemporary=True)
"""
if dbName is None:
return DataFrame(self._ssql_ctx.tables(), self)
else:
return DataFrame(self._ssql_ctx.tables(dbName), self)
@since(1.3)
def tableNames(self, dbName=None):
"""Returns a list of names of tables in the database ``dbName``.
:param dbName: string, name of the database to use. Default to the current database.
:return: list of table names, in string
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> "table1" in sqlContext.tableNames()
True
>>> "table1" in sqlContext.tableNames("default")
True
"""
if dbName is None:
return [name for name in self._ssql_ctx.tableNames()]
else:
return [name for name in self._ssql_ctx.tableNames(dbName)]
@since(1.0)
def cacheTable(self, tableName):
"""Caches the specified table in-memory."""
self._ssql_ctx.cacheTable(tableName)
@since(1.0)
def uncacheTable(self, tableName):
"""Removes the specified table from the in-memory cache."""
self._ssql_ctx.uncacheTable(tableName)
@since(1.3)
def clearCache(self):
"""Removes all cached tables from the in-memory cache. """
self._ssql_ctx.clearCache()
@property
@since(1.4)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Evolving.
:return: :class:`DataStreamReader`
>>> text_sdf = sqlContext.readStream.text(tempfile.mkdtemp())
>>> text_sdf.isStreaming
True
"""
return DataStreamReader(self)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Evolving.
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._ssql_ctx.streams())
class HiveContext(SQLContext):
"""A variant of Spark SQL that integrates with data stored in Hive.
Configuration for Hive is read from ``hive-site.xml`` on the classpath.
It supports running both SQL and HiveQL commands.
:param sparkContext: The SparkContext to wrap.
:param jhiveContext: An optional JVM Scala HiveContext. If set, we do not instantiate a new
:class:`HiveContext` in the JVM, instead we make all calls to this object.
.. note:: Deprecated in 2.0.0. Use SparkSession.builder.enableHiveSupport().getOrCreate().
"""
def __init__(self, sparkContext, jhiveContext=None):
warnings.warn(
"HiveContext is deprecated in Spark 2.0.0. Please use " +
"SparkSession.builder.enableHiveSupport().getOrCreate() instead.",
DeprecationWarning)
if jhiveContext is None:
sparkSession = SparkSession.builder.enableHiveSupport().getOrCreate()
else:
sparkSession = SparkSession(sparkContext, jhiveContext.sparkSession())
SQLContext.__init__(self, sparkContext, sparkSession, jhiveContext)
@classmethod
def _createForTesting(cls, sparkContext):
"""(Internal use only) Create a new HiveContext for testing.
All test code that touches HiveContext *must* go through this method. Otherwise,
you may end up launching multiple derby instances and encounter with incredibly
confusing error messages.
"""
jsc = sparkContext._jsc.sc()
jtestHive = sparkContext._jvm.org.apache.spark.sql.hive.test.TestHiveContext(jsc, False)
return cls(sparkContext, jtestHive)
def refreshTable(self, tableName):
"""Invalidate and refresh all the cached the metadata of the given
table. For performance reasons, Spark SQL or the external data source
library it uses might cache certain metadata about a table, such as the
location of blocks. When those change outside of Spark SQL, users should
call this function to invalidate the cache.
"""
self._ssql_ctx.refreshTable(tableName)
def _test():
import os
import doctest
import tempfile
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext
import pyspark.sql.context
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.context.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['tempfile'] = tempfile
globs['os'] = os
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")]
)
globs['df'] = rdd.toDF()
jsonStrings = [
'{"field1": 1, "field2": "row1", "field3":{"field4":11}}',
'{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},'
'"field6":[{"field7": "row2"}]}',
'{"field1" : null, "field2": "row3", '
'"field3":{"field4":33, "field5": []}}'
]
globs['jsonStrings'] = jsonStrings
globs['json'] = sc.parallelize(jsonStrings)
(failure_count, test_count) = doctest.testmod(
pyspark.sql.context, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 | 2,975,415,205,048,406,000 | 38.345455 | 100 | 0.629852 | false |
DraXus/andaluciapeople | tracking/models.py | 1 | 2276 | from django.db import models
from django.contrib.auth.models import User
from tracking import utils
from datetime import datetime, timedelta
class VisitorManager(models.Manager):
def active(self, timeout=None):
if not timeout:
timeout = utils.get_timeout()
now = datetime.now()
cutoff = now - timedelta(minutes=timeout)
return self.get_query_set().filter(last_update__gte=cutoff)
class Visitor(models.Model):
session_key = models.CharField(max_length=40)
ip_address = models.CharField(max_length=20)
user = models.ForeignKey(User, null=True)
user_agent = models.CharField(max_length=255)
referrer = models.CharField(max_length=255)
url = models.CharField(max_length=255)
page_views = models.PositiveIntegerField(default=0)
session_start = models.DateTimeField()
last_update = models.DateTimeField()
objects = VisitorManager()
def _time_on_site(self):
if self.session_start:
seconds = (self.last_update - self.session_start).seconds
hours = seconds / 3600
seconds -= hours * 3600
minutes = seconds / 60
seconds -= minutes * 60
return u'%i:%02i:%02i' % (hours, minutes, seconds)
else:
return u'unknown'
time_on_site = property(_time_on_site)
class Meta:
ordering = ['-last_update']
unique_together = ['session_key', 'ip_address']
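# Illustrative usage (added for clarity, not part of the original module;
# assumes the models are migrated and some Visitor rows exist):
#
#   Visitor.objects.active(timeout=10)  # visitors updated in the last 10 minutes
#   visitor.time_on_site                # e.g. u'0:05:23'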
class UntrackedUserAgent(models.Model):
keyword = models.CharField(max_length=100, help_text='Part or all of a user-agent string. For example, "Googlebot" here will be found in "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)" and that visitor will not be tracked.')
def __unicode__(self):
return self.keyword
class Meta:
ordering = ['keyword']
verbose_name = 'Untracked User-Agent'
verbose_name_plural = 'Untracked User-Agents'
class BannedIP(models.Model):
ip_address = models.IPAddressField('IP Address', help_text='The IP address that should be banned')
def __unicode__(self):
return self.ip_address
class Meta:
ordering = ['ip_address']
verbose_name = 'Banned IP'
verbose_name_plural = 'Banned IPs' | agpl-3.0 | 5,725,694,590,756,589,000 | 32.985075 | 256 | 0.650264 | false |
iproduct/course-social-robotics | 11-dnn-keras/venv/Lib/site-packages/IPython/core/tests/test_pylabtools.py | 2 | 7728 | """Tests for pylab tools module.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from io import UnsupportedOperation, BytesIO
import matplotlib
matplotlib.use('Agg')
from matplotlib.figure import Figure
from nose import SkipTest
import nose.tools as nt
from matplotlib import pyplot as plt
import numpy as np
from IPython.core.getipython import get_ipython
from IPython.core.interactiveshell import InteractiveShell
from IPython.core.display import _PNG, _JPEG
from .. import pylabtools as pt
from IPython.testing import decorators as dec
def test_figure_to_svg():
# simple empty-figure test
fig = plt.figure()
nt.assert_equal(pt.print_figure(fig, 'svg'), None)
plt.close('all')
# simple check for at least svg-looking output
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot([1,2,3])
plt.draw()
svg = pt.print_figure(fig, 'svg')[:100].lower()
nt.assert_in(u'doctype svg', svg)
def _check_pil_jpeg_bytes():
"""Skip if PIL can't write JPEGs to BytesIO objects"""
# PIL's JPEG plugin can't write to BytesIO objects
# Pillow fixes this
from PIL import Image
buf = BytesIO()
img = Image.new("RGB", (4,4))
try:
img.save(buf, 'jpeg')
except Exception as e:
ename = e.__class__.__name__
raise SkipTest("PIL can't write JPEG to BytesIO: %s: %s" % (ename, e))
@dec.skip_without("PIL.Image")
def test_figure_to_jpeg():
_check_pil_jpeg_bytes()
# simple check for at least jpeg-looking output
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot([1,2,3])
plt.draw()
jpeg = pt.print_figure(fig, 'jpeg', pil_kwargs={'optimize': 50})[:100].lower()
assert jpeg.startswith(_JPEG)
def test_retina_figure():
# simple empty-figure test
fig = plt.figure()
nt.assert_equal(pt.retina_figure(fig), None)
plt.close('all')
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot([1,2,3])
plt.draw()
png, md = pt.retina_figure(fig)
assert png.startswith(_PNG)
nt.assert_in('width', md)
nt.assert_in('height', md)
_fmt_mime_map = {
'png': 'image/png',
'jpeg': 'image/jpeg',
'pdf': 'application/pdf',
'retina': 'image/png',
'svg': 'image/svg+xml',
}
def test_select_figure_formats_str():
ip = get_ipython()
for fmt, active_mime in _fmt_mime_map.items():
pt.select_figure_formats(ip, fmt)
for mime, f in ip.display_formatter.formatters.items():
if mime == active_mime:
nt.assert_in(Figure, f)
else:
nt.assert_not_in(Figure, f)
def test_select_figure_formats_kwargs():
ip = get_ipython()
kwargs = dict(quality=10, bbox_inches='tight')
pt.select_figure_formats(ip, 'png', **kwargs)
formatter = ip.display_formatter.formatters['image/png']
f = formatter.lookup_by_type(Figure)
cell = f.__closure__[0].cell_contents
nt.assert_equal(cell, kwargs)
# check that the formatter doesn't raise
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot([1,2,3])
plt.draw()
formatter.enabled = True
png = formatter(fig)
assert png.startswith(_PNG)
def test_select_figure_formats_set():
ip = get_ipython()
for fmts in [
{'png', 'svg'},
['png'],
('jpeg', 'pdf', 'retina'),
{'svg'},
]:
active_mimes = {_fmt_mime_map[fmt] for fmt in fmts}
pt.select_figure_formats(ip, fmts)
for mime, f in ip.display_formatter.formatters.items():
if mime in active_mimes:
nt.assert_in(Figure, f)
else:
nt.assert_not_in(Figure, f)
def test_select_figure_formats_bad():
ip = get_ipython()
with nt.assert_raises(ValueError):
pt.select_figure_formats(ip, 'foo')
with nt.assert_raises(ValueError):
pt.select_figure_formats(ip, {'png', 'foo'})
with nt.assert_raises(ValueError):
pt.select_figure_formats(ip, ['retina', 'pdf', 'bar', 'bad'])
def test_import_pylab():
ns = {}
pt.import_pylab(ns, import_all=False)
nt.assert_true('plt' in ns)
nt.assert_equal(ns['np'], np)
class TestPylabSwitch(object):
class Shell(InteractiveShell):
def enable_gui(self, gui):
pass
def setup(self):
import matplotlib
def act_mpl(backend):
matplotlib.rcParams['backend'] = backend
# Save rcParams since they get modified
self._saved_rcParams = matplotlib.rcParams
self._saved_rcParamsOrig = matplotlib.rcParamsOrig
matplotlib.rcParams = dict(backend='Qt4Agg')
matplotlib.rcParamsOrig = dict(backend='Qt4Agg')
# Mock out functions
self._save_am = pt.activate_matplotlib
pt.activate_matplotlib = act_mpl
self._save_ip = pt.import_pylab
pt.import_pylab = lambda *a,**kw:None
self._save_cis = pt.configure_inline_support
pt.configure_inline_support = lambda *a,**kw:None
def teardown(self):
pt.activate_matplotlib = self._save_am
pt.import_pylab = self._save_ip
pt.configure_inline_support = self._save_cis
import matplotlib
matplotlib.rcParams = self._saved_rcParams
matplotlib.rcParamsOrig = self._saved_rcParamsOrig
def test_qt(self):
s = self.Shell()
gui, backend = s.enable_matplotlib(None)
nt.assert_equal(gui, 'qt')
nt.assert_equal(s.pylab_gui_select, 'qt')
gui, backend = s.enable_matplotlib('inline')
nt.assert_equal(gui, 'inline')
nt.assert_equal(s.pylab_gui_select, 'qt')
gui, backend = s.enable_matplotlib('qt')
nt.assert_equal(gui, 'qt')
nt.assert_equal(s.pylab_gui_select, 'qt')
gui, backend = s.enable_matplotlib('inline')
nt.assert_equal(gui, 'inline')
nt.assert_equal(s.pylab_gui_select, 'qt')
gui, backend = s.enable_matplotlib()
nt.assert_equal(gui, 'qt')
nt.assert_equal(s.pylab_gui_select, 'qt')
def test_inline(self):
s = self.Shell()
gui, backend = s.enable_matplotlib('inline')
nt.assert_equal(gui, 'inline')
nt.assert_equal(s.pylab_gui_select, None)
gui, backend = s.enable_matplotlib('inline')
nt.assert_equal(gui, 'inline')
nt.assert_equal(s.pylab_gui_select, None)
gui, backend = s.enable_matplotlib('qt')
nt.assert_equal(gui, 'qt')
nt.assert_equal(s.pylab_gui_select, 'qt')
def test_inline_twice(self):
"Using '%matplotlib inline' twice should not reset formatters"
ip = self.Shell()
gui, backend = ip.enable_matplotlib('inline')
nt.assert_equal(gui, 'inline')
fmts = {'png'}
active_mimes = {_fmt_mime_map[fmt] for fmt in fmts}
pt.select_figure_formats(ip, fmts)
gui, backend = ip.enable_matplotlib('inline')
nt.assert_equal(gui, 'inline')
for mime, f in ip.display_formatter.formatters.items():
if mime in active_mimes:
nt.assert_in(Figure, f)
else:
nt.assert_not_in(Figure, f)
def test_qt_gtk(self):
s = self.Shell()
gui, backend = s.enable_matplotlib('qt')
nt.assert_equal(gui, 'qt')
nt.assert_equal(s.pylab_gui_select, 'qt')
gui, backend = s.enable_matplotlib('gtk')
nt.assert_equal(gui, 'qt')
nt.assert_equal(s.pylab_gui_select, 'qt')
def test_no_gui_backends():
for k in ['agg', 'svg', 'pdf', 'ps']:
assert k not in pt.backend2gui
def test_figure_no_canvas():
fig = Figure()
fig.canvas = None
pt.print_figure(fig)
| gpl-2.0 | 3,916,106,620,152,399,000 | 29.1875 | 82 | 0.608178 | false |
wido/cloudstack | test/integration/smoke/test_certauthority_root.py | 2 | 9341 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from OpenSSL.crypto import FILETYPE_PEM, verify, X509
PUBKEY_VERIFY=True
try:
from OpenSSL.crypto import load_publickey
except ImportError:
PUBKEY_VERIFY=False
class TestCARootProvider(cloudstackTestCase):
@classmethod
def setUpClass(cls):
testClient = super(TestCARootProvider, cls).getClsTestClient()
cls.apiclient = testClient.getApiClient()
cls.services = testClient.getParsedTestDataConfig()
cls.hypervisor = cls.testClient.getHypervisorInfo()
cls.cleanup = []
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.apiclient, cls.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def getUpSystemVMHosts(self, hostId=None):
hosts = list_hosts(
self.apiclient,
type='SecondaryStorageVM',
state='Up',
resourcestate='Enabled',
id=hostId
)
return hosts
@attr(tags=['advanced', 'simulator', 'basic', 'sg'], required_hardware=False)
def test_list_ca_providers(self):
"""
Tests default ca providers list
"""
cmd = listCAProviders.listCAProvidersCmd()
response = self.apiclient.listCAProviders(cmd)
self.assertEqual(len(response), 1)
self.assertEqual(response[0].name, 'root')
def getCaCertificate(self):
cmd = listCaCertificate.listCaCertificateCmd()
cmd.provider = 'root'
response = self.apiclient.listCaCertificate(cmd)
return response.cacertificates.certificate
@attr(tags=['advanced', 'simulator', 'basic', 'sg'], required_hardware=False)
def test_list_ca_certificate(self):
"""
Tests the ca certificate
"""
certificate = self.getCaCertificate()
self.assertTrue(len(certificate) > 0)
cert = x509.load_pem_x509_certificate(str(certificate), default_backend())
self.assertEqual(cert.signature_hash_algorithm.name, 'sha256')
self.assertEqual(cert.issuer.get_attributes_for_oid(x509.oid.NameOID.COMMON_NAME)[0].value, 'ca.cloudstack.apache.org')
@attr(tags=['advanced', 'simulator', 'basic', 'sg'], required_hardware=False)
def test_issue_certificate_without_csr(self):
"""
Tests issuance of a certificate
"""
cmd = issueCertificate.issueCertificateCmd()
cmd.domain = 'apache.org,cloudstack.apache.org'
cmd.ipaddress = '10.1.1.1,10.2.2.2'
cmd.provider = 'root'
response = self.apiclient.issueCertificate(cmd)
self.assertTrue(len(response.privatekey) > 0)
self.assertTrue(len(response.cacertificates) > 0)
self.assertTrue(len(response.certificate) > 0)
cert = x509.load_pem_x509_certificate(str(response.certificate), default_backend())
# Validate basic certificate attributes
self.assertEqual(cert.signature_hash_algorithm.name, 'sha256')
self.assertEqual(cert.subject.get_attributes_for_oid(x509.oid.NameOID.COMMON_NAME)[0].value, 'apache.org')
# Validate alternative names
altNames = cert.extensions.get_extension_for_oid(x509.oid.ExtensionOID.SUBJECT_ALTERNATIVE_NAME)
for domain in cmd.domain.split(','):
self.assertTrue(domain in altNames.value.get_values_for_type(x509.DNSName))
for address in cmd.ipaddress.split(','):
self.assertTrue(address in map(lambda x: str(x), altNames.value.get_values_for_type(x509.IPAddress)))
# Validate certificate against CA public key
global PUBKEY_VERIFY
if not PUBKEY_VERIFY:
return
caCert = x509.load_pem_x509_certificate(str(self.getCaCertificate()), default_backend())
x = X509()
x.set_pubkey(load_publickey(FILETYPE_PEM, str(caCert.public_key().public_bytes(serialization.Encoding.PEM, serialization.PublicFormat.SubjectPublicKeyInfo))))
verify(x, cert.signature, cert.tbs_certificate_bytes, cert.signature_hash_algorithm.name)
@attr(tags=['advanced', 'simulator', 'basic', 'sg'], required_hardware=False)
def test_issue_certificate_with_csr(self):
"""
Tests issuance of a certificate
"""
cmd = issueCertificate.issueCertificateCmd()
cmd.csr = "-----BEGIN CERTIFICATE REQUEST-----\nMIIBHjCByQIBADBkMQswCQYDVQQGEwJJTjELMAkGA1UECAwCSFIxETAPBgNVBAcM\nCEd1cnVncmFtMQ8wDQYDVQQKDAZBcGFjaGUxEzARBgNVBAsMCkNsb3VkU3RhY2sx\nDzANBgNVBAMMBnYtMS1WTTBcMA0GCSqGSIb3DQEBAQUAA0sAMEgCQQD46KFWKYrJ\nF43Y1oqWUfrl4mj4Qm05Bgsi6nuigZv7ufiAKK0nO4iJKdRa2hFMUvBi2/bU3IyY\nNvg7cdJsn4K9AgMBAAGgADANBgkqhkiG9w0BAQUFAANBAIta9glu/ZSjA/ncyXix\nyDOyAKmXXxsRIsdrEuIzakUuJS7C8IG0FjUbDyIaiwWQa5x+Lt4oMqCmpNqRzaGP\nfOo=\n-----END CERTIFICATE REQUEST-----"
cmd.provider = 'root'
response = self.apiclient.issueCertificate(cmd)
self.assertTrue(response.privatekey is None)
self.assertTrue(len(response.cacertificates) > 0)
self.assertTrue(len(response.certificate) > 0)
cert = x509.load_pem_x509_certificate(str(response.certificate), default_backend())
# Validate basic certificate attributes
self.assertEqual(cert.signature_hash_algorithm.name, 'sha256')
self.assertEqual(cert.subject.get_attributes_for_oid(x509.oid.NameOID.COMMON_NAME)[0].value, 'v-1-VM')
# Validate certificate against CA public key
global PUBKEY_VERIFY
if not PUBKEY_VERIFY:
return
caCert = x509.load_pem_x509_certificate(str(self.getCaCertificate()), default_backend())
x = X509()
x.set_pubkey(load_publickey(FILETYPE_PEM, str(caCert.public_key().public_bytes(serialization.Encoding.PEM, serialization.PublicFormat.SubjectPublicKeyInfo))))
verify(x, cert.signature, cert.tbs_certificate_bytes, cert.signature_hash_algorithm.name)
@attr(tags=['advanced', 'simulator', 'basic', 'sg'], required_hardware=False)
def test_revoke_certificate(self):
"""
Tests certificate revocation
"""
cmd = revokeCertificate.revokeCertificateCmd()
cmd.serial = 'abc123' # hex value
cmd.cn = 'example.com'
cmd.provider = 'root'
self.dbclient.execute("delete from crl where serial='%s'" % cmd.serial)
response = self.apiclient.revokeCertificate(cmd)
self.assertTrue(response.success)
crl = self.dbclient.execute("select serial, cn from crl where serial='%s'" % cmd.serial)[0]
self.assertEqual(crl[0], cmd.serial)
self.assertEqual(crl[1], cmd.cn)
@attr(tags=['advanced', 'simulator', 'basic', 'sg'], required_hardware=False)
def test_provision_certificate(self):
"""
Tests certificate provisioning
"""
hosts = self.getUpSystemVMHosts()
if not hosts or len(hosts) < 1:
raise self.skipTest("No Up systemvm hosts found, skipping test")
host = hosts[0]
cmd = provisionCertificate.provisionCertificateCmd()
cmd.hostid = host.id
cmd.reconnect = True
cmd.provider = 'root'
response = self.apiclient.provisionCertificate(cmd)
self.assertTrue(response.success)
if self.hypervisor.lower() == 'simulator':
hosts = self.getUpSystemVMHosts(host.id)
self.assertTrue(hosts is None or len(hosts) == 0)
else:
def checkHostIsUp(hostId):
hosts = self.getUpSystemVMHosts(host.id)
return (hosts is not None), hosts
result, hosts = wait_until(1, 30, checkHostIsUp, host.id)
if result:
self.assertTrue(len(hosts) == 1)
else:
self.fail("Failed to have systemvm host in Up state after cert provisioning")
| apache-2.0 | -6,341,699,366,786,309,000 | 39.790393 | 492 | 0.675302 | false |
hannesstockner/samza | samza-test/src/main/python/samza_job_yarn_deployer.py | 15 | 12921 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import re
import logging
import json
import requests
import shutil
import tarfile
import zopkio.constants as constants
import zopkio.runtime as runtime
import templates
from subprocess import PIPE, Popen
from zopkio.deployer import Deployer, Process
from zopkio.remote_host_helper import better_exec_command, DeploymentError, get_sftp_client, get_ssh_client, open_remote_file
logger = logging.getLogger(__name__)
class SamzaJobYarnDeployer(Deployer):
def __init__(self, configs={}):
"""
Instantiates a Samza job deployer that uses run-job.sh and kill-yarn-job.sh
to start and stop Samza jobs in a YARN grid.
param: configs -- Map of config key/value pairs. These configs will be used
as a default whenever overrides are not provided in the methods (install,
start, stop, etc) below.
"""
logging.getLogger("paramiko").setLevel(logging.ERROR)
# map from job_id to app_id
self.username = runtime.get_username()
self.password = runtime.get_password()
self.app_ids = {}
self.default_configs = configs
Deployer.__init__(self)
def install(self, package_id, configs={}):
"""
Installs a package (tarball or zip) onto a list of remote hosts by
SFTP'ing the package to the remote install_path.
param: package_id -- A unique ID used to identify an installed YARN package.
param: configs -- Map of config key/value pairs. Valid keys include:
yarn_site_template: Jinja2 yarn-site.xml template local path.
yarn_driver_configs: Key/value pairs to be injected into the yarn-site.xml template.
yarn_nm_hosts: A list of YARN NM hosts to install the package onto.
install_path: An absolute path where the package will be installed.
executable: A local path pointing to the location of the package that should be installed on remote hosts.
"""
configs = self._get_merged_configs(configs)
self._validate_configs(configs, ['yarn_site_template', 'yarn_driver_configs', 'yarn_nm_hosts', 'install_path', 'executable'])
# Get configs.
nm_hosts = configs.get('yarn_nm_hosts')
install_path = configs.get('install_path')
executable = configs.get('executable')
# FTP and decompress job tarball to all NMs.
exec_file_location = os.path.join(install_path, self._get_package_tgz_name(package_id))
exec_file_install_path = os.path.join(install_path, package_id)
for host in nm_hosts:
logger.info('Deploying {0} on host: {1}'.format(package_id, host))
with get_ssh_client(host, self.username, self.password) as ssh:
better_exec_command(ssh, "mkdir -p {0}".format(install_path), "Failed to create path: {0}".format(install_path))
with get_sftp_client(host, self.username, self.password) as ftp:
def progress(transferred_bytes, total_bytes_to_transfer):
logger.debug("{0} of {1} bytes transferred.".format(transferred_bytes, total_bytes_to_transfer))
ftp.put(executable, exec_file_location, callback=progress)
# Extract archive locally so we can use run-job.sh.
executable_tgz = tarfile.open(executable, 'r:gz')
executable_tgz.extractall(package_id)
# Generate yarn-site.xml install it in package's local 'config' directory.
yarn_site_dir = self._get_yarn_conf_dir(package_id)
yarn_site_path = os.path.join(yarn_site_dir, 'yarn-site.xml')
logger.info("Installing yarn-site.xml to {0}".format(yarn_site_path))
if not os.path.exists(yarn_site_dir):
os.makedirs(yarn_site_dir)
templates.render_config(configs.get('yarn_site_template'), yarn_site_path, configs.get('yarn_driver_configs'))
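# A minimal, illustrative configs dict for install(); host names, paths and
# the driver config key below are hypothetical:
#
#     deployer.install('samza-job-package', {
#         'yarn_site_template': 'templates/yarn-site.xml',
#         'yarn_driver_configs': {'yarn.resourcemanager.hostname': 'rm-host'},
#         'yarn_nm_hosts': ['nm-host-1', 'nm-host-2'],
#         'install_path': '/tmp/samza-tests',
#         'executable': 'target/samza-job-package.tgz',
#     })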
def start(self, job_id, configs={}):
"""
Starts a Samza job using the bin/run-job.sh script.
param: job_id -- A unique ID used to identify a Samza job. Job IDs are associated
with a package_id, and a config file.
param: configs -- Map of config key/value pairs. Valid keys include:
package_id: The package_id for the package that contains the code for job_id.
Usually, the package_id refers to the .tgz job tarball that contains the
code necessary to run job_id.
config_factory: The config factory to use to decode the config_file.
config_file: Path to the config file for the job to be run.
install_path: Path where the package for the job has been installed on remote NMs.
properties: (optional) [(property-name,property-value)] Optional override
properties for the run-job.sh script. These properties override the
config_file's properties.
"""
configs = self._get_merged_configs(configs)
self._validate_configs(configs, ['package_id', 'config_factory', 'config_file', 'install_path'])
# Get configs.
package_id = configs.get('package_id')
config_factory = configs.get('config_factory')
config_file = configs.get('config_file')
install_path = configs.get('install_path')
properties = configs.get('properties', {})
properties['yarn.package.path'] = 'file:' + os.path.join(install_path, self._get_package_tgz_name(package_id))
# Execute bin/run-job.sh locally from driver machine.
command = "{0} --config-factory={1} --config-path={2}".format(os.path.join(package_id, "bin/run-job.sh"), config_factory, os.path.join(package_id, config_file))
env = self._get_env_vars(package_id)
for property_name, property_value in properties.iteritems():
command += " --config {0}={1}".format(property_name, property_value)
p = Popen(command.split(' '), stdin=PIPE, stdout=PIPE, stderr=PIPE, env=env)
output, err = p.communicate()
logger.debug("Output from run-job.sh:\nstdout: {0}\nstderr: {1}".format(output, err))
assert p.returncode == 0, "Command ({0}) returned non-zero exit code ({1}).\nstdout: {2}\nstderr: {3}".format(command, p.returncode, output, err)
# Save application_id for job_id so we can kill the job later.
regex = r'.*Submitted application (\w*)'
match = re.match(regex, output.replace("\n", ' '))
assert match, "Job ({0}) appears not to have started. Expected to see a log line matching regex: {1}".format(job_id, regex)
app_id = match.group(1)
logger.debug("Got application_id {0} for job_id {1}.".format(app_id, job_id))
self.app_ids[job_id] = app_id
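# Sketch of a start() call matching the keys validated above (the config
# file path and factory class are hypothetical example values):
#
#     deployer.start('my-job', {
#         'package_id': 'samza-job-package',
#         'config_factory': 'org.apache.samza.config.factories.PropertiesConfigFactory',
#         'config_file': 'config/my-job.properties',
#         'install_path': '/tmp/samza-tests',
#     })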
def stop(self, job_id, configs={}):
"""
Stops a Samza job using the bin/kill-yarn-job.sh script.
param: job_id -- A unique ID used to identify a Samza job.
param: configs -- Map of config key/value pairs. Valid keys include:
package_id: The package_id for the package that contains the code for job_id.
Usually, the package_id refers to the .tgz job tarball that contains the
code necessary to run job_id.
"""
configs = self._get_merged_configs(configs)
self._validate_configs(configs, ['package_id'])
# Get configs.
package_id = configs.get('package_id')
# Get the application_id for the job.
application_id = self.app_ids.get(job_id)
# Kill the job, if it's been started, or WARN and return if it's hasn't.
if not application_id:
logger.warn("Can't stop a job that was never started: {0}".format(job_id))
else:
command = "{0} {1}".format(os.path.join(package_id, "bin/kill-yarn-job.sh"), application_id)
env = self._get_env_vars(package_id)
p = Popen(command.split(' '), stdin=PIPE, stdout=PIPE, stderr=PIPE, env=env)
p.wait()
assert p.returncode == 0, "Command returned non-zero exit code ({0}): {1}".format(p.returncode, command)
def await(self, job_id, configs={}):
"""
Waits for a Samza job to finish using bin/stat-yarn-job.sh. A job is
finished when its "Final State" is not "UNDEFINED".
param: job_id -- A unique ID used to identify a Samza job.
param: configs -- Map of config key/value pairs. Valid keys include:
package_id: The package_id for the package that contains the code for job_id.
Usually, the package_id refers to the .tgz job tarball that contains the
code necessary to run job_id.
"""
configs = self._get_merged_configs(configs)
self._validate_configs(configs, ['package_id'])
# Get configs.
package_id = configs.get('package_id')
# Get the application_id for the job.
application_id = self.app_ids.get(job_id)
# Stat the job, if it's been started, or WARN and return if it's hasn't.
final_state = 'UNDEFINED'
if not application_id:
logger.warn("Can't stat a job that was never started: {0}".format(job_id))
else:
command = "{0} {1}".format(os.path.join(package_id, "bin/stat-yarn-job.sh"), application_id)
env = self._get_env_vars(package_id)
while final_state == 'UNDEFINED':
p = Popen(command.split(' '), stdin=PIPE, stdout=PIPE, stderr=PIPE, env=env)
output, err = p.communicate()
logger.debug("Output from run-job.sh:\nstdout: {0}\nstderr: {1}".format(output, err))
assert p.returncode == 0, "Command ({0}) returned non-zero exit code ({1}).\nstdout: {2}\nstderr: {3}".format(command, p.returncode, output, err)
# Check the final state for the job.
regex = r'.*Final.State . (\w*)'
match = re.match(regex, output.replace("\n", ' '))
final_state = match.group(1)
logger.debug("Got final state {0} for job_id {1}.".format(final_state, job_id))
return final_state
def uninstall(self, package_id, configs={}):
"""
Removes the install path for package_id from all remote hosts that it's been
installed on.
param: package_id -- A unique ID used to identify an installed YARN package.
param: configs -- Map of config key/value pairs. Valid keys include:
yarn_nm_hosts: A list of hosts that package was installed on.
install_path: Path where the package for the job has been installed on remote NMs.
"""
configs = self._get_merged_configs(configs)
self._validate_configs(configs, ['yarn_nm_hosts', 'install_path'])
# Get configs.
nm_hosts = configs.get('yarn_nm_hosts')
install_path = configs.get('install_path')
# Delete job package on all NMs.
for host in nm_hosts:
with get_ssh_client(host, self.username, self.password) as ssh:
better_exec_command(ssh, "rm -rf {0}".format(install_path), "Failed to remove {0}".format(install_path))
# Delete job package directory from local driver box.
shutil.rmtree(package_id)
# TODO we should implement the below helper methods over time, as we need them.
def get_processes(self):
# TODO raise NotImplementedError
return []
def get_pid(self, container_id, configs={}):
raise NotImplementedError
def get_host(self, container_id):
raise NotImplementedError
def get_containers(self, job_id):
raise NotImplementedError
def get_jobs(self):
raise NotImplementedError
def sleep(self, container_id, delay, configs={}):
raise NotImplementedError
def pause(self, container_id, configs={}):
raise NotImplementedError
def resume(self, container_id, configs={}):
raise NotImplementedError
def kill(self, container_id, configs={}):
raise NotImplementedError
def terminate(self, container_id, configs={}):
raise NotImplementedError
def get_logs(self, container_id, logs, directory):
raise NotImplementedError
def _validate_configs(self, configs, config_keys):
for required_config in config_keys:
assert configs.get(required_config), 'Required config is undefined: {0}'.format(required_config)
def _get_merged_configs(self, configs):
tmp = self.default_configs.copy()
tmp.update(configs)
return tmp
def _get_package_tgz_name(self, package_id):
return '{0}.tgz'.format(package_id)
def _get_yarn_home_dir(self, package_id):
return os.path.abspath(package_id)
def _get_yarn_conf_dir(self, package_id):
return os.path.join(self._get_yarn_home_dir(package_id), 'config')
def _get_env_vars(self, package_id):
env = os.environ.copy()
env['YARN_CONF_DIR'] = self._get_yarn_conf_dir(package_id)
env['HADOOP_CONF_DIR'] = env['YARN_CONF_DIR']
logger.debug('Built environment: {0}'.format(env))
return env
| apache-2.0 | 690,226,025,822,370,300 | 41.643564 | 164 | 0.688337 | false |
AnotherIvan/calibre | src/calibre/ebooks/epub/periodical.py | 14 | 6111 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
from uuid import uuid4
import time
from calibre.constants import __appname__, __version__
from calibre import strftime, prepare_string_for_xml as xml
from calibre.utils.date import parse_date
SONY_METADATA = u'''\
<?xml version="1.0" encoding="utf-8"?>
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:dcterms="http://purl.org/dc/terms/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:prs="http://xmlns.sony.net/e-book/prs/">
<rdf:Description rdf:about="">
<dc:title>{title}</dc:title>
<dc:publisher>{publisher}</dc:publisher>
<dcterms:alternative>{short_title}</dcterms:alternative>
<dcterms:issued>{issue_date}</dcterms:issued>
<dc:language>{language}</dc:language>
<dcterms:conformsTo rdf:resource="http://xmlns.sony.net/e-book/prs/periodicals/1.0/newspaper/1.0"/>
<dcterms:type rdf:resource="http://xmlns.sony.net/e-book/prs/datatype/newspaper"/>
<dcterms:type rdf:resource="http://xmlns.sony.net/e-book/prs/datatype/periodical"/>
</rdf:Description>
</rdf:RDF>
'''
SONY_ATOM = u'''\
<?xml version="1.0" encoding="utf-8" ?>
<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:dcterms="http://purl.org/dc/terms/"
xmlns:prs="http://xmlns.sony.net/e-book/prs/"
xmlns:media="http://video.search.yahoo.com/mrss"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<title>{short_title}</title>
<updated>{updated}</updated>
<id>{id}</id>
{entries}
</feed>
'''
SONY_ATOM_SECTION = u'''\
<entry rdf:ID="{title}">
<title>{title}</title>
<link href="{href}"/>
<id>{id}</id>
<updated>{updated}</updated>
<summary>{desc}</summary>
<category term="{short_title}/{title}"
scheme="http://xmlns.sony.net/e-book/terms/" label="{title}"/>
<dc:type xsi:type="prs:datatype">newspaper/section</dc:type>
<dcterms:isReferencedBy rdf:resource=""/>
</entry>
'''
SONY_ATOM_ENTRY = u'''\
<entry>
<title>{title}</title>
<author><name>{author}</name></author>
<link href="{href}"/>
<id>{id}</id>
<updated>{updated}</updated>
<summary>{desc}</summary>
<category term="{short_title}/{section_title}"
scheme="http://xmlns.sony.net/e-book/terms/" label="{section_title}"/>
<dcterms:extent xsi:type="prs:word-count">{word_count}</dcterms:extent>
<dc:type xsi:type="prs:datatype">newspaper/article</dc:type>
<dcterms:isReferencedBy rdf:resource="#{section_title}"/>
</entry>
'''
def sony_metadata(oeb):
m = oeb.metadata
title = short_title = unicode(m.title[0])
publisher = __appname__ + ' ' + __version__
try:
pt = unicode(oeb.metadata.publication_type[0])
short_title = u':'.join(pt.split(':')[2:])
except:
pass
try:
date = parse_date(unicode(m.date[0]),
as_utc=False).strftime('%Y-%m-%d')
except:
date = strftime('%Y-%m-%d')
try:
language = unicode(m.language[0]).replace('_', '-')
except:
language = 'en'
short_title = xml(short_title, True)
metadata = SONY_METADATA.format(title=xml(title),
short_title=short_title,
publisher=xml(publisher), issue_date=xml(date),
language=xml(language))
updated = strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
def cal_id(x):
for k, v in x.attrib.items():
if k.endswith('scheme') and v == 'uuid':
return True
try:
base_id = unicode(list(filter(cal_id, m.identifier))[0])
except:
base_id = str(uuid4())
toc = oeb.toc
if False and toc.depth() < 3:
# Single section periodical
# Disabled since I prefer the current behavior
from calibre.ebooks.oeb.base import TOC
section = TOC(klass='section', title=_('All articles'),
href=oeb.spine[2].href)
for x in toc: section.nodes.append(x)
toc = TOC(klass='periodical', href=oeb.spine[2].href,
title=unicode(oeb.metadata.title[0]))
toc.nodes.append(section)
entries = []
seen_titles = set([])
for i, section in enumerate(toc):
if not section.href:
continue
secid = 'section%d'%i
sectitle = section.title
if not sectitle:
sectitle = _('Unknown')
d = 1
bsectitle = sectitle
while sectitle in seen_titles:
sectitle = bsectitle + ' ' + str(d)
d += 1
seen_titles.add(sectitle)
sectitle = xml(sectitle, True)
secdesc = section.description
if not secdesc:
secdesc = ''
secdesc = xml(secdesc)
entries.append(SONY_ATOM_SECTION.format(title=sectitle,
href=section.href, id=xml(base_id)+'/'+secid,
short_title=short_title, desc=secdesc, updated=updated))
for j, article in enumerate(section):
if not article.href:
continue
atitle = article.title
btitle = atitle
d = 1
while atitle in seen_titles:
atitle = btitle + ' ' + str(d)
d += 1
auth = article.author if article.author else ''
desc = section.description
if not desc:
desc = ''
aid = 'article%d'%j
entries.append(SONY_ATOM_ENTRY.format(
title=xml(atitle),
author=xml(auth),
updated=updated,
desc=desc,
short_title=short_title,
section_title=sectitle,
href=article.href,
word_count=str(1),
id=xml(base_id)+'/'+secid+'/'+aid
))
atom = SONY_ATOM.format(short_title=short_title,
entries='\n\n'.join(entries), updated=updated,
id=xml(base_id)).encode('utf-8')
return metadata, atom
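# Rough usage sketch (the oeb object comes from calibre's OEB pipeline;
# writing the two documents out is the caller's responsibility):
#
#     metadata, atom = sony_metadata(oeb)
#     open('metadata.xml', 'wb').write(metadata.encode('utf-8'))
#     open('atom.xml', 'wb').write(atom)  # atom is already UTF-8 encoded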
| gpl-3.0 | 6,889,401,046,172,105,000 | 31.333333 | 101 | 0.585829 | false |
ksachs/invenio | modules/bibsort/lib/bibsort_washer.py | 6 | 4744 | ## -*- mode: python; coding: utf-8; -*-
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Applies a transformation function to a value"""
import re
from invenio.dateutils import strftime, strptime
from invenio.textutils import decode_to_unicode, translate_to_ascii
LEADING_ARTICLES = ['the', 'a', 'an', 'at', 'on', 'of']
_RE_NOSYMBOLS = re.compile("\w+")
class InvenioBibSortWasherNotImplementedError(Exception):
"""Exception raised when a washer method
defined in the bibsort config file is not implemented"""
pass
class BibSortWasher(object):
"""Implements all the washer methods"""
def __init__(self, washer):
self.washer = washer
fnc_name = '_' + washer
try:
self.washer_fnc = self.__getattribute__(fnc_name)
except AttributeError, err:
raise InvenioBibSortWasherNotImplementedError(err)
def get_washer(self):
"""Returns the washer name"""
return self.washer
def get_transformed_value(self, val):
"""Returns the value"""
return self.washer_fnc(val)
def _sort_alphanumerically_remove_leading_articles_strip_accents(self, val):
"""
Convert:
'The title' => 'title'
'A title' => 'title'
'Title' => 'title'
"""
if not val:
return ''
val = translate_to_ascii(val).pop().lower()
val_tokens = val.split(" ", 1) #split in leading_word, phrase_without_leading_word
if len(val_tokens) == 2 and val_tokens[0].strip() in LEADING_ARTICLES:
return val_tokens[1].strip()
return val.strip()
def _sort_alphanumerically_remove_leading_articles(self, val):
"""
Convert:
'The title' => 'title'
'A title' => 'title'
'Title' => 'title'
"""
if not val:
return ''
val = decode_to_unicode(val).lower().encode('UTF-8')
val_tokens = val.split(" ", 1) #split in leading_word, phrase_without_leading_word
if len(val_tokens) == 2 and val_tokens[0].strip() in LEADING_ARTICLES:
return val_tokens[1].strip()
return val.strip()
def _sort_case_insensitive_strip_accents(self, val):
"""Remove accents and convert to lower case"""
if not val:
return ''
return translate_to_ascii(val).pop().lower()
def _sort_nosymbols_case_insensitive_strip_accents(self, val):
"""Remove accents, remove symbols, and convert to lower case"""
if not val:
return ''
return ''.join(_RE_NOSYMBOLS.findall(translate_to_ascii(val).pop().lower()))
def _sort_case_insensitive(self, val):
"""Conversion to lower case"""
if not val:
return ''
return decode_to_unicode(val).lower().encode('UTF-8')
def _sort_dates(self, val):
"""
Convert:
'8 nov 2010' => '2010-11-08'
'nov 2010' => '2010-11-01'
'2010' => '2010-01-01'
"""
datetext_format = "%Y-%m-%d"
try:
datestruct = strptime(val, datetext_format)
except ValueError:
try:
datestruct = strptime(val, "%d %b %Y")
except ValueError:
try:
datestruct = strptime(val, "%b %Y")
except ValueError:
try:
datestruct = strptime(val, "%Y")
except ValueError:
return val
return strftime(datetext_format, datestruct)
def _sort_numerically(self, val):
"""
Convert:
1245 => float(1245)
"""
try:
return float(val)
except ValueError:
return 0
def get_all_available_washers():
"""
Returns all the available washer functions without the leading '_'
"""
method_list = dir(BibSortWasher)
return [method[1:] for method in method_list if method.startswith('_') and method.find('__') < 0]
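# Illustrative examples of the washers defined above (input values made up,
# expected outputs follow the docstrings):
#
#     BibSortWasher('sort_dates').get_transformed_value('nov 2010')
#     # -> '2010-11-01'
#     BibSortWasher('sort_alphanumerically_remove_leading_articles') \
#         .get_transformed_value('The title')  # -> 'title'
#     BibSortWasher('sort_numerically').get_transformed_value('1245')  # -> 1245.0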
| gpl-2.0 | 6,214,775,736,165,882,000 | 32.408451 | 101 | 0.58769 | false |
Hitechverma/zamboni | mkt/feed/views.py | 2 | 39503 | from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.db.models import Q
from django.db.transaction import non_atomic_requests
from django.utils.datastructures import MultiValueDictKeyError
from django.http import Http404
from django.views.decorators.cache import cache_control
import commonware
from django_statsd.clients import statsd
from elasticsearch_dsl import filter as es_filter
from elasticsearch_dsl import function as es_function
from elasticsearch_dsl import query, Search
from PIL import Image
from rest_framework import generics, response, status, viewsets
from rest_framework.exceptions import ParseError, PermissionDenied
from rest_framework.filters import BaseFilterBackend, OrderingFilter
from rest_framework.response import Response
from rest_framework.serializers import Serializer, ValidationError
from rest_framework.views import APIView
import mkt
import mkt.feed.constants as feed
from mkt.access import acl
from mkt.api.authentication import (RestAnonymousAuthentication,
RestOAuthAuthentication,
RestSharedSecretAuthentication)
from mkt.api.authorization import AllowReadOnly, AnyOf, GroupPermission
from mkt.api.base import CORSMixin, MarketplaceView, SlugOrIdMixin
from mkt.api.paginator import ESPaginator
from mkt.constants.carriers import CARRIER_MAP
from mkt.constants.regions import REGIONS_DICT
from mkt.developers.tasks import pngcrush_image
from mkt.feed.indexers import FeedItemIndexer
from mkt.operators.models import OperatorPermission
from mkt.search.filters import (DeviceTypeFilter, ProfileFilter,
PublicAppsFilter, RegionFilter)
from mkt.site.utils import HttpResponseSendFile
from mkt.webapps.indexers import WebappIndexer
from mkt.webapps.models import Webapp
from .authorization import FeedAuthorization
from .fields import DataURLImageField, ImageURLField
from .models import FeedApp, FeedBrand, FeedCollection, FeedItem, FeedShelf
from .serializers import (FeedAppESSerializer, FeedAppSerializer,
FeedBrandESSerializer, FeedBrandSerializer,
FeedCollectionESSerializer, FeedCollectionSerializer,
FeedItemESSerializer, FeedItemSerializer,
FeedShelfESSerializer, FeedShelfSerializer)
log = commonware.log.getLogger('z.feed')
class ImageURLUploadMixin(viewsets.ModelViewSet):
"""
Attaches pre/post save methods for image handling.
The pre_save downloads an image from a URL and validates it. The post_save
saves the image in the feed element's directory.
We look at the class' `image_fields` property for the list of tuples to
check. Each tuple holds the image form field name, the hash field name,
and a suffix to append to the image file name::
image_fields = (('background_image_upload_url', 'image_hash', ''),)
"""
def pre_save(self, obj):
"""Download and validate image URL."""
for image_field, hash_field, suffix in self.image_fields:
if self.request.DATA.get(image_field):
img, hash_ = ImageURLField().from_native(
self.request.DATA[image_field])
# Store img for `post_save` where we have access to the pk so
# we can save img in appropriate directory.
setattr(obj, '_%s' % image_field, img)
setattr(obj, hash_field, hash_)
elif hasattr(obj, 'type') and obj.type == feed.COLLECTION_PROMO:
# Remove background images for promo collections.
setattr(obj, hash_field, None)
return super(ImageURLUploadMixin, self).pre_save(obj)
def post_save(self, obj, created=True):
"""Store image that we attached to the obj in pre_save."""
for image_field, hash_field, suffix in self.image_fields:
image = getattr(obj, '_%s' % image_field, None)
if image:
i = Image.open(image)
path = obj.image_path(suffix)
with storage.open(path, 'wb') as f:
i.save(f, 'png')
pngcrush_image.delay(path, set_modified_on=[obj])
return super(ImageURLUploadMixin, self).post_save(obj, created)
class GroupedAppsViewSetMixin(object):
def set_apps_grouped(self, obj, apps):
if apps:
try:
obj.set_apps_grouped(apps)
except Webapp.DoesNotExist:
raise ParseError(detail=self.exceptions['doesnt_exist'])
def set_apps(self, obj, apps):
"""
Attempt to set the apps via the superclass, catching and handling the
TypeError raised if the apps are passed in a grouped manner.
"""
try:
super(GroupedAppsViewSetMixin, self).set_apps(obj, apps)
except TypeError:
self.set_apps_grouped(obj, apps)
class BaseFeedCollectionViewSet(CORSMixin, SlugOrIdMixin, MarketplaceView,
ImageURLUploadMixin):
"""
Base viewset for subclasses of BaseFeedCollection.
"""
serializer_class = None
queryset = None
cors_allowed_methods = ('get', 'post', 'delete', 'patch', 'put')
permission_classes = [FeedAuthorization]
authentication_classes = [RestOAuthAuthentication,
RestSharedSecretAuthentication,
RestAnonymousAuthentication]
exceptions = {
'doesnt_exist': 'One or more of the specified `apps` do not exist.'
}
image_fields = (('background_image_upload_url', 'image_hash', ''),)
def list(self, request, *args, **kwargs):
page = self.paginate_queryset(
self.filter_queryset(self.get_queryset()))
serializer = self.get_pagination_serializer(page)
return response.Response(serializer.data)
def set_apps(self, obj, apps):
if apps:
try:
obj.set_apps(apps)
except Webapp.DoesNotExist:
raise ParseError(detail=self.exceptions['doesnt_exist'])
def create(self, request, *args, **kwargs):
apps = request.DATA.pop('apps', [])
serializer = self.get_serializer(data=request.DATA,
files=request.FILES)
if serializer.is_valid():
self.pre_save(serializer.object)
self.object = serializer.save(force_insert=True)
self.set_apps(self.object, apps)
self.post_save(self.object, created=True)
headers = self.get_success_headers(serializer.data)
return response.Response(serializer.data,
status=status.HTTP_201_CREATED,
headers=headers)
return response.Response(serializer.errors,
status=status.HTTP_400_BAD_REQUEST)
def update(self, request, *args, **kwargs):
apps = request.DATA.pop('apps', [])
self.set_apps(self.get_object(), apps)
ret = super(BaseFeedCollectionViewSet, self).update(
request, *args, **kwargs)
return ret
class RegionCarrierFilter(BaseFilterBackend):
def filter_queryset(self, request, qs, view):
q = request.QUERY_PARAMS
# Filter for only the region if specified.
if q.get('region') and q.get('region') in mkt.regions.REGIONS_DICT:
region_id = mkt.regions.REGIONS_DICT[q['region']].id
qs = qs.filter(region=region_id)
# Exclude feed items that specify carrier but do not match carrier.
if q.get('carrier') and q.get('carrier') in mkt.carriers.CARRIER_MAP:
carrier = mkt.carriers.CARRIER_MAP[q['carrier']].id
qs = qs.exclude(~Q(carrier=carrier), carrier__isnull=False)
return qs
class FeedItemViewSet(CORSMixin, viewsets.ModelViewSet):
"""
A viewset for the FeedItem class, which wraps all items that live on the
feed.
"""
authentication_classes = [RestOAuthAuthentication,
RestSharedSecretAuthentication,
RestAnonymousAuthentication]
permission_classes = [AnyOf(AllowReadOnly,
GroupPermission('Feed', 'Curate'))]
filter_backends = (OrderingFilter, RegionCarrierFilter)
queryset = FeedItem.objects.all()
cors_allowed_methods = ('get', 'delete', 'post', 'put', 'patch')
serializer_class = FeedItemSerializer
class FeedBuilderView(CORSMixin, APIView):
authentication_classes = [RestOAuthAuthentication,
RestSharedSecretAuthentication]
permission_classes = [GroupPermission('Feed', 'Curate')]
cors_allowed_methods = ('put',)
def put(self, request, *args, **kwargs):
"""
For each region in the object:
Deletes all of the (carrier-less) FeedItems in the region.
Batch creates all of the FeedItems in order for each region.
-- feed - object of regions that point to a list of feed
element IDs (as well as their type).
{
'us': [
['app', 36L],
['app', 42L],
['collection', 12L],
['brand', 12L]
]
}
"""
regions = [mkt.regions.REGIONS_DICT[region].id
for region in request.DATA.keys()]
FeedItem.objects.filter(
carrier=None, region__in=regions).delete()
feed_items = []
for region, feed_elements in request.DATA.items():
for order, feed_element in enumerate(feed_elements):
try:
item_type, item_id = feed_element
except ValueError:
return response.Response(
'Expected two-element arrays.',
status=status.HTTP_400_BAD_REQUEST)
feed_item = {
'region': mkt.regions.REGIONS_DICT[region].id,
'order': order,
'item_type': item_type,
}
feed_item[item_type + '_id'] = item_id
feed_items.append(FeedItem(**feed_item))
FeedItem.objects.bulk_create(feed_items)
# Index the feed items created. bulk_create doesn't call save or
# post_save so get the IDs manually.
feed_item_ids = list(FeedItem.objects.filter(region__in=regions)
.values_list('id', flat=True))
FeedItem.get_indexer().index_ids(feed_item_ids, no_delay=True)
return response.Response(status=status.HTTP_201_CREATED)
class FeedAppViewSet(CORSMixin, MarketplaceView, SlugOrIdMixin,
ImageURLUploadMixin):
"""
A viewset for the FeedApp class, which highlights a single app and some
additional metadata (e.g. a review or a screenshot).
"""
authentication_classes = [RestOAuthAuthentication,
RestSharedSecretAuthentication,
RestAnonymousAuthentication]
permission_classes = [AnyOf(AllowReadOnly,
GroupPermission('Feed', 'Curate'))]
filter_backends = (OrderingFilter,)
queryset = FeedApp.objects.all()
cors_allowed_methods = ('get', 'delete', 'post', 'put', 'patch')
serializer_class = FeedAppSerializer
image_fields = (('background_image_upload_url', 'image_hash', ''),)
def list(self, request, *args, **kwargs):
page = self.paginate_queryset(
self.filter_queryset(self.get_queryset()))
serializer = self.get_pagination_serializer(page)
return response.Response(serializer.data)
class FeedBrandViewSet(BaseFeedCollectionViewSet):
"""
A viewset for the FeedBrand class, a type of collection that allows editors
to quickly create content without involving localizers.
"""
queryset = FeedBrand.objects.all()
serializer_class = FeedBrandSerializer
class FeedCollectionViewSet(GroupedAppsViewSetMixin,
BaseFeedCollectionViewSet):
"""
A viewset for the FeedCollection class.
"""
queryset = FeedCollection.objects.all()
serializer_class = FeedCollectionSerializer
class FeedShelfPermissionMixin(object):
"""
There are some interesting permissions-related things going on with
FeedShelves. DRF will never run object-level permissions checks (i.e.
has_object_permission) if the user fails the top-level checks (i.e.
has_permission), but there are cases with FeedShelf objects where access
to an object requires access to properties of the object. This means we
have to manually make these checks in the viewsets.
This mixin provides all the necessary methods to do so.
"""
def req_data(self):
"""
Returns a MultiDict containing the request data. This is shimmed to
ensure that it works if passed either rest_framework's Request class
or Django's WSGIRequest class.
"""
return (self.request.DATA if hasattr(self.request, 'DATA') else
self.request.POST)
def is_admin(self, user):
"""
Returns a boolean indicating whether the passed user passes either
OperatorDashboard:* or Feed:Curate.
"""
return (acl.action_allowed(self.request, 'OperatorDashboard', '*') or
acl.action_allowed(self.request, 'Feed', 'Curate'))
def require_operator_permission(self, user, carrier, region):
"""
Raises PermissionDenied if the passed user does not have an
OperatorPermission object for the passed carrier and region.
"""
if user.is_anonymous():
raise PermissionDenied()
elif self.is_admin(user):
return
carrier = (carrier if isinstance(carrier, (int, long)) else
CARRIER_MAP[carrier].id)
region = (region if isinstance(region, (int, long)) else
REGIONS_DICT[region].id)
passes = OperatorPermission.objects.filter(
user=user, carrier=carrier, region=region).exists()
if not passes:
raise PermissionDenied()
def require_object_permission(self, user, obj):
"""
Raises PermissionDenied if the passed user does not have an
OperatorPermission object for the passed FeedShelf object's carrier and
region.
"""
self.require_operator_permission(user, obj.carrier, obj.region)
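# Rough intent of the checks above (the carrier/region pair is illustrative):
#
#     self.require_operator_permission(request.user, 'telefonica', 'br')
#     # -> passes silently for Feed:Curate / OperatorDashboard:* admins,
#     #    otherwise requires an OperatorPermission row for that pair,
#     #    and raises PermissionDenied for anonymous users.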
class FeedShelfViewSet(GroupedAppsViewSetMixin, FeedShelfPermissionMixin,
BaseFeedCollectionViewSet):
"""
A viewset for the FeedShelf class.
"""
queryset = FeedShelf.objects.all()
serializer_class = FeedShelfSerializer
permission_classes = []
image_fields = (
('background_image_upload_url', 'image_hash', ''),
('background_image_landing_upload_url', 'image_landing_hash',
'_landing'),
)
def mine(self, request, *args, **kwargs):
"""
Return all shelves a user can administer. Anonymous users will always
receive an empty list.
"""
qs = self.queryset
if request.user.is_anonymous():
qs = self.queryset.none()
elif not self.is_admin(request.user):
perms = OperatorPermission.objects.filter(user=request.user)
if perms:
query = Q()
for perm in perms:
query |= Q(carrier=perm.carrier, region=perm.region)
qs = self.queryset.filter(query)
else:
qs = self.queryset.none()
self.object_list = self.filter_queryset(qs)
serializer = self.get_serializer(self.object_list, many=True)
return Response(serializer.data)
def create(self, request, *args, **kwargs):
"""
Raise PermissionDenied if the authenticating user does not pass the
checks in require_operator_permission for the carrier and region in the
request data.
"""
data = self.req_data()
try:
self.require_operator_permission(
request.user, data['carrier'], data['region'])
except (KeyError, MultiValueDictKeyError):
raise ParseError()
return super(FeedShelfViewSet, self).create(request, *args, **kwargs)
def update(self, request, *args, **kwargs):
"""
Raise PermissionDenied if the authenticating user does not pass the
checks in require_operator_permission for the carrier and region on the
FeedShelf object they are attempting to update.
"""
self.require_object_permission(request.user, self.get_object())
return super(FeedShelfViewSet, self).update(request, *args, **kwargs)
def destroy(self, request, *args, **kwargs):
"""
Raise PermissionDenied if the authenticating user does not pass the
checks in require_operator_permission for the carrier and region on the
FeedShelf object they are attempting to destroy.
"""
self.require_object_permission(request.user, self.get_object())
return super(FeedShelfViewSet, self).destroy(request, *args, **kwargs)
class FeedShelfPublishView(FeedShelfPermissionMixin, CORSMixin, APIView):
"""
put -- creates a FeedItem for a FeedShelf with respective carrier/region
pair. Deletes any currently existing FeedItems with the carrier/region
pair to effectively "unpublish" it since only one shelf can be toggled
at a time for a carrier/region.
delete -- deletes the FeedItem for a FeedShelf with respective
carrier/region.
"""
authentication_classes = [RestOAuthAuthentication,
RestSharedSecretAuthentication]
permission_classes = []
cors_allowed_methods = ('delete', 'put',)
def get_object(self, pk):
if pk.isdigit():
obj = FeedShelf.objects.get(pk=pk)
else:
obj = FeedShelf.objects.get(slug=pk)
self.require_object_permission(self.request.user, obj)
return obj
def put(self, request, *args, **kwargs):
try:
shelf = self.get_object(self.kwargs['pk'])
except FeedShelf.DoesNotExist:
return response.Response(status=status.HTTP_404_NOT_FOUND)
feed_item_kwargs = {
'item_type': feed.FEED_TYPE_SHELF,
'carrier': shelf.carrier,
'region': shelf.region
}
FeedItem.objects.filter(**feed_item_kwargs).delete()
feed_item = FeedItem.objects.create(shelf_id=shelf.id,
**feed_item_kwargs)
# Return.
serializer = FeedItemSerializer(feed_item, context={
'request': self.request,
})
return response.Response(serializer.data,
status=status.HTTP_201_CREATED)
def delete(self, request, *args, **kwargs):
try:
shelf = self.get_object(self.kwargs['pk'])
except FeedShelf.DoesNotExist:
return response.Response(status=status.HTTP_404_NOT_FOUND)
feed_item_kwargs = {
'item_type': feed.FEED_TYPE_SHELF,
'carrier': shelf.carrier,
'region': shelf.region
}
FeedItem.objects.filter(**feed_item_kwargs).delete()
# Return.
return response.Response(status=status.HTTP_204_NO_CONTENT)
class CollectionImageViewSet(CORSMixin, SlugOrIdMixin, MarketplaceView,
generics.GenericAPIView, viewsets.ViewSet):
permission_classes = [AnyOf(AllowReadOnly,
GroupPermission('Feed', 'Curate'))]
authentication_classes = [RestOAuthAuthentication,
RestSharedSecretAuthentication,
RestAnonymousAuthentication]
cors_allowed_methods = ('get', 'put', 'delete')
hash_field = 'image_hash'
image_suffix = ''
# Dummy serializer to keep DRF happy when it's answering to OPTIONS.
serializer_class = Serializer
def perform_content_negotiation(self, request, force=False):
"""
Force DRF's content negotiation to not raise an error - It wants to use
the format passed to the URL, but we don't care since we only deal with
"raw" content: we don't even use the renderers.
"""
return super(CollectionImageViewSet, self).perform_content_negotiation(
request, force=True)
@cache_control(max_age=60 * 60 * 24 * 365)
def retrieve(self, request, *args, **kwargs):
obj = self.get_object()
if not getattr(obj, 'image_hash', None):
raise Http404
return HttpResponseSendFile(request, obj.image_path(self.image_suffix),
content_type='image/png')
def update(self, request, *args, **kwargs):
obj = self.get_object()
try:
img, hash_ = DataURLImageField().from_native(request.read())
except ValidationError:
return Response(status=status.HTTP_400_BAD_REQUEST)
i = Image.open(img)
with storage.open(obj.image_path(self.image_suffix), 'wb') as f:
i.save(f, 'png')
# Store the hash of the original image data sent.
obj.update(**{self.hash_field: hash_})
pngcrush_image.delay(obj.image_path(self.image_suffix))
return Response(status=status.HTTP_204_NO_CONTENT)
def destroy(self, request, *args, **kwargs):
obj = self.get_object()
if getattr(obj, 'image_hash', None):
storage.delete(obj.image_path(self.image_suffix))
obj.update(**{self.hash_field: None})
return Response(status=status.HTTP_204_NO_CONTENT)
class FeedAppImageViewSet(CollectionImageViewSet):
queryset = FeedApp.objects.all()
class FeedCollectionImageViewSet(CollectionImageViewSet):
queryset = FeedCollection.objects.all()
class FeedShelfImageViewSet(FeedShelfPermissionMixin, CollectionImageViewSet):
queryset = FeedShelf.objects.all()
class FeedShelfLandingImageViewSet(FeedShelfPermissionMixin,
CollectionImageViewSet):
queryset = FeedShelf.objects.all()
hash_field = 'image_landing_hash'
image_suffix = '_landing'
class BaseFeedESView(CORSMixin, APIView):
filter_backends = [PublicAppsFilter, DeviceTypeFilter, RegionFilter,
ProfileFilter]
def __init__(self, *args, **kw):
self.ITEM_TYPES = {
'apps': feed.FEED_TYPE_APP,
'brands': feed.FEED_TYPE_BRAND,
'collections': feed.FEED_TYPE_COLL,
'shelves': feed.FEED_TYPE_SHELF,
}
self.PLURAL_TYPES = dict((v, k) for k, v in self.ITEM_TYPES.items())
self.SERIALIZERS = {
feed.FEED_TYPE_APP: FeedAppESSerializer,
feed.FEED_TYPE_BRAND: FeedBrandESSerializer,
feed.FEED_TYPE_COLL: FeedCollectionESSerializer,
feed.FEED_TYPE_SHELF: FeedShelfESSerializer,
}
self.INDICES = {
feed.FEED_TYPE_APP: settings.ES_INDEXES['mkt_feed_app'],
feed.FEED_TYPE_BRAND: settings.ES_INDEXES['mkt_feed_brand'],
feed.FEED_TYPE_COLL: settings.ES_INDEXES['mkt_feed_collection'],
feed.FEED_TYPE_SHELF: settings.ES_INDEXES['mkt_feed_shelf'],
}
super(BaseFeedESView, self).__init__(*args, **kw)
def get_feed_element_index(self):
"""Return a list of index to query all at once."""
return [
settings.ES_INDEXES['mkt_feed_app'],
settings.ES_INDEXES['mkt_feed_brand'],
settings.ES_INDEXES['mkt_feed_collection'],
settings.ES_INDEXES['mkt_feed_shelf']
]
def get_app_ids(self, feed_element):
"""Get a single feed element's app IDs."""
if hasattr(feed_element, 'app'):
return [feed_element.app]
return feed_element.apps
def get_app_ids_all(self, feed_elements):
"""From a list of feed_elements, return a list of app IDs."""
app_ids = []
for elm in feed_elements:
app_ids += self.get_app_ids(elm)
return app_ids
def get_apps(self, request, app_ids):
"""
Takes a list of app_ids. Gets the apps, including filters.
Returns an app_map for serializer context.
"""
sq = WebappIndexer.search()
if request.QUERY_PARAMS.get('filtering', '1') == '1':
# With filtering (default).
for backend in self.filter_backends:
sq = backend().filter_queryset(request, sq, self)
sq = WebappIndexer.filter_by_apps(app_ids, sq)
# Store the apps to attach to feed elements later.
with statsd.timer('mkt.feed.views.apps_query'):
apps = sq.execute().hits
return dict((app.id, app) for app in apps)
def filter_feed_items(self, request, feed_items):
"""
Removes feed items from the feed if they do not meet some
requirements like app count.
"""
for feed_item in feed_items:
item_type = feed_item['item_type']
feed_item[item_type] = self.filter_feed_element(
request, feed_item[item_type], item_type)
# Filter out feed elements that did not pass the filters.
return filter(lambda item: item[item['item_type']], feed_items)
def filter_feed_element(self, request, feed_element, item_type):
"""
If a feed element does not have enough apps, return None.
Else return the feed element.
"""
if not feed_element:
# Handle edge case where the ES index might get stale.
return None
if request.QUERY_PARAMS.get('filtering', '1') == '0':
# Without filtering
return feed_element
# No empty collections.
if 'app_count' in feed_element and feed_element['app_count'] == 0:
return None
# If the app of a featured app was filtered out.
if item_type == feed.FEED_TYPE_APP and not feed_element['app']:
return None
# Enforce minimum apps on collections.
if (item_type == feed.FEED_TYPE_COLL and
feed_element['app_count'] < feed.MIN_APPS_COLLECTION):
return None
return feed_element
@classmethod
def as_view(cls, **kwargs):
# Make all search views non_atomic: they should not need the db, or
# at least they should not need to make db writes, so they don't need
# to be wrapped in transactions.
view = super(BaseFeedESView, cls).as_view(**kwargs)
return non_atomic_requests(view)
class FeedElementSearchView(BaseFeedESView):
"""
Search view for the Curation Tools.
Returns an object keyed by feed element type
('apps', 'brands', 'collections', 'shelves').
"""
authentication_classes = [RestOAuthAuthentication,
RestSharedSecretAuthentication]
permission_classes = [GroupPermission('Feed', 'Curate')]
cors_allowed_methods = ('get',)
def _phrase(self, q):
return {
'query': q,
'type': 'phrase',
'slop': 2,
}
def get(self, request, *args, **kwargs):
q = request.GET.get('q')
# Make search.
queries = [
query.Q('match', slug=self._phrase(q)), # Slug.
query.Q('match', type=self._phrase(q)), # Type.
query.Q('match', search_names=self._phrase(q)), # Name.
query.Q('prefix', carrier=q), # Shelf carrier.
query.Q('term', region=q) # Shelf region.
]
sq = query.Bool(should=queries)
# Search.
res = {'apps': [], 'brands': [], 'collections': [], 'shelves': []}
es = Search(using=FeedItemIndexer.get_es(),
index=self.get_feed_element_index())
feed_elements = es.query(sq).execute().hits
if not feed_elements:
return response.Response(res, status=status.HTTP_404_NOT_FOUND)
# Deserialize.
ctx = {'app_map': self.get_apps(request,
self.get_app_ids_all(feed_elements)),
'request': request}
for feed_element in feed_elements:
item_type = feed_element.item_type
serializer = self.SERIALIZERS[item_type]
data = serializer(feed_element, context=ctx).data
res[self.PLURAL_TYPES[item_type]].append(data)
# Return.
return response.Response(res, status=status.HTTP_200_OK)
class FeedView(MarketplaceView, BaseFeedESView, generics.GenericAPIView):
"""
THE feed view. It hits ES with:
- a weighted function score query to get feed items
- a filter to deserialize feed elements
- a filter to deserialize apps
"""
authentication_classes = []
cors_allowed_methods = ('get',)
paginator_class = ESPaginator
permission_classes = []
def get_es_feed_query(self, sq, region=mkt.regions.RESTOFWORLD.id,
carrier=None, original_region=None):
"""
Build ES query for feed.
Must match region.
Orders by FeedItem.order.
Boosted operator shelf matching region + carrier.
Boosted operator shelf matching original_region + carrier.
region -- region ID (integer)
carrier -- carrier ID (integer)
original_region -- region from before we were falling back,
to keep the original shelf atop the RoW feed.
"""
region_filter = es_filter.Term(region=region)
shelf_filter = es_filter.Term(item_type=feed.FEED_TYPE_SHELF)
ordering_fn = es_function.FieldValueFactor(
field='order', modifier='reciprocal',
filter=es_filter.Bool(must=[region_filter],
must_not=[shelf_filter]))
boost_fn = es_function.BoostFactor(value=10000.0,
filter=shelf_filter)
if carrier is None:
# If no carrier, just match the region and exclude shelves.
return sq.query('function_score',
functions=[ordering_fn],
filter=es_filter.Bool(
must=[region_filter],
must_not=[shelf_filter]
))
# Must match region.
        # But also include the original region if we are falling back to RoW.
# The only original region feed item that will be included is a shelf
# else we wouldn't be falling back in the first place.
region_filters = [region_filter]
if original_region:
region_filters.append(es_filter.Term(region=original_region))
return sq.query(
'function_score',
functions=[boost_fn, ordering_fn],
filter=es_filter.Bool(
should=region_filters,
# Filter out shelves that don't match the carrier.
must_not=[es_filter.Bool(
must=[shelf_filter],
must_not=[es_filter.Term(carrier=carrier)])])
)
def get_es_feed_element_query(self, sq, feed_items):
"""
From a list of FeedItems with normalized feed element IDs,
return an ES query that fetches the feed elements for each feed item.
"""
filters = []
for feed_item in feed_items:
item_type = feed_item['item_type']
filters.append(es_filter.Bool(
must=[es_filter.Term(id=feed_item[item_type]),
es_filter.Term(item_type=item_type)]))
return sq.filter(es_filter.Bool(should=filters))[0:len(feed_items)]
def _check_empty_feed(self, items, rest_of_world):
"""
Return -1 if feed is empty and we are already falling back to RoW.
Return 0 if feed is empty and we are not falling back to RoW yet.
        Return 1 if the feed has at least one item and is not just an operator shelf.
"""
if not items or (len(items) == 1 and items[0].get('shelf')):
# Empty feed.
if rest_of_world:
return -1
return 0
return 1
def _handle_empty_feed(self, empty_feed_code, region, request, args,
kwargs):
"""
        If the feed is empty, this method decides what to return.
If empty_feed_code == 0: try to fallback to RoW.
If empty_feed_code == -1: 404.
"""
if empty_feed_code == 0:
return self._get(request, rest_of_world=True,
original_region=region, *args, **kwargs)
return response.Response(status=status.HTTP_404_NOT_FOUND)
def _get(self, request, rest_of_world=False, original_region=None,
*args, **kwargs):
es = FeedItemIndexer.get_es()
# Parse region.
if rest_of_world:
region = mkt.regions.RESTOFWORLD.id
else:
region = request.REGION.id
# Parse carrier.
carrier = None
q = request.QUERY_PARAMS
if q.get('carrier') and q['carrier'] in mkt.carriers.CARRIER_MAP:
carrier = mkt.carriers.CARRIER_MAP[q['carrier']].id
# Fetch FeedItems.
sq = self.get_es_feed_query(FeedItemIndexer.search(using=es),
region=region, carrier=carrier,
original_region=original_region)
# The paginator triggers the ES request.
with statsd.timer('mkt.feed.view.feed_query'):
feed_items = self.paginate_queryset(sq)
feed_ok = self._check_empty_feed(feed_items, rest_of_world)
if feed_ok != 1:
return self._handle_empty_feed(feed_ok, region, request, args,
kwargs)
# Build the meta object.
meta = mkt.api.paginator.CustomPaginationSerializer(
feed_items, context={'request': request}).data['meta']
# Set up serializer context.
feed_element_map = {
feed.FEED_TYPE_APP: {},
feed.FEED_TYPE_BRAND: {},
feed.FEED_TYPE_COLL: {},
feed.FEED_TYPE_SHELF: {},
}
# Fetch feed elements to attach to FeedItems later.
apps = []
sq = self.get_es_feed_element_query(
Search(using=es, index=self.get_feed_element_index()), feed_items)
with statsd.timer('mkt.feed.view.feed_element_query'):
feed_elements = sq.execute().hits
for feed_elm in feed_elements:
# Store the feed elements to attach to FeedItems later.
feed_element_map[feed_elm['item_type']][feed_elm['id']] = feed_elm
# Store the apps to retrieve later.
apps += self.get_app_ids(feed_elm)
# Remove dupes from apps list.
apps = list(set(apps))
# Fetch apps to attach to feed elements later.
app_map = self.get_apps(request, apps)
# Super serialize.
with statsd.timer('mkt.feed.view.serialize'):
feed_items = FeedItemESSerializer(feed_items, many=True, context={
'app_map': app_map,
'feed_element_map': feed_element_map,
'request': request
}).data
# Filter excluded apps. If there are feed items that have all their
# apps excluded, they will be removed from the feed.
feed_items = self.filter_feed_items(request, feed_items)
feed_ok = self._check_empty_feed(feed_items, rest_of_world)
if feed_ok != 1:
if not rest_of_world:
log.warning('Feed empty for region {0}. Requerying feed with '
'region=RESTOFWORLD'.format(region))
return self._handle_empty_feed(feed_ok, region, request, args,
kwargs)
return response.Response({'meta': meta, 'objects': feed_items},
status=status.HTTP_200_OK)
def get(self, request, *args, **kwargs):
with statsd.timer('mkt.feed.view'):
return self._get(request, *args, **kwargs)
class FeedElementGetView(BaseFeedESView):
"""
Fetches individual feed elements from ES. Detail views.
"""
authentication_classes = []
permission_classes = []
cors_allowed_methods = ('get',)
def get_feed_element_filter(self, sq, item_type, slug):
"""Matches a single feed element."""
bool_filter = es_filter.Bool(must=[
es_filter.Term(item_type=item_type),
es_filter.Term(**{'slug.raw': slug})
])
return sq.filter(bool_filter)
def get(self, request, item_type, slug, **kwargs):
item_type = self.ITEM_TYPES[item_type]
# Hit ES.
sq = self.get_feed_element_filter(
Search(using=FeedItemIndexer.get_es(),
index=self.INDICES[item_type]),
item_type, slug)
try:
feed_element = sq.execute().hits[0]
except IndexError:
return response.Response(status=status.HTTP_404_NOT_FOUND)
# Deserialize.
data = self.SERIALIZERS[item_type](feed_element, context={
'app_map': self.get_apps(request, self.get_app_ids(feed_element)),
'request': request
}).data
return response.Response(data, status=status.HTTP_200_OK)
class FeedElementListView(BaseFeedESView, MarketplaceView,
generics.GenericAPIView):
"""
Fetches the five most recent of a feed element type for Curation Tools.
With pagination.
"""
authentication_classes = [RestOAuthAuthentication,
RestSharedSecretAuthentication]
permission_classes = [GroupPermission('Feed', 'Curate')]
cors_allowed_methods = ('get',)
paginator_class = ESPaginator
def get_recent_feed_elements(self, sq):
"""Matches all sorted by recent."""
return sq.sort('-created').query(query.MatchAll())
def get(self, request, item_type, **kwargs):
item_type = self.ITEM_TYPES[item_type]
# Hit ES.
sq = self.get_recent_feed_elements(
Search(using=FeedItemIndexer.get_es(),
index=self.INDICES[item_type]))
feed_elements = self.paginate_queryset(sq)
if not feed_elements:
return response.Response({'objects': []},
status=status.HTTP_404_NOT_FOUND)
# Deserialize. Manually use pagination serializer because this view
# uses multiple serializers.
meta = mkt.api.paginator.CustomPaginationSerializer(
feed_elements, context={'request': request}).data['meta']
objects = self.SERIALIZERS[item_type](feed_elements, context={
'app_map': self.get_apps(request,
self.get_app_ids_all(feed_elements)),
'request': request
}, many=True).data
return response.Response({'meta': meta, 'objects': objects},
status=status.HTTP_200_OK)
| bsd-3-clause | 2,247,102,181,119,732,500 | 38.503 | 79 | 0.602435 | false |
sirchia/CouchPotatoServer | libs/guessit/guess.py | 4 | 10337 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2011 Nicolas Wack <[email protected]>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import json
import datetime
import logging
log = logging.getLogger(__name__)
class Guess(dict):
"""A Guess is a dictionary which has an associated confidence for each of
its values.
As it is a subclass of dict, you can use it everywhere you expect a
simple dict."""
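    # A minimal usage sketch (values are assumed, not taken from real data):
    #
    #   g = Guess({'title': 'Dark City'}, confidence=0.65)
    #   g['title']              # -> 'Dark City' (plain dict access)
    #   g.confidence('title')   # -> 0.65
    #   g.confidence('year')    # -> -1 (unknown property)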
def __init__(self, *args, **kwargs):
try:
confidence = kwargs.pop('confidence')
except KeyError:
confidence = 0
dict.__init__(self, *args, **kwargs)
self._confidence = {}
for prop in self:
self._confidence[prop] = confidence
def to_utf8_dict(self):
from guessit.language import Language
data = dict(self)
for prop, value in data.items():
if isinstance(value, datetime.date):
data[prop] = value.isoformat()
elif isinstance(value, Language):
data[prop] = str(value)
elif isinstance(value, unicode):
data[prop] = value.encode('utf-8')
elif isinstance(value, list):
data[prop] = [str(x) for x in value]
return data
def nice_string(self):
data = self.to_utf8_dict()
parts = json.dumps(data, indent=4).split('\n')
for i, p in enumerate(parts):
if p[:5] != ' "':
continue
prop = p.split('"')[1]
parts[i] = (' [%.2f] "' % self.confidence(prop)) + p[5:]
return '\n'.join(parts)
def __str__(self):
return str(self.to_utf8_dict())
def confidence(self, prop):
return self._confidence.get(prop, -1)
def set(self, prop, value, confidence=None):
self[prop] = value
if confidence is not None:
self._confidence[prop] = confidence
def set_confidence(self, prop, value):
self._confidence[prop] = value
def update(self, other, confidence=None):
dict.update(self, other)
if isinstance(other, Guess):
for prop in other:
self._confidence[prop] = other.confidence(prop)
if confidence is not None:
for prop in other:
self._confidence[prop] = confidence
def update_highest_confidence(self, other):
"""Update this guess with the values from the given one. In case
there is property present in both, only the one with the highest one
is kept."""
if not isinstance(other, Guess):
raise ValueError('Can only call this function on Guess instances')
for prop in other:
if prop in self and self.confidence(prop) >= other.confidence(prop):
continue
self[prop] = other[prop]
self._confidence[prop] = other.confidence(prop)
def choose_int(g1, g2):
"""Function used by merge_similar_guesses to choose between 2 possible
properties when they are integers."""
v1, c1 = g1 # value, confidence
v2, c2 = g2
if (v1 == v2):
return (v1, 1 - (1 - c1) * (1 - c2))
else:
if c1 > c2:
return (v1, c1 - c2)
else:
return (v2, c2 - c1)
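# A couple of worked examples for choose_int (inputs are assumed values):
# equal values reinforce each other, conflicting values weaken the winner.
#
#   choose_int((2, 0.75), (2, 0.5))   # -> (2, 0.875), i.e. 1 - 0.25 * 0.5
#   choose_int((2, 0.75), (3, 0.5))   # -> (2, 0.25), i.e. 0.75 - 0.5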
def choose_string(g1, g2):
"""Function used by merge_similar_guesses to choose between 2 possible
properties when they are strings.
    If the 2 strings are similar, or one is contained in the other, a single merged
    value is returned with an increased confidence.
If the 2 strings are dissimilar, the one with the higher confidence is returned, with
a weaker confidence.
Note that here, 'similar' means that 2 strings are either equal, or that they
differ very little, such as one string being the other one with the 'the' word
prepended to it.
>>> choose_string(('Hello', 0.75), ('World', 0.5))
('Hello', 0.25)
>>> choose_string(('Hello', 0.5), ('hello', 0.5))
('Hello', 0.75)
>>> choose_string(('Hello', 0.4), ('Hello World', 0.4))
('Hello', 0.64)
>>> choose_string(('simpsons', 0.5), ('The Simpsons', 0.5))
('The Simpsons', 0.75)
"""
v1, c1 = g1 # value, confidence
v2, c2 = g2
if not v1:
return g2
elif not v2:
return g1
v1, v2 = v1.strip(), v2.strip()
v1l, v2l = v1.lower(), v2.lower()
combined_prob = 1 - (1 - c1) * (1 - c2)
if v1l == v2l:
return (v1, combined_prob)
# check for common patterns
elif v1l == 'the ' + v2l:
return (v1, combined_prob)
elif v2l == 'the ' + v1l:
return (v2, combined_prob)
# if one string is contained in the other, return the shortest one
elif v2l in v1l:
return (v2, combined_prob)
elif v1l in v2l:
return (v1, combined_prob)
# in case of conflict, return the one with highest priority
else:
if c1 > c2:
return (v1, c1 - c2)
else:
return (v2, c2 - c1)
def _merge_similar_guesses_nocheck(guesses, prop, choose):
"""Take a list of guesses and merge those which have the same properties,
increasing or decreasing the confidence depending on whether their values
are similar.
This function assumes there are at least 2 valid guesses."""
similar = [guess for guess in guesses if prop in guess]
g1, g2 = similar[0], similar[1]
other_props = set(g1) & set(g2) - set([prop])
if other_props:
log.debug('guess 1: %s' % g1)
log.debug('guess 2: %s' % g2)
for prop in other_props:
if g1[prop] != g2[prop]:
log.warning('both guesses to be merged have more than one '
'different property in common, bailing out...')
return
# merge all props of s2 into s1, updating the confidence for the
# considered property
v1, v2 = g1[prop], g2[prop]
c1, c2 = g1.confidence(prop), g2.confidence(prop)
new_value, new_confidence = choose((v1, c1), (v2, c2))
if new_confidence >= c1:
msg = "Updating matching property '%s' with confidence %.2f"
else:
msg = "Updating non-matching property '%s' with confidence %.2f"
log.debug(msg % (prop, new_confidence))
g2[prop] = new_value
g2.set_confidence(prop, new_confidence)
g1.update(g2)
guesses.remove(g2)
def merge_similar_guesses(guesses, prop, choose):
"""Take a list of guesses and merge those which have the same properties,
increasing or decreasing the confidence depending on whether their values
are similar."""
similar = [guess for guess in guesses if prop in guess]
if len(similar) < 2:
# nothing to merge
return
if len(similar) == 2:
_merge_similar_guesses_nocheck(guesses, prop, choose)
if len(similar) > 2:
log.debug('complex merge, trying our best...')
before = len(guesses)
_merge_similar_guesses_nocheck(guesses, prop, choose)
after = len(guesses)
if after < before:
# recurse only when the previous call actually did something,
# otherwise we end up in an infinite loop
merge_similar_guesses(guesses, prop, choose)
def merge_append_guesses(guesses, prop):
"""Take a list of guesses and merge those which have the same properties by
appending them in a list.
DEPRECATED, remove with old guessers
"""
similar = [guess for guess in guesses if prop in guess]
if not similar:
return
merged = similar[0]
merged[prop] = [merged[prop]]
# TODO: what to do with global confidence? mean of them all?
for m in similar[1:]:
for prop2 in m:
if prop == prop2:
merged[prop].append(m[prop])
else:
if prop2 in m:
log.warning('overwriting property "%s" with value %s' % (prop2, m[prop2]))
merged[prop2] = m[prop2]
# TODO: confidence also
guesses.remove(m)
def merge_all(guesses, append=None):
"""Merge all the guesses in a single result, remove very unlikely values,
and return it.
You can specify a list of properties that should be appended into a list
instead of being merged.
>>> merge_all([ Guess({ 'season': 2 }, confidence = 0.6),
... Guess({ 'episodeNumber': 13 }, confidence = 0.8) ])
{'season': 2, 'episodeNumber': 13}
>>> merge_all([ Guess({ 'episodeNumber': 27 }, confidence = 0.02),
... Guess({ 'season': 1 }, confidence = 0.2) ])
{'season': 1}
"""
if not guesses:
return Guess()
result = guesses[0]
if append is None:
append = []
for g in guesses[1:]:
# first append our appendable properties
for prop in append:
if prop in g:
result.set(prop, result.get(prop, []) + [g[prop]],
# TODO: what to do with confidence here? maybe an
# arithmetic mean...
confidence=g.confidence(prop))
del g[prop]
# then merge the remaining ones
dups = set(result) & set(g)
if dups:
log.warning('duplicate properties %s in merged result...' % dups)
result.update_highest_confidence(g)
# delete very unlikely values
for p in result.keys():
if result.confidence(p) < 0.05:
del result[p]
# make sure our appendable properties contain unique values
for prop in append:
if prop in result:
result[prop] = list(set(result[prop]))
return result
| gpl-3.0 | 6,297,461,750,012,670,000 | 30.135542 | 94 | 0.591177 | false |
SeattleTestbed/repy_v2 | repy.py | 1 | 13432 | """
<Author>
Justin Cappos
Ivan Beschastnikh (12/24/08) -- added usage
Brent Couvrette (2/27/09) -- added servicelog commandline option
Conrad Meyer (5/22/09) -- switch option parsing to getopt
Moshe Kaplan (8/15/12) -- switched option parsing to optparse
<Start Date>
June 26th, 2008
<Description>
Restricted execution environment for python. Should stop someone
from doing "bad things" (which is also defined to include many
useful things). This module allows the user to define code that
gets called either on the receipt of a packet, when a timer fires,
on startup, and on shutdown. The restricted code can only do a few
"external" things like send data packets and store data to disk.
The CPU, memory, disk usage, and network bandwidth are all limited.
<Usage>
Usage: repy.py [options] resourcefn program_to_run.r2py [program args]
Where [options] are some combination of the following:
  --ip IP : This flag informs Repy that it is allowed to bind to the given IP.
: This flag may be asserted multiple times.
: Repy will attempt to use IP's and interfaces in the order they are given.
--execinfo : Display information regarding the current execution state.
--iface interface : This flag informs Repy that it is allowed to bind to the given interface.
: This flag may be asserted multiple times.
--nootherips : Instructs Repy to only use IP's and interfaces that are explicitly given.
: It should be noted that loopback (127.0.0.1) is always permitted.
  --logfile filename : Set up a circular log buffer of size 16KB. Suffix is .old/.new.
: This file is created in the initial working directory which initializes the call.
--stop filename : Repy will watch for the creation of this file and abort when it happens
: File can have format EXITCODE;EXITMESG. Code 44 is Stopped and is the default.
: EXITMESG will be printed prior to exiting if it is non-null.
--status filename.txt : Write status information into this file
--cwd dir : Set Current working directory
--servicelog : Enable usage of the servicelogger for internal errors
"""
import os
import sys
import time
import optparse
import threading
# Relative imports
# First make sure the version of python is supported
import checkpythonversion
checkpythonversion.ensure_python_version_is_supported()
import safe
import nanny
import emulcomm
import idhelper
import harshexit
import namespace
import nonportable
import loggingrepy
import statusstorage
import repy_constants
import nmstatusinterface
# Armon: Using VirtualNamespace as an abstraction around direct execution
import virtual_namespace
## we'll use tracebackrepy to print our exceptions
import tracebackrepy
from exception_hierarchy import *
# Disables safe, and resumes normal fork
def nonSafe_fork():
val = __orig_fork()
if val == 0 and safe._builtin_globals_backup != None:
safe._builtin_restore()
return val
# Only override fork if it exists (e.g. Windows)
if "fork" in dir(os):
__orig_fork = os.fork
os.fork = nonSafe_fork
def get_safe_context(args):
# These will be the functions and variables in the user's namespace (along
# with the builtins allowed by the safe module).
usercontext = {'mycontext':{}}
# Add to the user's namespace wrapped versions of the API functions we make
# available to the untrusted user code.
namespace.wrap_and_insert_api_functions(usercontext)
# Convert the usercontext from a dict to a SafeDict
usercontext = safe.SafeDict(usercontext)
# Allow some introspection by providing a reference to the context
usercontext["_context"] = usercontext
# call the initialize function
usercontext['callfunc'] = 'initialize'
usercontext['callargs'] = args[:]
return usercontext
def execute_namespace_until_completion(thisnamespace, thiscontext):
# I'll use this to detect when the program is idle so I know when to quit...
idlethreadcount = threading.activeCount()
# add my thread to the set of threads that are used...
event_id = idhelper.getuniqueid()
try:
nanny.tattle_add_item('events', event_id)
except Exception, e:
tracebackrepy.handle_internalerror("Failed to acquire event for '" + \
"initialize' event.\n(Exception was: %s)" % e.message, 140)
try:
thisnamespace.evaluate(thiscontext)
except SystemExit:
raise
except:
# I think it makes sense to exit if their code throws an exception...
tracebackrepy.handle_exception()
harshexit.harshexit(6)
finally:
nanny.tattle_remove_item('events', event_id)
# I've changed to the threading library, so this should increase if there are
# pending events
while threading.activeCount() > idlethreadcount:
# do accounting here?
time.sleep(0.25)
# Once there are no more events, return...
return
def init_repy_location(repy_directory):
# Translate into an absolute path
if os.path.isabs(repy_directory):
absolute_repy_directory = repy_directory
else:
    # This will join the current directory with the relative path
# and then get the absolute path to that location
absolute_repy_directory = os.path.abspath(os.path.join(os.getcwd(), repy_directory))
# Store the absolute path as the repy startup directory
repy_constants.REPY_START_DIR = absolute_repy_directory
# For security, we need to make sure that the Python path doesn't change even
# if the directory does...
newsyspath = []
for item in sys.path[:]:
if item == '' or item == '.':
newsyspath.append(os.getcwd())
else:
newsyspath.append(item)
# It should be safe now. I'm assuming the user isn't trying to undercut us
# by setting a crazy python path
sys.path = newsyspath
def add_repy_options(parser):
"""Adds the Repy command-line options to the specified optparser
"""
parser.add_option('--ip',
action="append", type="string", dest="ip" ,
help="Explicitly allow Repy to bind to the specified IP. This option can be used multiple times."
)
parser.add_option('--execinfo',
action="store_true", dest="execinfo", default=False,
help="Display information regarding the current execution state."
)
parser.add_option('--iface',
action="append", type="string", dest="interface",
help="Explicitly allow Repy to bind to the specified interface. This option can be used multiple times."
)
parser.add_option('--nootherips',
action="store_true", dest="nootherips",default=False,
help="Do not allow IPs or interfaces that are not explicitly specified"
)
parser.add_option('--logfile',
action="store", type="string", dest="logfile",
help="Set up a circular log buffer and output to logfile"
)
parser.add_option('--stop',
action="store", type="string", dest="stopfile",
help="Watch for the creation of stopfile and abort when it is created"
)
parser.add_option('--status',
action="store", type="string", dest="statusfile",
help="Write status information into statusfile"
)
parser.add_option('--cwd',
action="store", type="string", dest="cwd",
help="Set Current working directory to cwd"
)
parser.add_option('--servicelog',
action="store_true", dest="servicelog",
help="Enable usage of the servicelogger for internal errors"
)
def parse_options(options):
""" Parse the specified options and initialize all required structures
Note: This modifies global state, specifically, the emulcomm module
"""
if options.ip:
emulcomm.user_ip_interface_preferences = True
# Append this ip to the list of available ones if it is new
for ip in options.ip:
if (True, ip) not in emulcomm.user_specified_ip_interface_list:
emulcomm.user_specified_ip_interface_list.append((True, ip))
if options.interface:
emulcomm.user_ip_interface_preferences = True
# Append this interface to the list of available ones if it is new
for interface in options.interface:
if (False, interface) not in emulcomm.user_specified_ip_interface_list:
emulcomm.user_specified_ip_interface_list.append((False, interface))
# Check if they have told us to only use explicitly allowed IP's and interfaces
if options.nootherips:
# Set user preference to True
emulcomm.user_ip_interface_preferences = True
# Disable nonspecified IP's
emulcomm.allow_nonspecified_ips = False
# set up the circular log buffer...
# Armon: Initialize the circular logger before starting the nanny
if options.logfile:
# time to set up the circular logger
loggerfo = loggingrepy.circular_logger(options.logfile)
# and redirect err and out there...
sys.stdout = loggerfo
sys.stderr = loggerfo
else:
# let's make it so that the output (via print) is always flushed
sys.stdout = loggingrepy.flush_logger(sys.stdout)
# We also need to pass in whether or not we are going to be using the service
# log for repy. We provide the repy directory so that the vessel information
# can be found regardless of where we are called from...
tracebackrepy.initialize(options.servicelog, repy_constants.REPY_START_DIR)
# Set Current Working Directory
if options.cwd:
os.chdir(options.cwd)
# Update repy current directory
repy_constants.REPY_CURRENT_DIR = os.path.abspath(os.getcwd())
# Initialize the NM status interface
nmstatusinterface.init(options.stopfile, options.statusfile)
# Write out our initial status
statusstorage.write_status("Started")
def initialize_nanny(resourcefn):
# start the nanny up and read the resource file.
# JAC: Should this take a string instead?
nanny.start_resource_nanny(resourcefn)
# now, let's fire up the cpu / disk / memory monitor...
nonportable.monitor_cpu_disk_and_mem()
# JAC: I believe this is needed for interface / ip-based restrictions
emulcomm.update_ip_cache()
def main():
# JAC: This function should be kept as stable if possible. Others who
# extend Repy may be doing essentially the same thing in their main and
# your changes may not be reflected there!
# Armon: The CMD line path to repy is the first argument
repy_location = sys.argv[0]
# Get the directory repy is in
repy_directory = os.path.dirname(repy_location)
init_repy_location(repy_directory)
### PARSE OPTIONS. These are command line in our case, but could be from
### anywhere if this is repurposed...
usage = "USAGE: repy.py [options] resource_file program_to_run.r2py [program args]"
parser = optparse.OptionParser(usage=usage)
# Set optparse to stop parsing arguments on the first non-option arg. We
# need this so that command-line args to the sandboxed Repy program don't
# clash or get confused with args to the sandbox (repy.py) itself.
# See also SeattleTestbed/repy_v2#101 .
# (Per the USAGE string above, the user program name is the first
# non-option argument which causes parsing to stop.)
parser.disable_interspersed_args()
add_repy_options(parser)
options, args = parser.parse_args()
if len(args) < 2:
print "Repy requires a resource file and the program to run!"
parser.print_help()
sys.exit(1)
resourcefn = args[0]
progname = args[1]
progargs = args[2:]
# Do a huge amount of initialization.
parse_options(options)
### start resource restrictions, etc. for the nanny
initialize_nanny(resourcefn)
# Read the user code from the file
try:
filehandle = open(progname)
usercode = filehandle.read()
filehandle.close()
except:
print "FATAL ERROR: Unable to read the specified program file: '%s'" % (progname)
sys.exit(1)
# create the namespace...
try:
newnamespace = virtual_namespace.VirtualNamespace(usercode, progname)
except CodeUnsafeError, e:
print "Specified repy program is unsafe!"
print "Static-code analysis failed with error: "+str(e)
harshexit.harshexit(5)
# allow the (potentially large) code string to be garbage collected
del usercode
# Insert program log separator and execution information
if options.execinfo:
print '=' * 40
print "Running program:", progname
print "Arguments:", progargs
print '=' * 40
# get a new namespace
newcontext = get_safe_context(progargs)
# one could insert a new function for repy code here by changing newcontext
# to contain an additional function.
# run the code to completion...
execute_namespace_until_completion(newnamespace, newcontext)
# No more pending events for the user thread, we exit
harshexit.harshexit(0)
if __name__ == '__main__':
try:
main()
except SystemExit:
harshexit.harshexit(4)
except:
tracebackrepy.handle_exception()
harshexit.harshexit(3)
| mit | 9,118,748,605,965,150,000 | 33.005063 | 124 | 0.681581 | false |
wavicles/pycode-browser | Code/Maths/julia.py | 8 | 1094 | '''
Region of a complex plane ranging from -1 to +1 in both real
and imaginary axes is represented using a 2 dimensional matrix
having X x Y elements. For X and Y equal to 200, the stepsize
in the complex plane is 2.0/200 = 0.01.
The nature of the pattern depends very much on the value of c.
'''
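# A quick sanity check of the mapping used below (assuming X = Y = 200 and
# rlim = ilim = 1.0): matrix index (0, 0) maps to -1.0 - 1.0j and index
# (199, 199) maps to 0.99 + 0.99j, so the matrix spans the square from
# -1 - 1j to just under 1 + 1j.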
from pylab import *
X = 200
Y = 200
rlim = 1.0
ilim = 1.0
rscale = 2*rlim / X
iscale = 2*ilim / Y
MAXIT = 100
MAXABS = 2.0
c = 0.02 - 0.8j # The constant in equation z**2 + c
m = zeros([X,Y],dtype=uint8) # A two dimensional array
def numit(x,y): # number of iterations to diverge
z = complex(x,y)
for k in range(MAXIT):
if abs(z) <= MAXABS:
z = z**2 + c
else:
return k # diverged after k trials
return MAXIT # did not diverge,
for x in range(X):
for y in range(Y):
re = rscale * x - rlim # complex number represented
im = iscale * y - ilim # by the (x,y) coordinate
m[x][y] = numit(re,im) # get the color for (x,y)
imshow(m) # Colored plot using the two dimensional matrix
show()
| gpl-3.0 | -2,232,362,152,945,310,500 | 26.05 | 62 | 0.609689 | false |
coteyr/home-assistant | tests/components/automation/test_sun.py | 2 | 13076 | """
tests.components.automation.test_sun
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests sun automation.
"""
from datetime import datetime
import unittest
from unittest.mock import patch
from homeassistant.components import sun
import homeassistant.components.automation as automation
import homeassistant.util.dt as dt_util
from tests.common import fire_time_changed, get_test_home_assistant
class TestAutomationSun(unittest.TestCase):
""" Test the sun automation. """
def setUp(self): # pylint: disable=invalid-name
self.hass = get_test_home_assistant()
self.hass.config.components.append('sun')
self.calls = []
def record_call(service):
self.calls.append(service)
self.hass.services.register('test', 'automation', record_call)
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.hass.stop()
def test_sunset_trigger(self):
self.hass.states.set(sun.ENTITY_ID, sun.STATE_ABOVE_HORIZON, {
sun.STATE_ATTR_NEXT_SETTING: '02:00:00 16-09-2015',
})
now = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC)
trigger_time = datetime(2015, 9, 16, 2, tzinfo=dt_util.UTC)
with patch('homeassistant.components.automation.sun.dt_util.utcnow',
return_value=now):
self.assertTrue(automation.setup(self.hass, {
automation.DOMAIN: {
'trigger': {
'platform': 'sun',
'event': 'sunset',
},
'action': {
'service': 'test.automation',
}
}
}))
fire_time_changed(self.hass, trigger_time)
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
def test_sunrise_trigger(self):
self.hass.states.set(sun.ENTITY_ID, sun.STATE_ABOVE_HORIZON, {
sun.STATE_ATTR_NEXT_RISING: '14:00:00 16-09-2015',
})
now = datetime(2015, 9, 13, 23, tzinfo=dt_util.UTC)
trigger_time = datetime(2015, 9, 16, 14, tzinfo=dt_util.UTC)
with patch('homeassistant.components.automation.sun.dt_util.utcnow',
return_value=now):
self.assertTrue(automation.setup(self.hass, {
automation.DOMAIN: {
'trigger': {
'platform': 'sun',
'event': 'sunrise',
},
'action': {
'service': 'test.automation',
}
}
}))
fire_time_changed(self.hass, trigger_time)
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
def test_sunset_trigger_with_offset(self):
self.hass.states.set(sun.ENTITY_ID, sun.STATE_ABOVE_HORIZON, {
sun.STATE_ATTR_NEXT_SETTING: '02:00:00 16-09-2015',
})
now = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC)
trigger_time = datetime(2015, 9, 16, 2, 30, tzinfo=dt_util.UTC)
with patch('homeassistant.components.automation.sun.dt_util.utcnow',
return_value=now):
self.assertTrue(automation.setup(self.hass, {
automation.DOMAIN: {
'trigger': {
'platform': 'sun',
'event': 'sunset',
'offset': '0:30:00'
},
'action': {
'service': 'test.automation',
}
}
}))
fire_time_changed(self.hass, trigger_time)
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
def test_sunrise_trigger_with_offset(self):
self.hass.states.set(sun.ENTITY_ID, sun.STATE_ABOVE_HORIZON, {
sun.STATE_ATTR_NEXT_RISING: '14:00:00 16-09-2015',
})
now = datetime(2015, 9, 13, 23, tzinfo=dt_util.UTC)
trigger_time = datetime(2015, 9, 16, 13, 30, tzinfo=dt_util.UTC)
with patch('homeassistant.components.automation.sun.dt_util.utcnow',
return_value=now):
self.assertTrue(automation.setup(self.hass, {
automation.DOMAIN: {
'trigger': {
'platform': 'sun',
'event': 'sunrise',
'offset': '-0:30:00'
},
'action': {
'service': 'test.automation',
}
}
}))
fire_time_changed(self.hass, trigger_time)
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_action_before(self):
self.hass.states.set(sun.ENTITY_ID, sun.STATE_ABOVE_HORIZON, {
sun.STATE_ATTR_NEXT_RISING: '14:00:00 16-09-2015',
})
automation.setup(self.hass, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'condition': {
'platform': 'sun',
'before': 'sunrise',
},
'action': {
'service': 'test.automation'
}
}
})
now = datetime(2015, 9, 16, 15, tzinfo=dt_util.UTC)
with patch('homeassistant.components.automation.sun.dt_util.now',
return_value=now):
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(0, len(self.calls))
now = datetime(2015, 9, 16, 10, tzinfo=dt_util.UTC)
with patch('homeassistant.components.automation.sun.dt_util.now',
return_value=now):
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_action_after(self):
self.hass.states.set(sun.ENTITY_ID, sun.STATE_ABOVE_HORIZON, {
sun.STATE_ATTR_NEXT_RISING: '14:00:00 16-09-2015',
})
automation.setup(self.hass, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'condition': {
'platform': 'sun',
'after': 'sunrise',
},
'action': {
'service': 'test.automation'
}
}
})
now = datetime(2015, 9, 16, 13, tzinfo=dt_util.UTC)
with patch('homeassistant.components.automation.sun.dt_util.now',
return_value=now):
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(0, len(self.calls))
now = datetime(2015, 9, 16, 15, tzinfo=dt_util.UTC)
with patch('homeassistant.components.automation.sun.dt_util.now',
return_value=now):
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_action_before_with_offset(self):
self.hass.states.set(sun.ENTITY_ID, sun.STATE_ABOVE_HORIZON, {
sun.STATE_ATTR_NEXT_RISING: '14:00:00 16-09-2015',
})
automation.setup(self.hass, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'condition': {
'platform': 'sun',
'before': 'sunrise',
'before_offset': '+1:00:00'
},
'action': {
'service': 'test.automation'
}
}
})
now = datetime(2015, 9, 16, 15, 1, tzinfo=dt_util.UTC)
with patch('homeassistant.components.automation.sun.dt_util.now',
return_value=now):
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(0, len(self.calls))
now = datetime(2015, 9, 16, 15, tzinfo=dt_util.UTC)
with patch('homeassistant.components.automation.sun.dt_util.now',
return_value=now):
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_action_after_with_offset(self):
self.hass.states.set(sun.ENTITY_ID, sun.STATE_ABOVE_HORIZON, {
sun.STATE_ATTR_NEXT_RISING: '14:00:00 16-09-2015',
})
automation.setup(self.hass, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'condition': {
'platform': 'sun',
'after': 'sunrise',
'after_offset': '+1:00:00'
},
'action': {
'service': 'test.automation'
}
}
})
now = datetime(2015, 9, 16, 14, 59, tzinfo=dt_util.UTC)
with patch('homeassistant.components.automation.sun.dt_util.now',
return_value=now):
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(0, len(self.calls))
now = datetime(2015, 9, 16, 15, tzinfo=dt_util.UTC)
with patch('homeassistant.components.automation.sun.dt_util.now',
return_value=now):
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_action_before_and_after_during(self):
self.hass.states.set(sun.ENTITY_ID, sun.STATE_ABOVE_HORIZON, {
sun.STATE_ATTR_NEXT_RISING: '10:00:00 16-09-2015',
sun.STATE_ATTR_NEXT_SETTING: '15:00:00 16-09-2015',
})
automation.setup(self.hass, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'condition': {
'platform': 'sun',
'after': 'sunrise',
'before': 'sunset'
},
'action': {
'service': 'test.automation'
}
}
})
now = datetime(2015, 9, 16, 9, 59, tzinfo=dt_util.UTC)
with patch('homeassistant.components.automation.sun.dt_util.now',
return_value=now):
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(0, len(self.calls))
now = datetime(2015, 9, 16, 15, 1, tzinfo=dt_util.UTC)
with patch('homeassistant.components.automation.sun.dt_util.now',
return_value=now):
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(0, len(self.calls))
now = datetime(2015, 9, 16, 12, tzinfo=dt_util.UTC)
with patch('homeassistant.components.automation.sun.dt_util.now',
return_value=now):
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_action_after_different_tz(self):
import pytz
self.hass.states.set(sun.ENTITY_ID, sun.STATE_ABOVE_HORIZON, {
sun.STATE_ATTR_NEXT_SETTING: '17:30:00 16-09-2015',
})
automation.setup(self.hass, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'condition': {
'platform': 'sun',
'after': 'sunset',
},
'action': {
'service': 'test.automation'
}
}
})
# Before
now = datetime(2015, 9, 16, 17, tzinfo=pytz.timezone('US/Mountain'))
with patch('homeassistant.components.automation.sun.dt_util.now',
return_value=now):
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(0, len(self.calls))
# After
now = datetime(2015, 9, 16, 18, tzinfo=pytz.timezone('US/Mountain'))
with patch('homeassistant.components.automation.sun.dt_util.now',
return_value=now):
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
| mit | 8,928,940,585,674,092,000 | 34.824658 | 76 | 0.4948 | false |
xuwei95/shadowsocksr | shadowsocks/obfsplugin/verify.py | 13 | 4374 | #!/usr/bin/env python
#
# Copyright 2015-2015 breakwa11
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
import binascii
import base64
import time
import datetime
import random
import struct
import zlib
import hmac
import hashlib
import shadowsocks
from shadowsocks import common
from shadowsocks.obfsplugin import plain
from shadowsocks.common import to_bytes, to_str, ord, chr
def create_verify_deflate(method):
return verify_deflate(method)
obfs_map = {
'verify_deflate': (create_verify_deflate,),
}
def match_begin(str1, str2):
if len(str1) >= len(str2):
if str1[:len(str2)] == str2:
return True
return False
class obfs_verify_data(object):
def __init__(self):
pass
class verify_base(plain.plain):
def __init__(self, method):
super(verify_base, self).__init__(method)
self.method = method
def init_data(self):
return obfs_verify_data()
def set_server_info(self, server_info):
self.server_info = server_info
def client_encode(self, buf):
return buf
def client_decode(self, buf):
return (buf, False)
def server_encode(self, buf):
return buf
def server_decode(self, buf):
return (buf, True, False)
class verify_deflate(verify_base):
def __init__(self, method):
super(verify_deflate, self).__init__(method)
self.recv_buf = b''
self.unit_len = 32700
self.decrypt_packet_num = 0
self.raw_trans = False
def pack_data(self, buf):
if len(buf) == 0:
return b''
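        # Framing used below: zlib-compress the payload, then replace the
        # 2-byte zlib header (0x78 0x9c) with a 2-byte big-endian length
        # field; the peer re-attaches the header before inflating (see
        # client_post_decrypt / server_post_decrypt).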
data = zlib.compress(buf)
data = struct.pack('>H', len(data)) + data[2:]
return data
def client_pre_encrypt(self, buf):
ret = b''
while len(buf) > self.unit_len:
ret += self.pack_data(buf[:self.unit_len])
buf = buf[self.unit_len:]
ret += self.pack_data(buf)
return ret
def client_post_decrypt(self, buf):
if self.raw_trans:
return buf
self.recv_buf += buf
out_buf = b''
while len(self.recv_buf) > 2:
length = struct.unpack('>H', self.recv_buf[:2])[0]
if length >= 32768 or length < 6:
self.raw_trans = True
self.recv_buf = b''
raise Exception('client_post_decrypt data error')
if length > len(self.recv_buf):
break
out_buf += zlib.decompress(b'x\x9c' + self.recv_buf[2:length])
self.recv_buf = self.recv_buf[length:]
if out_buf:
self.decrypt_packet_num += 1
return out_buf
def server_pre_encrypt(self, buf):
ret = b''
while len(buf) > self.unit_len:
ret += self.pack_data(buf[:self.unit_len])
buf = buf[self.unit_len:]
ret += self.pack_data(buf)
return ret
def server_post_decrypt(self, buf):
if self.raw_trans:
return (buf, False)
self.recv_buf += buf
out_buf = b''
while len(self.recv_buf) > 2:
length = struct.unpack('>H', self.recv_buf[:2])[0]
if length >= 32768 or length < 6:
self.raw_trans = True
self.recv_buf = b''
if self.decrypt_packet_num == 0:
return (b'E'*2048, False)
else:
                    raise Exception('server_post_decrypt data error')
if length > len(self.recv_buf):
break
out_buf += zlib.decompress(b'\x78\x9c' + self.recv_buf[2:length])
self.recv_buf = self.recv_buf[length:]
if out_buf:
self.decrypt_packet_num += 1
return (out_buf, False)
| apache-2.0 | 5,763,225,900,761,875,000 | 27.402597 | 77 | 0.585277 | false |
FInAT/FInAT | finat/quadrature.py | 1 | 3725 | from abc import ABCMeta, abstractproperty
from functools import reduce
import numpy
import gem
from gem.utils import cached_property
from FIAT.reference_element import LINE, QUADRILATERAL, TENSORPRODUCT
from FIAT.quadrature import GaussLegendreQuadratureLineRule
from FIAT.quadrature_schemes import create_quadrature as fiat_scheme
from finat.point_set import PointSet, GaussLegendrePointSet, TensorPointSet
def make_quadrature(ref_el, degree, scheme="default"):
"""
Generate quadrature rule for given reference element
that will integrate an polynomial of order 'degree' exactly.
For low-degree (<=6) polynomials on triangles and tetrahedra, this
uses hard-coded rules, otherwise it falls back to a collapsed
Gauss scheme on simplices. On tensor-product cells, it is a
tensor-product quadrature rule of the subcells.
:arg ref_el: The FIAT cell to create the quadrature for.
:arg degree: The degree of polynomial that the rule should
integrate exactly.
"""
if ref_el.get_shape() == TENSORPRODUCT:
try:
degree = tuple(degree)
except TypeError:
degree = (degree,) * len(ref_el.cells)
assert len(ref_el.cells) == len(degree)
quad_rules = [make_quadrature(c, d, scheme)
for c, d in zip(ref_el.cells, degree)]
return TensorProductQuadratureRule(quad_rules)
if ref_el.get_shape() == QUADRILATERAL:
return make_quadrature(ref_el.product, degree, scheme)
if degree < 0:
raise ValueError("Need positive degree, not %d" % degree)
if ref_el.get_shape() == LINE:
        # FIAT uses Gauss-Legendre line quadrature, however, since we
# symbolically label it as such, we wish not to risk attaching
# the wrong label in case FIAT changes. So we explicitly ask
        # for Gauss-Legendre line quadrature.
num_points = (degree + 1 + 1) // 2 # exact integration
fiat_rule = GaussLegendreQuadratureLineRule(ref_el, num_points)
point_set = GaussLegendrePointSet(fiat_rule.get_points())
return QuadratureRule(point_set, fiat_rule.get_weights())
fiat_rule = fiat_scheme(ref_el, degree, scheme)
return QuadratureRule(PointSet(fiat_rule.get_points()), fiat_rule.get_weights())
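# A small usage sketch (assuming a FIAT reference cell is available; the cell
# class name here is only an example):
#
#   from FIAT.reference_element import UFCTriangle
#   rule = make_quadrature(UFCTriangle(), 3)
#   rule.point_set.points, rule.weights   # quadrature points and weights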
class AbstractQuadratureRule(metaclass=ABCMeta):
"""Abstract class representing a quadrature rule as point set and a
corresponding set of weights."""
@abstractproperty
def point_set(self):
"""Point set object representing the quadrature points."""
@abstractproperty
def weight_expression(self):
"""GEM expression describing the weights, with the same free indices
as the point set."""
class QuadratureRule(AbstractQuadratureRule):
"""Generic quadrature rule with no internal structure."""
def __init__(self, point_set, weights):
weights = numpy.asarray(weights)
assert len(point_set.points) == len(weights)
self.point_set = point_set
self.weights = numpy.asarray(weights)
@cached_property
def point_set(self):
pass # set at initialisation
@cached_property
def weight_expression(self):
return gem.Indexed(gem.Literal(self.weights), self.point_set.indices)
class TensorProductQuadratureRule(AbstractQuadratureRule):
"""Quadrature rule which is a tensor product of other rules."""
def __init__(self, factors):
self.factors = tuple(factors)
@cached_property
def point_set(self):
return TensorPointSet(q.point_set for q in self.factors)
@cached_property
def weight_expression(self):
return reduce(gem.Product, (q.weight_expression for q in self.factors))
| mit | -827,749,074,207,715,800 | 34.141509 | 84 | 0.69047 | false |
sekikn/incubator-airflow | airflow/providers/amazon/aws/hooks/batch_client.py | 3 | 18330 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
A client for AWS batch services
.. seealso::
- http://boto3.readthedocs.io/en/latest/guide/configuration.html
- http://boto3.readthedocs.io/en/latest/reference/services/batch.html
- https://docs.aws.amazon.com/batch/latest/APIReference/Welcome.html
"""
from random import uniform
from time import sleep
from typing import Dict, List, Optional, Union
import botocore.client
import botocore.exceptions
import botocore.waiter
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.typing_compat import Protocol, runtime_checkable
# Add exceptions to pylint for the boto3 protocol only; ideally the boto3 library
# could provide
# protocols for all their dynamically generated classes (try to migrate this to a PR on botocore).
# Note that the use of invalid-name parameters should be restricted to the boto3 mappings only;
# all the Airflow wrappers of boto3 clients should not adopt invalid-names to match boto3.
# pylint: disable=invalid-name, unused-argument
@runtime_checkable
class AwsBatchProtocol(Protocol):
"""
A structured Protocol for ``boto3.client('batch') -> botocore.client.Batch``.
This is used for type hints on :py:meth:`.AwsBatchClient.client`; it covers
only the subset of client methods required.
.. seealso::
- https://mypy.readthedocs.io/en/latest/protocols.html
- http://boto3.readthedocs.io/en/latest/reference/services/batch.html
"""
def describe_jobs(self, jobs: List[str]) -> Dict:
"""
Get job descriptions from AWS batch
:param jobs: a list of JobId to describe
:type jobs: List[str]
:return: an API response to describe jobs
:rtype: Dict
"""
...
def get_waiter(self, waiterName: str) -> botocore.waiter.Waiter:
"""
Get an AWS Batch service waiter
:param waiterName: The name of the waiter. The name should match
the name (including the casing) of the key name in the waiter
model file (typically this is CamelCasing).
:type waiterName: str
:return: a waiter object for the named AWS batch service
:rtype: botocore.waiter.Waiter
.. note::
AWS batch might not have any waiters (until botocore PR-1307 is released).
.. code-block:: python
import boto3
boto3.client('batch').waiter_names == []
.. seealso::
- https://boto3.amazonaws.com/v1/documentation/api/latest/guide/clients.html#waiters
- https://github.com/boto/botocore/pull/1307
"""
...
def submit_job(
self,
jobName: str,
jobQueue: str,
jobDefinition: str,
arrayProperties: Dict,
parameters: Dict,
containerOverrides: Dict,
tags: Dict,
) -> Dict:
"""
Submit a batch job
:param jobName: the name for the AWS batch job
:type jobName: str
:param jobQueue: the queue name on AWS Batch
:type jobQueue: str
:param jobDefinition: the job definition name on AWS Batch
:type jobDefinition: str
:param arrayProperties: the same parameter that boto3 will receive
:type arrayProperties: Dict
:param parameters: the same parameter that boto3 will receive
:type parameters: Dict
:param containerOverrides: the same parameter that boto3 will receive
:type containerOverrides: Dict
:param tags: the same parameter that boto3 will receive
:type tags: Dict
:return: an API response
:rtype: Dict
"""
...
def terminate_job(self, jobId: str, reason: str) -> Dict:
"""
Terminate a batch job
:param jobId: a job ID to terminate
:type jobId: str
:param reason: a reason to terminate job ID
:type reason: str
:return: an API response
:rtype: Dict
"""
...
# Note that the use of invalid-name parameters should be restricted to the boto3 mappings only;
# all the Airflow wrappers of boto3 clients should not adopt invalid-names to match boto3.
# pylint: enable=invalid-name, unused-argument
class AwsBatchClientHook(AwsBaseHook):
"""
A client for AWS batch services.
:param max_retries: exponential back-off retries, 4200 = 48 hours;
polling is only used when waiters is None
:type max_retries: Optional[int]
:param status_retries: number of HTTP retries to get job status, 10;
polling is only used when waiters is None
:type status_retries: Optional[int]
.. note::
Several methods use a default random delay to check or poll for job status, i.e.
``random.uniform(DEFAULT_DELAY_MIN, DEFAULT_DELAY_MAX)``
Using a random interval helps to avoid AWS API throttle limits
when many concurrent tasks request job-descriptions.
To modify the global defaults for the range of jitter allowed when a
random delay is used to check batch job status, modify these defaults, e.g.:
.. code-block::
AwsBatchClient.DEFAULT_DELAY_MIN = 0
AwsBatchClient.DEFAULT_DELAY_MAX = 5
When explicit delay values are used, a 1 second random jitter is applied to the
    delay (e.g. a delay of 0 sec will be a ``random.uniform(0, 1)`` delay). It is
generally recommended that random jitter is added to API requests. A
convenience method is provided for this, e.g. to get a random delay of
10 sec +/- 5 sec: ``delay = AwsBatchClient.add_jitter(10, width=5, minima=0)``
.. seealso::
- https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/batch.html
- https://docs.aws.amazon.com/general/latest/gr/api-retries.html
- https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
"""
MAX_RETRIES = 4200
STATUS_RETRIES = 10
# delays are in seconds
DEFAULT_DELAY_MIN = 1
DEFAULT_DELAY_MAX = 10
def __init__(
self, *args, max_retries: Optional[int] = None, status_retries: Optional[int] = None, **kwargs
) -> None:
# https://github.com/python/mypy/issues/6799 hence type: ignore
super().__init__(client_type='batch', *args, **kwargs) # type: ignore
self.max_retries = max_retries or self.MAX_RETRIES
self.status_retries = status_retries or self.STATUS_RETRIES
@property
def client(self) -> Union[AwsBatchProtocol, botocore.client.BaseClient]: # noqa: D402
"""
An AWS API client for batch services, like ``boto3.client('batch')``
:return: a boto3 'batch' client for the ``.region_name``
:rtype: Union[AwsBatchProtocol, botocore.client.BaseClient]
"""
return self.conn
def terminate_job(self, job_id: str, reason: str) -> Dict:
"""
Terminate a batch job
:param job_id: a job ID to terminate
:type job_id: str
:param reason: a reason to terminate job ID
:type reason: str
:return: an API response
:rtype: Dict
"""
response = self.get_conn().terminate_job(jobId=job_id, reason=reason)
self.log.info(response)
return response
def check_job_success(self, job_id: str) -> bool:
"""
Check the final status of the batch job; return True if the job
'SUCCEEDED', else raise an AirflowException
:param job_id: a batch job ID
:type job_id: str
:rtype: bool
:raises: AirflowException
"""
job = self.get_job_description(job_id)
job_status = job.get("status")
if job_status == "SUCCEEDED":
self.log.info("AWS batch job (%s) succeeded: %s", job_id, job)
return True
if job_status == "FAILED":
raise AirflowException(f"AWS Batch job ({job_id}) failed: {job}")
if job_status in ["SUBMITTED", "PENDING", "RUNNABLE", "STARTING", "RUNNING"]:
raise AirflowException(f"AWS Batch job ({job_id}) is not complete: {job}")
raise AirflowException(f"AWS Batch job ({job_id}) has unknown status: {job}")
def wait_for_job(self, job_id: str, delay: Union[int, float, None] = None) -> None:
"""
Wait for batch job to complete
:param job_id: a batch job ID
:type job_id: str
:param delay: a delay before polling for job status
:type delay: Optional[Union[int, float]]
:raises: AirflowException
"""
self.delay(delay)
self.poll_for_job_running(job_id, delay)
self.poll_for_job_complete(job_id, delay)
self.log.info("AWS Batch job (%s) has completed", job_id)
def poll_for_job_running(self, job_id: str, delay: Union[int, float, None] = None) -> None:
"""
Poll for job running. The status that indicates a job is running or
already complete are: 'RUNNING'|'SUCCEEDED'|'FAILED'.
So the status options that this will wait for are the transitions from:
'SUBMITTED'>'PENDING'>'RUNNABLE'>'STARTING'>'RUNNING'|'SUCCEEDED'|'FAILED'
The completed status options are included for cases where the status
changes too quickly for polling to detect a RUNNING status that moves
quickly from STARTING to RUNNING to completed (often a failure).
:param job_id: a batch job ID
:type job_id: str
:param delay: a delay before polling for job status
:type delay: Optional[Union[int, float]]
:raises: AirflowException
"""
self.delay(delay)
running_status = ["RUNNING", "SUCCEEDED", "FAILED"]
self.poll_job_status(job_id, running_status)
def poll_for_job_complete(self, job_id: str, delay: Union[int, float, None] = None) -> None:
"""
        Poll for job completion. The statuses that indicate job completion
        are: 'SUCCEEDED'|'FAILED'.
So the status options that this will wait for are the transitions from:
'SUBMITTED'>'PENDING'>'RUNNABLE'>'STARTING'>'RUNNING'>'SUCCEEDED'|'FAILED'
:param job_id: a batch job ID
:type job_id: str
:param delay: a delay before polling for job status
:type delay: Optional[Union[int, float]]
:raises: AirflowException
"""
self.delay(delay)
complete_status = ["SUCCEEDED", "FAILED"]
self.poll_job_status(job_id, complete_status)
def poll_job_status(self, job_id: str, match_status: List[str]) -> bool:
"""
Poll for job status using an exponential back-off strategy (with max_retries).
:param job_id: a batch job ID
:type job_id: str
:param match_status: a list of job status to match; the batch job status are:
'SUBMITTED'|'PENDING'|'RUNNABLE'|'STARTING'|'RUNNING'|'SUCCEEDED'|'FAILED'
:type match_status: List[str]
:rtype: bool
:raises: AirflowException
"""
retries = 0
while True:
job = self.get_job_description(job_id)
job_status = job.get("status")
self.log.info(
"AWS Batch job (%s) check status (%s) in %s",
job_id,
job_status,
match_status,
)
if job_status in match_status:
return True
if retries >= self.max_retries:
raise AirflowException(f"AWS Batch job ({job_id}) status checks exceed max_retries")
retries += 1
pause = self.exponential_delay(retries)
self.log.info(
"AWS Batch job (%s) status check (%d of %d) in the next %.2f seconds",
job_id,
retries,
self.max_retries,
pause,
)
self.delay(pause)
def get_job_description(self, job_id: str) -> Dict:
"""
Get job description (using status_retries).
:param job_id: a batch job ID
:type job_id: str
:return: an API response for describe jobs
:rtype: Dict
:raises: AirflowException
"""
retries = 0
while True:
try:
response = self.get_conn().describe_jobs(jobs=[job_id])
return self.parse_job_description(job_id, response)
except botocore.exceptions.ClientError as err:
error = err.response.get("Error", {})
if error.get("Code") == "TooManyRequestsException":
pass # allow it to retry, if possible
else:
raise AirflowException(f"AWS Batch job ({job_id}) description error: {err}")
retries += 1
if retries >= self.status_retries:
raise AirflowException(
"AWS Batch job ({}) description error: exceeded "
"status_retries ({})".format(job_id, self.status_retries)
)
pause = self.exponential_delay(retries)
self.log.info(
"AWS Batch job (%s) description retry (%d of %d) in the next %.2f seconds",
job_id,
retries,
self.status_retries,
pause,
)
self.delay(pause)
@staticmethod
def parse_job_description(job_id: str, response: Dict) -> Dict:
"""
Parse job description to extract description for job_id
:param job_id: a batch job ID
:type job_id: str
:param response: an API response for describe jobs
:type response: Dict
:return: an API response to describe job_id
:rtype: Dict
:raises: AirflowException
"""
jobs = response.get("jobs", [])
matching_jobs = [job for job in jobs if job.get("jobId") == job_id]
if len(matching_jobs) != 1:
raise AirflowException(f"AWS Batch job ({job_id}) description error: response: {response}")
return matching_jobs[0]
@staticmethod
def add_jitter(
delay: Union[int, float], width: Union[int, float] = 1, minima: Union[int, float] = 0
) -> float:
"""
Use delay +/- width for random jitter
Adding jitter to status polling can help to avoid
AWS batch API limits for monitoring batch jobs with
a high concurrency in Airflow tasks.
:param delay: number of seconds to pause;
delay is assumed to be a positive number
:type delay: Union[int, float]
:param width: delay +/- width for random jitter;
width is assumed to be a positive number
:type width: Union[int, float]
:param minima: minimum delay allowed;
minima is assumed to be a non-negative number
:type minima: Union[int, float]
:return: uniform(delay - width, delay + width) jitter
and it is a non-negative number
:rtype: float
"""
delay = abs(delay)
width = abs(width)
minima = abs(minima)
lower = max(minima, delay - width)
upper = delay + width
return uniform(lower, upper)
@staticmethod
def delay(delay: Union[int, float, None] = None) -> None:
"""
Pause execution for ``delay`` seconds.
:param delay: a delay to pause execution using ``time.sleep(delay)``;
a small 1 second jitter is applied to the delay.
:type delay: Optional[Union[int, float]]
.. note::
This method uses a default random delay, i.e.
``random.uniform(DEFAULT_DELAY_MIN, DEFAULT_DELAY_MAX)``;
using a random interval helps to avoid AWS API throttle limits
when many concurrent tasks request job-descriptions.
"""
if delay is None:
delay = uniform(AwsBatchClientHook.DEFAULT_DELAY_MIN, AwsBatchClientHook.DEFAULT_DELAY_MAX)
else:
delay = AwsBatchClientHook.add_jitter(delay)
sleep(delay)
@staticmethod
def exponential_delay(tries: int) -> float:
"""
An exponential back-off delay, with random jitter. There is a maximum
interval of 10 minutes (with random jitter between 3 and 10 minutes).
This is used in the :py:meth:`.poll_for_job_status` method.
:param tries: Number of tries
:type tries: int
:rtype: float
Examples of behavior:
.. code-block:: python
def exp(tries):
max_interval = 600.0 # 10 minutes in seconds
delay = 1 + pow(tries * 0.6, 2)
delay = min(max_interval, delay)
print(delay / 3, delay)
for tries in range(10):
exp(tries)
# 0.33 1.0
# 0.45 1.35
# 0.81 2.44
# 1.41 4.23
# 2.25 6.76
# 3.33 10.00
# 4.65 13.95
# 6.21 18.64
# 8.01 24.04
# 10.05 30.15
.. seealso::
- https://docs.aws.amazon.com/general/latest/gr/api-retries.html
- https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
"""
max_interval = 600.0 # results in 3 to 10 minute delay
delay = 1 + pow(tries * 0.6, 2)
delay = min(max_interval, delay)
return uniform(delay / 3, delay)
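    # --- Added usage sketch (not part of the original hook) ------------------
    # A minimal, hypothetical way to drive the polling helpers above; the
    # connection id and job id are illustrative assumptions, not values taken
    # from this module:
    #
    #     hook = AwsBatchClientHook(aws_conn_id="aws_default", max_retries=10)
    #     hook.wait_for_job(job_id="my-job-id")     # jittered, bounded polling
    #     assert hook.check_job_success(job_id="my-job-id")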
| apache-2.0 | 2,077,799,243,754,084,900 | 33.197761 | 103 | 0.604201 | false |
foobarbazblarg/stayclean | stayclean-2016/serve-posts-and-comments-from-interesting-users.py | 1 | 4227 | #!/usr/bin/python
import subprocess
import time
import praw
from hashlib import sha1
from flask import Flask
from flask import Response
from flask import request
from cStringIO import StringIO
from base64 import b64encode
from base64 import b64decode
from ConfigParser import ConfigParser
import OAuth2Util
import os
import markdown
import bleach
# encoding=utf8
import sys
from participantCollection import ParticipantCollection
reload(sys)
sys.setdefaultencoding('utf8')
# Edit me!
activeCommentHashFiles = [ 'retiredcommenthashes.txt',
'../stayclean-2016-april/retiredcommenthashes.txt',
'../stayclean-2016-march/retiredcommenthashes.txt',
'../stayclean-2016-february/retiredcommenthashes.txt' ]
flaskport = 8700
app = Flask(__name__)
app.debug = True
# commentHashesAndComments = {}
def loginOAuthAndReturnRedditSession():
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
o = OAuth2Util.OAuth2Util(redditSession, print_log=True, configfile="../reddit-oauth-credentials.cfg")
    # TODO: Testing with o.refresh() commented out. We authenticate fresh every time, so presumably no need to refresh.
# o.refresh(force=True)
return redditSession
def getSubmissionForRedditSession(redditSession):
submission = redditSession.get_submission(submission_id=challengePageSubmissionId)
# submission.replace_more_comments(limit=None, threshold=0)
return submission
def retiredCommentHashes():
answer = []
for filename in activeCommentHashFiles:
with open(filename, "r") as commentHashFile:
# return commentHashFile.readlines()
answer += commentHashFile.read().splitlines()
return answer
@app.route('/interesting-and-problematic-users')
def interestingAndProblematicUsers():
# TODO: left off here.
# global commentHashesAndComments
global submission
# commentHashesAndComments = {}
stringio = StringIO()
stringio.write('<html>\n<head>\n</head>\n\n')
redditSession = loginOAuthAndReturnRedditSession()
unreadMessages = redditSession.get_unread(limit=None)
retiredHashes = retiredCommentHashes()
i = 1
stringio.write('<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
stringio.write("<h3>")
stringio.write("my unread messages")
stringio.write("</h3>\n\n")
for unreadMessage in unreadMessages:
i += 1
commentHash = sha1()
if unreadMessage.__class__ == praw.objects.Comment:
# This next line takes 2 seconds. It must need to do an HTTPS transaction to get the permalink.
# Not much we can do about that, I guess.
# print int(round(time.time() * 1000))
commentHash.update(unreadMessage.permalink)
# print int(round(time.time() * 1000))
else:
commentHash.update(str(unreadMessage.author))
commentHash.update(unreadMessage.body.encode('utf-8'))
commentHash = commentHash.hexdigest()
if commentHash not in retiredHashes:
# commentHashesAndComments[commentHash] = unreadMessage
authorName = str(unreadMessage.author) # can be None if author was deleted. So check for that and skip if it's None.
# participant = ParticipantCollection().participantNamed(authorName)
stringio.write("<hr>\n")
stringio.write('<font color="blue"><b>')
stringio.write(authorName)
stringio.write('</b></font><br>')
if unreadMessage.__class__ == praw.objects.Comment:
stringio.write('<small><font color="gray">' + bleach.clean(unreadMessage.submission.title) + '</font></small><br>')
else:
stringio.write('<b>' + bleach.clean(unreadMessage.subject) + '</b><br>')
stringio.write(bleach.clean(markdown.markdown(unreadMessage.body.encode('utf-8')), tags=['p']))
stringio.write("\n<br><br>\n\n")
stringio.write('</html>')
pageString = stringio.getvalue()
stringio.close()
return Response(pageString, mimetype='text/html')
if __name__ == '__main__':
app.run(host='127.0.0.1', port=flaskport)
| mit | -8,780,181,807,481,865,000 | 36.741071 | 131 | 0.673764 | false |
marinkaz/orange3 | Orange/classification/simple_tree.py | 5 | 10627 | import ctypes as ct
import numpy as np
from Orange.base import Learner, Model
__all__ = ['SimpleTreeLearner']
from . import _simple_tree
_tree = ct.pydll.LoadLibrary(_simple_tree.__file__)
DiscreteNode = 0
ContinuousNode = 1
PredictorNode = 2
Classification = 0
Regression = 1
IntVar = 0
FloatVar = 1
c_int_p = ct.POINTER(ct.c_int)
c_double_p = ct.POINTER(ct.c_double)
class SIMPLE_TREE_NODE(ct.Structure):
pass
SIMPLE_TREE_NODE._fields_ = [
('type', ct.c_int),
('children_size', ct.c_int),
('split_attr', ct.c_int),
('split', ct.c_float),
('children', ct.POINTER(ct.POINTER(SIMPLE_TREE_NODE))),
('dist', ct.POINTER(ct.c_float)),
('n', ct.c_float),
('sum', ct.c_float),
]
_tree.build_tree.restype = ct.POINTER(SIMPLE_TREE_NODE)
_tree.new_node.restype = ct.POINTER(SIMPLE_TREE_NODE)
class SimpleTreeNode:
pass
class SimpleTreeLearner(Learner):
"""
Classification or regression tree learner.
Uses gain ratio for classification and mean square error for
regression. This learner was developed to speed-up random
forest construction, but can also be used as a standalone tree learner.
min_instances : int, optional (default = 2)
        Minimal number of data instances in leaves. When growing the tree,
new nodes are not introduced if they would result in leaves
with fewer instances than min_instances. Instance count is weighed.
max_depth : int, optional (default = 1024)
Maximal depth of tree.
max_majority : float, optional (default = 1.0)
Maximal proportion of majority class. When this is
exceeded, induction stops (only used for classification).
skip_prob : string, optional (default = 0.0)
Data attribute will be skipped with probability ``skip_prob``.
- if float, then skip attribute with this probability.
- if "sqrt", then `skip_prob = 1 - sqrt(n_features) / n_features`
- if "log2", then `skip_prob = 1 - log2(n_features) / n_features`
bootstrap : data table, optional (default = False)
A bootstrap data set.
seed : int, optional (default = 42)
Random seed.
"""
name = 'simple tree'
def __init__(self, min_instances=2, max_depth=1024, max_majority=1.0,
skip_prob=0.0, bootstrap=False, seed=42):
self.min_instances = min_instances
self.max_depth = max_depth
self.max_majority = max_majority
self.skip_prob = skip_prob
self.bootstrap = bootstrap
self.seed = seed
def fit_storage(self, data):
return SimpleTreeModel(self, data)
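# Added usage sketch (not in the original module). It assumes Orange is
# installed and that the bundled "iris" dataset can be loaded; the dataset
# name and parameter values are illustrative only:
#
#     import Orange
#     iris = Orange.data.Table("iris")
#     model = SimpleTreeLearner(min_instances=4, max_depth=50)(iris)
#     print(model.to_string())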
class SimpleTreeModel(Model):
def __init__(self, learner, data):
X = np.ascontiguousarray(data.X)
Y = np.ascontiguousarray(data.Y)
W = np.ascontiguousarray(data.W)
self.num_attrs = X.shape[1]
self.dom_attr = data.domain.attributes
self.cls_vars = list(data.domain.class_vars)
if len(data.domain.class_vars) != 1:
n_cls = len(data.domain.class_vars)
raise ValueError("Number of classes should be 1: {}".format(n_cls))
if data.domain.has_discrete_class:
self.type = Classification
self.cls_vals = len(data.domain.class_var.values)
elif data.domain.has_continuous_class:
self.type = Regression
self.cls_vals = 0
else:
raise ValueError("Only Continuous and Discrete "
"variables are supported")
if isinstance(learner.skip_prob, (float, int)):
skip_prob = learner.skip_prob
elif learner.skip_prob == 'sqrt':
skip_prob = 1.0 - np.sqrt(X.shape[1]) / X.shape[1]
elif learner.skip_prob == 'log2':
skip_prob = 1.0 - np.log2(X.shape[1]) / X.shape[1]
else:
raise ValueError(
"skip_prob not valid: {}".format(learner.skip_prob))
attr_vals = []
domain = []
for attr in data.domain.attributes:
if attr.is_discrete:
attr_vals.append(len(attr.values))
domain.append(IntVar)
elif attr.is_continuous:
attr_vals.append(0)
domain.append(FloatVar)
else:
raise ValueError("Only Continuous and Discrete "
"variables are supported")
attr_vals = np.array(attr_vals, dtype=np.int32)
domain = np.array(domain, dtype=np.int32)
self.node = _tree.build_tree(
X.ctypes.data_as(c_double_p),
Y.ctypes.data_as(c_double_p),
W.ctypes.data_as(c_double_p),
X.shape[0],
W.size,
learner.min_instances,
learner.max_depth,
ct.c_float(learner.max_majority),
ct.c_float(skip_prob),
self.type,
self.num_attrs,
self.cls_vals,
attr_vals.ctypes.data_as(c_int_p),
domain.ctypes.data_as(c_int_p),
learner.bootstrap,
learner.seed)
def predict_storage(self, data):
X = np.ascontiguousarray(data.X)
if self.type == Classification:
p = np.zeros((X.shape[0], self.cls_vals))
_tree.predict_classification(
X.ctypes.data_as(c_double_p),
X.shape[0],
self.node,
self.num_attrs,
self.cls_vals,
p.ctypes.data_as(c_double_p))
return p.argmax(axis=1), p
elif self.type == Regression:
p = np.zeros(X.shape[0])
_tree.predict_regression(
X.ctypes.data_as(c_double_p),
X.shape[0],
self.node,
self.num_attrs,
p.ctypes.data_as(c_double_p))
return p
else:
assert False, "Invalid prediction type"
def __del__(self):
if hasattr(self, "node"):
_tree.destroy_tree(self.node, self.type)
def __getstate__(self):
dict = self.__dict__.copy()
del dict['node']
py_node = self.__to_python(self.node)
return dict, py_node
def __setstate__(self, state):
dict, py_node = state
self.__dict__.update(dict)
self.node = self.__from_python(py_node)
# for pickling a tree
def __to_python(self, node):
n = node.contents
py_node = SimpleTreeNode()
py_node.type = n.type
py_node.children_size = n.children_size
py_node.split_attr = n.split_attr
py_node.split = n.split
py_node.children = [
self.__to_python(n.children[i]) for i in range(n.children_size)]
if self.type == Classification:
py_node.dist = [n.dist[i] for i in range(self.cls_vals)]
else:
py_node.n = n.n
py_node.sum = n.sum
return py_node
# for unpickling a tree
def __from_python(self, py_node):
node = _tree.new_node(py_node.children_size, self.type, self.cls_vals)
n = node.contents
n.type = py_node.type
n.children_size = py_node.children_size
n.split_attr = py_node.split_attr
n.split = py_node.split
for i in range(n.children_size):
n.children[i] = self.__from_python(py_node.children[i])
if self.type == Classification:
for i in range(self.cls_vals):
n.dist[i] = py_node.dist[i]
else:
n.n = py_node.n
n.sum = py_node.sum
return node
# for comparing two trees
def dumps_tree(self, node):
n = node.contents
xs = ['{', str(n.type)]
if n.type != PredictorNode:
xs.append(str(n.split_attr))
if n.type == ContinuousNode:
xs.append('{:.5f}'.format(n.split))
elif self.type == Classification:
for i in range(self.cls_vals):
xs.append('{:.2f}'.format(n.dist[i]))
else:
xs.append('{:.5f} {:.5f}'.format(n.n, n.sum))
for i in range(n.children_size):
xs.append(self.dumps_tree(n.children[i]))
xs.append('}')
return ' '.join(xs)
def to_string(self, node=None, level=0):
"""Return a text-based representation of the tree.
Parameters
----------
node : LP_SIMPLE_TREE_NODE, optional (default=None)
Tree node. Used to construct representation of the
tree under this node.
If not provided, node is considered root node.
        level : int, optional (default=0)
Level of the node. Used for line indentation.
Returns
-------
tree : str
Text-based representation of the tree.
"""
if node is None:
if self.node is None:
return '(null node)'
else:
node = self.node
n = node.contents
if self.type == Classification:
decimals = 1
else:
decimals = self.domain.class_var.number_of_decimals
if n.children_size == 0:
if self.type == Classification:
node_cont = [round(n.dist[i], decimals)
for i in range(self.cls_vals)]
index = node_cont.index(max(node_cont))
major_class = self.cls_vars[0].values[index]
return ' --> %s (%s)' % (major_class, node_cont)
else:
node_cont = str(round(n.sum / n.n, decimals)) + ': ' + str(n.n)
return ' --> (%s)' % node_cont
else:
node_desc = self.dom_attr[n.split_attr].name
if self.type == Classification:
node_cont = [round(n.dist[i], decimals)
for i in range(self.cls_vals)]
else:
node_cont = str(round(n.sum / n.n, decimals)) + ': ' + str(n.n)
ret_str = '\n' + ' ' * level + '%s (%s)' % (node_desc,
node_cont)
for i in range(n.children_size):
attr = self.dom_attr[n.split_attr]
if attr.is_continuous:
split = '<=' if i % 2 == 0 else '>'
split += str(round(n.split, 5))
ret_str += '\n' + ' ' * level + ': %s' % split
else:
ret_str += '\n' + ' ' * level + ': %s' % attr.values[i]
ret_str += self.to_string(n.children[i], level + 1)
return ret_str
| bsd-2-clause | -2,589,193,165,253,487,000 | 33.615635 | 79 | 0.53684 | false |
carmine/open-kilda | services/traffexam/kilda/traffexam/context.py | 2 | 3104 | # Copyright 2017 Telstra Open Source
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging.config
import os
import pathlib
from kilda.traffexam import const
from kilda.traffexam import common
from kilda.traffexam import exc
class Context(object):
is_debug = False
root = os.path.join(os.sep, 'var', 'run', const.PROJECT_NAME)
service = None
_init_done = False
def __init__(self, iface, rest_bind):
self.iface = iface
self.rest_bind = rest_bind
self.children = common.ProcMonitor()
self.shared_registry = common.Registry()
self._acquired_resources = []
self.set_root(self.root)
self.set_debug_mode(self.is_debug)
self._init_done = True
def close(self):
for resource in self._acquired_resources:
resource.release()
def path(self, *chunks):
return self.root.joinpath(*chunks)
def acquire_resources(self, *resources):
for allocator in resources:
self._acquired_resources.insert(0, allocator(self))
def make_lock_file_name(self):
name = '{}-{}.lock'.format(self.iface.name, self.iface.index)
return str(self.path(name))
def make_network_namespace_name(self):
return '{}{}.{}'.format(
const.IF_PREFIX, self.iface.name, self.iface.index)
def make_bridge_name(self):
return '{}gw.{}'.format(const.IF_PREFIX, self.iface.index)
def make_veth_base_name(self):
return '{}nsgw.{}'.format(const.IF_PREFIX, self.iface.index)
def set_root(self, root):
self.root = pathlib.Path(root)
if not self._init_done:
self.root.mkdir(parents=True, exist_ok=True)
return self
def set_default_logging(self):
stderr = logging.StreamHandler()
stderr.setFormatter('%(asctime)s %(levelname)s %(name)s - %(message)s')
log = logging.getLogger()
log.addHandler(stderr)
log.setLevel(logging.INFO)
return self
def set_logging_config(self, config, incremental=False):
try:
logging.config.fileConfig(
config, disable_existing_loggers=not incremental)
except (IOError, OSError) as e:
raise exc.InvalidLoggingConfigError(config, e)
return self
def set_debug_mode(self, mode):
self.is_debug = mode
return self
def set_service_adapter(self, adapter):
self.service = adapter
return self
class ContextConsumer(object):
def __init__(self, context):
self.context = context
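# Added usage sketch (illustrative only, not part of the original module).
# Context expects an interface handle exposing ``name`` and ``index`` plus a
# REST bind address; note that constructing it creates self.root on disk:
#
#     from collections import namedtuple
#     FakeIface = namedtuple('FakeIface', ('name', 'index'))
#     ctx = Context(FakeIface('eth1', 3), ('127.0.0.1', 8080))
#     print(ctx.make_network_namespace_name())   # '<IF_PREFIX>eth1.3'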
| apache-2.0 | 1,999,451,595,794,261,500 | 28.846154 | 79 | 0.642397 | false |
saurabh6790/trufil_app | selling/report/sales_person_target_variance_item_group_wise/sales_person_target_variance_item_group_wise.py | 30 | 5091 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes import _, msgprint
from webnotes.utils import flt
import time
from accounts.utils import get_fiscal_year
from controllers.trends import get_period_date_ranges, get_period_month_ranges
from webnotes.model.meta import get_field_precision
def execute(filters=None):
if not filters: filters = {}
columns = get_columns(filters)
period_month_ranges = get_period_month_ranges(filters["period"], filters["fiscal_year"])
sim_map = get_salesperson_item_month_map(filters)
data = []
for salesperson, salesperson_items in sim_map.items():
for item_group, monthwise_data in salesperson_items.items():
row = [salesperson, item_group]
totals = [0, 0, 0]
for relevant_months in period_month_ranges:
period_data = [0, 0, 0]
for month in relevant_months:
month_data = monthwise_data.get(month, {})
for i, fieldname in enumerate(["target", "achieved", "variance"]):
value = flt(month_data.get(fieldname))
period_data[i] += value
totals[i] += value
period_data[2] = period_data[0] - period_data[1]
row += period_data
totals[2] = totals[0] - totals[1]
row += totals
data.append(row)
return columns, sorted(data, key=lambda x: (x[0], x[1]))
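# Illustrative call of the report entry point (filter values below are
# assumptions for demonstration, not shipped defaults):
#
#     columns, data = execute({"fiscal_year": "2013-2014",
#                              "period": "Monthly",
#                              "target_on": "Quantity"})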
def get_columns(filters):
for fieldname in ["fiscal_year", "period", "target_on"]:
if not filters.get(fieldname):
label = (" ".join(fieldname.split("_"))).title()
msgprint(_("Please specify") + ": " + label,
raise_exception=True)
columns = ["Sales Person:Link/Sales Person:120", "Item Group:Link/Item Group:120"]
group_months = False if filters["period"] == "Monthly" else True
for from_date, to_date in get_period_date_ranges(filters["period"], filters["fiscal_year"]):
for label in ["Target (%s)", "Achieved (%s)", "Variance (%s)"]:
if group_months:
label = label % (from_date.strftime("%b") + " - " + to_date.strftime("%b"))
else:
label = label % from_date.strftime("%b")
columns.append(label+":Float:120")
return columns + ["Total Target:Float:120", "Total Achieved:Float:120",
"Total Variance:Float:120"]
#Get sales person & item group details
def get_salesperson_details(filters):
return webnotes.conn.sql("""select sp.name, td.item_group, td.target_qty,
td.target_amount, sp.distribution_id
from `tabSales Person` sp, `tabTarget Detail` td
where td.parent=sp.name and td.fiscal_year=%s order by sp.name""",
(filters["fiscal_year"]), as_dict=1)
#Get target distribution details of item group
def get_target_distribution_details(filters):
target_details = {}
for d in webnotes.conn.sql("""select bd.name, bdd.month, bdd.percentage_allocation
from `tabBudget Distribution Detail` bdd, `tabBudget Distribution` bd
where bdd.parent=bd.name and bd.fiscal_year=%s""", (filters["fiscal_year"]), as_dict=1):
target_details.setdefault(d.name, {}).setdefault(d.month, flt(d.percentage_allocation))
return target_details
#Get achieved details from sales order
def get_achieved_details(filters):
start_date, end_date = get_fiscal_year(fiscal_year = filters["fiscal_year"])[1:]
item_details = webnotes.conn.sql("""select soi.item_code, soi.qty, soi.amount, so.transaction_date,
st.sales_person, MONTHNAME(so.transaction_date) as month_name
from `tabSales Order Item` soi, `tabSales Order` so, `tabSales Team` st
where soi.parent=so.name and so.docstatus=1 and
st.parent=so.name and so.transaction_date>=%s and
so.transaction_date<=%s""" % ('%s', '%s'),
(start_date, end_date), as_dict=1)
item_actual_details = {}
for d in item_details:
item_actual_details.setdefault(d.sales_person, {}).setdefault(\
get_item_group(d.item_code), []).append(d)
return item_actual_details
def get_salesperson_item_month_map(filters):
import datetime
salesperson_details = get_salesperson_details(filters)
tdd = get_target_distribution_details(filters)
achieved_details = get_achieved_details(filters)
sim_map = {}
for sd in salesperson_details:
for month_id in range(1, 13):
month = datetime.date(2013, month_id, 1).strftime('%B')
sim_map.setdefault(sd.name, {}).setdefault(sd.item_group, {})\
.setdefault(month, webnotes._dict({
"target": 0.0, "achieved": 0.0
}))
tav_dict = sim_map[sd.name][sd.item_group][month]
month_percentage = tdd.get(sd.distribution_id, {}).get(month, 0) \
if sd.distribution_id else 100.0/12
for ad in achieved_details.get(sd.name, {}).get(sd.item_group, []):
if (filters["target_on"] == "Quantity"):
tav_dict.target = flt(sd.target_qty) * month_percentage / 100
if ad.month_name == month:
tav_dict.achieved += ad.qty
if (filters["target_on"] == "Amount"):
tav_dict.target = flt(sd.target_amount) * month_percentage / 100
if ad.month_name == month:
tav_dict.achieved += ad.amount
return sim_map
def get_item_group(item_name):
return webnotes.conn.get_value("Item", item_name, "item_group") | agpl-3.0 | -7,279,310,025,965,091,000 | 36.718519 | 101 | 0.687488 | false |
skoslowski/gnuradio | gr-filter/python/filter/design/api_object.py | 3 | 2038 | from __future__ import unicode_literals
# Copyright 2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
class ApiObject(object):
'''
Filter count variable if the filter design tool has to
return multiple filter parameters in future
e.g Cascaded Filters
'''
def __init__(self, filtcount = 1):
self.filtercount = filtcount
self.restype = [''] * self.filtercount
self.params = [''] * self.filtercount
self.taps = [''] * self.filtercount
'''
Updates params dictionary for the given filter number
'''
def update_params(self, params, filtno):
if (filtno <= self.filtercount):
self.params[filtno - 1] = params
'''
Updates filter type for the given filter number
'''
def update_filttype(self, filttype, filtno):
if (filtno <= self.filtercount):
            # the filter type is stored in self.restype (set in __init__)
            self.restype[filtno - 1] = filttype
'''
updates taps for the given filter number. taps will
contain a list of coefficients in the case of fir design
and (b,a) tuple in the case of iir design
'''
def update_taps(self, taps, filtno):
if (filtno <= self.filtercount):
self.taps[filtno - 1] = taps
'''
updates all of them in a single call
'''
def update_all(self, filttype, params, taps, filtno):
if (filtno <= self.filtercount):
self.taps[filtno - 1] = taps
self.params[filtno - 1] = params
self.restype[filtno - 1] = filttype
def get_filtercount(self):
return self.filtercount
def get_restype(self, filtno=1):
if (filtno <= self.filtercount):
return self.restype[filtno - 1]
def get_params(self, filtno=1):
if (filtno <= self.filtercount):
return self.params[filtno - 1]
def get_taps(self, filtno=1):
if (filtno <= self.filtercount):
return self.taps[filtno - 1]
| gpl-3.0 | 3,837,973,088,949,248,000 | 28.970588 | 64 | 0.592247 | false |
evgeni/cfgdiff | test/test_cfgdiff.py | 1 | 4680 | import unittest
import cfgdiff
class CfgDiffTestCase(unittest.TestCase):
def _test_same(self, cls, filea, fileb, parser=None):
a = cls(filea, ordered=False, parser=parser)
b = cls(fileb, ordered=False, parser=parser)
self.assertIsNone(a.error)
self.assertIsNone(b.error)
self.assertEqual(a.readlines(), b.readlines())
def _test_different(self, cls, filea, fileb, parser=None):
a = cls(filea, ordered=False, parser=parser)
b = cls(fileb, ordered=False, parser=parser)
self.assertIsNone(a.error)
self.assertIsNone(b.error)
self.assertNotEqual(a.readlines(), b.readlines())
class INIDiffTestCase(CfgDiffTestCase):
def test_ini_same(self):
self._test_same(cfgdiff.INIDiff, './test/test_same_1-a.ini',
'./test/test_same_1-b.ini')
def test_ini_different(self):
self._test_different(cfgdiff.INIDiff,
'./test/test_different_1-a.ini',
'./test/test_different_1-b.ini')
class JSONDiffTestCase(CfgDiffTestCase):
def test_json_same(self):
self._test_same(cfgdiff.JSONDiff, './test/test_same_1-a.json',
'./test/test_same_1-b.json')
def test_json_different(self):
self._test_different(cfgdiff.JSONDiff,
'./test/test_different_1-a.json',
'./test/test_different_1-b.json')
@unittest.skipUnless('yaml' in cfgdiff.supported_formats, 'requires PyYAML')
class YAMLDiffTestcase(CfgDiffTestCase):
def test_yaml_same(self):
self._test_same(cfgdiff.YAMLDiff, './test/test_same_1-a.yaml',
'./test/test_same_1-b.yaml')
def test_yaml_different(self):
self._test_different(cfgdiff.YAMLDiff,
'./test/test_different_1-a.yaml',
'./test/test_different_1-b.yaml')
@unittest.skipUnless('xml' in cfgdiff.supported_formats, 'requires LXML')
class XMLDiffTestCase(CfgDiffTestCase):
def test_xml_same(self):
self._test_same(cfgdiff.XMLDiff, './test/test_same_1-a.xml',
'./test/test_same_1-b.xml')
def test_xml_different(self):
self._test_different(cfgdiff.XMLDiff,
'./test/test_different_1-a.xml',
'./test/test_different_1-b.xml')
@unittest.skipUnless('conf' in cfgdiff.supported_formats, 'requires ConfigObj')
class ConfigDiffTestCase(CfgDiffTestCase):
def test_conf_same(self):
self._test_same(cfgdiff.ConfigDiff, './test/test_same_1-a.ini',
'./test/test_same_1-b.ini')
def test_conf_different(self):
self._test_different(cfgdiff.ConfigDiff,
'./test/test_different_1-a.ini',
'./test/test_different_1-b.ini')
@unittest.skipUnless('reconf' in cfgdiff.supported_formats,
'requires reconfigure')
class ReconfigureDiffTestCase(CfgDiffTestCase):
def setUp(self):
configs = __import__('reconfigure.configs', fromlist=['reconfigure'])
self.parser = configs.SambaConfig
@unittest.expectedFailure
def test_reconf_same(self):
self._test_same(cfgdiff.ReconfigureDiff,
'./test/test_same_1-a.ini',
'./test/test_same_1-b.ini', self.parser)
def test_reconf_different(self):
self._test_different(cfgdiff.ReconfigureDiff,
'./test/test_different_1-a.ini',
'./test/test_different_1-b.ini', self.parser)
@unittest.skipUnless('zone' in cfgdiff.supported_formats, 'requires dnspython')
class ZoneDiffTestCase(CfgDiffTestCase):
def test_zone_same(self):
self._test_same(cfgdiff.ZoneDiff,
'./test/test_same_1-a.zone',
'./test/test_same_1-b.zone')
def test_zone_different(self):
self._test_different(cfgdiff.ZoneDiff,
'./test/test_different_1-a.zone',
'./test/test_different_1-b.zone')
@unittest.skipUnless('isc' in cfgdiff.supported_formats, 'requires iscpy')
class ISCDiffTestCase(CfgDiffTestCase):
def test_isc_same(self):
self._test_same(cfgdiff.ISCDiff,
'./test/test_same_1-a.isc',
'./test/test_same_1-b.isc')
def test_isc_different(self):
self._test_different(cfgdiff.ISCDiff,
'./test/test_different_1-a.isc',
'./test/test_different_1-b.isc')
| mit | 1,457,297,573,597,181,200 | 33.925373 | 79 | 0.576923 | false |
HiSPARC/station-software | user/python/Lib/site-packages/win32comext/axdebug/adb.py | 18 | 17292 | """The glue between the Python debugger interface and the Active Debugger interface
"""
from win32com.axdebug.util import trace, _wrap, _wrap_remove
from win32com.server.util import unwrap
import win32com.client.connect
import gateways
import sys, bdb, traceback
import axdebug, stackframe
import win32api, pythoncom
import thread, os
def fnull(*args):
pass
try:
os.environ["DEBUG_AXDEBUG"]
debugging = 1
except KeyError:
debugging = 0
traceenter = fnull # trace enter of functions
tracev = fnull # verbose trace
if debugging:
traceenter = trace # trace enter of functions
tracev = trace # verbose trace
class OutputReflector:
def __init__(self, file, writefunc):
self.writefunc = writefunc
self.file = file
def __getattr__(self,name):
return getattr(self.file, name)
def write(self,message):
self.writefunc(message)
self.file.write(message)
def _dumpf(frame):
if frame is None:
return "<None>"
else:
addn = "(with trace!)"
if frame.f_trace is None:
addn = " **No Trace Set **"
return "Frame at %d, file %s, line: %d%s" % (id(frame), frame.f_code.co_filename, frame.f_lineno, addn)
g_adb = None
def OnSetBreakPoint(codeContext, breakPointState, lineNo):
try:
fileName = codeContext.codeContainer.GetFileName()
# inject the code into linecache.
import linecache
linecache.cache[fileName] = 0, 0, codeContext.codeContainer.GetText(), fileName
g_adb._OnSetBreakPoint(fileName, codeContext, breakPointState, lineNo+1)
except:
traceback.print_exc()
class Adb(bdb.Bdb,gateways.RemoteDebugApplicationEvents):
def __init__(self):
self.debugApplication = None
self.debuggingThread = None
self.debuggingThreadStateHandle = None
self.stackSnifferCookie = self.stackSniffer = None
self.codeContainerProvider = None
self.debuggingThread = None
self.breakFlags = None
self.breakReason = None
self.appDebugger = None
self.appEventConnection = None
self.logicalbotframe = None # Anything at this level or below does not exist!
self.currentframe = None # The frame we are currently in.
self.recursiveData = [] # Data saved for each reentery on this thread.
bdb.Bdb.__init__(self)
self._threadprotectlock = thread.allocate_lock()
self.reset()
def canonic(self, fname):
if fname[0]=='<':
return fname
return bdb.Bdb.canonic(self, fname)
def reset(self):
traceenter("adb.reset")
bdb.Bdb.reset(self)
def __xxxxx__set_break(self, filename, lineno, cond = None):
# As per standard one, except no linecache checking!
if filename not in self.breaks:
self.breaks[filename] = []
list = self.breaks[filename]
if lineno in list:
return 'There is already a breakpoint there!'
list.append(lineno)
if cond is not None: self.cbreaks[filename, lineno]=cond
def stop_here(self, frame):
traceenter("stop_here", _dumpf(frame), _dumpf(self.stopframe))
# As per bdb.stop_here, except for logicalbotframe
## if self.stopframe is None:
## return 1
if frame is self.stopframe:
return 1
tracev("stop_here said 'No'!")
return 0
def break_here(self, frame):
traceenter("break_here", self.breakFlags, _dumpf(frame))
self.breakReason = None
if self.breakFlags==axdebug.APPBREAKFLAG_DEBUGGER_HALT:
self.breakReason = axdebug.BREAKREASON_DEBUGGER_HALT
elif self.breakFlags==axdebug.APPBREAKFLAG_DEBUGGER_BLOCK:
self.breakReason = axdebug.BREAKREASON_DEBUGGER_BLOCK
elif self.breakFlags==axdebug.APPBREAKFLAG_STEP:
self.breakReason = axdebug.BREAKREASON_STEP
else:
print "Calling base 'break_here' with", self.breaks
if bdb.Bdb.break_here(self, frame):
self.breakReason = axdebug.BREAKREASON_BREAKPOINT
return self.breakReason is not None
def break_anywhere(self, frame):
traceenter("break_anywhere", _dumpf(frame))
if self.breakFlags==axdebug.APPBREAKFLAG_DEBUGGER_HALT:
self.breakReason = axdebug.BREAKREASON_DEBUGGER_HALT
return 1
rc = bdb.Bdb.break_anywhere(self, frame)
tracev("break_anywhere",_dumpf(frame),"returning",rc)
return rc
def dispatch_return(self, frame, arg):
traceenter("dispatch_return", _dumpf(frame), arg)
if self.logicalbotframe is frame:
# We dont want to debug parent frames.
tracev("dispatch_return resetting sys.trace")
sys.settrace(None)
return
# self.bSetTrace = 0
self.currentframe = frame.f_back
return bdb.Bdb.dispatch_return(self, frame, arg)
def dispatch_line(self, frame):
traceenter("dispatch_line", _dumpf(frame), _dumpf(self.botframe))
# trace("logbotframe is", _dumpf(self.logicalbotframe), "botframe is", self.botframe)
if frame is self.logicalbotframe:
trace("dispatch_line", _dumpf(frame), "for bottom frame returing tracer")
# The next code executed in the frame above may be a builtin (eg, apply())
# in which sys.trace needs to be set.
sys.settrace(self.trace_dispatch)
# And return the tracer incase we are about to execute Python code,
# in which case sys tracer is ignored!
return self.trace_dispatch
if self.codeContainerProvider.FromFileName(frame.f_code.co_filename) is None:
trace("dispatch_line has no document for", _dumpf(frame), "- skipping trace!")
return None
self.currentframe = frame # So the stack sniffer knows our most recent, debuggable code.
return bdb.Bdb.dispatch_line(self, frame)
def dispatch_call(self, frame, arg):
traceenter("dispatch_call",_dumpf(frame))
frame.f_locals['__axstack_address__'] = axdebug.GetStackAddress()
if frame is self.botframe:
trace("dispatch_call is self.botframe - returning tracer")
return self.trace_dispatch
# Not our bottom frame. If we have a document for it,
# then trace it, otherwise run at full speed.
if self.codeContainerProvider.FromFileName(frame.f_code.co_filename) is None:
trace("dispatch_call has no document for", _dumpf(frame), "- skipping trace!")
## sys.settrace(None)
return None
return self.trace_dispatch
# rc = bdb.Bdb.dispatch_call(self, frame, arg)
# trace("dispatch_call", _dumpf(frame),"returned",rc)
# return rc
def trace_dispatch(self, frame, event, arg):
traceenter("trace_dispatch", _dumpf(frame), event, arg)
if self.debugApplication is None:
trace("trace_dispatch has no application!")
return # None
return bdb.Bdb.trace_dispatch(self, frame, event, arg)
#
# The user functions do bugger all!
#
# def user_call(self, frame, argument_list):
# traceenter("user_call",_dumpf(frame))
def user_line(self, frame):
traceenter("user_line",_dumpf(frame))
# Traces at line zero
if frame.f_lineno!=0:
breakReason = self.breakReason
if breakReason is None:
breakReason = axdebug.BREAKREASON_STEP
self._HandleBreakPoint(frame, None, breakReason)
def user_return(self, frame, return_value):
# traceenter("user_return",_dumpf(frame),return_value)
bdb.Bdb.user_return(self, frame, return_value)
def user_exception(self, frame, exc_info):
# traceenter("user_exception")
bdb.Bdb.user_exception(self, frame, exc_info)
def _HandleBreakPoint(self, frame, tb, reason):
traceenter("Calling HandleBreakPoint with reason", reason,"at frame", _dumpf(frame))
traceenter(" Current frame is", _dumpf(self.currentframe))
try:
resumeAction = self.debugApplication.HandleBreakPoint(reason)
tracev("HandleBreakPoint returned with ", resumeAction)
except pythoncom.com_error, details:
# Eeek - the debugger is dead, or something serious is happening.
# Assume we should continue
resumeAction = axdebug.BREAKRESUMEACTION_CONTINUE
trace("HandleBreakPoint FAILED with", details)
self.stack = []
self.curindex = 0
if resumeAction == axdebug.BREAKRESUMEACTION_ABORT:
self.set_quit()
elif resumeAction == axdebug.BREAKRESUMEACTION_CONTINUE:
tracev("resume action is continue")
self.set_continue()
elif resumeAction == axdebug.BREAKRESUMEACTION_STEP_INTO:
tracev("resume action is step")
self.set_step()
elif resumeAction == axdebug.BREAKRESUMEACTION_STEP_OVER:
tracev("resume action is next")
self.set_next(frame)
elif resumeAction == axdebug.BREAKRESUMEACTION_STEP_OUT:
tracev("resume action is stop out")
self.set_return(frame)
else:
raise ValueError("unknown resume action flags")
self.breakReason = None
def set_trace(self):
self.breakReason = axdebug.BREAKREASON_LANGUAGE_INITIATED
bdb.Bdb.set_trace(self)
def CloseApp(self):
traceenter("ClosingApp")
self.reset()
self.logicalbotframe = None
if self.stackSnifferCookie is not None:
try:
self.debugApplication.RemoveStackFrameSniffer(self.stackSnifferCookie)
except pythoncom.com_error:
trace("*** Could not RemoveStackFrameSniffer %d" % (self.stackSnifferCookie))
if self.stackSniffer:
_wrap_remove(self.stackSniffer)
self.stackSnifferCookie = self.stackSniffer = None
if self.appEventConnection is not None:
self.appEventConnection.Disconnect()
self.appEventConnection = None
self.debugApplication = None
self.appDebugger = None
if self.codeContainerProvider is not None:
self.codeContainerProvider.Close()
self.codeContainerProvider = None
def AttachApp(self, debugApplication, codeContainerProvider):
# traceenter("AttachApp", debugApplication, codeContainerProvider)
self.codeContainerProvider = codeContainerProvider
self.debugApplication = debugApplication
self.stackSniffer = _wrap(stackframe.DebugStackFrameSniffer(self), axdebug.IID_IDebugStackFrameSniffer)
self.stackSnifferCookie = debugApplication.AddStackFrameSniffer(self.stackSniffer)
# trace("StackFrameSniffer added (%d)" % self.stackSnifferCookie)
# Connect to the application events.
self.appEventConnection = win32com.client.connect.SimpleConnection(self.debugApplication, self, axdebug.IID_IRemoteDebugApplicationEvents)
def ResetAXDebugging(self):
traceenter("ResetAXDebugging", self, "with refcount", len(self.recursiveData))
if win32api.GetCurrentThreadId()!=self.debuggingThread:
trace("ResetAXDebugging called on other thread")
return
if len(self.recursiveData)==0:
# print "ResetAXDebugging called for final time."
self.logicalbotframe = None
self.debuggingThread = None
self.currentframe = None
self.debuggingThreadStateHandle = None
return
        self.logicalbotframe, self.stopframe, self.currentframe, self.debuggingThreadStateHandle = self.recursiveData[0]
self.recursiveData = self.recursiveData[1:]
def SetupAXDebugging(self, baseFrame = None, userFrame = None):
"""Get ready for potential debugging. Must be called on the thread
that is being debugged.
"""
# userFrame is for non AXScript debugging. This is the first frame of the
# users code.
if userFrame is None:
userFrame = baseFrame
else:
# We have missed the "dispatch_call" function, so set this up now!
userFrame.f_locals['__axstack_address__'] = axdebug.GetStackAddress()
traceenter("SetupAXDebugging", self)
self._threadprotectlock.acquire()
try:
thisThread = win32api.GetCurrentThreadId()
if self.debuggingThread is None:
self.debuggingThread = thisThread
else:
if self.debuggingThread!=thisThread:
trace("SetupAXDebugging called on other thread - ignored!")
return
# push our context.
self.recursiveData.insert(0, (self.logicalbotframe,self.stopframe, self.currentframe,self.debuggingThreadStateHandle))
finally:
self._threadprotectlock.release()
trace("SetupAXDebugging has base frame as", _dumpf(baseFrame))
self.botframe = baseFrame
self.stopframe = userFrame
self.logicalbotframe = baseFrame
self.currentframe = None
self.debuggingThreadStateHandle = axdebug.GetThreadStateHandle()
self._BreakFlagsChanged()
# RemoteDebugApplicationEvents
def OnConnectDebugger(self, appDebugger):
traceenter("OnConnectDebugger", appDebugger)
self.appDebugger = appDebugger
# Reflect output to appDebugger
writefunc = lambda s: appDebugger.onDebugOutput(s)
sys.stdout = OutputReflector(sys.stdout, writefunc)
sys.stderr = OutputReflector(sys.stderr, writefunc)
def OnDisconnectDebugger(self):
traceenter("OnDisconnectDebugger")
# Stop reflecting output
if isinstance(sys.stdout, OutputReflector):
sys.stdout = sys.stdout.file
if isinstance(sys.stderr, OutputReflector):
sys.stderr = sys.stderr.file
self.appDebugger = None
self.set_quit()
def OnSetName(self, name):
traceenter("OnSetName", name)
def OnDebugOutput(self, string):
traceenter("OnDebugOutput", string)
def OnClose(self):
traceenter("OnClose")
def OnEnterBreakPoint(self, rdat):
traceenter("OnEnterBreakPoint", rdat)
def OnLeaveBreakPoint(self, rdat):
traceenter("OnLeaveBreakPoint", rdat)
def OnCreateThread(self, rdat):
traceenter("OnCreateThread", rdat)
def OnDestroyThread(self, rdat):
traceenter("OnDestroyThread", rdat)
def OnBreakFlagChange(self, abf, rdat):
traceenter("Debugger OnBreakFlagChange", abf, rdat)
self.breakFlags = abf
self._BreakFlagsChanged()
def _BreakFlagsChanged(self):
traceenter("_BreakFlagsChanged to %s with our thread = %s, and debugging thread = %s" % (self.breakFlags, self.debuggingThread, win32api.GetCurrentThreadId()))
trace("_BreakFlagsChanged has breaks", self.breaks)
# If a request comes on our debugging thread, then do it now!
# if self.debuggingThread!=win32api.GetCurrentThreadId():
# return
if len(self.breaks) or self.breakFlags:
if self.logicalbotframe:
trace("BreakFlagsChange with bot frame", _dumpf(self.logicalbotframe))
# We have frames not to be debugged (eg, Scripting engine frames
# (sys.settrace will be set when out logicalbotframe is hit -
# this may not be the right thing to do, as it may not cause the
# immediate break we desire.)
self.logicalbotframe.f_trace = self.trace_dispatch
else:
trace("BreakFlagsChanged, but no bottom frame")
if self.stopframe is not None:
self.stopframe.f_trace = self.trace_dispatch
# If we have the thread-state for the thread being debugged, then
# we dynamically set its trace function - it is possible that the thread
# being debugged is in a blocked call (eg, a message box) and we
# want to hit the debugger the instant we return
if self.debuggingThreadStateHandle is not None and \
self.breakFlags and \
self.debuggingThread != win32api.GetCurrentThreadId():
axdebug.SetThreadStateTrace(self.debuggingThreadStateHandle, self.trace_dispatch)
def _OnSetBreakPoint(self, key, codeContext, bps, lineNo):
traceenter("_OnSetBreakPoint", self, key, codeContext, bps, lineNo)
if bps==axdebug.BREAKPOINT_ENABLED:
problem = self.set_break(key, lineNo)
if problem:
print "*** set_break failed -", problem
trace("_OnSetBreakPoint just set BP and has breaks", self.breaks)
else:
self.clear_break(key, lineNo)
self._BreakFlagsChanged()
trace("_OnSetBreakPoint leaving with breaks", self.breaks)
def Debugger():
global g_adb
if g_adb is None:
g_adb = Adb()
return g_adb
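# Added usage sketch (illustrative): obtain the module-level singleton and
# prime it for the current thread. A real host would also call AttachApp()
# with an IRemoteDebugApplication pointer, which is not available here.
#
#     adb = Debugger()
#     adb.SetupAXDebugging(baseFrame=sys._getframe())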
| gpl-3.0 | -2,416,130,532,915,679,700 | 40.567308 | 167 | 0.636537 | false |
numenta/nupic.research | projects/transformers/experiments/hpsearch.py | 2 | 1819 | # Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2021, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
#
"""
Base Transformers Experiment configuration.
"""
from copy import deepcopy
from ray import tune
from .base import bert_base
def hp_space(trial):
return dict(
learning_rate=tune.loguniform(1e-4, 1e-2)
)
debug_hp_search = deepcopy(bert_base)
debug_hp_search.update(
finetuning=False,
# Data Training arguments
dataset_name="wikitext",
dataset_config_name="wikitext-2-raw-v1",
# Training Arguments
logging_steps=50,
warmup_steps=10,
max_steps=50,
overwrite_output_dir=True,
dataloader_drop_last=True,
per_device_train_batch_size=8,
per_device_eval_batch_size=8,
do_train=True,
do_eval=True,
do_predict=False,
# hyperparameter search
hp_space=hp_space, # required
hp_num_trials=2, # required
hp_validation_dataset_pct=0.05, # default
hp_extra_kwargs=dict() # default
)
# Export configurations in this file
CONFIGS = dict(
debug_hp_search=debug_hp_search
)
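# Illustrative lookup of the exported configuration (the runner that consumes
# CONFIGS lives elsewhere in the project; this entry point is an assumption):
#
#     config = CONFIGS["debug_hp_search"]
#     space = config["hp_space"](None)   # {'learning_rate': <ray loguniform>}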
| agpl-3.0 | 6,478,573,843,356,277,000 | 24.985714 | 73 | 0.709731 | false |
rCorvidae/OrionPI | src/tests/Updater/TestUpdaterTransmissionNegotiation.py | 1 | 5052 | from bin.Updater.UpdaterTransmissionNegotiation import NegotiationResult, TransmissionNegotiation
import unittest
import json
class TestTransmissionResult(unittest.TestCase):
def setUp(self):
self.positive_ack_dict = {
"ACK": {
"filename": "update.zip",
"filesize": 2000,
"MD5": "5d41402abc4b2a76b9719d911017c592"
}
}
self.positive_ack_json = json.dumps(self.positive_ack_dict)
self.results = NegotiationResult(self.positive_ack_dict, True)
def test_has_passed_negotiation(self):
results = NegotiationResult(self.positive_ack_dict, True)
self.assertTrue(results)
def test_has_not_passed(self):
results = NegotiationResult(self.positive_ack_dict, False)
self.assertFalse(results)
def test_receive_the_same_ack_if_no_params_passed(self):
ack = self.results.results()
self.assertDictEqual(self.positive_ack_dict, ack)
def test_receive_anything_below_ack(self):
ack = self.results.results(TransmissionNegotiation.ACK)
self.assertDictEqual(self.positive_ack_dict["ACK"], ack)
def test_receive_file_name_below_ack(self):
ack = self.results.results(TransmissionNegotiation.ACK,
TransmissionNegotiation.FILE_NAME)
self.assertEqual(self.positive_ack_dict["ACK"]["filename"], ack)
def test_receive_md5_below_ack(self):
ack = self.results.results(TransmissionNegotiation.ACK,
TransmissionNegotiation.MD5)
self.assertEqual(self.positive_ack_dict["ACK"]["MD5"], ack)
class TestUpdaterTransmissionNegotiation(unittest.TestCase):
def setUp(self):
self.positive_sync_dict = {
"SYN": {
"filename": "update.zip",
"filesize": 2000,
"MD5": "5d41402abc4b2a76b9719d911017c592"
}
}
self.positive_ack_dict = {
"ACK": {
"filename": "update.zip",
"filesize": 2000,
"MD5": "5d41402abc4b2a76b9719d911017c592"
}
}
self.erroneous_sync_dict = {
"SYNC": {
"filename": "update.zip",
"filesize": 2000,
"MD5": "5d41402abc4b2a76b9719d911017c592"
}
}
self.positive_sync_json = json.dumps(self.positive_sync_dict)
self.positive_ack_json = json.dumps(self.positive_ack_dict)
self.erroneous_sync_json = json.dumps(self.erroneous_sync_dict)
self.erroneous_sync_json = self.erroneous_sync_json[2:]
self.sync_not_all_values_passed_dict = self.positive_sync_dict.copy()
def test_negotiate_positive_syn_ack_dict(self):
negotiator = TransmissionNegotiation()
ack = negotiator.negotiate(self.positive_sync_dict)
self.assertDictEqual(self.positive_ack_dict, ack.results())
def test_negotiate_erroneous_syn_ack_dict(self):
negotiator = TransmissionNegotiation()
ack = negotiator.negotiate(self.erroneous_sync_dict)
self.assertFalse(ack)
    def test_negotiate_erroneous_syn_ack_not_all_values_passed(self):
negotiator = TransmissionNegotiation()
del self.sync_not_all_values_passed_dict["SYN"]["filesize"]
ack = negotiator.negotiate(self.sync_not_all_values_passed_dict)
self.assertFalse(ack)
def test_get_bare_transmission_condition_given_correct_data(self):
negotiator = TransmissionNegotiation()
negotiator.trans_cond = self.positive_sync_dict
expected_bare = self.positive_sync_dict["SYN"]
negotiator._get_bare_transmission_condition()
self.assertDictEqual(expected_bare, negotiator.trans_cond)
def test_get_bare_transmission_cond_given_erroneous_sync_data(self):
negotiator = TransmissionNegotiation()
negotiator.trans_cond = self.erroneous_sync_dict
self.assertRaises(KeyError, negotiator._get_bare_transmission_condition)
def test_has_all_transmission_parameters_expect_true(self):
negotiator = TransmissionNegotiation()
negotiator.trans_cond = self.positive_sync_dict
negotiator._get_bare_transmission_condition()
self.assertTrue(negotiator._has_all_transmission_parameters())
def test_has_all_transmission_parameters_not_all_values_passed_expect_false(self):
negotiator = TransmissionNegotiation()
negotiator.trans_cond = self.positive_sync_dict
negotiator._get_bare_transmission_condition()
del negotiator.trans_cond[TransmissionNegotiation.FILE_NAME]
self.assertFalse(negotiator._has_all_transmission_parameters())
def test_convert_transmission_cond_to_dict_on_erroneous_json(self):
negotiator = TransmissionNegotiation()
negotiator.trans_cond = self.erroneous_sync_json
self.assertRaises(TypeError, negotiator._convert_transmission_cond_to_dict)
if __name__ == "__main__":
unittest.main()
| mit | -1,533,825,401,147,790,800 | 40.073171 | 97 | 0.653009 | false |
d-grossman/magichour | deprecated/LogSig/multi/LogSigMulti2.py | 2 | 7941 | from collections import Counter
from itertools import combinations
from collections import namedtuple
from functools import partial
import multiprocessing
import datetime
import hashlib
import sys
import time
import signal
import gzip
'''
name some intermediate data structures
'''
LogLine = namedtuple('LogLine', ['ts', 'text'])
DataRecord = namedtuple('DataRecord', ['line', 'md5hash', 'stats'])
# Signal handler updates GLOBAL to stop processing
globalStop = False
def openFile(name, mode):
if name.lower().endswith('.gz'):
return gzip.open(name, mode + 'b')
else:
return open(name, mode)
# TODO make sure that this is correct
def makeEdges(m, i):
    retval = []
    c = 0
    while c < m - 1 - i:
        d = c
        c = c + i
        retval.append((d, c))
    # append the remaining tail; this also covers the case where the loop
    # never ran, without adding the same (0, m) chunk twice
    if (m - c) > 0:
        retval.append((c, m))
    return retval
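# Example of the resulting work chunks (added illustration):
#     makeEdges(1200, 500) -> [(0, 500), (500, 1000), (1000, 1200)]
#     makeEdges(300, 500)  -> [(0, 300)]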
def distribWork(C, G, denominator, D, workBounds):
start, finish = workBounds
CNext = [Counter() for _ in range(len(C))]
GNext = dict()
for i in range(start, finish):
j = argMaxPhiSimple(C, D[i], G, denominator)
GNext[D[i].md5hash] = j
CNext[j].update(D[i].stats)
return (CNext, GNext)
# GOOD
def signal_handler(signal, frame):
'''
stop processing if CTRL-C pressed
'''
global globalStop
globalStop = True
# TODO lookup faster hashes
def makeHash(s):
'''
make a md5 string rep of an input string
'''
m = hashlib.md5()
m.update(s)
return m.hexdigest()
# GOOD
def tuple2Str(a):
'''
make a concatenation of a tuple
can make multiple things alias to the same comparison..
'a','aaa','aa','aa','aaa','a' all map to 'aaaa'
'''
return '%s%s' % a
# GOOD
def str2Counter(X):
'''
make a counter object from a string
set chosen to make membership of a tuple instead of count of tuple
Counter is to track the number of DOCUMENTS containing the tuple
not the count of the tuples in a DOCUMENT.
'''
return Counter(map(tuple2Str, set(combinations(X.rstrip().split(), 2))))
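# Added example: the counter records *presence* of each word pair in a line,
# not its count, e.g.
#     str2Counter('a b c') == Counter({'ab': 1, 'ac': 1, 'bc': 1})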
# TODO update with results from email to paper authors
# @profile
def argMaxPhiSimple(C, X, G, denominator):
'''
calculate the best partition for X to be part of
return the number of the partition to caller
'''
numGroups = len(C)
# see which group X should be in to maximize
partition = G[X.md5hash]
retScore = 0.0
retval = partition
Xr = X.stats
for partition in range(numGroups):
currentScore = 0.0
numerator = 0.0
for r in Xr.iterkeys():
numerator += C[partition].get(r, 0)
currentScore += numerator * numerator
# TODO make sure this is the correct way to calculate
# currentScore should be Sum(p(r,C)^2)
d = denominator.get(partition, 0.000000000001)
d = d * d
currentScore = numerator / d
# keep tabs of who is winning
if retScore < currentScore:
retScore = currentScore
retval = partition
return retval
# GOOD
def randomSeeds(D, k, G):
'''
store the data histograms
in each parition
'''
C = [Counter() for _ in range(k)]
partition = 0
for d in D:
# assigning groups to a message
G[d.md5hash] = partition
# Do things the Counter way
C[partition].update(d.stats)
partition = (partition + 1) % k
print 'UniqLogLines', len(G)
return C
# GOOD
def updatePartition(CNext, X, GNext, j):
'''
update CNext with statistics from X
update GNext with which group X belongs
'''
GNext[X.md5hash] = j
# TODO would a binary version of this be sufficient?
CNext[j].update(X.stats)
# GOOD
def partitionsNotEqual(C, CNext):
'''
determine if array of dicts are equal
'''
for i in range(len(C)):
if C[i] != CNext[i]:
return True
return False
# GOOD
def logSig_localSearch(D, G, k, maxIter):
'''
D : log message set
k : number of groups to partition
returns: C: partitions
'''
global globalStop
GNext = dict()
CNext = [Counter() for _ in range(k)]
C = randomSeeds(D, k, G)
denominator = Counter(G.itervalues())
print "Starting Run\n"
# TODO should this be an energy measure
# instead of dict comp?
limit = 0
partitionsNotSame = True
eL = makeEdges(len(D), 500)
while partitionsNotSame and (limit < maxIter) and not globalStop:
start = time.time()
pool = multiprocessing.Pool()
func = partial(distribWork, C, G, denominator, D)
distribOut = pool.map(func, eL)
pool.close()
pool.join()
for o in distribOut:
tempCNext, tempGNext = o
GNext.update(tempGNext)
# for key, value in tempGNext.iteritems():
# GNext[key] = value
for c in range(len(CNext)):
CNext[c].update(tempCNext[c])
print 'sanity next %i current %i' % (len(GNext), len(G))
limit += 1
finish = time.time()
# make sure to stop when partitions stable
partitionsNotSame = partitionsNotEqual(C, CNext)
# TODO is this the corret thing?
C = CNext
# update for passing back
G.clear()
G.update(GNext)
CNext = [Counter() for _ in range(k)]
GNext = dict()
denominator = Counter(G.itervalues())
print 'looping iteration %i time=%3.4f (sec)' % (limit, finish - start)
sys.stdout.flush()
# end while
print '\niterated %i times' % (limit)
return C
# GOOD
def dataset_iterator(fIn, num_lines):
'''
Handle reading the data from file into a know form
'''
lines_read = 0
success_full = 0
while num_lines == -1 or lines_read < num_lines:
lines_read += 1
line = fIn.readline()
if len(line) == 0:
break
else:
try:
ts = datetime.datetime.strptime(line[:14], '%b %d %H:%M:%S')
yield LogLine(ts.replace(year=2015), line[15:].strip())
success_full += 1
except:
pass
# GOOD
def main(argv):
totalS = time.time()
print 'Attempting to open %s' % (argv[0])
print 'k = %i' % int(argv[1])
print 'maxIter = %i' % int(argv[2])
a = openFile(argv[0], 'r')
D = list()
G = dict()
readCount = 0
for r in dataset_iterator(a, -1):
h = makeHash(r.text)
s = str2Counter(r.text)
D.append(DataRecord(r, h, s))
readCount += 1
a.close()
print 'Read %i items' % readCount
logSig_localSearch(D, G, int(argv[1]), int(argv[2]))
totalE = time.time()
outHist = Counter(G.itervalues())
partitions = sorted(set(G.itervalues()))
# print a histogram of partition sizes
print 'cluster|number'
for p in partitions:
print '%4i|%4i' % (p, outHist[p])
print 'total execution time %s (sec)\n' % (totalE - totalS)
outSet = set()
outDict = dict()
for item in G.itervalues():
outSet.add(item)
for index, item in enumerate(outSet):
outDict[item] = index
# print things in partition order at the expense of looping
for p in partitions:
for d in D:
if p == G[d.md5hash]:
# print ' %03i | %s' % (G[d.md5hash], d.line.text)
print '%s,%s,%s' % (time.mktime(d.line.ts.timetuple()),
outDict[G[d.md5hash]],
d.line.text)
if __name__ == "__main__":
# install the signal handler
signal.signal(signal.SIGINT, signal_handler)
main(sys.argv[1:])
| apache-2.0 | 3,701,406,154,012,823,600 | 21.950867 | 79 | 0.570205 | false |
RobertHilbrich/assist | ch.hilbri.assist.mapping.benchmarking/resources/generate-plots.py | 2 | 5839 | import numpy as np
import matplotlib.pyplot as plt
examples = np.arange(20)
width = 0.25
constraints_full = []
variables_full = []
fails_full = []
backtracks_full = []
resolution_full = []
constraints_inst_only = []
variables_inst_only = []
fails_inst_only = []
backtracks_inst_only = []
resolution_inst_only = []
constraints_union = []
variables_union = []
fails_union = []
backtracks_union = []
resolution_union = []
file = open("C:\\ASSIST-Toolsuite\\ASSIST-Code-Public\\ch.hilbri.assist.mapping.benchmarking\\resources\\results-imac2-full.txt", "r")
for line in file:
constraints = int(line.split(': ')[1].split(', ')[0].split(' ')[0])
variables = int(line.split(': ')[1].split(', ')[1].split(' ')[0])
fails = int(line.split(': ')[1].split(', ')[2].split(' ')[0])
backtracks = int(line.split(': ')[1].split(', ')[3].split(' ')[0])
resolution = float(line.split(': ')[1].split(', ')[4].split(' ')[0])
constraints_full.append(constraints)
variables_full.append(variables)
fails_full.append(fails)
backtracks_full.append(backtracks)
resolution_full.append(resolution)
file.close()
file = open("C:\\ASSIST-Toolsuite\\ASSIST-Code-Public\\ch.hilbri.assist.mapping.benchmarking\\resources\\results-imac2-inst-only.txt", "r")
for line in file:
constraints = int(line.split(': ')[1].split(', ')[0].split(' ')[0])
variables = int(line.split(': ')[1].split(', ')[1].split(' ')[0])
fails = int(line.split(': ')[1].split(', ')[2].split(' ')[0])
backtracks = int(line.split(': ')[1].split(', ')[3].split(' ')[0])
resolution = float(line.split(': ')[1].split(', ')[4].split(' ')[0])
constraints_inst_only.append(constraints)
variables_inst_only.append(variables)
fails_inst_only.append(fails)
backtracks_inst_only.append(backtracks)
resolution_inst_only.append(resolution)
file.close()
file = open("C:\\ASSIST-Toolsuite\\ASSIST-Code-Public\\ch.hilbri.assist.mapping.benchmarking\\resources\\results-imac2-union.txt", "r")
for line in file:
constraints = int(line.split(': ')[1].split(', ')[0].split(' ')[0])
variables = int(line.split(': ')[1].split(', ')[1].split(' ')[0])
fails = int(line.split(': ')[1].split(', ')[2].split(' ')[0])
backtracks = int(line.split(': ')[1].split(', ')[3].split(' ')[0])
resolution = float(line.split(': ')[1].split(', ')[4].split(' ')[0])
constraints_union.append(constraints)
variables_union.append(variables)
fails_union.append(fails)
backtracks_union.append(backtracks)
resolution_union.append(resolution)
file.close()
def generateConstraintCountPlot():
fig, ax = plt.subplots(figsize=(7.3, 2))
ax.bar(examples - 0.5 * width, constraints_full[:-1], width, color='r', label='element-wise')
ax.bar(examples + 0.5 * width, constraints_inst_only[:-1], width, color='b', label='instantiation-only')
ax.bar(examples + 1.5 * width, constraints_union[:-1], width, color='g', label='combined')
ax.set_ylabel('# Constraints')
ax.set_xlabel('Example')
#ax.set_title('Constraints in all examples')
ax.set_xticks(examples + width / 2)
ax.set_xticklabels(np.arange(20)+1)
ax.legend(loc='upper right')
plt.subplots_adjust(top=1, bottom=0.22, left=0.14, right=1)
plt.show()
def generateVarCountPlot():
fig, ax = plt.subplots(figsize=(7.3, 2))
ax.bar(examples - 0.5 * width, variables_full[:-1], width, color='r', label='element-wise')
ax.bar(examples + 0.5 * width, variables_inst_only[:-1], width, color='b', label='instantiation-only')
ax.bar(examples + 1.5 * width, variables_union[:-1], width, color='g', label='combined')
ax.set_ylabel('# Variables')
ax.set_xlabel('Example')
ax.set_xticks(examples + width / 2)
ax.set_xticklabels(np.arange(20)+1)
ax.legend(loc='upper right')
plt.subplots_adjust(top=1, bottom=0.22, left=0.14, right=1)
plt.show()
def generateResolutionTimePlot():
fig, ax = plt.subplots(figsize=(7.3, 2))
ax.bar(examples - 0.5 * width, resolution_full[:-1], width, color='r', label='element-wise')
ax.bar(examples + 0.5 * width, resolution_inst_only[:-1], width, color='b', label='instantiation-only')
ax.bar(examples + 1.5 * width, resolution_union[:-1], width, color='g', label='combined')
ax.set_ylabel('Resolution [ms]')
ax.set_xlabel('Example')
ax.set_xticks(examples + width / 2)
ax.set_xticklabels(np.arange(20)+1)
ax.set_yscale("log", nonposy='clip')
ax.legend(loc='upper right')
plt.subplots_adjust(top=0.98, bottom=0.22, left=0.14, right=1)
plt.show()
def generateFailsPlot():
fig, ax = plt.subplots(figsize=(7.3, 2))
ax.bar(examples - 0.5 * width, fails_full[:-1], width, color='r', label='element-wise')
ax.bar(examples + 0.5 * width, fails_inst_only[:-1], width, color='b', label='instantiation-only')
ax.bar(examples + 1.5 * width, fails_union[:-1], width, color='g', label='combined')
ax.set_ylabel('# Fails')
ax.set_xlabel('Example')
ax.set_xticks(examples + width / 2)
ax.set_xticklabels(np.arange(20)+1)
ax.legend(loc='upper right')
plt.subplots_adjust(top=1, bottom=0.22, left=0.14, right=1)
plt.show()
def generateBacktracksPlot():
fig, ax = plt.subplots(figsize=(7.3, 2))
ax.bar(examples - 0.5 * width, backtracks_full[:-1], width, color='r', label='element-wise')
ax.bar(examples + 0.5 * width, backtracks_inst_only[:-1], width, color='b', label='instantiation-only')
ax.bar(examples + 1.5 * width, backtracks_union[:-1], width, color='g', label='combined')
ax.set_ylabel('# Backtracks')
ax.set_xlabel('Example')
ax.set_xticks(examples + width / 2)
ax.set_xticklabels(np.arange(20)+1)
ax.legend(loc='upper right')
plt.subplots_adjust(top=1, bottom=0.22, left=0.14, right=1)
plt.show()
generateResolutionTimePlot()
| gpl-2.0 | -5,068,243,061,225,530,000 | 39.548611 | 139 | 0.639151 | false |
M4rtinK/tsubame | tests/account_test.py | 1 | 1853 | import unittest
import tempfile
import os
import sys
import blitzdb
from core.account import TwitterAccount
from core.db import CustomFileBackend
class TwitterAccountClassTest(unittest.TestCase):
def account_test(self):
"""Check if twitter_accounts can be properly instantiated"""
with tempfile.TemporaryDirectory() as temp_dir_name:
db = CustomFileBackend(temp_dir_name)
account = TwitterAccount.new(db, username="avatar", token="oxium",
token_secret="radium", name="Steve")
self.assertEquals(account.username, "avatar")
self.assertEquals(account.name, "Steve")
self.assertEquals(account.token, "oxium")
self.assertEquals(account.token_secret, "radium")
def serialisation_test(self):
"""Check that twitter_accounts can be serialized and deserialized"""
with tempfile.TemporaryDirectory() as temp_dir_name:
db = CustomFileBackend(temp_dir_name)
account = TwitterAccount.new(db, username="avatar", token="oxium",
token_secret="radium", name="Steve")
# check that the temporary folder exists (just in case)
self.assertTrue(os.path.isdir(temp_dir_name))
# serialize the account to the database
account.save(commit=True)
# deserialize the account from the file
loaded_account = TwitterAccount.from_db(db, username="avatar")
# check that the deserialized account has the expected properties
self.assertEquals(loaded_account.username, "avatar")
self.assertEquals(loaded_account.name, "Steve")
self.assertEquals(loaded_account.token, "oxium")
self.assertEquals(loaded_account.token_secret, "radium")
| gpl-3.0 | -7,980,700,245,270,876,000 | 40.177778 | 78 | 0.640583 | false |
jbenet/mongo-python-driver | test/test_grid_file.py | 1 | 15832 | # -*- coding: utf-8 -*-
#
# Copyright 2009-2010 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the grid_file module.
"""
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import datetime
import os
import sys
import unittest
sys.path[0:0] = [""]
from nose.plugins.skip import SkipTest
from bson.objectid import ObjectId
from gridfs.grid_file import (_SEEK_CUR,
_SEEK_END,
GridIn,
GridFile,
GridOut)
from gridfs.errors import (NoFile,
UnsupportedAPI)
from test_connection import get_connection
import qcheck
class TestGridFile(unittest.TestCase):
def setUp(self):
self.db = get_connection().pymongo_test
self.db.fs.files.remove({})
self.db.fs.chunks.remove({})
def test_basic(self):
f = GridIn(self.db.fs, filename="test")
f.write("hello world")
f.close()
self.assertEqual(1, self.db.fs.files.find().count())
self.assertEqual(1, self.db.fs.chunks.find().count())
g = GridOut(self.db.fs, f._id)
self.assertEqual("hello world", g.read())
# make sure it's still there...
g = GridOut(self.db.fs, f._id)
self.assertEqual("hello world", g.read())
f = GridIn(self.db.fs, filename="test")
f.close()
self.assertEqual(2, self.db.fs.files.find().count())
self.assertEqual(1, self.db.fs.chunks.find().count())
g = GridOut(self.db.fs, f._id)
self.assertEqual("", g.read())
def test_md5(self):
f = GridIn(self.db.fs)
f.write("hello world\n")
f.close()
self.assertEqual("6f5902ac237024bdd0c176cb93063dc4", f.md5)
def test_alternate_collection(self):
self.db.alt.files.remove({})
self.db.alt.chunks.remove({})
f = GridIn(self.db.alt)
f.write("hello world")
f.close()
self.assertEqual(1, self.db.alt.files.find().count())
self.assertEqual(1, self.db.alt.chunks.find().count())
g = GridOut(self.db.alt, f._id)
self.assertEqual("hello world", g.read())
# test that md5 still works...
self.assertEqual("5eb63bbbe01eeed093cb22bb8f5acdc3", g.md5)
def test_grid_file(self):
self.assertRaises(UnsupportedAPI, GridFile)
def test_grid_in_default_opts(self):
self.assertRaises(TypeError, GridIn, "foo")
a = GridIn(self.db.fs)
self.assert_(isinstance(a._id, ObjectId))
self.assertRaises(AttributeError, setattr, a, "_id", 5)
self.assertEqual(None, a.filename)
a.filename = "my_file"
self.assertEqual("my_file", a.filename)
self.assertEqual(None, a.content_type)
a.content_type = "text/html"
self.assertEqual("text/html", a.content_type)
self.assertRaises(AttributeError, getattr, a, "length")
self.assertRaises(AttributeError, setattr, a, "length", 5)
self.assertEqual(256 * 1024, a.chunk_size)
self.assertRaises(AttributeError, setattr, a, "chunk_size", 5)
self.assertRaises(AttributeError, getattr, a, "upload_date")
self.assertRaises(AttributeError, setattr, a, "upload_date", 5)
self.assertRaises(AttributeError, getattr, a, "aliases")
a.aliases = ["foo"]
self.assertEqual(["foo"], a.aliases)
self.assertRaises(AttributeError, getattr, a, "metadata")
a.metadata = {"foo": 1}
self.assertEqual({"foo": 1}, a.metadata)
self.assertRaises(AttributeError, getattr, a, "md5")
self.assertRaises(AttributeError, setattr, a, "md5", 5)
a.close()
self.assert_(isinstance(a._id, ObjectId))
self.assertRaises(AttributeError, setattr, a, "_id", 5)
self.assertEqual("my_file", a.filename)
self.assertEqual("text/html", a.content_type)
self.assertEqual(0, a.length)
self.assertRaises(AttributeError, setattr, a, "length", 5)
self.assertEqual(256 * 1024, a.chunk_size)
self.assertRaises(AttributeError, setattr, a, "chunk_size", 5)
self.assert_(isinstance(a.upload_date, datetime.datetime))
self.assertRaises(AttributeError, setattr, a, "upload_date", 5)
self.assertEqual(["foo"], a.aliases)
self.assertEqual({"foo": 1}, a.metadata)
self.assertEqual("d41d8cd98f00b204e9800998ecf8427e", a.md5)
self.assertRaises(AttributeError, setattr, a, "md5", 5)
def test_grid_in_custom_opts(self):
self.assertRaises(TypeError, GridIn, "foo")
a = GridIn(self.db.fs, _id=5, filename="my_file",
contentType="text/html", chunkSize=1000, aliases=["foo"],
metadata={"foo": 1, "bar": 2}, bar=3, baz="hello")
self.assertEqual(5, a._id)
self.assertEqual("my_file", a.filename)
self.assertEqual("text/html", a.content_type)
self.assertEqual(1000, a.chunk_size)
self.assertEqual(["foo"], a.aliases)
self.assertEqual({"foo": 1, "bar": 2}, a.metadata)
self.assertEqual(3, a.bar)
self.assertEqual("hello", a.baz)
self.assertRaises(AttributeError, getattr, a, "mike")
b = GridIn(self.db.fs,
content_type="text/html", chunk_size=1000, baz=100)
self.assertEqual("text/html", b.content_type)
self.assertEqual(1000, b.chunk_size)
self.assertEqual(100, b.baz)
def test_grid_out_default_opts(self):
self.assertRaises(TypeError, GridOut, "foo")
self.assertRaises(NoFile, GridOut, self.db.fs, 5)
a = GridIn(self.db.fs)
a.close()
b = GridOut(self.db.fs, a._id)
self.assertEqual(a._id, b._id)
self.assertEqual(0, b.length)
self.assertEqual(None, b.content_type)
self.assertEqual(256 * 1024, b.chunk_size)
self.assert_(isinstance(b.upload_date, datetime.datetime))
self.assertEqual(None, b.aliases)
self.assertEqual(None, b.metadata)
self.assertEqual("d41d8cd98f00b204e9800998ecf8427e", b.md5)
for attr in ["_id", "name", "content_type", "length", "chunk_size",
"upload_date", "aliases", "metadata", "md5"]:
self.assertRaises(AttributeError, setattr, b, attr, 5)
def test_grid_out_custom_opts(self):
a = GridIn(self.db.fs, _id=5, filename="my_file",
contentType="text/html", chunkSize=1000, aliases=["foo"],
metadata={"foo": 1, "bar": 2}, bar=3, baz="hello")
a.write("hello world")
a.close()
b = GridOut(self.db.fs, 5)
self.assertEqual(5, b._id)
self.assertEqual(11, b.length)
self.assertEqual("text/html", b.content_type)
self.assertEqual(1000, b.chunk_size)
self.assert_(isinstance(b.upload_date, datetime.datetime))
self.assertEqual(["foo"], b.aliases)
self.assertEqual({"foo": 1, "bar": 2}, b.metadata)
self.assertEqual(3, b.bar)
self.assertEqual("5eb63bbbe01eeed093cb22bb8f5acdc3", b.md5)
for attr in ["_id", "name", "content_type", "length", "chunk_size",
"upload_date", "aliases", "metadata", "md5"]:
self.assertRaises(AttributeError, setattr, b, attr, 5)
def test_grid_out_file_document(self):
a = GridIn(self.db.fs)
a.write("foo bar")
a.close()
b = GridOut(self.db.fs, file_document=self.db.fs.files.find_one())
self.assertEqual("foo bar", b.read())
c = GridOut(self.db.fs, 5, file_document=self.db.fs.files.find_one())
self.assertEqual("foo bar", c.read())
self.assertRaises(NoFile, GridOut, self.db.fs, file_document={})
def test_write_file_like(self):
a = GridIn(self.db.fs)
a.write("hello world")
a.close()
b = GridOut(self.db.fs, a._id)
c = GridIn(self.db.fs)
c.write(b)
c.close()
d = GridOut(self.db.fs, c._id)
self.assertEqual("hello world", d.read())
e = GridIn(self.db.fs, chunk_size=2)
e.write("hello")
buffer = StringIO(" world")
e.write(buffer)
e.write(" and mongodb")
e.close()
self.assertEqual("hello world and mongodb",
GridOut(self.db.fs, e._id).read())
def test_write_lines(self):
a = GridIn(self.db.fs)
a.writelines(["hello ", "world"])
a.close()
self.assertEqual("hello world", GridOut(self.db.fs, a._id).read())
def test_close(self):
f = GridIn(self.db.fs)
f.close()
self.assertRaises(ValueError, f.write, "test")
f.close()
def test_multi_chunk_file(self):
random_string = qcheck.gen_string(qcheck.lift(300000))()
f = GridIn(self.db.fs)
f.write(random_string)
f.close()
self.assertEqual(1, self.db.fs.files.find().count())
self.assertEqual(2, self.db.fs.chunks.find().count())
g = GridOut(self.db.fs, f._id)
self.assertEqual(random_string, g.read())
def test_small_chunks(self):
self.files = 0
self.chunks = 0
def helper(data):
f = GridIn(self.db.fs, chunkSize=1)
f.write(data)
f.close()
self.files += 1
self.chunks += len(data)
self.assertEqual(self.files, self.db.fs.files.find().count())
self.assertEqual(self.chunks, self.db.fs.chunks.find().count())
g = GridOut(self.db.fs, f._id)
self.assertEqual(data, g.read())
g = GridOut(self.db.fs, f._id)
self.assertEqual(data, g.read(10) + g.read(10))
return True
qcheck.check_unittest(self, helper,
qcheck.gen_string(qcheck.gen_range(0, 20)))
def test_seek(self):
f = GridIn(self.db.fs, chunkSize=3)
f.write("hello world")
f.close()
g = GridOut(self.db.fs, f._id)
self.assertEqual("hello world", g.read())
g.seek(0)
self.assertEqual("hello world", g.read())
g.seek(1)
self.assertEqual("ello world", g.read())
self.assertRaises(IOError, g.seek, -1)
g.seek(-3, _SEEK_END)
self.assertEqual("rld", g.read())
g.seek(0, _SEEK_END)
self.assertEqual("", g.read())
self.assertRaises(IOError, g.seek, -100, _SEEK_END)
g.seek(3)
g.seek(3, _SEEK_CUR)
self.assertEqual("world", g.read())
self.assertRaises(IOError, g.seek, -100, _SEEK_CUR)
def test_tell(self):
f = GridIn(self.db.fs, chunkSize=3)
f.write("hello world")
f.close()
g = GridOut(self.db.fs, f._id)
self.assertEqual(0, g.tell())
g.read(0)
self.assertEqual(0, g.tell())
g.read(1)
self.assertEqual(1, g.tell())
g.read(2)
self.assertEqual(3, g.tell())
g.read()
self.assertEqual(g.length, g.tell())
def test_multiple_reads(self):
f = GridIn(self.db.fs, chunkSize=3)
f.write("hello world")
f.close()
g = GridOut(self.db.fs, f._id)
self.assertEqual("he", g.read(2))
self.assertEqual("ll", g.read(2))
self.assertEqual("o ", g.read(2))
self.assertEqual("wo", g.read(2))
self.assertEqual("rl", g.read(2))
self.assertEqual("d", g.read(2))
self.assertEqual("", g.read(2))
def test_readline(self):
f = GridIn(self.db.fs, chunkSize=5)
f.write("""Hello world,
How are you?
Hope all is well.
Bye""")
f.close()
g = GridOut(self.db.fs, f._id)
self.assertEqual("H", g.read(1))
self.assertEqual("ello world,\n", g.readline())
self.assertEqual("How a", g.readline(5))
self.assertEqual("", g.readline(0))
self.assertEqual("re you?\n", g.readline())
self.assertEqual("Hope all is well.\n", g.readline(1000))
self.assertEqual("Bye", g.readline())
self.assertEqual("", g.readline())
def test_iterator(self):
f = GridIn(self.db.fs)
f.close()
g = GridOut(self.db.fs, f._id)
self.assertEqual([], list(g))
f = GridIn(self.db.fs)
f.write("hello world")
f.close()
g = GridOut(self.db.fs, f._id)
self.assertEqual(["hello world"], list(g))
self.assertEqual("hello", g.read(5))
self.assertEqual(["hello world"], list(g))
self.assertEqual(" worl", g.read(5))
f = GridIn(self.db.fs, chunk_size=2)
f.write("hello world")
f.close()
g = GridOut(self.db.fs, f._id)
self.assertEqual(["he", "ll", "o ", "wo", "rl", "d"], list(g))
def test_read_chunks_unaligned_buffer_size(self):
in_data = ("This is a text that doesn't "
"quite fit in a single 16-byte chunk.")
f = GridIn(self.db.fs, chunkSize=16)
f.write(in_data)
f.close()
g = GridOut(self.db.fs, f._id)
out_data = ''
while 1:
s = g.read(13)
if not s:
break
out_data += s
self.assertEqual(in_data, out_data)
def test_write_unicode(self):
f = GridIn(self.db.fs)
self.assertRaises(TypeError, f.write, u"foo")
f = GridIn(self.db.fs, encoding="utf-8")
f.write(u"foo")
f.close()
g = GridOut(self.db.fs, f._id)
self.assertEqual("foo", g.read())
f = GridIn(self.db.fs, encoding="iso-8859-1")
f.write(u"aé")
f.close()
g = GridOut(self.db.fs, f._id)
self.assertEqual(u"aé".encode("iso-8859-1"), g.read())
def test_set_after_close(self):
f = GridIn(self.db.fs, _id="foo", bar="baz")
self.assertEqual("foo", f._id)
self.assertEqual("baz", f.bar)
self.assertRaises(AttributeError, getattr, f, "baz")
self.assertRaises(AttributeError, getattr, f, "uploadDate")
self.assertRaises(AttributeError, setattr, f, "_id", 5)
f.bar = "foo"
f.baz = 5
self.assertEqual("foo", f._id)
self.assertEqual("foo", f.bar)
self.assertEqual(5, f.baz)
self.assertRaises(AttributeError, getattr, f, "uploadDate")
f.close()
self.assertEqual("foo", f._id)
self.assertEqual("foo", f.bar)
self.assertEqual(5, f.baz)
self.assert_(f.uploadDate)
self.assertRaises(AttributeError, setattr, f, "_id", 5)
f.bar = "a"
f.baz = "b"
self.assertRaises(AttributeError, setattr, f, "upload_date", 5)
g = GridOut(self.db.fs, f._id)
self.assertEqual("a", f.bar)
self.assertEqual("b", f.baz)
def test_context_manager(self):
if sys.version_info < (2, 6):
raise SkipTest()
contents = "Imagine this is some important data..."
# Hack around python2.4 an 2.5 not supporting 'with' syntax
exec """
with GridIn(self.db.fs, filename="important") as infile:
infile.write(contents)
with GridOut(self.db.fs, infile._id) as outfile:
self.assertEqual(contents, outfile.read())
"""
if __name__ == "__main__":
unittest.main()
| apache-2.0 | -142,685,238,877,963,420 | 31.109533 | 77 | 0.575111 | false |
asciimoo/searx | searx/engines/searchcode_code.py | 5 | 1746 | """
Searchcode (It)
@website https://searchcode.com/
@provide-api yes (https://searchcode.com/api/)
@using-api yes
@results JSON
@stable yes
@parse url, title, content
"""
from json import loads
from searx.url_utils import urlencode
# engine dependent config
categories = ['it']
paging = True
# search-url
url = 'https://searchcode.com/'
search_url = url + 'api/codesearch_I/?{query}&p={pageno}'
# special code-endings which are not recognised by the file ending
code_endings = {'cs': 'c#',
'h': 'c',
'hpp': 'cpp',
'cxx': 'cpp'}
# do search-request
def request(query, params):
params['url'] = search_url.format(query=urlencode({'q': query}), pageno=params['pageno'] - 1)
return params
# get response from search-request
def response(resp):
results = []
search_results = loads(resp.text)
# parse results
for result in search_results.get('results', []):
href = result['url']
title = "" + result['name'] + " - " + result['filename']
repo = result['repo']
lines = dict()
for line, code in result['lines'].items():
lines[int(line)] = code
code_language = code_endings.get(
result['filename'].split('.')[-1].lower(),
result['filename'].split('.')[-1].lower())
# append result
results.append({'url': href,
'title': title,
'content': '',
'repository': repo,
'codelines': sorted(lines.items()),
'code_language': code_language,
'template': 'code.html'})
# return results
return results
| agpl-3.0 | 6,675,887,752,398,029,000 | 24.304348 | 97 | 0.537801 | false |
kubernetes-client/python | kubernetes/client/models/v1alpha1_policy_rules_with_subjects.py | 1 | 7071 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.18
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1alpha1PolicyRulesWithSubjects(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'non_resource_rules': 'list[V1alpha1NonResourcePolicyRule]',
'resource_rules': 'list[V1alpha1ResourcePolicyRule]',
'subjects': 'list[FlowcontrolV1alpha1Subject]'
}
attribute_map = {
'non_resource_rules': 'nonResourceRules',
'resource_rules': 'resourceRules',
'subjects': 'subjects'
}
def __init__(self, non_resource_rules=None, resource_rules=None, subjects=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1PolicyRulesWithSubjects - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._non_resource_rules = None
self._resource_rules = None
self._subjects = None
self.discriminator = None
if non_resource_rules is not None:
self.non_resource_rules = non_resource_rules
if resource_rules is not None:
self.resource_rules = resource_rules
self.subjects = subjects
@property
def non_resource_rules(self):
"""Gets the non_resource_rules of this V1alpha1PolicyRulesWithSubjects. # noqa: E501
`nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb and the target non-resource URL. # noqa: E501
:return: The non_resource_rules of this V1alpha1PolicyRulesWithSubjects. # noqa: E501
:rtype: list[V1alpha1NonResourcePolicyRule]
"""
return self._non_resource_rules
@non_resource_rules.setter
def non_resource_rules(self, non_resource_rules):
"""Sets the non_resource_rules of this V1alpha1PolicyRulesWithSubjects.
`nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb and the target non-resource URL. # noqa: E501
:param non_resource_rules: The non_resource_rules of this V1alpha1PolicyRulesWithSubjects. # noqa: E501
:type: list[V1alpha1NonResourcePolicyRule]
"""
self._non_resource_rules = non_resource_rules
@property
def resource_rules(self):
"""Gets the resource_rules of this V1alpha1PolicyRulesWithSubjects. # noqa: E501
`resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the target resource. At least one of `resourceRules` and `nonResourceRules` has to be non-empty. # noqa: E501
:return: The resource_rules of this V1alpha1PolicyRulesWithSubjects. # noqa: E501
:rtype: list[V1alpha1ResourcePolicyRule]
"""
return self._resource_rules
@resource_rules.setter
def resource_rules(self, resource_rules):
"""Sets the resource_rules of this V1alpha1PolicyRulesWithSubjects.
`resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the target resource. At least one of `resourceRules` and `nonResourceRules` has to be non-empty. # noqa: E501
:param resource_rules: The resource_rules of this V1alpha1PolicyRulesWithSubjects. # noqa: E501
:type: list[V1alpha1ResourcePolicyRule]
"""
self._resource_rules = resource_rules
@property
def subjects(self):
"""Gets the subjects of this V1alpha1PolicyRulesWithSubjects. # noqa: E501
subjects is the list of normal user, serviceaccount, or group that this rule cares about. There must be at least one member in this slice. A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. Required. # noqa: E501
:return: The subjects of this V1alpha1PolicyRulesWithSubjects. # noqa: E501
:rtype: list[FlowcontrolV1alpha1Subject]
"""
return self._subjects
@subjects.setter
def subjects(self, subjects):
"""Sets the subjects of this V1alpha1PolicyRulesWithSubjects.
subjects is the list of normal user, serviceaccount, or group that this rule cares about. There must be at least one member in this slice. A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. Required. # noqa: E501
:param subjects: The subjects of this V1alpha1PolicyRulesWithSubjects. # noqa: E501
:type: list[FlowcontrolV1alpha1Subject]
"""
if self.local_vars_configuration.client_side_validation and subjects is None: # noqa: E501
raise ValueError("Invalid value for `subjects`, must not be `None`") # noqa: E501
self._subjects = subjects
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1PolicyRulesWithSubjects):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1PolicyRulesWithSubjects):
return True
return self.to_dict() != other.to_dict()
| apache-2.0 | 5,400,181,612,892,856,000 | 38.502793 | 284 | 0.653797 | false |
OpenNFT/OpenNFT | opennft/mapimagewidget.py | 1 | 7463 | # -*- coding: utf-8 -*-
import collections
import typing as t
from loguru import logger
import numpy as np
from matplotlib import cm
from matplotlib import colors
import pyqtgraph as pg
from PyQt5 import QtWidgets
from PyQt5 import QtCore
from opennft import pgext
from copy import copy
ColormapType = t.Union[str, colors.Colormap]
Thresholds = collections.namedtuple('Thresholds', ('lower', 'upper'))
HOT_COLORMAP = 'hot'
COLD_COLORMAP = 'Blues_r'
class MapImageThresholdsCalculator:
"""Statistics/CNR map thresholds calculator class
The class computes the optimal thresholds for display a map.
"""
def __init__(self, thr_coeff: float = 0.0005, no_value: float = 0.0):
self._thr_coeff = thr_coeff
self._no_value = no_value
def __call__(self, map_image: np.ndarray) -> t.Optional[Thresholds]:
map_image_ma = np.ma.masked_equal(map_image, self._no_value)
if map_image_ma.mask.all():
logger.warning('There are no any values on the map')
return None
data = np.sort(map_image_ma.compressed().ravel())
lower_data = data[:int(self._thr_coeff * data.size)]
upper_data = data[int(data.size - self._thr_coeff * data.size):]
if lower_data.size > 0:
lower_thr = np.median(lower_data)
else:
lower_thr = data.min()
if upper_data.size > 0:
upper_thr = np.median(upper_data)
else:
upper_thr = data.min()
return Thresholds(lower_thr, upper_thr)
class RgbaMapImage:
"""Represents the mapper map image to RGBA
"""
def __init__(self, colormap: ColormapType = HOT_COLORMAP, no_value: float = 0.0):
self._no_value = no_value
if isinstance(colormap, str):
colormap = copy(cm.get_cmap(colormap))
self._colormap = colormap
self._colormap.set_bad(alpha=0.0)
def __call__(self, map_image: np.ndarray, thresholds: t.Optional[Thresholds] = None,
alpha: float = 1.0) -> t.Optional[np.ndarray]:
map_image_ma = np.ma.masked_equal(map_image, self._no_value)
if map_image_ma.mask.all():
return
if thresholds is not None:
map_image_ma = np.ma.masked_outside(
map_image_ma, thresholds.lower, thresholds.upper)
return self._map_to_rgba(map_image_ma, alpha)
def _map_to_rgba(self, stats_map_ma, alpha) -> np.ndarray:
vmin = stats_map_ma.min()
vmax = stats_map_ma.max()
normalizer = colors.Normalize(vmin=vmin, vmax=vmax)
mapper = cm.ScalarMappable(norm=normalizer, cmap=self._colormap)
return mapper.to_rgba(stats_map_ma, alpha=alpha)
class MapImageThresholdsWidget(QtWidgets.QWidget):
"""The widget for manipulating stats/cnr map thresholds
"""
thresholds_manually_changed = QtCore.pyqtSignal(Thresholds)
MIN_THRESHOLD = 0
MAX_THRESHOLD = 255
STEP = 5
def __init__(self, parent: QtCore.QObject = None, colormap: ColormapType = HOT_COLORMAP):
super().__init__(parent)
self._colormap = colormap
self._lower_threshold_spinbox = QtWidgets.QDoubleSpinBox(self)
self._upper_threshold_spinbox = QtWidgets.QDoubleSpinBox(self)
self._colorbar_imageitem = pg.ImageItem()
self._colorbar_viewbox = pgext.ViewBoxWithoutPadding(
lockAspect=False,
enableMouse=False,
enableMenu=False,
)
self._colorbar_viewbox.addItem(self._colorbar_imageitem)
self._colorbar_layout = pg.GraphicsLayoutWidget(self)
size_policy = self._colorbar_layout.sizePolicy()
size_policy.setVerticalPolicy(QtWidgets.QSizePolicy.Fixed)
size_policy.setHorizontalPolicy(QtWidgets.QSizePolicy.Ignored)
self._colorbar_layout.setSizePolicy(size_policy)
self._colorbar_layout.ci.layout.setContentsMargins(0, 0, 0, 0)
self._colorbar_layout.addItem(self._colorbar_viewbox)
layout = QtWidgets.QHBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(self._lower_threshold_spinbox)
layout.addWidget(self._colorbar_layout)
layout.addWidget(self._upper_threshold_spinbox)
layout.setStretch(1, 1)
self.setLayout(layout)
self.setMaximumWidth(400)
self._lower_threshold_spinbox.setMinimum(self.MIN_THRESHOLD)
self._lower_threshold_spinbox.setMaximum(self.MAX_THRESHOLD)
self._lower_threshold_spinbox.setSingleStep(self.STEP)
self._lower_threshold_spinbox.setValue(self.MIN_THRESHOLD)
self._upper_threshold_spinbox.setMinimum(self.MIN_THRESHOLD)
self._upper_threshold_spinbox.setMaximum(self.MAX_THRESHOLD)
self._upper_threshold_spinbox.setSingleStep(self.STEP)
self._upper_threshold_spinbox.setValue(self.MAX_THRESHOLD)
self._map_no_value = 0.0
self._auto_thresholds = True
self._thr_calculator = MapImageThresholdsCalculator(no_value=self._map_no_value)
self._make_colorbar()
self._lower_threshold_spinbox.valueChanged.connect(self._thresholds_changed)
self._upper_threshold_spinbox.valueChanged.connect(self._thresholds_changed)
@property
def auto_thresholds(self) -> bool:
return self._auto_thresholds
@auto_thresholds.setter
def auto_thresholds(self, value: bool):
self._auto_thresholds = value
def reset(self):
self._auto_thresholds = True
self._set_thresholds(Thresholds(self.MIN_THRESHOLD, self.MAX_THRESHOLD))
def compute_thresholds(self, map_values: np.ndarray):
if not self.auto_thresholds:
return
thresholds = self._thr_calculator(map_values)
if thresholds:
self._set_thresholds(thresholds)
else:
logger.warning('Cannot compute thresholds')
def compute_rgba(self, map_image, alpha: float = 1.0):
thresholds = self._get_thresholds()
rgba_stats_map = RgbaMapImage(colormap=self._colormap, no_value=self._map_no_value)
return rgba_stats_map(map_image, thresholds, alpha)
def resizeEvent(self, ev):
self._colorbar_layout.setFixedHeight(self._lower_threshold_spinbox.height())
self._colorbar_viewbox.autoRange()
def _make_colorbar(self):
w = self._colorbar_layout.width()
h = self._colorbar_layout.height()
colorbar_values = np.linspace(0., 1., w)
colorbar_image = np.array(colorbar_values, ndmin=2).repeat(h, axis=0)
colorbar_rgba = RgbaMapImage(colormap=self._colormap, no_value=-1)(colorbar_image)
self._colorbar_imageitem.setImage(colorbar_rgba.transpose((1, 0, 2)))
self._colorbar_viewbox.autoRange()
def _set_thresholds(self, thresholds):
self._lower_threshold_spinbox.blockSignals(True)
self._upper_threshold_spinbox.blockSignals(True)
self._lower_threshold_spinbox.setValue(thresholds.lower)
self._upper_threshold_spinbox.setValue(thresholds.upper)
self._lower_threshold_spinbox.blockSignals(False)
self._upper_threshold_spinbox.blockSignals(False)
def _get_thresholds(self):
lower = self._lower_threshold_spinbox.value()
upper = self._upper_threshold_spinbox.value()
return Thresholds(lower, upper)
def _thresholds_changed(self):
self.thresholds_manually_changed.emit(self._get_thresholds())
| gpl-3.0 | 756,554,017,271,098,900 | 31.732456 | 93 | 0.656572 | false |
fighter2011/ssbc | workers/simdht_worker.py | 25 | 15321 | #!/usr/bin/env python
# encoding: utf-8
"""
磁力搜索meta信息入库程序
[email protected]
2015.6 Forked CreateChen's Project: https://github.com/CreateChen/simDownloader
"""
import hashlib
import os
import time
import datetime
import traceback
import sys
import json
import socket
import threading
from hashlib import sha1
from random import randint
from struct import unpack
from socket import inet_ntoa
from threading import Timer, Thread
from time import sleep
from collections import deque
from Queue import Queue
import MySQLdb as mdb
try:
raise
import libtorrent as lt
import ltMetadata
except:
lt = None
import metautils
import simMetadata
from bencode import bencode, bdecode
DB_HOST = '127.0.0.1'
DB_USER = 'root'
DB_PASS = ''
BOOTSTRAP_NODES = (
("router.bittorrent.com", 6881),
("dht.transmissionbt.com", 6881),
("router.utorrent.com", 6881)
)
TID_LENGTH = 2
RE_JOIN_DHT_INTERVAL = 3
TOKEN_LENGTH = 2
MAX_QUEUE_LT = 25
MAX_QUEUE_PT = 200
def entropy(length):
return "".join(chr(randint(0, 255)) for _ in xrange(length))
def random_id():
h = sha1()
h.update(entropy(20))
return h.digest()
def decode_nodes(nodes):
n = []
length = len(nodes)
if (length % 26) != 0:
return n
for i in range(0, length, 26):
nid = nodes[i:i+20]
ip = inet_ntoa(nodes[i+20:i+24])
port = unpack("!H", nodes[i+24:i+26])[0]
n.append((nid, ip, port))
return n
def timer(t, f):
Timer(t, f).start()
def get_neighbor(target, nid, end=10):
return target[:end]+nid[end:]
class KNode(object):
def __init__(self, nid, ip, port):
self.nid = nid
self.ip = ip
self.port = port
class DHTClient(Thread):
def __init__(self, max_node_qsize):
Thread.__init__(self)
self.setDaemon(True)
self.max_node_qsize = max_node_qsize
self.nid = random_id()
self.nodes = deque(maxlen=max_node_qsize)
def send_krpc(self, msg, address):
try:
self.ufd.sendto(bencode(msg), address)
except Exception:
pass
def send_find_node(self, address, nid=None):
nid = get_neighbor(nid, self.nid) if nid else self.nid
tid = entropy(TID_LENGTH)
msg = {
"t": tid,
"y": "q",
"q": "find_node",
"a": {
"id": nid,
"target": random_id()
}
}
self.send_krpc(msg, address)
def join_DHT(self):
for address in BOOTSTRAP_NODES:
self.send_find_node(address)
def re_join_DHT(self):
if len(self.nodes) == 0:
self.join_DHT()
timer(RE_JOIN_DHT_INTERVAL, self.re_join_DHT)
def auto_send_find_node(self):
wait = 1.0 / self.max_node_qsize
while True:
try:
node = self.nodes.popleft()
self.send_find_node((node.ip, node.port), node.nid)
except IndexError:
pass
try:
sleep(wait)
except KeyboardInterrupt:
os._exit(0)
def process_find_node_response(self, msg, address):
nodes = decode_nodes(msg["r"]["nodes"])
for node in nodes:
(nid, ip, port) = node
if len(nid) != 20: continue
if ip == self.bind_ip: continue
n = KNode(nid, ip, port)
self.nodes.append(n)
class DHTServer(DHTClient):
def __init__(self, master, bind_ip, bind_port, max_node_qsize):
DHTClient.__init__(self, max_node_qsize)
self.master = master
self.bind_ip = bind_ip
self.bind_port = bind_port
self.process_request_actions = {
"get_peers": self.on_get_peers_request,
"announce_peer": self.on_announce_peer_request,
}
self.ufd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.ufd.bind((self.bind_ip, self.bind_port))
timer(RE_JOIN_DHT_INTERVAL, self.re_join_DHT)
def run(self):
self.re_join_DHT()
while True:
try:
(data, address) = self.ufd.recvfrom(65536)
msg = bdecode(data)
self.on_message(msg, address)
except Exception:
pass
def on_message(self, msg, address):
try:
if msg["y"] == "r":
if msg["r"].has_key("nodes"):
self.process_find_node_response(msg, address)
elif msg["y"] == "q":
try:
self.process_request_actions[msg["q"]](msg, address)
except KeyError:
self.play_dead(msg, address)
except KeyError:
pass
def on_get_peers_request(self, msg, address):
try:
infohash = msg["a"]["info_hash"]
tid = msg["t"]
nid = msg["a"]["id"]
token = infohash[:TOKEN_LENGTH]
msg = {
"t": tid,
"y": "r",
"r": {
"id": get_neighbor(infohash, self.nid),
"nodes": "",
"token": token
}
}
self.master.log_hash(infohash, address)
self.send_krpc(msg, address)
except KeyError:
pass
def on_announce_peer_request(self, msg, address):
try:
infohash = msg["a"]["info_hash"]
token = msg["a"]["token"]
nid = msg["a"]["id"]
tid = msg["t"]
if infohash[:TOKEN_LENGTH] == token:
if msg["a"].has_key("implied_port ") and msg["a"]["implied_port "] != 0:
port = address[1]
else:
port = msg["a"]["port"]
self.master.log_announce(infohash, (address[0], port))
except Exception:
print 'error'
pass
finally:
self.ok(msg, address)
def play_dead(self, msg, address):
try:
tid = msg["t"]
msg = {
"t": tid,
"y": "e",
"e": [202, "Server Error"]
}
self.send_krpc(msg, address)
except KeyError:
pass
def ok(self, msg, address):
try:
tid = msg["t"]
nid = msg["a"]["id"]
msg = {
"t": tid,
"y": "r",
"r": {
"id": get_neighbor(nid, self.nid)
}
}
self.send_krpc(msg, address)
except KeyError:
pass
class Master(Thread):
def __init__(self):
Thread.__init__(self)
self.setDaemon(True)
self.queue = Queue()
self.metadata_queue = Queue()
self.dbconn = mdb.connect(DB_HOST, DB_USER, DB_PASS, 'ssbc', charset='utf8')
self.dbconn.autocommit(False)
self.dbcurr = self.dbconn.cursor()
self.dbcurr.execute('SET NAMES utf8')
self.n_reqs = self.n_valid = self.n_new = 0
self.n_downloading_lt = self.n_downloading_pt = 0
self.visited = set()
def got_torrent(self):
utcnow = datetime.datetime.utcnow()
binhash, address, data, dtype, start_time = self.metadata_queue.get()
if dtype == 'pt':
self.n_downloading_pt -= 1
elif dtype == 'lt':
self.n_downloading_lt -= 1
if not data:
return
self.n_valid += 1
try:
info = self.parse_torrent(data)
if not info:
return
except:
traceback.print_exc()
return
info_hash = binhash.encode('hex')
info['info_hash'] = info_hash
# need to build tags
info['tagged'] = False
info['classified'] = False
info['requests'] = 1
info['last_seen'] = utcnow
info['source_ip'] = address[0]
if info.get('files'):
files = [z for z in info['files'] if not z['path'].startswith('_')]
if not files:
files = info['files']
else:
files = [{'path': info['name'], 'length': info['length']}]
files.sort(key=lambda z:z['length'], reverse=True)
bigfname = files[0]['path']
info['extension'] = metautils.get_extension(bigfname).lower()
info['category'] = metautils.get_category(info['extension'])
if 'files' in info:
try:
self.dbcurr.execute('INSERT INTO search_filelist VALUES(%s, %s)', (info['info_hash'], json.dumps(info['files'])))
except:
print self.name, 'insert error', sys.exc_info()[1]
del info['files']
try:
try:
print '\n', 'Saved', info['info_hash'], dtype, info['name'], (time.time()-start_time), 's', address[0],
except:
print '\n', 'Saved', info['info_hash'],
ret = self.dbcurr.execute('INSERT INTO search_hash(info_hash,category,data_hash,name,extension,classified,source_ip,tagged,' +
'length,create_time,last_seen,requests,comment,creator) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)',
(info['info_hash'], info['category'], info['data_hash'], info['name'], info['extension'], info['classified'],
info['source_ip'], info['tagged'], info['length'], info['create_time'], info['last_seen'], info['requests'],
info.get('comment',''), info.get('creator','')))
self.dbconn.commit()
except:
print self.name, 'save error', self.name, info
traceback.print_exc()
return
self.n_new += 1
def run(self):
self.name = threading.currentThread().getName()
print self.name, 'started'
while True:
while self.metadata_queue.qsize() > 0:
self.got_torrent()
address, binhash, dtype = self.queue.get()
if binhash in self.visited:
continue
if len(self.visited) > 100000:
self.visited = set()
self.visited.add(binhash)
self.n_reqs += 1
info_hash = binhash.encode('hex')
utcnow = datetime.datetime.utcnow()
date = (utcnow + datetime.timedelta(hours=8))
date = datetime.datetime(date.year, date.month, date.day)
# Check if we have this info_hash
self.dbcurr.execute('SELECT id FROM search_hash WHERE info_hash=%s', (info_hash,))
y = self.dbcurr.fetchone()
if y:
self.n_valid += 1
# 更新最近发现时间,请求数
self.dbcurr.execute('UPDATE search_hash SET last_seen=%s, requests=requests+1 WHERE info_hash=%s', (utcnow, info_hash))
else:
if dtype == 'pt':
t = threading.Thread(target=simMetadata.download_metadata, args=(address, binhash, self.metadata_queue))
t.setDaemon(True)
t.start()
self.n_downloading_pt += 1
elif dtype == 'lt' and self.n_downloading_lt < MAX_QUEUE_LT:
t = threading.Thread(target=ltMetadata.download_metadata, args=(address, binhash, self.metadata_queue))
t.setDaemon(True)
t.start()
self.n_downloading_lt += 1
if self.n_reqs >= 1000:
self.dbcurr.execute('INSERT INTO search_statusreport(date,new_hashes,total_requests, valid_requests) VALUES(%s,%s,%s,%s) ON DUPLICATE KEY UPDATE ' +
'total_requests=total_requests+%s, valid_requests=valid_requests+%s, new_hashes=new_hashes+%s',
(date, self.n_new, self.n_reqs, self.n_valid, self.n_reqs, self.n_valid, self.n_new))
self.dbconn.commit()
print '\n', time.ctime(), 'n_reqs', self.n_reqs, 'n_valid', self.n_valid, 'n_new', self.n_new, 'n_queue', self.queue.qsize(),
print 'n_d_pt', self.n_downloading_pt, 'n_d_lt', self.n_downloading_lt,
self.n_reqs = self.n_valid = self.n_new = 0
def decode(self, s):
if type(s) is list:
s = ';'.join(s)
u = s
for x in (self.encoding, 'utf8', 'gbk', 'big5'):
try:
u = s.decode(x)
return u
except:
pass
return s.decode(self.encoding, 'ignore')
def decode_utf8(self, d, i):
if i+'.utf-8' in d:
return d[i+'.utf-8'].decode('utf8')
return self.decode(d[i])
def parse_torrent(self, data):
info = {}
self.encoding = 'utf8'
try:
torrent = bdecode(data)
if not torrent.get('name'):
return None
except:
return None
try:
info['create_time'] = datetime.datetime.fromtimestamp(float(torrent['creation date']))
except:
info['create_time'] = datetime.datetime.utcnow()
if torrent.get('encoding'):
self.encoding = torrent['encoding']
if torrent.get('announce'):
info['announce'] = self.decode_utf8(torrent, 'announce')
if torrent.get('comment'):
info['comment'] = self.decode_utf8(torrent, 'comment')[:200]
if torrent.get('publisher-url'):
info['publisher-url'] = self.decode_utf8(torrent, 'publisher-url')
if torrent.get('publisher'):
info['publisher'] = self.decode_utf8(torrent, 'publisher')
if torrent.get('created by'):
info['creator'] = self.decode_utf8(torrent, 'created by')[:15]
if 'info' in torrent:
detail = torrent['info']
else:
detail = torrent
info['name'] = self.decode_utf8(detail, 'name')
if 'files' in detail:
info['files'] = []
for x in detail['files']:
if 'path.utf-8' in x:
v = {'path': self.decode('/'.join(x['path.utf-8'])), 'length': x['length']}
else:
v = {'path': self.decode('/'.join(x['path'])), 'length': x['length']}
if 'filehash' in x:
v['filehash'] = x['filehash'].encode('hex')
info['files'].append(v)
info['length'] = sum([x['length'] for x in info['files']])
else:
info['length'] = detail['length']
info['data_hash'] = hashlib.md5(detail['pieces']).hexdigest()
if 'profiles' in detail:
info['profiles'] = detail['profiles']
return info
def log_announce(self, binhash, address=None):
self.queue.put([address, binhash, 'pt'])
def log_hash(self, binhash, address=None):
if not lt:
return
if self.n_downloading_lt < MAX_QUEUE_LT:
self.queue.put([address, binhash, 'lt'])
if __name__ == "__main__":
# max_node_qsize bigger, bandwith bigger, spped higher
master = Master()
master.start()
dht = DHTServer(master, "0.0.0.0", 6881, max_node_qsize=200)
dht.start()
dht.auto_send_find_node()
| gpl-2.0 | -7,332,594,788,571,477,000 | 30.960251 | 165 | 0.514303 | false |
jcpowermac/ansible | lib/ansible/modules/network/avi/avi_gslb.py | 15 | 6640 | #!/usr/bin/python
#
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_gslb
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of Gslb Avi RESTful Object
description:
- This module is used to configure Gslb object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
clear_on_max_retries:
description:
- Max retries after which the remote site is treated as a fresh start.
- In fresh start all the configs are downloaded.
- Allowed values are 1-1024.
- Default value when not specified in API or module is interpreted by Avi Controller as 20.
client_ip_addr_group:
description:
- Group to specify if the client ip addresses are public or private.
- Field introduced in 17.1.2.
version_added: "2.4"
description:
description:
- User defined description for the object.
dns_configs:
description:
- Sub domain configuration for the gslb.
- Gslb service's fqdn must be a match one of these subdomains.
is_federated:
description:
- This field indicates that this object is replicated across gslb federation.
- Field introduced in 17.1.3.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
version_added: "2.4"
leader_cluster_uuid:
description:
- Mark this site as leader of gslb configuration.
- This site is the one among the avi sites.
maintenance_mode:
description:
- This field disables the configuration operations on the leader for all federated objects.
- Cud operations on gslb, gslbservice, gslbgeodbprofile and other federated objects will be rejected.
- The rest-api disabling helps in upgrade scenarios where we don't want configuration sync operations to the gslb member when the member is being
- upgraded.
- This configuration programmatically blocks the leader from accepting new gslb configuration when member sites are undergoing upgrade.
- Field introduced in 17.2.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
version_added: "2.5"
name:
description:
- Name for the gslb object.
required: true
send_interval:
description:
- Frequency with which group members communicate.
- Allowed values are 1-3600.
- Default value when not specified in API or module is interpreted by Avi Controller as 15.
- Units(SEC).
sites:
description:
- Select avi site member belonging to this gslb.
tenant_ref:
description:
- It is a reference to an object of type tenant.
third_party_sites:
description:
- Third party site member belonging to this gslb.
- Field introduced in 17.1.1.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the gslb object.
view_id:
description:
- The view-id is used in change-leader mode to differentiate partitioned groups while they have the same gslb namespace.
- Each partitioned group will be able to operate independently by using the view-id.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create Gslb object
avi_gslb:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_gslb
"""
RETURN = '''
obj:
description: Gslb (api/gslb) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
clear_on_max_retries=dict(type='int',),
client_ip_addr_group=dict(type='dict',),
description=dict(type='str',),
dns_configs=dict(type='list',),
is_federated=dict(type='bool',),
leader_cluster_uuid=dict(type='str',),
maintenance_mode=dict(type='bool',),
name=dict(type='str', required=True),
send_interval=dict(type='int',),
sites=dict(type='list',),
tenant_ref=dict(type='str',),
third_party_sites=dict(type='list',),
url=dict(type='str',),
uuid=dict(type='str',),
view_id=dict(type='int',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'gslb',
set([]))
if __name__ == '__main__':
main()
| gpl-3.0 | -7,390,903,138,975,387,000 | 36.514124 | 157 | 0.619729 | false |
emonty/pyos | samples/cloud_blockstorage/delete_volume.py | 1 | 1371 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c)2012 Rackspace US, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import pyos
pyos.set_setting("identity_type", "rackspace")
creds_file = os.path.expanduser("~/.rackspace_cloud_credentials")
pyos.set_credential_file(creds_file)
cbs = pyos.cloud_blockstorage
# This assumes that you have are deleting the volumes named 'my_fast_volume'
# and 'my_standard_volume' that were created in create_volume.py.
for nm in ("my_fast_volume", "my_standard_volume"):
try:
vol = cbs.findall(name=nm)[0]
except IndexError:
print("There is no volume named '%s'. Skipping..." % nm)
vol = None
if vol:
print("Deleting", vol)
vol.delete()
print()
print("Done.")
print()
| apache-2.0 | 4,018,381,447,292,380,000 | 31.642857 | 78 | 0.691466 | false |
aleontiev/django-cli | djay/test.py | 2 | 1776 | import shutil
import os
from djay.commands.dj import execute
from djay.application import set_current_application, Application
from tempfile import mkdtemp
import multiprocessing
class TemporaryApplication(object):
DEFAULT_INIT_PARAMS = {
"app": "dummy",
"description": "dummy",
"author": "dummy",
"email": "[email protected]",
"version": "0.0.1",
"django_version": "1.10",
}
def __init__(self, params=None):
self._params = params or self.DEFAULT_INIT_PARAMS
self._initialized = False
self._directory = None
self._application = None
def _initialize(self):
if not self._initialized:
self._initialized = True
self._directory = mkdtemp()
self._application = Application(directory=self._directory)
set_current_application(self._application)
# generate initial blueprint
self._application.generate("init", self._params)
def __del__(self):
if self._initialized and self._directory:
shutil.rmtree(self._directory)
self._initialized = False
self._directory = None
self._application = None
set_current_application(None)
def execute(self, command, run_async=False):
def _execute(command):
cd = os.getcwd()
try:
os.chdir(self._directory)
result = execute(command)
return result
finally:
os.chdir(cd)
self._initialize()
if run_async:
job = multiprocessing.Process(target=_execute, args=(command,))
job.start()
return job
else:
return _execute(command)
| mit | -6,532,576,825,306,070,000 | 29.101695 | 75 | 0.569257 | false |
stargaser/astropy | astropy/table/pprint.py | 4 | 27490 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import sys
import re
import numpy as np
from astropy import log
from astropy.utils.console import Getch, color_print, terminal_size, conf
from astropy.utils.data_info import dtype_info_name
__all__ = []
def default_format_func(format_, val):
if isinstance(val, bytes):
return val.decode('utf-8', errors='replace')
else:
return str(val)
# The first three functions are helpers for _auto_format_func
def _use_str_for_masked_values(format_func):
"""Wrap format function to trap masked values.
    String format functions and most user-supplied functions will not be able
    to deal with masked values, so we wrap them to ensure that masked values
    are passed to str() instead.
"""
return lambda format_, val: (str(val) if val is np.ma.masked
else format_func(format_, val))
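# Sketch of the wrapping behaviour above (illustrative only, not used by the
# module): a wrapped format function falls back to str() for np.ma.masked
# instead of raising inside the underlying formatter:
#
#     >>> wrapped = _use_str_for_masked_values(lambda f, v: format(v, f))
#     >>> wrapped('7.3f', 1.23456)
#     '  1.235'
#     >>> wrapped('7.3f', np.ma.masked)
#     '--'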
def _possible_string_format_functions(format_):
"""Iterate through possible string-derived format functions.
    A string can be a format specifier for the ``format`` built-in,
    a new-style format string, or an old-style ``%`` format string.
"""
yield lambda format_, val: format(val, format_)
yield lambda format_, val: format_.format(val)
yield lambda format_, val: format_ % val
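# Illustrative sketch of the three styles yielded above (not used by the
# module itself); all three are plain builtin Python formatting and produce
# the same result for an equivalent format:
#
#     >>> format(1.23456, '7.3f')        # format-spec style
#     '  1.235'
#     >>> '{:7.3f}'.format(1.23456)      # new-style format string
#     '  1.235'
#     >>> '%7.3f' % 1.23456              # old-style format string
#     '  1.235'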
def get_auto_format_func(
col=None,
possible_string_format_functions=_possible_string_format_functions):
"""
Return a wrapped ``auto_format_func`` function which is used in
formatting table columns. This is primarily an internal function but
gets used directly in other parts of astropy, e.g. `astropy.io.ascii`.
Parameters
----------
    col : object, optional
        Column-like object whose ``info._format_funcs`` dict is used to
        cache the format function determined for a given format. Default is
        None.
possible_string_format_functions : func, optional
Function that yields possible string formatting functions
(defaults to internal function to do this).
Returns
-------
Wrapped ``auto_format_func`` function
"""
def _auto_format_func(format_, val):
"""Format ``val`` according to ``format_`` for a plain format specifier,
old- or new-style format strings, or using a user supplied function.
More importantly, determine and cache (in _format_funcs) a function
that will do this subsequently. In this way this complicated logic is
only done for the first value.
Returns the formatted value.
"""
if format_ is None:
return default_format_func(format_, val)
if format_ in col.info._format_funcs:
return col.info._format_funcs[format_](format_, val)
if callable(format_):
format_func = lambda format_, val: format_(val)
try:
out = format_func(format_, val)
if not isinstance(out, str):
raise ValueError('Format function for value {} returned {} '
'instead of string type'
.format(val, type(val)))
except Exception as err:
# For a masked element, the format function call likely failed
# to handle it. Just return the string representation for now,
# and retry when a non-masked value comes along.
if val is np.ma.masked:
return str(val)
raise ValueError('Format function for value {} failed: {}'
.format(val, err))
# If the user-supplied function handles formatting masked elements, use
# it directly. Otherwise, wrap it in a function that traps them.
try:
format_func(format_, np.ma.masked)
except Exception:
format_func = _use_str_for_masked_values(format_func)
else:
# For a masked element, we cannot set string-based format functions yet,
# as all tests below will fail. Just return the string representation
# of masked for now, and retry when a non-masked value comes along.
if val is np.ma.masked:
return str(val)
for format_func in possible_string_format_functions(format_):
try:
# Does this string format method work?
out = format_func(format_, val)
# Require that the format statement actually did something.
if out == format_:
raise ValueError('the format passed in did nothing.')
except Exception:
continue
else:
break
else:
# None of the possible string functions passed muster.
raise ValueError('unable to parse format string {} for its '
'column.'.format(format_))
# String-based format functions will fail on masked elements;
# wrap them in a function that traps them.
format_func = _use_str_for_masked_values(format_func)
col.info._format_funcs[format_] = format_func
return out
return _auto_format_func
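# Illustrative sketch (not part of the original source): the pattern used
# later in this module to obtain a formatting function for a column, where
# ``col`` is assumed to be a Table column with an ``info`` attribute.
#
#   auto_format_func = get_auto_format_func(col)
#   format_func = col.info._format_funcs.get(col.info.format, auto_format_func)
#   text = format_func(col.info.format, col[0])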
class TableFormatter:
@staticmethod
def _get_pprint_size(max_lines=None, max_width=None):
"""Get the output size (number of lines and character width) for Column and
Table pformat/pprint methods.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default will be determined
using the ``astropy.table.conf.max_lines`` configuration item. If a
negative value of ``max_lines`` is supplied then there is no line
limit applied.
The same applies for max_width except the configuration item is
``astropy.table.conf.max_width``.
Parameters
----------
max_lines : int or None
Maximum lines of output (header + data rows)
max_width : int or None
Maximum width (characters) output
Returns
-------
max_lines, max_width : int
"""
if max_lines is None:
max_lines = conf.max_lines
if max_width is None:
max_width = conf.max_width
if max_lines is None or max_width is None:
lines, width = terminal_size()
if max_lines is None:
max_lines = lines
elif max_lines < 0:
max_lines = sys.maxsize
if max_lines < 8:
max_lines = 8
if max_width is None:
max_width = width
elif max_width < 0:
max_width = sys.maxsize
if max_width < 10:
max_width = 10
return max_lines, max_width
def _pformat_col(self, col, max_lines=None, show_name=True, show_unit=None,
show_dtype=False, show_length=None, html=False, align=None):
"""Return a list of formatted string representation of column values.
Parameters
----------
max_lines : int
Maximum lines of output (header + data rows)
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include column dtype. Default is False.
show_length : bool
Include column length at end. Default is to show this only
if the column is not shown completely.
html : bool
Output column as HTML
align : str
Left/right alignment of columns. Default is '>' (right) for all
columns. Other allowed values are '<', '^', and '0=' for left,
centered, and 0-padded, respectively.
Returns
-------
lines : list
List of lines with formatted column values
outs : dict
Dict which is used to pass back additional values
defined within the iterator.
"""
if show_unit is None:
show_unit = col.info.unit is not None
outs = {} # Some values from _pformat_col_iter iterator that are needed here
col_strs_iter = self._pformat_col_iter(col, max_lines, show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
show_length=show_length,
outs=outs)
col_strs = list(col_strs_iter)
if len(col_strs) > 0:
col_width = max(len(x) for x in col_strs)
if html:
from astropy.utils.xml.writer import xml_escape
n_header = outs['n_header']
for i, col_str in enumerate(col_strs):
# _pformat_col output has a header line '----' which is not needed here
if i == n_header - 1:
continue
td = 'th' if i < n_header else 'td'
val = '<{}>{}</{}>'.format(td, xml_escape(col_str.strip()), td)
row = ('<tr>' + val + '</tr>')
if i < n_header:
row = ('<thead>' + row + '</thead>')
col_strs[i] = row
if n_header > 0:
# Get rid of '---' header line
col_strs.pop(n_header - 1)
col_strs.insert(0, '<table>')
col_strs.append('</table>')
# Now bring all the column string values to the same fixed width
else:
col_width = max(len(x) for x in col_strs) if col_strs else 1
# Center line header content and generate dashed headerline
for i in outs['i_centers']:
col_strs[i] = col_strs[i].center(col_width)
if outs['i_dashes'] is not None:
col_strs[outs['i_dashes']] = '-' * col_width
# Format columns according to alignment. `align` arg has precedent, otherwise
# use `col.format` if it starts as a legal alignment string. If neither applies
# then right justify.
re_fill_align = re.compile(r'(?P<fill>.?)(?P<align>[<^>=])')
match = None
if align:
# If there is an align specified then it must match
match = re_fill_align.match(align)
if not match:
raise ValueError("column align must be one of '<', '^', '>', or '='")
elif isinstance(col.info.format, str):
# col.info.format need not match, in which case rjust gets used
match = re_fill_align.match(col.info.format)
if match:
fill_char = match.group('fill')
align_char = match.group('align')
if align_char == '=':
if fill_char != '0':
raise ValueError("fill character must be '0' for '=' align")
fill_char = '' # str.zfill gets used which does not take fill char arg
else:
fill_char = ''
align_char = '>'
justify_methods = {'<': 'ljust', '^': 'center', '>': 'rjust', '=': 'zfill'}
justify_method = justify_methods[align_char]
justify_args = (col_width, fill_char) if fill_char else (col_width,)
for i, col_str in enumerate(col_strs):
col_strs[i] = getattr(col_str, justify_method)(*justify_args)
if outs['show_length']:
col_strs.append('Length = {} rows'.format(len(col)))
return col_strs, outs
def _pformat_col_iter(self, col, max_lines, show_name, show_unit, outs,
show_dtype=False, show_length=None):
"""Iterator which yields formatted string representation of column values.
Parameters
----------
max_lines : int
Maximum lines of output (header + data rows)
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
outs : dict
Must be a dict which is used to pass back additional values
defined within the iterator.
show_dtype : bool
Include column dtype. Default is False.
show_length : bool
Include column length at end. Default is to show this only
if the column is not shown completely.
"""
max_lines, _ = self._get_pprint_size(max_lines, -1)
multidims = getattr(col, 'shape', [0])[1:]
if multidims:
multidim0 = tuple(0 for n in multidims)
multidim1 = tuple(n - 1 for n in multidims)
trivial_multidims = np.prod(multidims) == 1
i_dashes = None
i_centers = [] # Line indexes where content should be centered
n_header = 0
if show_name:
i_centers.append(n_header)
# Get column name (or 'None' if not set)
col_name = str(col.info.name)
if multidims:
col_name += ' [{}]'.format(
','.join(str(n) for n in multidims))
n_header += 1
yield col_name
if show_unit:
i_centers.append(n_header)
n_header += 1
yield str(col.info.unit or '')
if show_dtype:
i_centers.append(n_header)
n_header += 1
try:
dtype = dtype_info_name(col.dtype)
except AttributeError:
dtype = 'object'
yield str(dtype)
if show_unit or show_name or show_dtype:
i_dashes = n_header
n_header += 1
yield '---'
max_lines -= n_header
n_print2 = max_lines // 2
n_rows = len(col)
# This block of code is responsible for producing the function that
# will format values for this column. The ``format_func`` function
# takes two args (col_format, val) and returns the string-formatted
# version. Some points to understand:
#
# - col_format could itself be the formatting function, so it will
# actually end up being called with itself as the first arg. In
# this case the function is expected to ignore its first arg.
#
# - auto_format_func is a function that gets called on the first
# column value that is being formatted. It then determines an
# appropriate formatting function given the actual value to be
# formatted. This might be deterministic or it might involve
# try/except. The latter allows for different string formatting
# options like %f or {:5.3f}. When auto_format_func is called it:
# 1. Caches the function in the _format_funcs dict so for subsequent
# values the right function is called right away.
# 2. Returns the formatted value.
#
# - possible_string_format_functions is a function that yields a
# succession of functions that might successfully format the
# value. There is a default, but Mixin methods can override this.
# See Quantity for an example.
#
# - get_auto_format_func() returns a wrapped version of auto_format_func
# with the column id and possible_string_format_functions as
# enclosed variables.
col_format = col.info.format or getattr(col.info, 'default_format',
None)
pssf = (getattr(col.info, 'possible_string_format_functions', None) or
_possible_string_format_functions)
auto_format_func = get_auto_format_func(col, pssf)
format_func = col.info._format_funcs.get(col_format, auto_format_func)
if len(col) > max_lines:
if show_length is None:
show_length = True
i0 = n_print2 - (1 if show_length else 0)
i1 = n_rows - n_print2 - max_lines % 2
indices = np.concatenate([np.arange(0, i0 + 1),
np.arange(i1 + 1, len(col))])
else:
i0 = -1
indices = np.arange(len(col))
def format_col_str(idx):
if multidims:
# Prevents columns like Column(data=[[(1,)],[(2,)]], name='a')
# with shape (n,1,...,1) from being printed as if there was
# more than one element in a row
if trivial_multidims:
return format_func(col_format, col[(idx,) + multidim0])
else:
left = format_func(col_format, col[(idx,) + multidim0])
right = format_func(col_format, col[(idx,) + multidim1])
return f'{left} .. {right}'
else:
return format_func(col_format, col[idx])
# Add formatted values if within bounds allowed by max_lines
for idx in indices:
if idx == i0:
yield '...'
else:
try:
yield format_col_str(idx)
except ValueError:
raise ValueError(
'Unable to parse format string "{}" for entry "{}" '
'in column "{}"'.format(col_format, col[idx],
col.info.name))
outs['show_length'] = show_length
outs['n_header'] = n_header
outs['i_centers'] = i_centers
outs['i_dashes'] = i_dashes
def _pformat_table(self, table, max_lines=None, max_width=None,
show_name=True, show_unit=None, show_dtype=False,
html=False, tableid=None, tableclass=None, align=None):
"""Return a list of lines for the formatted string representation of
the table.
Parameters
----------
max_lines : int or None
Maximum number of rows to output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
html : bool
Format the output as an HTML table. Default is False.
tableid : str or None
An ID tag for the table; only used if html is set. Default is
"table{id}", where id is the unique integer id of the table object,
id(table)
tableclass : str or list of str or `None`
CSS classes for the table; only used if html is set. Default is
none
align : str or list or tuple
Left/right alignment of columns. Default is '>' (right) for all
columns. Other allowed values are '<', '^', and '0=' for left,
centered, and 0-padded, respectively. A list of strings can be
provided for alignment of tables with multiple columns.
Returns
-------
rows : list
Formatted table as a list of strings
outs : dict
Dict which is used to pass back additional values
defined within the iterator.
"""
# "Print" all the values into temporary lists by column for subsequent
# use and to determine the width
max_lines, max_width = self._get_pprint_size(max_lines, max_width)
cols = []
if show_unit is None:
show_unit = any(col.info.unit for col in table.columns.values())
# Coerce align into a correctly-sized list of alignments (if possible)
n_cols = len(table.columns)
if align is None or isinstance(align, str):
align = [align] * n_cols
elif isinstance(align, (list, tuple)):
if len(align) != n_cols:
raise ValueError('got {} alignment values instead of '
'the number of columns ({})'
.format(len(align), n_cols))
else:
raise TypeError('align keyword must be str or list or tuple (got {})'
.format(type(align)))
for align_, col in zip(align, table.columns.values()):
lines, outs = self._pformat_col(col, max_lines, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype,
align=align_)
if outs['show_length']:
lines = lines[:-1]
cols.append(lines)
if not cols:
return ['<No columns>'], {'show_length': False}
# Use the values for the last column since they are all the same
n_header = outs['n_header']
n_rows = len(cols[0])
outwidth = lambda cols: sum(len(c[0]) for c in cols) + len(cols) - 1
dots_col = ['...'] * n_rows
middle = len(cols) // 2
while outwidth(cols) > max_width:
if len(cols) == 1:
break
if len(cols) == 2:
cols[1] = dots_col
break
if cols[middle] is dots_col:
cols.pop(middle)
middle = len(cols) // 2
cols[middle] = dots_col
# Now "print" the (already-stringified) column values into a
# row-oriented list.
rows = []
if html:
from astropy.utils.xml.writer import xml_escape
if tableid is None:
tableid = 'table{id}'.format(id=id(table))
if tableclass is not None:
if isinstance(tableclass, list):
tableclass = ' '.join(tableclass)
rows.append('<table id="{tid}" class="{tcls}">'.format(
tid=tableid, tcls=tableclass))
else:
rows.append(f'<table id="{tableid}">')
for i in range(n_rows):
# _pformat_col output has a header line '----' which is not needed here
if i == n_header - 1:
continue
td = 'th' if i < n_header else 'td'
vals = ('<{}>{}</{}>'.format(td, xml_escape(col[i].strip()), td)
for col in cols)
row = ('<tr>' + ''.join(vals) + '</tr>')
if i < n_header:
row = ('<thead>' + row + '</thead>')
rows.append(row)
rows.append('</table>')
else:
for i in range(n_rows):
row = ' '.join(col[i] for col in cols)
rows.append(row)
return rows, outs
def _more_tabcol(self, tabcol, max_lines=None, max_width=None,
show_name=True, show_unit=None, show_dtype=False):
"""Interactive "more" of a table or column.
Parameters
----------
max_lines : int or None
Maximum number of rows to output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
"""
allowed_keys = 'f br<>qhpn'
# Count the header lines
n_header = 0
if show_name:
n_header += 1
if show_unit:
n_header += 1
if show_dtype:
n_header += 1
if show_name or show_unit or show_dtype:
n_header += 1
# Set up kwargs for pformat call. Only Table gets max_width.
kwargs = dict(max_lines=-1, show_name=show_name, show_unit=show_unit,
show_dtype=show_dtype)
if hasattr(tabcol, 'columns'): # tabcol is a table
kwargs['max_width'] = max_width
# If max_lines is None (=> query screen size) then increase by 2.
# This is because get_pprint_size leaves 6 extra lines so that in
# ipython you normally see the last input line.
max_lines1, max_width = self._get_pprint_size(max_lines, max_width)
if max_lines is None:
max_lines1 += 2
delta_lines = max_lines1 - n_header
# Set up a function to get a single character on any platform
inkey = Getch()
i0 = 0 # First table/column row to show
showlines = True
while True:
i1 = i0 + delta_lines # Last table/col row to show
if showlines: # Don't always show the table (e.g. after help)
try:
os.system('cls' if os.name == 'nt' else 'clear')
except Exception:
pass # No worries if clear screen call fails
lines = tabcol[i0:i1].pformat(**kwargs)
colors = ('red' if i < n_header else 'default'
for i in range(len(lines)))
for color, line in zip(colors, lines):
color_print(line, color)
showlines = True
print()
print("-- f, <space>, b, r, p, n, <, >, q h (help) --", end=' ')
# Get a valid key
while True:
try:
key = inkey().lower()
except Exception:
print("\n")
log.error('Console does not support getting a character'
' as required by more(). Use pprint() instead.')
return
if key in allowed_keys:
break
print(key)
if key.lower() == 'q':
break
elif key == ' ' or key == 'f':
i0 += delta_lines
elif key == 'b':
i0 = i0 - delta_lines
elif key == 'r':
pass
elif key == '<':
i0 = 0
elif key == '>':
i0 = len(tabcol)
elif key == 'p':
i0 -= 1
elif key == 'n':
i0 += 1
elif key == 'h':
showlines = False
print("""
Browsing keys:
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help""", end=' ')
if i0 < 0:
i0 = 0
if i0 >= len(tabcol) - delta_lines:
i0 = len(tabcol) - delta_lines
print("\n")
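# Illustrative sketch (not part of the original file): how the fill/align
# strings accepted by _pformat_col map onto plain str methods, assuming a
# fixed column width of 7 characters.
if __name__ == '__main__':
    width = 7
    print('1.5'.rjust(width))   # '>'  default right alignment
    print('1.5'.ljust(width))   # '<'  left alignment
    print('1.5'.center(width))  # '^'  centered
    print('1.5'.zfill(width))   # '0=' zero-padded alignment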
| bsd-3-clause | 8,854,321,154,389,072,000 | 37.233658 | 92 | 0.527246 | false |
axbaretto/beam | sdks/python/apache_beam/examples/cookbook/bigquery_side_input.py | 5 | 4340 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A Dataflow job that uses BigQuery sources as a side inputs.
Illustrates how to insert side-inputs into transforms in three different forms,
as a singleton, as a iterator, and as a list.
This workflow generate a set of tuples of the form (groupId, corpus, word) where
groupId is a generated identifier for the group and corpus and word are randomly
selected from corresponding rows in BQ dataset 'publicdata:samples.shakespeare'.
Users should specify the number of groups to form and optionally a corpus and/or
a word that should be ignored when forming groups.
"""
# pytype: skip-file
import argparse
import logging
from random import randrange
import apache_beam as beam
from apache_beam.io import WriteToText
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.pvalue import AsList
from apache_beam.pvalue import AsSingleton
def create_groups(group_ids, corpus, word, ignore_corpus, ignore_word):
"""Generate groups given the input PCollections."""
def attach_corpus_fn(group, corpus, ignore):
selected = None
len_corpus = len(corpus)
while not selected:
c = list(corpus[randrange(0, len_corpus)].values())[0]
if c != ignore:
selected = c
yield (group, selected)
def attach_word_fn(group, words, ignore):
selected = None
len_words = len(words)
while not selected:
c = list(words[randrange(0, len_words)].values())[0]
if c != ignore:
selected = c
yield group + (selected, )
return (
group_ids
| 'attach corpus' >> beam.FlatMap(
attach_corpus_fn, AsList(corpus), AsSingleton(ignore_corpus))
| 'attach word' >> beam.FlatMap(
attach_word_fn, AsList(word), AsSingleton(ignore_word)))
def run(argv=None):
"""Run the workflow."""
parser = argparse.ArgumentParser()
parser.add_argument('--output')
parser.add_argument('--ignore_corpus', default='')
parser.add_argument('--ignore_word', default='')
parser.add_argument('--num_groups')
known_args, pipeline_args = parser.parse_known_args(argv)
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = True
with beam.Pipeline(options=pipeline_options) as p:
group_ids = []
for i in range(0, int(known_args.num_groups)):
group_ids.append('id' + str(i))
query_corpus = 'select UNIQUE(corpus) from publicdata:samples.shakespeare'
query_word = 'select UNIQUE(word) from publicdata:samples.shakespeare'
ignore_corpus = known_args.ignore_corpus
ignore_word = known_args.ignore_word
pcoll_corpus = p | 'read corpus' >> beam.io.ReadFromBigQuery(
query=query_corpus)
pcoll_word = p | 'read_words' >> beam.io.ReadFromBigQuery(query=query_word)
pcoll_ignore_corpus = p | 'create_ignore_corpus' >> beam.Create(
[ignore_corpus])
pcoll_ignore_word = p | 'create_ignore_word' >> beam.Create([ignore_word])
pcoll_group_ids = p | 'create groups' >> beam.Create(group_ids)
pcoll_groups = create_groups(
pcoll_group_ids,
pcoll_corpus,
pcoll_word,
pcoll_ignore_corpus,
pcoll_ignore_word)
# pylint:disable=expression-not-assigned
pcoll_groups | WriteToText(known_args.output)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
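# Example invocation (a sketch; the bucket and file names are assumptions,
# only the flags defined by the argparse options above are real):
#
#   python bigquery_side_input.py \
#       --num_groups 10 \
#       --output gs://YOUR_BUCKET/groups_output \
#       --ignore_corpus hamlet \
#       --ignore_word the \
#       --runner DirectRunner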
| apache-2.0 | -6,556,777,141,261,816,000 | 35.166667 | 80 | 0.711751 | false |
MyAOSP/external_chromium_org | build/android/gyp/create_device_library_links.py | 29 | 3556 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates symlinks to native libraries for an APK.
The native libraries should have previously been pushed to the device (in
options.target_dir). This script then creates links in an apk's lib/ folder to
those native libraries.
"""
import json
import optparse
import os
import sys
from util import build_device
from util import build_utils
from util import md5_check
BUILD_ANDROID_DIR = os.path.join(os.path.dirname(__file__), '..')
sys.path.append(BUILD_ANDROID_DIR)
from pylib import constants
from pylib.utils import apk_helper
def RunShellCommand(device, cmd):
output = device.RunShellCommand(cmd)
if output:
raise Exception(
'Unexpected output running command: ' + cmd + '\n' +
'\n'.join(output))
def CreateSymlinkScript(options):
libraries = build_utils.ReadJson(options.libraries_json)
link_cmd = (
'rm $APK_LIBRARIES_DIR/%(lib_basename)s > /dev/null 2>&1 \n'
'ln -s $STRIPPED_LIBRARIES_DIR/%(lib_basename)s '
'$APK_LIBRARIES_DIR/%(lib_basename)s \n'
)
script = '#!/bin/sh \n'
for lib in libraries:
script += link_cmd % { 'lib_basename': lib }
with open(options.script_host_path, 'w') as scriptfile:
scriptfile.write(script)
def TriggerSymlinkScript(options):
device = build_device.GetBuildDeviceFromPath(
options.build_device_configuration)
if not device:
return
apk_package = apk_helper.GetPackageName(options.apk)
apk_libraries_dir = '/data/data/%s/lib' % apk_package
device_dir = os.path.dirname(options.script_device_path)
mkdir_cmd = ('if [ ! -e %(dir)s ]; then mkdir -p %(dir)s; fi ' %
{ 'dir': device_dir })
RunShellCommand(device, mkdir_cmd)
device.PushIfNeeded(options.script_host_path, options.script_device_path)
trigger_cmd = (
'APK_LIBRARIES_DIR=%(apk_libraries_dir)s; '
'STRIPPED_LIBRARIES_DIR=%(target_dir)s; '
'. %(script_device_path)s'
) % {
'apk_libraries_dir': apk_libraries_dir,
'target_dir': options.target_dir,
'script_device_path': options.script_device_path
}
RunShellCommand(device, trigger_cmd)
def main(argv):
parser = optparse.OptionParser()
parser.add_option('--apk', help='Path to the apk.')
parser.add_option('--script-host-path',
help='Path on the host for the symlink script.')
parser.add_option('--script-device-path',
help='Path on the device to push the created symlink script.')
parser.add_option('--libraries-json',
help='Path to the json list of native libraries.')
parser.add_option('--target-dir',
help='Device directory that contains the target libraries for symlinks.')
parser.add_option('--stamp', help='Path to touch on success.')
parser.add_option('--build-device-configuration',
help='Path to build device configuration.')
parser.add_option('--configuration-name',
help='The build CONFIGURATION_NAME')
options, _ = parser.parse_args()
required_options = ['apk', 'libraries_json', 'script_host_path',
'script_device_path', 'target_dir', 'configuration_name']
build_utils.CheckOptions(options, parser, required=required_options)
constants.SetBuildType(options.configuration_name)
CreateSymlinkScript(options)
TriggerSymlinkScript(options)
if options.stamp:
build_utils.Touch(options.stamp)
if __name__ == '__main__':
sys.exit(main(sys.argv))
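# Example invocation (a sketch; the concrete paths are assumptions, only the
# flags defined by the optparse options above are real):
#
#   create_device_library_links.py \
#       --apk out/Debug/apks/ContentShell.apk \
#       --libraries-json out/Debug/content_shell_libraries.json \
#       --script-host-path out/Debug/symlink_script.sh \
#       --script-device-path /data/local/tmp/symlink_script.sh \
#       --target-dir /data/local/tmp/content_shell_libs \
#       --configuration-name Debug \
#       --build-device-configuration out/build_device_config.json \
#       --stamp out/Debug/create_links.stamp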
| bsd-3-clause | -4,259,378,947,479,805,000 | 30.192982 | 79 | 0.689258 | false |
adrianholovaty/django | django/template/defaulttags.py | 3 | 46745 | """Default tags used by the template system, available to all templates."""
import sys
import re
from datetime import datetime
from itertools import groupby, cycle as itertools_cycle
from django.conf import settings
from django.template.base import (Node, NodeList, Template, Library,
TemplateSyntaxError, VariableDoesNotExist, InvalidTemplateLibrary,
BLOCK_TAG_START, BLOCK_TAG_END, VARIABLE_TAG_START, VARIABLE_TAG_END,
SINGLE_BRACE_START, SINGLE_BRACE_END, COMMENT_TAG_START, COMMENT_TAG_END,
VARIABLE_ATTRIBUTE_SEPARATOR, get_library, token_kwargs, kwarg_re)
from django.template.smartif import IfParser, Literal
from django.template.defaultfilters import date
from django.utils.encoding import smart_str, smart_unicode
from django.utils.safestring import mark_safe
from django.utils import timezone
register = Library()
class AutoEscapeControlNode(Node):
"""Implements the actions of the autoescape tag."""
def __init__(self, setting, nodelist):
self.setting, self.nodelist = setting, nodelist
def render(self, context):
old_setting = context.autoescape
context.autoescape = self.setting
output = self.nodelist.render(context)
context.autoescape = old_setting
if self.setting:
return mark_safe(output)
else:
return output
class CommentNode(Node):
def render(self, context):
return ''
class CsrfTokenNode(Node):
def render(self, context):
csrf_token = context.get('csrf_token', None)
if csrf_token:
if csrf_token == 'NOTPROVIDED':
return mark_safe(u"")
else:
return mark_safe(u"<div style='display:none'><input type='hidden' name='csrfmiddlewaretoken' value='%s' /></div>" % csrf_token)
else:
# It's very probable that the token is missing because of
# misconfiguration, so we raise a warning
from django.conf import settings
if settings.DEBUG:
import warnings
warnings.warn("A {% csrf_token %} was used in a template, but the context did not provide the value. This is usually caused by not using RequestContext.")
return u''
class CycleNode(Node):
def __init__(self, cyclevars, variable_name=None, silent=False):
self.cyclevars = cyclevars
self.variable_name = variable_name
self.silent = silent
def render(self, context):
if self not in context.render_context:
# First time the node is rendered in template
context.render_context[self] = itertools_cycle(self.cyclevars)
cycle_iter = context.render_context[self]
value = cycle_iter.next().resolve(context)
if self.variable_name:
context[self.variable_name] = value
if self.silent:
return ''
return value
class DebugNode(Node):
def render(self, context):
from pprint import pformat
output = [pformat(val) for val in context]
output.append('\n\n')
output.append(pformat(sys.modules))
return ''.join(output)
class FilterNode(Node):
def __init__(self, filter_expr, nodelist):
self.filter_expr, self.nodelist = filter_expr, nodelist
def render(self, context):
output = self.nodelist.render(context)
# Apply filters.
context.update({'var': output})
filtered = self.filter_expr.resolve(context)
context.pop()
return filtered
class FirstOfNode(Node):
def __init__(self, vars):
self.vars = vars
def render(self, context):
for var in self.vars:
value = var.resolve(context, True)
if value:
return smart_unicode(value)
return u''
class ForNode(Node):
child_nodelists = ('nodelist_loop', 'nodelist_empty')
def __init__(self, loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty=None):
self.loopvars, self.sequence = loopvars, sequence
self.is_reversed = is_reversed
self.nodelist_loop = nodelist_loop
if nodelist_empty is None:
self.nodelist_empty = NodeList()
else:
self.nodelist_empty = nodelist_empty
def __repr__(self):
reversed_text = self.is_reversed and ' reversed' or ''
return "<For Node: for %s in %s, tail_len: %d%s>" % \
(', '.join(self.loopvars), self.sequence, len(self.nodelist_loop),
reversed_text)
def __iter__(self):
for node in self.nodelist_loop:
yield node
for node in self.nodelist_empty:
yield node
def render(self, context):
if 'forloop' in context:
parentloop = context['forloop']
else:
parentloop = {}
context.push()
try:
values = self.sequence.resolve(context, True)
except VariableDoesNotExist:
values = []
if values is None:
values = []
if not hasattr(values, '__len__'):
values = list(values)
len_values = len(values)
if len_values < 1:
context.pop()
return self.nodelist_empty.render(context)
nodelist = NodeList()
if self.is_reversed:
values = reversed(values)
unpack = len(self.loopvars) > 1
# Create a forloop value in the context. We'll update counters on each
# iteration just below.
loop_dict = context['forloop'] = {'parentloop': parentloop}
for i, item in enumerate(values):
# Shortcuts for current loop iteration number.
loop_dict['counter0'] = i
loop_dict['counter'] = i+1
# Reverse counter iteration numbers.
loop_dict['revcounter'] = len_values - i
loop_dict['revcounter0'] = len_values - i - 1
# Boolean values designating first and last times through loop.
loop_dict['first'] = (i == 0)
loop_dict['last'] = (i == len_values - 1)
pop_context = False
if unpack:
# If there are multiple loop variables, unpack the item into
# them.
try:
unpacked_vars = dict(zip(self.loopvars, item))
except TypeError:
pass
else:
pop_context = True
context.update(unpacked_vars)
else:
context[self.loopvars[0]] = item
# In TEMPLATE_DEBUG mode provide source of the node which
# actually raised the exception
if settings.TEMPLATE_DEBUG:
for node in self.nodelist_loop:
try:
nodelist.append(node.render(context))
except Exception as e:
if not hasattr(e, 'django_template_source'):
e.django_template_source = node.source
raise
else:
for node in self.nodelist_loop:
nodelist.append(node.render(context))
if pop_context:
# The loop variables were pushed on to the context so pop them
# off again. This is necessary because the tag lets the length
# of loopvars differ to the length of each set of items and we
# don't want to leave any vars from the previous loop on the
# context.
context.pop()
context.pop()
return nodelist.render(context)
class IfChangedNode(Node):
child_nodelists = ('nodelist_true', 'nodelist_false')
def __init__(self, nodelist_true, nodelist_false, *varlist):
self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
self._last_seen = None
self._varlist = varlist
self._id = str(id(self))
def render(self, context):
if 'forloop' in context and self._id not in context['forloop']:
self._last_seen = None
context['forloop'][self._id] = 1
try:
if self._varlist:
# Consider multiple parameters. This automatically behaves
# like an OR evaluation of the multiple variables.
compare_to = [var.resolve(context, True) for var in self._varlist]
else:
compare_to = self.nodelist_true.render(context)
except VariableDoesNotExist:
compare_to = None
if compare_to != self._last_seen:
self._last_seen = compare_to
content = self.nodelist_true.render(context)
return content
elif self.nodelist_false:
return self.nodelist_false.render(context)
return ''
class IfEqualNode(Node):
child_nodelists = ('nodelist_true', 'nodelist_false')
def __init__(self, var1, var2, nodelist_true, nodelist_false, negate):
self.var1, self.var2 = var1, var2
self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
self.negate = negate
def __repr__(self):
return "<IfEqualNode>"
def render(self, context):
val1 = self.var1.resolve(context, True)
val2 = self.var2.resolve(context, True)
if (self.negate and val1 != val2) or (not self.negate and val1 == val2):
return self.nodelist_true.render(context)
return self.nodelist_false.render(context)
class IfNode(Node):
def __init__(self, conditions_nodelists):
self.conditions_nodelists = conditions_nodelists
def __repr__(self):
return "<IfNode>"
def __iter__(self):
for _, nodelist in self.conditions_nodelists:
for node in nodelist:
yield node
@property
def nodelist(self):
return NodeList(node for _, nodelist in self.conditions_nodelists for node in nodelist)
def render(self, context):
for condition, nodelist in self.conditions_nodelists:
if condition is not None: # if / elif clause
try:
match = condition.eval(context)
except VariableDoesNotExist:
match = None
else: # else clause
match = True
if match:
return nodelist.render(context)
return ''
class RegroupNode(Node):
def __init__(self, target, expression, var_name):
self.target, self.expression = target, expression
self.var_name = var_name
def resolve_expression(self, obj, context):
# This method is called for each object in self.target. See regroup()
# for the reason why we temporarily put the object in the context.
context[self.var_name] = obj
return self.expression.resolve(context, True)
def render(self, context):
obj_list = self.target.resolve(context, True)
        if obj_list is None:
# target variable wasn't found in context; fail silently.
context[self.var_name] = []
return ''
# List of dictionaries in the format:
# {'grouper': 'key', 'list': [list of contents]}.
context[self.var_name] = [
{'grouper': key, 'list': list(val)}
for key, val in
groupby(obj_list, lambda obj: self.resolve_expression(obj, context))
]
return ''
def include_is_allowed(filepath):
for root in settings.ALLOWED_INCLUDE_ROOTS:
if filepath.startswith(root):
return True
return False
class SsiNode(Node):
def __init__(self, filepath, parsed):
self.filepath = filepath
self.parsed = parsed
def render(self, context):
filepath = self.filepath.resolve(context)
if not include_is_allowed(filepath):
if settings.DEBUG:
return "[Didn't have permission to include file]"
else:
return '' # Fail silently for invalid includes.
try:
with open(filepath, 'r') as fp:
output = fp.read()
except IOError:
output = ''
if self.parsed:
try:
t = Template(output, name=filepath)
return t.render(context)
except TemplateSyntaxError as e:
if settings.DEBUG:
return "[Included template had syntax error: %s]" % e
else:
return '' # Fail silently for invalid included templates.
return output
class LoadNode(Node):
def render(self, context):
return ''
class NowNode(Node):
def __init__(self, format_string):
self.format_string = format_string
def render(self, context):
tzinfo = timezone.get_current_timezone() if settings.USE_TZ else None
return date(datetime.now(tz=tzinfo), self.format_string)
class SpacelessNode(Node):
def __init__(self, nodelist):
self.nodelist = nodelist
def render(self, context):
from django.utils.html import strip_spaces_between_tags
return strip_spaces_between_tags(self.nodelist.render(context).strip())
class TemplateTagNode(Node):
mapping = {'openblock': BLOCK_TAG_START,
'closeblock': BLOCK_TAG_END,
'openvariable': VARIABLE_TAG_START,
'closevariable': VARIABLE_TAG_END,
'openbrace': SINGLE_BRACE_START,
'closebrace': SINGLE_BRACE_END,
'opencomment': COMMENT_TAG_START,
'closecomment': COMMENT_TAG_END,
}
def __init__(self, tagtype):
self.tagtype = tagtype
def render(self, context):
return self.mapping.get(self.tagtype, '')
class URLNode(Node):
def __init__(self, view_name, args, kwargs, asvar):
self.view_name = view_name
self.args = args
self.kwargs = kwargs
self.asvar = asvar
def render(self, context):
from django.core.urlresolvers import reverse, NoReverseMatch
args = [arg.resolve(context) for arg in self.args]
kwargs = dict([(smart_str(k, 'ascii'), v.resolve(context))
for k, v in self.kwargs.items()])
view_name = self.view_name.resolve(context)
# Try to look up the URL twice: once given the view name, and again
# relative to what we guess is the "main" app. If they both fail,
# re-raise the NoReverseMatch unless we're using the
        # {% url ... as var %} construct, in which case return nothing.
url = ''
try:
url = reverse(view_name, args=args, kwargs=kwargs, current_app=context.current_app)
except NoReverseMatch as e:
if settings.SETTINGS_MODULE:
project_name = settings.SETTINGS_MODULE.split('.')[0]
try:
url = reverse(project_name + '.' + view_name,
args=args, kwargs=kwargs,
current_app=context.current_app)
except NoReverseMatch:
if self.asvar is None:
# Re-raise the original exception, not the one with
# the path relative to the project. This makes a
# better error message.
raise e
else:
if self.asvar is None:
raise e
if self.asvar:
context[self.asvar] = url
return ''
else:
return url
class WidthRatioNode(Node):
def __init__(self, val_expr, max_expr, max_width):
self.val_expr = val_expr
self.max_expr = max_expr
self.max_width = max_width
def render(self, context):
try:
value = self.val_expr.resolve(context)
max_value = self.max_expr.resolve(context)
max_width = int(self.max_width.resolve(context))
except VariableDoesNotExist:
return ''
except ValueError:
            raise TemplateSyntaxError("widthratio final argument must be a number")
try:
value = float(value)
max_value = float(max_value)
ratio = (value / max_value) * max_width
except ZeroDivisionError:
return '0'
except ValueError:
return ''
return str(int(round(ratio)))
class WithNode(Node):
def __init__(self, var, name, nodelist, extra_context=None):
self.nodelist = nodelist
# var and name are legacy attributes, being left in case they are used
# by third-party subclasses of this Node.
self.extra_context = extra_context or {}
if name:
self.extra_context[name] = var
def __repr__(self):
return "<WithNode>"
def render(self, context):
values = dict([(key, val.resolve(context)) for key, val in
self.extra_context.iteritems()])
context.update(values)
output = self.nodelist.render(context)
context.pop()
return output
@register.tag
def autoescape(parser, token):
"""
Force autoescape behavior for this block.
"""
args = token.contents.split()
if len(args) != 2:
raise TemplateSyntaxError("'autoescape' tag requires exactly one argument.")
arg = args[1]
if arg not in (u'on', u'off'):
raise TemplateSyntaxError("'autoescape' argument should be 'on' or 'off'")
nodelist = parser.parse(('endautoescape',))
parser.delete_first_token()
return AutoEscapeControlNode((arg == 'on'), nodelist)
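# Usage sketch for the tag above (illustrative; the variable name is an
# assumption), following the docstring conventions used elsewhere in this
# module:
#
#   {% autoescape off %}
#       {{ body_html }}
#   {% endautoescape %}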
@register.tag
def comment(parser, token):
"""
Ignores everything between ``{% comment %}`` and ``{% endcomment %}``.
"""
parser.skip_past('endcomment')
return CommentNode()
@register.tag
def cycle(parser, token):
"""
Cycles among the given strings each time this tag is encountered.
Within a loop, cycles among the given strings each time through
the loop::
{% for o in some_list %}
<tr class="{% cycle 'row1' 'row2' %}">
...
</tr>
{% endfor %}
Outside of a loop, give the values a unique name the first time you call
    it, then use that name each successive time through::
<tr class="{% cycle 'row1' 'row2' 'row3' as rowcolors %}">...</tr>
<tr class="{% cycle rowcolors %}">...</tr>
<tr class="{% cycle rowcolors %}">...</tr>
You can use any number of values, separated by spaces. Commas can also
be used to separate values; if a comma is used, the cycle values are
interpreted as literal strings.
The optional flag "silent" can be used to prevent the cycle declaration
from returning any value::
{% cycle 'row1' 'row2' as rowcolors silent %}{# no value here #}
{% for o in some_list %}
<tr class="{% cycle rowcolors %}">{# first value will be "row1" #}
...
</tr>
{% endfor %}
"""
# Note: This returns the exact same node on each {% cycle name %} call;
# that is, the node object returned from {% cycle a b c as name %} and the
# one returned from {% cycle name %} are the exact same object. This
# shouldn't cause problems (heh), but if it does, now you know.
#
# Ugly hack warning: This stuffs the named template dict into parser so
# that names are only unique within each template (as opposed to using
# a global variable, which would make cycle names have to be unique across
# *all* templates.
args = token.split_contents()
if len(args) < 2:
raise TemplateSyntaxError("'cycle' tag requires at least two arguments")
if ',' in args[1]:
# Backwards compatibility: {% cycle a,b %} or {% cycle a,b as foo %}
# case.
args[1:2] = ['"%s"' % arg for arg in args[1].split(",")]
if len(args) == 2:
# {% cycle foo %} case.
name = args[1]
if not hasattr(parser, '_namedCycleNodes'):
raise TemplateSyntaxError("No named cycles in template. '%s' is not defined" % name)
if not name in parser._namedCycleNodes:
raise TemplateSyntaxError("Named cycle '%s' does not exist" % name)
return parser._namedCycleNodes[name]
as_form = False
if len(args) > 4:
# {% cycle ... as foo [silent] %} case.
if args[-3] == "as":
if args[-1] != "silent":
raise TemplateSyntaxError("Only 'silent' flag is allowed after cycle's name, not '%s'." % args[-1])
as_form = True
silent = True
args = args[:-1]
elif args[-2] == "as":
as_form = True
silent = False
if as_form:
name = args[-1]
values = [parser.compile_filter(arg) for arg in args[1:-2]]
node = CycleNode(values, name, silent=silent)
if not hasattr(parser, '_namedCycleNodes'):
parser._namedCycleNodes = {}
parser._namedCycleNodes[name] = node
else:
values = [parser.compile_filter(arg) for arg in args[1:]]
node = CycleNode(values)
return node
@register.tag
def csrf_token(parser, token):
return CsrfTokenNode()
@register.tag
def debug(parser, token):
"""
Outputs a whole load of debugging information, including the current
context and imported modules.
Sample usage::
<pre>
{% debug %}
</pre>
"""
return DebugNode()
@register.tag('filter')
def do_filter(parser, token):
"""
Filters the contents of the block through variable filters.
Filters can also be piped through each other, and they can have
arguments -- just like in variable syntax.
Sample usage::
{% filter force_escape|lower %}
This text will be HTML-escaped, and will appear in lowercase.
{% endfilter %}
Note that the ``escape`` and ``safe`` filters are not acceptable arguments.
Instead, use the ``autoescape`` tag to manage autoescaping for blocks of
template code.
"""
_, rest = token.contents.split(None, 1)
filter_expr = parser.compile_filter("var|%s" % (rest))
for func, unused in filter_expr.filters:
if getattr(func, '_decorated_function', func).__name__ in ('escape', 'safe'):
raise TemplateSyntaxError('"filter %s" is not permitted. Use the "autoescape" tag instead.' % func.__name__)
nodelist = parser.parse(('endfilter',))
parser.delete_first_token()
return FilterNode(filter_expr, nodelist)
@register.tag
def firstof(parser, token):
"""
Outputs the first variable passed that is not False, without escaping.
Outputs nothing if all the passed variables are False.
Sample usage::
{% firstof var1 var2 var3 %}
This is equivalent to::
{% if var1 %}
{{ var1|safe }}
{% else %}{% if var2 %}
{{ var2|safe }}
{% else %}{% if var3 %}
{{ var3|safe }}
{% endif %}{% endif %}{% endif %}
but obviously much cleaner!
You can also use a literal string as a fallback value in case all
passed variables are False::
{% firstof var1 var2 var3 "fallback value" %}
If you want to escape the output, use a filter tag::
{% filter force_escape %}
{% firstof var1 var2 var3 "fallback value" %}
{% endfilter %}
"""
bits = token.split_contents()[1:]
if len(bits) < 1:
raise TemplateSyntaxError("'firstof' statement requires at least one argument")
return FirstOfNode([parser.compile_filter(bit) for bit in bits])
@register.tag('for')
def do_for(parser, token):
"""
Loops over each item in an array.
For example, to display a list of athletes given ``athlete_list``::
<ul>
{% for athlete in athlete_list %}
<li>{{ athlete.name }}</li>
{% endfor %}
</ul>
You can loop over a list in reverse by using
``{% for obj in list reversed %}``.
You can also unpack multiple values from a two-dimensional array::
{% for key,value in dict.items %}
{{ key }}: {{ value }}
{% endfor %}
The ``for`` tag can take an optional ``{% empty %}`` clause that will
be displayed if the given array is empty or could not be found::
<ul>
{% for athlete in athlete_list %}
<li>{{ athlete.name }}</li>
{% empty %}
<li>Sorry, no athletes in this list.</li>
{% endfor %}
<ul>
The above is equivalent to -- but shorter, cleaner, and possibly faster
than -- the following::
<ul>
          {% if athlete_list %}
{% for athlete in athlete_list %}
<li>{{ athlete.name }}</li>
{% endfor %}
{% else %}
<li>Sorry, no athletes in this list.</li>
{% endif %}
</ul>
The for loop sets a number of variables available within the loop:
========================== ================================================
Variable Description
========================== ================================================
``forloop.counter`` The current iteration of the loop (1-indexed)
``forloop.counter0`` The current iteration of the loop (0-indexed)
``forloop.revcounter`` The number of iterations from the end of the
loop (1-indexed)
``forloop.revcounter0`` The number of iterations from the end of the
loop (0-indexed)
``forloop.first`` True if this is the first time through the loop
``forloop.last`` True if this is the last time through the loop
``forloop.parentloop`` For nested loops, this is the loop "above" the
current one
========================== ================================================
"""
bits = token.contents.split()
if len(bits) < 4:
raise TemplateSyntaxError("'for' statements should have at least four"
" words: %s" % token.contents)
is_reversed = bits[-1] == 'reversed'
in_index = is_reversed and -3 or -2
if bits[in_index] != 'in':
raise TemplateSyntaxError("'for' statements should use the format"
" 'for x in y': %s" % token.contents)
loopvars = re.split(r' *, *', ' '.join(bits[1:in_index]))
for var in loopvars:
if not var or ' ' in var:
raise TemplateSyntaxError("'for' tag received an invalid argument:"
" %s" % token.contents)
sequence = parser.compile_filter(bits[in_index+1])
nodelist_loop = parser.parse(('empty', 'endfor',))
token = parser.next_token()
if token.contents == 'empty':
nodelist_empty = parser.parse(('endfor',))
parser.delete_first_token()
else:
nodelist_empty = None
return ForNode(loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty)
def do_ifequal(parser, token, negate):
bits = list(token.split_contents())
if len(bits) != 3:
raise TemplateSyntaxError("%r takes two arguments" % bits[0])
end_tag = 'end' + bits[0]
nodelist_true = parser.parse(('else', end_tag))
token = parser.next_token()
if token.contents == 'else':
nodelist_false = parser.parse((end_tag,))
parser.delete_first_token()
else:
nodelist_false = NodeList()
val1 = parser.compile_filter(bits[1])
val2 = parser.compile_filter(bits[2])
return IfEqualNode(val1, val2, nodelist_true, nodelist_false, negate)
@register.tag
def ifequal(parser, token):
"""
Outputs the contents of the block if the two arguments equal each other.
Examples::
{% ifequal user.id comment.user_id %}
...
{% endifequal %}
{% ifnotequal user.id comment.user_id %}
...
{% else %}
...
{% endifnotequal %}
"""
return do_ifequal(parser, token, False)
@register.tag
def ifnotequal(parser, token):
"""
Outputs the contents of the block if the two arguments are not equal.
See ifequal.
"""
return do_ifequal(parser, token, True)
class TemplateLiteral(Literal):
def __init__(self, value, text):
self.value = value
self.text = text # for better error messages
def display(self):
return self.text
def eval(self, context):
return self.value.resolve(context, ignore_failures=True)
class TemplateIfParser(IfParser):
error_class = TemplateSyntaxError
def __init__(self, parser, *args, **kwargs):
self.template_parser = parser
super(TemplateIfParser, self).__init__(*args, **kwargs)
def create_var(self, value):
return TemplateLiteral(self.template_parser.compile_filter(value), value)
@register.tag('if')
def do_if(parser, token):
"""
The ``{% if %}`` tag evaluates a variable, and if that variable is "true"
(i.e., exists, is not empty, and is not a false boolean value), the
contents of the block are output:
::
{% if athlete_list %}
Number of athletes: {{ athlete_list|count }}
{% elif athlete_in_locker_room_list %}
Athletes should be out of the locker room soon!
{% else %}
No athletes.
{% endif %}
In the above, if ``athlete_list`` is not empty, the number of athletes will
be displayed by the ``{{ athlete_list|count }}`` variable.
As you can see, the ``if`` tag may take one or several `` {% elif %}``
clauses, as well as an ``{% else %}`` clause that will be displayed if all
previous conditions fail. These clauses are optional.
``if`` tags may use ``or``, ``and`` or ``not`` to test a number of
variables or to negate a given variable::
{% if not athlete_list %}
There are no athletes.
{% endif %}
{% if athlete_list or coach_list %}
There are some athletes or some coaches.
{% endif %}
{% if athlete_list and coach_list %}
            Both athletes and coaches are available.
{% endif %}
{% if not athlete_list or coach_list %}
There are no athletes, or there are some coaches.
{% endif %}
{% if athlete_list and not coach_list %}
There are some athletes and absolutely no coaches.
{% endif %}
Comparison operators are also available, and the use of filters is also
allowed, for example::
{% if articles|length >= 5 %}...{% endif %}
Arguments and operators _must_ have a space between them, so
``{% if 1>2 %}`` is not a valid if tag.
All supported operators are: ``or``, ``and``, ``in``, ``not in``
``==`` (or ``=``), ``!=``, ``>``, ``>=``, ``<`` and ``<=``.
Operator precedence follows Python.
"""
# {% if ... %}
bits = token.split_contents()[1:]
condition = TemplateIfParser(parser, bits).parse()
nodelist = parser.parse(('elif', 'else', 'endif'))
conditions_nodelists = [(condition, nodelist)]
token = parser.next_token()
# {% elif ... %} (repeatable)
while token.contents.startswith('elif'):
bits = token.split_contents()[1:]
condition = TemplateIfParser(parser, bits).parse()
nodelist = parser.parse(('elif', 'else', 'endif'))
conditions_nodelists.append((condition, nodelist))
token = parser.next_token()
# {% else %} (optional)
if token.contents == 'else':
nodelist = parser.parse(('endif',))
conditions_nodelists.append((None, nodelist))
token = parser.next_token()
# {% endif %}
assert token.contents == 'endif'
return IfNode(conditions_nodelists)
@register.tag
def ifchanged(parser, token):
"""
Checks if a value has changed from the last iteration of a loop.
The ``{% ifchanged %}`` block tag is used within a loop. It has two
possible uses.
1. Checks its own rendered contents against its previous state and only
displays the content if it has changed. For example, this displays a
list of days, only displaying the month if it changes::
<h1>Archive for {{ year }}</h1>
{% for date in days %}
{% ifchanged %}<h3>{{ date|date:"F" }}</h3>{% endifchanged %}
<a href="{{ date|date:"M/d"|lower }}/">{{ date|date:"j" }}</a>
{% endfor %}
2. If given one or more variables, check whether any variable has changed.
For example, the following shows the date every time it changes, while
showing the hour if either the hour or the date has changed::
{% for date in days %}
{% ifchanged date.date %} {{ date.date }} {% endifchanged %}
{% ifchanged date.hour date.date %}
{{ date.hour }}
{% endifchanged %}
{% endfor %}
"""
bits = token.contents.split()
nodelist_true = parser.parse(('else', 'endifchanged'))
token = parser.next_token()
if token.contents == 'else':
nodelist_false = parser.parse(('endifchanged',))
parser.delete_first_token()
else:
nodelist_false = NodeList()
values = [parser.compile_filter(bit) for bit in bits[1:]]
return IfChangedNode(nodelist_true, nodelist_false, *values)
@register.tag
def ssi(parser, token):
"""
Outputs the contents of a given file into the page.
Like a simple "include" tag, the ``ssi`` tag includes the contents
of another file -- which must be specified using an absolute path --
in the current page::
{% ssi "/home/html/ljworld.com/includes/right_generic.html" %}
If the optional "parsed" parameter is given, the contents of the included
file are evaluated as template code, with the current context::
{% ssi "/home/html/ljworld.com/includes/right_generic.html" parsed %}
"""
bits = token.split_contents()
parsed = False
if len(bits) not in (2, 3):
raise TemplateSyntaxError("'ssi' tag takes one argument: the path to"
" the file to be included")
if len(bits) == 3:
if bits[2] == 'parsed':
parsed = True
else:
raise TemplateSyntaxError("Second (optional) argument to %s tag"
" must be 'parsed'" % bits[0])
filepath = parser.compile_filter(bits[1])
return SsiNode(filepath, parsed)
@register.tag
def load(parser, token):
"""
Loads a custom template tag set.
For example, to load the template tags in
``django/templatetags/news/photos.py``::
{% load news.photos %}
Can also be used to load an individual tag/filter from
a library::
{% load byline from news %}
"""
bits = token.contents.split()
if len(bits) >= 4 and bits[-2] == "from":
try:
taglib = bits[-1]
lib = get_library(taglib)
except InvalidTemplateLibrary as e:
raise TemplateSyntaxError("'%s' is not a valid tag library: %s" %
(taglib, e))
else:
temp_lib = Library()
for name in bits[1:-2]:
if name in lib.tags:
temp_lib.tags[name] = lib.tags[name]
# a name could be a tag *and* a filter, so check for both
if name in lib.filters:
temp_lib.filters[name] = lib.filters[name]
elif name in lib.filters:
temp_lib.filters[name] = lib.filters[name]
else:
raise TemplateSyntaxError("'%s' is not a valid tag or filter in tag library '%s'" %
(name, taglib))
parser.add_library(temp_lib)
else:
for taglib in bits[1:]:
# add the library to the parser
try:
lib = get_library(taglib)
parser.add_library(lib)
except InvalidTemplateLibrary as e:
raise TemplateSyntaxError("'%s' is not a valid tag library: %s" %
(taglib, e))
return LoadNode()
@register.tag
def now(parser, token):
"""
Displays the date, formatted according to the given string.
Uses the same format as PHP's ``date()`` function; see http://php.net/date
for all the possible values.
Sample usage::
It is {% now "jS F Y H:i" %}
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'now' statement takes one argument")
format_string = bits[1][1:-1]
return NowNode(format_string)
@register.tag
def regroup(parser, token):
"""
Regroups a list of alike objects by a common attribute.
This complex tag is best illustrated by use of an example: say that
``people`` is a list of ``Person`` objects that have ``first_name``,
``last_name``, and ``gender`` attributes, and you'd like to display a list
that looks like:
* Male:
* George Bush
* Bill Clinton
* Female:
* Margaret Thatcher
            * Condoleezza Rice
* Unknown:
* Pat Smith
The following snippet of template code would accomplish this dubious task::
{% regroup people by gender as grouped %}
<ul>
{% for group in grouped %}
<li>{{ group.grouper }}
<ul>
{% for item in group.list %}
<li>{{ item }}</li>
{% endfor %}
</ul>
{% endfor %}
</ul>
As you can see, ``{% regroup %}`` populates a variable with a list of
objects with ``grouper`` and ``list`` attributes. ``grouper`` contains the
item that was grouped by; ``list`` contains the list of objects that share
that ``grouper``. In this case, ``grouper`` would be ``Male``, ``Female``
and ``Unknown``, and ``list`` is the list of people with those genders.
Note that ``{% regroup %}`` does not work when the list to be grouped is not
sorted by the key you are grouping by! This means that if your list of
people was not sorted by gender, you'd need to make sure it is sorted
before using it, i.e.::
{% regroup people|dictsort:"gender" by gender as grouped %}
"""
firstbits = token.contents.split(None, 3)
if len(firstbits) != 4:
raise TemplateSyntaxError("'regroup' tag takes five arguments")
target = parser.compile_filter(firstbits[1])
if firstbits[2] != 'by':
raise TemplateSyntaxError("second argument to 'regroup' tag must be 'by'")
lastbits_reversed = firstbits[3][::-1].split(None, 2)
if lastbits_reversed[1][::-1] != 'as':
raise TemplateSyntaxError("next-to-last argument to 'regroup' tag must"
" be 'as'")
var_name = lastbits_reversed[0][::-1]
# RegroupNode will take each item in 'target', put it in the context under
# 'var_name', evaluate 'var_name'.'expression' in the current context, and
# group by the resulting value. After all items are processed, it will
# save the final result in the context under 'var_name', thus clearing the
# temporary values. This hack is necessary because the template engine
# doesn't provide a context-aware equivalent of Python's getattr.
expression = parser.compile_filter(var_name +
VARIABLE_ATTRIBUTE_SEPARATOR +
lastbits_reversed[2][::-1])
return RegroupNode(target, expression, var_name)
@register.tag
def spaceless(parser, token):
"""
Removes whitespace between HTML tags, including tab and newline characters.
Example usage::
{% spaceless %}
<p>
<a href="foo/">Foo</a>
</p>
{% endspaceless %}
This example would return this HTML::
<p><a href="foo/">Foo</a></p>
Only space between *tags* is normalized -- not space between tags and text.
In this example, the space around ``Hello`` won't be stripped::
{% spaceless %}
<strong>
Hello
</strong>
{% endspaceless %}
"""
nodelist = parser.parse(('endspaceless',))
parser.delete_first_token()
return SpacelessNode(nodelist)
@register.tag
def templatetag(parser, token):
"""
Outputs one of the bits used to compose template tags.
Since the template system has no concept of "escaping", to display one of
the bits used in template tags, you must use the ``{% templatetag %}`` tag.
The argument tells which template bit to output:
================== =======
Argument Outputs
================== =======
``openblock`` ``{%``
``closeblock`` ``%}``
``openvariable`` ``{{``
``closevariable`` ``}}``
``openbrace`` ``{``
``closebrace`` ``}``
``opencomment`` ``{#``
``closecomment`` ``#}``
================== =======
"""
bits = token.contents.split()
if len(bits) != 2:
raise TemplateSyntaxError("'templatetag' statement takes one argument")
tag = bits[1]
if tag not in TemplateTagNode.mapping:
raise TemplateSyntaxError("Invalid templatetag argument: '%s'."
" Must be one of: %s" %
(tag, TemplateTagNode.mapping.keys()))
return TemplateTagNode(tag)
@register.tag
def url(parser, token):
"""
Returns an absolute URL matching given view with its parameters.
This is a way to define links that aren't tied to a particular URL
configuration::
{% url "path.to.some_view" arg1 arg2 %}
or
{% url "path.to.some_view" name1=value1 name2=value2 %}
The first argument is a path to a view. It can be an absolute Python path
or just ``app_name.view_name`` without the project name if the view is
located inside the project.
Other arguments are space-separated values that will be filled in place of
positional and keyword arguments in the URL. Don't mix positional and
keyword arguments.
All arguments for the URL should be present.
For example if you have a view ``app_name.client`` taking client's id and
the corresponding line in a URLconf looks like this::
('^client/(\d+)/$', 'app_name.client')
and this app's URLconf is included into the project's URLconf under some
path::
('^clients/', include('project_name.app_name.urls'))
then in a template you can create a link for a certain client like this::
{% url "app_name.client" client.id %}
The URL will look like ``/clients/client/123/``.
The first argument can also be a named URL instead of the Python path to
the view callable. For example if the URLconf entry looks like this::
url('^client/(\d+)/$', name='client-detail-view')
then in the template you can use::
{% url "client-detail-view" client.id %}
There is even another possible value type for the first argument. It can be
the name of a template variable that will be evaluated to obtain the view
name or the URL name, e.g.::
{% with view_path="app_name.client" %}
{% url view_path client.id %}
{% endwith %}
or,
{% with url_name="client-detail-view" %}
{% url url_name client.id %}
{% endwith %}
"""
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError("'%s' takes at least one argument"
" (path to a view)" % bits[0])
viewname = parser.compile_filter(bits[1])
args = []
kwargs = {}
asvar = None
bits = bits[2:]
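    # An optional trailing "as <var>" stores the resolved URL in the context instead of outputting it.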
if len(bits) >= 2 and bits[-2] == 'as':
asvar = bits[-1]
bits = bits[:-2]
if len(bits):
for bit in bits:
match = kwarg_re.match(bit)
if not match:
raise TemplateSyntaxError("Malformed arguments to url tag")
name, value = match.groups()
if name:
kwargs[name] = parser.compile_filter(value)
else:
args.append(parser.compile_filter(value))
return URLNode(viewname, args, kwargs, asvar)
@register.tag
def widthratio(parser, token):
"""
For creating bar charts and such, this tag calculates the ratio of a given
value to a maximum value, and then applies that ratio to a constant.
For example::
<img src='bar.gif' height='10' width='{% widthratio this_value max_value 100 %}' />
Above, if ``this_value`` is 175 and ``max_value`` is 200, the image in
the above example will be 88 pixels wide (because 175/200 = .875;
.875 * 100 = 87.5 which is rounded up to 88).
"""
bits = token.contents.split()
if len(bits) != 4:
raise TemplateSyntaxError("widthratio takes three arguments")
tag, this_value_expr, max_value_expr, max_width = bits
return WidthRatioNode(parser.compile_filter(this_value_expr),
parser.compile_filter(max_value_expr),
parser.compile_filter(max_width))
@register.tag('with')
def do_with(parser, token):
"""
Adds one or more values to the context (inside of this block) for caching
and easy access.
For example::
{% with total=person.some_sql_method %}
{{ total }} object{{ total|pluralize }}
{% endwith %}
Multiple values can be added to the context::
{% with foo=1 bar=2 %}
...
{% endwith %}
The legacy format of ``{% with person.some_sql_method as total %}`` is
still accepted.
"""
bits = token.split_contents()
remaining_bits = bits[1:]
extra_context = token_kwargs(remaining_bits, parser, support_legacy=True)
if not extra_context:
raise TemplateSyntaxError("%r expected at least one variable "
"assignment" % bits[0])
if remaining_bits:
raise TemplateSyntaxError("%r received an invalid token: %r" %
(bits[0], remaining_bits[0]))
nodelist = parser.parse(('endwith',))
parser.delete_first_token()
return WithNode(None, None, nodelist, extra_context=extra_context)
| bsd-3-clause | -4,195,996,000,484,717,600 | 34.146617 | 171 | 0.575313 | false |
NoMoKeTo/choo | src/tests/models/test_base.py | 2 | 3111 | import pytest
from choo.apis import vrr
from choo.models import City, Stop
from choo.models.sourced import SourcedModelMixin
class TestModel:
def test_init(self):
assert Stop(name='Essen Hbf').name == 'Essen Hbf'
assert Stop(city__name='Essen').city__name == 'Essen'
with pytest.raises(AttributeError):
Stop(invalid_field='Essen')
with pytest.raises(TypeError):
Stop(city=27)
def test_serializing(self):
serialized = {
'@type': 'stop',
'name': 'Essen Hbf'
}
assert Stop(name='Essen Hbf').serialize() == serialized
assert Stop.unserialize(serialized).serialize() == serialized
with pytest.raises(AttributeError):
Stop.unserialize({
'@type': 'stop',
'invalid_field': 'Essen',
})
class TestSourcedModelMixin:
sourced_city = City.unserialize({
"@type": "city.sourced",
"source": "vrr",
"country": "de",
"state": "nrw",
"name": "Essen",
"ids": {
"de": "05113000",
"vrr": "placeID:5113000:18"
}
})
def test_init(self):
with pytest.raises(TypeError):
SourcedModelMixin()
assert self.sourced_city.source == vrr
def test_from_parser(self):
with pytest.raises(TypeError):
SourcedModelMixin()
with pytest.raises(TypeError):
Stop.Sourced.from_parser(City)
def test_mutable(self):
assert self.sourced_city.mutable().serialize() == {
"@type": "city",
"country": "de",
"state": "nrw",
"name": "Essen",
"ids": {
"de": "05113000",
"vrr": "placeID:5113000:18"
}
}
def test_immutable(self):
with pytest.raises(TypeError):
self.sourced_city.name = 'Duisburg'
with pytest.raises(TypeError):
del self.sourced_city.name
def test_custom_properties(self):
self.sourced_city.choo_testing_property = 42
assert self.sourced_city.choo_testing_property == 42
del self.sourced_city.choo_testing_property
with pytest.raises(AttributeError):
self.sourced_city.choo_testing_property
class TestModelWithIDs:
city1 = {
"@type": "city",
"country": "de",
"state": "nrw",
"name": "Essen",
"ids": {
"de": "05113000",
"vrr": "placeID:5113000:18"
}
}
city2 = {
"@type": "city",
"country": "de",
"state": "nrw",
"name": "Duisburg",
"ids": {
"de": "05112000",
"vrr": "placeID:5112000:20"
}
}
def test_eq(self):
assert (City.unserialize(self.city1) == City.unserialize(self.city1)) is True
assert (City.unserialize(self.city1) == City.unserialize(self.city2)) is None
assert (City.unserialize(self.city1) == Stop()) is False
assert (City.unserialize(self.city1) == 42) is False
| apache-2.0 | -521,428,045,872,590,200 | 27.027027 | 85 | 0.534233 | false |
GoogleCloudPlatform/training-data-analyst | courses/machine_learning/deepdive/08_image/labs/flowersmodel/model.py | 2 | 10620 | #!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
tf.logging.set_verbosity(v = tf.logging.INFO)
LIST_OF_LABELS = "daisy,dandelion,roses,sunflowers,tulips".split(',')
HEIGHT = 299
WIDTH = 299
NUM_CHANNELS = 3
NCLASSES = 5
def linear_model(img, mode, hparams):
X = tf.reshape(tensor = img, shape = [-1,HEIGHT * WIDTH]) #flatten
    ylogits = tf.layers.dense(inputs = X, units = NCLASSES, activation = None)
return ylogits, NCLASSES
def dnn_model(img, mode, hparams):
X = tf.reshape(tensor = img, shape = [-1, HEIGHT * WIDTH]) #flatten
    h1 = tf.layers.dense(inputs = X, units = 300, activation = tf.nn.relu)
    h2 = tf.layers.dense(inputs = h1, units = 100, activation = tf.nn.relu)
    h3 = tf.layers.dense(inputs = h2, units = 30, activation = tf.nn.relu)
    ylogits = tf.layers.dense(inputs = h3, units = NCLASSES, activation = None)
return ylogits, NCLASSES
def dnn_dropout_model(img, mode, hparams):
dprob = hparams.get("dprob", 0.1)
X = tf.reshape(tensor = img, shape = [-1, HEIGHT * WIDTH]) #flatten
    h1 = tf.layers.dense(inputs = X, units = 300, activation = tf.nn.relu)
    h2 = tf.layers.dense(inputs = h1, units = 100, activation = tf.nn.relu)
    h3 = tf.layers.dense(inputs = h2, units = 30, activation = tf.nn.relu)
h3d = tf.layers.dropout(inputs = h3, rate = dprob, training = (mode == tf.estimator.ModeKeys.TRAIN)) #only dropout when training
    ylogits = tf.layers.dense(inputs = h3d, units = NCLASSES, activation = None)
return ylogits, NCLASSES
def cnn_model(img, mode, hparams):
ksize1 = hparams.get("ksize1", 5)
ksize2 = hparams.get("ksize2", 5)
nfil1 = hparams.get("nfil1", 10)
nfil2 = hparams.get("nfil2", 20)
dprob = hparams.get("dprob", 0.25)
c1 = tf.layers.conv2d(inputs = img, filters = nfil1,
kernel_size = ksize1, strides = 1,
padding = "same", activation = tf.nn.relu) # shape = (batch_size, HEIGHT, WIDTH, nfil1)
p1 = tf.layers.max_pooling2d(inputs = c1, pool_size = 2, strides = 2) # shape = (batch_size, HEIGHT // 2, WIDTH // 2, nfil1)
c2 = tf.layers.conv2d(inputs = p1, filters = nfil2,
kernel_size = ksize2, strides = 1,
padding = "same", activation = tf.nn.relu) # shape = (batch_size, HEIGHT // 2, WIDTH // 2, nfil2)
p2 = tf.layers.max_pooling2d(inputs = c2, pool_size = 2, strides = 2) # shape = (batch_size, HEIGHT // 4, WIDTH // 4, nfil2)
outlen = p2.shape[1] * p2.shape[2] * p2.shape[3] # HEIGHT // 4 * WIDTH // 4 * nfil2
p2flat = tf.reshape(tensor = p2, shape = [-1, outlen]) # shape = (batch_size, HEIGHT // 4 * WIDTH // 4 * nfil2)
# Apply batch normalization
if hparams["batch_norm"]:
h3 = tf.layers.dense(inputs = p2flat, units = 300, activation = None)
h3 = tf.layers.batch_normalization(inputs = h3, training = (mode == tf.estimator.ModeKeys.TRAIN)) # only batchnorm when training
h3 = tf.nn.relu(features = h3)
else:
h3 = tf.layers.dense(inputs = p2flat, units = 300, activation = tf.nn.relu)
# Apply dropout
h3d = tf.layers.dropout(inputs = h3, rate = dprob, training = (mode == tf.estimator.ModeKeys.TRAIN))
ylogits = tf.layers.dense(inputs = h3d, units = NCLASSES, activation = None)
# Apply batch normalization once more
if hparams["batch_norm"]:
ylogits = tf.layers.batch_normalization(inputs = ylogits, training = (mode == tf.estimator.ModeKeys.TRAIN))
return ylogits, NCLASSES
def read_and_preprocess_with_augment(image_bytes, label = None):
return read_and_preprocess(image_bytes, label, augment = True)
def read_and_preprocess(image_bytes, label = None, augment = False):
# Decode the image, end up with pixel values that are in the -1, 1 range
    # The original lab leaves these as TODOs; one straightforward completion:
    image = tf.image.decode_jpeg(contents = image_bytes, channels = NUM_CHANNELS) # decode contents into JPEG
    image = tf.image.convert_image_dtype(image = image, dtype = tf.float32) # convert JPEG tensor to floats between 0 and 1
image = tf.expand_dims(input = image, axis = 0) # resize_bilinear needs batches
    if augment:
        # One possible augmentation pipeline for the lab TODO: resize slightly larger,
        # random-crop back to HEIGHT x WIDTH, then apply random flips/brightness/contrast
        image = tf.image.resize_bilinear(images = image, size = [HEIGHT + 10, WIDTH + 10], align_corners = False)
        image = tf.squeeze(input = image, axis = 0) # remove batch dimension
        image = tf.random_crop(value = image, size = [HEIGHT, WIDTH, NUM_CHANNELS])
        image = tf.image.random_flip_left_right(image = image)
        image = tf.image.random_brightness(image = image, max_delta = 63.0 / 255.0)
        image = tf.image.random_contrast(image = image, lower = 0.2, upper = 1.8)
    else:
        image = tf.image.resize_bilinear(images = image, size = [HEIGHT, WIDTH], align_corners = False)
        image = tf.squeeze(input = image, axis = 0) # remove batch dimension
# Pixel values are in range [0,1], convert to [-1,1]
image = tf.subtract(x = image, y = 0.5)
image = tf.multiply(x = image, y = 2.0)
return {"image": image}, label
def serving_input_fn():
# Note: only handles one image at a time
feature_placeholders = {"image_bytes": tf.placeholder(dtype = tf.string, shape = [])}
image, _ = read_and_preprocess(tf.squeeze(input = feature_placeholders["image_bytes"]))
image["image"] = tf.expand_dims(image["image"], axis = 0)
return tf.estimator.export.ServingInputReceiver(features = image, receiver_tensors = feature_placeholders)
def make_input_fn(csv_of_filenames, batch_size, mode, augment=False):
def _input_fn():
def decode_csv(csv_row):
filename, label = tf.decode_csv(records = csv_row, record_defaults = [[""],[""]])
image_bytes = tf.read_file(filename = filename)
return image_bytes, label
# Create tf.data.dataset from filename
dataset = tf.data.TextLineDataset(filenames = csv_of_filenames).map(map_func = decode_csv)
        if augment:
            dataset = dataset.map(map_func = read_and_preprocess_with_augment)
        else:
            dataset = dataset.map(map_func = read_and_preprocess)
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # indefinitely
dataset = dataset.shuffle(buffer_size = 10 * batch_size)
else:
num_epochs = 1 # end-of-input after this
dataset = dataset.repeat(count = num_epochs).batch(batch_size = batch_size)
return dataset.make_one_shot_iterator().get_next()
return _input_fn
def image_classifier(features, labels, mode, params):
model_functions = {
"linear": linear_model,
"dnn": dnn_model,
"dnn_dropout": dnn_dropout_model,
"cnn": cnn_model}
model_function = model_functions[params["model"]]
ylogits, nclasses = model_function(features["image"], mode, params)
probabilities = tf.nn.softmax(logits = ylogits)
class_int = tf.cast(x = tf.argmax(input = ylogits, axis = 1), dtype = tf.uint8)
class_str = tf.gather(params = LIST_OF_LABELS, indices = tf.cast(x = class_int, dtype = tf.int32))
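    # class_int is the argmax over the logits; class_str maps that index back to its label string via LIST_OF_LABELS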
if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:
# Convert string label to int
labels_table = tf.contrib.lookup.index_table_from_tensor(mapping = tf.constant(value = LIST_OF_LABELS, dtype = tf.string))
labels = labels_table.lookup(keys = labels)
loss = tf.reduce_mean(input_tensor = tf.nn.softmax_cross_entropy_with_logits_v2(logits = ylogits, labels = tf.one_hot(indices = labels, depth = NCLASSES)))
if mode == tf.estimator.ModeKeys.TRAIN:
# This is needed for batch normalization, but has no effect otherwise
update_ops = tf.get_collection(key = tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(control_inputs = update_ops):
train_op = tf.contrib.layers.optimize_loss(
loss = loss,
global_step = tf.train.get_global_step(),
learning_rate = params["learning_rate"],
optimizer = "Adam")
eval_metric_ops = None
else:
train_op = None
eval_metric_ops = {"accuracy": tf.metrics.accuracy(labels = labels, predictions = class_int)}
else:
loss = None
train_op = None
eval_metric_ops = None
return tf.estimator.EstimatorSpec(
mode = mode,
predictions = {"probabilities": probabilities,
"classid": class_int,
"class": class_str},
loss = loss,
train_op = train_op,
eval_metric_ops = eval_metric_ops,
export_outputs = {"classes": tf.estimator.export.PredictOutput(
{"probabilities": probabilities,
"classid": class_int,
"class": class_str})}
)
def train_and_evaluate(output_dir, hparams):
tf.summary.FileWriterCache.clear() # ensure filewriter cache is clear for TensorBoard events file
EVAL_INTERVAL = 300 # every 5 minutes
# Instantiate base estimator class for custom model function
estimator = tf.estimator.Estimator(
model_fn = image_classifier,
params = hparams,
config = tf.estimator.RunConfig(
save_checkpoints_secs = EVAL_INTERVAL),
model_dir = output_dir)
# Set estimator"s train_spec to use train_input_fn and train for so many steps
train_spec = tf.estimator.TrainSpec(
input_fn = make_input_fn(
hparams["train_data_path"],
hparams["batch_size"],
mode = tf.estimator.ModeKeys.TRAIN,
augment = hparams["augment"]),
max_steps = hparams["train_steps"])
# Create exporter that uses serving_input_fn to create saved_model for serving
exporter = tf.estimator.LatestExporter(
name = "exporter",
serving_input_receiver_fn = serving_input_fn)
# Set estimator"s eval_spec to use eval_input_fn and export saved_model
eval_spec = tf.estimator.EvalSpec(
input_fn = make_input_fn(
hparams["eval_data_path"],
hparams["batch_size"],
mode = tf.estimator.ModeKeys.EVAL),
steps = None,
exporters = exporter,
start_delay_secs = EVAL_INTERVAL,
throttle_secs = EVAL_INTERVAL)
# Run train_and_evaluate loop
tf.estimator.train_and_evaluate(
estimator = estimator,
train_spec = train_spec,
eval_spec = eval_spec) | apache-2.0 | 133,952,494,193,072,830 | 42.88843 | 163 | 0.629002 | false |
srcLurker/home-assistant | homeassistant/bootstrap.py | 3 | 21328 | """Provides methods to bootstrap a home assistant instance."""
import asyncio
import logging
import logging.handlers
import os
import sys
from collections import defaultdict
from types import ModuleType
from typing import Any, Optional, Dict
import voluptuous as vol
from voluptuous.humanize import humanize_error
import homeassistant.components as core_components
from homeassistant.components import persistent_notification
import homeassistant.config as conf_util
import homeassistant.core as core
import homeassistant.loader as loader
import homeassistant.util.package as pkg_util
from homeassistant.util.async import (
run_coroutine_threadsafe, run_callback_threadsafe)
from homeassistant.util.yaml import clear_secret_cache
from homeassistant.const import EVENT_COMPONENT_LOADED, PLATFORM_FORMAT
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import (
event_decorators, service, config_per_platform, extract_domain_configs)
_LOGGER = logging.getLogger(__name__)
ATTR_COMPONENT = 'component'
ERROR_LOG_FILENAME = 'home-assistant.log'
_PERSISTENT_ERRORS = {}
HA_COMPONENT_URL = '[{}](https://home-assistant.io/components/{}/)'
def setup_component(hass: core.HomeAssistant, domain: str,
config: Optional[Dict]=None) -> bool:
"""Setup a component and all its dependencies."""
return run_coroutine_threadsafe(
async_setup_component(hass, domain, config), loop=hass.loop).result()
@asyncio.coroutine
def async_setup_component(hass: core.HomeAssistant, domain: str,
config: Optional[Dict]=None) -> bool:
"""Setup a component and all its dependencies.
This method is a coroutine.
"""
if domain in hass.config.components:
_LOGGER.debug('Component %s already set up.', domain)
return True
if not loader.PREPARED:
yield from hass.loop.run_in_executor(None, loader.prepare, hass)
if config is None:
config = defaultdict(dict)
components = loader.load_order_component(domain)
# OrderedSet is empty if component or dependencies could not be resolved
if not components:
_async_persistent_notification(hass, domain, True)
return False
for component in components:
res = yield from _async_setup_component(hass, component, config)
if not res:
_LOGGER.error('Component %s failed to setup', component)
_async_persistent_notification(hass, component, True)
return False
return True
def _handle_requirements(hass: core.HomeAssistant, component,
name: str) -> bool:
"""Install the requirements for a component.
This method needs to run in an executor.
"""
if hass.config.skip_pip or not hasattr(component, 'REQUIREMENTS'):
return True
for req in component.REQUIREMENTS:
if not pkg_util.install_package(req, target=hass.config.path('deps')):
_LOGGER.error('Not initializing %s because could not install '
'dependency %s', name, req)
_async_persistent_notification(hass, name)
return False
return True
@asyncio.coroutine
def _async_setup_component(hass: core.HomeAssistant,
domain: str, config) -> bool:
"""Setup a component for Home Assistant.
This method is a coroutine.
"""
# pylint: disable=too-many-return-statements
if domain in hass.config.components:
return True
setup_lock = hass.data.get('setup_lock')
if setup_lock is None:
setup_lock = hass.data['setup_lock'] = asyncio.Lock(loop=hass.loop)
setup_progress = hass.data.get('setup_progress')
if setup_progress is None:
setup_progress = hass.data['setup_progress'] = []
if domain in setup_progress:
_LOGGER.error('Attempt made to setup %s during setup of %s',
domain, domain)
_async_persistent_notification(hass, domain, True)
return False
try:
# Used to indicate to discovery that a setup is ongoing and allow it
# to wait till it is done.
did_lock = False
if not setup_lock.locked():
yield from setup_lock.acquire()
did_lock = True
setup_progress.append(domain)
config = yield from async_prepare_setup_component(hass, config, domain)
if config is None:
return False
component = loader.get_component(domain)
if component is None:
_async_persistent_notification(hass, domain)
return False
async_comp = hasattr(component, 'async_setup')
try:
_LOGGER.info("Setting up %s", domain)
if async_comp:
result = yield from component.async_setup(hass, config)
else:
result = yield from hass.loop.run_in_executor(
None, component.setup, hass, config)
except Exception: # pylint: disable=broad-except
_LOGGER.exception('Error during setup of component %s', domain)
_async_persistent_notification(hass, domain, True)
return False
if result is False:
_LOGGER.error('component %s failed to initialize', domain)
_async_persistent_notification(hass, domain, True)
return False
elif result is not True:
_LOGGER.error('component %s did not return boolean if setup '
'was successful. Disabling component.', domain)
_async_persistent_notification(hass, domain, True)
loader.set_component(domain, None)
return False
hass.config.components.append(component.DOMAIN)
hass.bus.async_fire(
EVENT_COMPONENT_LOADED, {ATTR_COMPONENT: component.DOMAIN}
)
return True
finally:
setup_progress.remove(domain)
if did_lock:
setup_lock.release()
def prepare_setup_component(hass: core.HomeAssistant, config: dict,
domain: str):
"""Prepare setup of a component and return processed config."""
return run_coroutine_threadsafe(
async_prepare_setup_component(hass, config, domain), loop=hass.loop
).result()
@asyncio.coroutine
def async_prepare_setup_component(hass: core.HomeAssistant, config: dict,
domain: str):
"""Prepare setup of a component and return processed config.
This method is a coroutine.
"""
# pylint: disable=too-many-return-statements
component = loader.get_component(domain)
missing_deps = [dep for dep in getattr(component, 'DEPENDENCIES', [])
if dep not in hass.config.components]
if missing_deps:
_LOGGER.error(
'Not initializing %s because not all dependencies loaded: %s',
domain, ", ".join(missing_deps))
return None
if hasattr(component, 'CONFIG_SCHEMA'):
try:
config = component.CONFIG_SCHEMA(config)
except vol.Invalid as ex:
async_log_exception(ex, domain, config, hass)
return None
elif hasattr(component, 'PLATFORM_SCHEMA'):
platforms = []
for p_name, p_config in config_per_platform(config, domain):
# Validate component specific platform schema
try:
p_validated = component.PLATFORM_SCHEMA(p_config)
except vol.Invalid as ex:
async_log_exception(ex, domain, config, hass)
continue
# Not all platform components follow same pattern for platforms
# So if p_name is None we are not going to validate platform
# (the automation component is one of them)
if p_name is None:
platforms.append(p_validated)
continue
platform = yield from async_prepare_setup_platform(
hass, config, domain, p_name)
if platform is None:
continue
# Validate platform specific schema
if hasattr(platform, 'PLATFORM_SCHEMA'):
try:
# pylint: disable=no-member
p_validated = platform.PLATFORM_SCHEMA(p_validated)
except vol.Invalid as ex:
async_log_exception(ex, '{}.{}'.format(domain, p_name),
p_validated, hass)
continue
platforms.append(p_validated)
# Create a copy of the configuration with all config for current
# component removed and add validated config back in.
filter_keys = extract_domain_configs(config, domain)
config = {key: value for key, value in config.items()
if key not in filter_keys}
config[domain] = platforms
res = yield from hass.loop.run_in_executor(
None, _handle_requirements, hass, component, domain)
if not res:
return None
return config
def prepare_setup_platform(hass: core.HomeAssistant, config, domain: str,
platform_name: str) -> Optional[ModuleType]:
"""Load a platform and makes sure dependencies are setup."""
return run_coroutine_threadsafe(
async_prepare_setup_platform(hass, config, domain, platform_name),
loop=hass.loop
).result()
@asyncio.coroutine
def async_prepare_setup_platform(hass: core.HomeAssistant, config, domain: str,
platform_name: str) \
-> Optional[ModuleType]:
"""Load a platform and makes sure dependencies are setup.
This method is a coroutine.
"""
if not loader.PREPARED:
yield from hass.loop.run_in_executor(None, loader.prepare, hass)
platform_path = PLATFORM_FORMAT.format(domain, platform_name)
platform = loader.get_platform(domain, platform_name)
# Not found
if platform is None:
_LOGGER.error('Unable to find platform %s', platform_path)
_async_persistent_notification(hass, platform_path)
return None
# Already loaded
elif platform_path in hass.config.components:
return platform
# Load dependencies
for component in getattr(platform, 'DEPENDENCIES', []):
res = yield from async_setup_component(hass, component, config)
if not res:
_LOGGER.error(
'Unable to prepare setup for platform %s because '
'dependency %s could not be initialized', platform_path,
component)
_async_persistent_notification(hass, platform_path, True)
return None
res = yield from hass.loop.run_in_executor(
None, _handle_requirements, hass, platform, platform_path)
if not res:
return None
return platform
def from_config_dict(config: Dict[str, Any],
hass: Optional[core.HomeAssistant]=None,
config_dir: Optional[str]=None,
enable_log: bool=True,
verbose: bool=False,
skip_pip: bool=False,
log_rotate_days: Any=None) \
-> Optional[core.HomeAssistant]:
"""Try to configure Home Assistant from a config dict.
Dynamically loads required components and its dependencies.
"""
if hass is None:
hass = core.HomeAssistant()
if config_dir is not None:
config_dir = os.path.abspath(config_dir)
hass.config.config_dir = config_dir
mount_local_lib_path(config_dir)
@asyncio.coroutine
def _async_init_from_config_dict(future):
try:
re_hass = yield from async_from_config_dict(
config, hass, config_dir, enable_log, verbose, skip_pip,
log_rotate_days)
future.set_result(re_hass)
# pylint: disable=broad-except
except Exception as exc:
future.set_exception(exc)
# run task
future = asyncio.Future(loop=hass.loop)
hass.loop.create_task(_async_init_from_config_dict(future))
hass.loop.run_until_complete(future)
return future.result()
@asyncio.coroutine
def async_from_config_dict(config: Dict[str, Any],
hass: core.HomeAssistant,
config_dir: Optional[str]=None,
enable_log: bool=True,
verbose: bool=False,
skip_pip: bool=False,
log_rotate_days: Any=None) \
-> Optional[core.HomeAssistant]:
"""Try to configure Home Assistant from a config dict.
Dynamically loads required components and its dependencies.
This method is a coroutine.
"""
core_config = config.get(core.DOMAIN, {})
try:
yield from conf_util.async_process_ha_core_config(hass, core_config)
except vol.Invalid as ex:
async_log_exception(ex, 'homeassistant', core_config, hass)
return None
yield from hass.loop.run_in_executor(
None, conf_util.process_ha_config_upgrade, hass)
if enable_log:
enable_logging(hass, verbose, log_rotate_days)
hass.config.skip_pip = skip_pip
if skip_pip:
_LOGGER.warning('Skipping pip installation of required modules. '
'This may cause issues.')
if not loader.PREPARED:
yield from hass.loop.run_in_executor(None, loader.prepare, hass)
# Make a copy because we are mutating it.
# Convert it to defaultdict so components can always have config dict
# Convert values to dictionaries if they are None
config = defaultdict(
dict, {key: value or {} for key, value in config.items()})
# Filter out the repeating and common config section [homeassistant]
components = set(key.split(' ')[0] for key in config.keys()
if key != core.DOMAIN)
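    # 'sensor 2'-style keys collapse to their base component name; the core section is excluded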
# setup components
# pylint: disable=not-an-iterable
res = yield from core_components.async_setup(hass, config)
if not res:
_LOGGER.error('Home Assistant core failed to initialize. '
'Further initialization aborted.')
return hass
yield from persistent_notification.async_setup(hass, config)
_LOGGER.info('Home Assistant core initialized')
# Give event decorators access to HASS
event_decorators.HASS = hass
service.HASS = hass
# Setup the components
for domain in loader.load_order_components(components):
yield from _async_setup_component(hass, domain, config)
return hass
def from_config_file(config_path: str,
hass: Optional[core.HomeAssistant]=None,
verbose: bool=False,
skip_pip: bool=True,
log_rotate_days: Any=None):
"""Read the configuration file and try to start all the functionality.
Will add functionality to 'hass' parameter if given,
instantiates a new Home Assistant object if 'hass' is not given.
"""
if hass is None:
hass = core.HomeAssistant()
@asyncio.coroutine
def _async_init_from_config_file(future):
try:
re_hass = yield from async_from_config_file(
config_path, hass, verbose, skip_pip, log_rotate_days)
future.set_result(re_hass)
# pylint: disable=broad-except
except Exception as exc:
future.set_exception(exc)
# run task
future = asyncio.Future(loop=hass.loop)
hass.loop.create_task(_async_init_from_config_file(future))
hass.loop.run_until_complete(future)
return future.result()
@asyncio.coroutine
def async_from_config_file(config_path: str,
hass: core.HomeAssistant,
verbose: bool=False,
skip_pip: bool=True,
log_rotate_days: Any=None):
"""Read the configuration file and try to start all the functionality.
Will add functionality to 'hass' parameter.
This method is a coroutine.
"""
# Set config dir to directory holding config file
config_dir = os.path.abspath(os.path.dirname(config_path))
hass.config.config_dir = config_dir
yield from hass.loop.run_in_executor(
None, mount_local_lib_path, config_dir)
enable_logging(hass, verbose, log_rotate_days)
try:
config_dict = yield from hass.loop.run_in_executor(
None, conf_util.load_yaml_config_file, config_path)
except HomeAssistantError:
return None
finally:
clear_secret_cache()
hass = yield from async_from_config_dict(
config_dict, hass, enable_log=False, skip_pip=skip_pip)
return hass
def enable_logging(hass: core.HomeAssistant, verbose: bool=False,
log_rotate_days=None) -> None:
"""Setup the logging.
Async friendly.
"""
logging.basicConfig(level=logging.INFO)
fmt = ("%(log_color)s%(asctime)s %(levelname)s (%(threadName)s) "
"[%(name)s] %(message)s%(reset)s")
# suppress overly verbose logs from libraries that aren't helpful
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("aiohttp.access").setLevel(logging.WARNING)
try:
from colorlog import ColoredFormatter
logging.getLogger().handlers[0].setFormatter(ColoredFormatter(
fmt,
datefmt='%y-%m-%d %H:%M:%S',
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
}
))
except ImportError:
pass
# Log errors to a file if we have write access to file or config dir
err_log_path = hass.config.path(ERROR_LOG_FILENAME)
err_path_exists = os.path.isfile(err_log_path)
# Check if we can write to the error log if it exists or that
# we can create files in the containing directory if not.
if (err_path_exists and os.access(err_log_path, os.W_OK)) or \
(not err_path_exists and os.access(hass.config.config_dir, os.W_OK)):
if log_rotate_days:
err_handler = logging.handlers.TimedRotatingFileHandler(
err_log_path, when='midnight', backupCount=log_rotate_days)
else:
err_handler = logging.FileHandler(
err_log_path, mode='w', delay=True)
err_handler.setLevel(logging.INFO if verbose else logging.WARNING)
err_handler.setFormatter(
logging.Formatter('%(asctime)s %(name)s: %(message)s',
datefmt='%y-%m-%d %H:%M:%S'))
logger = logging.getLogger('')
logger.addHandler(err_handler)
logger.setLevel(logging.INFO)
else:
_LOGGER.error(
'Unable to setup error log %s (access denied)', err_log_path)
def log_exception(ex, domain, config, hass):
"""Generate log exception for config validation."""
run_callback_threadsafe(
hass.loop, async_log_exception, ex, domain, config, hass).result()
@core.callback
def _async_persistent_notification(hass: core.HomeAssistant, component: str,
link: Optional[bool]=False):
"""Print a persistent notification.
This method must be run in the event loop.
"""
_PERSISTENT_ERRORS[component] = _PERSISTENT_ERRORS.get(component) or link
_lst = [HA_COMPONENT_URL.format(name.replace('_', '-'), name)
if link else name for name, link in _PERSISTENT_ERRORS.items()]
message = ('The following components and platforms could not be set up:\n'
'* ' + '\n* '.join(list(_lst)) + '\nPlease check your config')
persistent_notification.async_create(
hass, message, 'Invalid config', 'invalid_config')
@core.callback
def async_log_exception(ex, domain, config, hass):
"""Generate log exception for config validation.
This method must be run in the event loop.
"""
message = 'Invalid config for [{}]: '.format(domain)
if hass is not None:
_async_persistent_notification(hass, domain, True)
if 'extra keys not allowed' in ex.error_message:
message += '[{}] is an invalid option for [{}]. Check: {}->{}.'\
.format(ex.path[-1], domain, domain,
'->'.join(str(m) for m in ex.path))
else:
message += '{}.'.format(humanize_error(config, ex))
domain_config = config.get(domain, config)
message += " (See {}:{}). ".format(
getattr(domain_config, '__config_file__', '?'),
getattr(domain_config, '__line__', '?'))
if domain != 'homeassistant':
message += ('Please check the docs at '
'https://home-assistant.io/components/{}/'.format(domain))
_LOGGER.error(message)
def mount_local_lib_path(config_dir: str) -> str:
"""Add local library to Python Path.
Async friendly.
"""
deps_dir = os.path.join(config_dir, 'deps')
if deps_dir not in sys.path:
sys.path.insert(0, os.path.join(config_dir, 'deps'))
return deps_dir
| mit | 9,182,423,502,761,517,000 | 34.194719 | 79 | 0.615294 | false |
tmxdyf/CouchPotatoServer | couchpotato/core/providers/torrent/sceneaccess/main.py | 5 | 3494 | from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.torrent.base import TorrentProvider
import traceback
log = CPLog(__name__)
class SceneAccess(TorrentProvider):
urls = {
'test': 'https://www.sceneaccess.eu/',
'login': 'https://www.sceneaccess.eu/login',
'login_check': 'https://www.sceneaccess.eu/inbox',
'detail': 'https://www.sceneaccess.eu/details?id=%s',
'search': 'https://www.sceneaccess.eu/browse?c%d=%d',
'download': 'https://www.sceneaccess.eu/%s',
}
cat_ids = [
([22], ['720p', '1080p']),
([7], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr', 'brrip']),
([8], ['dvdr']),
]
http_time_between_calls = 1 #seconds
def _search(self, movie, quality, results):
cat = self.getCatId(quality['identifier'])
if not cat:
return
url = self.urls['search'] % (
cat[0],
cat[0]
)
arguments = tryUrlencode({
'search': movie['library']['identifier'],
'method': 3,
})
url = "%s&%s" % (url, arguments)
data = self.getHTMLData(url)
if data:
html = BeautifulSoup(data)
try:
resultsTable = html.find('table', attrs = {'id' : 'torrents-table'})
if resultsTable is None:
return
entries = resultsTable.find_all('tr', attrs = {'class' : 'tt_row'})
for result in entries:
link = result.find('td', attrs = {'class' : 'ttr_name'}).find('a')
url = result.find('td', attrs = {'class' : 'td_dl'}).find('a')
leechers = result.find('td', attrs = {'class' : 'ttr_leechers'}).find('a')
torrent_id = link['href'].replace('details?id=', '')
results.append({
'id': torrent_id,
'name': link['title'],
'url': self.urls['download'] % url['href'],
'detail_url': self.urls['detail'] % torrent_id,
'size': self.parseSize(result.find('td', attrs = {'class' : 'ttr_size'}).contents[0]),
'seeders': tryInt(result.find('td', attrs = {'class' : 'ttr_seeders'}).find('a').string),
'leechers': tryInt(leechers.string) if leechers else 0,
'get_more_info': self.getMoreInfo,
})
except:
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
def getLoginParams(self):
return {
'username': self.conf('username'),
'password': self.conf('password'),
'submit': 'come on in',
}
def getMoreInfo(self, item):
full_description = self.getCache('sceneaccess.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)
html = BeautifulSoup(full_description)
nfo_pre = html.find('div', attrs = {'id':'details_table'})
description = toUnicode(nfo_pre.text) if nfo_pre else ''
item['description'] = description
return item
def loginSuccess(self, output):
return '/inbox' in output.lower()
loginCheckSuccess = loginSuccess
| gpl-3.0 | -8,374,766,890,605,706,000 | 34.292929 | 117 | 0.523183 | false |
umlaeute/pyjack | setup.py | 1 | 1659 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Distutils installer for PyJack
# Test for Jack2
#---------------------------------------------------#
import os
if os.path.exists("/usr/local/include/jack/jack.h"):
path = "/usr/local/include/jack/jack.h"
elif os.path.exists("/usr/include/jack/jack.h"):
path = "/usr/include/jack/jack.h"
else:
print("You don't seem to have the jack headers installed.\nPlease install them first")
exit(-1)
test = open(path).read()
pyjack_macros=[]
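# jack_get_version_string() only exists in JACK2, so its presence in jack.h decides
# which compile-time macro the C extension is built with.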
if ("jack_get_version_string" in test):
pyjack_macros+=[('JACK2', '1')]
else:
pyjack_macros+=[('JACK1', '1')]
#----------------------------------------------------#
from distutils.core import setup, Extension
import numpy.distutils
numpy_include_dirs = numpy.distutils.misc_util.get_numpy_include_dirs()
setup(
name = "pyjack",
version = "0.6",
description = "Python bindings for the Jack Audio Server",
author = "Andrew W. Schmeder, falkTX, IOhannes m zmölnig",
author_email = "[email protected]",
url = "http://sourceforge.net/projects/py-jack",
long_description = '''PyJack is a module written in C which exposes the Jack API to Python.
For information about Jack see http://jackaudio.org. This
enables a Python program to connect to and interact with pro-audio
applications which use the Jack Audio Server''',
license = "GNU LGPL2.1",
ext_modules = [Extension("jack",
["pyjack.c"],
libraries=["jack", "dl"],
include_dirs=numpy_include_dirs,
define_macros=pyjack_macros,
)],
)
| lgpl-2.1 | -3,156,911,221,869,027,000 | 32.16 | 95 | 0.585645 | false |
adaptivethreat/EmPyre | lib/modules/collection/linux/sniffer.py | 2 | 10971 | class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'PcapSniffer',
# list of one or more authors for the module
'Author': ['@Killswitch_GUI'],
# more verbose multi-line description of the module
'Description': 'This module will sniff all interfaces on the target, and write in pcap format.',
# True if the module needs to run in the background
'Background': False,
# File extension to save the file as
'OutputExtension': "pcap",
# if the module needs administrative privileges
'NeedsAdmin': True,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe': False,
# list of any references/other comments
'Comments': ['For full comments and code: https://gist.github.com/killswitch-GUI/314e79581f2619a18d94c81d53e5466f']
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent': {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to run sniffer on.',
'Required' : True,
'Value' : ''
},
'IpFilter': {
'Description' : 'Set IP to filter on (dst & src).',
'Required' : False,
'Value' : '0'
},
'PortFilter': {
'Description' : 'Set port to filter on (dst & src).',
'Required' : False,
'Value' : '0'
},
'MaxSize': {
'Description' : 'Set max file size to save to disk/memory (MB).',
'Required' : True,
'Value' : '1'
},
'MaxPackets': {
'Description' : 'Set max packets to capture.',
'Required' : True,
'Value' : '100'
},
'InMemory': {
'Description' : 'Store binary data in memory, never drop to disk (WARNING: set MaxSize).',
'Required' : False,
'Value' : 'True'
},
'SavePath': {
                'Description' : 'Path of the file to save (not used if InMemory is True).',
'Required' : True,
'Value' : '/tmp/debug.pcap'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
savePath = self.options['SavePath']['Value']
inMemory = self.options['InMemory']['Value']
maxPackets = self.options['MaxPackets']['Value']
maxSize = self.options['MaxSize']['Value']
portFilter = self.options['PortFilter']['Value']
ipFilter = self.options['IpFilter']['Value']
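        # Quote the IP so it is embedded in the generated script as a string literal
        # ('0' is the sentinel for "no filter" and stays unquoted/falsy).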
if ipFilter != '0':
ipFilter = "'" + str(ipFilter) + "'"
# the Python script itself, with the command to invoke
# for execution appended to the end. Scripts should output
# everything to the pipeline for proper parsing.
#
# the script should be stripped of comments, with a link to any
# original reference script included in the comments.
script = """
import socket, time, base64
from datetime import datetime
import struct
def outputPcapPFile(fileName, inMemory=False):
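    # Global pcap file header: magic 0xa1b2c3d4, version 2.4, zero timezone/sigfigs,
    # snaplen 0x040000, link-layer type 1 (Ethernet)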
pcapHeader = struct.pack("@IHHIIII",0xa1b2c3d4,2,4,0,0,0x040000,1)
if inMemory:
return pcapHeader
with open(str(fileName), 'wb+') as f:
f.write(pcapHeader)
def ouputPcapPacket(fileName, pLen, packet, inMemory=False):
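    # Per-packet pcap record header: seconds, microseconds, captured length, original length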
t0, t1, t2, t3, t4, t5, t6, t7, t8 = time.gmtime()
tstamp = time.mktime((t0, t1, t2, t3, t4, t5, 0, 0, 0))
dt = datetime.now()
mstamp = dt.microsecond
pcapPacket = struct.pack("@IIII",tstamp,mstamp,pLen,pLen)
if inMemory:
return pcapPacket
with open(str(fileName), 'ab+') as f:
f.write(pcapPacket)
f.write(packet)
def parseEthernetHeader(data):
dst = struct.unpack('!BBBBBB',data[:6]) # destination host address
src = struct.unpack('!BBBBBB',data[6:12]) # source host address
nextType = struct.unpack('!H',data[12:14])[0] # IP? ARP? RARP? etc
return dst, src, nextType
def parseIpHeader(data):
ihl = struct.unpack('!B',data[14:15])[0] # 4 bit version 4 bit ihl
tos = struct.unpack('!B',data[15:16])[0] # Type of service
totalLen = struct.unpack('!H',data[16:18])[0] # IP header length
ident = struct.unpack('!H',data[18:20])[0] # IP ident
fragFlags = struct.unpack('!H',data[20:22])[0] # Frag_and_flags
ttl = struct.unpack('!B',data[22:23])[0] # Packet Time-to-Live
proto = struct.unpack('!B',data[23:24])[0] # Next protocol
checksum = struct.unpack('!H',data[24:26])[0] # checksum
sourceIp = struct.unpack('!I',data[26:30])[0] # Source IP addr
destIp = struct.unpack('!I',data[30:34])[0] # Dest IP addr
sourceIpStr = parseIpAddr(data[26:30]) # hton ip
destIpStr = parseIpAddr(data[30:34]) # hton ip
return proto, sourceIpStr, destIpStr
def parseTcpHeader(data):
sourcePort = struct.unpack('!H',data[34:36])[0] # source port (set pointer to end of IP Header)
destPort = struct.unpack('!H',data[36:38])[0] # destination port
sequence = struct.unpack('!I',data[38:42])[0] # sequence number - 32 bits
acknowledge = struct.unpack('!I',data[42:46])[0] # acknowledgement number - 32 bits
return sourcePort, destPort
def parseUdpHeader(data):
sourcePort = struct.unpack('!H',data[34:36])[0] # source port (set pointer to end of IP Header)
destPort = struct.unpack('!H',data[36:38])[0] # destination port
udpLength = struct.unpack('!H',data[38:40])[0] # Udp packet length
udpChecksum = struct.unpack('!H',data[40:42])[0] # Udp checksum (optional)
return sourcePort, destPort
def parseIcmpHeader(data):
typeCode = struct.unpack('!H',data[34:36])[0] # ICMP Error type
code = struct.unpack('!H',data[36:38])[0] # Type sub code
checksum = struct.unpack('!H',data[38:40])[0] # checksum
idCode = struct.unpack('!H',data[40:42])[0] # ICMP ID code
seq = struct.unpack('!H',data[42:44])[0] # Seq number
def parseIpAddr(data):
ipOct = []
ipOct.append(str(struct.unpack('!B', data[0:1])[0])) # octet 1
ipOct.append(str(struct.unpack('!B', data[1:2])[0])) # octet 2
ipOct.append(str(struct.unpack('!B', data[2:3])[0])) # octet 3
ipOct.append(str(struct.unpack('!B', data[3:4])[0])) # octet 4
ipStr = '.'.join(ipOct)
return ipStr
def socketSniffer(fileName,ipFilter,portFilter,maxSize, maxPackets, inMemory):
try:
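        # AF_PACKET raw socket with ETH_P_ALL (0x0003) captures every frame on all interfaces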
s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW ,socket.ntohs(0x0003))
except socket.error , msg:
return
# build pcap file header and output
memoryPcap = ''
if inMemory:
memoryPcap += outputPcapPFile(fileName, inMemory=inMemory)
else:
outputPcapPFile(fileName, inMemory=inMemory)
packetCounter = 0
sizeCounter = 0
maxSize = maxSize * 1024 * 1024
while (packetCounter < maxPackets):
if (sizeCounter > maxSize):
break
packet = s.recvfrom(65565)
pLen = len(packet[0])
if (ipFilter or portFilter):
packetOut = False
dst, src, nextType = parseEthernetHeader(packet[0])
if (hex(nextType) == hex(0x800)):
proto, sourceIpStr, destIpStr = parseIpHeader(packet[0])
# ICMP (1)
# TCP (6)
# UDP (17)
if (proto == 6):
sourcePort, destPort = parseTcpHeader(packet[0])
if ipFilter and portFilter:
if (ipFilter == sourceIpStr or ipFilter == destIpStr) and (portFilter == sourcePort or portFilter == destPort):
packetOut = True
elif (ipFilter == sourceIpStr or ipFilter == destIpStr):
packetOut = True
elif (portFilter == sourcePort or portFilter == destPort):
packetOut = True
elif (proto == 17):
sourcePort, destPort = parseUdpHeader(packet[0])
if ipFilter and portFilter:
if (ipFilter == sourceIpStr or ipFilter == destIpStr) and (portFilter == sourcePort or portFilter == destPort):
packetOut = True
elif (ipFilter == sourceIpStr or ipFilter == destIpStr):
packetOut = True
elif (portFilter == sourcePort or portFilter == destPort):
packetOut = True
else:
if (ipFilter == sourceIpStr or ipFilter == destIpStr):
packetOut = True
if packetOut:
if inMemory:
memoryPcap += ouputPcapPacket(fileName ,pLen, packet[0], inMemory=inMemory)
memoryPcap += packet[0]
sizeCounter += pLen
packetCounter += 1
else:
ouputPcapPacket(fileName ,pLen, packet[0], inMemory=inMemory)
sizeCounter += pLen
packetCounter += 1
else:
if inMemory:
memoryPcap += ouputPcapPacket(fileName ,pLen, packet[0], inMemory=inMemory)
memoryPcap += packet[0]
sizeCounter += pLen
packetCounter += 1
else:
ouputPcapPacket(fileName ,pLen, packet[0], inMemory=inMemory)
sizeCounter += pLen
packetCounter += 1
try:
if inMemory:
print memoryPcap
else:
f = open('%s', 'rb')
data = base64.b64encode(f.read())
f.close()
run_command('rm -f %s')
print data
except Exception as e:
print e
fileNameSave = '%s'
ipFilter = %s
portFilter = %s
maxSize = %s
maxPackets = %s
inMemory = %s
socketSniffer(fileNameSave,ipFilter,portFilter,maxSize,maxPackets, inMemory)
""" % (savePath, savePath, savePath, ipFilter, portFilter, maxSize, maxPackets, inMemory)
return script
| bsd-3-clause | 3,695,886,570,560,215,000 | 39.040146 | 127 | 0.567587 | false |
F5Networks/f5-ansible | ansible_collections/f5networks/f5_modules/tests/unit/modules/network/f5/test_bigiq_regkey_license_assignment.py | 2 | 4065 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.f5networks.f5_modules.plugins.modules.bigiq_regkey_license_assignment import (
ModuleParameters, ModuleManager, ArgumentSpec
)
from ansible_collections.f5networks.f5_modules.tests.unit.compat import unittest
from ansible_collections.f5networks.f5_modules.tests.unit.compat.mock import Mock, patch
from ansible_collections.f5networks.f5_modules.tests.unit.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters_unmanaged(self):
args = dict(
pool='foo-pool',
key='XXXX-XXXX-XXXX-XXXX-XXXX',
device='1.1.1.1',
managed=False,
device_username='admin',
device_password='secret',
device_port='8443'
)
p = ModuleParameters(params=args)
assert p.pool == 'foo-pool'
assert p.key == 'XXXX-XXXX-XXXX-XXXX-XXXX'
assert p.device == '1.1.1.1'
assert p.managed is False
assert p.device_username == 'admin'
assert p.device_password == 'secret'
assert p.device_port == 8443
def test_module_parameters_managed(self):
args = dict(
pool='foo-pool',
key='XXXX-XXXX-XXXX-XXXX-XXXX',
device='1.1.1.1',
managed=True,
)
p = ModuleParameters(params=args)
assert p.pool == 'foo-pool'
assert p.key == 'XXXX-XXXX-XXXX-XXXX-XXXX'
assert p.device == '1.1.1.1'
assert p.managed is True
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
self.patcher1 = patch('time.sleep')
self.patcher1.start()
self.p1 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigiq_regkey_license_assignment.bigiq_version')
self.m1 = self.p1.start()
self.m1.return_value = '6.1.0'
self.p2 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigiq_regkey_license_assignment.send_teem')
self.m2 = self.p2.start()
self.m2.return_value = True
def tearDown(self):
self.patcher1.stop()
self.p1.stop()
self.p2.stop()
def test_create(self, *args):
set_module_args(dict(
pool='foo-pool',
key='XXXX-XXXX-XXXX-XXXX-XXXX',
device='1.1.1.1',
device_username='admin',
device_password='secret',
managed='no',
state='present',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
required_if=self.spec.required_if
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
mm.wait_for_device_to_be_licensed = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
| gpl-3.0 | -4,771,582,864,722,391,000 | 29.795455 | 130 | 0.6123 | false |