Search is not available for this dataset
text
stringlengths 75
104k
|
---|
def parse_definitions(self, class_, all=False):
    """Parse multiple definitions and yield them.

    Walks the token stream until it is exhausted or a DEDENT closes the
    current suite.  ``class_`` is the definition type being parsed under
    (its ``_nest`` classmethod picks the child type for a ``def`` or
    ``class`` keyword); ``all`` enables handling of a module-level
    ``__all__`` assignment.
    """
    while self.current is not None:
        self.log.debug(
            "parsing definition list, current token is %r (%s)",
            self.current.kind,
            self.current.value,
        )
        self.log.debug("got_newline: %s", self.stream.got_logical_newline)
        if all and self.current.value == "__all__":
            # Module level only: record the names exported via __all__.
            self.parse_all()
        elif (
            self.current.kind == tk.OP
            and self.current.value == "@"
            and self.stream.got_logical_newline
        ):
            # '@' at the start of a logical line introduces a decorator.
            self.consume(tk.OP)
            self.parse_decorators()
        elif self.current.value in ["def", "class"]:
            yield self.parse_definition(class_._nest(self.current.value))
        elif self.current.kind == tk.INDENT:
            # Recurse into the indented suite and yield its definitions.
            self.consume(tk.INDENT)
            for definition in self.parse_definitions(class_):
                yield definition
        elif self.current.kind == tk.DEDENT:
            # End of the current suite: hand control back to the caller.
            self.consume(tk.DEDENT)
            return
        elif self.current.value == "from":
            # Parsed only to detect __future__ imports.
            self.parse_from_import_statement()
        else:
            self.stream.move()
def parse_all(self):
    """Parse the __all__ definition in a module.

    Rebuilds the source text of the tuple/list literal assigned to
    ``__all__`` and evaluates it, storing the result in ``self.all``.
    Raises AllError for anything it cannot statically understand.
    """
    assert self.current.value == "__all__"
    self.consume(tk.NAME)
    # Only a plain `__all__ = (...)` or `__all__ = [...]` is supported.
    if self.current.value != "=":
        raise AllError("Could not evaluate contents of __all__. ")
    self.consume(tk.OP)
    if self.current.value not in "([":
        raise AllError("Could not evaluate contents of __all__. ")
    self.consume(tk.OP)
    self.all = []
    # Reconstruct the literal, normalised to a tuple.
    all_content = "("
    while self.current.kind != tk.OP or self.current.value not in ")]":
        if self.current.kind in (tk.NL, tk.COMMENT):
            # Layout tokens contribute nothing to the literal.
            pass
        elif self.current.kind == tk.STRING or self.current.value == ",":
            all_content += self.current.value
        else:
            raise AllError(
                "Unexpected token kind in __all__: {!r}. ".format(
                    self.current.kind
                )
            )
        self.stream.move()
    self.consume(tk.OP)
    all_content += ")"
    try:
        # eval is confined to string/comma tokens collected above, so it
        # cannot execute arbitrary code here.
        self.all = eval(all_content, {})
    except BaseException as e:
        # NOTE(review): "\b" below looks like it was meant to be "\n" --
        # confirm upstream before changing the message.
        raise AllError(
            "Could not evaluate contents of __all__."
            "\bThe value was {}. The exception was:\n{}".format(all_content, e)
        )
def parse_module(self):
    """Parse a module (and its children) and return a Module object.

    Chooses Package over Module when the file is an ``__init__.py``,
    links each child back to the module, and attaches the collected
    __future__ imports.
    """
    self.log.debug("parsing module.")
    start = self.line
    docstring = self.parse_docstring()
    children = list(self.parse_definitions(Module, all=True))
    # The token stream must be fully consumed once the module is parsed.
    assert self.current is None, self.current
    end = self.line
    # Packages are modules whose file is an __init__.py.
    cls = Module
    if self.filename.endswith("__init__.py"):
        cls = Package
    module = cls(
        self.filename,
        self.source,
        start,
        end,
        [],
        docstring,
        children,
        None,
        self.all,
        None,
        "",
    )
    for child in module.children:
        child.parent = module
    module.future_imports = self.future_imports
    self.log.debug("finished parsing module.")
    return module
def check_current(self, kind=None, value=None):
    """Verify the current token is of type `kind` and equals `value`.

    Either check is skipped when its argument is falsy.  Failure is
    reported via ``assert`` with a diagnostic message.
    """
    message = textwrap.dedent(
        """
        Unexpected token at line {self.line}:
        In file: {self.filename}
        Got kind {self.current.kind!r}
        Got value {self.current.value}
        """.format(self=self)
    )
    kind_ok = self.current.kind == kind if kind else True
    value_ok = self.current.value == value if value else True
    assert kind_ok and value_ok, message
def parse_from_import_statement(self):
    """Parse a 'from x import y' statement.

    The only purpose is to detect __future__ imports.
    """
    self.log.debug("parsing from/import statement.")
    future = self._parse_from_import_source()
    self._parse_from_import_names(future)
def _parse_from_import_names(self, is_future_import):
    """Parse the 'y' part in a 'from x import y' statement.

    When ``is_future_import`` is true, every imported name is recorded
    in ``self.future_imports``.  Consumes tokens up to the end of the
    statement (closing paren, NEWLINE/ENDMARKER, or ';').
    """
    if self.current.value == "(":
        # Parenthesised import list ends at the closing ')' OP token.
        self.consume(tk.OP)
        expected_end_kinds = (tk.OP,)
    else:
        expected_end_kinds = (tk.NEWLINE, tk.ENDMARKER)
    while self.current.kind not in expected_end_kinds and not (
        self.current.kind == tk.OP and self.current.value == ";"
    ):
        if self.current.kind != tk.NAME:
            # Skip commas, line continuations, comments, etc.
            self.stream.move()
            continue
        self.log.debug(
            "parsing import, token is %r (%s)",
            self.current.kind,
            self.current.value,
        )
        if is_future_import:
            self.log.debug("found future import: %s", self.current.value)
            self.future_imports.add(self.current.value)
        self.consume(tk.NAME)
        self.log.debug(
            "parsing import, token is %r (%s)",
            self.current.kind,
            self.current.value,
        )
        if self.current.kind == tk.NAME and self.current.value == "as":
            self.consume(tk.NAME)  # as
            if self.current.kind == tk.NAME:
                self.consume(tk.NAME)  # new name, irrelevant
        if self.current.value == ",":
            self.consume(tk.OP)
        self.log.debug(
            "parsing import, token is %r (%s)",
            self.current.kind,
            self.current.value,
        )
def run(self):
    """Use docutils to check docstrings are valid RST.

    Generator yielding flake8-style tuples of
    ``(line, column, message, checker_type)``.
    """
    # Is there any reason not to call load_source here?
    if self.err is not None:
        # File could not be loaded at all; report and fall through.
        # NOTE(review): execution continues with self.source == None,
        # so parse() below sees an empty stream -- confirm intended.
        assert self.source is None
        msg = "%s%03i %s" % (
            rst_prefix,
            rst_fail_load,
            "Failed to load file: %s" % self.err,
        )
        yield 0, 0, msg, type(self)
        module = []
    try:
        module = parse(StringIO(self.source), self.filename)
    except SyntaxError as err:
        msg = "%s%03i %s" % (
            rst_prefix,
            rst_fail_parse,
            "Failed to parse file: %s" % err,
        )
        yield 0, 0, msg, type(self)
        module = []
    except AllError:
        msg = "%s%03i %s" % (
            rst_prefix,
            rst_fail_all,
            "Failed to parse __all__ entry.",
        )
        yield 0, 0, msg, type(self)
        module = []
    for definition in module:
        if not definition.docstring:
            # People can use flake8-docstrings to report missing
            # docstrings
            continue
        try:
            # Note we use the PEP257 trim algorithm to remove the
            # leading whitespace from each line - this avoids false
            # positive severe error "Unexpected section title."
            unindented = trim(dequote_docstring(definition.docstring))
            # Off load RST validation to reStructuredText-lint
            # which calls docutils internally.
            # TODO: Should we pass the Python filename as filepath?
            rst_errors = list(rst_lint.lint(unindented))
        except Exception as err:
            # e.g. UnicodeDecodeError
            msg = "%s%03i %s" % (
                rst_prefix,
                rst_fail_lint,
                "Failed to lint docstring: %s - %s" % (definition.name, err),
            )
            yield definition.start, 0, msg, type(self)
            continue
        for rst_error in rst_errors:
            # TODO - make this a configuration option?
            if rst_error.level <= 1:
                continue
            # Levels:
            #
            # 0 - debug   --> we don't receive these
            # 1 - info    --> RST1## codes
            # 2 - warning --> RST2## codes
            # 3 - error   --> RST3## codes
            # 4 - severe  --> RST4## codes
            #
            # Map the string to a unique code:
            msg = rst_error.message.split("\n", 1)[0]
            code = code_mapping(rst_error.level, msg)
            assert code < 100, code
            code += 100 * rst_error.level
            msg = "%s%03i %s" % (rst_prefix, code, msg)
            # This will return the line number by combining the
            # start of the docstring with the offet within it.
            # We don't know the column number, leaving as zero.
            yield definition.start + rst_error.line, 0, msg, type(self)
def load_source(self):
    """Load the source for the specified file.

    Reads stdin when the filename is one of STDIN_NAMES, otherwise
    opens the file with tokenize_open (honouring PEP 263 encoding
    declarations).  The text is stored in self.source.
    """
    if self.filename in self.STDIN_NAMES:
        self.filename = "stdin"
        if sys.version_info[0] < 3:
            # Python 2: stdin is already a byte stream.
            self.source = sys.stdin.read()
        else:
            # Python 3: wrap the buffer so decoding errors are ignored.
            self.source = TextIOWrapper(sys.stdin.buffer, errors="ignore").read()
    else:
        # Could be a Python 2.7 StringIO with no context manager, sigh.
        # with tokenize_open(self.filename) as fd:
        #     self.source = fd.read()
        handle = tokenize_open(self.filename)
        self.source = handle.read()
        handle.close()
def lab_to_rgb(l, a, b):
    """Convert CIE L*a*b* components to RGB components.

    The conversion goes through XYZ space using the D65 white point
    (daylight illumination).
    Algorithms adopted from:
    http://www.easyrgb.com/math.php
    """
    # Lab -> XYZ: invert the f() function of the forward transform.
    fy = (l + 16) / 116.0
    fx = a / 500.0 + fy
    fz = fy - b / 200.0
    f = [fx, fy, fz]
    for i in range(3):
        cubed = pow(f[i], 3)
        if cubed > 0.008856:
            f[i] = cubed
        else:
            f[i] = (f[i] - 16 / 116.0) / 7.787
    # Scale by the D65 reference white (Observer = 2 degrees).
    x = f[0] * 95.047 / 100
    y = f[1] * 100.0 / 100
    z = f[2] * 108.883 / 100
    # XYZ -> linear sRGB.
    channels = [
        x * 3.2406 + y * -1.5372 + z * -0.4986,
        x * -0.9689 + y * 1.8758 + z * 0.0415,
        x * 0.0557 + y * -0.2040 + z * 1.0570,
    ]
    # Gamma companding.
    for i in range(3):
        c = channels[i]
        if c > 0.0031308:
            channels[i] = 1.055 * pow(c, 1 / 2.4) - 0.055
        else:
            channels[i] = 12.92 * c
    # r, g, b = channels[0]*255, channels[1]*255, channels[2]*255
    return channels[0], channels[1], channels[2]
def _darkest(self):
""" Returns the darkest swatch.
Knowing the contract between a light and a dark swatch
can help us decide how to display readable typography.
"""
rgb, n = (1.0, 1.0, 1.0), 3.0
for r,g,b in self:
if r+g+b < n:
rgb, n = (r,g,b), r+g+b
return rgb |
def parse_theme(self, xml):
    """Parse a theme from XML returned by Kuler.

    Gets the theme's id, label and swatches.
    All of the swatches are converted to RGB.
    If we have a full description for a theme id in cache,
    parse that to get tags associated with the theme.
    """
    kt = KulerTheme()
    # The author name is nested inside the <author> element.
    kt.author = xml.getElementsByTagName("author")[0]
    kt.author = kt.author.childNodes[1].childNodes[0].nodeValue
    kt.id = int(self.parse_tag(xml, "id"))
    kt.label = self.parse_tag(xml, "label")
    mode = self.parse_tag(xml, "mode")
    for swatch in xml.getElementsByTagName("swatch"):
        # Each swatch carries up to four colour components, whose
        # meaning depends on the theme's colour mode.
        c1 = float(self.parse_tag(swatch, "c1"))
        c2 = float(self.parse_tag(swatch, "c2"))
        c3 = float(self.parse_tag(swatch, "c3"))
        c4 = float(self.parse_tag(swatch, "c4"))
        if mode == "rgb":
            kt.append((c1, c2, c3))
        if mode == "cmyk":
            kt.append(cmyk_to_rgb(c1, c2, c3, c4))
        if mode == "hsv":
            kt.append(colorsys.hsv_to_rgb(c1, c2, c3))
        if mode == "hex":
            # NOTE(review): c1 was converted with float() above, which
            # looks wrong for hex values -- confirm against the API.
            kt.append(hex_to_rgb(c1))
        if mode == "lab":
            kt.append(lab_to_rgb(c1, c2, c3))
    # If we have the full theme in cache,
    # parse tags from it.
    if self._cache.exists(self.id_string + str(kt.id)):
        xml = self._cache.read(self.id_string + str(kt.id))
        xml = minidom.parseString(xml)
        for tags in xml.getElementsByTagName("tag"):
            tags = self.parse_tag(tags, "label")
            tags = tags.split(" ")
            kt.tags.extend(tags)
    return kt
def create_listening_socket(host, port, handler):
    """
    Create a TCP socket listening on (host, port) and register `handler`
    with the GObject main loop for incoming connections.

    :param host: interface to bind to.
    :param port: port number to listen on.
    :param handler: callback invoked by GObject when the socket is readable.
    :return: the listening socket.
    """
    sock = socket.socket()
    # Allow quick restarts without waiting for TIME_WAIT to expire.
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind((host, port))
    sock.listen(1)
    GObject.io_add_watch(sock, GObject.IO_IN, handler)
    return sock
def listener(self, sock, *args):
    '''Asynchronous connection listener. Starts a handler for each connection.

    Accepts the pending connection, wraps it in a file-like object and
    builds a ShoebotCmd shell bound to it.  Returns True so the GObject
    watch stays installed.
    '''
    conn, addr = sock.accept()
    f = conn.makefile(conn)
    # The shell reads commands from and writes responses to the socket.
    self.shell = ShoebotCmd(self.bot, stdin=f, stdout=f, intro=INTRO)
    print(_("Connected"))
    GObject.io_add_watch(conn, GObject.IO_IN, self.handler)
    if self.shell.intro:
        self.shell.stdout.write(str(self.shell.intro) + "\n")
        self.shell.stdout.flush()
    return True
def handler(self, conn, *args):
    '''
    Asynchronous connection handler. Processes each line from the socket.

    :param conn: the accepted connection; closed when the shell stops.
    :return: True to keep the GObject watch installed, False to remove it.
    '''
    # lines from cmd.Cmd
    self.shell.stdout.write(self.shell.prompt)
    line = self.shell.stdin.readline()
    if not len(line):
        # Remote side closed the connection: drop the watch.
        # (A dead `line = 'EOF'` assignment before this return was removed.)
        return False
    else:
        line = line.rstrip('\r\n')
        line = self.shell.precmd(line)
        stop = self.shell.onecmd(line)
        stop = self.shell.postcmd(stop, line)
        self.shell.stdout.flush()
        self.shell.postloop()
    # end lines from cmd.Cmd
    if stop:
        # Shell requested termination: tear down and close the socket.
        self.shell = None
        conn.close()
    return not stop
def trusted_cmd(f):
    """
    Decorator marking a command as trusted.

    Trusted commands cannot be run remotely.

    :param f: the do_* command method to wrap.
    :return: wrapper that only calls `f` when the shell is trusted.
    """
    def run_cmd(self, line):
        if not self.trusted:
            print("Sorry cannot do %s here." % f.__name__[3:])
            return
        f(self, line)

    # Register the command name in the module-wide trusted set.
    global trusted_cmds
    trusted_cmds.add(f.__name__)
    run_cmd.__doc__ = f.__doc__
    return run_cmd
def print_response(self, input='', keep=False, *args, **kwargs):
    """
    Print response; if a cookie is set then prefix each line with it.

    :param input: text to output, printed line by line.
    :param keep: if True more output is to come (the separator after the
        cookie becomes '>' instead of the final ':').
    :param cookie: set a custom cookie,
        if set to 'None' then self.cookie will be used.
        if set to 'False' disables cookie output entirely
    :param status: optional status string; forces a single empty output
        line even when `input` is empty.
    :return:
    """
    cookie = kwargs.get('cookie')
    if cookie is None:
        cookie = self.cookie or ''
    status = kwargs.get('status')
    lines = input.splitlines()
    if status and not lines:
        # A bare status is still reported on one empty line.
        lines = ['']
    if cookie:
        output_template = '{cookie} {status}{cookie_char}{line}'
    else:
        output_template = '{line}'
    for i, line in enumerate(lines):
        if i != len(lines) - 1 or keep is True:
            # More lines follow (or caller promised more output).
            cookie_char = '>'
        else:
            # last line
            cookie_char = ':'
        print(output_template.format(
            cookie_char=cookie_char,
            cookie=cookie,
            status=status or '',
            line=line.strip()), file=self.stdout)
def do_escape_nl(self, arg):
    """
    Escape newlines in any responses.

    :param arg: 'off' (case-insensitive) disables escaping; any other
        value enables it.
    """
    self.escape_nl = arg.lower() != 'off'
def do_prompt(self, arg):
    """
    Enable or disable prompt

    :param arg: on|off
    :return:
    """
    if arg.lower() == 'off':
        self.response_prompt = ''
        self.prompt = ''
        return
    elif arg.lower() == 'on':
        self.prompt = PROMPT
        self.response_prompt = RESPONSE_PROMPT
    # NOTE(review): the extra positional arguments land in
    # print_response's `keep` and *args parameters, so only the first
    # string is actually printed -- confirm this call is intentional.
    self.print_response('prompt: %s' % self.prompt, '\n', 'response: %s' % self.response_prompt)
def do_speed(self, speed):
    """
    Set the bot's framerate, or report the current one.

    :param speed: new framerate in FPS; when empty the current speed is
        reported unchanged.
    """
    # (Fixed copy-pasted "rewind" docstring; narrowed the exception to
    # what float() can actually raise.)
    if speed:
        try:
            self.bot._speed = float(speed)
        except (TypeError, ValueError):
            self.print_response('%s is not a valid framerate' % speed)
            return
    self.print_response('Speed: %s FPS' % self.bot._speed)
def do_restart(self, line):
    """
    Attempt to restart the bot.

    Resets the frame counter to zero and restores the namespace to its
    initial contents (in place, so references stay valid).
    """
    bot = self.bot
    bot._frame = 0
    bot._namespace.clear()
    bot._namespace.update(bot._initial_namespace)
def do_pause(self, line):
    """
    Toggle pause.

    Pausing stashes the current speed in pause_speed and sets the bot's
    speed to 0; toggling again restores the stashed speed.
    """
    # along with stuff in socketserver and shell
    if self.pause_speed is not None:
        # Currently paused: restore the saved speed.
        self.bot._speed = self.pause_speed
        self.pause_speed = None
        self.print_response('Playing')
    else:
        # Currently playing: save speed and halt.
        self.pause_speed = self.bot._speed
        self.bot._speed = 0
        self.print_response('Paused')
def do_play(self, line):
    """
    Resume playback if bot is paused.
    """
    # Bug fix: the original tested `is None`, which clobbered _speed
    # with None when the bot was NOT paused and did nothing when it was.
    if self.pause_speed is not None:
        self.bot._speed = self.pause_speed
        self.pause_speed = None
    self.print_response("Play")
def do_goto(self, line):
    """
    Jump the bot to a specific frame.

    :param line: frame number, parsed as int.
    :return:
    """
    message = "Go to frame %s" % line
    self.print_response(message)
    self.bot._frame = int(line)
def do_rewind(self, line):
    """
    Rewind playback to frame 0.
    """
    message = "Rewinding from frame %s to 0" % self.bot._frame
    self.print_response(message)
    self.bot._frame = 0
def do_vars(self, line):
    """
    List bot variables and values, one aligned 'name = value' per line.
    """
    variables = self.bot._vars
    if not variables:
        self.print_response("No vars")
        return
    # Pad names so the '=' signs line up.
    width = max(len(name) for name in variables)
    last = len(variables) - 1
    for index, (name, var) in enumerate(variables.items()):
        entry = "%s = %s" % (name.ljust(width), var.value)
        self.print_response(entry, keep=index < last)
def do_load_base64(self, line):
    """
    load filename=(file)
    load base64=(base64 encoded)

    Send new code to shoebot.

    If it does not run successfully shoebot will attempt to role back.

    Editors can enable livecoding by sending new code as it is edited.
    """
    cookie = self.cookie
    executor = self.bot._executor
    called_good = False

    def source_good():
        # Record success so a stray late bad callback can be detected.
        # (Bug fix: called_good was never set, making the guard in
        # source_bad dead code.)
        nonlocal called_good
        called_good = True
        self.print_response(status=RESPONSE_CODE_OK, cookie=cookie)
        executor.clear_callbacks()

    def source_bad(tb):
        if called_good:
            # good and bad callbacks shouldn't both be called
            raise ValueError('Good AND Bad callbacks called !')
        self.print_response(status=RESPONSE_REVERTED, keep=True, cookie=cookie)
        self.print_response(tb.replace('\n', '\\n'), cookie=cookie)
        executor.clear_callbacks()

    # NOTE(review): str() of a bytes object yields "b'...'" on Python 3;
    # .decode() is probably intended here -- confirm before changing.
    source = str(base64.b64decode(line))
    # Test compile
    publish_event(SOURCE_CHANGED_EVENT, data=source, extra_channels="shoebot.source")
    self.bot._executor.load_edited_source(source, good_cb=source_good, bad_cb=source_bad)
def do_exit(self, line):
    """
    Exit shell and shoebot

    :return: True, which stops the cmd.Cmd command loop.
    """
    # Only a trusted (local) shell may shut shoebot itself down.
    if self.trusted:
        publish_event(QUIT_EVENT)
    self.print_response('Bye.\n')
    return True
def do_fullscreen(self, line):
    """
    Make the current window fullscreen
    """
    self.bot.canvas.sink.trigger_fullscreen_action(True)
    # Emit a bare response prompt so the client knows the command ran.
    print(self.response_prompt, file=self.stdout)
def do_windowed(self, line):
    """
    Un-fullscreen the current window
    """
    self.bot.canvas.sink.trigger_fullscreen_action(False)
    # Emit a bare response prompt so the client knows the command ran.
    print(self.response_prompt, file=self.stdout)
def do_EOF(self, line):
    """
    Exit shell and shoebot

    Alias for exit.
    """
    print(self.response_prompt, file=self.stdout)
    return self.do_exit(line)
def do_help(self, arg):
    """
    Show help on all commands.
    """
    print(self.response_prompt, file=self.stdout)
    # Delegate to the stock cmd.Cmd help implementation.
    return cmd.Cmd.do_help(self, arg)
def do_set(self, line):
    """
    Set a variable.

    Expects 'name=value'.  The value is run through the variable's own
    sanitize() before being pushed to the canvas sink.
    """
    try:
        name, value = [part.strip() for part in line.split('=')]
        if name not in self.bot._vars:
            self.print_response('No such variable %s enter vars to see available vars' % name)
            return
        variable = self.bot._vars[name]
        # Strip a trailing ';' so pasted code lines work unchanged.
        variable.value = variable.sanitize(value.strip(';'))
        success, msg = self.bot.canvas.sink.var_changed(name, variable.value)
        if success:
            print('{}={}'.format(name, variable.value), file=self.stdout)
        else:
            print('{}\n'.format(msg), file=self.stdout)
    except Exception as e:
        # Covers malformed input (e.g. no '=' or multiple '=').
        print('Invalid Syntax.', e)
        return
def precmd(self, line):
    """
    Allow commands to have a last parameter of 'cookie=somevalue'

    TODO somevalue will be prepended onto any output lines so
    that editors can distinguish output from certain kinds
    of events they have sent.

    :param line: raw command line.
    :return: possibly rewritten command line ('' to ignore the input).
    """
    args = shlex.split(line or "")
    if args and 'cookie=' in args[-1]:
        # NOTE(review): index() finds the FIRST 'cookie=' in the line,
        # which may not be the last argument -- confirm acceptable.
        cookie_index = line.index('cookie=')
        cookie = line[cookie_index + 7:]
        line = line[:cookie_index].strip()
        self.cookie = cookie
    if line.startswith('#'):
        # Comment line: ignore entirely.
        return ''
    elif '=' in line:
        # allow somevar=somevalue
        # first check if we really mean a command
        cmdname = line.partition(" ")[0]
        if hasattr(self, "do_%s" % cmdname):
            return line
        if not line.startswith("set "):
            # Rewrite bare assignment as a 'set' command.
            return "set " + line
        else:
            return line
    if len(args) and args[0] in self.shortcuts:
        # Expand single-character shortcuts to their full command.
        return "%s %s" % (self.shortcuts[args[0]], " ".join(args[1:]))
    else:
        return line
def drawdaisy(x, y, color='#fefefe'):
    """
    Draw a daisy at x, y

    :param x: x position of the flower head.
    :param y: y position of the flower head.
    :param color: petal colour.

    Note: Python 2 (uses xrange); draws via the global _ctx canvas.
    """
    # save location, size etc
    _ctx.push()
    # save fill and stroke
    _fill =_ctx.fill()
    _stroke = _ctx.stroke()
    # Scale factor grows with y so lower daisies look nearer.
    sc = (1.0 / _ctx.HEIGHT) * float(y * 0.5) * 4.0
    # draw stalk
    _ctx.strokewidth(sc * 2.0)
    _ctx.stroke('#3B240B')
    # Slight sway animated by the current frame number.
    _ctx.line(x + (sin(x * 0.1) * 10.0), y + 80, x + sin(_ctx.FRAME * 0.1), y)
    # draw flower
    _ctx.translate(-20, 0)
    _ctx.scale(sc)
    # draw petals
    _ctx.fill(color)
    _ctx.nostroke()
    # Eight petals, one every 45 degrees.
    for angle in xrange(0, 360, 45):
        _ctx.rotate(degrees=45)
        _ctx.rect(x, y, 40, 8, 1)
    # draw centre
    _ctx.fill('#F7FE2E')
    _ctx.ellipse(x + 15, y, 10, 10)
    # restore fill and stroke
    _ctx.fill(_fill)
    _ctx.stroke(_stroke)
    # restore location, size etc
    _ctx.pop()
def fft_bandpassfilter(data, fs, lowcut, highcut):
    """
    FFT-based band-pass filter, adapted from:
    http://www.swharden.com/blog/2009-01-21-signal-filtering-with-python/#comment-16801

    NOTE(review): fs, lowcut and highcut are currently unused -- the
    commented-out coefficient zeroing below is where they would apply.
    """
    fft = np.fft.fft(data)
    # n = len(data)
    # timestep = 1.0 / fs
    # freq = np.fft.fftfreq(n, d=timestep)
    bp = fft.copy()
    # Zero out fft coefficients
    # bp[10:-10] = 0
    # Normalise
    # bp *= real(fft.dot(fft))/real(bp.dot(bp))
    bp *= fft.dot(fft) / bp.dot(bp)
    # must multipy by 2 to get the correct amplitude
    # NOTE(review): the comment says 2 but the code multiplies by 12 --
    # confirm which is intended before relying on the amplitude.
    ibp = 12 * np.fft.ifft(bp)
    return ibp
def flatten_fft(scale=1.0):
    """
    Produces a nicer graph, I'm not sure if this is correct

    Yields each spectrogram bin weighted by its index, normalised by
    the number of bins.  Reads the module-level `audio.spectrogram`.
    """
    _len = len(audio.spectrogram)
    for i, v in enumerate(audio.spectrogram):
        yield scale * (i * v) / _len
def scaled_fft(fft, scale=1.0):
    """
    Produces a nicer graph, I'm not sure if this is correct

    Each bin is weighted by its index and normalised by the
    module-level NUM_SAMPLES constant.
    """
    values = np.asarray(fft, dtype=float)
    indices = np.arange(len(values))
    return scale * (indices * values) / NUM_SAMPLES
def get_source(self, doc):
    """
    Return the full text contents of 'doc'.

    :param doc: The active document
    :return: the document text as a string
    """
    bounds = (doc.get_start_iter(), doc.get_end_iter())
    return doc.get_text(bounds[0], bounds[1], False)
def _create_view(self, name="shoebot-output"):
    """ Create the gtk.TextView used for shell output (GTK 2 variant). """
    view = gtk.TextView()
    view.set_editable(False)
    # Monospace keeps tracebacks and aligned output readable.
    fontdesc = pango.FontDescription("Monospace")
    view.modify_font(fontdesc)
    view.set_name(name)
    buff = view.get_buffer()
    # The 'error' tag renders stderr/tracebacks in red.
    buff.create_tag('error', foreground='red')
    return view
def _create_view(self, name="shoebot-output"):
    """
    Create the gtk.TextView inside a Gtk.ScrolledWindow (GTK 3 variant).

    :return: container, text_view
    """
    text_view = Gtk.TextView()
    text_view.set_editable(False)
    # Monospace keeps tracebacks and aligned output readable.
    fontdesc = Pango.FontDescription("Monospace")
    text_view.modify_font(fontdesc)
    text_view.set_name(name)
    buff = text_view.get_buffer()
    # The 'error' tag renders stderr/tracebacks in red.
    buff.create_tag('error', foreground='red')
    container = Gtk.ScrolledWindow()
    container.add(text_view)
    container.show_all()
    return container, text_view
def openAnything(source, searchpaths=None):
    """URI, filename, or string --> stream

    This function lets you define parsers that take any input source
    (URL, pathname to local or network file, or actual data as a string)
    and deal with it in a uniform manner. Returned object is guaranteed
    to have all the basic stdio read methods (read, readline, readlines).
    Just .close() the object when you're done with it.

    Note: Python 2 code (urllib.urlopen, StringIO module).

    :param source: file-like object, "-" for stdin, URL, path, or raw data.
    :param searchpaths: optional list of directories to try for pathnames.

    Examples:
    >>> from xml.dom import minidom
    >>> sock = openAnything("http://localhost/kant.xml")
    >>> doc = minidom.parse(sock)
    >>> sock.close()
    >>> sock = openAnything("c:\\inetpub\\wwwroot\\kant.xml")
    >>> doc = minidom.parse(sock)
    >>> sock.close()
    >>> sock = openAnything("<ref id='conjunction'><text>and</text><text>or</text></ref>")
    >>> doc = minidom.parse(sock)
    >>> sock.close()
    """
    # Already a stream: hand it straight back.
    if hasattr(source, "read"):
        return source
    if source == "-":
        import sys
        return sys.stdin
    # try to open with urllib (if source is http, ftp, or file URL)
    import urllib
    try:
        return urllib.urlopen(source)
    except (IOError, OSError):
        pass
    # try to open with native open function (if source is pathname)
    for path in searchpaths or ['.']:
        try:
            return open(os.path.join(path, source))
        except (IOError, OSError):
            pass
    # treat source as string
    import StringIO
    return StringIO.StringIO(str(source))
def _load(self, source, searchpaths=None):
    """load XML input source, return parsed XML document

    Accepts any of:
    - a URL of a remote XML file ("http://diveintopython.org/kant.xml")
    - a filename of a local XML file ("~/diveintopython/common/py/kant.xml")
    - standard input ("-")
    - the actual XML document, as a string

    :param searchpaths: optional searchpaths if file is used.
    """
    sock = openAnything(source, searchpaths=searchpaths)
    # Return the document element, not the Document wrapper.
    xmldoc = minidom.parse(sock).documentElement
    sock.close()
    return xmldoc
def loadGrammar(self, grammar, searchpaths=None):
    """Load a context-free grammar and index its <ref> elements by id."""
    self.grammar = self._load(grammar, searchpaths=searchpaths)
    self.refs = {
        ref.attributes["id"].value: ref
        for ref in self.grammar.getElementsByTagName("ref")
    }
def loadSource(self, source, searchpaths=None):
    """load source

    Parses `source` (URL, path, '-', or raw XML) and stores the
    resulting document element in self.source.
    """
    self.source = self._load(source, searchpaths=searchpaths)
def getDefaultSource(self):
    """guess default source of the current grammar

    The default source will be one of the <ref>s that is not
    cross-referenced. This sounds complicated but it's not.
    Example: The default source for kant.xml is
    "<xref id='section'/>", because 'section' is the one <ref>
    that is not <xref>'d anywhere in the grammar.
    In most grammars, the default source will produce the
    longest (and most interesting) output.

    Note: Python 2 code (old-style raise statement).
    """
    # Collect every id that IS cross-referenced somewhere.
    xrefs = {}
    for xref in self.grammar.getElementsByTagName("xref"):
        xrefs[xref.attributes["id"].value] = 1
    xrefs = xrefs.keys()
    # Candidates are refs never mentioned by an <xref>.
    standaloneXrefs = [e for e in self.refs.keys() if e not in xrefs]
    if not standaloneXrefs:
        raise NoSourceError, "can't guess source, and no source specified"
    return '<xref id="%s"/>' % random.choice(standaloneXrefs)
def refresh(self):
    """reset output buffer, re-parse entire source file, and return output

    Since parsing involves a good deal of randomness, this is an
    easy way to get new output without having to reload a grammar file
    each time.
    """
    self.reset()
    self.parse(self.source)
    return self.output()
def randomChildElement(self, node):
    """choose a random child element of a node

    This is a utility method used by do_xref and do_choice.
    Only ELEMENT_NODE children are considered (text/comments skipped).
    """
    choices = [e for e in node.childNodes
               if e.nodeType == e.ELEMENT_NODE]
    chosen = random.choice(choices)
    if _debug:
        # Module-level _debug flag enables trace output on stderr.
        sys.stderr.write('%s available choices: %s\n' % \
            (len(choices), [e.toxml() for e in choices]))
        sys.stderr.write('Chosen: %s\n' % chosen.toxml())
    return chosen
def parse(self, node):
    """parse a single XML node

    A parsed XML document (from minidom.parse) is a tree of nodes
    of various types. Each node is represented by an instance of the
    corresponding Python class (Element for a tag, Text for
    text data, Document for the top-level document). The following
    statement constructs the name of a class method based on the type
    of node we're parsing ("parse_Element" for an Element node,
    "parse_Text" for a Text node, etc.) and then calls the method.
    """
    # Dynamic dispatch on the node's class name.
    parseMethod = getattr(self, "parse_%s" % node.__class__.__name__)
    parseMethod(node)
def parse_Text(self, node):
    """Parse a text node, appending its text to the output buffer.

    Normally the text is added verbatim.  The one exception: when the
    capitalizeNextWord flag was set (by <p class='sentence'>), the
    first letter is upper-cased and the flag is cleared.
    """
    text = node.data
    if not self.capitalizeNextWord:
        self.pieces.append(text)
        return
    self.pieces.append(text[0].upper())
    self.pieces.append(text[1:])
    self.capitalizeNextWord = 0
def parse_Element(self, node):
    """parse an element

    An XML element corresponds to an actual tag in the source:
    <xref id='...'>, <p chance='...'>, <choice>, etc.
    Each element type is handled in its own method. Like we did in
    parse(), we construct a method name based on the name of the
    element ("do_xref" for an <xref> tag, etc.) and
    call the method.
    """
    # Dynamic dispatch on the tag name.
    handlerMethod = getattr(self, "do_%s" % node.tagName)
    handlerMethod(node)
def do_xref(self, node):
    """handle <xref id='...'> tag

    An <xref id='...'> tag is a cross-reference to a <ref id='...'>
    tag. <xref id='sentence'/> evaluates to a randomly chosen child of
    <ref id='sentence'>.
    """
    # (Local `id` shadows the builtin; kept for compatibility.)
    id = node.attributes["id"].value
    self.parse(self.randomChildElement(self.refs[id]))
def do_p(self, node):
    """handle <p> tag

    The <p> tag is the core of the grammar. It can contain almost
    anything: freeform text, <choice> tags, <xref> tags, even other
    <p> tags. If a "class='sentence'" attribute is found, a flag
    is set and the next word will be capitalized. If a "chance='X'"
    attribute is found, there is an X% chance that the tag will be
    evaluated (and therefore a (100-X)% chance that it will be
    completely ignored)
    """
    keys = node.attributes.keys()
    if "class" in keys:
        if node.attributes["class"].value == "sentence":
            # Next text node will get its first letter capitalized.
            self.capitalizeNextWord = 1
    if "chance" in keys:
        chance = int(node.attributes["chance"].value)
        # randrange(100) yields 0..99, so chance=100 always evaluates.
        doit = (chance > random.randrange(100))
    else:
        doit = 1
    if doit:
        for child in node.childNodes: self.parse(child)
def replace_entities(ustring, placeholder=" "):
    """Replaces HTML special characters by readable characters.

    As taken from Leif K-Brooks algorithm on:
    http://groups-beta.google.com/group/comp.lang.python

    Note: Python 2 code (unichr, unicode, UnicodeDammit).
    Unresolvable entities are replaced with `placeholder`.
    """
    def _repl_func(match):
        try:
            if match.group(1): # Numeric character reference
                return unichr( int(match.group(2)) )
            else:
                # Named entity: try the cp1252 table first, then the
                # standard HTML name-to-codepoint table.
                try: return cp1252[ unichr(int(match.group(3))) ].strip()
                except: return unichr( name2codepoint[match.group(3)] )
        except:
            return placeholder
    # Force to Unicode.
    if not isinstance(ustring, unicode):
        ustring = UnicodeDammit(ustring).unicode
    # Don't want some weird unicode character here
    # that truncate_spaces() doesn't know of:
    # NOTE(review): the first replace() argument appears to be a
    # non-breaking space (U+00A0) -- confirm the literal survived
    # copy/paste intact.
    ustring = ustring.replace(" ", " ")
    # The ^> makes sure nothing inside a tag (i.e. href with query arguments) gets processed.
    _entity_re = re.compile(r'&(?:(#)(\d+)|([^;^> ]+));')
    return _entity_re.sub(_repl_func, ustring)
def open(url, wait=10):
    """ Returns a connection to a url which you can read().

    When the wait amount is exceeded, raises a URLTimeout.
    When an error occurs, raises a URLError.
    404 errors specifically return a HTTP404NotFound.

    Note: Python 2 code (urllib2, old-style except syntax).
    Shadows the builtin open() in this module.
    """
    # If the url is a URLParser, get any POST parameters.
    post = None
    if isinstance(url, URLParser) and url.method == "post":
        post = urllib.urlencode(url.query)
    # If the url is a URLParser (or a YahooResult or something),
    # use its string representation.
    url = str(url)
    # Use urllib instead of urllib2 for local files.
    if os.path.exists(url):
        return urllib.urlopen(url)
    else:
        socket.setdefaulttimeout(wait)
        try:
            #connection = urllib2.urlopen(url, post)
            request = urllib2.Request(url, post, {"User-Agent": USER_AGENT, "Referer": REFERER})
            if PROXY:
                # Route the request through the module-level proxy.
                p = urllib2.ProxyHandler({PROXY[1]: PROXY[0]})
                o = urllib2.build_opener(p, urllib2.HTTPHandler)
                urllib2.install_opener(o)
            connection = urllib2.urlopen(request)
        except urllib2.HTTPError, e:
            # Translate common HTTP status codes to specific exceptions.
            if e.code == 401: raise HTTP401Authentication
            if e.code == 403: raise HTTP403Forbidden
            if e.code == 404: raise HTTP404NotFound
            raise HTTPError
        except urllib2.URLError, e:
            # errno 36 indicates an operation timeout.
            if e.reason[0] == 36: raise URLTimeout
            raise URLError
    return connection
def not_found(url, wait=10):
    """ Returns True when the url generates a "404 Not Found" error.

    Any other failure (timeout, other HTTP errors) returns False.
    NOTE(review): the bare except also swallows KeyboardInterrupt --
    consider narrowing if this code is ever modernised.
    """
    try: connection = open(url, wait)
    except HTTP404NotFound:
        return True
    except:
        return False
    return False
def is_type(url, types=None, wait=10):
    """ Determine the MIME-type of the document behind the url.

    MIME is more reliable than simply checking the document extension.
    Returns True when the MIME-type starts with anything in the list of types.

    :param url: address of the document to inspect.
    :param types: a MIME prefix or list of prefixes to match against.
    :param wait: connection timeout in seconds.
    """
    # (Fixed mutable default argument; narrowed the bare except.)
    if types is None:
        types = []
    # Types can also be a single string for convenience.
    if isinstance(types, str):
        types = [types]
    try:
        connection = open(url, wait)
    except Exception:
        # Unreachable or invalid URLs can never match.
        return False
    content_type = connection.info()["Content-Type"]
    return any(content_type.startswith(t) for t in types)
def requirements(debug=True, with_examples=True, with_pgi=None):
    """
    Build the list of install requirements based on flags.

    :param debug: when True, print the chosen options and the final list.
    :param with_examples: include the example dependencies.
    :param with_pgi: Use 'pgi' instead of 'gi' - False on CPython, True elsewhere.
    :return: list of requirement strings.
    """
    if with_pgi is None:
        # Default by platform: only Jython needs pgi.
        with_pgi = is_jython
    reqs = list(BASE_REQUIREMENTS)
    if debug:
        print("setup options: ")
        print("with_pgi: ", "yes" if with_pgi else "no")
        print("with_examples: ", "yes" if with_examples else "no")
    if with_pgi:
        reqs.append("pgi")
        if debug:
            print("warning, as of April 2019 typography does not work with pgi")
    else:
        reqs.append(PYGOBJECT)
    if with_examples:
        reqs.extend(EXAMPLE_REQUIREMENTS)
    if debug:
        print("")
        print("")
        for req in reqs:
            print(req)
    return reqs
def image(self, path, x, y, width=None, height=None, alpha=1.0, data=None, draw=True, **kwargs):
    '''Draws a image form path, in x,y and resize it to width, height dimensions.

    :param path: image source path (or data source).
    :param x: x-coordinate.
    :param y: y-coordinate.
    :param width: optional target width.
    :param height: optional target height.
    :param alpha: opacity, 0.0-1.0.
    :param data: optional raw image data.
    NOTE(review): the `draw` flag is accepted but not forwarded to
    self.Image -- confirm whether Image draws unconditionally.
    '''
    return self.Image(path, x, y, width, height, alpha, data, **kwargs)
def rect(self, x, y, width, height, roundness=0.0, draw=True, **kwargs):
    '''
    Draw a rectangle from x, y of width, height.

    :param startx: top left x-coordinate
    :param starty: top left y-coordinate
    :param width: height Size of rectangle.
    :roundness: Corner roundness defaults to 0.0 (a right-angle).
    :draw: If True draws immediately.
    :fill: Optionally pass a fill color.

    :return: path representing the rectangle.
    '''
    path = self.BezierPath(**kwargs)
    # Respect the current rectmode (CORNER/CENTER/CORNERS).
    path.rect(x, y, width, height, roundness, self.rectmode)
    if draw:
        path.draw()
    return path
def rectmode(self, mode=None):
    '''
    Set the current rectmode.

    :param mode: CORNER, CENTER, CORNERS
    :return: rectmode if mode is None or valid.

    NOTE(review): assigning self.rectmode shadows this method on the
    instance -- confirm how the surrounding bot class resolves this.
    '''
    if mode in (self.CORNER, self.CENTER, self.CORNERS):
        self.rectmode = mode
        return self.rectmode
    elif mode is None:
        return self.rectmode
    else:
        raise ShoebotError(_("rectmode: invalid input"))
def ellipsemode(self, mode=None):
    '''
    Set the current ellipse drawing mode.

    :param mode: CORNER, CENTER, CORNERS
    :return: ellipsemode if mode is None or valid.

    NOTE(review): assigning self.ellipsemode shadows this method on the
    instance -- confirm how the surrounding bot class resolves this.
    '''
    if mode in (self.CORNER, self.CENTER, self.CORNERS):
        self.ellipsemode = mode
        return self.ellipsemode
    elif mode is None:
        return self.ellipsemode
    else:
        raise ShoebotError(_("ellipsemode: invalid input"))
def circle(self, x, y, diameter, draw=True, **kwargs):
    '''Draw a circle

    :param x: x-coordinate of the top left corner
    :param y: y-coordinate of the top left corner
    :param diameter: Diameter of circle.
    :param draw: Draw immediately (defaults to True, set to False to inhibit drawing)
    :return: Path object representing circle
    '''
    # A circle is an ellipse with equal width and height.
    return self.ellipse(x, y, diameter, diameter, draw, **kwargs)
def arrow(self, x, y, width, type=NORMAL, draw=True, **kwargs):
    '''Draw an arrow.

    Arrows can be two types: NORMAL or FORTYFIVE.

    :param x: top left x-coordinate
    :param y: top left y-coordinate
    :param width: width of arrow
    :param type: NORMAL or FORTYFIVE
    :draw: If True draws arrow immediately

    :return: Path object representing the arrow.
    '''
    # Taken from Nodebox
    path = self.BezierPath(**kwargs)
    if type == self.NORMAL:
        # Horizontal arrow pointing right at (x, y); head/tail sizes are
        # fixed proportions of the width.
        head = width * .4
        tail = width * .2
        path.moveto(x, y)
        path.lineto(x - head, y + head)
        path.lineto(x - head, y + tail)
        path.lineto(x - width, y + tail)
        path.lineto(x - width, y - tail)
        path.lineto(x - head, y - tail)
        path.lineto(x - head, y - head)
        path.lineto(x, y)
    elif type == self.FORTYFIVE:
        # Diagonal (45 degree) arrow; proportions per the Nodebox original.
        head = .3
        tail = 1 + head
        path.moveto(x, y)
        path.lineto(x, y + width * (1 - head))
        path.lineto(x - width * head, y + width)
        path.lineto(x - width * head, y + width * tail * .4)
        path.lineto(x - width * tail * .6, y + width)
        path.lineto(x - width, y + width * tail * .6)
        path.lineto(x - width * tail * .4, y + width * head)
        path.lineto(x - width, y + width * head)
        path.lineto(x - width * (1 - head), y)
        path.lineto(x, y)
    else:
        raise NameError(_("arrow: available types for arrow() are NORMAL and FORTYFIVE\n"))
    if draw:
        path.draw()
    return path
def star(self, startx, starty, points=20, outer=100, inner=50, draw=True, **kwargs):
    '''Draw a star-shaped path.

    :param startx: center x-coordinate
    :param starty: center y-coordinate
    :param points: number of points on the star
    :param outer: radius of the outer points
    :param inner: radius of the inner points
    :param draw: if True, draw the path immediately
    :return: Path object representing the star
    '''
    # Geometry taken from Nodebox: sweep around the center, alternating
    # between the outer and inner radius on each step.
    self.beginpath(**kwargs)
    self.moveto(startx, starty + outer)
    for step in range(1, int(2 * points)):
        angle = step * pi / points
        radius = inner if step % 2 else outer
        self.lineto(startx + radius * sin(angle),
                    starty + radius * cos(angle))
    return self.endpath(draw)
def drawimage(self, image, x=None, y=None):
    """
    Draw an Image, optionally at an overridden position.

    :param image: Image to draw
    :param x: optional, x coordinate (default is image.x)
    :param y: optional, y coordinate (default is image.y)
    """
    if x is None:
        x = image.x
    if y is None:
        y = image.y
    # Bug fix: the computed x/y overrides were previously ignored and
    # image.x/image.y were always passed through.
    self.image(image.path, x, y, data=image.data)
def relmoveto(self, x, y):
    '''Move relatively to the last point.

    :param x: horizontal offset from the current point
    :param y: vertical offset from the current point
    :raises ShoebotError: if no path was started with beginpath()
    '''
    current = self._path
    if current is None:
        raise ShoebotError(_("No current path. Use beginpath() first."))
    current.relmoveto(x, y)
def rellineto(self, x, y):
    '''Draw a line using relative coordinates.

    :param x: horizontal offset from the current point
    :param y: vertical offset from the current point
    :raises ShoebotError: if no path was started with beginpath()
    '''
    current = self._path
    if current is None:
        raise ShoebotError(_("No current path. Use beginpath() first."))
    current.rellineto(x, y)
def relcurveto(self, h1x, h1y, h2x, h2y, x, y):
    '''Draw a curve relatively to the last point.

    :param h1x, h1y: first control handle, relative to the current point
    :param h2x, h2y: second control handle, relative to the current point
    :param x, y: curve end point, relative to the current point
    :raises ShoebotError: if no path was started with beginpath()
    '''
    current = self._path
    if current is None:
        raise ShoebotError(_("No current path. Use beginpath() first."))
    current.relcurveto(h1x, h1y, h2x, h2y, x, y)
def findpath(self, points, curvature=1.0):
    """Constructs a path between the given list of points.

    Interpolates the list of points and determines
    a smooth bezier path betweem them.

    The curvature parameter offers some control on
    how separate segments are stitched together:
    from straight angles to smooth curves.
    Curvature is only useful if the path has more than three points.

    :param points: list of Point objects (or (x, y) tuples)
    :param curvature: 0.0 (straight lines) .. 1.0 (smoothest curves)
    :return: a BezierPath, or None for an empty point list
    """
    # The list of points consists of Point objects,
    # but it shouldn't crash on something straightforward
    # as someone supplying a list of (x,y)-tuples.
    for i, pt in enumerate(points):
        if type(pt) == TupleType:
            points[i] = Point(pt[0], pt[1])
    # Degenerate cases: nothing to draw, a single point, or one straight
    # segment -- no interpolation needed.
    if len(points) == 0:
        return None
    if len(points) == 1:
        path = self.BezierPath(None)
        path.moveto(points[0].x, points[0].y)
        return path
    if len(points) == 2:
        path = self.BezierPath(None)
        path.moveto(points[0].x, points[0].y)
        path.lineto(points[1].x, points[1].y)
        return path
    # Zero curvature means straight lines.
    curvature = max(0, min(1, curvature))
    if curvature == 0:
        path = self.BezierPath(None)
        path.moveto(points[0].x, points[0].y)
        for i in range(len(points)):
            path.lineto(points[i].x, points[i].y)
        return path
    # Map the user-facing [0, 1] curvature onto the solver coefficient
    # (4 = smoothest .. 44 = stiffest).
    curvature = 4 + (1.0 - curvature) * 40
    # Solve for per-point handle offsets (dx, dy); the two endpoints are
    # pinned at zero offset. The bi/ax/ay recurrences below appear to be
    # a tridiagonal (Thomas-style) forward elimination -- the exact
    # statement order matters, do not reorder.
    dx = {0: 0, len(points) - 1: 0}
    dy = {0: 0, len(points) - 1: 0}
    bi = {1: -0.25}
    ax = {1: (points[2].x - points[0].x - dx[0]) / 4}
    ay = {1: (points[2].y - points[0].y - dy[0]) / 4}
    for i in range(2, len(points) - 1):
        bi[i] = -1 / (curvature + bi[i - 1])
        ax[i] = -(points[i + 1].x - points[i - 1].x - ax[i - 1]) * bi[i]
        ay[i] = -(points[i + 1].y - points[i - 1].y - ay[i - 1]) * bi[i]
    # Back substitution, walking the interior points in reverse.
    # NOTE: list-style range().reverse() implies Python 2 here.
    r = range(1, len(points) - 1)
    r.reverse()
    for i in r:
        dx[i] = ax[i] + dx[i + 1] * bi[i]
        dy[i] = ay[i] + dy[i + 1] * bi[i]
    # One curveto per segment, using the solved offsets as the bezier
    # handles on either side of each point.
    path = self.BezierPath(None)
    path.moveto(points[0].x, points[0].y)
    for i in range(len(points) - 1):
        path.curveto(points[i].x + dx[i],
                     points[i].y + dy[i],
                     points[i + 1].x - dx[i + 1],
                     points[i + 1].y - dy[i + 1],
                     points[i + 1].x,
                     points[i + 1].y)
    return path
def transform(self, mode=None):
    '''
    Set and/or query the current transform mode.

    :param mode: CENTER or CORNER (omit to only query)
    :return: the current transform mode
    '''
    canvas = self._canvas
    if mode:
        canvas.mode = mode
    return canvas.mode
def translate(self, xt, yt, mode=None):
    '''
    Translate the canvas origin by (xt, yt), optionally switching the
    transform mode at the same time.

    :param xt: amount to move horizontally
    :param yt: amount to move vertically
    :param mode: optionally set the transform mode to CENTER or CORNER
    '''
    canvas = self._canvas
    canvas.translate(xt, yt)
    if mode:
        canvas.mode = mode
def scale(self, x=1, y=None):
    '''
    Set a scale at which to draw objects.

    1.0 draws objects at their natural size.

    :param x: scale on the horizontal plane
    :param y: scale on the vertical plane (defaults to x)
    '''
    if not y:
        y = x
    # Cairo borks on zero values, so substitute the identity scale.
    if x == 0:
        x = 1
    if y == 0:
        y = 1
    self._canvas.scale(x, y)
def fill(self, *args):
    '''Set the fill color, applying it to new paths.

    :param args: color in supported format; omit to only query
    :return: the current fill color
    '''
    # Bug fix: ``args`` is a tuple and can never be None, so the old
    # ``args is not None`` test was always true -- a bare fill() call
    # clobbered the current color with color(). Only set when arguments
    # were actually given, so fill() acts as a getter.
    if args:
        self._canvas.fillcolor = self.color(*args)
    return self._canvas.fillcolor
def stroke(self, *args):
    '''Set a stroke color, applying it to new paths.

    :param args: color in supported format; omit to only query
    :return: the current stroke color
    '''
    # Bug fix: ``args`` is a tuple and can never be None, so the old
    # ``args is not None`` test was always true -- a bare stroke() call
    # clobbered the current color with color(). Only set when arguments
    # were actually given, so stroke() acts as a getter.
    if args:
        self._canvas.strokecolor = self.color(*args)
    return self._canvas.strokecolor
def nostroke(self):
    '''Stop applying strokes to new paths.

    :return: the stroke color that was in effect before the call
    '''
    previous = self._canvas.strokecolor
    self._canvas.strokecolor = None
    return previous
def strokewidth(self, w=None):
    '''Set or query the stroke width.

    :param w: new stroke width
    :return: the current width when no width was specified
    '''
    if w is None:
        return self._canvas.strokewidth
    self._canvas.strokewidth = w
def font(self, fontpath=None, fontsize=None):
    '''Set the font to be used with new text instances.

    Accepts TrueType and OpenType files. Depends on FreeType being
    installed.

    :param fontpath: path to truetype or opentype font.
    :param fontsize: size of font
    :return: current fontpath (if fontpath param not set)
    '''
    # Bug fix: the old code returned early when fontpath was None, so
    # font(fontsize=12) silently ignored the size. Apply fontsize first,
    # then set or report the font file.
    if fontsize is not None:
        self._canvas.fontsize = fontsize
    if fontpath is not None:
        self._canvas.fontfile = fontpath
    else:
        return self._canvas.fontfile
def fontsize(self, fontsize=None):
    '''
    Set or return the size of the current font.

    :param fontsize: size of font
    :return: current size when no size was specified
    '''
    if fontsize is None:
        return self._canvas.fontsize
    self._canvas.fontsize = fontsize
def text(self, txt, x, y, width=None, height=1000000, outline=False, draw=True, **kwargs):
    '''
    Draw a string of text according to the current font settings.

    :param txt: text to output
    :param x: x-coordinate of the top left corner
    :param y: y-coordinate of the top left corner
    :param width: text width
    :param height: text height
    :param outline: if True, draw outline text (defaults to False)
    :param draw: set to False to inhibit immediate drawing (defaults to True)
    :return: a Path object when outline is True, otherwise the Text object
    '''
    text_obj = self.Text(txt, x, y, width, height, outline=outline, ctx=None, **kwargs)
    if not outline:
        return text_obj
    path = text_obj.path
    if draw:
        path.draw()
    return path
def textheight(self, txt, width=None):
    '''Return the height of a string of text according to the current
    font settings.

    :param txt: string to measure
    :param width: width of a line of text in a block
    :return: height component of the text metrics
    '''
    return self.textmetrics(txt, width=width)[1]
def graph_background(s):
    """ Graph background color.

    Clears the canvas to ``s.background`` (None meaning transparent)
    and, when the style has depth enabled, overlays a gradient-filled
    rectangle with a soft shadow on a best-effort basis.
    """
    # Idiom fix: compare to None with ``is``, not ``==``.
    if s.background is None:
        s._ctx.background(None)
    else:
        s._ctx.background(s.background)
    if s.depth:
        try:
            clr = colors.color(s.background).darker(0.2)
            p = s._ctx.rect(0, 0, s._ctx.WIDTH, s._ctx.HEIGHT, draw=False)
            colors.gradientfill(p, clr, clr.lighter(0.35))
            colors.shadow(dx=0, dy=0, blur=2, alpha=0.935, clr=s.background)
        except Exception:
            # Best effort only; the bare except also caught
            # KeyboardInterrupt/SystemExit, which was too broad.
            pass
def graph_traffic(s, node, alpha=1.0):
    """ Visualization of traffic-intensive nodes (based on their centrality).

    Draws a halo behind the node whose radius grows with node.weight,
    filled with the style's traffic color (if any).
    """
    radius = node.__class__(None).r
    radius += (node.weight + 0.5) * radius * 5
    s._ctx.nostroke()
    if s.traffic:
        s._ctx.fill(
            s.traffic.r,
            s.traffic.g,
            s.traffic.b,
            s.traffic.a * alpha,
        )
    s._ctx.oval(node.x - radius, node.y - radius, radius * 2, radius * 2)
def node(s, node, alpha=1.0):
    """ Visualization of a default node.

    Draws a circle of radius node.r using the style's fill and stroke,
    optionally preceded by a drop shadow when depth is enabled.
    """
    if s.depth:
        # Shadow support is optional; any failure is silently ignored.
        try:
            colors.shadow(dx=5, dy=5, blur=10, alpha=0.5 * alpha)
        except:
            pass
    s._ctx.nofill()
    s._ctx.nostroke()
    if s.fill:
        s._ctx.fill(s.fill.r, s.fill.g, s.fill.b, s.fill.a * alpha)
    if s.stroke:
        s._ctx.strokewidth(s.strokewidth)
        s._ctx.stroke(s.stroke.r, s.stroke.g, s.stroke.b, s.stroke.a * alpha * 3)
    radius = node.r
    s._ctx.oval(node.x - radius, node.y - radius, radius * 2, radius * 2)
def node_label(s, node, alpha=1.0):
    """ Visualization of a node's id.

    The label is rendered once as an outlined text path and cached on
    the node, so repeated frames don't re-layout (and wiggle) the text.
    """
    if s.text:
        #s._ctx.lineheight(1)
        s._ctx.font(s.font)
        s._ctx.fontsize(s.fontsize)
        s._ctx.nostroke()
        s._ctx.fill(
            s.text.r,
            s.text.g,
            s.text.b,
            s.text.a * alpha
        )
        # Cache an outlined label text and translate it.
        # This enhances the speed and avoids wiggling text.
        try:
            p = node._textpath
        except:
            # First call only: coerce the label to unicode (Python 2)
            # and lay out the text path.
            txt = node.label
            try:
                txt = unicode(txt)
            except:
                try:
                    txt = txt.decode("utf-8")
                except:
                    pass
            # Abbreviation.
            #root = node.graph.root
            #if txt != root and txt[-len(root):] == root:
            #    txt = txt[:len(txt)-len(root)]+root[0]+"."
            dx, dy = 0, 0
            if s.align == 2:  # CENTER
                dx = -s._ctx.textwidth(txt, s.textwidth) / 2
                dy = s._ctx.textheight(txt) / 2
            node._textpath = s._ctx.textpath(txt, dx, dy, width=s.textwidth)
            p = node._textpath
        if s.depth:
            # Bug fix: this previously called ``__colors.shadow`` -- an
            # undefined name whose NameError was swallowed by the bare
            # except, so the label shadow was never drawn.
            try:
                colors.shadow(dx=2, dy=4, blur=5, alpha=0.3 * alpha)
            except:
                pass
        s._ctx.push()
        s._ctx.translate(node.x, node.y)
        s._ctx.scale(alpha)
        s._ctx.drawpath(p.copy())
        s._ctx.pop()
def edges(s, edges, alpha=1.0, weighted=False, directed=False):
    """ Visualization of the edges in a network.

    Collects all edges into shared BezierPaths (one plain path, one for
    arrow heads, eleven weight buckets) so each path is drawn only once.
    """
    p = s._ctx.BezierPath()
    if directed and s.stroke:
        pd = s._ctx.BezierPath()
    if weighted and s.fill:
        pw = [s._ctx.BezierPath() for i in range(11)]
    # Draw the edges in a single BezierPath for speed.
    # Weighted edges are divided into ten BezierPaths,
    # depending on their weight rounded between 0 and 10.
    if len(edges) == 0:
        return
    for e in edges:
        try:
            s2 = e.node1.graph.styles[e.node1.style]
        except:
            s2 = s
        if s2.edge:
            s2.edge(s2, p, e, alpha)
            if directed and s.stroke:
                s2.edge_arrow(s2, pd, e, radius=10)
            if weighted and s.fill:
                s2.edge(s2, pw[int(e.weight * 10)], e, alpha)
    s._ctx.autoclosepath(False)
    s._ctx.nofill()
    s._ctx.nostroke()
    # All weighted edges use the default fill.
    if weighted and s.fill:
        # NOTE: ``e`` is the last edge from the loop above; the node
        # radius is assumed uniform across the graph -- confirm.
        r = e.node1.__class__(None).r
        s._ctx.stroke(
            s.fill.r,
            s.fill.g,
            s.fill.b,
            s.fill.a * 0.65 * alpha
        )
        for w in range(1, len(pw)):
            s._ctx.strokewidth(r * w * 0.1)
            s._ctx.drawpath(pw[w].copy())
    # All edges use the default stroke.
    if s.stroke:
        s._ctx.strokewidth(s.strokewidth)
        s._ctx.stroke(
            s.stroke.r,
            s.stroke.g,
            s.stroke.b,
            s.stroke.a * 0.65 * alpha
        )
        s._ctx.drawpath(p.copy())
    if directed and s.stroke:
        # Arrow heads get a slightly more opaque copy of the stroke.
        clr = s._ctx.color(
            s.stroke.r,
            s.stroke.g,
            s.stroke.b,
            s.stroke.a * 0.65 * alpha
        )
        clr.a *= 1.3
        s._ctx.stroke(clr)
        s._ctx.drawpath(pd.copy())
    for e in edges:
        # Bug fix: this lookup used ``self.styles`` -- ``self`` is not
        # defined in this function (the parameter is ``s``), so the
        # NameError was swallowed and edge labels always fell back to
        # the default style. Look the style up the same way as above.
        try:
            s2 = e.node1.graph.styles[e.node1.style]
        except:
            s2 = s
        if s2.edge_label:
            s2.edge_label(s2, e, alpha)
def edge(s, path, edge, alpha=1.0):
    """ Visualization of a single edge between two nodes.

    Back-edges (node2 styled BACK) are drawn as a curve bending toward
    node2; every other edge is a straight line.
    """
    n1, n2 = edge.node1, edge.node2
    path.moveto(n1.x, n1.y)
    if n2.style == BACK:
        path.curveto(
            n1.x,
            n2.y,
            n2.x,
            n2.y,
            n2.x,
            n2.y,
        )
    else:
        path.lineto(
            n2.x,
            n2.y
        )
def edge_label(s, edge, alpha=1.0):
    """ Visualization of the label accompanying an edge.

    Draws edge.label along the line from node1 to node2, caching the
    outlined text path on the edge to avoid re-layout between frames.
    """
    if s.text and edge.label != "":
        s._ctx.nostroke()
        s._ctx.fill(
            s.text.r,
            s.text.g,
            s.text.b,
            s.text.a * alpha*0.75
        )
        s._ctx.lineheight(1)
        s._ctx.font(s.font)
        s._ctx.fontsize(s.fontsize*0.75)
        # Cache an outlined label text and translate it.
        # This enhances the speed and avoids wiggling text.
        try: p = edge._textpath
        except:
            # First call: coerce the label to unicode (Python 2 idiom).
            # NOTE(review): if both conversions fail, ``txt`` stays
            # undefined and the textpath call below raises NameError.
            try: txt = unicode(edge.label)
            except:
                try: txt = edge.label.decode("utf-8")
                except:
                    pass
            edge._textpath = s._ctx.textpath(txt, s._ctx.textwidth(" "), 0, width=s.textwidth)
            p = edge._textpath
        # a = angle of the edge in degrees; d = half the slack left on
        # the edge after subtracting the label's width, used to center
        # the label along the line.
        a = degrees( atan2(edge.node2.y-edge.node1.y, edge.node2.x-edge.node1.x) )
        d = sqrt((edge.node2.x-edge.node1.x)**2 +(edge.node2.y-edge.node1.y)**2)
        d = abs(d-s._ctx.textwidth(edge.label)) * 0.5
        # Rotate the coordinate system so the text runs along the edge;
        # the transform/rotate sequence is order-sensitive.
        s._ctx.push()
        s._ctx.transform(CORNER)
        s._ctx.translate(edge.node1.x, edge.node1.y)
        s._ctx.rotate(-a)
        s._ctx.translate(d, s.fontsize*1.0)
        s._ctx.scale(alpha)
        # Flip labels on the left hand side so they are legible.
        if 90 < a%360 < 270:
            s._ctx.translate(s._ctx.textwidth(edge.label), -s.fontsize*2.0)
            s._ctx.transform(CENTER)
            s._ctx.rotate(180)
            s._ctx.transform(CORNER)
        s._ctx.drawpath(p.copy())
        s._ctx.pop()
def path(s, graph, path):
    """ Visualization of a shortest path between two nodes.

    Draws a thick polyline through every node id in ``path`` and marks
    both endpoints with a small dot.
    """
    def mark(n):
        # Small endpoint dot at roughly a third of the node radius.
        r = n.r * 0.35
        s._ctx.oval(n.x - r, n.y - r, r * 2, r * 2)
    if path and len(path) > 1 and s.stroke:
        s._ctx.nofill()
        s._ctx.stroke(s.stroke.r, s.stroke.g, s.stroke.b, s.stroke.a)
        if s.name != DEFAULT:
            s._ctx.strokewidth(s.strokewidth)
        else:
            s._ctx.strokewidth(s.strokewidth * 2)
        first = True
        for id in path:
            n = graph[id]
            if first:
                first = False
                s._ctx.beginpath(n.x, n.y)
                mark(n)
            else:
                s._ctx.lineto(n.x, n.y)
        s._ctx.endpath()
        mark(n)
def create(self, stylename, **kwargs):
    """ Creates a new style which inherits from the default style,
    or any other style which name is supplied to the optional template parameter.

    :param stylename: name under which the new style is registered
    :param kwargs: style attributes to override; the special key
        ``template`` names the style to inherit from ("default")
    :return: the newly registered style
    """
    if stylename == "default":
        self[stylename] = style(stylename, self._ctx, **kwargs)
        return self[stylename]
    k = kwargs.get("template", "default")
    s = self[stylename] = self[k].copy(stylename)
    for attr in kwargs:
        # dict.has_key() is deprecated (and removed in Python 3);
        # the ``in`` operator works on both 2 and 3.
        if attr in s.__dict__:
            s.__dict__[attr] = kwargs[attr]
    return s
def copy(self, graph):
    """ Returns a copy of all styles and a copy of the styleguide.

    :param graph: graph the copied styles are bound to
    :return: a new styles collection with copied entries and guide
    """
    duplicate = styles(graph)
    duplicate.guide = self.guide.copy(graph)
    dict.__init__(duplicate, [(v.name, v.copy()) for v in self.values()])
    return duplicate
def apply(self):
    """ Check the rules for each node in the graph and apply the style.

    Rules listed in ``self.order`` are evaluated first; the last
    matching rule wins for each node.
    """
    # list() around keys() keeps concatenation working on Python 3,
    # where dict.keys() is a view rather than a list.
    ordered = self.order + list(self.keys())
    unique = []
    for name in ordered:
        if name not in unique:
            unique.append(name)
    for node in self.graph.nodes:
        for name in unique:
            # dict.has_key() is deprecated; use the ``in`` operator.
            if name in self and self[name](self.graph, node):
                node.style = name
def copy(self, graph):
    """ Returns a copy of the styleguide for the given graph.

    :param graph: graph the copied styleguide is bound to
    :return: a new styleguide with the same rules and ordering
    """
    g = styleguide(graph)
    g.order = self.order
    # dict.iteritems() is Python 2 only; items() works on both 2 and 3.
    dict.__init__(g, [(k, v) for k, v in self.items()])
    return g
def raw_decode(self, s, **kw):
    """
    Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
    with a JSON document) and return a 2-tuple of the Python
    representation and the index in ``s`` where the document ended.

    This can be used to decode a JSON document from a string that may
    have extraneous data at the end.

    :raises ValueError: if no JSON document could be decoded from ``s``
    """
    kw.setdefault('context', self)
    scan = self._scanner.iterscan(s, **kw)
    try:
        obj, end = scan.next()
    except StopIteration:
        raise ValueError("No JSON object could be decoded")
    return obj, end
def open_socket(self):
    """
    Open a UDP socket and bind it to ``self.host``/``self.port``.

    Uses SO_REUSEADDR and non-blocking mode to be as robust as possible.
    """
    self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock = self.socket
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.setblocking(0)
    sock.bind((self.host, self.port))
def load_profiles(self):
    """
    Load all available TUIO profiles.

    :return: dict mapping each profile's OSC address to a profile instance
    """
    _profiles = {}
    for name, klass in inspect.getmembers(profiles):
        is_profile = inspect.isclass(klass) and name.endswith('Profile')
        if not is_profile or name == 'TuioProfile':
            continue
        # Register the profile under its OSC address.
        profile = klass()
        _profiles[profile.address] = profile
        # Convenience attribute (e.g. self.cursors) pointing at the
        # profile's objects; profiles without a list_label skip the
        # callback registration as well, matching the original flow.
        try:
            setattr(self, profile.list_label, profile.objs)
        except AttributeError:
            continue
        # Map the callback method to the profile's address.
        self.manager.add(self.callback, profile.address)
    return _profiles
def update(self):
    """
    Receive the next chunk (up to 1024 bytes) of messages from the
    socket and hand it to the connection manager for analysis; socket
    errors (e.g. would-block on the non-blocking socket) are ignored.
    """
    try:
        chunk = self.socket.recv(1024)
        self.manager.handle(chunk)
    except socket.error:
        pass
def callback(self, *incoming):
    """
    Called by the CallbackManager when a new message is received.

    Looks up the profile registered for the message's address and
    invokes the profile method named by the message's command.
    """
    message = incoming[0]
    if not message:
        return
    address, command = message[0], message[2]
    profile = self.get_profile(address)
    if profile is None:
        return
    # Unknown commands (and AttributeErrors raised by the handler)
    # are silently ignored, as in the original behavior.
    try:
        getattr(profile, command)(self, message)
    except AttributeError:
        pass
def copytree(src, dst, symlinks=False, ignore=None):
    """
    Recursively copy ``src`` into ``dst``, working even if ``dst``
    already exists (unlike shutil.copytree).

    :param src: source directory
    :param dst: destination directory (created if missing)
    :param symlinks: if True, recreate symlinks instead of copying targets
    :param ignore: callable(src, names) -> names to skip, as in shutil
    """
    # http://stackoverflow.com/questions/1868714/how-do-i-copy-an-entire-directory-of-files-into-an-existing-directory-using-pyth
    if not os.path.exists(dst):
        os.makedirs(dst)
        shutil.copystat(src, dst)
    lst = os.listdir(src)
    if ignore:
        excl = ignore(src, lst)
        lst = [x for x in lst if x not in excl]
    for item in lst:
        s = os.path.join(src, item)
        d = os.path.join(dst, item)
        if symlinks and os.path.islink(s):
            if os.path.lexists(d):
                os.remove(d)
            os.symlink(os.readlink(s), d)
            # Preserve the link's own mode where the platform allows it.
            # Narrowed from a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit.
            try:
                st = os.lstat(s)
                mode = stat.S_IMODE(st.st_mode)
                os.lchmod(d, mode)
            except (AttributeError, NotImplementedError, OSError):
                pass  # lchmod not available
        elif os.path.isdir(s):
            copytree(s, d, symlinks, ignore)
        else:
            shutil.copy2(s, d)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, **kw):
    """
    Serialize ``obj`` to a JSON formatted ``str``.

    If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.

    If ``ensure_ascii`` is ``False``, then the return value will be a
    ``unicode`` instance subject to normal Python ``str`` to ``unicode``
    coercion rules instead of being escaped to an ASCII ``str``.

    If ``check_circular`` is ``False``, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).

    If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
    strict compliance of the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    If ``indent`` is a non-negative integer, then JSON array elements and
    object members will be pretty-printed with that indent level. An indent
    level of 0 will only insert newlines. ``None`` is the most compact
    representation.

    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
    then it will be used instead of the default ``(', ', ': ')`` separators.
    ``(',', ':')`` is the most compact JSON representation.

    ``encoding`` is the character encoding for str instances, default is UTF-8.

    ``default(obj)`` is a function that should return a serializable version
    of obj or raise TypeError. The default simply raises TypeError.

    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg.
    """
    # Fast path: when every option is at its default we can reuse the
    # shared, pre-built encoder instead of constructing one per call.
    using_defaults = (
        skipkeys is False and ensure_ascii is True and
        check_circular is True and allow_nan is True and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and not kw
    )
    if using_defaults:
        return _default_encoder.encode(obj)
    encoder_cls = JSONEncoder if cls is None else cls
    encoder = encoder_cls(
        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
        separators=separators, encoding=encoding, default=default,
        **kw)
    return encoder.encode(obj)
def search(q, start=0, wait=10, asynchronous=False, cached=False):
    """ Returns a Google web query formatted as a GoogleSearch list object.

    :param q: query string
    :param start: index of the first result to return
    :param wait: seconds to wait for a response
    :param asynchronous: perform the query in the background
    :param cached: reuse cached results when available
    """
    return GoogleSearch(q, start, GOOGLE_SEARCH, "", wait, asynchronous, cached)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.