def draw_list(markup, x, y, w, padding=5, callback=None):
""" Draws list markup with indentation in NodeBox.
Draw list markup at x, y coordinates
using indented bullets or numbers.
The callback is a command that takes a str and an int.
"""
    try: from web import _ctx
    except ImportError: pass
i = 1
for chunk in markup.split("\n"):
        if callback is not None:
callback(chunk, i)
m = re.search("^([0-9]{1,3}\. )", chunk.lstrip())
if m:
indent = re.search("[0-9]", chunk).start()*padding*2
bullet = m.group(1)
dx = textwidth("000.")
chunk = chunk.lstrip(m.group(1)+"\t")
if chunk.lstrip().startswith("*"):
indent = chunk.find("*")*padding*2
bullet = u"•"
dx = textwidth("*")
chunk = chunk.lstrip("* \t")
_ctx.text(bullet, x+indent, y)
dx += padding + indent
_ctx.text(chunk, x+dx, y, width=w-dx)
y += _ctx.textheight(chunk, width=w-dx)
y += _ctx.textheight(" ") * 0.25
        i += 1
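# A minimal usage sketch for draw_list(), assuming it runs inside a
# NodeBox/Shoebot script where a drawing context is active. The highlight()
# callback below is hypothetical and simply reports each line before it is drawn.
def highlight(chunk, i):
    print("drawing line %d: %s" % (i, chunk))

# draw_list("1. first\n2. second\n* a bullet", 20, 40, 300, callback=highlight)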
def draw_table(table, x, y, w, padding=5):
""" This is a very poor algorithm to draw Wikipedia tables in NodeBox.
"""
    try: from web import _ctx
    except ImportError: pass
f = _ctx.fill()
_ctx.stroke(f)
h = _ctx.textheight(" ") + padding*2
row_y = y
if table.title != "":
_ctx.fill(f)
_ctx.rect(x, row_y, w, h)
_ctx.fill(1)
        _ctx.text(table.title, x+padding, row_y+_ctx.fontsize()+padding)
row_y += h
# A table of flags marking how long a cell
# from a previous row is still spanning in a column.
rowspans = [1 for i in range(10)]
previous_cell_w = 0
for row in table:
cell_x = x
# The width of a cell is the total table width
# evenly divided by the number of cells.
# Previous rows' cells still spanning will push cells
# to the right and decrease their width.
cell_w = 1.0 * w
cell_w -= previous_cell_w * len([n for n in rowspans if n > 1])
cell_w /= len(row)
# The height of each cell is the highest cell in the row.
# The height depends on the amount of text in the cell.
cell_h = 0
for cell in row:
this_h = _ctx.textheight(cell, width=cell_w-padding*2) + padding*2
cell_h = max(cell_h, this_h)
# Traverse each cell in this row.
i = 0
for cell in row:
# If a previous row's cell is still spanning,
# push this cell to the right.
if rowspans[i] > 1:
rowspans[i] -= 1
cell_x += previous_cell_w
i += 1
# Get the rowspan attribute for this cell.
m = re.search("rowspan=\"(.*?)\"", cell.properties)
if m:
rowspan = int(m.group(1))
rowspans[i] = rowspan
else:
rowspan = 1
# Padded cell text.
# Horizontal line above each cell.
# Vertical line before each cell.
_ctx.fill(f)
_ctx.text(cell, cell_x+padding, row_y+_ctx.fontsize()+padding, cell_w-padding*2)
_ctx.line(cell_x, row_y, cell_x+cell_w, row_y)
if cell_x > x:
_ctx.nofill()
_ctx.line(cell_x, row_y, cell_x, row_y+cell_h)
cell_x += cell_w
i += 1
# Move to next row.
row_y += cell_h
previous_cell_w = cell_w
# Table's bounding rectangle.
_ctx.nofill()
    _ctx.rect(x, y, w, row_y-y)
def parse(self, light=False):
""" Parses data from Wikipedia page markup.
The markup comes from Wikipedia's edit page.
We parse it here into objects containing plain text.
    The light version parses only links to other articles; it is faster than a full parse.
"""
markup = self.markup
self.disambiguation = self.parse_disambiguation(markup)
self.categories = self.parse_categories(markup)
self.links = self.parse_links(markup)
if not light:
# Conversion of HTML markup to Wikipedia markup.
markup = self.convert_pre(markup)
markup = self.convert_li(markup)
markup = self.convert_table(markup)
markup = replace_entities(markup)
# Harvest references from the markup
# and replace them by footnotes.
markup = markup.replace("{{Cite", "{{cite")
markup = re.sub("\{\{ {1,2}cite", "{{cite", markup)
self.references, markup = self.parse_references(markup)
# Make sure there are no legend linebreaks in image links.
# Then harvest images and strip them from the markup.
markup = re.sub("\n+(\{\{legend)", "\\1", markup)
self.images, markup = self.parse_images(markup)
self.images.extend(self.parse_gallery_images(markup))
self.paragraphs = self.parse_paragraphs(markup)
self.tables = self.parse_tables(markup)
self.translations = self.parse_translations(markup)
        self.important = self.parse_important(markup)
def plain(self, markup):
""" Strips Wikipedia markup from given text.
This creates a "plain" version of the markup,
stripping images and references and the like.
Does some commonsense maintenance as well,
like collapsing multiple spaces.
    If you specified full_strip=False for the WikipediaPage instance,
some markup is preserved as HTML (links, bold, italic).
"""
# Strip bold and italic.
if self.full_strip:
markup = markup.replace("'''", "")
markup = markup.replace("''", "")
else:
markup = re.sub("'''([^']*?)'''", "<b>\\1</b>", markup)
markup = re.sub("''([^']*?)''", "<i>\\1</i>", markup)
# Strip image gallery sections.
markup = re.sub(self.re["gallery"], "", markup)
# Strip tables.
markup = re.sub(self.re["table"], "", markup)
markup = markup.replace("||", "")
markup = markup.replace("|}", "")
# Strip links, keeping the display alias.
# We'll strip the ending ]] later.
if self.full_strip:
markup = re.sub(r"\[\[[^\]]*?\|", "", markup)
else:
markup = re.sub(r"\[\[([^]|]*|)\]\]", '<a href="\\1">\\1</a>', markup)
markup = re.sub(r"\[\[([^]|]*|)\|([^]]*)\]\]", '<a href="\\1">\\2</a>', markup)
# Strip translations, users, etc.
markup = re.sub(self.re["translation"], "", markup)
    # These TeX commands are not supported:
    markup = markup.replace("\displaystyle", "")
    markup = markup.replace("\textstyle", "")
    markup = markup.replace("\scriptstyle", "")
    markup = markup.replace("\scriptscriptstyle", "")
# Before stripping [ and ] brackets,
# make sure they are retained inside <math></math> equations.
markup = re.sub("(<math>.*?)\[(.*?</math>)", "\\1MATH___OPEN\\2", markup)
markup = re.sub("(<math>.*?)\](.*?</math>)", "\\1MATH___CLOSE\\2", markup)
markup = markup.replace("[", "")
markup = markup.replace("]", "")
markup = markup.replace("MATH___OPEN", "[")
markup = markup.replace("MATH___CLOSE", "]")
# a) Strip references.
# b) Strip <ref></ref> tags.
# c) Strip <ref name="" /> tags.
# d) Replace --REF--(12) by [12].
# e) Remove space between [12] and trailing punctuation .,
# f) Remove HTML comment <!-- -->
# g) Keep the Latin Extended-B template: {{latinx| }}
# h) Strip Middle-Earth references.
# i) Keep quotes: {{quote| }}
# j) Remove templates
markup = re.sub(self.re["reference"], "", markup) # a
markup = re.sub("</{0,1}ref.*?>", "", markup) # b
markup = re.sub("<ref name=\".*?\" {0,1}/>", "", markup) # c
markup = re.sub(self.ref+"\(([0-9]*?)\)", "[\\1] ", markup) # d
markup = re.sub("\] ([,.\"\?\)])", "]\\1", markup) # e
markup = re.sub(self.re["comment"], "", markup) # f
markup = re.sub("\{\{latinx\|(.*?)\}\}", "\\1", markup) # g
markup = re.sub("\{\{ME-ref.*?\}\}", "", markup) # h
markup = re.sub("\{\{quote\|(.*?)\}\}", "\"\\1\"", markup) # i
markup = re.sub(re.compile("\{\{.*?\}\}", re.DOTALL), "", markup) # j
markup = markup.replace("}}", "")
# Collapse multiple spaces between words,
# unless they appear in preformatted text.
markup = re.sub("<br.*?/{0,1}>", " ", markup)
markup = markup.split("\n")
for i in range(len(markup)):
if not markup[i].startswith(" "):
markup[i] = re.sub(r"[ ]+", " ", markup[i])
markup = "\n".join(markup)
markup = markup.replace(" .", ".")
# Strip all HTML except <math> tags.
if self.full_strip:
markup = strip_tags(markup, exclude=["math"], linebreaks=True)
markup = markup.strip()
    return markup
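# A self-contained sketch of the bold- and link-stripping steps used in
# plain() above, run outside the class for illustration; the patterns
# mirror the full_strip branch.
import re
s = "'''Computer''' science covers [[algorithm|algorithms]] and [[data structure]]s."
s = s.replace("'''", "").replace("''", "")
s = re.sub(r"\[\[[^\]]*?\|", "", s)      # keep the display alias of piped links
s = s.replace("[", "").replace("]", "")  # drop the remaining brackets
print(s)  # Computer science covers algorithms and data structures.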
def convert_pre(self, markup):
""" Substitutes <pre> to Wikipedia markup by adding a space at the start of a line.
"""
for m in re.findall(self.re["preformatted"], markup):
markup = markup.replace(m, m.replace("\n", "\n "))
markup = re.sub("<pre.*?>\n{0,}", "", markup)
markup = re.sub("\W{0,}</pre>", "", markup)
    return markup
def convert_li(self, markup):
    """ Substitutes <li> content with Wikipedia markup.
    """
    for li in re.findall("<li.*?>", markup):
        markup = re.sub(li, "\n* ", markup)
        markup = markup.replace("</li>", "")
    return markup
def convert_table(self, markup):
""" Subtitutes <table> content to Wikipedia markup.
"""
for table in re.findall(self.re["html-table"], markup):
wiki = table
wiki = re.sub(r"<table(.*?)>", "{|\\1", wiki)
wiki = re.sub(r"<tr(.*?)>", "|-\\1", wiki)
wiki = re.sub(r"<td(.*?)>", "|\\1|", wiki)
wiki = wiki.replace("</td>", "\n")
wiki = wiki.replace("</tr>", "\n")
wiki = wiki.replace("</table>", "\n|}")
markup = markup.replace(table, wiki)
    return markup
def parse_links(self, markup):
""" Returns a list of internal Wikipedia links in the markup.
# A Wikipedia link looks like:
# [[List of operating systems#Embedded | List of embedded operating systems]]
    # It does not contain a colon; a colon indicates images, users, languages, etc.
The return value is a list containing the first part of the link,
without the anchor.
"""
links = []
m = re.findall(self.re["link"], markup)
for link in m:
# We don't like [[{{{1|Universe (disambiguation)}}}]]
if link.find("{") >= 0:
link = re.sub("\{{1,3}[0-9]{0,2}\|", "", link)
link = link.replace("{", "")
link = link.replace("}", "")
link = link.split("|")
link[0] = link[0].split("#")
page = link[0][0].strip()
#anchor = u""
#display = u""
#if len(link[0]) > 1:
# anchor = link[0][1].strip()
#if len(link) > 1:
# display = link[1].strip()
if not page in links:
links.append(page)
#links[page] = WikipediaLink(page, anchor, display)
links.sort()
    return links
def parse_images(self, markup, threshold=6):
""" Returns a list of images found in the markup.
An image has a pathname, a description in plain text
and a list of properties Wikipedia uses to size and place images.
# A Wikipedia image looks like:
# [[Image:Columbia Supercomputer - NASA Advanced Supercomputing Facility.jpg|right|thumb|
# The [[NASA]] [[Columbia (supercomputer)|Columbia Supercomputer]].]]
# Parts are separated by "|".
# The first part is the image file, the last part can be a description.
# In between are display properties, like "right" or "thumb".
"""
images = []
m = re.findall(self.re["image"], markup)
for p in m:
p = self.parse_balanced_image(p)
img = p.split("|")
path = img[0].replace("[[Image:", "").strip()
description = u""
links = {}
properties = []
if len(img) > 1:
img = "|".join(img[1:])
links = self.parse_links(img)
properties = self.plain(img).split("|")
description = u""
# Best guess: an image description is normally
# longer than six characters, properties like
# "thumb" and "right" are less than six characters.
            if len(properties[-1]) > threshold:
description = properties[-1]
properties = properties[:-1]
img = WikipediaImage(path, description, links, properties)
images.append(img)
markup = markup.replace(p, "")
    return images, markup.strip()
def parse_balanced_image(self, markup):
""" Corrects Wikipedia image markup.
    Images have a description inside their link markup that
    can contain link markup itself, so we make sure the outer "[" and "]"
    brackets delimiting the image are balanced correctly (e.g. no [[ ]] ]]).
Called from parse_images().
"""
opened = 0
closed = 0
for i in range(len(markup)):
if markup[i] == "[": opened += 1
if markup[i] == "]": closed += 1
if opened == closed:
return markup[:i+1]
    return markup
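# A standalone check of the bracket balancing above; the method never touches
# self, so it can be exercised directly as a plain function for illustration:
print(parse_balanced_image(None, "[[Image:x.jpg|The [[NASA]] computer.]] ]]"))
# [[Image:x.jpg|The [[NASA]] computer.]]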
def parse_gallery_images(self, markup):
""" Parses images from the <gallery></gallery> section.
Images inside <gallery> tags do not have outer "[[" brackets.
Add these and then parse again.
"""
gallery = re.search(self.re["gallery"], markup)
if gallery:
gallery = gallery.group(1)
gallery = gallery.replace("Image:", "[[Image:")
gallery = gallery.replace("\n", "]]\n")
images, markup = self.parse_images(gallery)
return images
    return []
def parse_paragraph(self, markup):
""" Creates a list from lines of text in a paragraph.
Each line of text is a new item in the list,
except lists and preformatted chunks (<li> and <pre>),
these are kept together as a single chunk.
Lists are formatted using parse_paragraph_list().
Empty lines are stripped from the output.
Indentation (i.e. lines starting with ":") is ignored.
Called from parse_paragraphs() method.
"""
s = self.plain(markup)
# Add an extra linebreak between the last list item
# and the normal line following after it, so they don't stick together, e.g.
# **[[Alin Magic]], magic used in the videogame ''[[Rise of Nations: Rise of Legends]]''
# In '''popular culture''':
# * [[Magic (film)|''Magic'' (film)]], a 1978 film starring Anthony Hopkins and Ann-Margret
    s = re.sub(re.compile("\n([*#;].*?)\n([^*#;])", re.DOTALL), "\n\\1\n\n\\2", s)
# This keeps list items with linebreaks
# between them nicely together.
s = re.sub("\n{2,3}([*#;])", "\n\\1", s)
chunks = []
ch = ""
i = 1
for chunk in s.split("\n"):
if chunk.startswith(":"):
chunk = chunk.lstrip(":")
if len(chunk.strip()) > 1:
# Leave out taxoboxes and infoboxes.
if not chunk.startswith("|"):
ch += chunk + "\n"
if ch.strip() != "":
if not re.search("^[ *#;]", chunk):
ch = self.parse_paragraph_list(ch)
chunks.append(ch.rstrip())
ch = ""
if ch.strip() != "":
ch = self.parse_paragraph_list(ch)
chunks.append(ch.strip())
    return chunks
def parse_paragraph_list(self, markup, indent="\t"):
""" Formats bullets and numbering of Wikipedia lists.
List items are marked by "*", "#" or ";" at the start of a line.
We treat ";" the same as "*",
and replace "#" with real numbering (e.g. "2.").
Sublists (e.g. *** and ###) get indented by tabs.
Called from parse_paragraphs() method.
"""
    def lastleft(ch, s):
        n = 0
        while n < len(s) and s[n] == ch: n += 1
        return n
tally = [1 for i in range(10)]
chunks = markup.split("\n")
for i in range(len(chunks)):
if chunks[i].startswith("#"):
j = min(lastleft("#", chunks[i]), len(tally)-1)
chunks[i] = indent*(j-1) + str(tally[j])+". " + chunks[i][j:]
chunks[i] = chunks[i].replace(". ", ". ")
tally[j] += 1
# Reset the numbering of sublists.
for k in range(j+1, len(tally)):
tally[k] = 1
if chunks[i].startswith(";"):
chunks[i] = "*" + chunks[i][1:]
if chunks[i].startswith("*"):
j = lastleft("*", chunks[i])
chunks[i] = indent*(j-1) + "* " + chunks[i][j:]
chunks[i] = chunks[i].replace("* ", "* ")
return "\n".join(chunks) |
def connect_paragraph(self, paragraph, paragraphs):
""" Create parent/child links to other paragraphs.
    The paragraphs parameter is a list of all the paragraphs
    parsed up till now.
The parent is the previous paragraph whose depth is less.
The parent's children include this paragraph.
Called from parse_paragraphs() method.
"""
if paragraph.depth > 0:
n = range(len(paragraphs))
n.reverse()
for i in n:
if paragraphs[i].depth == paragraph.depth-1:
paragraph.parent = paragraphs[i]
paragraphs[i].children.append(paragraph)
break
    return paragraph
def parse_paragraph_references(self, markup):
""" Updates references with content from specific paragraphs.
The "references", "notes", "external links" paragraphs
are double-checked for references. Not all items in the list
might have been referenced inside the article, or the item
might contain more info than we initially parsed from it.
Called from parse_paragraphs() method.
"""
for chunk in markup.split("\n"):
# We already parsed this, it contains the self.ref mark.
# See if we can strip more notes from it.
m = re.search(self.ref+"\(([0-9]*?)\)", chunk)
if m:
chunk = chunk.strip("* ")
chunk = chunk.replace(m.group(0), "")
chunk = self.plain(chunk)
i = int(m.group(1))
if chunk != "":
self.references[i-1].note = chunk
# If it's not a citation we don't have this reference yet.
elif chunk.strip().startswith("*") \
and chunk.find("{{cite") < 0:
chunk = chunk.strip("* ")
chunk = self.plain(chunk)
if chunk != "":
r = WikipediaReference()
r.note = chunk
                self.references.append(r)
def parse_paragraphs(self, markup):
""" Returns a list of paragraphs in the markup.
A paragraph has a title and multiple lines of plain text.
A paragraph might have parent and child paragraphs,
denoting subtitles or bigger chapters.
A paragraph might have links to additional articles.
    Formats numbered lists by replacing "#" with real numbering (1., 2., ...).
Formats bulleted sublists like ** or *** with indentation.
"""
# Paragraphs to exclude.
refs = ["references", "notes", "notes and references", "external links", "further reading"]
exclude = ["see also", "media", "gallery", "related topics", "lists", "gallery", "images"]
exclude.extend(refs)
paragraphs = []
paragraph = WikipediaParagraph(self.title)
paragraph_data = ""
for chunk in markup.split("\n"):
# Strip each line of whitespace,
# unless it's a preformatted line (starts with a space).
if not chunk.startswith(" "):
chunk = chunk.strip()
# A title wrapped in "=", "==", "==="...
        # denotes a new paragraph section.
if chunk.startswith("="):
if paragraph.title.lower() in refs \
or (paragraph.parent and paragraph.parent.title.lower() in refs):
self.parse_paragraph_references(paragraph_data)
paragraph.extend(self.parse_paragraph(paragraph_data))
paragraphs.append(paragraph)
# Initialise a new paragraph.
# Create parent/child links to other paragraphs.
title = chunk.strip().strip("=")
title = self.plain(title)
paragraph = WikipediaParagraph(title)
paragraph.depth = self.parse_paragraph_heading_depth(chunk)
if paragraph.title.lower() not in exclude:
paragraph = self.connect_paragraph(paragraph, paragraphs)
paragraph_data = ""
# Underneath a title might be links to in-depth articles,
# e.g. Main articles: Computer program and Computer programming
# which in wiki markup would be {{main|Computer program|Computer programming}}
# The second line corrects" {{Main|Credit (finance)}} or {{Main|Usury}}".
elif re.search(re.compile("^{{main", re.I), chunk):
paragraph.main = [link.strip("} ") for link in chunk.split("|")[1:]]
paragraph.main = [re.sub(re.compile("}}.*?{{main", re.I), "", link)
for link in paragraph.main]
# At the bottom might be links to related articles,
# e.g. See also: Abundance of the chemical elements
# which in wiki markup would be {{see also|Abundance of the chemical elements}}
elif re.search(re.compile("^{{see {0,1}also", re.I), chunk):
paragraph.related = [link.strip("} ") for link in chunk.split("|")[1:]]
# Accumulate the data in this paragraph,
# we'll process it once a new paragraph starts.
else:
paragraph_data += chunk +"\n"
# Append the last paragraph.
if paragraph.title.lower() in refs \
or (paragraph.parent and paragraph.parent.title.lower() in refs):
self.parse_paragraph_references(paragraph_data)
paragraph.extend(self.parse_paragraph(paragraph_data))
paragraphs.append(paragraph)
# The "See also" paragraph is an enumeration of links
# which we already parsed so don't show them.
# We also did references, and other paragraphs are not that relevant.
paragraphs_exclude = []
for paragraph in paragraphs:
if paragraph.title.lower() not in exclude \
and not (paragraph.parent and paragraph.parent.title.lower() in exclude):
paragraphs_exclude.append(paragraph)
if len(paragraphs_exclude) == 1 and \
len(paragraphs_exclude[0]) == 0:
return []
    return paragraphs_exclude
def parse_table_row(self, markup, row):
""" Parses a row of cells in a Wikipedia table.
Cells in the row are separated by "||".
A "!" indicates a row of heading columns.
Each cell can contain properties before a "|",
# e.g. align="right" | Cell 2 (right aligned).
"""
    if row is None:
row = WikipediaTableRow()
markup = markup.replace("!!", "||")
for cell in markup.lstrip("|!").split("||"):
# The "|" after the properties can't be part of a link.
i = cell.find("|")
j = cell.find("[[")
        if i > 0 and (j < 0 or i < j):
data = self.plain(cell[i+1:])
properties = cell[:i].strip()
else:
data = self.plain(cell)
properties = u""
cell = WikipediaTableCell(data)
cell.properties = properties
row.append(cell)
    return row
def connect_table(self, table, chunk, markup):
""" Creates a link from the table to paragraph and vice versa.
Finds the first heading above the table in the markup.
This is the title of the paragraph the table belongs to.
"""
k = markup.find(chunk)
i = markup.rfind("\n=", 0, k)
j = markup.find("\n", i+1)
paragraph_title = markup[i:j].strip().strip("= ")
for paragraph in self.paragraphs:
if paragraph.title == paragraph_title:
paragraph.tables.append(table)
            table.paragraph = paragraph
def parse_tables(self, markup):
""" Returns a list of tables in the markup.
A Wikipedia table looks like:
{| border="1"
|-
|Cell 1 (no modifier - not aligned)
|-
|align="right" |Cell 2 (right aligned)
|-
|}
"""
tables = []
m = re.findall(self.re["table"], markup)
for chunk in m:
table = WikipediaTable()
table.properties = chunk.split("\n")[0].strip("{|").strip()
self.connect_table(table, chunk, markup)
# Tables start with "{|".
# On the same line can be properties, e.g. {| border="1"
# The table heading starts with "|+".
# A new row in the table starts with "|-".
# The end of the table is marked with "|}".
        row = None
        for line in chunk.split("\n"):
            line = line.strip()
            if line.startswith("|+"):
                title = self.plain(line.strip("|+"))
                table.title = title
            elif line.startswith("|-"):
                if row:
                    row.properties = line.strip("|-").strip()
                    table.append(row)
                row = None
            elif line.startswith("|}"):
                pass
            elif line.startswith("|") \
            or line.startswith("!"):
                row = self.parse_table_row(line, row)
# Append the last row.
if row: table.append(row)
if len(table) > 0:
tables.append(table)
    return tables
def parse_references(self, markup):
""" Returns a list of references found in the markup.
References appear inline as <ref> footnotes,
http:// external links, or {{cite}} citations.
    We replace them with (1)-style footnotes.
Additional references data is gathered in
parse_paragraph_references() when we parse paragraphs.
References can also appear in image descriptions,
tables and taxoboxes, so they might not always pop up in a paragraph.
The plain() method finally replaces (1) by [1].
"""
references = []
# A Wikipedia reference note looks like:
# <ref>In 1946, [[ENIAC]] consumed an estimated 174 kW.
# By comparison, a typical personal computer may use around 400 W;
# over four hundred times less. {{Ref harvard|kempf1961|Kempf 1961|a}}</ref>
m = re.findall(self.re["reference"], markup)
for reference in m:
reference = re.sub("<ref> {0,1}cite", "<ref>{{cite", reference)
if not reference.strip().startswith("[http://") and \
not re.search("\{\{cite", reference):
r = WikipediaReference()
r.note = self.plain(re.sub("</{0,1}ref.*?>", "", reference))
if r.note != "":
references.append(r)
p = " "+self.ref+"("+str(len(references))+")"
markup = markup.replace(reference, p, 1)
else:
# References containing a citation or url
# are better handled by the next patterns.
pass
# A Wikipedia citation looks like:
# {{cite journal
# | last = Einstein
# | first = Albert
# | authorlink = Albert Einstein
# | title = Sidelights on Relativity (Geometry and Experience)
# | publisher = P. Dutton., Co
# | date = 1923}}
m = re.findall(self.re["citation"], markup)
for citation in m:
c = citation.replace("\n", "")
r = WikipediaReference()
for key in r.__dict__.keys():
value = re.search("\| {0,1}"+key+"(.*?)[\|}]", c)
if value:
value = value.group(1)
value = value.replace("link", "")
value = value.strip().strip(" =[]")
value = self.plain(value)
setattr(r, key, value)
if r.first != "" and r.last != "":
r.author = r.first + " " + r.last
references.append(r)
p = " "+self.ref+"("+str(len(references))+")"
markup = markup.replace(citation, p, 1)
# A Wikipedia embedded url looks like:
# [http://www.pbs.org/wnet/hawking/html/home.html ''Stephen Hawking's Universe'']
m = re.findall(self.re["url"], markup)
for url in m:
r = WikipediaReference()
i = url.find(" ")
if i > 0:
r.url = url[:i].strip()
r.note = self.plain(url[i:])
else:
r.url = url.strip()
references.append(r)
p = r.note+" "+self.ref+"("+str(len(references))+")"
markup = markup.replace("["+url+"]", p, 1)
# Since we parsed all citations first and then all notes and urls,
# the ordering will not be correct in the markup,
# e.g. (1) (11) (12) (2) (3).
    ordered = []
    m = re.findall(self.ref+"\(([0-9]*)\)", markup)
    for i in m:
        ordered.append(references[int(i)-1])
        markup = markup.replace(
            self.ref+"("+i+")",
            self.ref+"**("+str(len(ordered))+")"
        )
    markup = markup.replace(self.ref+"**", self.ref)
    for r in references:
        if r not in ordered:
            ordered.append(r)
    references = ordered
    return references, markup.strip()
def parse_categories(self, markup):
""" Returns a list of categories the page belongs to.
# A Wikipedia category link looks like:
# [[Category:Computing]]
# This indicates the page is included in the given category.
# If "Category" is preceded by ":" this indicates a link to a category.
"""
categories = []
m = re.findall(self.re["category"], markup)
for category in m:
category = category.split("|")
page = category[0].strip()
display = u""
if len(category) > 1:
display = category[1].strip()
#if not categories.has_key(page):
# categories[page] = WikipediaLink(page, u"", display)
if not page in categories:
categories.append(page)
    return categories
def parse_translations(self, markup):
""" Returns a dictionary of translations for the page title.
A Wikipedia language link looks like: [[af:Rekenaar]].
The parser will also fetch links like "user:" and "media:"
but these are stripped against the dictionary of
Wikipedia languages.
You can get a translated page by searching Wikipedia
with the appropriate language code and supplying
the translated title as query.
"""
global languages
translations = {}
m = re.findall(self.re["translation"], markup)
for language, translation in m:
if language in languages:
translations[language] = translation
    return translations
def parse_disambiguation(self, markup):
""" Gets the Wikipedia disambiguation page for this article.
A Wikipedia disambiguation link refers to other pages
with the same title but of smaller significance,
e.g. {{dablink|For the IEEE magazine see [[Computer (magazine)]].}}
"""
m = re.search(self.re["disambiguation"], markup)
if m:
return self.parse_links(m.group(1))
else:
        return []
def parse_important(self, markup):
""" Returns a list of words that appear in bold in the article.
Things like table titles are not added to the list,
these are probably bold because it makes the layout nice,
not necessarily because they are important.
"""
important = []
table_titles = [table.title for table in self.tables]
m = re.findall(self.re["bold"], markup)
for bold in m:
bold = self.plain(bold)
        if bold not in table_titles:
            important.append(bold.lower())
    return important
def sanitize(self, val):
"""Given a Variable and a value, cleans it out"""
if self.type == NUMBER:
try:
return clamp(self.min, self.max, float(val))
except ValueError:
return 0.0
elif self.type == TEXT:
try:
return unicode(str(val), "utf_8", "replace")
        except Exception:
return ""
elif self.type == BOOLEAN:
if unicode(val).lower() in ("true", "1", "yes"):
return True
else:
            return False
def compliesTo(self, v):
"""Return whether I am compatible with the given var:
- Type should be the same
    - My value should be inside the given var's min/max range.
"""
if self.type == v.type:
if self.type == NUMBER:
if self.value < self.min or self.value > self.max:
return False
return True
    return False
def isList(l):
"""Convenience method that works with all 2.x versions of Python
to determine whether or not something is listlike."""
return hasattr(l, '__iter__') \
        or (type(l) in (types.ListType, types.TupleType))
def isString(s):
"""Convenience method that works with all 2.x versions of Python
to determine whether or not something is stringlike."""
try:
return isinstance(s, unicode) or isinstance(s, basestring)
except NameError:
        return isinstance(s, str)
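# Quick checks of the two helpers above under Python 2: str objects do not
# define __iter__, so plain strings are not treated as list-like.
print(isList([1, 2]))    # True
print(isList("abc"))     # False
print(isString(u"abc"))  # True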
def buildTagMap(default, *args):
"""Turns a list of maps, lists, or scalars into a single map.
Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
NESTING_RESET_TAGS maps out of lists and partial maps."""
built = {}
for portion in args:
if hasattr(portion, 'items'):
#It's a map. Merge it.
for k,v in portion.items():
built[k] = v
elif isList(portion):
#It's a list. Map each item to the default.
for k in portion:
built[k] = default
else:
#It's a scalar. Map it to the default.
built[portion] = default
    return built
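# Usage sketch: list entries map to the default value, dict entries are
# merged as-is (this mirrors how maps like SELF_CLOSING_TAGS are built).
print(buildTagMap(None, ["br", "hr"], {"input": None}))
# {'input': None, 'hr': None, 'br': None}   (key order may vary)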
def setup(self, parent=None, previous=None):
"""Sets up the initial relations between this element and
other elements."""
self.parent = parent
self.previous = previous
self.next = None
self.previousSibling = None
self.nextSibling = None
if self.parent and self.parent.contents:
self.previousSibling = self.parent.contents[-1]
        self.previousSibling.nextSibling = self
def extract(self):
"""Destructively rips this element out of the tree."""
if self.parent:
try:
self.parent.contents.remove(self)
except ValueError:
pass
#Find the two elements that would be next to each other if
#this element (and any children) hadn't been parsed. Connect
#the two.
lastChild = self._lastRecursiveChild()
nextElement = lastChild.next
if self.previous:
self.previous.next = nextElement
if nextElement:
nextElement.previous = self.previous
self.previous = None
lastChild.next = None
self.parent = None
if self.previousSibling:
self.previousSibling.nextSibling = self.nextSibling
if self.nextSibling:
self.nextSibling.previousSibling = self.previousSibling
self.previousSibling = self.nextSibling = None
    return self
def _lastRecursiveChild(self):
"Finds the last element beneath this object to be parsed."
lastChild = self
while hasattr(lastChild, 'contents') and lastChild.contents:
lastChild = lastChild.contents[-1]
    return lastChild
def findNext(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears after this Tag in the document."""
    return self._findOne(self.findAllNext, name, attrs, text, **kwargs)
def findAllNext(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
after this Tag in the document."""
return self._findAll(name, attrs, text, limit, self.nextGenerator,
        **kwargs)
def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document."""
return self._findOne(self.findNextSiblings, name, attrs, text,
        **kwargs)
def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear after this Tag in the document."""
return self._findAll(name, attrs, text, limit,
        self.nextSiblingGenerator, **kwargs)
def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears before this Tag in the document."""
    return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)
def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
before this Tag in the document."""
return self._findAll(name, attrs, text, limit, self.previousGenerator,
        **kwargs)
def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears before this Tag in the document."""
return self._findOne(self.findPreviousSiblings, name, attrs, text,
        **kwargs)
def findPreviousSiblings(self, name=None, attrs={}, text=None,
limit=None, **kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear before this Tag in the document."""
return self._findAll(name, attrs, text, limit,
        self.previousSiblingGenerator, **kwargs)
def findParent(self, name=None, attrs={}, **kwargs):
"""Returns the closest parent of this Tag that matches the given
criteria."""
# NOTE: We can't use _findOne because findParents takes a different
# set of arguments.
r = None
l = self.findParents(name, attrs, 1)
if l:
r = l[0]
    return r
def findParents(self, name=None, attrs={}, limit=None, **kwargs):
"""Returns the parents of this Tag that match the given
criteria."""
return self._findAll(name, attrs, None, limit, self.parentGenerator,
        **kwargs)
def _findAll(self, name, attrs, text, limit, generator, **kwargs):
"Iterates over a generator looking for things that match."
if isinstance(name, SoupStrainer):
strainer = name
else:
# Build a SoupStrainer
strainer = SoupStrainer(name, attrs, text, **kwargs)
results = ResultSet(strainer)
g = generator()
while True:
try:
i = g.next()
except StopIteration:
break
if i:
found = strainer.search(i)
if found:
results.append(found)
if limit and len(results) >= limit:
break
    return results
def toEncoding(self, s, encoding=None):
"""Encodes an object to a string in some encoding, or to Unicode.
."""
if isinstance(s, unicode):
if encoding:
s = s.encode(encoding)
elif isinstance(s, str):
if encoding:
s = s.encode(encoding)
else:
s = unicode(s)
else:
if encoding:
s = self.toEncoding(str(s), encoding)
else:
s = unicode(s)
    return s
def _invert(h):
"Cheap function to invert a hash."
i = {}
for k,v in h.items():
i[v] = k
    return i
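# Quick check of _invert(): swaps keys and values.
print(_invert({"lt": "<", "gt": ">"}))
# {'<': 'lt', '>': 'gt'}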
def _convertEntities(self, match):
"""Used in a call to re.sub to replace HTML, XML, and numeric
entities with the appropriate Unicode characters. If HTML
entities are being converted, any unrecognized entities are
escaped."""
x = match.group(1)
if self.convertHTMLEntities and x in name2codepoint:
return unichr(name2codepoint[x])
elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
if self.convertXMLEntities:
return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
else:
return u'&%s;' % x
elif len(x) > 0 and x[0] == '#':
# Handle numeric entities
if len(x) > 1 and x[1] == 'x':
return unichr(int(x[2:], 16))
else:
return unichr(int(x[1:]))
    elif self.escapeUnrecognizedEntities:
        return u'&amp;%s;' % x
    else:
        return u'&%s;' % x
def decompose(self):
"""Recursively destroys the contents of this tree."""
contents = [i for i in self.contents]
for i in contents:
if isinstance(i, Tag):
i.decompose()
else:
i.extract()
    self.extract()
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
"""Renders the contents of this tag as a string in the given
    encoding. If encoding is None, returns a Unicode string."""
    s = []
for c in self:
text = None
if isinstance(c, NavigableString):
text = c.__str__(encoding)
elif isinstance(c, Tag):
s.append(c.__str__(encoding, prettyPrint, indentLevel))
if text and prettyPrint:
text = text.strip()
if text:
if prettyPrint:
s.append(" " * (indentLevel-1))
s.append(text)
if prettyPrint:
s.append("\n")
    return ''.join(s)
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.findAll(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
    return r
def findAll(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.recursiveChildGenerator
if not recursive:
generator = self.childGenerator
    return self._findAll(name, attrs, text, limit, generator, **kwargs)
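# Usage sketch against the BeautifulSoup 3 API shown above (assumes the
# BeautifulSoup 3 package is installed):
from BeautifulSoup import BeautifulSoup
soup = BeautifulSoup('<div><a class="external">x</a><a>y</a></div>')
print(soup.findAll('a', attrs={'class': 'external'}))  # [<a class="external">x</a>]
print(soup.find('a'))                                  # first <a> only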
def _getAttrMap(self):
"""Initializes a map representation of this tag's attributes,
if not already initialized."""
    if not getattr(self, 'attrMap', None):
self.attrMap = {}
for (key, value) in self.attrs:
self.attrMap[key] = value
    return self.attrMap
def convert_charref(self, name):
"""This method fixes a bug in Python's SGMLParser."""
try:
n = int(name)
except ValueError:
return
    if not 0 <= n <= 127: # ASCII ends at 127, not 255
        return
    return self.convert_codepoint(n)
def isSelfClosingTag(self, name):
"""Returns true iff the given string is the name of a
self-closing tag according to this parser."""
return self.SELF_CLOSING_TAGS.has_key(name) \
        or self.instanceSelfClosingTags.has_key(name)
def _popToTag(self, name, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag. If inclusivePop is false, pops the tag
    stack up to but *not* including the most recent instance of
the given tag."""
#print "Popping to %s" % name
if name == self.ROOT_TAG_NAME:
return
numPops = 0
mostRecentTag = None
for i in range(len(self.tagStack)-1, 0, -1):
if name == self.tagStack[i].name:
numPops = len(self.tagStack)-i
break
if not inclusivePop:
numPops = numPops - 1
for i in range(0, numPops):
mostRecentTag = self.popTag()
    return mostRecentTag
def _toStringSubclass(self, text, subclass):
"""Adds a certain piece of text to the tree as a NavigableString
subclass."""
self.endData()
self.handle_data(text)
    self.endData(subclass)
def handle_pi(self, text):
"""Handle a processing instruction as a ProcessingInstruction
object, possibly one with a %SOUP-ENCODING% slot into which an
encoding will be plugged later."""
if text[:3] == "xml":
text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
    self._toStringSubclass(text, ProcessingInstruction)
def handle_charref(self, ref):
"Handle character references as data."
if self.convertEntities:
data = unichr(int(ref))
else:
data = '&#%s;' % ref
    self.handle_data(data)
def handle_entityref(self, ref):
"""Handle entity references as data, possibly converting known
HTML and/or XML entity references to the corresponding Unicode
characters."""
data = None
if self.convertHTMLEntities:
try:
data = unichr(name2codepoint[ref])
except KeyError:
pass
if not data and self.convertXMLEntities:
data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)
if not data and self.convertHTMLEntities and \
not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):
# TODO: We've got a problem here. We're told this is
# an entity reference, but it's not an XML entity
# reference or an HTML entity reference. Nonetheless,
# the logical thing to do is to pass it through as an
# unrecognized entity reference.
#
# Except: when the input is "&carol;" this function
# will be called with input "carol". When the input is
# "AT&T", this function will be called with input
# "T". We have no way of knowing whether a semicolon
# was present originally, so we don't know whether
# this is an unknown entity or just a misplaced
# ampersand.
#
# The more common case is a misplaced ampersand, so I
# escape the ampersand and omit the trailing semicolon.
data = "&%s" % ref
if not data:
# This case is different from the one above, because we
# haven't already gone through a supposedly comprehensive
# mapping of entities to Unicode characters. We might not
# have gone through any mapping at all. So the chances are
# very high that this is a real entity, and not a
# misplaced ampersand.
data = "&%s;" % ref
    self.handle_data(data)
def parse_declaration(self, i):
"""Treat a bogus SGML declaration as raw data. Treat a CDATA
declaration as a CData object."""
j = None
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1:
k = len(self.rawdata)
data = self.rawdata[i+9:k]
j = k+3
self._toStringSubclass(data, CData)
else:
try:
j = SGMLParser.parse_declaration(self, i)
except SGMLParseError:
toHandle = self.rawdata[i:]
self.handle_data(toHandle)
j = i + len(toHandle)
    return j
def start_meta(self, attrs):
"""Beautiful Soup can detect a charset included in a META tag,
try to convert the document to that charset, and re-parse the
document from the beginning."""
httpEquiv = None
contentType = None
contentTypeIndex = None
tagNeedsEncodingSubstitution = False
for i in range(0, len(attrs)):
key, value = attrs[i]
key = key.lower()
if key == 'http-equiv':
httpEquiv = value
elif key == 'content':
contentType = value
contentTypeIndex = i
if httpEquiv and contentType: # It's an interesting meta tag.
match = self.CHARSET_RE.search(contentType)
if match:
if (self.declaredHTMLEncoding is not None or
self.originalEncoding == self.fromEncoding):
# An HTML encoding was sniffed while converting
# the document to Unicode, or an HTML encoding was
# sniffed during a previous pass through the
# document, or an encoding was specified
# explicitly and it worked. Rewrite the meta tag.
def rewrite(match):
return match.group(1) + "%SOUP-ENCODING%"
newAttr = self.CHARSET_RE.sub(rewrite, contentType)
attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
newAttr)
tagNeedsEncodingSubstitution = True
else:
# This is our first pass through the document.
# Go through it again with the encoding information.
newCharset = match.group(3)
if newCharset and newCharset != self.originalEncoding:
self.declaredHTMLEncoding = newCharset
self._feed(self.declaredHTMLEncoding)
raise StopParsing
tag = self.unknown_starttag("meta", attrs)
if tag and tagNeedsEncodingSubstitution:
        tag.containsSubstitutions = True
def _subMSChar(self, orig):
"""Changes a MS smart quote character to an XML or HTML
entity."""
sub = self.MS_CHARS.get(orig)
if type(sub) == types.TupleType:
if self.smartQuotesTo == 'xml':
sub = '&#x%s;' % sub[1]
else:
sub = '&%s;' % sub[0]
    return sub
def _toUnicode(self, data, encoding):
'''Given a string and its encoding, decodes the string into Unicode.
%encoding is a string recognized by encodings.aliases'''
# strip Byte Order Mark (if present)
if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
and (data[2:4] != '\x00\x00'):
encoding = 'utf-16be'
data = data[2:]
elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
and (data[2:4] != '\x00\x00'):
encoding = 'utf-16le'
data = data[2:]
elif data[:3] == '\xef\xbb\xbf':
encoding = 'utf-8'
data = data[3:]
elif data[:4] == '\x00\x00\xfe\xff':
encoding = 'utf-32be'
data = data[4:]
elif data[:4] == '\xff\xfe\x00\x00':
encoding = 'utf-32le'
data = data[4:]
newdata = unicode(data, encoding)
    return newdata
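# A standalone illustration of the BOM sniffing above (Python 2 byte strings):
# a UTF-16LE BOM ('\xff\xfe' not followed by '\x00\x00') selects the utf-16le
# codec, and the BOM itself is dropped before decoding.
data = '\xff\xfe' + u'hi'.encode('utf-16le')
if data[:2] == '\xff\xfe' and data[2:4] != '\x00\x00':
    print(unicode(data[2:], 'utf-16le'))  # hi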
def _detectEncoding(self, xml_data, isHTML=False):
"""Given a document, tries to detect its XML encoding."""
xml_encoding = sniffed_xml_encoding = None
try:
if xml_data[:4] == '\x4c\x6f\xa7\x94':
# EBCDIC
xml_data = self._ebcdic_to_ascii(xml_data)
elif xml_data[:4] == '\x00\x3c\x00\x3f':
# UTF-16BE
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
and (xml_data[2:4] != '\x00\x00'):
# UTF-16BE with BOM
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x3f\x00':
# UTF-16LE
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
(xml_data[2:4] != '\x00\x00'):
# UTF-16LE with BOM
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\x00\x3c':
# UTF-32BE
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x00\x00':
# UTF-32LE
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\xfe\xff':
# UTF-32BE with BOM
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\xff\xfe\x00\x00':
# UTF-32LE with BOM
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
elif xml_data[:3] == '\xef\xbb\xbf':
# UTF-8 with BOM
sniffed_xml_encoding = 'utf-8'
xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
else:
            sniffed_xml_encoding = 'ascii'
    except Exception:
xml_encoding_match = None
xml_encoding_match = re.compile(
'^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
if not xml_encoding_match and isHTML:
regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I)
xml_encoding_match = regexp.search(xml_data)
if xml_encoding_match is not None:
xml_encoding = xml_encoding_match.groups()[0].lower()
if isHTML:
self.declaredHTMLEncoding = xml_encoding
if sniffed_xml_encoding and \
(xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
'iso-10646-ucs-4', 'ucs-4', 'csucs4',
'utf-16', 'utf-32', 'utf_16', 'utf_32',
'utf16', 'u16')):
xml_encoding = sniffed_xml_encoding
    return xml_data, xml_encoding, sniffed_xml_encoding
def shoebot_example(**shoebot_kwargs):
"""
Decorator to run some code in a bot instance.
"""
def decorator(f):
def run():
from shoebot import ShoebotInstallError # https://github.com/shoebot/shoebot/issues/206
print(" Shoebot - %s:" % f.__name__.replace("_", " "))
try:
import shoebot
outputfile = "/tmp/shoebot-%s.png" % f.__name__
bot = shoebot.create_bot(outputfile=outputfile)
f(bot)
bot.finish()
print(' [passed] : %s' % outputfile)
print('')
except ShoebotInstallError as e:
print(' [failed]', e.args[0])
print('')
except Exception:
print(' [failed] - traceback:')
for line in traceback.format_exc().splitlines():
print(' %s' % line)
print('')
return run
    return decorator
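# Usage sketch for the decorator above; hello_rect() is a hypothetical bot
# function, and rendering only happens if shoebot is actually installed.
@shoebot_example()
def hello_rect(bot):
    bot.size(100, 100)
    bot.rect(10, 10, 50, 50)

# hello_rect()   # would write /tmp/shoebot-hello_rect.png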
def _get_center(self):
'''Returns the center point of the path, disregarding transforms.
'''
x = (self.x + self.width / 2)
y = (self.y + self.height / 2)
    return (x, y)
def scale_context_and_center(self, cr):
"""
Scale context based on difference between bot size and widget
"""
bot_width, bot_height = self.bot_size
if self.width != bot_width or self.height != bot_height:
# Scale up by largest dimension
if self.width < self.height:
scale_x = float(self.width) / float(bot_width)
scale_y = scale_x
cr.translate(0, (self.height - (bot_height * scale_y)) / 2.0)
elif self.width > self.height:
scale_y = float(self.height) / float(bot_height)
scale_x = scale_y
cr.translate((self.width - (bot_width * scale_x)) / 2.0, 0)
else:
scale_x = 1.0
scale_y = 1.0
cr.scale(scale_x, scale_y)
        self.input_device.scale_x = scale_x
        self.input_device.scale_y = scale_y
def draw(self, widget, cr):
'''
Draw just the exposed part of the backing store, scaled to fit
'''
if self.bot_size is None:
# No bot to draw yet.
self.draw_default_image(cr)
return
cr = driver.ensure_pycairo_context(cr)
surface = self.backing_store.surface
cr.set_source_surface(surface)
    cr.paint()
def create_rcontext(self, size, frame):
'''
Creates a recording surface for the bot to draw on
:param size: The width and height of bot
'''
self.frame = frame
width, height = size
meta_surface = cairo.RecordingSurface(cairo.CONTENT_COLOR_ALPHA, (0, 0, width, height))
ctx = cairo.Context(meta_surface)
    return ctx
def do_drawing(self, size, frame, cairo_ctx):
'''
Update the backing store from a cairo context and
schedule a redraw (expose event)
:param size: width, height in pixels of bot
    :param frame: frame number that was drawn
:param cairo_ctx: cairo context the bot was drawn on
'''
if self.get_window() and not self.bot_size:
# Get initial size for window
self.set_size_request(*size)
self.bot_size = size
self.backing_store = BackingStore.get_backingstore(self.width, self.height)
cr = pycairo.Context(self.backing_store.surface)
if self.scale_fit:
self.scale_context_and_center(cr)
cairo_ctx = driver.ensure_pycairo_context(cairo_ctx)
cr.set_source_surface(cairo_ctx.get_target())
# Create the cairo context
cr.set_operator(cairo.OPERATOR_SOURCE)
cr.paint()
self.queue_draw()
while Gtk.events_pending():
        Gtk.main_iteration_do(False)
def encode(self, o):
"""
Return a JSON string representation of a Python data structure.
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo": ["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks.
if isinstance(o, basestring):
if isinstance(o, str):
_encoding = self.encoding
if (_encoding is not None
and not (_encoding == 'utf-8')):
o = o.decode(_encoding)
if self.ensure_ascii:
return encode_basestring_ascii(o)
else:
return encode_basestring(o)
# This doesn't pass the iterator directly to ''.join() because the
# exceptions aren't as detailed. The list call should be roughly
# equivalent to the PySequence_Fast that ''.join() would do.
chunks = list(self.iterencode(o))
    return ''.join(chunks)
def iterencode(self, o):
"""
Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
    return self._iterencode(o, markers)
def get_key_map(self):
'''
    Return a dict mapping SHOEBOT_KEY_NAME to GTK_VALUE.
Shoebot key names look like KEY_LEFT, whereas Gdk uses KEY_Left
- Shoebot key names are derived from Nodebox 1, which was a mac
app.
'''
kdict = {}
for gdk_name in dir(Gdk):
nb_name = gdk_name.upper()
kdict[nb_name] = getattr(Gdk, gdk_name)
    return kdict
def _grow(self, generation, rule, angle, length, time=maxint, draw=True):
""" Recurse through the system.
    When a segment is drawn, the LSystem.segment() method will be called.
You can customize this method to create your own visualizations.
It takes an optional time parameter.
    If you divide this parameter by LSystem.duration() you get
    a number between 0.0 and 1.0 that you can use e.g. as an alpha value.
The method also has an id parameter which is a unique number
between 0 and LSystem.segments.
"""
if generation == 0:
        # We are at the bottom of the system, so now we know the total time needed.
self._duration = 1 + maxint-time
if length <= self.threshold:
# Segment length has fallen below the threshold, stop recursing.
self._duration = 1 + maxint-time
return
if rule in self.commands:
# Custom command symbols:
        # If the rule is a key in the LSystem.commands dictionary,
# execute its value which is a function taking 6 parameters:
# lsystem, generation, rule, angle, length and time.
self.commands[rule](self, generation, rule, angle, length, time)
if draw:
# Standard command symbols:
# f signifies a move,
# + and - rotate either left or right, | rotates 180 degrees,
# [ and ] are for push() and pop(), e.g. offshoot branches,
# < and > decrease or increases the segment length,
# ( and ) decrease or increases the rotation angle.
if rule == "f": _ctx.translate(0, -min(length, length*time))
elif rule == "-": _ctx.rotate(max(-angle, -angle*time))
elif rule == "+": _ctx.rotate(min(+angle, +angle*time))
elif rule == "|": _ctx.rotate(180)
elif rule == "[": _ctx.push()
elif rule == "]": _ctx.pop()
if rule in self.rules \
and generation > 0 \
and time > 0:
# Recursion:
# Occurs when there is enough "life" (i.e. generation or time).
# Generation is decreased and segment length scaled down.
# Also, F symbols in the rule have a cost that depletes time.
for cmd in self.rules[rule]:
# Modification command symbols:
if cmd == "F": time -= self.cost
elif cmd == "!": angle = -angle
elif cmd == "(": angle *= 1.1
elif cmd == ")": angle *= 0.9
elif cmd == "<": length *= 0.9
elif cmd == ">": length *= 1.1
self._grow(
generation-1,
cmd,
angle,
length*self.decrease,
time,
draw
)
elif rule == "F" \
or (rule in self.rules and self.rules[rule] == ""):
# Draw segment:
# If the rule is an F symbol or empty (e.g. in Penrose tiles).
# Segment length grows to its full size as time progresses.
self._segments += 1
if draw and time > 0:
length = min(length, length*time)
if self._timed:
self.segment(length, generation, time, id=self._segments)
else:
self.segment(length, generation, None, id=self._segments)
            _ctx.translate(0, -length)
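# A standalone sketch of pure L-system string rewriting, the drawing-free
# core of _grow() above: each generation replaces symbols by their rules.
# expand() is a hypothetical helper for illustration.
def expand(axiom, rules, generations):
    s = axiom
    for _ in range(generations):
        s = "".join(rules.get(ch, ch) for ch in s)
    return s

print(expand("F", {"F": "F+F-F"}, 2))  # F+F-F+F+F-F-F+F-F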
def _output_file(self, frame):
"""
If filename was used output a filename, along with multifile
numbered filenames will be used.
If buff was specified it is returned.
:return: Output buff or filename.
"""
if self.buff:
return self.buff
elif self.multifile:
return self.file_root + "_%03d" % frame + self.file_ext
else:
        return self.filename
def create_rcontext(self, size, frame):
"""
Called when CairoCanvas needs a cairo context to draw on
"""
if self.format == 'pdf':
surface = cairo.PDFSurface(self._output_file(frame), *size)
elif self.format in ('ps', 'eps'):
surface = cairo.PSSurface(self._output_file(frame), *size)
elif self.format == 'svg':
surface = cairo.SVGSurface(self._output_file(frame), *size)
elif self.format == 'surface':
surface = self.target
else:
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, *size)
    return cairo.Context(surface)
def rendering_finished(self, size, frame, cairo_ctx):
"""
Called when CairoCanvas has rendered a bot
"""
surface = cairo_ctx.get_target()
if self.format == 'png':
        surface.write_to_png(self._output_file(frame))
    surface.finish()
    surface.flush()
def output_closure(self, target, file_number=None):
'''
Function to output to a cairo surface
target is a cairo Context or filename
if file_number is set, then files will be numbered
(this is usually set to the current frame number)
'''
def output_context(ctx):
target_ctx = target
target_ctx.set_source_surface(ctx.get_target())
target_ctx.paint()
return target_ctx
def output_surface(ctx):
target_ctx = cairo.Context(target)
target_ctx.set_source_surface(ctx.get_target())
target_ctx.paint()
return target_ctx
def output_file(ctx):
root, extension = os.path.splitext(target)
if file_number:
filename = '%s_%04d%s' % (root, file_number, extension)
else:
filename = target
extension = extension.lower()
if extension == '.png':
surface = ctx.get_target()
            surface.write_to_png(filename)
elif extension == '.pdf':
target_ctx = cairo.Context(cairo.PDFSurface(filename, *self.size_or_default()))
target_ctx.set_source_surface(ctx.get_target())
target_ctx.paint()
        elif extension in ('.ps', '.eps'):
            surface = cairo.PSSurface(filename, *self.size_or_default())
            if extension == '.eps':
                surface.set_eps(True)
            target_ctx = cairo.Context(surface)
            target_ctx.set_source_surface(ctx.get_target())
            target_ctx.paint()
elif extension == '.svg':
target_ctx = cairo.Context(cairo.SVGSurface(filename, *self.size_or_default()))
target_ctx.set_source_surface(ctx.get_target())
target_ctx.paint()
return filename
if isinstance(target, cairo.Context):
return output_context
elif isinstance(target, cairo.Surface):
return output_surface
else:
        return output_file
def _parse(self):
""" Strips links from the definition and gathers them in a links property.
"""
p1 = "\[.*?\](.*?)\[\/.*?\]"
p2 = "\[(.*?)\]"
self.links = []
for p in (p1,p2):
for link in re.findall(p, self.description):
self.links.append(link)
self.description = re.sub(p, "\\1", self.description)
    self.description = self.description.strip()
def create_canvas(src, format=None, outputfile=None, multifile=False, buff=None, window=False, title=None,
fullscreen=None, show_vars=False):
"""
Create canvas and sink for attachment to a bot
    canvas is what draws images; 'sink' is the final consumer of the images
:param src: Defaults for title or outputfile if not specified.
:param format: CairoImageSink image format, if using buff instead of outputfile
:param buff: CairoImageSink buffer object to send output to
:param outputfile: CairoImageSink output filename e.g. "hello.svg"
    :param multifile: CairoImageSink - if True, output a numbered file per frame
:param title: ShoebotWindow - set window title
    :param fullscreen: ShoebotWindow - make the window fullscreen
:param show_vars: ShoebotWindow - display variable window
Two kinds of sink are provided: CairoImageSink and ShoebotWindow
ShoebotWindow
Displays a window to draw shoebot inside.
CairoImageSink
Output to a filename (or files if multifile is set), or a buffer object.
"""
from core import CairoCanvas, CairoImageSink # https://github.com/shoebot/shoebot/issues/206
if outputfile:
sink = CairoImageSink(outputfile, format, multifile, buff)
elif window or show_vars:
from gui import ShoebotWindow
if not title:
if src and os.path.isfile(src):
title = os.path.splitext(os.path.basename(src))[0] + ' - Shoebot'
else:
title = 'Untitled - Shoebot'
sink = ShoebotWindow(title, show_vars, fullscreen=fullscreen)
else:
if src and isinstance(src, cairo.Surface):
outputfile = src
format = 'surface'
elif src and os.path.isfile(src):
outputfile = os.path.splitext(os.path.basename(src))[0] + '.' + (format or 'svg')
else:
outputfile = 'output.svg'
sink = CairoImageSink(outputfile, format, multifile, buff)
canvas = CairoCanvas(sink)
    return canvas
def create_bot(src=None, grammar=NODEBOX, format=None, outputfile=None, iterations=1, buff=None, window=False,
title=None, fullscreen=None, server=False, port=7777, show_vars=False, vars=None, namespace=None):
"""
Create a canvas and a bot with the same canvas attached to it
bot parameters
:param grammar: DRAWBOT or NODEBOX - decides what kind of bot is created
    :param vars: preset dictionary of vars from the caller
canvas parameters:
... everything else ...
See create_canvas for details on those parameters.
"""
canvas = create_canvas(src, format, outputfile, iterations > 1, buff, window, title, fullscreen=fullscreen,
show_vars=show_vars)
if grammar == DRAWBOT:
from shoebot.grammar import DrawBot
bot = DrawBot(canvas, namespace=namespace, vars=vars)
else:
from shoebot.grammar import NodeBot
bot = NodeBot(canvas, namespace=namespace, vars=vars)
if server:
from shoebot.sbio import SocketServer
socket_server = SocketServer(bot, "", port=port)
    return bot
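# Usage sketch for create_bot(); requires a working shoebot install, so the
# calls are shown commented out. size() and rect() are NodeBot commands.
# bot = create_bot(outputfile="hello.svg")
# bot.size(200, 200)
# bot.rect(50, 50, 100, 100)
# bot.finish()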
def run(src,
grammar=NODEBOX,
format=None,
outputfile=None,
iterations=1,
buff=None,
window=True,
title=None,
fullscreen=None,
close_window=False,
server=False,
port=7777,
show_vars=False,
vars=None,
namespace=None,
run_shell=False,
args=[],
verbose=False,
background_thread=True):
"""
    Create and run a bot; the arguments all correspond to sanitized
    commandline options.
:param background_thread: If True then use a background thread.
Other args are split into create_args and run_args
See create_bot for details on create_args
    run_args are passed to bot.run - see NodeBot.run or DrawBot.run
Background thread:
        readline in python is blocking; running the app in a background
        thread frees up the main thread for IO on stdin/stdout, which
        can be used for communication with shoebot when livecoding is
        enabled.
See shoebot.io for implementation of the shell, and the gedit
plugin for an example of using livecoding.
"""
    # Munge shoebot sys.argv: remove shoebot parameters so sbot can be used
    # in place of the python interpreter (e.g. for sphinx).
    sys.argv = [sys.argv[0]] + args
# arguments for create_bot
create_args = [src,
grammar,
format,
outputfile,
iterations,
buff,
window,
title,
fullscreen,
server,
port,
show_vars]
create_kwargs = dict(vars=vars, namespace=namespace)
run_args = [src]
run_kwargs = dict(
iterations=iterations,
frame_limiter=window,
verbose=verbose,
        # Run forever unless: 1. windowed mode is off, 2. --close-window was
        # specified, or 3. an output file was indicated.
        run_forever=window and not (close_window or bool(outputfile)),
)
# Run shoebot in a background thread so we can run a cmdline shell in the current thread
if background_thread:
sbot_thread = ShoebotThread(
create_args=create_args,
create_kwargs=create_kwargs,
run_args=run_args,
run_kwargs=run_kwargs,
send_sigint=run_shell
)
sbot_thread.start()
sbot = sbot_thread.sbot
else:
print('background thread disabled')
# This is a debug option, things should always work using the
# background thread (crosses fingers)
if run_shell:
            # python readline is blocking, so the UI must run in a separate
            # thread
            raise ValueError('UI must run in a separate thread from the shell; the shell needs the main thread')
sbot_thread = None
sbot = create_bot(*create_args, **create_kwargs)
sbot.run(*run_args, **run_kwargs)
if run_shell:
import shoebot.sbio.shell
shell = shoebot.sbio.shell.ShoebotCmd(sbot, trusted=True)
try:
shell.cmdloop()
except KeyboardInterrupt as e:
publish_event(QUIT_EVENT) # Handle Ctrl-C
# KeyboardInterrupt is generated by os.kill from the other thread
if verbose:
raise
else:
return
elif background_thread:
try:
while sbot_thread.is_alive():
sleep(1)
except KeyboardInterrupt:
publish_event(QUIT_EVENT)
if all((background_thread, sbot_thread)):
sbot_thread.join()
return sbot |
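# Hedged sketch of calling run() as a library instead of via the sbot CLI
# (the filename is an example). With an outputfile, run_forever is False, so
# the bot renders the requested iterations and returns:
sbot = run("hello.bot", outputfile="hello.png", window=False,
           iterations=1, background_thread=False) |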
def save_as(self):
"""
Return True if the buffer was saved
"""
chooser = ShoebotFileChooserDialog(_('Save File'), None, Gtk.FileChooserAction.SAVE,
(Gtk.STOCK_SAVE, Gtk.ResponseType.ACCEPT,
Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL))
chooser.set_do_overwrite_confirmation(True)
chooser.set_transient_for(self)
saved = chooser.run() == Gtk.ResponseType.ACCEPT
if saved:
old_filename = self.filename
self.source_buffer.filename = chooser.get_filename()
if not self.save():
self.filename = old_filename
chooser.destroy()
return saved |
def add_variables(self):
"""
Add all widgets to specified vbox
:param container:
:return:
"""
for k, v in self.bot._vars.items():
if not hasattr(v, 'type'):
raise AttributeError(
'%s is not a Shoebot Variable - see https://shoebot.readthedocs.io/en/latest/commands.html#dynamic-variables' % k)
self.add_variable(v) |
def update_var(self, name, value):
"""
:return: success, err_msg_if_failed
"""
widget = self.widgets.get(name)
if widget is None:
return False, 'No widget found matching, {}'.format(name)
try:
if isinstance(widget, Gtk.CheckButton):
widget.set_active(value)
return True, widget.get_active()
elif isinstance(widget, Gtk.Entry):
widget.set_text(value)
return True, widget.get_text()
else:
widget.set_value(value)
return True, widget.get_value()
except Exception as e:
return False, str(e) |
    def widget_changed(self, widget, v):
        ''' Called when a slider is adjusted. '''
        # Set the appropriate bot var from the widget value.
        if v.type is NUMBER:
            value = widget.get_value()
        elif v.type is BOOLEAN:
            value = widget.get_active()
        elif v.type is TEXT:
            value = widget.get_text()
        else:
            return
        self.bot._namespace[v.name] = value
        self.bot._vars[v.name].value = value  # TODO: confirm this is the right way to sync vars - stu
        publish_event(VARIABLE_UPDATED_EVENT, v)  # pretty dumb for now
def var_added(self, v):
"""
var was added in the bot while it ran, possibly
by livecoding
:param v:
:return:
"""
self.add_variable(v)
self.window.set_size_request(400, 35 * len(self.widgets.keys()))
self.window.show_all() |
def var_deleted(self, v):
"""
var was added in the bot
:param v:
:return:
"""
widget = self.widgets[v.name]
# widgets are all in a single container ..
parent = widget.get_parent()
self.container.remove(parent)
del self.widgets[v.name]
self.window.set_size_request(400, 35 * len(self.widgets.keys()))
self.window.show_all() |
def parse(svg, cached=False, _copy=True):
""" Returns cached copies unless otherwise specified.
"""
if not cached:
dom = parser.parseString(svg)
paths = parse_node(dom, [])
else:
id = _cache.id(svg)
if not _cache.has_key(id):
dom = parser.parseString(svg)
_cache.save(id, parse_node(dom, []))
paths = _cache.load(id, _copy)
return paths |
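# Hedged usage sketch of parse(); "flower.svg" is an example file. The fill,
# stroke and closed attributes are the ones added by add_color_info below:
svg = open("flower.svg").read()
for path in parse(svg, cached=True):
    print(path.fill, path.stroke, path.closed) |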
def get_attribute(element, attribute, default=0):
""" Returns XML element's attribute, or default if none.
"""
a = element.getAttribute(attribute)
if a == "":
return default
return a |
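# Self-contained sketch of get_attribute with xml.dom.minidom, which matches
# the DOM API used here (the SVG snippet is invented for illustration):
from xml.dom import minidom

rect = minidom.parseString('<rect x="10" y="20" width="30"/>').documentElement
print(get_attribute(rect, "x"))      # '10' - attributes come back as strings
print(get_attribute(rect, "rx", 0))  # 0 - the default for a missing attribute |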
def parse_node(node, paths=None, ignore=["pattern"]):
    """ Recurse the node tree and find drawable tags.
        Recurses all the children in the node.
        If a child is something we can draw,
        a line, rect, oval or path,
        parse it to a PathElement drawable with drawpath()
    """
    # Avoid a mutable default argument: a shared list would accumulate
    # paths across separate top-level calls.
    if paths is None:
        paths = []
# Ignore paths in Illustrator pattern swatches etc.
if node.nodeType == node.ELEMENT_NODE and node.tagName in ignore:
return []
if node.hasChildNodes():
for child in node.childNodes:
paths = parse_node(child, paths)
if node.nodeType == node.ELEMENT_NODE:
if node.tagName == "line":
paths.append(parse_line(node))
elif node.tagName == "rect":
paths.append(parse_rect(node))
elif node.tagName == "circle":
paths.append(parse_circle(node))
elif node.tagName == "ellipse":
paths.append(parse_oval(node))
elif node.tagName == "polygon":
paths.append(parse_polygon(node))
elif node.tagName == "polyline":
paths.append(parse_polygon(node))
elif node.tagName == "path":
paths.append(parse_path(node))
if node.tagName in ("line", "rect", "circle", "ellipse", "polygon", "polyline", "path"):
paths[-1] = parse_transform(node, paths[-1])
paths[-1] = add_color_info(node, paths[-1])
return paths |
def parse_transform(e, path):
""" Transform the path according to a defined matrix.
Attempts to extract a transform="matrix()|translate()" attribute.
Transforms the path accordingly.
"""
t = get_attribute(e, "transform", default="")
for mode in ("matrix", "translate"):
if t.startswith(mode):
v = t.replace(mode, "").lstrip("(").rstrip(")")
v = v.replace(", ", ",").replace(" ", ",")
v = [float(x) for x in v.split(",")]
from nodebox.graphics import Transform
t = Transform()
if mode == "matrix":
t._set_matrix(v)
elif mode == "translate":
t.translate(*v)
path = t.transformBezierPath(path)
break
# Transformations can also be defined as <g transform="matrix()"><path /><g>
# instead of <g><path transform="matrix() /></g>.
    e = e.parentNode
    if e is not None and e.nodeType == e.ELEMENT_NODE and e.tagName == "g":
        path = parse_transform(e, path)
return path |
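# A minimal sketch of the attribute munging above: normalise the separators
# in a transform="matrix(...)" value and split it into six floats.
t = "matrix(0.866, -0.5 0.5, 0.866 10 20)"
v = t.replace("matrix", "").lstrip("(").rstrip(")")
v = v.replace(", ", ",").replace(" ", ",")
print([float(x) for x in v.split(",")])
# [0.866, -0.5, 0.5, 0.866, 10.0, 20.0] |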
def add_color_info(e, path):
""" Expand the path with color information.
Attempts to extract fill and stroke colors
from the element and adds it to path attributes.
"""
_ctx.colormode(RGB, 1.0)
def _color(hex, alpha=1.0):
if hex == "none": return None
n = int(hex[1:],16)
r = (n>>16)&0xff
g = (n>>8)&0xff
b = n&0xff
return _ctx.color(r/255.0, g/255.0, b/255.0, alpha)
path.fill = (0,0,0,0)
path.stroke = (0,0,0,0)
path.strokewidth = 0
# See if we can find an opacity attribute,
# which is the color's alpha.
alpha = get_attribute(e, "opacity", default="")
if alpha == "":
alpha = 1.0
else:
alpha = float(alpha)
# Colors stored as fill="" or stroke="" attributes.
    try: path.fill = _color(get_attribute(e, "fill", default="#000000"), alpha)
except:
pass
try: path.stroke = _color(get_attribute(e, "stroke", default="none"), alpha)
except:
pass
try: path.strokewidth = float(get_attribute(e, "stroke-width", default="1"))
except:
pass
# Colors stored as a CSS style attribute, for example:
# style="fill:#ff6600;stroke:#ffe600;stroke-width:0.06742057"
style = get_attribute(e, "style", default="").split(";")
for s in style:
try:
if s.startswith("fill:"):
path.fill = _color(s.replace("fill:", ""))
elif s.startswith("stroke:"):
path.stroke = _color(s.replace("stroke:", ""))
elif s.startswith("stroke-width:"):
path.strokewidth = float(s.replace("stroke-width:", ""))
except:
pass
# A path with beginning and ending coordinate
# at the same location is considered closed.
# Unless it contains a MOVETO somewhere in the middle.
path.closed = False
if path[0].x == path[len(path)-1].x and \
path[0].y == path[len(path)-1].y:
path.closed = True
        for i in range(1, len(path)-1):
if path[i].cmd == MOVETO:
path.closed = False
return path |
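# Standalone sketch of the hex-to-RGB unpacking in _color above ("ff6600" is
# an example value): shift and mask the 24-bit integer into channel bytes.
n = int("ff6600", 16)
r, g, b = (n >> 16) & 0xff, (n >> 8) & 0xff, n & 0xff
print(r / 255.0, g / 255.0, b / 255.0)  # 1.0 0.4 0.0 |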
def download(self, size=SIZE_LARGE, thumbnail=False, wait=60, asynchronous=False):
""" Downloads this image to cache.
Calling the download() method instantiates an asynchronous URLAccumulator.
Once it is done downloading, this image will have its path property
set to an image file in the cache.
"""
        if thumbnail == True:  # backwards compatibility
            size = SIZE_THUMBNAIL
self._size = disambiguate_size(size)
if self._size == SIZE_THUMBNAIL:
url = self.url.replace("/preview/", "/med/")
else:
url = self.url
cache = "morguefile"
extension = os.path.splitext(url)[1]
URLAccumulator.__init__(self, url, wait, asynchronous, cache, extension, 2)
if not asynchronous:
return self.path |
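# Hedged usage sketch (commented out since it needs a live morgueFile image
# object; names follow the code above):
# path = img.download(size=SIZE_THUMBNAIL, asynchronous=False)
# print(path)  # local path of the cached image file |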
def copy(self, graph):
""" Returns a copy of the event handler, remembering the last node clicked.
"""
e = events(graph, self._ctx)
e.clicked = self.clicked
return e |
def update(self):
""" Interacts with the graph by clicking or dragging nodes.
Hovering a node fires the callback function events.hover().
Clicking a node fires the callback function events.click().
"""
if self.mousedown:
# When not pressing or dragging, check each node.
if not self.pressed and not self.dragged:
for n in self.graph.nodes:
if self.mouse in n:
self.pressed = n
break
# If a node is pressed, check if a drag is started.
            elif self.pressed and self.mouse not in self.pressed:
self.dragged = self.pressed
self.pressed = None
# Drag the node (right now only for springgraphs).
elif self.dragged and self.graph.layout.type == "spring":
self.drag(self.dragged)
self.graph.layout.i = min(100, max(2, self.graph.layout.n-100))
# Mouse is clicked on a node, fire callback.
elif self.pressed and self.mouse in self.pressed:
self.clicked = self.pressed
self.pressed = None
self.graph.layout.i = 2
self.click(self.clicked)
# Mouse up.
else:
self.hovered = None
self.pressed = None
self.dragged = None
# Hovering over a node?
for n in self.graph.nodes:
if self.mouse in n:
self.hovered = n
self.hover(n)
break |
def drag(self, node):
""" Drags given node to mouse location.
"""
dx = self.mouse.x - self.graph.x
dy = self.mouse.y - self.graph.y
# A dashed line indicates the drag vector.
s = self.graph.styles.default
self._ctx.nofill()
self._ctx.nostroke()
if s.stroke:
self._ctx.strokewidth(s.strokewidth)
            self._ctx.stroke(
                s.stroke.r,
                s.stroke.g,
                s.stroke.b,
                0.75
            )
p = self._ctx.line(node.x, node.y, dx, dy, draw=False)
try: p._nsBezierPath.setLineDash_count_phase_([2,4], 2, 50)
except:
pass
self._ctx.drawpath(p)
r = node.__class__(None).r * 0.75
self._ctx.oval(dx-r/2, dy-r/2, r, r)
node.vx = dx / self.graph.d
node.vy = dy / self.graph.d |
def hover(self, node):
""" Displays a popup when hovering over a node.
"""
        if self.popup is False:
            return
        if self.popup is True or self.popup.node != node:
            if node.id in self.popup_text:
texts = self.popup_text[node.id]
else:
texts = None
self.popup = popup(self._ctx, node, texts)
self.popup.draw() |
def textpath(self, i):
""" Returns a cached textpath of the given text in queue.
"""
if len(self._textpaths) == i:
self._ctx.font(self.font, self.fontsize)
txt = self.q[i]
if len(self.q) > 1:
# Indicate current text (e.g. 5/13).
txt += " ("+str(i+1)+"/" + str(len(self.q))+")"
p = self._ctx.textpath(txt, 0, 0, width=self._w)
h = self._ctx.textheight(txt, width=self._w)
self._textpaths.append((p, h))
return self._textpaths[i] |
def update(self):
""" Rotates the queued texts and determines display time.
"""
if self.delay > 0:
# It takes a while for the popup to appear.
            self.delay -= 1
            return
if self.fi == 0:
# Only one text in queue, displayed infinitely.
if len(self.q) == 1:
self.fn = float("inf")
# Else, display time depends on text length.
else:
self.fn = len(self.q[self.i]) / self.speed
self.fn = max(self.fn, self.mf)
self.fi += 1
if self.fi > self.fn:
# Rotate to the next text in queue.
self.fi = 0
self.i = (self.i+1) % len(self.q) |
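# Worked sketch of the display-time rule above: frames = len(text) / speed,
# clamped to a minimum of mf frames (the speed and mf values are examples).
speed, mf = 0.5, 30
for txt in ("hi", "a much longer popup text"):
    fn = max(len(txt) / speed, mf)
    print("%r -> %s frames" % (txt, fn))
# 'hi' -> 30 frames (clamped); 'a much longer popup text' -> 48.0 frames |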
def draw(self):
""" Draws a popup rectangle with a rotating text queue.
"""
if len(self.q) > 0:
self.update()
if self.delay == 0:
# Rounded rectangle in the given background color.
p, h = self.textpath(self.i)
f = self.fontsize
self._ctx.fill(self.background)
self._ctx.rect(
self.node.x + f*1.0,
self.node.y + f*0.5,
self._w + f,
h + f*1.5,
roundness=0.2
)
# Fade in/out the current text.
alpha = 1.0
if self.fi < 5:
alpha = 0.2 * self.fi
if self.fn-self.fi < 5:
alpha = 0.2 * (self.fn-self.fi)
self._ctx.fill(
self.text.r,
self.text.g,
self.text.b,
self.text.a * alpha
)
self._ctx.translate(self.node.x + f*2.0, self.node.y + f*2.5)
self._ctx.drawpath(p) |