Search is not available for this dataset
text
stringlengths
75
104k
def nearest_hue(self, primary=False):
    """Return the name of the nearest named hue.

    For example, for an indigo color (between blue and violet) the
    return value is "violet"; with primary=True it is "purple".
    Primary colors leave out the fuzzy lime, teal, cyan, azure and
    violet hues.
    """
    if self.is_black:
        return "black"
    elif self.is_white:
        return "white"
    elif self.is_grey:
        return "grey"
    if primary:
        hues = primary_hues
    else:
        hues = named_hues.keys()
    nearest, d = "", 1.0
    for hue in hues:
        # Wrapped distance along the hue circle; hoisted so it is
        # computed once per hue instead of twice.
        dist = abs(self.hue - named_hues[hue]) % 1
        if dist < d:
            nearest, d = hue, dist
    return nearest
def blend(self, clr, factor=0.5):
    """Return a new color that is a mix of this color and clr.

    factor 0.0 yields this color, 1.0 yields clr.
    """
    w = 1 - factor
    mixed_r = self.r * w + clr.r * factor
    mixed_g = self.g * w + clr.g * factor
    mixed_b = self.b * w + clr.b * factor
    mixed_a = self.a * w + clr.a * factor
    return Color(mixed_r, mixed_g, mixed_b, mixed_a, mode="rgb")
def distance(self, clr):
    """Return the Euclidean distance between two colors (0.0-1.0).

    Colors are placed on a sphere: hue is the angle around the
    center, saturation the distance from the center, brightness the
    elevation.
    """
    def polar(angle, dist):
        # Project (angle in degrees, radius) onto cartesian x/y.
        return cos(radians(angle)) * dist, sin(radians(angle)) * dist
    x0, y0 = polar(self.h * 360, self.s)
    x1, y1 = polar(clr.h * 360, clr.s)
    z0, z1 = self.brightness, clr.brightness
    return sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2 + (z1 - z0) ** 2)
def swatch(self, x, y, w=35, h=35, roundness=0):
    """ Rectangle swatch for this color.

    Sets the drawing context fill to this color, then draws a
    w x h rectangle at (x, y); roundness controls corner rounding.
    """
    _ctx.fill(self)
    _ctx.rect(x, y, w, h, roundness)
def image_to_rgb(self, path, n=10):
    """Append n colors sampled from random pixels in the image at path.

    Requires PIL/Pillow to read pixel values.
    F. Albers: http://nodebox.net/code/index.php/shared_2007-06-11-11-37-05

    NOTE(review): assumes the image yields 3- or 4-tuple pixels
    (RGB/RGBA); single-band ("L") images would fail to unpack — confirm
    callers only pass RGB(A) images.
    """
    from PIL import Image
    img = Image.open(path)
    pixels = img.getdata()
    for i in _range(n):
        # The original wrapped choice() in a lambda whose parameter
        # shadowed the pixel data; call it directly instead.
        rgba = _list(choice(pixels))
        if len(rgba) == 3:
            rgba.append(255)
        r, g, b, a = [v / 255.0 for v in rgba]
        clr = color(r, g, b, a, mode="rgb")
        self.append(clr)
def context_to_rgb(self, str):
    """Return the colors that have the given word in their context.

    For example "anger" appears in black, orange and red contexts,
    so those three colors are returned.
    """
    matches = []
    for clr in context:
        for tag in context[clr]:
            # A prefix match in either direction counts as a hit.
            if tag.startswith(str) or str.startswith(tag):
                matches.append(clr)
                break
    return [color(name) for name in matches]
def _context(self):
    """Return the intersection of each color's context tags.

    Finds the nearest named hue of each color and keeps only the
    tags shared by every hue's context. E.g. yellow, deeppink and
    olive yield: femininity, friendship, happiness, joy.
    """
    common = None
    overlap = []
    for clr in self:
        overlap = []
        if clr.is_black:
            name = "black"
        elif clr.is_white:
            name = "white"
        elif clr.is_grey:
            name = "grey"
        else:
            name = clr.nearest_hue(primary=True)
        # Dark orange reads as brown.
        if name == "orange" and clr.brightness < 0.6:
            name = "brown"
        tags = context[name]
        if common is None:
            common = tags
        else:
            for tag in tags:
                if tag in common and tag not in overlap:
                    overlap.append(tag)
            common = overlap
    overlap.sort()
    return overlap
def copy(self):
    """Return a deep copy of the list (colors are duplicated too)."""
    duplicates = [color(c.r, c.g, c.b, c.a, mode="rgb") for c in self]
    return ColorList(duplicates, name=self.name, tags=self.tags)
def _darkest(self): """ Returns the darkest color from the list. Knowing the contrast between a light and a dark swatch can help us decide how to display readable typography. """ min, n = (1.0, 1.0, 1.0), 3.0 for clr in self: if clr.r + clr.g + clr.b < n: min, n = clr, clr.r + clr.g + clr.b return min
def _average(self):
    """Return one average color for the colors in the list."""
    n = len(self)
    r = sum([clr.r for clr in self]) / n
    g = sum([clr.g for clr in self]) / n
    b = sum([clr.b for clr in self]) / n
    a = sum([clr.alpha for clr in self]) / n
    return color(r, g, b, a, mode="rgb")
def sort_by_distance(self, reversed=False):
    """Return a list with the smallest distance between two
    neighboring colors.

    The algorithm has a factorial complexity so it may run slow.
    """
    if len(self) == 0:
        return ColorList()
    # Find the darkest color in the list.
    root = self[0]
    for clr in self[1:]:
        if clr.brightness < root.brightness:
            root = clr
    # Remove the darkest color from the stack,
    # put it in the sorted list as starting element.
    stack = [clr for clr in self]
    stack.remove(root)
    sorted = [root]
    # Repeatedly find the stack color closest to the last sorted
    # color and move it over.
    while len(stack) > 1:
        closest, distance = stack[0], stack[0].distance(sorted[-1])
        for clr in stack[1:]:
            d = clr.distance(sorted[-1])
            if d < distance:
                closest, distance = clr, d
        stack.remove(closest)
        sorted.append(closest)
    # Guard: a single-color list leaves the stack empty here; the
    # original indexed stack[0] unconditionally and raised IndexError.
    if stack:
        sorted.append(stack[0])
    if reversed:
        _list.reverse(sorted)
    return ColorList(sorted)
def _sorted_copy(self, comparison, reversed=False):
    """Return a sorted copy with the colors arranged according to
    the given comparison."""
    result = self.copy()
    # Call list.sort unbound — presumably ColorList overrides sort()
    # with a different signature (see cluster_sort); verify.
    _list.sort(result, comparison)
    if reversed:
        _list.reverse(result)
    return result
def cluster_sort(self, cmp1="hue", cmp2="brightness", reversed=False, n=12):
    """ Sorts the list by cmp1, then cuts it into n pieces which are
    sorted by cmp2.

    If you want to cluster by hue, use n=12 (since there are 12
    primary/secondary hues). The resulting list will not contain n
    even slices: n is used rather to slice up the cmp1 property of
    the colors, e.g. cmp1=brightness and n=3 will cluster colors by
    brightness >= 0.66, 0.33, 0.0
    """
    sorted = self.sort(cmp1)
    clusters = ColorList()
    # d is the current upper threshold on the cmp1 property;
    # i marks the start of the current slice.
    d = 1.0
    i = 0
    for j in _range(len(sorted)):
        if getattr(sorted[j], cmp1) < d:
            # Close the slice [i:j], sort it by the secondary
            # property, and lower the threshold by one bucket.
            clusters.extend(sorted[i:j].sort(cmp2))
            d -= 1.0 / n
            i = j
    # Flush the final slice.
    clusters.extend(sorted[i:].sort(cmp2))
    if reversed:
        _list.reverse(clusters)
    return clusters
def reverse(self):
    """Return a reversed copy of the list."""
    mirrored = ColorList.copy(self)
    _list.reverse(mirrored)
    return mirrored
def repeat(self, n=2, oscillate=False, callback=None):
    """Return a list that repeats this list n times.

    When oscillate is True, alternate direction on each repetition
    (end to beginning, then beginning to end, and so on). An optional
    callback transforms the source list between repetitions.
    """
    result = ColorList()
    source = ColorList.copy(self)
    for i in _range(n):
        result.extend(source)
        if oscillate:
            source = source.reverse()
        if callback:
            source = callback(source)
    return result
def swatch(self, x, y, w=35, h=35, padding=0, roundness=0):
    """Draw a vertical stack of rectangle swatches, one per color."""
    dy = y
    for clr in self:
        clr.swatch(x, dy, w, h, roundness)
        dy += h + padding
def swarm(self, x, y, r=100):
    """ Fancy random ovals for all the colors in the list.

    Draws r*3 randomly rotated, randomly sized ovals around (x, y),
    each filled and stroked with random colors from this list.
    """
    # Remember the current stroke color and width so they can be
    # restored after drawing.
    sc = _ctx.stroke(0, 0, 0, 0)
    sw = _ctx.strokewidth(0)
    _ctx.push()
    _ctx.transform(_ctx.CORNER)
    _ctx.translate(x, y)
    for i in _range(r * 3):
        # Fill with a randomly faded copy of a random list color.
        clr = choice(self).copy()
        clr.alpha -= 0.5 * random()
        _ctx.fill(clr)
        clr = choice(self)
        _ctx.stroke(clr)
        _ctx.strokewidth(10 * random())
        _ctx.rotate(360 * random())
        r2 = r * 0.5 * random()
        _ctx.oval(r * random(), 0, r2, r2)
    _ctx.pop()
    # Restore the previous stroke state (or disable stroking if
    # there was none).
    _ctx.strokewidth(sw)
    if sc is None:
        _ctx.nostroke()
    else:
        _ctx.stroke(sc)
def _interpolate(self, colors, n=100):
    """Return n intermediary colors for the given list of colors,
    linearly interpolating RGBA between consecutive base colors."""
    gradient = []
    last = len(colors) - 1
    for i in _range(n):
        # Index of the base color this step interpolates from.
        x = int(1.0 * i / n * last)
        x = min(x, last)
        y = min(x + 1, last)
        # Fractional position between base colors x and y.
        base = 1.0 * n / last * x
        d = (i - base) / (1.0 * n / last)
        r = colors[x].r * (1 - d) + colors[y].r * d
        g = colors[x].g * (1 - d) + colors[y].g * d
        b = colors[x].b * (1 - d) + colors[y].b * d
        a = colors[x].a * (1 - d) + colors[y].a * d
        gradient.append(color(r, g, b, a, mode="rgb"))
    gradient.append(colors[-1])
    return gradient
def _cache(self):
    """ Populates the list with a number of gradient colors.

    The list has Gradient.steps colors that interpolate between
    the fixed base Gradient.colors.

    The spread parameter controls the midpoint of the gradient,
    you can shift it right and left. A separate gradient is
    calculated for each half and then glued together.
    """
    n = self.steps
    # Only one color in base list.
    if len(self._colors) == 1:
        ColorList.__init__(self, [self._colors[0] for i in _range(n)])
        return
    # Expand the base list so we can chop more accurately.
    colors = self._interpolate(self._colors, 40)
    # Chop into left half and right half.
    # Make sure their ending and beginning match colors.
    # NOTE(review): len(colors) / 2 relies on Python 2 integer
    # division; under Python 3 this would need // — confirm target.
    left = colors[:len(colors) / 2]
    right = colors[len(colors) / 2:]
    left.append(right[0])
    right.insert(0, left[-1])
    # Calculate left and right gradient proportionally to spread.
    gradient = self._interpolate(left, int(n * self.spread))[:-1]
    gradient.extend(
        self._interpolate(right, n - int(n * self.spread))[1:]
    )
    # Out-of-range spread values: clamp the result to n colors.
    if self.spread > 1:
        gradient = gradient[:n]
    if self.spread < 0:
        gradient = gradient[-n:]
    ColorList.__init__(self, gradient)
def copy(self, clr=None, d=0.0):
    """Return a copy of the range.

    Optionally, supply a color to get a range copy limited to the
    hue of that color (varied by d).
    """
    cr = ColorRange()
    cr.name = self.name
    # Deep-copy the component definitions since they may be
    # lists/tuples of (min, max) pairs.
    for attr in ("h", "s", "b", "a"):
        setattr(cr, attr, deepcopy(getattr(self, attr)))
    cr.grayscale = self.grayscale
    if not self.grayscale:
        cr.black = self.black.copy()
        cr.white = self.white.copy()
    if clr != None:
        cr.h, cr.a = clr.h + d * (random() * 2 - 1), clr.a
    return cr
def color(self, clr=None, d=0.035):
    """Return a color with random values in the defined h, s, b, a
    ranges.

    If a color is given, use that color's hue and alpha and generate
    saturation and brightness from the shade; the hue is varied by d.
    This way a "warm" range seeded with a red color generates all
    kinds of warm red colors.
    """
    # Revert to grayscale for black, white and grey hues.
    if clr != None and not isinstance(clr, Color):
        clr = color(clr)
    if clr != None and not self.grayscale:
        if clr.is_black:
            return self.black.color(clr, d)
        if clr.is_white:
            return self.white.color(clr, d)
        if clr.is_grey:
            return choice(
                (self.black.color(clr, d), self.white.color(clr, d))
            )
    h, s, b, a = self.h, self.s, self.b, self.a
    if clr != None:
        h, a = clr.h + d * (random() * 2 - 1), clr.a
    hsba = []
    for v in [h, s, b, a]:
        # Each component may be a list of (lo, hi) pairs, one pair,
        # or a single fixed value.
        if isinstance(v, _list):
            lo, hi = choice(v)
        elif isinstance(v, tuple):
            lo, hi = v
        else:
            lo, hi = v, v
        hsba.append(lo + (hi - lo) * random())
    h, s, b, a = hsba
    return color(h, s, b, a, mode="hsb")
def contains(self, clr):
    """ Returns True if the given color is part of this color range.

    Checks whether each h, s, b, a component of the color falls
    within the defined range for that component. If the given color
    is grayscale, checks against the definitions for black and white.
    Also accepts a list of colors; all must be contained.
    """
    # Unwrap the list FIRST: the original tested isinstance(clr, Color)
    # before the list check, so the documented list handling was
    # unreachable (lists always returned False).
    if not isinstance(clr, _list):
        clr = [clr]
    for clr in clr:
        if not isinstance(clr, Color):
            return False
        if clr.is_grey and not self.grayscale:
            return (self.black.contains(clr) or
                    self.white.contains(clr))
        for r, v in [(self.h, clr.h),
                     (self.s, clr.s),
                     (self.b, clr.brightness),
                     (self.a, clr.a)]:
            # Normalize r to a list of (lo, hi) pairs.
            if isinstance(r, _list):
                pass
            elif isinstance(r, tuple):
                r = [r]
            else:
                r = [(r, r)]
            for lo, hi in r:
                if not (lo <= v <= hi):
                    return False
    return True
def _weight_by_hue(self):
    """ Returns a list of
    (total weight, normalized total weight, hue, ranges)-tuples.

    ColorTheme is made up of (color, range, weight) tuples. For
    consistency with XML-output in the old Prism format (<color>s
    made up of <shade>s) we need a group weight per different hue.
    Hues are grouped as a single unit (e.g. dark red, intense red,
    weak red), after which the dimensions (rows/columns) are
    determined by swatch().
    """
    grouped = {}
    for clr, rng, weight in self.ranges:
        h = clr.nearest_hue(primary=False)
        # "in" instead of Python-2-only dict.has_key; also dropped
        # an unused "weights" local from the original.
        if h in grouped:
            ranges, total_weight = grouped[h]
            ranges.append((clr, rng, weight))
            grouped[h] = (ranges, total_weight + weight)
        else:
            grouped[h] = ([(clr, rng, weight)], weight)
    # Calculate the normalized (0.0-1.0) weight for each hue,
    # and transform the dictionary to a list.
    s = 1.0 * sum([w for r, w in grouped.values()])
    grouped = [(grouped[h][1], grouped[h][1] / s, h, grouped[h][0])
               for h in grouped]
    grouped.sort()
    grouped.reverse()
    return grouped
def _xml(self):
    """ Returns the color information as XML.

    The XML has the following structure:
    <colors query="">
        <color name="" weight="" />
            <rgb r="" g="" b="" />
            <shade name="" weight="" />
        </color>
    </colors>

    Notice that ranges are stored by name and retrieved in the
    _load() method with the shade() command - and are thus expected
    to be shades (e.g. intense, warm, ...) unless the shade()
    command would return any custom ranges as well. This can be done
    by appending custom ranges to the shades list.
    """
    grouped = self._weight_by_hue()
    xml = "<colors query=\"" + self.name + "\" tags=\"" + ", ".join(self.tags) + "\">\n\n"
    for total_weight, normalized_weight, hue, ranges in grouped:
        # Colors matching the theme's configured blue are written
        # out under the name "blue".
        if hue == self.blue:
            hue = "blue"
        clr = color(hue)
        xml += "\t<color name=\"" + clr.name + "\" weight=\"" + str(normalized_weight) + "\">\n "
        xml += "\t\t<rgb r=\"" + str(clr.r) + "\" g=\"" + str(clr.g) + "\" "
        xml += "b=\"" + str(clr.b) + "\" a=\"" + str(clr.a) + "\" />\n "
        # Each shade's weight is stored relative to its hue group.
        for clr, rng, wgt in ranges:
            xml += "\t\t<shade name=\"" + str(rng) + "\" weight=\"" + str(wgt / total_weight) + "\" />\n "
        xml = xml.rstrip(" ") + "\t</color>\n\n"
    xml += "</colors>"
    return xml
def _save(self): """ Saves the color information in the cache as XML. """ if not os.path.exists(self.cache): os.makedirs(self.cache) path = os.path.join(self.cache, self.name + ".xml") f = open(path, "w") f.write(self.xml) f.close()
def _load(self, top=5, blue="blue", archive=None, member=None):
    """ Loads a theme from aggregated web data.

    The data must be old-style Prism XML: <color>s consisting of
    <shade>s. Colors named "blue" will be overridden with the blue
    parameter.

    archive can be a file like object (e.g. a ZipFile) and will be
    used along with 'member' if specified.
    """
    if archive is None:
        path = os.path.join(self.cache, self.name + ".xml")
        xml = open(path).read()
    else:
        assert member is not None
        xml = archive.read(member)
    dom = parseString(xml).documentElement
    attr = lambda e, a: e.attributes[a].value
    for e in dom.getElementsByTagName("color")[:top]:
        w = float(attr(e, "weight"))
        try:
            # Preferred: build the color from its stored RGBA values.
            rgb = e.getElementsByTagName("rgb")[0]
            clr = color(
                float(attr(rgb, "r")),
                float(attr(rgb, "g")),
                float(attr(rgb, "b")),
                float(attr(rgb, "a")),
                mode="rgb"
            )
            try:
                clr.name = attr(e, "name")
                if clr.name == "blue":
                    clr = color(blue)
            except:
                # Best-effort: a missing name attribute is ignored.
                pass
        except:
            # Fallback: no <rgb> node — build the color by name.
            name = attr(e, "name")
            if name == "blue":
                name = blue
            clr = color(name)
        for s in e.getElementsByTagName("shade"):
            self.ranges.append((
                clr,
                shade(attr(s, "name")),
                w * float(attr(s, "weight"))
            ))
def color(self, d=0.035):
    """Return a random color within the theme.

    Picks a random (color, range, weight) tuple — bigger weights
    have a higher chance — and hues the range with its color.
    """
    total = sum([w for clr, rng, w in self.ranges])
    r = random()
    for clr, rng, weight in self.ranges:
        if weight / total >= r:
            break
        r -= weight / total
    return rng(clr, d)
def colors(self, n=10, d=0.035):
    """Return n random colors from the theme, weighted by range."""
    total = sum([w for clr, rng, w in self.ranges])
    picked = colorlist()
    for i in _range(n):
        r = random()
        # Weighted roulette-wheel selection over the ranges.
        for clr, rng, weight in self.ranges:
            if weight / total >= r:
                break
            r -= weight / total
        picked.append(rng(clr, d))
    return picked
def recombine(self, other, d=0.7):
    """Genetic recombination of two themes using a cut-and-splice
    technique: take the first d of this theme and the last 1-d of
    the other."""
    a, b = self, other
    d1 = max(0, min(d, 1))
    d2 = d1
    spliced_name = a.name[:int(len(a.name) * d1)] + b.name[int(len(b.name) * d2):]
    spliced_ranges = a.ranges[:int(len(a.ranges) * d1)] + b.ranges[int(len(b.ranges) * d2):]
    child = ColorTheme(
        name=spliced_name,
        ranges=spliced_ranges,
        top=a.top,
        cache=os.path.join(DEFAULT_CACHE, "recombined"),
        blue=a.blue,
        length=a.length * d1 + b.length * d2
    )
    child.tags = a.tags[:int(len(a.tags) * d1)]
    child.tags += b.tags[int(len(b.tags) * d2):]
    return child
def swatch(self, x, y, w=35, h=35, padding=4, roundness=0, n=12, d=0.035, grouped=None):
    """ Draws a weighted swatch with approximately n columns and rows.

    When the grouped parameter is True, colors are grouped in blocks
    of the same hue (also see the _weight_by_hue() method).
    Returns the (x, y) position just past the drawn area.
    """
    if grouped is None: # should be True or False
        grouped = self.group_swatches
    # If we dont't need to make groups,
    # just display an individual column for each weight
    # in the (color, range, weight) tuples.
    if not grouped:
        s = sum([wgt for clr, rng, wgt in self.ranges])
        for clr, rng, wgt in self.ranges:
            # Column count proportional to this range's weight.
            cols = max(1, int(wgt / s * n))
            for i in _range(cols):
                rng.colors(clr, n=n, d=d).swatch(x, y, w, h, padding=padding, roundness=roundness)
                x += w + padding
        return x, y + n * (h + padding)
    # When grouped, combine hues and display them
    # in batches of rows, then moving on to the next hue.
    grouped = self._weight_by_hue()
    for total_weight, normalized_weight, hue, ranges in grouped:
        dy = y
        rc = 0
        for clr, rng, weight in ranges:
            dx = x
            # Hue group width: proportional to the group's weight,
            # but leave at least one column per remaining group.
            cols = int(normalized_weight * n)
            cols = max(1, min(cols, n - len(grouped)))
            if clr.name == "black":
                rng = rng.black
            if clr.name == "white":
                rng = rng.white
            for i in _range(cols):
                # Row count: this shade's share of the hue group.
                rows = int(weight / total_weight * n)
                rows = max(1, rows)
                # Each column should add up to n rows,
                # if not due to rounding errors, add a row at the bottom.
                if (clr, rng, weight) == ranges[-1] and rc + rows < n:
                    rows += 1
                rng.colors(clr, n=rows, d=d).swatch(dx, dy, w, h, padding=padding, roundness=roundness)
                dx += w + padding
            dy += (w + padding) * rows # + padding
            rc = rows
        x += (w + padding) * cols + padding
    return x, dy
def fseq(self, client, message):
    """ fseq messages associate a unique frame id with a set
    of set and alive messages """
    # Shift the current frame id into last_frame, then take the
    # new id from the message payload.
    client.last_frame, client.current_frame = (
        client.current_frame, message[3])
def objs(self):
    """Yield the tracked objects recognized with this profile that
    are in the current session."""
    for tracked in self.objects.itervalues():
        if tracked.sessionid in self.sessions:
            yield tracked
def _append_element(self, render_func, pe): ''' Append a render function and the parameters to pass an equivilent PathElement, or the PathElement itself. ''' self._render_funcs.append(render_func) self._elements.append(pe)
def _get_bounds(self):
    '''
    Return the bounds of this Grob, computing them once by rendering
    onto a throwaway recording surface and caching the result.
    '''
    if self._bounds:
        return self._bounds
    surface = cairo.RecordingSurface(cairo.CONTENT_COLOR_ALPHA,
                                     (-1, -1, 1, 1))
    ctx = cairo.Context(surface)
    self._traverse(ctx)
    self._bounds = ctx.path_extents()
    return self._bounds
def contains(self, x, y):
    '''
    Return whether the point (x, y) lies inside this Grob's fill.

    Renders the path to a throwaway recording surface and hit-tests
    the point with cairo's in_fill.

    NOTE(review): the original early-returned self._bounds (a 4-tuple
    copied from _get_bounds, docstring included) when bounds were
    cached — the wrong type for a hit test, and it ignored (x, y).
    That short-circuit is removed here.
    '''
    record_surface = cairo.RecordingSurface(cairo.CONTENT_COLOR_ALPHA,
                                            (-1, -1, 1, 1))
    dummy_ctx = cairo.Context(record_surface)
    self._traverse(dummy_ctx)
    return dummy_ctx.in_fill(x, y)
def _get_center(self): ''' Return cached bounds of this Grob. If bounds are not cached, render to a meta surface, and keep the meta surface and bounds cached. ''' if self._center: return self._center # get the center point (x1, y1, x2, y2) = self._get_bounds() x = (x1 + x2) / 2 y = (y1 + y2) / 2 center = self._center = x, y # TODO Cache function that draws using the RecordingSurface # Save the context or surface (without the bounding box strokecolor) # to optimise drawing return center
def _render_closure(self):
    '''Use a closure so that draw attributes can be saved'''
    # Snapshot the draw state now; the returned _render uses these
    # captured values, not whatever self holds at render time.
    fillcolor = self.fill
    strokecolor = self.stroke
    strokewidth = self.strokewidth

    def _render(cairo_ctx):
        '''
        At the moment this is based on cairo.

        TODO: Need to work out how to move the cairo specific
        bits somewhere else.
        '''
        # Go to initial point (CORNER or CENTER):
        transform = self._call_transform_mode(self._transform)
        if fillcolor is None and strokecolor is None:
            # Fixes _bug_FillStrokeNofillNostroke.bot
            return
        cairo_ctx.set_matrix(transform)
        # Run the path commands on the cairo context:
        self._traverse(cairo_ctx)
        # Matrix affects stroke, so we need to reset it:
        cairo_ctx.set_matrix(cairo.Matrix())
        if fillcolor is not None and strokecolor is not None:
            if strokecolor[3] < 1:
                # Draw onto intermediate surface so that stroke
                # does not overlay fill
                cairo_ctx.push_group()
                cairo_ctx.set_source_rgba(*fillcolor)
                cairo_ctx.fill_preserve()
                e = cairo_ctx.stroke_extents()
                cairo_ctx.set_source_rgba(*strokecolor)
                cairo_ctx.set_operator(cairo.OPERATOR_SOURCE)
                cairo_ctx.set_line_width(strokewidth)
                cairo_ctx.stroke()
                cairo_ctx.pop_group_to_source()
                cairo_ctx.paint()
            else:
                # Fast path if no alpha in stroke
                cairo_ctx.set_source_rgba(*fillcolor)
                cairo_ctx.fill_preserve()
                cairo_ctx.set_source_rgba(*strokecolor)
                cairo_ctx.set_line_width(strokewidth)
                cairo_ctx.stroke()
        elif fillcolor is not None:
            cairo_ctx.set_source_rgba(*fillcolor)
            cairo_ctx.fill()
        elif strokecolor is not None:
            cairo_ctx.set_source_rgba(*strokecolor)
            cairo_ctx.set_line_width(strokewidth)
            cairo_ctx.stroke()

    return _render
def _get_contours(self): """ Returns a list of contours in the path, as BezierPath objects. A contour is a sequence of lines and curves separated from the next contour by a MOVETO. For example, the glyph "o" has two contours: the inner circle and the outer circle. """ # Originally from nodebox-gl contours = [] current_contour = None empty = True for i, el in enumerate(self._get_elements()): if el.cmd == MOVETO: if not empty: contours.append(current_contour) current_contour = BezierPath(self._bot) current_contour.moveto(el.x, el.y) empty = True elif el.cmd == LINETO: empty = False current_contour.lineto(el.x, el.y) elif el.cmd == CURVETO: empty = False current_contour.curveto(el.c1x, el.c1y, el.c2x, el.c2y, el.x, el.y) elif el.cmd == CLOSE: current_contour.closepath() if not empty: contours.append(current_contour) return contours
def _locate(self, t, segments=None):
    """ Locates t on a specific segment in the path.

    Returns (index, t, PathElement). A path is a combination of
    lines and curves (segments). The returned index indicates the
    start of the segment that contains point t. The returned t is
    the absolute time on that segment, in contrast to the relative
    t on the whole of the path. The returned point is the last
    MOVETO, any subsequent CLOSETO after i closes to that point.

    When you supply the list of segment lengths yourself, as
    returned from length(path, segmented=True), point() works about
    thirty times faster in a for-loop since it doesn't need to
    recalculate the length during each iteration.
    """
    # Originally from nodebox-gl
    if segments is None:
        segments = self._segment_lengths(relative=True)
    if len(segments) == 0:
        # Parenthesized raise: the original used the Python-2-only
        # "raise PathError, ..." statement; this form also matches
        # the sibling point() method.
        raise PathError("The given path is empty")
    for i, el in enumerate(self._get_elements()):
        if i == 0 or el.cmd == MOVETO:
            closeto = Point(el.x, el.y)
        if t <= segments[i] or i == len(segments) - 1:
            break
        else:
            t -= segments[i]
    try:
        t /= segments[i]
    except ZeroDivisionError:
        pass
    if i == len(segments) - 1 and segments[i] == 0:
        i -= 1
    return (i, t, closeto)
def point(self, t, segments=None): """ Returns the PathElement at time t (0.0-1.0) on the path. Returns coordinates for point at t on the path. Gets the length of the path, based on the length of each curve and line in the path. Determines in what segment t falls. Gets the point on that segment. When you supply the list of segment lengths yourself, as returned from length(path, segmented=True), point() works about thirty times faster in a for-loop since it doesn't need to recalculate the length during each iteration. """ # Originally from nodebox-gl if len(self._elements) == 0: raise PathError("The given path is empty") if self._segments is None: self._segments = self._get_length(segmented=True, precision=10) i, t, closeto = self._locate(t, segments=self._segments) x0, y0 = self[i].x, self[i].y p1 = self[i + 1] if p1.cmd == CLOSE: x, y = self._linepoint(t, x0, y0, closeto.x, closeto.y) return PathElement(LINETO, x, y) elif p1.cmd in (LINETO, MOVETO): x1, y1 = p1.x, p1.y x, y = self._linepoint(t, x0, y0, x1, y1) return PathElement(LINETO, x, y) elif p1.cmd == CURVETO: # Note: the handles need to be interpreted differenty than in a BezierPath. # In a BezierPath, ctrl1 is how the curve started, and ctrl2 how it arrives in this point. # Here, ctrl1 is how the curve arrives, and ctrl2 how it continues to the next point. x3, y3, x1, y1, x2, y2 = p1.x, p1.y, p1.ctrl1.x, p1.ctrl1.y, p1.ctrl2.x, p1.ctrl2.y x, y, c1x, c1y, c2x, c2y = self._curvepoint(t, x0, y0, x1, y1, x2, y2, x3, y3) return PathElement(CURVETO, c1x, c1y, c2x, c2y, x, y) else: raise PathError("Unknown cmd '%s' for p1 %s" % (p1.cmd, p1))
def points(self, amount=100, start=0.0, end=1.0, segments=None):
    """ Returns an iterator with a list of calculated points for
    the path. To omit the last point on closed paths:
    end=1-1.0/amount
    """
    # Originally from nodebox-gl
    if len(self._elements) == 0:
        raise PathError("The given path is empty")
    span = end - start
    delta = span
    if amount > 1:
        # Divide by amount-1 so the final point (t=end) is included:
        # amount=4 yields t at 0.0, 0.33, 0.66 and 1.0;
        # amount=2 yields t at 0.0 and 1.0.
        delta = float(span) / (amount - 1)
    for i in xrange(int(amount)):
        yield self.point(start + delta * i, segments)
def _linepoint(self, t, x0, y0, x1, y1): """ Returns coordinates for point at t on the line. Calculates the coordinates of x and y for a point at t on a straight line. The t parameter is a number between 0.0 and 1.0, x0 and y0 define the starting point of the line, x1 and y1 the ending point of the line. """ # Originally from nodebox-gl out_x = x0 + t * (x1 - x0) out_y = y0 + t * (y1 - y0) return (out_x, out_y)
def _linelength(self, x0, y0, x1, y1): """ Returns the length of the line. """ # Originally from nodebox-gl a = pow(abs(x0 - x1), 2) b = pow(abs(y0 - y1), 2) return sqrt(a + b)
def _curvepoint(self, t, x0, y0, x1, y1, x2, y2, x3, y3, handles=False): """ Returns coordinates for point at t on the spline. Calculates the coordinates of x and y for a point at t on the cubic bezier spline, and its control points, based on the de Casteljau interpolation algorithm. The t parameter is a number between 0.0 and 1.0, x0 and y0 define the starting point of the spline, x1 and y1 its control point, x3 and y3 the ending point of the spline, x2 and y2 its control point. If the handles parameter is set, returns not only the point at t, but the modified control points of p0 and p3 should this point split the path as well. """ # Originally from nodebox-gl mint = 1 - t x01 = x0 * mint + x1 * t y01 = y0 * mint + y1 * t x12 = x1 * mint + x2 * t y12 = y1 * mint + y2 * t x23 = x2 * mint + x3 * t y23 = y2 * mint + y3 * t out_c1x = x01 * mint + x12 * t out_c1y = y01 * mint + y12 * t out_c2x = x12 * mint + x23 * t out_c2y = y12 * mint + y23 * t out_x = out_c1x * mint + out_c2x * t out_y = out_c1y * mint + out_c2y * t if not handles: return (out_x, out_y, out_c1x, out_c1y, out_c2x, out_c2y) else: return (out_x, out_y, out_c1x, out_c1y, out_c2x, out_c2y, x01, y01, x23, y23)
def _curvelength(self, x0, y0, x1, y1, x2, y2, x3, y3, n=20): """ Returns the length of the spline. Integrates the estimated length of the cubic bezier spline defined by x0, y0, ... x3, y3, by adding the lengths of lineair lines between points at t. The number of points is defined by n (n=10 would add the lengths of lines between 0.0 and 0.1, between 0.1 and 0.2, and so on). The default n=20 is fine for most cases, usually resulting in a deviation of less than 0.01. """ # Originally from nodebox-gl length = 0 xi = x0 yi = y0 for i in range(n): t = 1.0 * (i + 1) / n pt_x, pt_y, pt_c1x, pt_c1y, pt_c2x, pt_c2y = \ self._curvepoint(t, x0, y0, x1, y1, x2, y2, x3, y3) c = sqrt(pow(abs(xi - pt_x), 2) + pow(abs(yi - pt_y), 2)) length += c xi = pt_x yi = pt_y return length
def _segment_lengths(self, relative=False, n=20): """ Returns a list with the lengths of each segment in the path. """ # From nodebox_gl lengths = [] first = True for el in self._get_elements(): if first is True: close_x, close_y = el.x, el.y first = False elif el.cmd == MOVETO: close_x, close_y = el.x, el.y lengths.append(0.0) elif el.cmd == CLOSE: lengths.append(self._linelength(x0, y0, close_x, close_y)) elif el.cmd == LINETO: lengths.append(self._linelength(x0, y0, el.x, el.y)) elif el.cmd == CURVETO: x3, y3, x1, y1, x2, y2 = el.x, el.y, el.c1x, el.c1y, el.c2x, el.c2y # (el.c1x, el.c1y, el.c2x, el.c2y, el.x, el.y) lengths.append(self._curvelength(x0, y0, x1, y1, x2, y2, x3, y3, n)) if el.cmd != CLOSE: x0 = el.x y0 = el.y if relative: length = sum(lengths) try: # Relative segment lengths' sum is 1.0. return map(lambda l: l / length, lengths) except ZeroDivisionError: # If the length is zero, just return zero for all segments return [0.0] * len(lengths) else: return lengths
def _get_length(self, segmented=False, precision=10):
    """ Returns the length of the path.

    Calculates the length of each spline in the path, using
    precision as the number of points to measure. When segmented is
    True, returns a list of relative lengths (0.0-1.0, summing to
    1.0) — one per spline — instead of the total.
    """
    # Originally from nodebox-gl
    if segmented:
        return self._segment_lengths(relative=True, n=precision)
    return sum(self._segment_lengths(n=precision), 0.0)
def _get_elements(self):
    '''
    Yield every element as a PathElement, converting raw tuples
    in place so each conversion happens at most once.
    '''
    for index, el in enumerate(self._elements):
        if isinstance(el, tuple):
            el = PathElement(*el)
            self._elements[index] = el
        yield el
def depth_first_search(root, visit=lambda node: False, traversable=lambda node, edge: True):
    """ Simple, multi-purpose depth-first search.

    Visits all the nodes connected to the root, depth-first. The
    visit function is called on each node; recursion stops if it
    returns True, and then depth_first_search() returns True. The
    traversable function takes the current node and edge and returns
    True if we may follow this connection to the next node — e.g.
    for directed edges: lambda node, edge: node == edge.node1.

    Note: node._visited is expected to be False for all nodes.
    """
    stop = visit(root)
    root._visited = True
    for neighbour in root.links:
        if stop:
            return True
        if not traversable(root, root.links.edge(neighbour)):
            continue
        if not neighbour._visited:
            stop = depth_first_search(neighbour, visit, traversable)
    return stop
def adjacency(graph, directed=False, reversed=False, stochastic=False, heuristic=None):
    """ An edge weight map indexed by node id's.

    A dictionary indexed by node id1's in which each value is a
    dictionary of connected node id2's linking to the edge weight.
    If directed, edges go from id1 to id2, but not the other way.
    If stochastic, all the weights for the neighbors of a given node
    sum to 1. A heuristic can be a function that takes two node id's
    and returns an additional cost for movement between the two
    nodes.
    """
    v = {}
    for n in graph.nodes:
        v[n.id] = {}
    for e in graph.edges:
        id1, id2 = e.node1.id, e.node2.id
        if reversed:
            id1, id2 = id2, id1
        v[id1][id2] = 1.0 - e.weight * 0.5
        if heuristic:
            v[id1][id2] += heuristic(id1, id2)
        if not directed:
            # Undirected graphs mirror the weight in both directions.
            v[id2][id1] = v[id1][id2]
    if stochastic:
        for id1 in v:
            d = sum(v[id1].values())
            for id2 in v[id1]:
                v[id1][id2] /= d
    return v
def brandes_betweenness_centrality(graph, normalized=True):
    """ Betweenness centrality for nodes in the graph.

    Betweenness centrality is a measure of the number of shortests
    paths that pass through a node. Nodes in high-density areas
    will get a good score.

    The algorithm is Brandes' betweenness centrality, from NetworkX
    0.35.1: Aric Hagberg, Dan Schult and Pieter Swart, based on
    Dijkstra's algorithm for shortest paths modified from Eppstein.
    https://networkx.lanl.gov/wiki
    """
    G = graph.keys()
    W = adjacency(graph)
    betweenness = dict.fromkeys(G, 0.0) # b[v]=0 for v in G
    for s in G:
        # S: nodes in order of non-decreasing distance from s.
        # P: predecessor lists on shortest paths from s.
        # sigma: number of shortest paths from s to each node.
        S = []
        P = {}
        for v in G:
            P[v] = []
        sigma = dict.fromkeys(G, 0) # sigma[v]=0 for v in G
        D = {}
        sigma[s] = 1
        seen = { s: 0 }
        Q = [] # use Q as heap with (distance, node id) tuples
        heapq.heappush(Q, (0, s, s))
        while Q:
            (dist, pred, v) = heapq.heappop(Q)
            if v in D:
                continue # already searched this node
            sigma[v] = sigma[v] + sigma[pred] # count paths
            S.append(v)
            D[v] = seen[v]
            for w in graph[v].links:
                w = w.id
                vw_dist = D[v] + W[v][w]
                if w not in D and (w not in seen or vw_dist < seen[w]):
                    seen[w] = vw_dist
                    heapq.heappush(Q, (vw_dist, v, w))
                    P[w] = [v]
                elif vw_dist == seen[w]: # handle equal paths
                    sigma[w] = sigma[w] + sigma[v]
                    P[w].append(v)
        # Accumulation phase: walk back from the farthest nodes,
        # propagating dependency scores to predecessors.
        delta = dict.fromkeys(G,0)
        while S:
            w = S.pop()
            for v in P[w]:
                delta[v] = delta[v] + (float(sigma[v]) / float(sigma[w])) * (1.0 + delta[w])
            if w != s:
                betweenness[w] = betweenness[w] + delta[w]
    #-----------------------------------
    if normalized:
        # Normalize between 0.0 and 1.0.
        m = max(betweenness.values())
        if m == 0:
            m = 1
    else:
        m = 1
    # NOTE: iteritems is Python-2-only.
    betweenness = dict([(id, w/m) for id, w in betweenness.iteritems()])
    return betweenness
def eigenvector_centrality(graph, normalized=True, reversed=True, rating={}, start=None, iterations=100, tolerance=0.0001):
    """ Eigenvector centrality for nodes in the graph (like Google's PageRank).
    
    Eigenvector centrality is a measure of the importance of a node in a directed network. 
    It rewards nodes with a high potential of (indirectly) connecting to high-scoring nodes.
    Nodes with no incoming connections have a score of zero.
    If you want to measure outgoing connections, reversed should be False.
        
    The eigenvector calculation is done by the power iteration method.
    It has no guarantee of convergence.
    A starting vector for the power iteration can be given in the start dict.
    
    You can adjust the importance of a node with the rating dictionary,
    which links node id's to a score.
    
    The algorithm is adapted from NetworkX, Aric Hagberg ([email protected]):
    https://networkx.lanl.gov/attachment/ticket/119/eigenvector_centrality.py
    """
    # NOTE: Python 2 code (dict.has_key / dict.iteritems below).
    # NOTE(review): `rating` has a mutable default; it is only read here,
    # never mutated, so the shared default is harmless.
    G = graph.keys()
    W = adjacency (graph, directed=True, reversed=reversed)
    # Scale the vector x in-place so its entries sum to 1.
    def _normalize(x):
        s = sum(x.values())
        if s != 0: s = 1.0 / s
        for k in x: 
            x[k] *= s
    x = start
    if x is None:
        x = dict([(n, random()) for n in G])
    _normalize(x)
    # Power method: y = Ax multiplication.
    for i in range(iterations):
        x0 = x
        x = dict.fromkeys(x0.keys(), 0)
        for n in x:
            for nbr in W[n]:
                r = 1
                if rating.has_key(n): r = rating[n]
                x[n] += 0.01 + x0[nbr] * W[n][nbr] * r
        _normalize(x)
        # Converged when the total change over all nodes drops below the tolerance.
        e = sum([abs(x[n]-x0[n]) for n in x])
        if e < len(graph.nodes) * tolerance:
            if normalized:
                # Normalize between 0.0 and 1.0.
                m = max(x.values())
                if m == 0: m = 1
                x = dict([(id, w/m) for id, w in x.iteritems()])
            return x
    #raise NoConvergenceError
    # No convergence within `iterations`: warn and fall back to zero weights.
    warn("node weight is 0 because eigenvector_centrality() did not converge.", Warning)
    return dict([(n, 0) for n in G])
def examples_menu(root_dir=None, depth=0): """ :return: xml for menu, [(bot_action, label), ...], [(menu_action, label), ...] """ # pre 3.12 menus examples_dir = ide_utils.get_example_dir() if not examples_dir: return "", [], [] root_dir = root_dir or examples_dir file_tmpl = '<menuitem name="{name}" action="{action}"/>' dir_tmpl = '<menu name="{name}" action="{action}">{menu}</menu>' file_actions = [] submenu_actions = [] xml = "" for fn in sorted(os.listdir(root_dir)): path = os.path.join(root_dir, fn) rel_path = path[len(examples_dir):] if os.path.isdir(path): action = 'ShoebotExampleMenu {0}'.format(rel_path) label = fn.capitalize() sm_xml, sm_file_actions, sm_menu_actions = examples_menu(os.path.join(root_dir, fn), depth+1) submenu_actions.extend(sm_menu_actions) file_actions.extend(sm_file_actions) submenu_actions.append((action, label)) xml += dir_tmpl.format(name=fn, action=action, menu=sm_xml) elif os.path.splitext(path)[1] in ['.bot', '.py'] and not fn.startswith('_'): action = 'ShoebotExampleOpen {0}'.format(rel_path) label = ide_utils.make_readable_filename(fn) xml += file_tmpl.format(name=fn, action=action) file_actions.append((action, label)) return xml, file_actions, submenu_actions
def mk_examples_menu(text, root_dir=None, depth=0): """ :return: base_item, rel_paths """ # 3.12+ menus examples_dir = ide_utils.get_example_dir() if not examples_dir: return None, [] root_dir = root_dir or examples_dir file_actions = [] menu = Gio.Menu.new() base_item = Gio.MenuItem.new_submenu(text, menu) for fn in sorted(os.listdir(root_dir)): path = os.path.join(root_dir, fn) rel_path = path[len(examples_dir):] if os.path.isdir(path): label = fn.capitalize() item, sm_file_actions = mk_examples_menu(label, os.path.join(root_dir, fn)) menu.append_item(item) file_actions.extend(sm_file_actions) elif os.path.splitext(path)[1] in ['.bot', '.py'] and not fn.startswith('_'): label = ide_utils.make_readable_filename(fn) # the only way I could work out to attach the data to the menu item is in the name :/ action_name = "win.open_example__%s" % encode_relpath(rel_path) menu.append(label, action_name) file_actions.append(rel_path) return base_item, file_actions
def get_child_by_name(parent, name):
    """ Iterate through a gtk container, `parent`,
    and return the widget with the name `name`.

    Performs a depth-first search and returns the first match,
    or None when no widget in the tree has that name.

    :param parent: root widget to start searching from
    :param name: widget name to look for (as returned by get_name())
    """
    # http://stackoverflow.com/questions/2072976/access-to-widget-in-gtk
    def iterate_children(widget, name):
        if widget.get_name() == name:
            return widget
        try:
            children = widget.get_children()
        except AttributeError:
            # Not a container: nothing to descend into.
            return None
        for child in children:
            result = iterate_children(child, name)
            if result is not None:
                return result
        return None
    return iterate_children(parent, name)
def venv_has_script(script):
    """ Build a predicate that tests a virtualenv for a bin/ script.

    :param script: script to look for in bin folder
    :return: function taking a venv path, returning True when
             ``<venv>/bin/<script>`` exists as a file.
    """
    def check(venv):
        candidate = os.path.join(venv, 'bin', script)
        if os.path.isfile(candidate):
            return True
    return check
def is_venv(directory, executable='python'):
    """ Check whether a directory looks like a python environment.

    :param directory: base directory of python environment
    :param executable: interpreter name expected under bin/
    :return: True when ``<directory>/bin/<executable>`` is a file.
    """
    interpreter = os.path.join(directory, 'bin', executable)
    return os.path.isfile(interpreter)
def vw_envs(filter=None):
    """ :return: python environments in ~/.virtualenvs

    :param filter: if this returns False the venv will be ignored

    >>> vw_envs(filter=venv_has_script('pip'))
    """
    # NOTE: the parameter shadows the builtin `filter`; kept for callers.
    vw_root = os.path.abspath(os.path.expanduser(os.path.expandvars('~/.virtualenvs')))
    venvs = []
    for name in os.listdir(vw_root):
        candidate = os.path.join(vw_root, name)
        if not os.path.isdir(candidate):
            continue
        # Skip entries rejected by the caller-supplied predicate.
        if filter and not filter(candidate):
            continue
        venvs.append(candidate)
    return sorted(venvs)
def sbot_executable():
    """
    Find shoebot executable

    Resolution depends on the 'current-virtualenv' gsettings key:
      - 'Default': first `sbot` on PATH
      - 'System': first `sbot` on PATH that is outside the active virtualenv
      - otherwise: `<venv>/bin/sbot`, falling back to PATH when missing

    :return: real path to the sbot script
    """
    gsettings=load_gsettings()
    venv = gsettings.get_string('current-virtualenv')
    if venv == 'Default':
        sbot = which('sbot')
    elif venv == 'System':
        # find system python
        env_venv = os.environ.get('VIRTUAL_ENV')
        if not env_venv:
            return which('sbot')

        # First sbot in path that is not in current venv
        for p in os.environ['PATH'].split(os.path.pathsep):
            sbot='%s/sbot' % p
            if not p.startswith(env_venv) and os.path.isfile(sbot):
                return sbot
        # NOTE(review): if no sbot is found outside the venv, execution falls
        # through to realpath() with `sbot` holding the last candidate path
        # (or unset when PATH is empty) - confirm this is intended.
    else:
        sbot = os.path.join(venv, 'bin/sbot')
        if not os.path.isfile(sbot):
            print('Shoebot not found, reverting to System shoebot')
            sbot = which('sbot')
    return os.path.realpath(sbot)
def _description(self):
    """ Returns the meta description in the page.

    Looks for <meta name="description" content="..."> and returns the
    content attribute, or an empty unicode string when absent.
    """
    # NOTE: Python 2 code (dict.has_key, u"" literal).
    meta = self.find("meta", {"name":"description"})
    if isinstance(meta, dict) and \
       meta.has_key("content"):
        return meta["content"]
    else:
        return u""
def _keywords(self):
    """ Returns the meta keywords in the page.

    Splits the content attribute of <meta name="keywords"> on commas;
    returns an empty list when the tag is absent.
    """
    # NOTE: Python 2 code (dict.has_key).
    meta = self.find("meta", {"name":"keywords"})
    if isinstance(meta, dict) and \
       meta.has_key("content"):
        keywords = [k.strip() for k in meta["content"].split(",")]
    else:
        keywords = []
    return keywords
def links(self, external=True):
    """ Retrieves links in the page.
    
    Returns a list of URL's.
    By default, only external URL's are returned.
    External URL's starts with http:// and point to another
    domain than the domain the page is on.
    """
    domain = URLParser(self.url).domain
    links = []
    for a in self("a"):
        # a.attrs iterates as (attribute, value) pairs (BeautifulSoup 3 style).
        for attribute, value in a.attrs:
            if attribute == "href":
                # Keep the link when all links are wanted, or when it
                # points outside the current domain.
                if not external \
                or (value.startswith("http://") and value.find("http://"+domain) < 0):
                    links.append(value)
    return links
def sorted(list, cmp=None, reversed=False):
    """ Returns a sorted copy of the list.

    :param list: iterable to copy and sort (parameter shadows the builtin)
    :param cmp: optional Python 2 style comparison function
    :param reversed: when True, sort in descending order
    """
    # NOTE: Python 2 code - list.sort(cmp) takes a comparison function,
    # which was removed in Python 3. This function also shadows the
    # builtin sorted(); kept for backward compatibility with callers.
    list = [x for x in list]
    list.sort(cmp)
    if reversed: list.reverse()
    return list
def unique(list):
    """ Returns a copy of the list without duplicates, preserving order.

    :param list: iterable of items (may be unhashable; parameter name
        shadows the builtin, kept for backward compatibility)
    :return: new list containing the first occurrence of each item
    """
    # A plain loop instead of a side-effecting list comprehension.
    # Membership test stays `in` on a list (not a set) so unhashable
    # items such as lists keep working.
    seen = []
    for x in list:
        if x not in seen:
            seen.append(x)
    return seen
def flatten(node, distance=1):
    """ Recursively lists the node and its links.
    
    Distance of 0 will return the given [node].
    Distance of 1 will return a list of the node and all its links.
    Distance of 2 will also include the linked nodes' links, etc.
    """
    # When you pass a graph it returns all the node id's in it.
    if hasattr(node, "nodes") and hasattr(node, "edges"):
        return [n.id for n in node.nodes]

    collected = [node]
    if distance >= 1:
        for linked in node.links:
            collected += linked.flatten(distance - 1)
    return unique(collected)
def subgraph(graph, id, distance=1):
    """ Creates the subgraph of the flattened node with given id (or list of id's).
    Finds all the edges between the nodes that make up the subgraph.
    """
    # NOTE: Python 2 code (graph.has_key below).
    g = graph.copy(empty=True)
    
    if isinstance(id, (FunctionType, LambdaType)):
        # id can also be a lambda or function that returns True or False
        # for each node in the graph. We take the id's of nodes that pass.
        id = [node.id for node in filter(id, graph.nodes)]
    if not isinstance(id, (list, tuple)):
        id = [id]
    for id in id:
        # Copy every node within `distance` links of this id into the subgraph.
        for n in flatten(graph[id], distance):
            g.add_node(n.id, n.r, n.style, n.category, n.label, (n==graph.root), n.__dict__)

    for e in graph.edges:
        # Keep only edges whose endpoints both made it into the subgraph.
        if g.has_key(e.node1.id) and \
           g.has_key(e.node2.id):
            g.add_edge(e.node1.id, e.node2.id, e.weight, e.length, e.label, e.__dict__)
    
    # Should we look for shortest paths between nodes here?
    
    return g
def clique(graph, id):
    """ Returns the largest possible clique for the node with given id.

    Greedy expansion: walk every node in the graph and add it when it is
    connected to all current members (and is not already a member).
    """
    members = [id]
    for node in graph.nodes:
        accepted = True
        for member in members:
            # Reject the node itself and any node lacking an edge to a member.
            if node.id == member or graph.edge(node.id, member) == None:
                accepted = False
                break
        if accepted:
            members.append(node.id)
    return members
def cliques(graph, threshold=3):
    """ Returns all the cliques in the graph of at least the given size.

    Runs the greedy clique() expansion from every node, keeping the
    sorted result when it reaches the threshold and is not a duplicate.
    """
    found = []
    for node in graph.nodes:
        candidate = clique(graph, node.id)
        if len(candidate) < threshold:
            continue
        candidate.sort()
        if candidate not in found:
            found.append(candidate)
    return found
def partition(graph):
    """ Splits unconnected subgraphs.
    
    For each node in the graph, make a list of its id and all directly connected id's.
    If one of the nodes in this list intersects with a subgraph,
    they are all part of that subgraph.
    Otherwise, this list is part of a new subgraph.
    Return a list of subgraphs sorted by size (biggest-first).
    """
    g = []
    for n in graph.nodes:
        # The node and everything directly linked to it.
        c = [n.id for n in flatten(n)]
        f = False
        for i in range(len(g)):
            if len(intersection(g[i], c)) > 0:
                g[i] = union(g[i], c)
                f = True
                break
        if not f:
            g.append(c)
    
    # If 1 is directly connected to 2 and 3, 
    # and 4 is directly connected to 5 and 6, these are separate subgraphs.
    # If we later find that 7 is directly connected to 3 and 6, 
    # it will be attached to [1, 2, 3] yielding
    # [1, 2, 3, 6, 7] and [4, 5, 6].
    # These two subgraphs are connected and need to be merged.
    merged = []
    for i in range(len(g)):
        merged.append(g[i])
        for j in range(i+1, len(g)):
            if len(intersection(g[i], g[j])) > 0:
                merged[-1].extend(g[j])
                g[j] = []
    g = merged
    # Wrap each id list in an actual subgraph object.
    g = [graph.sub(g, distance=0) for g in g]
    # NOTE: Python 2 code - list.sort with a cmp function (removed in Python 3).
    g.sort(lambda a, b: len(b) - len(a))
    return g
def render(self, size, frame, drawqueue):
    '''
    Render one frame of the drawqueue.

    Asks the implementation for a render context, hands it to the
    drawqueue's render function, then signals rendering_finished.

    :param size: canvas size, forwarded to the implementation
    :param frame: frame number being rendered
    :param drawqueue: object with a render(context) method
    :return: the render context that was drawn into
    '''
    ctx = self.create_rcontext(size, frame)
    drawqueue.render(ctx)
    self.rendering_finished(size, frame, ctx)
    return ctx
def batch(vars, oldvars, ns):
    """
    Context manager to only update listeners at the end,

    in the meantime it doesn't matter what intermediate state the vars
    are in (they can be added and removed)

    >>> with VarListener.batch()
    ...     pass
    """
    # Snapshot the vars before the managed block runs.
    snapshot_vars = dict(vars)

    with VarListener.disabled():
        yield

    # Diff the var dict against the snapshot after the block ran.
    added_vars = set(oldvars.keys()) - set(snapshot_vars.keys())
    deleted_vars = set(snapshot_vars.keys()) - set(oldvars.keys())
    existing_vars = set(vars.keys()) - added_vars - deleted_vars

    for name in existing_vars:
        old_var = snapshot_vars[name]
        new_var = vars[name]

        # A var whose type or bounds changed is treated as delete + add,
        # carrying the old value over when the type is unchanged.
        if old_var.type != new_var.type or old_var.min != new_var.min or old_var.max != new_var.max:
            deleted_vars.add(name)
            added_vars.add(name)
            if old_var.type == new_var.type:
                new_var.value = old_var.value

    # Notify listeners once, with the net set of changes.
    for listener in VarListener.listeners:
        for name in deleted_vars:
            listener.var_deleted(snapshot_vars[name])
            # Drop the stale value from the namespace too.
            if ns.get(name) is snapshot_vars[name].value:
                del ns[name]
        for name in added_vars:
            listener.var_added(vars[name])
def hexDump(bytes):
    """Useful utility; prints the string in hexadecimal

    Writes each byte as a 2-digit hex value; after every 8 bytes the
    group is echoed as a repr on the same line.
    """
    # NOTE: Python 2 code (print statement, ord() on a str of bytes).
    for i in range(len(bytes)):
        sys.stdout.write("%2x " % (ord(bytes[i])))
        if (i+1) % 8 == 0:
            print repr(bytes[i-7:i+1])

    # Print the trailing partial group, right-aligned under the hex column.
    if(len(bytes) % 8 != 0):
        print string.rjust("", 11), repr(bytes[i-len(bytes)%8:i+1])
def readLong(data):
    """Tries to interpret the next 8 bytes of the data
    as a 64-bit signed integer.

    :return: (value, remaining_data) tuple
    """
    # NOTE: Python 2 code (`long` type).
    high, low = struct.unpack(">ll", data[0:8])
    # NOTE(review): `low` is unpacked as *signed* 32-bit; a value with the
    # high bit set in the low word will corrupt the result. ">q" (or an
    # unsigned low word) looks safer - confirm against the OSC spec.
    big = (long(high) << 32) + low
    rest = data[8:]
    return (big, rest)
def OSCBlob(next):
    """Convert a string into an OSC Blob,
    returning a (typetag, data) tuple.

    Non-string input yields ('', '') so it is silently skipped.
    """
    # NOTE: Python 2 code (str-based binary handling).
    if type(next) == type(""):
        length = len(next)
        # Blobs are length-prefixed and zero-padded to a 4-byte boundary.
        padded = math.ceil((len(next)) / 4.0) * 4
        binary = struct.pack(">i%ds" % (padded), length, next)
        tag = 'b'
    else:
        tag = ''
        binary = ''

    return (tag, binary)
def OSCArgument(next):
    """Convert some Python types to their
    OSC binary representations, returning a
    (typetag, data) tuple.

    Supports str ('s'), float ('f') and int ('i'); any other type
    becomes an empty ('', '') pair.
    """
    if type(next) == type(""):
        # OSC strings are null-terminated and padded to a 4-byte boundary.
        OSCstringLength = math.ceil((len(next)+1) / 4.0) * 4
        binary = struct.pack(">%ds" % (OSCstringLength), next)
        tag = "s"
    elif type(next) == type(42.5):
        binary = struct.pack(">f", next)
        tag = "f"
    elif type(next) == type(13):
        binary = struct.pack(">i", next)
        tag = "i"
    else:
        binary = ""
        tag = ""

    return (tag, binary)
def parseArgs(args):
    """Given a list of strings, produces a list
    where those strings have been parsed (where
    possible) as floats or integers.

    :param args: list of strings
    :return: list with each element converted to int (no decimal point
        in the text), float (parses as a number), or left as a string.
    """
    parsed = []
    for arg in args:
        arg = arg.strip()
        try:
            interpretation = float(arg)
            # No decimal point in the text: treat it as an integer.
            if arg.find(".") == -1:
                interpretation = int(interpretation)
        except ValueError:
            # Not numeric - keep the original string.
            interpretation = arg
        parsed.append(interpretation)
    return parsed
def decodeOSC(data):
    """Converts a typetagged OSC message to a Python list.

    Bundles ("#bundle") decode recursively into nested lists; plain
    messages decode to [address, typetags, arg1, arg2, ...].
    """
    # NOTE: Python 2 code (print statement).
    table = {"i":readInt, "f":readFloat, "s":readString, "b":readBlob}
    decoded = []
    address, rest = readString(data)
    typetags = ""

    if address == "#bundle":
        # A bundle is a timetag followed by length-prefixed sub-messages.
        time, rest = readLong(rest)
        # decoded.append(address)
        # decoded.append(time)
        while len(rest)>0:
            length, rest = readInt(rest)
            decoded.append(decodeOSC(rest[:length]))
            rest = rest[length:]

    elif len(rest) > 0:
        typetags, rest = readString(rest)
        decoded.append(address)
        decoded.append(typetags)
        if typetags[0] == ",":
            # Decode each argument according to its typetag character.
            for tag in typetags[1:]:
                value, rest = table[tag](rest)
                decoded.append(value)
        else:
            print "Oops, typetag lacks the magic ,"

    return decoded
def append(self, argument, typehint = None):
    """Appends data to the message,
    updating the typetags based on
    the argument's type.

    If the argument is a blob (counted
    string) pass in 'b' as typehint.

    :param argument: value to append (str, float, int, or blob string)
    :param typehint: 'b' to encode the argument as an OSC blob
    """
    if typehint == 'b':
        binary = OSCBlob(argument)
    else:
        binary = OSCArgument(argument)

    # binary is a (typetag, data) tuple.
    self.typetags = self.typetags + binary[0]
    self.rawAppend(binary[1])
def getBinary(self):
    """Returns the binary message (so far) with typetags.

    Wire layout: OSC-string address + OSC-string typetags + raw
    argument data accumulated in self.message.
    """
    address = OSCArgument(self.address)[1]
    typetags = OSCArgument(self.typetags)[1]
    return address + typetags + self.message
def handle(self, data, source = None):
    """Given OSC data, tries to call the callback with the
    right address.

    :param data: raw OSC packet to decode
    :param source: originating address, forwarded to dispatch
    """
    decoded = decodeOSC(data)
    self.dispatch(decoded, source)
def dispatch(self, message, source = None):
    """Sends decoded OSC data to an appropriate calback

    A single message ([address, typetags, args...]) is routed to the
    callback registered for its address; a bundle (list of messages)
    is dispatched recursively.
    """
    # NOTE: Python 2 code (`except X, e` syntax, print statements).
    msgtype = ""  # NOTE(review): unused - looks like leftover scaffolding.
    try:
        if type(message[0]) == str:
            # got a single message
            address = message[0]
            self.callbacks[address](message)

        elif type(message[0]) == list:
            # A bundle: dispatch every contained message.
            for msg in message:
                self.dispatch(msg)

    except KeyError, key:
        print 'address %s not found, %s: %s' % (address, key, message)
        pprint.pprint(message)
    except IndexError, e:
        print '%s: %s' % (e, message)
        pass
    except None, e:
        # NOTE(review): `except None` can never match - this handler is dead.
        print "Exception in", address, "callback :", e

    return
def add(self, callback, name):
    """Adds a callback to our set of callbacks,
    or removes the callback with name if callback
    is None.

    :param callback: callable invoked for messages addressed to `name`,
        or None to unregister the existing callback
    :param name: OSC address the callback is registered under
    :raises KeyError: when removing a name that is not registered
    """
    # `is None` instead of `== None`: identity check, immune to custom __eq__.
    if callback is None:
        del self.callbacks[name]
    else:
        self.callbacks[name] = callback
def find_example_dir(): """ Find examples dir .. a little bit ugly.. """ # Replace %s with directory to check for shoebot menus. code_stub = textwrap.dedent(""" from pkg_resources import resource_filename, Requirement, DistributionNotFound try: print(resource_filename(Requirement.parse('shoebot'), '%s')) except DistributionNotFound: pass """) # Needs to run in same python env as shoebot (may be different to gedits) code = code_stub % 'share/shoebot/examples' cmd = ["python", "-c", code] p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) output, errors = p.communicate() if errors: print('Shoebot experienced errors searching for install and examples.') print('Errors:\n{0}'.format(errors.decode('utf-8'))) return None else: examples_dir = output.decode('utf-8').strip() if os.path.isdir(examples_dir): return examples_dir # If user is running 'setup.py develop' then examples could be right here #code = "from pkg_resources import resource_filename, Requirement; print resource_filename(Requirement.parse('shoebot'), 'examples/')" code = code_stub % 'examples/' cmd = ["python", "-c", code] p = subprocess.Popen(cmd, stdout=subprocess.PIPE) output, errors = p.communicate() examples_dir = output.decode('utf-8').strip() if os.path.isdir(examples_dir): return examples_dir if examples_dir: print('Shoebot could not find examples at: {0}'.format(examples_dir)) else: print('Shoebot could not find install dir and examples.')
def run(self):
    """
    The body of the tread: read lines and put them on the queue.

    Lines matched by the optional althandler are consumed there and
    skipped; everything else goes onto the queue for the consumer.
    """
    try:
        # NOTE(review): readline never returns False, so the sentinel is
        # never hit - effectively an endless loop, broken by the
        # ValueError below when the fd is closed under us.
        for line in iter(self._fd.readline, False):
            if line is not None:
                if self._althandler:
                    if self._althandler(line):
                        # If the althandler returns True
                        # then don't process this as usual
                        continue
            self._queue.put(line)
            if not line:
                # Empty read: back off briefly instead of busy-looping.
                time.sleep(0.1)
    except ValueError:
        # This can happen if we are closed during readline - TODO - better fix.
        if not self._fd.closed:
            raise
def eof(self):
    """ Check whether there is no more content to expect.

    True when the reader thread has finished and its queue is drained,
    or when the underlying file descriptor has been closed.
    """
    # Parenthesized explicitly: `and` binds tighter than `or`, so the
    # original unparenthesized expression already grouped this way.
    return ((not self.is_alive()) and self._queue.empty()) or self._fd.closed
def live_source_load(self, source):
    """
    Send new source code to the bot

    :param source: python source code to send
    :return: None; issues CMD_LOAD_BASE64 only when the source changed
        since the last call
    """
    source = source.rstrip('\n')
    if source != self.source:
        self.source = source
        # Base64-encode so newlines etc. survive the line-based protocol.
        b64_source = base64.b64encode(bytes(bytearray(source, "ascii")))
        self.send_command(CMD_LOAD_BASE64, b64_source)
def send_command(self, cmd, *args):
    """ Send a command to the bot process over stdin.

    A unique cookie is appended to every command so its response can be
    matched up later (see get_command_responses).

    :param cmd: command name, e.g. CMD_LOAD_BASE64
    :param args: extra arguments (str or bytes)
    :return: None
    """
    # Test in python 2 and 3 before modifying (gedit2 + 3)
    # Create a CommandResponse using a cookie as a unique id
    cookie = str(uuid.uuid4())
    response = CommandResponse(cmd, cookie, None, info=[])
    self.responses[cookie] = response
    args = list(args) + [b'cookie=' + bytes(cookie, "ascii")]

    # The cookie is always appended, so args is never empty here
    # (the old no-args branch was unreachable and has been removed).
    bytes_args = []
    for arg in args:
        if isinstance(arg, bytes):
            bytes_args.append(arg)
        else:
            bytes_args.append(bytearray(arg, "ascii"))
    data = bytearray(cmd, "ascii") + b' ' + b' '.join(bytes_args) + b'\n'

    self.process.stdin.write(data)
    self.process.stdin.flush()
def close(self):
    """ Close outputs of process and mark it as no longer running. """
    for pipe in (self.process.stdout, self.process.stderr):
        pipe.close()
    self.running = False
def get_output(self):
    """
    :yield: (stdout_line, stderr_line) pairs; exactly one of the two is
        set per item, except a final (None, None) when the process ended

    Generator that outputs lines captured from stdout and stderr

    These can be consumed to output on a widget in an IDE
    """
    if self.process.poll() is not None:
        # Process has exited: close our ends and emit a final marker.
        self.close()
        yield None, None

    # Drain whatever is currently queued on either stream.
    while not (self.stdout_queue.empty() and self.stderr_queue.empty()):
        if not self.stdout_queue.empty():
            line = self.stdout_queue.get().decode('utf-8')
            yield line, None

        if not self.stderr_queue.empty():
            line = self.stderr_queue.get().decode('utf-8')
            yield None, line
def get_command_responses(self):
    """
    Get responses to commands sent

    Generator: yields a leading None marker when any responses are
    queued, then each pending (non-None) response.
    """
    pending = self.response_queue
    if not pending.empty():
        yield None
    while not pending.empty():
        response = pending.get()
        if response is not None:
            yield response
def sort_by_preference(options, prefer):
    """
    Reorder `options` so entries found in `prefer` come first.

    :param options: List of options
    :param prefer: Prefered options
    :return: options with preferred ones first

    >>> sort_by_preference(["cairo", "cairocffi"], ["cairocffi"])
    ['cairocffi', 'cairo']
    """
    if not prefer:
        return options
    # Preferred items index earliest in the combined ranking list.
    ranking = prefer + options
    return sorted(options, key=ranking.index)
def get_driver_options():
    """
    Interpret the SHOEBOT_GRAPHICS environment variable as key=value
    pairs separated by spaces, e.g.
    ``SHOEBOT_GRAPHICS='cairo=cairocffi,cairo gi=pgi'``

    :return: dict of option name to value (empty when unset); exits
        with an error message on malformed input
    """
    env = os.environ.get("SHOEBOT_GRAPHICS")
    if not env:
        return {}

    try:
        return dict(pair.split('=') for pair in env.split())
    except ValueError:
        sys.stderr.write("Bad option format.\n")
        sys.stderr.write("Environment variable should be in the format key=value separated by spaces.\n\n")
        sys.stderr.write("SHOEBOT_GRAPHICS='cairo=cairocffi,cairo gi=pgi'\n")
        sys.exit(1)
def import_libs(self, module_names, impl_name):
    """
    Loop through module_names,
    add has_.... booleans to class
    set ..._impl to first successful import

    :param module_names: list of module names to try importing
    :param impl_name: used in error output if no modules succeed
    :return: (name, module) of the first successful import
    :raises ImportError: when none of the modules can be imported
    """
    # Single pass: record the first success while still setting the
    # has_<name> / <name> attributes for every candidate (the original
    # ran a second, redundant import loop to find the first success).
    first_import = None
    for name in module_names:
        try:
            module = __import__(name)
        except ImportError:
            module = None
        setattr(self, name, module)
        setattr(self, 'has_%s' % name, module is not None)
        if first_import is None and module is not None:
            first_import = (name, module)

    if first_import is not None:
        return first_import
    raise ImportError('No %s Implementation found, tried: %s' % (impl_name, ' '.join(module_names)))
def ensure_pycairo_context(self, ctx):
    """
    If ctx is a cairocffi Context convert it to a PyCairo Context
    otherwise return the original context

    :param ctx: cairo or cairocffi drawing context
    :return: PyCairo-compatible context
    """
    if self.cairocffi and isinstance(ctx, self.cairocffi.Context):
        # Import lazily: only needed when cairocffi is actually in use.
        from shoebot.util.cairocffi.cairocffi_to_pycairo import _UNSAFE_cairocffi_context_to_pycairo
        return _UNSAFE_cairocffi_context_to_pycairo(ctx)
    else:
        return ctx
def pangocairo_create_context(cr): """ If python-gi-cairo is not installed, using PangoCairo.create_context dies with an unhelpful KeyError, check for that and output somethig useful. """ # TODO move this to core.backend try: return PangoCairo.create_context(cr) except KeyError as e: if e.args == ('could not find foreign type Context',): raise ShoebotInstallError("Error creating PangoCairo missing dependency: python-gi-cairo") else: raise
def _get_center(self): '''Returns the center point of the path, disregarding transforms. ''' w, h = self.layout.get_pixel_size() x = (self.x + w / 2) y = (self.y + h / 2) return x, y
def is_list(str):
    """ Determines if an item in a paragraph is a list.

    If all of the lines in the markup start with a "*" or "1."
    this indicates a list as parsed by parse_paragraphs().
    It can be drawn with draw_list().
    """
    for line in str.split("\n"):
        stripped = line.replace("\t", "").lstrip()
        if stripped.startswith("*"):
            continue
        # Numbered item: up to three digits, a dot and a space.
        if re.search(r"^([0-9]{1,3}\. )", stripped):
            continue
        return False
    return True
def is_math(str):
    """ Determines if an item in a paragraph is a LaTeX math equation.

    Math equations are wrapped in <math></math> tags.
    They can be drawn as an image using draw_math().
    """
    stripped = str.strip()
    if not stripped.startswith("<math>"):
        return False
    return stripped.endswith("</math>")
def draw_math(str, x, y, alpha=1.0):
    """ Uses mimetex to generate a GIF-image from the LaTeX equation.

    Draws the image at (x, y) and returns its (width, height).
    """
    # Import the drawing context lazily; when unavailable, _ctx is
    # assumed to already be in scope.
    try: from web import _ctx
    except: pass
    
    # Strip the <math></math> wrapper before rendering.
    str = re.sub("</{0,1}math>", "", str.strip())
    img = mimetex.gif(str)
    w, h = _ctx.imagesize(img)
    _ctx.image(img, x, y, alpha=alpha)
    return w, h
def textwidth(str):
    """Measure the width of a string of text in the drawing context.

    textwidth() reports incorrectly when lineheight() is smaller
    than 1.0, so line height is temporarily reset to 1 while measuring.
    """
    # Import the drawing context lazily; when unavailable, _ctx is
    # assumed to already be in scope.
    try: from web import _ctx
    except: pass
    l = _ctx.lineheight()
    _ctx.lineheight(1)
    w = _ctx.textwidth(str)
    # Restore the caller's line height.
    _ctx.lineheight(l)
    return w