Python
from collections import Counter


def solve(template: str, ruleset: Ruleset) -> tuple[int, int]:
    """Calculates the required answers given the original template and the pair insertion rules"""
    pairs = ("".join(pair) for pair in zip(template, template[1:]))
    pair_counter = Counter(pairs)
    for _ in range(10):
        pair_counter = step(ruleset, pair_counter)
    part1 = calculate_answer(template, pair_counter)
    for _ in range(30):
        pair_counter = step(ruleset, pair_counter)
    part2 = calculate_answer(template, pair_counter)
    return (part1, part2)
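The snippet above calls a step and a calculate_answer helper that are not included in this excerpt. A minimal sketch of what they might look like, assuming (this is an assumption, not part of the original) that Ruleset maps a two-character pair to the single character inserted between them:

from collections import Counter


def step(ruleset, pair_counter):
    # Hypothetical helper: applies one round of pair insertion to the pair counts.
    # A pair "AB" with rule "AB" -> "C" becomes the two pairs "AC" and "CB".
    new_counter = Counter()
    for pair, count in pair_counter.items():
        inserted = ruleset[pair]
        new_counter[pair[0] + inserted] += count
        new_counter[inserted + pair[1]] += count
    return new_counter


def calculate_answer(template, pair_counter):
    # Hypothetical helper: counts each element once per pair it starts, then adds
    # the final template character (the only one that never starts a pair), and
    # returns the most common count minus the least common count.
    element_counter = Counter()
    for pair, count in pair_counter.items():
        element_counter[pair[0]] += count
    element_counter[template[-1]] += 1
    counts = element_counter.most_common()
    return counts[0][1] - counts[-1][1]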
Python
def graph_copy_integrity_checker(self, original_graph, addition_graph):
    """Utility function to test the integrity of a graph copy."""
    main_graph = copy.deepcopy(original_graph)
    node_map, edge_map = merge_graphs(main_graph, addition_graph)

    # --Verify that the updated graph has all the nodes and edges from both graphs
    expected_node_count = len(original_graph.get_all_node_ids()) + len(addition_graph.get_all_node_ids())
    expected_edge_count = len(original_graph.get_all_edge_ids()) + len(addition_graph.get_all_edge_ids())
    self.assertEqual(expected_node_count, len(main_graph.get_all_node_ids()))
    self.assertEqual(expected_edge_count, len(main_graph.get_all_edge_ids()))

    # --Verify that the original graph nodes and edges are still in-place
    for node_id in original_graph.get_all_node_ids():
        original_node = original_graph.get_node(node_id)
        new_node = main_graph.get_node(node_id)
        # Verify each node has the proper number of edges
        self.assertEqual(len(original_node['edges']), len(new_node['edges']))
        # Verify each node has the right edges
        for edge_id in original_node['edges']:
            self.assertIn(edge_id, new_node['edges'])
    for edge_id in original_graph.get_all_edge_ids():
        original_edge = original_graph.get_edge(edge_id)
        new_edge = main_graph.get_edge(edge_id)
        # Verify each edge has the correct targets
        self.assertEqual(original_edge['vertices'], new_edge['vertices'])

    # --Verify that the new nodes and edges exist and have the correct topology
    for node_id in addition_graph.get_all_node_ids():
        original_node = addition_graph.get_node(node_id)
        new_node = main_graph.get_node(node_map[node_id])
        # Verify each node has the proper number of edges
        self.assertEqual(len(original_node['edges']), len(new_node['edges']))
        # Verify each node has the right edges
        for edge_id in original_node['edges']:
            self.assertIn(edge_map[edge_id], new_node['edges'])
    for edge_id in addition_graph.get_all_edge_ids():
        original_edge = addition_graph.get_edge(edge_id)
        new_edge = main_graph.get_edge(edge_map[edge_id])
        # Verify each edge has the correct targets
        original_vertex_a, original_vertex_b = original_edge['vertices']
        mapped_new_vertices = (node_map[original_vertex_a], node_map[original_vertex_b])
        self.assertEqual(mapped_new_vertices, new_edge['vertices'])
Python
from collections import defaultdict, deque


def breadth_first_search(graph, root_node=None):
    """Searches through the graph in a breadth-first fashion.
    If root_node is None, an arbitrary node will be used as the root.
    If root_node is not None, it will be used as the root for the search tree.
    Returns a list of nodes, in the order that they were reached.
    """
    ordering = []

    all_nodes = graph.get_all_node_ids()
    if not all_nodes:
        return ordering

    queue = deque()
    discovered = defaultdict(lambda: False)
    to_visit = set(all_nodes)

    if root_node is None:
        root_node = all_nodes[0]

    discovered[root_node] = True
    queue.appendleft(root_node)

    # We need to make sure we visit all the nodes, including disconnected ones
    while True:
        # BFS Main Loop
        while len(queue) > 0:
            current_node = queue.pop()
            ordering.append(current_node)
            to_visit.remove(current_node)
            for n in graph.neighbors(current_node):
                if not discovered[n]:
                    discovered[n] = True
                    queue.appendleft(n)
        # New root node if we still have more nodes
        if len(to_visit) > 0:
            node = to_visit.pop()
            to_visit.add(node)  # --We need this here because we remove the node as part of the BFS algorithm
            discovered[node] = True
            queue.appendleft(node)
        else:
            break

    return ordering
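A hypothetical usage sketch, combining the search with the build_cycle_graph helper that appears later in this collection (it assumes node ids start at 1; the relative order of same-depth nodes depends on the neighbor ordering, so only structural properties are asserted):

graph = build_cycle_graph(5)
ordering = breadth_first_search(graph, root_node=1)
# The root comes first, followed by its neighbors (2 and 5, in some order),
# then the remaining nodes; every node appears exactly once.
assert ordering[0] == 1
assert sorted(ordering) == [1, 2, 3, 4, 5]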
Python
def find_minimum_spanning_tree(graph):
    """Calculates a minimum spanning tree for a graph.
    Returns a list of edges that define the tree.
    Returns an empty list for an empty graph.
    """
    mst = []

    if graph.num_nodes() == 0:
        return mst
    if graph.num_edges() == 0:
        return mst

    connected_components = get_connected_components(graph)
    if len(connected_components) > 1:
        raise DisconnectedGraphError

    edge_list = kruskal_mst(graph)
    return edge_list
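A hypothetical usage sketch, using the cost-weighted triangle builder defined later in this collection; Kruskal's algorithm should keep the two cheapest edges and drop the cost-10 edge:

graph = build_triangle_graph_with_costs()
mst_edges = find_minimum_spanning_tree(graph)
assert len(mst_edges) == 2  # a spanning tree of 3 nodes has exactly 2 edges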
Python
def find_minimum_spanning_tree_as_subgraph(graph):
    """Calculates a minimum spanning tree and returns a graph representation."""
    edge_list = find_minimum_spanning_tree(graph)
    subgraph = get_subgraph_from_edge_list(graph, edge_list)
    return subgraph
Python
def find_minimum_spanning_forest(graph):
    """Calculates the minimum spanning forest of a disconnected graph.
    Returns a list of lists, each containing the edges that define that tree.
    Returns an empty list for an empty graph.
    """
    msf = []

    if graph.num_nodes() == 0:
        return msf
    if graph.num_edges() == 0:
        return msf

    connected_components = get_connected_components_as_subgraphs(graph)
    for subgraph in connected_components:
        edge_list = kruskal_mst(subgraph)
        msf.append(edge_list)

    return msf
Python
def find_minimum_spanning_forest_as_subgraphs(graph):
    """Calculates the minimum spanning forest and returns a list of trees as subgraphs."""
    forest = find_minimum_spanning_forest(graph)
    list_of_subgraphs = [get_subgraph_from_edge_list(graph, edge_list) for edge_list in forest]
    return list_of_subgraphs
Python
def kruskal_mst(graph):
    """Implements Kruskal's Algorithm for finding minimum spanning trees.
    Assumes a non-empty, connected graph.
    """
    edges_accepted = 0
    ds = DisjointSet()
    pq = PriorityQueue()
    accepted_edges = []
    label_lookup = {}

    nodes = graph.get_all_node_ids()
    num_vertices = len(nodes)
    for n in nodes:
        label = ds.add_set()
        label_lookup[n] = label

    edges = graph.get_all_edge_objects()
    for e in edges:
        pq.put(e['id'], e['cost'])

    while edges_accepted < (num_vertices - 1):
        edge_id = pq.get()
        edge = graph.get_edge(edge_id)
        node_a, node_b = edge['vertices']
        label_a = label_lookup[node_a]
        label_b = label_lookup[node_b]
        a_set = ds.find(label_a)
        b_set = ds.find(label_b)
        if a_set != b_set:
            edges_accepted += 1
            accepted_edges.append(edge_id)
            ds.union(a_set, b_set)

    return accepted_edges
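kruskal_mst depends on DisjointSet and PriorityQueue classes that are not shown in this excerpt. A minimal sketch of the interfaces the function actually calls (add_set/find/union, and put/get ordered by lowest cost first), offered purely as an assumption for readers who want to run it standalone:

import heapq


class PriorityQueue:
    # Minimal min-priority queue; a counter breaks ties so payloads never compare.
    def __init__(self):
        self._heap = []
        self._count = 0

    def put(self, item, priority):
        heapq.heappush(self._heap, (priority, self._count, item))
        self._count += 1

    def get(self):
        # Returns the item with the lowest priority value.
        return heapq.heappop(self._heap)[2]

    def empty(self):
        return len(self._heap) == 0


class DisjointSet:
    # Minimal union-find; add_set returns a fresh label, find uses path halving.
    def __init__(self):
        self._parent = {}
        self._next_label = 0

    def add_set(self):
        label = self._next_label
        self._next_label += 1
        self._parent[label] = label
        return label

    def find(self, label):
        while self._parent[label] != label:
            self._parent[label] = self._parent[self._parent[label]]
            label = self._parent[label]
        return label

    def union(self, label_a, label_b):
        self._parent[self.find(label_a)] = self.find(label_b)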
Python
def __get_cycle(graph, ordering, parent_lookup):
    """Gets the main cycle of the dfs tree."""
    root_node = ordering[0]
    for i in range(2, len(ordering)):
        current_node = ordering[i]
        if graph.adjacent(current_node, root_node):
            path = []
            while current_node != root_node:
                path.append(current_node)
                current_node = parent_lookup[current_node]
            path.append(root_node)
            path.reverse()
            return path
Python
def __get_segments_from_node(node, graph):
    """Calculates the segments that can emanate from a particular node on the main cycle."""
    list_of_segments = []
    node_object = graph.get_node(node)
    for e in node_object['edges']:
        list_of_segments.append(e)
    return list_of_segments
Python
def __get_segments_from_cycle(graph, cycle_path):
    """Calculates the segments that emanate from the main cycle."""
    list_of_segments = []
    # We work through the cycle in a bottom-up fashion
    for n in cycle_path[::-1]:
        segments = __get_segments_from_node(n, graph)
        if segments:
            list_of_segments.append(segments)
    return list_of_segments
Python
def new_edge(self, node_a, node_b, cost=1):
    """Adds a new, undirected edge between node_a and node_b with a cost.
    Returns the edge id of the new edge."""
    edge_id = super(UndirectedGraph, self).new_edge(node_a, node_b, cost)
    self.nodes[node_b]['edges'].append(edge_id)
    return edge_id
Python
def neighbors(self, node_id):
    """Find all the nodes where there is an edge from the specified node to that node.
    Returns a list of node ids."""
    node = self.get_node(node_id)
    flattened_nodes_list = []
    for a, b in [self.get_edge(edge_id)['vertices'] for edge_id in node['edges']]:
        flattened_nodes_list.append(a)
        flattened_nodes_list.append(b)
    node_set = set(flattened_nodes_list)
    if node_id in node_set:
        node_set.remove(node_id)
    return [nid for nid in node_set]
Python
def delete_edge_by_id(self, edge_id):
    """Removes the edge identified by "edge_id" from the graph."""
    edge = self.get_edge(edge_id)
    # Remove the edge from the "from node"
    # --Determine the from node
    from_node_id = edge['vertices'][0]
    from_node = self.get_node(from_node_id)
    # --Remove the edge from it
    from_node['edges'].remove(edge_id)
    # Remove the edge from the "to node"
    to_node_id = edge['vertices'][1]
    to_node = self.get_node(to_node_id)
    # --Remove the edge from it
    to_node['edges'].remove(edge_id)
    # Remove the edge from the edge list
    del self.edges[edge_id]
    self._num_edges -= 1
Python
def move_edge_target(self, edge_id, node_a):
    """Moves an edge so that it targets node_a."""
    # Grab the edge
    edge = self.get_edge(edge_id)
    # Remove the edge from the original "target node"
    original_target_node_id = edge['vertices'][1]
    original_target_node = self.get_node(original_target_node_id)
    original_target_node['edges'].remove(edge_id)
    # Add the edge to the new target node
    new_target_node_id = node_a
    new_target_node = self.get_node(new_target_node_id)
    new_target_node['edges'].append(edge_id)
    # Alter the vertices on the edge
    edge['vertices'] = (edge['vertices'][0], node_a)
Python
def _a_star_search_internal(graph, start, goal):
    """Performs an A* search, returning information about whether the goal node was reached
    and path cost information that can be used to reconstruct the path.
    """
    frontier = PriorityQueue()
    frontier.put(start, 0)
    came_from = {start: None}
    cost_so_far = {start: 0}
    goal_reached = False

    while not frontier.empty():
        current = frontier.get()
        if current == goal:
            goal_reached = True
            break
        for next_node in graph.neighbors(current):
            new_cost = cost_so_far[current] + graph.edge_cost(current, next_node)
            if next_node not in cost_so_far or new_cost < cost_so_far[next_node]:
                cost_so_far[next_node] = new_cost
                priority = new_cost + heuristic(goal, next_node)
                frontier.put(next_node, priority)
                came_from[next_node] = current

    return came_from, cost_so_far, goal_reached
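The search calls a module-level heuristic(goal, node) that is not shown here; with no node coordinates available, a zero heuristic is always admissible and reduces the search to Dijkstra's algorithm. A sketch under that assumption, plus a hypothetical helper that rebuilds the path from the returned came_from map:

def heuristic(goal, node):
    # Placeholder heuristic (an assumption, not the library's): returning 0 is
    # admissible for any graph and makes A* behave exactly like Dijkstra.
    # Replace with a domain-specific estimate when one is available.
    return 0


def reconstruct_path(came_from, start, goal):
    # Hypothetical helper: walks the came_from links backwards from goal to start.
    if goal not in came_from:
        return []  # the goal was never reached
    path = [goal]
    while path[-1] != start:
        path.append(came_from[path[-1]])
    path.reverse()
    return path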
Python
def graph_to_dot(graph, node_renderer=None, edge_renderer=None):
    """Produces a DOT specification string from the provided graph."""
    node_pairs = list(graph.nodes.items())
    edge_pairs = list(graph.edges.items())

    if node_renderer is None:
        node_renderer_wrapper = lambda nid: ''
    else:
        node_renderer_wrapper = lambda nid: ' [%s]' % ','.join(
            ['%s=%s' % tpl for tpl in list(node_renderer(graph, nid).items())])

    # Start the graph
    graph_string = 'digraph G {\n'
    graph_string += 'overlap=scale;\n'

    # Print the nodes (placeholder)
    for node_id, node in node_pairs:
        graph_string += '%i%s;\n' % (node_id, node_renderer_wrapper(node_id))

    # Print the edges
    for edge_id, edge in edge_pairs:
        node_a = edge['vertices'][0]
        node_b = edge['vertices'][1]
        graph_string += '%i -> %i;\n' % (node_a, node_b)

    # Finish the graph
    graph_string += '}'

    return graph_string
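A hypothetical usage sketch; note that, as written, the function always emits a digraph header (even for undirected graphs) and ignores its edge_renderer parameter:

graph = build_triangle_graph()  # builder defined later in this collection
print(graph_to_dot(graph))
# Expected shape of the output (node and edge ordering may vary):
# digraph G {
# overlap=scale;
# 1;
# 2;
# 3;
# 1 -> 2;
# 2 -> 3;
# 3 -> 1;
# }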
Python
def build_cycle_graph(num_nodes):
    """Builds a cycle graph with the specified number of nodes.
    Ref: http://mathworld.wolfram.com/CycleGraph.html"""
    graph = UndirectedGraph()

    if num_nodes > 0:
        first_node = graph.new_node()
        if num_nodes > 1:
            previous_node = first_node
            for _ in range(num_nodes - 1):
                new_node = graph.new_node()
                graph.new_edge(previous_node, new_node)
                previous_node = new_node
            graph.new_edge(previous_node, first_node)

    return graph
Python
def build_wheel_graph(num_nodes):
    """Builds a wheel graph with the specified number of nodes.
    Ref: http://mathworld.wolfram.com/WheelGraph.html"""
    # The easiest way to build a wheel graph is to build
    # C_n-1 and then add a hub node and spoke edges
    graph = build_cycle_graph(num_nodes - 1)

    cycle_graph_vertices = graph.get_all_node_ids()
    node_id = graph.new_node()
    for cycle_node in cycle_graph_vertices:
        graph.new_edge(node_id, cycle_node)

    return graph
Python
def build_triangle_graph():
    """Builds a triangle graph, C3.
    Ref: http://mathworld.wolfram.com/CycleGraph.html"""
    graph = build_cycle_graph(3)
    return graph
Python
def build_square_graph():
    """Builds a square graph, C4.
    Ref: http://mathworld.wolfram.com/CycleGraph.html"""
    graph = build_cycle_graph(4)
    return graph
Python
def build_k5_graph():
    """Makes a new K5 graph.
    Ref: http://mathworld.wolfram.com/Pentatope.html"""
    graph = UndirectedGraph()

    # K5 has 5 nodes
    for _ in range(5):
        graph.new_node()

    # K5 has 10 edges
    # --Edge: a
    graph.new_edge(1, 2)
    # --Edge: b
    graph.new_edge(2, 3)
    # --Edge: c
    graph.new_edge(3, 4)
    # --Edge: d
    graph.new_edge(4, 5)
    # --Edge: e
    graph.new_edge(5, 1)
    # --Edge: f
    graph.new_edge(1, 3)
    # --Edge: g
    graph.new_edge(1, 4)
    # --Edge: h
    graph.new_edge(2, 4)
    # --Edge: i
    graph.new_edge(2, 5)
    # --Edge: j
    graph.new_edge(3, 5)

    return graph
Python
def build_groetzch_graph():
    """Makes a new Groetzsch graph.
    Ref: http://mathworld.wolfram.com/GroetzschGraph.html"""
    # Because the graph is so complicated, we want to
    # build it via adjacency matrix specification

    # --Initialize the matrix to all zeros
    adj = [[0 for _ in range(11)] for _ in range(11)]

    # --Add individual edge connections
    row_connections = []
    row_connections.append((1, 2, 7, 10))
    row_connections.append((0, 3, 6, 9))
    row_connections.append((0, 4, 6, 8))
    row_connections.append((1, 4, 8, 10))
    row_connections.append((2, 3, 7, 9))
    row_connections.append((6, 7, 8, 9, 10))
    row_connections.append((1, 2, 5))
    row_connections.append((0, 4, 5))
    row_connections.append((2, 3, 5))
    row_connections.append((1, 4, 5))
    row_connections.append((0, 3, 5))

    for j, tpl in enumerate(row_connections):
        for i in tpl:
            adj[j][i] = 1
            adj[i][j] = 1

    # Debug print the adjacency matrix
    # for row in adj:
    #     print(row)

    graph, _ = create_graph_from_adjacency_matrix(adj)

    return graph
Python
def build_franklin_graph():
    """Makes a new Franklin graph.
    Ref: http://mathworld.wolfram.com/FranklinGraph.html"""
    # The easiest way to build the Franklin graph is to start
    # with C12 and add the additional 6 edges
    graph = build_cycle_graph(12)

    edge_tpls = [
        (1, 8),
        (2, 7),
        (3, 10),
        (4, 9),
        (5, 12),
        (6, 11),
    ]

    for i, j in edge_tpls:
        graph.new_edge(i, j)

    return graph
Python
def build_chvatal_graph():
    """Makes a new Chvatal graph.
    Ref: http://mathworld.wolfram.com/ChvatalGraph.html"""
    # The easiest way to build the Chvatal graph is to start
    # with C12 and add the additional 12 edges
    graph = build_cycle_graph(12)

    edge_tpls = [
        (1, 7), (1, 9), (2, 5), (2, 11),
        (3, 7), (3, 9), (4, 10), (4, 12),
        (5, 8), (6, 10), (6, 12), (8, 11),
    ]

    for i, j in edge_tpls:
        graph.new_edge(i, j)

    return graph
Python
def build_single_node_graph(directed=False):
    """Builds a graph with a single node for testing."""
    if directed:
        graph = DirectedGraph()
    else:
        graph = UndirectedGraph()
    graph.new_node()
    return graph
Python
def build_2_node_graph(directed=False):
    """Builds a 2-node connected graph for testing."""
    if directed:
        graph = DirectedGraph()
    else:
        graph = UndirectedGraph()
    graph.new_node()
    graph.new_node()
    graph.new_edge(1, 2)
    return graph
Python
def build_3_node_line_graph(directed=False):
    """Builds a 3-node, 2-edge connected line graph for testing."""
    if directed:
        graph = DirectedGraph()
    else:
        graph = UndirectedGraph()
    graph.new_node()
    graph.new_node()
    graph.new_node()
    graph.new_edge(1, 2)
    graph.new_edge(2, 3)
    return graph
Python
def build_3_node_line_root_articulation_graph(directed=False):
    """Builds a 3-node, 2-edge connected line graph for testing,
    where the root node is the articulation vertex."""
    if directed:
        graph = DirectedGraph()
    else:
        graph = UndirectedGraph()
    graph.new_node()
    graph.new_node()
    graph.new_node()
    graph.new_edge(1, 2)
    graph.new_edge(1, 3)
    return graph
Python
def build_triangle_graph_with_costs(directed=False):
    """Builds a triangle graph with costs for testing."""
    if directed:
        graph = DirectedGraph()
    else:
        graph = UndirectedGraph()
    graph.new_node()
    graph.new_node()
    graph.new_node()
    graph.new_edge(1, 2, 1)
    graph.new_edge(2, 3, 2)
    graph.new_edge(3, 1, 10)
    return graph
Python
def build_petersons_graph():
    """Builds a non-planar test graph that does not contain K5 or K3,3
    as a subgraph (the Petersen graph).
    Ref: http://mathworld.wolfram.com/PetersenGraph.html"""
    graph = build_5_cycle_graph()

    # --Build a 5-pointed star
    for _ in range(5):
        graph.new_node()
    graph.new_edge(6, 8)
    graph.new_edge(6, 9)
    graph.new_edge(7, 9)
    graph.new_edge(7, 10)
    graph.new_edge(8, 10)

    # --Connect it to the outside 5-cycle graph
    graph.new_edge(1, 6)
    graph.new_edge(2, 7)
    graph.new_edge(3, 8)
    graph.new_edge(4, 9)
    graph.new_edge(5, 10)

    return graph
Python
import copy


def make_subgraph(graph, vertices, edges):
    """Converts a subgraph given by a list of vertices and edges into a graph object."""
    # Copy the entire graph
    local_graph = copy.deepcopy(graph)

    # Remove all the edges that aren't in the list
    edges_to_delete = [x for x in local_graph.get_all_edge_ids() if x not in edges]
    for e in edges_to_delete:
        local_graph.delete_edge_by_id(e)

    # Remove all the vertices that aren't in the list
    nodes_to_delete = [x for x in local_graph.get_all_node_ids() if x not in vertices]
    for n in nodes_to_delete:
        local_graph.delete_node(n)

    return local_graph
Python
import copy


def convert_graph_directed_to_undirected(dg):
    """Converts a directed graph into an undirected graph.
    Directed edges are made undirected."""
    udg = UndirectedGraph()

    # Copy the graph
    # --Copy nodes
    # --Copy edges
    udg.nodes = copy.deepcopy(dg.nodes)
    udg.edges = copy.deepcopy(dg.edges)
    udg.next_node_id = dg.next_node_id
    udg.next_edge_id = dg.next_edge_id

    # Convert the directed edges into undirected edges
    for edge_id in udg.get_all_edge_ids():
        edge = udg.get_edge(edge_id)
        target_node_id = edge['vertices'][1]
        target_node = udg.get_node(target_node_id)
        target_node['edges'].append(edge_id)

    return udg
Python
def remove_duplicate_edges_directed(dg):
    """Removes duplicate edges from a directed graph."""
    # With directed edges, we can just hash the to and from node id tuples and if
    # an edge happens to conflict with one that already exists, we delete it
    # --For aesthetics, we sort the edge ids so that lower edge ids are kept
    lookup = {}
    edges = sorted(dg.get_all_edge_ids())
    for edge_id in edges:
        e = dg.get_edge(edge_id)
        tpl = e['vertices']
        if tpl in lookup:
            dg.delete_edge_by_id(edge_id)
        else:
            lookup[tpl] = edge_id
Python
def remove_duplicate_edges_undirected(udg):
    """Removes duplicate edges from an undirected graph."""
    # With undirected edges, we need to hash both combinations of the to-from
    # node ids, since a-b and b-a are equivalent
    # --For aesthetics, we sort the edge ids so that lower edge ids are kept
    lookup = {}
    edges = sorted(udg.get_all_edge_ids())
    for edge_id in edges:
        e = udg.get_edge(edge_id)
        tpl_a = e['vertices']
        tpl_b = (tpl_a[1], tpl_a[0])
        if tpl_a in lookup or tpl_b in lookup:
            udg.delete_edge_by_id(edge_id)
        else:
            lookup[tpl_a] = edge_id
            lookup[tpl_b] = edge_id
Python
def create_graph_from_adjacency_matrix(adjacency_matrix):
    """Generates a graph from an adjacency matrix specification.
    Returns a tuple containing the graph and a list-mapping of node ids to matrix column indices.
    The graph will be an UndirectedGraph if the provided adjacency matrix is symmetric.
    The graph will be a DirectedGraph if the provided adjacency matrix is not symmetric.
    Ref: http://mathworld.wolfram.com/AdjacencyMatrix.html"""
    if is_adjacency_matrix_symmetric(adjacency_matrix):
        graph = UndirectedGraph()
    else:
        graph = DirectedGraph()

    node_column_mapping = []

    num_columns = len(adjacency_matrix)
    for _ in range(num_columns):
        node_id = graph.new_node()
        node_column_mapping.append(node_id)

    for j in range(num_columns):
        for i in range(num_columns):
            if adjacency_matrix[j][i]:
                jnode_id = node_column_mapping[j]
                inode_id = node_column_mapping[i]
                # Because of our adjacency matrix encoding, [j][i] in our code corresponds
                # to [i][j] in a traditional matrix interpretation. Thus, we need to put an
                # edge from node i to node j if [j][i] in our code is non-zero
                graph.new_edge(inode_id, jnode_id)

    return (graph, node_column_mapping)
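A hypothetical usage sketch. One caveat worth noting: for a symmetric matrix the double loop visits both [j][i] and [i][j], so each undirected edge is created twice; the remove_duplicate_edges_undirected helper above can strip the mirrored extras:

adj = [
    [0, 1, 1],
    [1, 0, 1],
    [1, 1, 0],
]
graph, node_column_mapping = create_graph_from_adjacency_matrix(adj)
remove_duplicate_edges_undirected(graph)  # drop the mirrored duplicates
assert len(node_column_mapping) == 3  # one node per matrix column
assert len(graph.get_all_edge_ids()) == 3  # an undirected triangle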
Python
def is_adjacency_matrix_symmetric(adjacency_matrix):
    """Determines if an adjacency matrix is symmetric.
    Ref: http://mathworld.wolfram.com/SymmetricMatrix.html"""
    # Verify that the matrix is square
    num_columns = len(adjacency_matrix)
    for column in adjacency_matrix:
        # In a square matrix, every row should be the same length as the number of columns
        if len(column) != num_columns:
            return False

    # Loop through the bottom half of the matrix and compare it to the top half
    # --We do the bottom half because of how we construct adjacency matrices
    max_i = 0
    for j in range(num_columns):
        for i in range(max_i):
            # If i == j, we can skip ahead so we don't compare with ourself
            if i == j:
                continue
            # Compare the value in the bottom half with the mirrored value in the top half
            # If they aren't the same, the matrix isn't symmetric
            if adjacency_matrix[j][i] != adjacency_matrix[i][j]:
                return False
        max_i += 1

    # If we reach this far without returning false, then we know that everything matched,
    # which makes this a symmetric matrix
    return True
Python
def depth_first_search(graph, root_node=None):
    """Searches through the graph in a depth-first fashion.
    If root_node is None, an arbitrary node will be used as the root.
    If root_node is not None, it will be used as the root for the search tree.
    Returns a list of nodes, in the order that they were reached.
    """
    ordering, parent_lookup, children_lookup = depth_first_search_with_parent_data(graph, root_node)
    return ordering
Python
from collections import defaultdict, deque


def depth_first_search_with_parent_data(graph, root_node=None, adjacency_lists=None):
    """Performs a depth-first search with visiting order of nodes determined by provided
    adjacency lists, and also returns a parent lookup dict and a children lookup dict."""
    ordering = []
    parent_lookup = {}
    children_lookup = defaultdict(lambda: [])

    all_nodes = graph.get_all_node_ids()
    if not all_nodes:
        return ordering, parent_lookup, children_lookup

    stack = deque()
    discovered = defaultdict(lambda: False)
    unvisited_nodes = set(all_nodes)

    if root_node is None:
        root_node = all_nodes[0]

    if adjacency_lists is None:
        adj = lambda v: graph.neighbors(v)
    else:
        adj = lambda v: adjacency_lists[v]

    # --Initialize the stack, simulating the DFS call on the root node
    stack.appendleft(root_node)
    parent_lookup[root_node] = root_node

    # We're using a non-recursive implementation of DFS, since Python isn't great for deep recursion
    while True:
        # Main DFS Loop
        while len(stack) > 0:
            u = stack.popleft()
            if not discovered[u]:
                discovered[u] = True
                if u in unvisited_nodes:
                    unvisited_nodes.remove(u)
                ordering.append(u)
                neighbors = adj(u)
                # When adding the new nodes to the stack, we want to add them in reverse order so that
                # the order the nodes are visited is the same as with a recursive DFS implementation
                for n in neighbors[::-1]:
                    if discovered[n]:
                        # If the node already exists in the discovered nodes list
                        # we don't want to re-add it to the stack
                        continue
                    stack.appendleft(n)
                    parent_lookup[n] = u
                    children_lookup[u].append(n)
        # While there are still nodes that need visiting, repopulate the stack
        if len(unvisited_nodes) > 0:
            u = unvisited_nodes.pop()
            stack.appendleft(u)
        else:
            break

    return ordering, parent_lookup, children_lookup
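A hypothetical usage sketch on the wheel-graph builder defined earlier in this collection (a connected graph, so every node is reached from the first root); the root maps to itself in the parent lookup, which makes it easy to spot:

graph = build_wheel_graph(6)
ordering, parent_lookup, children_lookup = depth_first_search_with_parent_data(graph)
root = ordering[0]
assert parent_lookup[root] == root
# Every other visited node records a parent, and the children lists mirror that.
for node in ordering[1:]:
    assert node in children_lookup[parent_lookup[node]]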
Python
def find_biconnected_components(graph):
    """Finds all the biconnected components in a graph.
    Returns a list of lists, each containing the edges that form a biconnected component.
    Returns an empty list for an empty graph.
    """
    list_of_components = []

    # Run the algorithm on each of the connected components of the graph
    components = get_connected_components_as_subgraphs(graph)
    for component in components:
        # --Call the internal biconnected components function to find
        # --the edge lists for this particular connected component
        edge_list = _internal_get_biconnected_components_edge_lists(component)
        list_of_components.extend(edge_list)

    return list_of_components
Python
def find_biconnected_components_as_subgraphs(graph):
    """Finds the biconnected components and returns them as subgraphs."""
    list_of_graphs = []

    list_of_components = find_biconnected_components(graph)
    for edge_list in list_of_components:
        subgraph = get_subgraph_from_edge_list(graph, edge_list)
        list_of_graphs.append(subgraph)

    return list_of_graphs
Python
def find_articulation_vertices(graph):
    """Finds all of the articulation vertices within a graph.
    Returns a list of all articulation vertices within the graph.
    Returns an empty list for an empty graph.
    """
    articulation_vertices = []

    all_nodes = graph.get_all_node_ids()
    if len(all_nodes) == 0:
        return articulation_vertices

    # Run the algorithm on each of the connected components of the graph
    components = get_connected_components_as_subgraphs(graph)
    for component in components:
        # --Call the internal articulation vertices function to find
        # --the node list for this particular connected component
        vertex_list = _internal_get_cut_vertex_list(component)
        articulation_vertices.extend(vertex_list)

    return articulation_vertices
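A hypothetical usage sketch, using the 3-node line builder above whose docstring states that the root is the articulation vertex:

graph = build_3_node_line_root_articulation_graph()
assert find_articulation_vertices(graph) == [1]  # node 1 joins the two leaves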
Python
from collections import defaultdict, deque


def _internal_get_biconnected_components_edge_lists(graph):
    """Works on a single connected component to produce the edge lists of the biconnected components.
    Returns a list of lists, each containing the edges that combine to produce the connected component.
    Returns a single nested list with 1 edge if there is only 1 edge in the graph
    (a 2-node graph is a special case, generally considered to be a biconnected graph).
    Returns an empty list if there are no edges in the graph (i.e. if it's a single-node or empty graph).
    """
    list_of_components = []

    num_nodes = graph.num_nodes()
    num_edges = graph.num_edges()
    if num_nodes <= 2:
        if num_edges == 1:
            list_of_components.append(graph.get_all_edge_ids())
        return list_of_components

    dfs_count = 0
    edge_stack = deque()
    dfs_stack = deque()
    visited = defaultdict(lambda: False)
    parent = defaultdict(lambda: None)
    depth = {}
    low = {}
    preorder_processed = defaultdict(lambda: False)
    postorder_processed = defaultdict(lambda: False)

    # We're simulating a recursive DFS with an explicit stack, since Python has a really small function stack
    unvisited_nodes = set(graph.get_all_node_ids())
    while len(unvisited_nodes) > 0:
        # --Initialize the first stack frame, simulating the DFS call on the root node
        u = unvisited_nodes.pop()
        parent[u] = u
        stack_frame = {
            'u': u,
            'v': None,
            'remaining_children': graph.neighbors(u)
        }
        dfs_stack.appendleft(stack_frame)

        while len(dfs_stack) > 0:
            frame = dfs_stack.popleft()
            u = frame['u']
            v = frame['v']
            if not visited[u]:
                if u in unvisited_nodes:
                    unvisited_nodes.remove(u)
                visited[u] = True
                dfs_count += 1
                depth[u] = dfs_count
                low[u] = depth[u]
                if len(frame['remaining_children']) > 0:
                    v = frame['remaining_children'].pop()
                    frame['v'] = v
            if v is None:
                # --u has no neighbor nodes
                continue
            if not preorder_processed[v]:
                # --This is the preorder processing, done for each neighbor node 'v' of u
                node_v = graph.get_node(v)
                for edge_id in node_v['edges']:
                    edge = graph.get_edge(edge_id)
                    tpl_a = (u, v)
                    tpl_b = (v, u)
                    if tpl_a == edge['vertices'] or tpl_b == edge['vertices']:
                        edge_stack.appendleft(edge_id)
                        break
                parent[v] = u
                preorder_processed[v] = True
                dfs_stack.appendleft(frame)
                # --Simulate the recursion to call the DFS on v
                new_frame = {
                    'u': v,
                    'v': None,
                    'remaining_children': graph.neighbors(v)
                }
                dfs_stack.appendleft(new_frame)
                continue
            elif not postorder_processed[v] and u == parent[v]:
                # --This is the postorder processing, done for each neighbor node 'v' of u
                if low[v] >= depth[u]:
                    component = output_component(graph, edge_stack, u, v)
                    if len(component) > 2:
                        # --You can't have a biconnected component with less than 3 edges
                        list_of_components.append(component)
                low[u] = min(low[u], low[v])
                postorder_processed[v] = True
            elif visited[v] and (parent[u] != v) and (depth[v] < depth[u]):
                # (u,v) is a backedge from u to its ancestor v
                node_v = graph.get_node(v)
                for edge_id in node_v['edges']:
                    edge = graph.get_edge(edge_id)
                    tpl_a = (u, v)
                    tpl_b = (v, u)
                    if tpl_a == edge['vertices'] or tpl_b == edge['vertices']:
                        edge_stack.appendleft(edge_id)
                        break
                low[u] = min(low[u], depth[v])
            if len(frame['remaining_children']) > 0:
                # --Continue onto the next neighbor node of u
                v = frame['remaining_children'].pop()
                frame['v'] = v
                dfs_stack.appendleft(frame)

    return list_of_components
Python
def output_component(graph, edge_stack, u, v):
    """Helper function to pop edges off the stack and produce a list of them."""
    edge_list = []
    while len(edge_stack) > 0:
        edge_id = edge_stack.popleft()
        edge_list.append(edge_id)
        edge = graph.get_edge(edge_id)
        tpl_a = (u, v)
        tpl_b = (v, u)
        if tpl_a == edge['vertices'] or tpl_b == edge['vertices']:
            break
    return edge_list
Python
from collections import defaultdict, deque


def _internal_get_cut_vertex_list(graph):
    """Works on a single connected component to produce the node list of cut vertices.
    Returns a list of nodes.
    Returns an empty list if there are no nodes in the graph (i.e. if it's an empty graph).
    """
    list_of_cut_vertices = set()

    if graph.num_nodes() == 0:
        return list(list_of_cut_vertices)

    dfs_count = 0
    root_dfs_count = 1
    dfs_stack = deque()
    visited = defaultdict(lambda: False)
    parent = defaultdict(lambda: None)
    children = defaultdict(lambda: [])
    depth = {}
    low = {}
    preorder_processed = defaultdict(lambda: False)
    postorder_processed = defaultdict(lambda: False)

    # We're simulating a recursive DFS with an explicit stack, since Python has a really small function stack
    unvisited_nodes = set(graph.get_all_node_ids())
    while len(unvisited_nodes) > 0:
        # --Initialize the first stack frame, simulating the DFS call on the root node
        u = unvisited_nodes.pop()
        parent[u] = u
        stack_frame = {
            'u': u,
            'v': None,
            'remaining_children': graph.neighbors(u)
        }
        dfs_stack.appendleft(stack_frame)

        while len(dfs_stack) > 0:
            frame = dfs_stack.popleft()
            u = frame['u']
            v = frame['v']
            if not visited[u]:
                if u in unvisited_nodes:
                    unvisited_nodes.remove(u)
                visited[u] = True
                dfs_count += 1
                depth[u] = dfs_count
                low[u] = depth[u]
                if len(frame['remaining_children']) > 0:
                    v = frame['remaining_children'].pop()
                    frame['v'] = v
            if v is None:
                # --u has no neighbor nodes
                continue
            if not preorder_processed[v]:
                # --This is the preorder processing, done for each neighbor node 'v' of u
                parent[v] = u
                children[u].append(v)
                preorder_processed[v] = True
                dfs_stack.appendleft(frame)
                # --Simulate the recursion to call the DFS on v
                new_frame = {
                    'u': v,
                    'v': None,
                    'remaining_children': graph.neighbors(v)
                }
                dfs_stack.appendleft(new_frame)
                continue
            elif not postorder_processed[v] and u == parent[v]:
                # --This is the postorder processing, done for each neighbor node 'v' of u
                if low[v] >= depth[u] and depth[u] > 1:
                    list_of_cut_vertices.add(u)
                low[u] = min(low[u], low[v])
                postorder_processed[v] = True
            elif visited[v] and (parent[u] != v) and (depth[v] < depth[u]):
                # (u,v) is a backedge from u to its ancestor v
                low[u] = min(low[u], depth[v])
            if len(frame['remaining_children']) > 0:
                # --Continue onto the next neighbor node of u
                v = frame['remaining_children'].pop()
                frame['v'] = v
                dfs_stack.appendleft(frame)

    # The root node gets special treatment; it's a cut vertex iff it has multiple children.
    # Note: children is keyed by node id, so we first find the node whose dfs count is 1
    # (the original indexed children by the dfs count itself, which only works when the
    # root node's id happens to equal 1).
    root_node_id = None
    for node_id, dfs in list(depth.items()):
        if dfs == root_dfs_count:
            root_node_id = node_id
            break
    if root_node_id is not None and len(children[root_node_id]) > 1:
        list_of_cut_vertices.add(root_node_id)

    return list(list_of_cut_vertices)
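The same recurrence yields the cut vertices; a compact recursive sketch on a plain adjacency dict (again a hypothetical stand-in, not library code) makes the two rules explicit: a non-root u is a cut vertex when some child v has low[v] >= depth[u], and the root is a cut vertex only when it has two or more DFS children.

def articulation_points(adj):
    depth, low, cuts = {}, {}, set()

    def dfs(u, parent_node, d):
        depth[u] = low[u] = d
        child_count = 0
        for v in adj[u]:
            if v not in depth:
                child_count += 1
                dfs(v, u, d + 1)
                low[u] = min(low[u], low[v])
                if parent_node is not None and low[v] >= depth[u]:
                    cuts.add(u)   # non-root rule
            elif v != parent_node:
                low[u] = min(low[u], depth[v])
        if parent_node is None and child_count > 1:
            cuts.add(u)           # root rule
        return cuts

    for node in adj:
        if node not in depth:
            dfs(node, None, 1)
    return cuts

# e.g. the path graph {1: [2], 2: [1, 3], 3: [2]} returns {2}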
Python
def __setup_dfs_data(graph, adj):
    """Sets up the dfs_data object, for consistency."""
    dfs_data = __get_dfs_data(graph, adj)

    dfs_data['graph'] = graph
    dfs_data['adj'] = adj

    L1, L2 = __low_point_dfs(dfs_data)
    dfs_data['lowpoint_1_lookup'] = L1
    dfs_data['lowpoint_2_lookup'] = L2

    edge_weights = __calculate_edge_weights(dfs_data)
    dfs_data['edge_weights'] = edge_weights

    return dfs_data
Python
def __calculate_edge_weights(dfs_data):
    """Calculates the weight of each edge, for embedding-order sorting."""
    graph = dfs_data['graph']

    weights = {}
    for edge_id in graph.get_all_edge_ids():
        edge_weight = __edge_weight(edge_id, dfs_data)
        weights[edge_id] = edge_weight

    return weights
Python
def __sort_adjacency_lists(dfs_data):
    """Sorts the adjacency list representation by the edge weights."""
    new_adjacency_lists = {}

    adjacency_lists = dfs_data['adj']
    edge_weights = dfs_data['edge_weights']
    edge_lookup = dfs_data['edge_lookup']

    for node_id, adj_list in list(adjacency_lists.items()):
        node_weight_lookup = {}
        frond_lookup = {}
        for node_b in adj_list:
            edge_id = dfs_data['graph'].get_first_edge_id_by_node_ids(node_id, node_b)
            node_weight_lookup[node_b] = edge_weights[edge_id]
            frond_lookup[node_b] = 1 if edge_lookup[edge_id] == 'backedge' else 2

        # Fronds should come before branches if the weights are equal
        new_list = sorted(adj_list, key=lambda n: frond_lookup[n])

        # Sort by weights
        new_list.sort(key=lambda n: node_weight_lookup[n])

        # Add the new sorted list to the new adjacency list lookup table
        new_adjacency_lists[node_id] = new_list

    return new_adjacency_lists
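The two-pass ordering above works because Python's sort is stable: sorting by the tie-breaker first and the primary key second is equivalent to sorting by the tuple (weight, frond-or-branch). A toy demonstration with made-up weights:

neighbors = ['a', 'b', 'c']
weight = {'a': 4, 'b': 2, 'c': 2}
kind = {'a': 2, 'b': 2, 'c': 1}   # 1 = frond, 2 = branch

ordered = sorted(neighbors, key=lambda n: kind[n])   # fronds first on ties
ordered.sort(key=lambda n: weight[n])                # stable sort by weight
assert ordered == ['c', 'b', 'a']                    # same as sorting by (weight, kind)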
Python
def __branch_point_dfs(dfs_data):
    """DFS that calculates the b(u) and N(u) lookups, and also reorders the adjacency lists."""
    u = dfs_data['ordering'][0]
    large_n = {}
    large_n[u] = 0
    stem = {}
    stem[u] = u
    b = {}
    b[u] = 1
    __branch_point_dfs_recursive(u, large_n, b, stem, dfs_data)
    dfs_data['N_u_lookup'] = large_n
    dfs_data['b_u_lookup'] = b
    return
Python
def __branch_point_dfs_recursive(u, large_n, b, stem, dfs_data):
    """A recursive implementation of the BranchPtDFS function, as defined on page 14 of the paper."""
    first_vertex = dfs_data['adj'][u][0]
    large_w = wt(u, first_vertex, dfs_data)
    if large_w % 2 == 0:
        large_w += 1
    v_I = 0
    v_II = 0
    for v in [v for v in dfs_data['adj'][u] if wt(u, v, dfs_data) <= large_w]:
        stem[u] = v  # not in the original paper, but a logical extension based on page 13
        if a(v, dfs_data) == u:  # uv is a tree edge
            large_n[v] = 0
            if wt(u, v, dfs_data) % 2 == 0:
                v_I = v
            else:
                b_u = b[u]
                l2_v = L2(v, dfs_data)
                #if l2_v > b_u:
                    # If this is true, then we're not on a branch at all
                    #continue
                if l2_v < b_u:
                    large_n[v] = 1
                elif b_u != 1:
                    xnode = stem[l2_v]
                    if large_n[xnode] != 0:
                        large_n[v] = large_n[xnode] + 1
                    elif dfs_data['graph'].adjacent(u, L1(v, dfs_data)):
                        large_n[v] = 2
                    else:
                        large_n[v] = large_n[u]
                if large_n[v] % 2 == 0:
                    v_II = v
                    break  # Goto 1
    if v_II != 0:
        # Move v_II to head of Adj[u]
        dfs_data['adj'][u].remove(v_II)
        dfs_data['adj'][u].insert(0, v_II)
    elif v_I != 0:
        # Move v_I to head of Adj[u]
        dfs_data['adj'][u].remove(v_I)
        dfs_data['adj'][u].insert(0, v_I)
    first_time = True
    for v in dfs_data['adj'][u]:
        if a(v, dfs_data) == u:
            b[v] = u
            if first_time:
                b[v] = b[u]
            elif wt(u, v, dfs_data) % 2 == 0:
                large_n[v] = 0
            else:
                large_n[v] = 1
            stem[u] = v
            __branch_point_dfs_recursive(v, large_n, b, stem, dfs_data)
            first_time = False
    return
Python
def __embed_branch(dfs_data):
    """Builds the combinatorial embedding of the graph. Returns whether the graph is planar."""
    u = dfs_data['ordering'][0]
    dfs_data['LF'] = []
    dfs_data['RF'] = []
    dfs_data['FG'] = {}
    n = dfs_data['graph'].num_nodes()
    f0 = (0, n)
    g0 = (0, n)
    L0 = {'u': 0, 'v': n}
    R0 = {'x': 0, 'y': n}
    dfs_data['LF'].append(f0)
    dfs_data['RF'].append(g0)
    dfs_data['FG'][0] = [L0, R0]
    dfs_data['FG']['m'] = 0
    dfs_data['FG']['l'] = 0
    dfs_data['FG']['r'] = 0

    nonplanar = __embed_branch_recursive(u, dfs_data)
    return not nonplanar
Python
def __embed_branch_recursive(u, dfs_data):
    """A recursive implementation of the EmbedBranch function, as defined on pages 8 and 22 of the paper."""
    for v in dfs_data['adj'][u]:
        nonplanar = True
        if a(v, dfs_data) == u:
            if b(v, dfs_data) == u:
                successful = __insert_branch(u, v, dfs_data)
                if not successful:
                    nonplanar = True
                    return nonplanar
            nonplanar = __embed_branch_recursive(v, dfs_data)
            if nonplanar:
                return nonplanar
        elif is_frond(u, v, dfs_data):
            successful = __embed_frond(u, v, dfs_data)
            if not successful:
                nonplanar = True
                return nonplanar
        else:
            # This block is totally valid, and there will be multiple cases when it gets hit.
            # We only want to do things if an edge is a tree edge (parent to child along the
            # spine of the DFS tree), or if it's a frond edge (an edge moving up the tree from
            # lower along the spine). Every non-tree edge will eventually get handled by the
            # frond edge code as we recurse up the spine.
            pass

    nonplanar = False
    return nonplanar
Python
def __insert_branch(u, v, dfs_data):
    """Embeds a branch Bu(v) (as described on page 22 of the paper).
    Returns whether the embedding was successful.
    """
    w = L1(v, dfs_data)

    # Embed uw
    successful = __embed_frond(u, w, dfs_data)
    if not successful:
        return False

    # Embed a branch marker uu on the side opposite to uw, in the same frond block
    successful = __embed_frond(u, u, dfs_data, as_branch_marker=True)
    if not successful:
        return False

    return True
Python
def __embed_frond(node_u, node_w, dfs_data, as_branch_marker=False):
    """Embeds a frond uw into either LF or RF. Returns whether the embedding was successful."""
    d_u = D(node_u, dfs_data)
    d_w = D(node_w, dfs_data)
    comp_d_w = abs(d_w)

    if as_branch_marker:
        d_w *= -1
        if dfs_data['last_inserted_side'] == 'LF':
            __insert_frond_RF(d_w, d_u, dfs_data)
        else:
            # We default to inserting a branch marker on the left side, unless we know otherwise
            __insert_frond_LF(d_w, d_u, dfs_data)
        return True

    m = dfs_data['FG']['m']
    l_w = lw(dfs_data)
    r_w = rw(dfs_data)
    u_m = u(m, dfs_data)
    x_m = fn_x(m, dfs_data)

    # There are multiple cases for both u and w
    # --Detect the case for u and store it for handling once the case for w is determined
    case_1 = False
    case_2 = False
    case_3 = False
    if d_u > u_m and d_u > x_m:
        case_1 = True
    elif d_u <= u_m and d_u > x_m:
        case_2 = True
    elif d_u > u_m and d_u <= x_m:
        case_3 = True
    else:
        # We should never get here; return False because there's no way we can embed this frond
        return False

    # --Detect the case for w and process the edge appropriately
    if comp_d_w >= l_w and comp_d_w >= r_w:
        # Case 4
        # --We do the same thing for all three u-cases: Add the frond to the left side
        __insert_frond_LF(d_w, d_u, dfs_data)

        dfs_data['FG']['m'] += 1
        m = dfs_data['FG']['m']
        n = dfs_data['graph'].num_nodes()

        Lm = {'u': d_w, 'v': d_u}
        Rm = {'x': n, 'y': 0}  # See page 17 for how we deal with Ri being empty
        dfs_data['FG'][m] = [Lm, Rm]
        return True
    elif comp_d_w >= l_w and comp_d_w < r_w:
        # Case 5
        return __do_case_5_work(d_w, d_u, case_1, case_2, case_3, dfs_data)
    elif comp_d_w < l_w and comp_d_w >= r_w:
        # Case 6
        return __do_case_6_work(d_w, d_u, case_1, case_2, case_3, dfs_data)
    elif comp_d_w < l_w and comp_d_w < r_w:
        # Case 7
        while comp_d_w < l_w and comp_d_w < r_w:
            if d_u > u_m and d_u > x_m:
                # Nonplanar case reached: u-case 1, w-case 7
                return False
            switch_sides(d_u, dfs_data)

            # --Update the local variables for the next loop iteration
            l_w = lw(dfs_data)
            r_w = rw(dfs_data)
            m = dfs_data['FG']['m']
            u_m = u(m, dfs_data)
            x_m = fn_x(m, dfs_data)
            case_1 = False
            case_2 = False
            case_3 = False
            if d_u <= u_m and d_u > x_m:
                case_2 = True
            elif d_u > u_m and d_u <= x_m:
                case_3 = True

        if comp_d_w >= l_w and comp_d_w < r_w:
            # Case 5 redux
            return __do_case_5_work(d_w, d_u, case_1, case_2, case_3, dfs_data)
        if comp_d_w < l_w and comp_d_w >= r_w:
            # Case 6 redux
            return __do_case_6_work(d_w, d_u, case_1, case_2, case_3, dfs_data)
    else:
        # We should never get here; return False because there's no way we can embed this frond
        return False

    # We really shouldn't get to this point, but this is a catch-all just in case
    return False
Python
def __do_case_5_work(d_w, d_u, case_1, case_2, case_3, dfs_data):
    """Encapsulates the work that will be done for case 5 of __embed_frond,
    since it gets used in more than one place."""
    # --We should only ever see u-cases 1 and 2
    if case_3:
        # --We should never get here
        return False
    comp_d_w = abs(d_w)

    # --Add the frond to the left side
    __insert_frond_LF(d_w, d_u, dfs_data)

    # --Add uw to Lm
    m = dfs_data['FG']['m']
    Lm = L(m, dfs_data)
    if comp_d_w < Lm['u']:
        Lm['u'] = d_w
    if d_u > Lm['v']:
        Lm['v'] = d_u

    # --Case 2 requires a bit of extra work
    if case_2:
        Lm['u'] = d_w
        x_m1 = fn_x(m - 1, dfs_data)
        while comp_d_w < x_m1:
            merge_Fm(dfs_data)
            m = dfs_data['FG']['m']
            x_m1 = fn_x(m - 1, dfs_data)

    return True
Python
def __do_case_6_work(d_w, d_u, case_1, case_2, case_3, dfs_data):
    """Encapsulates the work that will be done for case 6 of __embed_frond,
    since it gets used in more than one place."""
    # --We should only ever see u-cases 1 and 3
    if case_2:
        # --We should never get here
        return False
    comp_d_w = abs(d_w)

    # --Add the frond to the right side
    __insert_frond_RF(d_w, d_u, dfs_data)

    # --Add uw to Rm
    m = dfs_data['FG']['m']
    Rm = R(m, dfs_data)
    if comp_d_w < Rm['x']:
        Rm['x'] = d_w
    if d_u > Rm['y']:
        Rm['y'] = d_u

    # --Case 3 requires a bit of extra work
    if case_3:
        Rm['x'] = d_w
        u_m1 = u(m - 1, dfs_data)
        while comp_d_w < u_m1:
            merge_Fm(dfs_data)
            m = dfs_data['FG']['m']
            u_m1 = u(m - 1, dfs_data)

    return True
Python
def __insert_frond_RF(d_w, d_u, dfs_data):
    """Encapsulates the process of inserting a frond uw into the right side frond group."""
    # --Add the frond to the right side
    dfs_data['RF'].append((d_w, d_u))
    dfs_data['FG']['r'] += 1
    dfs_data['last_inserted_side'] = 'RF'
Python
def __insert_frond_LF(d_w, d_u, dfs_data):
    """Encapsulates the process of inserting a frond uw into the left side frond group."""
    # --Add the frond to the left side
    dfs_data['LF'].append((d_w, d_u))
    dfs_data['FG']['l'] += 1
    dfs_data['last_inserted_side'] = 'LF'
Python
def merge_Fm(dfs_data):
    """Merges Fm-1 and Fm, as defined on page 19 of the paper."""
    FG = dfs_data['FG']
    m = FG['m']
    FGm = FG[m]
    FGm1 = FG[m - 1]

    if FGm[0]['u'] < FGm1[0]['u']:
        FGm1[0]['u'] = FGm[0]['u']

    if FGm[0]['v'] > FGm1[0]['v']:
        FGm1[0]['v'] = FGm[0]['v']

    if FGm[1]['x'] < FGm1[1]['x']:
        FGm1[1]['x'] = FGm[1]['x']

    if FGm[1]['y'] > FGm1[1]['y']:
        FGm1[1]['y'] = FGm[1]['y']

    del FG[m]
    FG['m'] -= 1
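A concrete run of the merge, using only the 'FG' portion of dfs_data (the interval values here are invented for the example): the surviving entry absorbs the bounding intervals, taking the minimum of the low ends and the maximum of the high ends.

dfs_data = {'FG': {
    'm': 2,
    1: [{'u': 2, 'v': 5}, {'x': 3, 'y': 4}],
    2: [{'u': 1, 'v': 6}, {'x': 7, 'y': 0}],
}}
merge_Fm(dfs_data)
assert dfs_data['FG']['m'] == 1
assert dfs_data['FG'][1] == [{'u': 1, 'v': 6}, {'x': 3, 'y': 4}]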
Python
def switch_sides(d_u, dfs_data):
    """Switches Lm and Rm, as defined on page 20 of the paper."""
    m = dfs_data['FG']['m']
    u_m = u(m, dfs_data)

    if d_u <= u_m:
        l_w = lw(dfs_data)
        u_m1 = u(m - 1, dfs_data)
        while u_m1 > l_w:
            merge_Fm(dfs_data)
            m = dfs_data['FG']['m']
            u_m1 = u(m - 1, dfs_data)

        # l_w = r_w is handled dynamically by the switching of fronds below

        # l = r
        dfs_data['FG']['l'] = dfs_data['FG']['r']

        # adjust r so that gr is the first frond preceding xm in RF
        x_m = fn_x(m, dfs_data)
        r = len(dfs_data['RF']) - 1
        g_r = dfs_data['RF'][r][0]
        while g_r >= x_m:
            r -= 1
            if r < 0:
                r = 0
                break
            g_r = dfs_data['RF'][r][0]
        dfs_data['FG']['r'] = r

        # changing r_w is also handled dynamically by the frond switching
    else:
        r_w = rw(dfs_data)
        x_m1 = fn_x(m - 1, dfs_data)
        while x_m1 > r_w:
            merge_Fm(dfs_data)
            m = dfs_data['FG']['m']
            x_m1 = fn_x(m - 1, dfs_data)

        # r_w = l_w is handled dynamically by the switching of fronds below

        # r = l
        dfs_data['FG']['r'] = dfs_data['FG']['l']

        # adjust l so that fl is the first frond preceding um in LF
        u_m = u(m, dfs_data)
        l = len(dfs_data['LF']) - 1
        f_l = dfs_data['LF'][l][0]
        while f_l >= u_m:
            l -= 1
            f_l = dfs_data['LF'][l][0]
        dfs_data['FG']['l'] = l

        # changing l_w is also handled dynamically by the frond switching

    m = dfs_data['FG']['m']

    # Exchange the portion of the linked list LF between um and vm with the portion of RF between xm and ym
    LF = dfs_data['LF']
    RF = dfs_data['RF']
    u_m = u(m, dfs_data)
    v_m = v(m, dfs_data)
    x_m = fn_x(m, dfs_data)
    y_m = y(m, dfs_data)

    # --These are the baseline indexes; they should be narrowed appropriately
    first_left_index = 1
    last_left_index = len(LF) - 1
    first_right_index = 1
    last_right_index = len(RF) - 1

    # --Narrow the left indexes
    while first_left_index < last_left_index:
        frond = LF[first_left_index]
        if u_m >= frond[0]:
            first_left_index -= 1
            break
        else:
            first_left_index += 1
    while first_left_index < last_left_index:
        frond = LF[last_left_index]
        if v_m < frond[1]:
            last_left_index -= 1
        else:
            last_left_index += 1
            break

    # --Narrow the right indexes
    while first_right_index < last_right_index:
        frond = RF[first_right_index]
        if x_m >= frond[0]:
            first_right_index -= 1
            break
        else:
            first_right_index += 1
    while first_right_index < last_right_index:
        frond = RF[last_right_index]
        if y_m < frond[1]:
            last_right_index -= 1
        else:
            last_right_index += 1
            break

    # --Grab the appropriate list slices from each list
    LF_slice = LF[first_left_index:last_left_index + 1]
    RF_slice = RF[first_right_index:last_right_index + 1]

    # --Remove the slices from each list
    del LF[first_left_index:last_left_index + 1]
    del RF[first_right_index:last_right_index + 1]

    # --Add the slice from the right list to the left list
    i = first_left_index
    for x in RF_slice:
        LF.insert(i, x)
        i += 1

    # --Add the slice from the left list to the right list
    i = first_right_index
    for x in LF_slice:
        RF.insert(i, x)
        i += 1

    # Descriptive Note: We can just switch the slices directly because we know that if there
    # were any conflicts from the switch, those fronds would also have been included in the switch.

    # Exchange um and xm, vm and ym, Lm and Rm
    # --Only Lm and Rm need to be exchanged, since um, xm, vm, and ym are all dynamically calculated
    old_rm = dfs_data['FG'][m][1]
    old_lm = dfs_data['FG'][m][0]
    # --We have to convert the Lm and Rm dicts to use the correct keys
    converted_rm = __convert_RF_dict_to_LF(old_rm)
    converted_lm = __convert_LF_dict_to_RF(old_lm)
    dfs_data['FG'][m][1] = converted_lm
    dfs_data['FG'][m][0] = converted_rm

    merge_Fm(dfs_data)
Python
def __check_left_side_conflict(x, y, dfs_data):
    """Checks to see if the frond xy will conflict with a frond on the left side of the embedding."""
    l = dfs_data['FG']['l']
    w, z = dfs_data['LF'][l]
    return __check_conflict_fronds(x, y, w, z, dfs_data)
Python
def __check_right_side_conflict(x, y, dfs_data):
    """Checks to see if the frond xy will conflict with a frond on the right side of the embedding."""
    r = dfs_data['FG']['r']
    w, z = dfs_data['RF'][r]
    return __check_conflict_fronds(x, y, w, z, dfs_data)
Python
def __check_conflict_fronds(x, y, w, z, dfs_data):
    """Checks a pair of fronds to see if they conflict. Returns True if a conflict was found, False otherwise."""
    # Case 1: False frond and corresponding branch marker
    # --x and w should both be negative, and either xy or wz should be the same value uu
    if x < 0 and w < 0 and (x == y or w == z):
        # --Determine if the marker and frond correspond (have the same low-value)
        if x == w:
            return True
        return False

    # Case 2: Fronds with an overlap
    if b(x, dfs_data) == b(w, dfs_data) and x > w and w > y and y > z:
        return False

    # Case 3: Branch marker and a frond on that branch
    if x < 0 or w < 0:
        # --Determine which one is the branch marker
        if x < 0:
            u = abs(x)
            t = y
            x = w
            y = z
        else:
            u = abs(w)
            t = z
        # --Run the rest of the tests
        if b(x, dfs_data) == u and y < u and \
                (x, y) in __dfsify_branch_uv(u, t, dfs_data):
            return True
        return False

    # If none of the conflict conditions were met, then there are obviously no conflicts
    return False
Python
def __dfsify_branch_uv(u, v, dfs_data):
    """Helper function to convert the output of Bu(v) from edge ids to dfs-ordered fronds."""
    buv = B(u, v, dfs_data)
    new_list = []
    for edge_id in buv:
        edge = dfs_data['graph'].get_edge(edge_id)
        j, k = edge['vertices']
        d_x = D(j, dfs_data)
        d_y = D(k, dfs_data)
        if d_x < d_y:
            smaller = d_x
            larger = d_y
        else:
            smaller = d_y
            larger = d_x
        frond = (smaller, larger)
        new_list.append(frond)
    return new_list
Python
def __get_dfs_data(graph, adj=None):
    """Internal function that calculates the depth-first search of the graph.
    Returns a dictionary with the following data:
        * 'ordering':        A dfs-ordering list of nodes
        * 'ordering_lookup': A lookup dict mapping nodes to dfs-ordering
        * 'node_lookup':     A lookup dict mapping dfs-ordering to nodes
        * 'edge_lookup':     A lookup dict mapping edges as tree-edges or back-edges
        * 'parent_lookup':   A lookup dict mapping nodes to their parent node
        * 'children_lookup': A lookup dict mapping nodes to their children
    """
    ordering, parent_lookup, children_lookup = depth_first_search_with_parent_data(graph, adjacency_lists=adj)
    ordering_lookup = dict(zip(ordering, range(1, len(ordering) + 1)))
    node_lookup = dict(zip(range(1, len(ordering) + 1), ordering))
    edge_lookup = {}

    for edge_id in graph.get_all_edge_ids():
        edge = graph.get_edge(edge_id)
        node_a, node_b = edge['vertices']
        parent_a = parent_lookup[node_a]
        parent_b = parent_lookup[node_b]
        if parent_a == node_b or parent_b == node_a:
            edge_lookup[edge_id] = 'tree-edge'
        else:
            edge_lookup[edge_id] = 'backedge'

    dfs_data = {
        'ordering': ordering,
        'ordering_lookup': ordering_lookup,
        'node_lookup': node_lookup,
        'edge_lookup': edge_lookup,
        'parent_lookup': parent_lookup,
        'children_lookup': children_lookup,
    }

    return dfs_data
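A self-contained sketch of the idea behind depth_first_search_with_parent_data (this stand-in is hypothetical; the library function may differ) shows how the tree-edge/backedge split falls out of the parent lookup: an edge is a tree edge exactly when one endpoint is the DFS parent of the other.

def dfs_with_parents(adj, root):
    ordering, parent = [], {}
    stack = [(root, root)]   # (node, tentative parent) pairs
    while stack:
        u, p = stack.pop()
        if u in parent:
            continue
        parent[u] = p        # the parent is whoever actually reached u first
        ordering.append(u)
        for v in adj[u]:
            if v not in parent:
                stack.append((v, u))
    return ordering, parent

# On a triangle {1: [2, 3], 2: [1, 3], 3: [1, 2]} one possible result is
# ordering = [1, 3, 2] with parent = {1: 1, 3: 1, 2: 3}; the edge 1-2 then has
# parent[2] != 1 and parent[1] != 2, so it would be classified as a backedge.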
Python
def __calculate_adjacency_lists(graph):
    """Builds an adjacency list representation for the graph, since we can't guarantee
    that the internal representation of the graph is stored that way."""
    adj = {}
    for node in graph.get_all_node_ids():
        neighbors = graph.neighbors(node)
        adj[node] = neighbors
    return adj
Python
def __get_all_lowpoints(dfs_data):
    """Calculates the lowpoints for each node in a graph."""
    lowpoint_1_lookup = {}
    lowpoint_2_lookup = {}

    ordering = dfs_data['ordering']

    for node in ordering:
        low_1, low_2 = __get_lowpoints(node, dfs_data)
        lowpoint_1_lookup[node] = low_1
        lowpoint_2_lookup[node] = low_2

    return lowpoint_1_lookup, lowpoint_2_lookup
Python
def __get_lowpoints(node, dfs_data):
    """Calculates the lowpoints for a single node in a graph."""
    ordering_lookup = dfs_data['ordering_lookup']

    t_u = T(node, dfs_data)
    sorted_t_u = sorted(t_u, key=lambda a: ordering_lookup[a])
    lowpoint_1 = sorted_t_u[0]
    lowpoint_2 = sorted_t_u[1]

    return lowpoint_1, lowpoint_2
Python
def __edge_weight(edge_id, dfs_data):
    """Calculates the edge weight used to sort edges."""
    graph = dfs_data['graph']
    edge_lookup = dfs_data['edge_lookup']

    edge = graph.get_edge(edge_id)
    u, v = edge['vertices']
    d_u = D(u, dfs_data)
    d_v = D(v, dfs_data)
    lp_1 = L1(v, dfs_data)
    d_lp_1 = D(lp_1, dfs_data)

    if edge_lookup[edge_id] == 'backedge' and d_v < d_u:
        return 2 * d_v
    elif is_type_I_branch(u, v, dfs_data):
        return 2 * d_lp_1
    elif is_type_II_branch(u, v, dfs_data):
        return 2 * d_lp_1 + 1
    else:
        return 2 * graph.num_nodes() + 1
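A worked example of the weighting scheme with made-up DFS numbers: a backedge whose low endpoint has DFS number 3 weighs 6, a type I branch with the same lowpoint also weighs 6 (the frond-first tie-breaker in __sort_adjacency_lists then decides), the matching type II branch weighs 7, and everything else gets the 2n + 1 sentinel and sinks to the end.

d_v = d_lp_1 = 3   # invented DFS numbers for the example
n = 10
backedge_weight = 2 * d_v         # 6
type_I_weight = 2 * d_lp_1        # 6: ties with the frond, broken frond-first
type_II_weight = 2 * d_lp_1 + 1   # 7: sorts just after the type I branch
sentinel = 2 * n + 1              # 21: all remaining edges go last
assert backedge_weight == type_I_weight < type_II_weight < sentinel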
Python
def __calculate_bu_dfs_recursively(u, b, dfs_data):
    """Calculates the b(u) lookup table with a recursive DFS."""
    first_time = True
    for v in dfs_data['adj'][u]:
        if a(v, dfs_data) == u:
            if first_time:
                b[v] = b[u]
            else:
                b[v] = D(u, dfs_data)
            __calculate_bu_dfs_recursively(v, b, dfs_data)
            first_time = False
Python
def is_type_I_branch(u, v, dfs_data):
    """Determines whether a branch uv is a type I branch."""
    if u != a(v, dfs_data):
        return False
    if u == L2(v, dfs_data):
        return True
    return False
Python
def is_type_II_branch(u, v, dfs_data):
    """Determines whether a branch uv is a type II branch."""
    if u != a(v, dfs_data):
        return False
    if u < L2(v, dfs_data):
        return True
    return False
Python
def __get_descendants(node, dfs_data):
    """Gets the descendants of a node."""
    list_of_descendants = []

    stack = deque()
    children_lookup = dfs_data['children_lookup']

    current_node = node
    children = children_lookup[current_node]
    dfs_current_node = D(current_node, dfs_data)
    for n in children:
        dfs_child = D(n, dfs_data)
        # Validate that the child node is actually a descendant and not an ancestor
        if dfs_child > dfs_current_node:
            stack.append(n)

    while len(stack) > 0:
        current_node = stack.pop()
        list_of_descendants.append(current_node)
        children = children_lookup[current_node]
        dfs_current_node = D(current_node, dfs_data)
        for n in children:
            dfs_child = D(n, dfs_data)
            # Validate that the child node is actually a descendant and not an ancestor
            if dfs_child > dfs_current_node:
                stack.append(n)

    return list_of_descendants
Python
def S_star(u, dfs_data):
    """The set of all descendants of u, with u added."""
    s_u = S(u, dfs_data)
    if u not in s_u:
        s_u.append(u)
    return s_u
Python
def B(u, v, dfs_data):
    """The branch at u containing v is the set of all edges incident on v or any
    descendant of v, if a(v) == u.

    Bu(v) = {wx | w is in S*(v)}
    """
    if a(v, dfs_data) != u:
        return None
    return list(set(
        edge_id for w in S_star(v, dfs_data)
        for edge_id in dfs_data['graph'].get_node(w)['edges']
    ))
Python
def _L(dfs_data):
    """L(T) contains leaves and branch points for the DFS-tree T.

    L(T) = {v | the first w in Adj[v] corresponds to a frond vw}
    """
    node_set = set()
    for v, adj in list(dfs_data['adj'].items()):
        w = adj[0]
        if is_frond(v, w, dfs_data):
            node_set.add(v)
    return list(node_set)
Python
def fn_x(i, dfs_data):
    """The minimum vertex (DFS-number) in a frond contained in Ri."""
    try:
        return R(i, dfs_data)['x']
    except Exception:
        # Page 17 states that if Ri is empty, then we take xi to be n
        return dfs_data['graph'].num_nodes()
Python
def y(i, dfs_data):
    """The maximum vertex (DFS-number) in a frond contained in Ri."""
    try:
        return R(i, dfs_data)['y']
    except Exception:
        # Page 17 states that if Ri is empty, then we take yi to be 0
        return 0
Python
def is_planar(graph):
    """Determines whether a graph is planar or not."""
    # Determine connected components as subgraphs; their planarity is independent of each other
    connected_components = get_connected_components_as_subgraphs(graph)
    for component in connected_components:
        # Biconnected components likewise have independent planarity
        biconnected_components = find_biconnected_components_as_subgraphs(component)
        for bi_component in biconnected_components:
            planarity = __is_subgraph_planar(bi_component)
            if not planarity:
                return False
    return True
Python
def __is_subgraph_planar(graph):
    """Internal function to determine if a subgraph is planar."""
    # --First pass: Determine edge and vertex counts and validate Euler's Formula
    num_nodes = graph.num_nodes()
    num_edges = graph.num_edges()

    # --We can guarantee that if there are 4 or fewer nodes, then the graph is planar
    # --A 4-node simple graph has a maximum of 6 possible edges (K4); this will always
    #   satisfy Euler's Formula: 6 <= 3(4 - 2)
    if num_nodes < 5:
        return True

    if num_edges > 3 * (num_nodes - 2):
        return False

    # --At this point, we have no choice but to run the calculation the hard way
    return kocay_planarity_test(graph)
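A quick sanity check of the e <= 3(v - 2) filter against the two classic obstructions: K5 fails the bound outright, while K3,3 slips past it (a tighter triangle-free bound, e <= 2v - 4, would catch it) and so must go through kocay_planarity_test.

v_k5, e_k5 = 5, 10
assert e_k5 > 3 * (v_k5 - 2)       # 10 > 9: K5 is rejected without running the DFS

v_k33, e_k33 = 6, 9
assert e_k33 <= 3 * (v_k33 - 2)    # 9 <= 12: K3,3 needs the full planarity test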
Python
def add_set(self):
    """Adds a new set to the forest.
    Returns a label by which the new set can be referenced.
    """
    self.__label_counter += 1
    new_label = self.__label_counter

    self.__forest[new_label] = -1  # -1 marks a root: a new set is its own parent (negative values encode tree rank)

    self.__set_counter += 1

    return new_label
Python
def find(self, node_label):
    """Finds the set containing the node_label.
    Returns the set label.
    """
    queue = []
    current_node = node_label
    while self.__forest[current_node] >= 0:
        queue.append(current_node)
        current_node = self.__forest[current_node]
    root_node = current_node

    # Path compression: point every node on the walked path directly at the root
    for n in queue:
        self.__forest[n] = root_node

    return root_node
Python
def union(self, label_a, label_b):
    """Joins two sets into a single new set.
    label_a, label_b can be any nodes within the sets.
    """
    # Base case to avoid work
    if label_a == label_b:
        return

    # Find the tree root of each node
    root_a = self.find(label_a)
    root_b = self.find(label_b)

    # Avoid merging a tree to itself
    if root_a == root_b:
        return

    self.__internal_union(root_a, root_b)
    self.__set_counter -= 1
Python
def __internal_union(self, root_a, root_b):
    """Internal function to join two set trees specified by root_a and root_b.
    Assumes root_a and root_b are distinct.
    """
    # Merge the trees, smaller into larger (union by rank). Ranks are stored as
    # negative numbers, so the numerically smaller value marks the taller tree.
    update_rank = False

    # --Determine the larger tree
    rank_a = self.__forest[root_a]
    rank_b = self.__forest[root_b]
    if rank_a < rank_b:
        larger = root_a
        smaller = root_b
    else:
        larger = root_b
        smaller = root_a
    if rank_a == rank_b:
        update_rank = True

    # --Make the smaller tree a subtree of the larger tree
    self.__forest[smaller] = larger

    # --Update the rank of the new tree (if necessary)
    if update_rank:
        self.__forest[larger] -= 1
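A usage sketch for the three methods above; DisjointSet here is a placeholder name for whatever class these methods live on, not necessarily the library's real one.

forest = DisjointSet()
a, b, c = forest.add_set(), forest.add_set(), forest.add_set()   # labels 1, 2, 3
forest.union(a, b)                           # {a, b} {c}
assert forest.find(a) == forest.find(b)
assert forest.find(a) != forest.find(c)
forest.union(b, c)                           # one set: {a, b, c}
assert forest.find(a) == forest.find(c)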
Python
def matching_annotations_ids(self, factories):
    """Annotations that are within the timeframe that we're reindexing."""
    return [
        annotation.id
        for annotation in factories.Annotation.create_batch(
            3, updated=datetime.datetime.utcnow() - datetime.timedelta(days=3)
        )
    ]
Python
def not_matching_annotations(self, factories):
    """Annotations that are outside the timeframe that we're reindexing."""
    before_annotations = factories.Annotation.build_batch(
        3, updated=datetime.datetime.utcnow() - datetime.timedelta(days=14)
    )
    after_annotations = factories.Annotation.build_batch(
        3, updated=datetime.datetime.utcnow() + datetime.timedelta(days=14)
    )
    return before_annotations + after_annotations
Python
def handle_message(self, body, message):
    """
    Handles a realtime message by acknowledging it and then calling the
    wrapped handler.
    """
    message.ack()
    self.handler(body)
Python
def matching(cls, sockets, annotation):
    """Find sockets with matching filters for the given annotation.

    For this to work, the sockets must have first had `set_filter()`
    called on them.

    :param sockets: Iterable of sockets to check
    :param annotation: Annotation to match
    :return: A generator of matching socket objects
    """
    values = {
        "/id": [annotation.id],
        "/uri": [normalize_uri(annotation.target_uri)],
        "/references": set(annotation.references),
    }

    for socket in sockets:
        # Some sockets might not yet have the filter applied (or had a
        # non-parsable filter etc.)
        if not hasattr(socket, "filter_rows"):
            continue

        # Iterate over the filter_rows added by `set_filter()`
        for field, value in socket.filter_rows:
            try:
                if value in values[field]:
                    yield socket
                    break
            except KeyError:
                continue
Python
def _rows_for(cls, filter):
    """Convert a filter to field/value pairs."""
    for clause in filter["clauses"]:
        field = clause["field"]
        if field not in cls.KNOWN_FIELDS:
            continue

        values = clause["value"]
        # Normalise to an iterable of distinct values
        values = set(values) if isinstance(values, list) else [values]

        for value in values:
            if field == "/uri":
                value = normalize_uri(value)

            yield field, value
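To make the data flow concrete, here is a hedged sketch of what `_rows_for()` produces for a small filter. The host class name SocketFilter and its KNOWN_FIELDS value are assumptions (chosen to be consistent with the three fields used by `matching()` above).

# Hypothetical host class for the two methods above:
#
#   class SocketFilter:
#       KNOWN_FIELDS = {"/id", "/uri", "/references"}
#       matching = classmethod(matching)
#       _rows_for = classmethod(_rows_for)
#
filter_ = {
    "clauses": [
        {"field": "/uri", "value": ["https://example.com/a", "https://example.com/a"]},
        {"field": "/unknown", "value": "ignored"},  # skipped: not in KNOWN_FIELDS
    ]
}
# list(SocketFilter._rows_for(filter_)) -> [("/uri", "https://example.com/a")]
# The duplicate collapses because list values are passed through set(),
# and /uri values are run through normalize_uri() first.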
Python
def start(event):
    """
    Start some greenlets to process the incoming data from the message queue.

    This subscriber is called when the application is booted, and kicks off
    greenlets running `process_messages` for each message queue we subscribe
    to, plus one running `process_work_queue` to handle the queued work. The
    function does not block.
    """
    settings = event.app.registry.settings

    greenlets = [
        # Start greenlets to process messages from RabbitMQ
        gevent.spawn(messages.process_messages, settings, ANNOTATION_TOPIC, WORK_QUEUE),
        gevent.spawn(messages.process_messages, settings, USER_TOPIC, WORK_QUEUE),
        # And one to process the queued work
        gevent.spawn(process_work_queue, settings, WORK_QUEUE),
    ]

    # Start a "greenlet of last resort" to monitor the worker greenlets and
    # bail if any unexpected errors occur.
    gevent.spawn(supervise, greenlets)
Python
def process_work_queue(settings, queue, session_factory=None):
    """
    Process each message from the queue in turn, handling exceptions.

    This is the core of the streamer: we pull messages off the work queue,
    dispatching them as appropriate. The handling of each message is wrapped
    in code that ensures the database session is appropriately committed and
    closed between messages.
    """
    if session_factory is None:
        session_factory = _get_session
    session = session_factory(settings)

    topic_handlers = {
        ANNOTATION_TOPIC: messages.handle_annotation_event,
        USER_TOPIC: messages.handle_user_event,
    }

    for msg in queue:
        try:
            # All access to the database in the streamer is currently
            # read-only, so enforce that:
            session.execute(
                "SET TRANSACTION "
                "ISOLATION LEVEL SERIALIZABLE "
                "READ ONLY "
                "DEFERRABLE"
            )

            if isinstance(msg, messages.Message):
                messages.handle_message(msg, settings, session, topic_handlers)
            elif isinstance(msg, websocket.Message):
                websocket.handle_message(msg, session)
            else:
                raise UnknownMessageType(repr(msg))

        except (KeyboardInterrupt, SystemExit):
            session.rollback()
            raise
        except Exception as exc:
            log.warning("Caught exception handling streamer message:", exc_info=exc)
            session.rollback()
        else:
            session.commit()
        finally:
            session.close()
Python
def reindex_annotations_in_date_range(start_date, end_date, max_annotations=250000):
    """Re-index annotations from Postgres to Elasticsearch in a date range.

    :param start_date: Begin at this time (greater or equal)
    :param end_date: End at this time (less than or equal)
    :param max_annotations: Maximum number of items to process overall
    """
    log.info("Re-indexing from %s to %s...", start_date, end_date)

    indexer = BatchIndexer(celery.request.db, celery.request.es, celery.request)

    errored = indexer.index(
        annotation.id
        for annotation in celery.request.db.query(Annotation.id)
        .filter(Annotation.updated >= start_date)
        .filter(Annotation.updated <= end_date)
        .limit(max_annotations)
    )
    if errored:
        log.warning("Failed to re-index annotations into ES6: %s", errored)

    log.info(
        "Re-index from %s to %s complete.",
        start_date,
        end_date,
    )
Python
async def raid(self, ctx: Context, operand: str = ""):
    """
    Allows an admin user to lock down the server in case of a raid.

    This command toggles invite link generation for @everyone and revokes
    all existing invite links.
    """
    everyone = ctx.channel.guild.default_role
    perms = everyone.permissions
    enabled = not perms.create_instant_invite
    logs_channel = self.bot.get_channel(LOGGING_CHANNEL_ID)

    operand = operand.lower()
    boolonoff = ("OFF", "ON")
    action = True
    embed = None

    if not operand:
        # Status query
        await ctx.send(
            f"Raid protection currently {boolonoff[enabled]}. "
            f"Use `:raid [on/off]` to toggle."
        )
        action = False

    elif operand in ("on", "yes") and not enabled:
        # Need to turn it on
        enabled = True
        perms.update(create_instant_invite=False)
        embed = Embed(
            color=Colour.blue(),
            title="Raid Protection ON.",
            description=(
                "Raid protection now ON - All invite links were deleted "
                "and members may not create new ones"
            ),
        )
        for invite in await ctx.channel.guild.invites():
            # Delete all existing invite links
            await invite.delete()

    elif operand in ("off", "no") and enabled:
        enabled = False
        perms.update(create_instant_invite=True)
        embed = Embed(
            color=Colour.blue(),
            title="Raid Protection OFF.",
            description=(
                "Raid protection now OFF - Members can now create new "
                "invite links"
            ),
        )

    else:
        # No changes needed
        await ctx.send(f"Raid protection {boolonoff[enabled]}, nothing was changed.")
        action = False

    if action:
        # We toggled the setting: apply the permission change and log it
        msg = f"{ctx.author.name} toggled raid protection {boolonoff[enabled]}."
        await everyone.edit(reason=msg, permissions=perms)  # make the perm change
        await ctx.send(msg)  # direct response to invocation
        embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url)
        await logs_channel.send(embed=embed)
Python
async def bbcnews(self, ctx: Context):
    """
    Returns a link to BBC News.
    """
    await ctx.send("https://www.bbc.co.uk/iplayer/live/bbcnews")
Python
async def skynews(self, ctx: Context):
    """
    Returns a link to Sky News.
    """
    await ctx.send("https://www.youtube.com/watch?v=9Auq9mYxFEE")
Python
async def tos(self, ctx: Context):
    """
    Returns a link to Discord's Terms of Service.
    """
    await ctx.send("https://www.discord.com/terms")
Python
def encrypt(public_key: str, secret_value: str) -> str:
    """Encrypt a Unicode string with the given Base64-encoded public key.

    Returns the sealed ciphertext as a Base64 string.
    """
    key = public.PublicKey(public_key.encode("utf-8"), encoding.Base64Encoder())
    sealed_box = public.SealedBox(key)
    encrypted = sealed_box.encrypt(secret_value.encode("utf-8"))
    return b64encode(encrypted).decode("utf-8")
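A hedged roundtrip check for encrypt(), assuming PyNaCl is installed. The keypair here is throwaway, generated only for the demonstration; only the Base64 plumbing mirrors the function above.

from base64 import b64decode
from nacl import encoding, public

# Throwaway keypair for this demonstration
private_key = public.PrivateKey.generate()
public_b64 = private_key.public_key.encode(encoding.Base64Encoder()).decode("utf-8")

token = encrypt(public_b64, "s3cr3t")

# A SealedBox with the private key can open what encrypt() sealed
plaintext = public.SealedBox(private_key).decrypt(b64decode(token))
assert plaintext.decode("utf-8") == "s3cr3t"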
Python
def print_response(response):
    """Prints a human-readable version of a `requests` response."""
    res = {}
    if response.text:
        try:
            res['body'] = response.json()
        except ValueError:
            # Body isn't JSON; fall back to the raw text
            res['body'] = response.text
    res['status_code'] = response.status_code
    print(json.dumps(res, indent=4, sort_keys=True))
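A quick usage sketch (the URLs are just convenient example endpoints and assume network access):

import requests

# JSON body: printed as structured data
print_response(requests.get("https://httpbin.org/json"))
# Non-JSON body: falls back to the raw text
print_response(requests.get("https://httpbin.org/html"))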
Python
def global_avg_pooling(x):
    """Global average pooling over the spatial dimensions.

    The incoming tensor must be 4-D (batch, height, width, channels);
    the result is 2-D (batch, channels).
    """
    gap = tf.reduce_mean(x, axis=[1, 2])
    return gap
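A quick shape check, assuming TensorFlow 2.x and NHWC layout:

import tensorflow as tf

x = tf.random.normal([8, 7, 7, 64])  # (batch, height, width, channels)
gap = global_avg_pooling(x)
print(gap.shape)  # (8, 64): height and width averaged away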
Python
def login(self, email, password):
    '''Authenticate the user to use the History Playground.

    Stores the authentication token in the Playground object.

    @param self: Playground object
    @param email: Email address used to register on the History Playground
    @param password: Password associated with email address when registering
    '''
    session = requests.Session()
    payload = {'email': email, 'password': password}
    data = session.post(self._base_url + 'auth', data=payload).json()
    if 'token' in data:
        self.__auth_token = data['token']
    else:
        self.__auth_token = None
        print('Failed to login.')