Python
def tick(self, duration_ms):
    """Tick the simulator with the given duration

    :param duration_ms: The duration to step the sim
    """
    tick = SimulatorTick()
    tick.milliseconds = duration_ms
    self.sim_tick_sender.send(tick)
Python
def stop(self):
    """Stop all listeners and senders."""
    for unix_socket in [
        self.sim_tick_sender,
        self.world_state_sender,
        self.blue_world_sender,
        self.yellow_world_sender,
        self.blue_primitive_set_sender,
        self.yellow_primitive_set_sender,
        self.ssl_wrapper_listener,
        self.blue_robot_status_listener,
        self.yellow_robot_status_listener,
    ]:
        unix_socket.force_stop()
Python
def plot_zones(self, zones: list):
    """Plots pass generator zones from the Python bindings as per
    software/python_bindings/pass_generator.cpp.

    :param zones: A list of rectangular passing zones
    """
    zones_dict = dict(center_xs=[], center_ys=[], widths=[], heights=[])

    for zone in zones:
        zones_dict["center_xs"].append(zone.centre().x())
        zones_dict["center_ys"].append(zone.centre().y())
        zones_dict["widths"].append(zone.xLength())
        zones_dict["heights"].append(zone.yLength())

    self.zones_source.data.update(zones_dict)
Python
def plot_passes(self, passes: list):
    """Plots dicts describing passes from Python bindings as per
    software/python_bindings/pass_utilities.cpp.

    :param passes: A list of dicts describing passes and their associated ratings
    """
    passes_dict = dict(
        receiver_xs=[],
        receiver_ys=[],
        pass_line_xs=[],
        pass_line_ys=[],
        pass_rating=[],
        line_width=[],
    )

    for pass_and_rating_dict in passes:
        the_pass = pass_and_rating_dict["pass"]
        rating = pass_and_rating_dict["rating"]

        passer_point = the_pass["passer_point"]
        receiver_point = the_pass["receiver_point"]

        passes_dict["receiver_xs"].append(receiver_point.x())
        passes_dict["receiver_ys"].append(receiver_point.y())
        passes_dict["pass_line_xs"].append([passer_point.x(), receiver_point.x()])
        passes_dict["pass_line_ys"].append([passer_point.y(), receiver_point.y()])
        passes_dict["pass_rating"].append(rating)

        # line width is 1 + pass_rating
        passes_dict["line_width"].append(1 + rating)

    self.passes_source.data.update(passes_dict)
Python
def toggle_pause(event):
    """Pause/Play animation when clicked"""
    nonlocal is_paused

    if is_paused:
        robot_anim.event_source.start()
    else:
        robot_anim.event_source.stop()

    is_paused = not is_paused
Python
def on_plot_hover(event):
    """Highlight robot path when hovering over it"""
    # Iterating over each data member plotted
    for robot_id, line in enumerate(line_list):
        # Searching which data member corresponds to current mouse position
        if line.contains(event)[0]:
            line.set_alpha(1.0)
            robot_id_text.set_text(f"Robot {robot_id}")
        else:
            line.set_alpha(default_line_alpha)
Python
def _get_item_at_idx(self, idx: int) -> MsgClass:
    """Returns the idx'th message out of all the messages in the data directory.

    :param idx: index of the message
    :return: the message at the given index
    """
    if idx in self.cached_unpacked_msgs:
        return self.cached_unpacked_msgs[idx]

    if idx >= self.chunk_start_idxs[-1] + len(self.repeated_any_msgs[-1].messages):
        raise IndexError(
            "Tried to access msg idx {} when we only have {} msgs!".format(
                idx,
                self.chunk_start_idxs[-1] + len(self.repeated_any_msgs[-1].messages),
            )
        )

    item_chunk_idx = len(self.chunk_start_idxs) - 1
    for chunk_idx in range(len(self.chunk_start_idxs) - 1):
        if self.chunk_start_idxs[chunk_idx + 1] > idx:
            item_chunk_idx = chunk_idx
            break

    msg_idx = idx - self.chunk_start_idxs[item_chunk_idx]
    msg = self.msg_class()
    self.repeated_any_msgs[item_chunk_idx].messages[msg_idx].Unpack(msg)
    self.cached_unpacked_msgs[idx] = msg

    return msg
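The chunk lookup above walks chunk_start_idxs linearly. A minimal sketch of the same index-to-chunk mapping using the standard library's bisect module, assuming (as the loop above implies) that chunk_start_idxs is sorted ascending and starts at 0; the helper name and example numbers are hypothetical:

import bisect

def find_chunk(chunk_start_idxs, idx):
    # bisect_right returns the insertion point after any equal start index,
    # so subtracting 1 yields the chunk whose start index is <= idx
    item_chunk_idx = bisect.bisect_right(chunk_start_idxs, idx) - 1
    msg_idx = idx - chunk_start_idxs[item_chunk_idx]
    return item_chunk_idx, msg_idx

# e.g. with chunks starting at messages 0, 100 and 250,
# global index 120 lands in chunk 1 at offset 20
assert find_chunk([0, 100, 250], 120) == (1, 20)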
Python
def update_values(self, redis_dict):
    """Sync values with those from redis"""
    if not self.edit_mode:
        for i in range(self.len):
            if self.actions[i]["redis key"] is None:
                continue
            self.actions[i]["value"] = redis_dict[self.actions[i]["redis key"]]

        self.battery_voltage = redis_dict["battery voltage"]
        self.cap_voltage = redis_dict["cap voltage"]
        self.packet_loss = redis_dict["packet loss"]
Python
def keyPressEvent(self, event):
    """Detect when a key has been pressed (override)

    Note: function name format is different due to overriding the Qt function

    :param event: The event
    """
    if event.key() == Qt.Key_R:
        # TODO (#2410) enter function to rotate the robot
        print("pressed R")
        self.pressed_R = True
    elif event.key() == Qt.Key_Control:
        # TODO (#2410) enter function to move the ball
        print("pressed CTRL")
    elif event.key() == Qt.Key_M:
        # TODO (#2410) enter function to move the robot
        print("pressed M")
        self.pressed_M = True
Python
def keyReleaseEvent(self, event):
    """Detect when a key has been released (override)

    :param event: The event
    """
    if event.key() == Qt.Key_R:
        # TODO (#2410) exit function to rotate the robot
        print("released R")
        self.pressed_R = False
    elif event.key() == Qt.Key_Control:
        # TODO (#2410) exit function to move the ball
        self.pressed_CTRL = False
        print("released CTRL")
    elif event.key() == Qt.Key_M:
        # TODO (#2410) exit function to move the robot
        print("released M")
        self.pressed_M = False
Python
def hoverMoveEvent(self, event):
    """Detect where the mouse is hovering on the field (override)

    NOTE: Currently not used but may be useful in next part of (#2410)

    :param event: The event
    """
    self.mouse_hover_pos = [event.pos().x(), event.pos().y()]
Python
def mouseClickEvent(self, event):
    """Detect whether the mouse was clicked anywhere on the field (override)

    :param event: The event
    """
    # TODO (#2410) implement robot and ball interactivity through simulator,
    # based on mouse and keyboard events

    # print the position of the mouse click
    print("x: " + str(event.pos().x() / MM_PER_M))
    print("y: " + str(event.pos().y() / MM_PER_M))

    self.mouse_clicked = True
    self.mouse_click_pos = [event.pos().x(), event.pos().y()]

    # determine whether a robot was clicked
    self.identify_robots(event.pos().x(), event.pos().y())
Python
def identify_robots(self, mouse_x, mouse_y):
    """Identify which robot was clicked on the field

    :param mouse_x: The x position of the mouse click
    :param mouse_y: The y position of the mouse click
    """
    self.identify_robot(
        mouse_x, mouse_y, self.cached_world.friendly_team.team_robots, "Friendly: "
    )
    self.identify_robot(
        mouse_x, mouse_y, self.cached_world.enemy_team.team_robots, "Enemy: "
    )
Python
def identify_robot(self, mouse_x, mouse_y, team, side):
    """Identify which robot was clicked on the team

    :param mouse_x: The x position of the mouse click
    :param mouse_y: The y position of the mouse click
    :param team: The team of robots to iterate over
    :param side: The label of the team, "Friendly" or "Enemy"
    """
    for robot_ in team:
        pos_x = robot_.current_state.global_position.x_meters
        pos_y = robot_.current_state.global_position.y_meters
        if (
            math.sqrt(
                (pos_x - mouse_x / MM_PER_M) ** 2
                + (pos_y - mouse_y / MM_PER_M) ** 2
            )
            <= ROBOT_MAX_RADIUS / MM_PER_M
        ):
            print(side)
            print(robot_.id)
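A worked example of the hit test above with made-up numbers; MM_PER_M is millimeters per meter, and the ROBOT_MAX_RADIUS value of 90 mm is an assumption, not taken from the snippet:

import math

MM_PER_M = 1000          # millimeters per meter
ROBOT_MAX_RADIUS = 90    # assumed robot radius, in millimeters

mouse_x, mouse_y = 1500, -500   # click position in millimeters
pos_x, pos_y = 1.45, -0.48      # robot position in meters

# distance between the click (converted to meters) and the robot centre
dist = math.sqrt((pos_x - mouse_x / MM_PER_M) ** 2 + (pos_y - mouse_y / MM_PER_M) ** 2)

# ~0.054 m is within the 0.09 m radius, so this click selects the robot
assert dist <= ROBOT_MAX_RADIUS / MM_PER_M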
Python
def draw_mouse_click_loc(self, painter):
    """Draw a circle indicating where the mouse was clicked on the field

    :param painter: The painter
    """
    painter.setPen(pg.mkPen("g", width=2))
    painter.drawEllipse(
        self.createCircle(
            self.mouse_click_pos[0],
            self.mouse_click_pos[1],
            BALL_RADIUS * 3,
        )
    )
    self.mouse_clicked = False
Python
def draw_team(self, painter, color, team: Team, robot_id_map):
    """Draw the team with robot IDs

    :param painter: The painter
    :param color: The color of the robots
    :param team: The team proto to draw
    :param robot_id_map: map of robot_id -> text_item for the team being drawn
    """
    convert_degree = -16

    for robot in team.team_robots:
        if robot.id not in robot_id_map:
            robot_id_font = painter.font()
            robot_id_font.setPointSize(ROBOT_MAX_RADIUS / 7)

            # setting a black background to keep ID visible over yellow robot
            robot_id_text = pg.TextItem(
                html='<span style="color: #FFF; background-color: #000">'
                + str(robot.id)
                + "</span>"
            )
            robot_id_map[robot.id] = robot_id_text
            robot_id_text.setParentItem(self)

        robot_id_map[robot.id].setPos(
            (robot.current_state.global_position.x_meters * MM_PER_M)
            - ROBOT_MAX_RADIUS,
            robot.current_state.global_position.y_meters * MM_PER_M,
        )

        painter.setPen(pg.mkPen(color))
        painter.setBrush(pg.mkBrush(color))

        painter.drawChord(
            self.createCircle(
                robot.current_state.global_position.x_meters * MM_PER_M,
                robot.current_state.global_position.y_meters * MM_PER_M,
                ROBOT_MAX_RADIUS,
            ),
            int((math.degrees(robot.current_state.global_orientation.radians) + 45))
            * convert_degree,
            270 * convert_degree,
        )
Python
def refresh(self):
    """Update the log widget with another log message"""
    try:
        log = self.log_buffer.get_nowait()
    except queue.Empty:
        return

    # Checks whether this type of log is enabled from checkboxes
    if (
        (
            log.log_level == LogLevel.DEBUG
            and self.checkbox_widget.debug_checkbox.isChecked()
        )
        or (
            log.log_level == LogLevel.INFO
            and self.checkbox_widget.info_checkbox.isChecked()
        )
        or (
            log.log_level == LogLevel.WARNING
            and self.checkbox_widget.warning_checkbox.isChecked()
        )
        or (
            log.log_level == LogLevel.FATAL
            and self.checkbox_widget.fatal_checkbox.isChecked()
        )
    ):
        log_str = "{} {} [{}->{}] {}\n".format(
            log.created_timestamp.epoch_timestamp_seconds,
            log.log_level,
            log.file_name,
            log.line_number,
            log.log_msg,
        )
        self.write(log_str)
    else:
        return
Python
def boundingRect(self):
    """boundingRect _must_ indicate the entire area that will be drawn on or
    else we will get artifacts and possibly crashing.

    :return: Rectangle that covers the entire field
    """
    # TODO (#2398) this rectangle makes no sense, it should be
    # top left x, top left y, width, height. But for some reason
    # that doesn't play nicely with the coordinate system.
    #
    # Instead it is bottom left x, bottom left y, width, height.
    return QtCore.QRectF(-9000, -6000, 18000, 12000)
Python
def createCircle(self, x, y, radius):
    """Creates a Rectangle that bounds the circle

    :param x: The x position
    :param y: The y position
    :param radius: The radius of the circle
    :returns: bounding rectangle
    """
    # TODO (#2398) fix this to be top left coordinates, width, height
    return QtCore.QRectF(
        int(x - radius), int(y - radius), int(radius * 2), int(radius * 2)
    )
Python
def register_refresh_function(self, refresh_func):
    """Register the refresh functions to run at the refresh_interval_ms
    passed into thunderscope.

    :param refresh_func: The function to call at refresh_interval_ms
    """
    self.refresh_functions.append(refresh_func)
Python
def configure_default_layout(self):
    """Configure the default layout for thunderscope"""
    # Configure Docks
    field_dock = self.setup_field_widget()
    log_dock = self.setup_log_widget()
    performance_dock = self.setup_performance_plot()
    play_info_dock = self.setup_play_info()

    self.dock_area.addDock(field_dock, "left")
    self.dock_area.addDock(log_dock, "bottom", field_dock)
    self.dock_area.addDock(performance_dock, "right", log_dock)
    self.dock_area.addDock(play_info_dock, "right", performance_dock)
Python
def import_all_classes(package, input_globals):
    """Import all classes from the given package, specifically useful for
    autogenerated protobuf modules.

    :param package: The package to import from
    :param input_globals: globals() from where this is called
    """
    results = get_top_level_modules(package)

    for result in results:
        module = importlib.import_module(result)

        # is there an __all__? if so respect it
        if "__all__" in module.__dict__:
            names = module.__dict__["__all__"]
        else:
            # otherwise we import all names that don't begin with _
            names = [x for x in module.__dict__ if not x.startswith("_")]

        # update the globals to contain the class
        input_globals.update({k: getattr(module, k) for k in names})
Python
def create_header_config_list_from_metadata(
    top_level_config_name: str, config_metadata: dict
) -> List[CppHeaderConfig]:
    """Takes the config metadata loaded by config_yaml_loader, and converts it
    to a list of CppHeaderConfig objects; this includes setting the dependency
    graphs needed for the configs.

    :param top_level_config_name: the name of the top level config
    :param config_metadata: the dictionary containing the config metadata
    :return: list of CppHeaderConfig objects
    """
    cpp_configs_dict = {}
    dependency_graph = nx.DiGraph()
    top_level_config = CppHeaderConfig(top_level_config_name, True)

    # first pass to construct all CppHeaderConfig objects
    for config, metadata in config_metadata.items():
        config_name = to_pascal_case(config.split(".")[0])
        config = CppHeaderConfig(config_name)
        top_level_config.include_config(config)

        if PARAMETER_KEY in metadata:
            for parameter in metadata[PARAMETER_KEY]:
                param_metadata = list(parameter.values())[0]
                param_type = list(parameter.keys())[0]

                cpp_param = CppParameter(param_type, param_metadata)
                config.add_parameter(cpp_param)

        cpp_configs_dict[config_name] = config
        dependency_graph.add_node(config_name, config=config)

    # second pass to create dependency graph
    for config, metadata in config_metadata.items():
        config_name = to_pascal_case(config.split(".")[0])
        config = cpp_configs_dict[config_name]

        if INCLUDE_KEY in metadata:
            for included_yaml in metadata[INCLUDE_KEY]:
                included_config_name = to_pascal_case(included_yaml.split(".")[0])
                config.include_config(cpp_configs_dict[included_config_name])

                # add an edge from config node to included config node
                dependency_graph.add_edge(config_name, included_config_name)

    # for each node, create a subgraph of relevant dependencies
    # Note: This can be optimized by doing traversal from each source,
    # and creating subgraphs for all its descendants during the same traversal
    for node in dependency_graph.nodes:
        # find the subgraph of the dependency graph relevant to the current node
        dependency_graph.nodes[node][
            "config"
        ].dependency_graph = dependency_graph.subgraph(
            nx.algorithms.dag.descendants(dependency_graph, node)
        )

    top_level_config.dependency_graph = dependency_graph

    cpp_configs = [
        dependency_graph.nodes[node]["config"]
        for node in list(reversed(list(nx.topological_sort(dependency_graph))))
    ]
    cpp_configs.append(top_level_config)

    return cpp_configs
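The reversed topological sort at the end determines the order in which config definitions are emitted. A small sketch with hypothetical config names showing why included configs come out first; it only relies on the networkx calls used above:

import networkx as nx

# Hypothetical include chain: AiConfig includes PassingConfig, which includes
# CostFunctionConfig. Edges point from the including config to the included
# one, matching the second pass above.
g = nx.DiGraph()
g.add_edges_from(
    [("AiConfig", "PassingConfig"), ("PassingConfig", "CostFunctionConfig")]
)

# Reversing the topological order yields the emission order used above:
# every included config is defined before any config that includes it.
emission_order = list(reversed(list(nx.topological_sort(g))))
assert emission_order == ["CostFunctionConfig", "PassingConfig", "AiConfig"]

# descendants() is the set of configs a node transitively includes, which is
# what each node's dependency subgraph is built from.
assert nx.algorithms.dag.descendants(g, "AiConfig") == {
    "PassingConfig",
    "CostFunctionConfig",
}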
Python
def create_source_config_list_from_metadata(
    top_level_config_name: str, config_metadata: dict
) -> List[CppSourceConfig]:
    """Takes the config metadata loaded by config_yaml_loader, and converts it
    to a list of CppSourceConfig objects; this includes setting the dependency
    graphs needed for the configs.

    :param top_level_config_name: the name of the top level config
    :param config_metadata: the dictionary containing the config metadata
    :return: list of CppSourceConfig objects
    """
    cpp_configs_dict = {}
    dependency_graph = nx.DiGraph()
    top_level_config = CppSourceConfig(top_level_config_name, True)

    # first pass to construct all CppSourceConfig objects
    for config, metadata in config_metadata.items():
        config_name = to_pascal_case(config.split(".")[0])
        config = CppSourceConfig(config_name)
        top_level_config.include_config(config)

        if PARAMETER_KEY in metadata:
            for parameter in metadata[PARAMETER_KEY]:
                param_metadata = list(parameter.values())[0]
                param_type = list(parameter.keys())[0]

                cpp_param = CppParameter(param_type, param_metadata)
                config.add_parameter(cpp_param)

        cpp_configs_dict[config_name] = config
        dependency_graph.add_node(config_name, config=config)

    # second pass to create dependency graph
    for config, metadata in config_metadata.items():
        config_name = to_pascal_case(config.split(".")[0])
        config = cpp_configs_dict[config_name]

        if INCLUDE_KEY in metadata:
            for included_yaml in metadata[INCLUDE_KEY]:
                included_config_name = to_pascal_case(included_yaml.split(".")[0])
                config.include_config(cpp_configs_dict[included_config_name])

                # add an edge from config node to included config node
                dependency_graph.add_edge(config_name, included_config_name)

    # for each node, create a subgraph of relevant dependencies
    # Note: This can be optimized by doing traversal from each source, and
    # creating subgraphs for all its descendants during the same traversal
    for node in dependency_graph.nodes:
        # find the subgraph of the dependency graph relevant to the current node
        dependency_graph.nodes[node][
            "config"
        ].dependency_graph = dependency_graph.subgraph(
            nx.algorithms.dag.descendants(dependency_graph, node)
        )

    top_level_config.dependency_graph = dependency_graph

    cpp_configs = [
        dependency_graph.nodes[node]["config"]
        for node in list(reversed(list(nx.topological_sort(dependency_graph))))
    ]
    cpp_configs.append(top_level_config)

    return cpp_configs
Python
def write_config_metadata_header(
    output_header: str,
    include_headers: List[str],
    top_level_config_name: str,
    config_metadata: dict,
):
    """Generates the .h config file.

    :param output_header: the name of the config file
    :param include_headers: the list of headers that need to be included in the config file
    :param top_level_config_name: the name of the top level config
    :param config_metadata: the dictionary containing the config metadata
    """
    cpp_header_configs = CppWriter.create_header_config_list_from_metadata(
        top_level_config_name, config_metadata
    )

    # generate header file
    with open(f"{output_header}", "w") as header_file:
        contents = "\n".join([conf.definition for conf in cpp_header_configs])
        include_headers_formatted = "\n".join(
            [
                INCLUDE_HEADER.format(header_file=header_file)
                for header_file in include_headers
            ]
        )
        forward_declarations = "\n".join(
            [conf.forward_declaration for conf in cpp_header_configs]
        )
        header_file.write(
            CONFIG_H.format(
                autogen_warning=AUTOGEN_WARNING,
                include_headers=include_headers_formatted,
                forward_declarations=forward_declarations,
                contents=contents,
            )
        )
Python
def write_config_metadata_source(
    output_source: str,
    declaration_header: str,
    top_level_config_name: str,
    config_metadata: dict,
):
    """Generates the .cpp config file.

    :param output_source: the name of the config file
    :param declaration_header: the header that contains the declarations that the source file must include
    :param top_level_config_name: the name of the top level config
    :param config_metadata: the dictionary containing the config metadata
    """
    cpp_source_configs = CppWriter.create_source_config_list_from_metadata(
        top_level_config_name, config_metadata
    )

    # generate source file
    with open(f"{output_source}", "w") as source_file:
        contents = "\n".join([conf.definition for conf in cpp_source_configs])
        source_file.write(
            CONFIG_CPP.format(
                autogen_warning=AUTOGEN_WARNING,
                declaration_header=INCLUDE_HEADER.format(
                    header_file=declaration_header
                ),
                contents=contents,
            )
        )
Python
def create_slider(self, title, min, max, step):
    """Creates a slider for the widget

    :param title: the name of the slider
    :param min: the minimum value of the slider
    :param max: the maximum value of the slider
    :param step: singleStep of the slider
    """
    groupBox = QGroupBox(title)

    # set up the slider
    slider = QSlider(Qt.Orientation.Horizontal)
    slider.setStyleSheet("font:white")
    slider.setFocusPolicy(Qt.FocusPolicy.StrongFocus)
    slider.setSingleStep(step)
    slider.setMinimum(min)
    slider.setMaximum(max)

    # create a label to display the slider's value
    label = QLabel()
    label.setAlignment(Qt.AlignmentFlag.AlignCenter)
    label.setStyleSheet("font:white;")

    if step == 10:
        slider.valueChanged.connect(lambda: label.setText(self.value_change(slider)))
    else:
        slider.valueChanged.connect(label.setNum)

    # add widgets
    vbox = QGridLayout()
    vbox.addWidget(slider, 0, 0)
    vbox.addWidget(label, 0, 1)
    groupBox.setLayout(vbox)

    return groupBox, slider
Python
def value_change(self, slider):
    """Scale the slider's integer value down by 100 so the label changes by
    0.1 per step of 10, and format it with one decimal place

    :param slider: the slider whose value is displayed
    """
    value = slider.value()
    value = float(value)
    value = value / 100.0
    valueStr = "%.1f" % value
    return valueStr
Python
def run_validation_sequence_sets(
    world, eventually_validation_sequence_set, always_validation_sequence_set
):
    """Given both eventually and always validation sequence sets (and world),
    run validation and aggregate the results in a validation proto set.

    :raises AssertionError: If the test fails
    :param world: World to validate with
    :param eventually_validation_sequence_set:
            A collection of sequences of eventually validations to validate.
    :param always_validation_sequence_set:
            A collection of sequences of always validations to validate.

    :returns: Eventually ValidationProtoSet, Always ValidationProtoSet
    """
    # Proto that stores validation geometry and validation status of
    # all validations passed in
    always_validation_proto_set = ValidationProtoSet()
    eventually_validation_proto_set = ValidationProtoSet()

    def create_validation_proto_helper(validation_proto_set, validation):
        """Helper function that computes the status and creates a
        validation_proto, and updates it in the validation_proto_set.

        :param validation_proto_set: The validation proto set to add to
        :param validation: The validation to put into the proto
        """
        # Stores the validation result
        validation_proto = ValidationProto()

        # Get status
        status = validation.get_validation_status(world)

        # Create validation proto
        validation_proto.status = status
        validation_proto.failure_msg = str(validation) + " failed"
        validation_proto.validation_type = validation.get_validation_type()
        validation_proto.geometry.CopyFrom(validation.get_validation_geometry(world))

        validation_proto_set.validations.append(validation_proto)

        return status

    # Validate the eventually validations. Eventually validations are
    # evaluated in sequence: we keep checking the current one until it
    # passes, and only then move on to the next.
    for validation_sequence in list(eventually_validation_sequence_set):
        for validation in validation_sequence:
            # Add to validation_proto_set and get status
            status = create_validation_proto_helper(
                eventually_validation_proto_set, validation
            )

            # If the current validation is failing, we don't care about
            # the next one. Keep evaluating until this one passes.
            if status == ValidationStatus.FAILING:
                break

            # If the validation has passed, remove it from the set.
            if status == ValidationStatus.PASSING:
                validation_sequence.remove(validation)
                continue

    # Validate the always validations. We need to look at all of them
    for validation_sequence in always_validation_sequence_set:
        for validation in validation_sequence:
            create_validation_proto_helper(always_validation_proto_set, validation)

    return eventually_validation_proto_set, always_validation_proto_set
Python
def create_validation_proto_helper(validation_proto_set, validation):
    """Helper function that computes the status and creates a
    validation_proto, and updates it in the validation_proto_set.

    :param validation_proto_set: The validation proto set to add to
    :param validation: The validation to put into the proto
    """
    # Stores the validation result
    validation_proto = ValidationProto()

    # Get status
    status = validation.get_validation_status(world)

    # Create validation proto
    validation_proto.status = status
    validation_proto.failure_msg = str(validation) + " failed"
    validation_proto.validation_type = validation.get_validation_type()
    validation_proto.geometry.CopyFrom(validation.get_validation_geometry(world))

    validation_proto_set.validations.append(validation_proto)

    return status
Python
def check_validation(validation_proto_set):
    """Check validation and make sure it's always true

    :param validation_proto_set: Validation proto set
    :raises: AssertionError
    """
    for validation_proto in validation_proto_set.validations:
        if validation_proto.status == ValidationStatus.FAILING:
            raise AssertionError(validation_proto.failure_msg)
Python
def create_validation_geometry(geometry=[]) -> ValidationGeometry:
    """Creates a ValidationGeometry which is a visual representation of the
    validation to be rendered as either green (PASSING) or red (FAILING)

    Given a list of (vectors, polygons, circles), creates a ValidationGeometry
    proto containing the protobuf representations.

    :param geometry: A list of geometry objects (Vector, Polygon, Rectangle or Circle)
    :returns: ValidationGeometry
    """
    validation_geometry = ValidationGeometry()

    CREATE_PROTO_DISPATCH = {
        tbots.Vector.__name__: tbots.createVectorProto,
        tbots.Polygon.__name__: tbots.createPolygonProto,
        tbots.Rectangle.__name__: tbots.createPolygonProto,
        tbots.Circle.__name__: tbots.createCircleProto,
    }

    ADD_TO_VALIDATION_GEOMETRY_DISPATCH = {
        tbots.Vector.__name__: validation_geometry.vectors.append,
        tbots.Polygon.__name__: validation_geometry.polygons.append,
        tbots.Rectangle.__name__: validation_geometry.polygons.append,
        tbots.Circle.__name__: validation_geometry.circles.append,
    }

    for geom in geometry:
        ADD_TO_VALIDATION_GEOMETRY_DISPATCH[type(geom).__name__](
            CREATE_PROTO_DISPATCH[type(geom).__name__](geom)
        )

    return validation_geometry
Python
def __load_yaml_into_dict(yaml_paths: YamlPathList) -> dict:
    """Loads the yamls into a dictionary. Any errors in the yaml syntax will
    raise to the main thread. We also adjust how the dictionary is stored
    for easier access later.

    :raises ConfigYamlMalformed: when the yaml is malformed
    :param yaml_paths: the path to all the config yamls
    :type yaml_paths: list of str
    :returns: config_metadata dict representing the data to generate
    :rtype: dict
    """
    raw_config_metadata = {}

    for filename in yaml_paths:
        with open(filename, "r") as param_yaml:
            try:
                # extract config name from filename
                _, tail = os.path.split(filename)

                # safe load yaml into dictionary
                raw_config_metadata[tail] = list(yaml.safe_load_all(param_yaml))

                if len(raw_config_metadata[tail]) == 1:
                    # include only in file
                    if isinstance(raw_config_metadata[tail][0], dict):
                        raw_config_metadata[tail] = {
                            INCLUDE_KEY: raw_config_metadata[tail][0][INCLUDE_KEY]
                        }

                    # parameter definitions only in file
                    elif isinstance(raw_config_metadata[tail][0], list):
                        raw_config_metadata[tail] = {
                            PARAMETER_KEY: raw_config_metadata[tail][0]
                        }

                elif len(raw_config_metadata[tail]) == 2:
                    # include and param definition in file
                    raw_config_metadata[tail] = {
                        INCLUDE_KEY: raw_config_metadata[tail][0][INCLUDE_KEY],
                        PARAMETER_KEY: raw_config_metadata[tail][1],
                    }

                else:
                    raise ConfigYamlMalformed(
                        "More than two yaml documents in {}".format(tail)
                    )

            except yaml.YAMLError as ymle:
                raise ConfigYamlMalformed(
                    "Check malformed {} \n {}".format(tail, ymle)
                ) from None

            except Exception as exc:
                raise ConfigYamlMalformed(
                    "Check malformed {} \n {}".format(tail, exc)
                ) from exc

    return raw_config_metadata
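The one-document vs two-document branches above hinge on how yaml.safe_load_all splits a file on "---". A small, self-contained illustration; the "include" key name and the parameter layout are assumptions modeled on the loader, not taken from it:

import yaml

example = """\
include:
  - ball.yaml
  - robot.yaml
---
- int:
    name: max_speed
    value: 3
    min: 0
    max: 10
"""

# safe_load_all yields one Python object per YAML document
docs = list(yaml.safe_load_all(example))
assert len(docs) == 2                                     # include doc + parameter doc
assert docs[0]["include"] == ["ball.yaml", "robot.yaml"]  # dict -> include section
assert isinstance(docs[1], list)                          # list -> parameter definitions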
Python
def __validate_config_metadata(config_metadata: dict):
    """Validates the config_metadata that was loaded against the
    dynamic_parameter_schemas and then checks for duplicate includes and
    duplicate parameters in the same config.

    :raises ConfigYamlMalformed: When the yaml is malformed
    :raises ConfigYamlSchemaViolation: When the schema is violated
    :param config_metadata: Metadata describing params and config includes
    :type config_metadata: dict
    """
    for config_file, metadata in config_metadata.items():
        if INCLUDE_KEY in metadata:
            # validate correct format with schema
            try:
                jsonschema.validate(metadata[INCLUDE_KEY], INCLUDE_DEF_SCHEMA)
            except jsonschema.exceptions.ValidationError as jsval:
                raise ConfigYamlSchemaViolation(
                    "Schema violation in {}: {}".format(config_file, jsval)
                ) from None

            # check duplicates
            if len(metadata[INCLUDE_KEY]) > len(set(metadata[INCLUDE_KEY])):
                raise ConfigYamlMalformed(
                    "Duplicate include detected in {}".format(config_file)
                )

            # check that included yaml is defined elsewhere
            for included_yaml in metadata[INCLUDE_KEY]:
                if included_yaml not in config_metadata.keys():
                    raise ConfigYamlMalformed(
                        "definition could not be found for {} in {}".format(
                            included_yaml, config_file
                        )
                    )

        if PARAMETER_KEY in metadata:
            # validate correct format with schema
            try:
                jsonschema.validate(metadata[PARAMETER_KEY], PARAM_DEF_SCHEMA)
            except jsonschema.exceptions.ValidationError as jsval:
                raise ConfigYamlSchemaViolation(
                    "Schema violation in {}: {}".format(config_file, jsval)
                ) from None

            # Get all parameter names as a list, the parameter type comes
            # first and the name follows in the dictionary. If the schema
            # check above succeeded, it's safe to assume the "name" key
            # exists in the parameter dictionary
            param_names = [
                list(param_entry.values())[0]["name"]
                for param_entry in metadata[PARAMETER_KEY]
            ]

            # check duplicates
            if len(param_names) > len(set(param_names)):
                raise ConfigYamlMalformed(
                    "Duplicate parameter detected in {}".format(config_file)
                )

            # This is an ugly artifact of how the yaml is defined and loaded.
            # We are extracting all the requested types to check that
            # they are all supported. This is the one thing the schema
            # can't validate that we would like to check
            requested_types = [
                key[0]
                for key in [list(entry.keys()) for entry in metadata[PARAMETER_KEY]]
            ]

            # check if type requested is supported
            for requested_type in requested_types:
                if requested_type not in SUPPORTED_TYPES:
                    raise ConfigYamlMalformed(
                        "{} type unsupported in {}".format(
                            requested_type, config_file
                        )
                    )
Python
def __detect_cycles_in_config_metadata(config_metadata: dict):
    """Creates a DiGraph from all the included configs and checks if there
    are cycles. Raises to the main thread if a cycle is detected.

    :raises ConfigYamlCycleDetected: When a cycle is detected in the includes
    :param config_metadata: Metadata describing params and config includes
    :type config_metadata: dict
    """
    edges = []

    for config, metadata in config_metadata.items():
        if INCLUDE_KEY in metadata:
            for included_config in metadata[INCLUDE_KEY]:
                edges.append((config, included_config))

    G = networkx.DiGraph(edges)

    for cycle in networkx.simple_cycles(G):
        raise ConfigYamlCycleDetected(
            "Cycle detected in the include statements: "
            + " -> ".join(cycle + [cycle[0]])
        )
def keyPressEvent(self, event): """Propagate keypress event to all field layers :param event: The event """ for layer in self.layers: layer.keyPressEvent(event)
def keyReleaseEvent(self, event): """Propagate keyrelease event to all field layers :param event: The event """ for layer in self.layers: layer.keyReleaseEvent(event)
def add_layer(self, name: str, layer: FieldLayer): """Add a layer to this field and to the legend. :param name: The name of the layer :param layer: The FieldLayer graphics object """ self.layers.append(layer) self.addItem(layer) self.legend.addItem(layer, name)
def refresh(self): """Trigger an update on all the layers """ for layer in self.layers: layer.update()
def send(self, proto): """Buffer a protobuf to be sent by the send thread :param proto: The protobuf to send """ try: self.proto_buffer.put_nowait(proto) except queue.Full as queue_full: logging.warning("send buffer overrun for {}".format(self.unix_path))
def change_button_state(self, button, enable): """ Change button color and clickable state. :param button: button to change the state of :param enable: bool: if True: enable this button, if False: disable :return: """ if enable: button.setStyleSheet("background-color: White") button.setCheckable(True) else: button.setStyleSheet("background-color: Grey") button.setCheckable(False)
def encode_sha256_checksum(file_path: str) -> str:
    """
    Determines the SHA-256 checksum for a given file.

    :param file_path: the path to a file
    :return: the SHA-256 checksum for the given file
    """
    # make hash by reading consecutive parts of data
    hash = hashlib.sha256()
    with open(file_path, "rb") as f:
        for byte_block in iter(lambda: f.read(4096), b""):
            hash.update(byte_block)

    return hash.hexdigest()
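# Example usage on a small throwaway file; the expected digest below is the
# well-known SHA-256 of the bytes b"hello".
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"hello")
    tmp_path = tmp.name

assert (
    encode_sha256_checksum(tmp_path)
    == "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824"
)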
def dfs_helper(
    self, config: CppSourceConfig, arg_prefix: str, load_dependency: str
):
    """A depth first search helper for adding the necessary prefix to
    accessing and setting parameters of included configs in the
    loadFromCommandLineArguments function

    :param config: the current CppSourceConfig object
    :param arg_prefix: the prefix for accessing the arg struct
    :param load_dependency: the prefix for accessing the actual parameter

    """
    arg_prefix = (
        to_snake_case(config.config_name)
        if not arg_prefix
        else arg_prefix + "." + to_snake_case(config.config_name)
    )
    load_dependency = load_dependency + "getMutable{config_name}()->".format(
        config_name=config.config_name
    )

    mutable_param_gen = (
        param for param in config.parameters if not param.is_constant
    )

    for param in mutable_param_gen:
        self.included_config_command_line_arg_entries.append(
            param.command_line_option_entry_with_prefix(arg_prefix + ".")
        )
        self.included_config_load_command_line_args_into_config_contents.append(
            param.load_command_line_arg_into_config_with_dependencies(
                load_dependency, arg_prefix + "."
            )
        )

    # top level config has access to all configs, so no need to recursively add options
    for included_config in config.configs:
        self.dfs_helper(included_config, arg_prefix, load_dependency)
def circular_arc_to_line_segs(arc: SSL_FieldCircularArc):
    """
    Sample line segments from a circular section described by a
    SSL_FieldCircularArc proto and return them as a tuple of a list of list of
    line segment x's, and a list of list of line segment y's

    return will look like:
    (
        [[start x, end x], ... [start x, end x]],
        [[start y, end y], ... [start y, end y]]
    )

    :param arc: an SSL_FieldCircularArc proto describing the centre, radius,
                start angle and end angle of the circular section
    :return: a tuple of list[list[x]], list[list[y]], a series of line segment x's and y's
             representing LINE SEGMENTS sampled along the circular arc
    """
    center_x = arc.center.x
    center_y = arc.center.y
    radius = arc.radius
    start_angle = arc.a1
    end_angle = arc.a2

    # decide how many samples to take along the arc, a full circle will be a 36-gon
    num_samples = (end_angle - start_angle) * (18 / np.pi)
    sample_angles = np.linspace(
        start_angle, end_angle, np.ceil(num_samples).astype(int)
    )

    sampled_pt_xs = []
    sampled_pt_ys = []

    for i in range(len(sample_angles) - 1):
        # sometimes we have to convert to numpy arrays, do the MM_PER_M division, and then convert it back because
        # bokeh really dislikes 2d numpy arrays in some circumstances
        sampled_pt_xs.append(
            (
                np.asarray(
                    [
                        center_x + np.cos(sample_angles[i]) * radius,
                        center_x + np.cos(sample_angles[i + 1]) * radius,
                    ]
                )
                / MM_PER_M
            ).tolist()
        )
        sampled_pt_ys.append(
            (
                np.asarray(
                    [
                        center_y + np.sin(sample_angles[i]) * radius,
                        center_y + np.sin(sample_angles[i + 1]) * radius,
                    ]
                )
                / MM_PER_M
            ).tolist()
        )
    return sampled_pt_xs, sampled_pt_ys
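# Hypothetical usage: sample a half circle of radius 500 mm centered at the
# origin. Field geometry protos are in millimetres, which is why the function
# divides by MM_PER_M; the field names (center, radius, a1, a2) are taken from
# the accesses above.
arc = SSL_FieldCircularArc()
arc.center.x = 0
arc.center.y = 0
arc.radius = 500
arc.a1 = 0
arc.a2 = np.pi
xs, ys = circular_arc_to_line_segs(arc)
# xs/ys hold consecutive two-point segments approximating the half circle, in metres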
def __setup_robot_plotters(self, robot_colour, robot_plot_colour):
    """
    A helper function to set circles, orientation lines, and labels for
    a given robot colour and a corresponding plot colour.

    :param robot_colour: Robot colour to set the legend label and robot_sources dict key with
    :param robot_plot_colour: Colour to actually plot the robot circles with
    """
    # data source for this colour of robots
    self.robots_sources[robot_colour] = ColumnDataSource(
        dict(
            robot_ids=[],
            robot_xs=[],
            robot_ys=[],
            robot_ori_line_seg_xs=[[]],
            robot_ori_line_seg_ys=[[]],
        )
    )

    # circles representing the robots
    self.fig.circle(
        source=self.robots_sources[robot_colour],
        x="robot_xs",
        y="robot_ys",
        radius=ROBOT_MAX_RADIUS,
        fill_color=robot_plot_colour,
        line_color="black",
        legend_label=robot_colour + " robots",
    )

    # line segments representing robot orientations
    self.fig.multi_line(
        source=self.robots_sources[robot_colour],
        xs="robot_ori_line_seg_xs",
        ys="robot_ori_line_seg_ys",
        line_color="black",
        legend_label=robot_colour + " robot orientations",
    )

    # labels for the robot ids
    labels = LabelSet(
        x="robot_xs",
        y="robot_ys",
        text="robot_ids",
        source=self.robots_sources[robot_colour],
        text_font_size="12pt",
    )
    self.fig.add_layout(labels)
def createLogger(name):
    """Create a logger given the name of the logger

    :param name: The name of the logger
    :returns: A Logger

    """
    return logging.getLogger(name)
def on_click(): """ Execute on click callback of curr screen """ action = self.screens[self.curr_screen].on_click() if screen_actions.CHANGE_SCREEN == action["screen action"]: self.curr_screen = action["value"] self.screens[self.curr_screen].update_screen() self.lcd_display.show() elif screen_actions.UPDATE_REDIS == action["screen action"]: self.redis_client.set(action["redis key"], action["value"]) self.redis_dict[action["redis key"]] = action["value"] print( "Key: {}, Value: {}".format( action["redis key"], self.redis_client.get(action["redis key"]).decode("UTF-8"), ) )
def poll_redis(self, timeout=3): """ Update redis dict every timeout seconds """ while not self.shutdown: for key in redis_keys: self.redis_dict[key] = float(self.redis_client.get(key).decode("UTF-8")) for screen_name, screen in self.screens.items(): if screen_name != "Menu": screen.update_values(self.redis_dict) time.sleep(timeout)
def send_sensor_proto(self, sensor_proto): """Send a sensor msg to full system. :param sensor_proto: The sensor msg to send """ self.sensor_proto_sender.send(sensor_proto)
def send_tactic_override(self, assigned_tactic_play_control_params): """Send the control params for the assigned tactic play to run specific tactics on assigned robots. :param assigned_tactic_play_control_params: The control params of the AssignedTacticPlay """ self.tactic_override.send(assigned_tactic_play_control_params)
def stop(): """Stop all listeners and senders. """ for unix_socket in [ self.robot_status_sender, self.ssl_wrapper_sender, self.ssl_referee_sender, self.tactic_override, self.sensor_proto_sender, self.world_listener, ]: unix_socket.force_stop() self.primitive_listener.force_stop()
def start(self): """ Distributes protobuf from the proto_receiver to all widgets that consume that specific protobuf """ while True: proto = self.proto_receiver.buffer.get() if proto.DESCRIPTOR.full_name in self.proto_map: for buffer in self.proto_map[proto.DESCRIPTOR.full_name]: try: buffer.put_nowait(proto) except queue.Full: pass
def register_observer(self, proto_type, buffer):
    """Register a widget to consume from a given protobuf class

    :param proto_type: Class of protobuf to consume
    :param buffer: buffer from the widget to register

    """
    # proto_map is keyed by the full protobuf type name (see start() above),
    # so look up and store by full_name rather than by the class object
    full_name = proto_type.DESCRIPTOR.full_name
    if full_name in self.proto_map:
        self.proto_map[full_name].append(buffer)
    else:
        self.proto_map[full_name] = [buffer]
def receive_announcements(port: int, duration: int) -> [Announcement]:
    """
    Returns a list of Announcements, without duplicates, received within a
    time window of `duration` seconds on a specified port

    :param duration: how long to listen for announcements, in seconds
    :param port: the port to listen for announcements on
    :return: a list of Announcements, without duplicates
    """
    receiver = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    receiver.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    receiver.settimeout(RECEIVE_TIMEOUT_SECONDS)
    receiver.bind(("", port))

    announcements = []
    timeout = time() + duration
    while time() < timeout:
        try:
            data = receiver.recv(1024)
        except socket.timeout:
            # ignore timeout errors
            continue
        else:
            # parse announcement protobuf
            announcement = Announcement()
            announcement.ParseFromString(data)
            # filter out duplicates
            if announcement not in announcements:
                announcements.append(announcement)
    return announcements
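# Commented usage sketch; the port number is hypothetical and not taken from
# the robot communication code.
# announcements = receive_announcements(port=42000, duration=4)
# for announcement in announcements:
#     print(announcement)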
def __buffer_protobuf(self, proto): """Buffer the protobuf, and raise a warning if we overrun the buffer :param proto: The protobuf to buffer :raises: Warning """ try: self.proto_buffer.put_nowait(proto) except queue.Full as queue_full: logger.warning("buffer overrun for {}".format(self.unix_path))
def serve_till_stopped(self): """Keep handling requests until force_stop is called """ while not self.stop: self.server.handle_request()
def handle(self): """Handle the two cases: 1. Given the proto_class, decode the incoming data and trigger a callback. This is mostly used for direct protobuf communication. 2. LOG(VISUALIZE) calls from g3log in the C++ code sending over stuff to visualize follows a specific format (see handle_log_visualize) we need to decode. """ if self.proto_class: self.handle_proto() else: self.handle_log_visualize()
def handle_proto(self): """If a specific protobuf class is passed in, this handler is called. It deserializes the incoming msg into the class and triggers the handle callback. """ if self.proto_class: self.handle_callback(self.proto_class.FromString(self.request[0])) else: raise Exception("proto_class is None but handle_proto called")
def handle_log_visualize(self): """We send protobufs from our C++ code to python for visualization. If we used the handle_proto handler and passed in a proto_class, we would need to setup a sender/receiver pair for every protobuf we want to visualize. So instead, we special case the communication coming from the ProtobufSink (C++ side). The ProtobufSink sends the typename prefixed at the beginning of the payload delimited by the TYPE_DELIMITER (!!!). | -- data -- | PackageName.TypeName!!!eW91Zm91bmR0aGVzZWNyZXRtZXNzYWdl This allows us to call LOG(VISUALIZE) _anywhere_ in C++ and receive/decode here with minimum boilerplate code. """ payload = self.request[0] type_name = str(payload.split(b"!!!")[0], "utf-8") proto_type = self.find_proto_class(type_name.split(".")[1]) msg = proto_type() payload = base64.b64decode(payload.split(b"!!!")[1]) any_msg = Any.FromString(payload) any_msg.Unpack(msg) self.handle_callback(msg)
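# A minimal sketch of the sending side of this wire format, for illustration
# only (the real sender is the C++ ProtobufSink): the payload is the full type
# name, the "!!!" delimiter, then a base64-encoded google.protobuf.Any.
import base64

from google.protobuf.any_pb2 import Any


def encode_for_log_visualize(msg):
    """Pack msg the way handle_log_visualize above expects to unpack it."""
    any_msg = Any()
    any_msg.Pack(msg)
    type_name = msg.DESCRIPTOR.full_name.encode("utf-8")
    return type_name + b"!!!" + base64.b64encode(any_msg.SerializeToString())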
def find_proto_class(self, proto_class_name): """Search through all protobufs and return class of proto_type :param proto_class_name: String of the proto class name to search for """ proto_path = os.path.dirname(proto.__file__) for file in glob.glob(proto_path + "**/*.py"): name = os.path.splitext(os.path.basename(file))[0] # Ignore __ files if name.startswith("__"): continue module = importlib.import_module("proto." + name) for member in dir(module): handler_class = getattr(module, member) if handler_class and inspect.isclass(handler_class): if str(member) == proto_class_name: return handler_class
def handler_factory(handle_callback, proto_class): """To pass in an arbitrary handle callback into the SocketServer, we need to create a constructor that can create a Session object with appropriate handle function. :param handle_callback: The callback to run :param proto_class: The protobuf to unpack from (None if its encoded in the payload) """ def create_handler(*args, **keys): return Session(handle_callback, proto_class, *args, **keys) return create_handler
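# Hypothetical usage with Python's standard socketserver module, assuming
# Session is a datagram request handler (the self.request[0] access above
# suggests this) and RobotStatus is one of our protobuf classes.
# import socketserver
# handler = handler_factory(lambda msg: print(msg), RobotStatus)
# server = socketserver.UnixDatagramServer("/tmp/robot_status", handler)
# server.serve_forever()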
def __runner():
    """Step simulation, full_system and run validation
    """
    time_elapsed_s = 0

    while time_elapsed_s < test_timeout_s:
        self.simulator.tick(tick_duration_s * MILLISECONDS_PER_SECOND)
        time_elapsed_s += tick_duration_s

        if self.enable_thunderscope:
            time.sleep(tick_duration_s)

        # Send the sensor_proto and get world
        ssl_wrapper = self.simulator.get_ssl_wrapper_packet(block=True)
        self.yellow_full_system.send_sensor_proto(
            self.simulator.get_yellow_sensor_proto(ssl_wrapper)
        )
        world = self.yellow_full_system.get_world(block=True)

        # Validate
        (
            eventually_validation_proto_set,
            always_validation_proto_set,
        ) = validation.run_validation_sequence_sets(
            world,
            eventually_validation_sequence_set,
            always_validation_sequence_set,
        )

        if self.enable_thunderscope:
            # Send out the validation proto to thunderscope
            self.eventually_validation_sender.send(
                eventually_validation_proto_set
            )
            self.always_validation_sender.send(always_validation_proto_set)

        # Check that all always validations are always valid
        validation.check_validation(always_validation_proto_set)

        # Step the primitives
        self.simulator.send_yellow_primitive_set_and_world(
            world, self.yellow_full_system.get_primitive_set(),
        )

    # Check that all eventually validations are eventually valid
    validation.check_validation(eventually_validation_proto_set)

    __stopper()
def excepthook(args):
    """This function is _critical_ for enable_thunderscope to work.
    If the test thread raises an exception, we won't be able to close
    the window from the main thread.

    :param args: The args passed in from the hook

    """
    __stopper(delay=PAUSE_AFTER_FAIL_DELAY_S)
    self.last_exception = args.exc_value
    raise self.last_exception
def load_command_line_arguments():
    """Load from command line arguments using argparse

    NOTE: Pytest has its own built in argument parser (conftest.py,
    pytest_addoption) but it doesn't seem to play nicely with bazel.
    We just use argparse instead.

    """
    parser = argparse.ArgumentParser(description="Run simulated pytests")
    parser.add_argument(
        "--enable_thunderscope", action="store_true", help="enable the visualizer"
    )
    return parser.parse_args()
def create_button(text: list): """ Creates QPushButton objects inside a QGroupBox object. The default color of button will be white with black background. :param text: type:list - list of text for all buttons :return: group_box: QGroupBox object - add this to the widget :return: buttons: list of QPushButton objects - use this to perform tasks on the buttons """ group_box = QGroupBox() num_buttons = len(text) buttons = [] for i in range(num_buttons): button = QPushButton(text[i]) button.setCheckable(True) buttons.append(button) group_box.setStyleSheet("color: black") hbox = QHBoxLayout() for button in buttons: hbox.addWidget(button) group_box.setLayout(hbox) return group_box, buttons
def create_radio(text: list, radio_group): """ Creates QRadioButton objects inside a QGroupBox object. The default color of button background will be white. :param text: type:list - list of text for all buttons :param radio_group: QButtonGroup to add these buttons to :return: group_box: QGroupBox object - add this to the widget :return: buttons: list of QRadioButton object - use this to perform tasks on the buttons """ group_box = QGroupBox() num_buttons = len(text) radios = [] for i in range(num_buttons): radio = QRadioButton(text[i]) # this is so that the button is properly visible in black background radio.setStyleSheet("background-color: white") radio_group.addButton(radio) radios.append(radio) group_box.setStyleSheet("color: black") hbox = QHBoxLayout() for radio in radios: hbox.addWidget(radio) group_box.setLayout(hbox) return group_box, radios
def create_slider(text, min_val, max_val, tick_spacing):
    """
    Creates a QSlider object inside a QGroupBox object, along with a value
    label underneath the slider. The slider orientation will be horizontal.

    :param text: text to display above the slider
    :param min_val: lowest value of the slider
    :param max_val: highest value of the slider
    :param tick_spacing: interval between two ticks on the slider
    :return: group_box: QGroupBox object - add this to the widget - see example
    :return: slider: QSlider object - use this to perform tasks on the slider
    :return: value_label: displays value of slider, update this when value is changed
    """
    group_box = QGroupBox(text)

    slider = QSlider(Qt.Orientation.Horizontal)
    slider.setMinimum(min_val)
    slider.setMaximum(max_val)
    slider.setTickPosition(QSlider.TickPosition.TicksBothSides)
    slider.setTickInterval(tick_spacing)

    value_label = QLabel(str(slider.value()))

    group_box.setStyleSheet("color: white")
    vbox = QVBoxLayout()
    vbox.addWidget(slider)
    vbox.addWidget(value_label)
    group_box.setLayout(vbox)

    return group_box, slider, value_label
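# Example usage, as referenced in the docstring above; the slider name, the
# enclosing layout, and the signal hookup are assumptions about the
# surrounding Qt widget code.
# box, slider, value_label = create_slider("Kick speed", 0, 100, 10)
# slider.valueChanged.connect(lambda value: value_label.setText(str(value)))
# some_layout.addWidget(box)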
def write_config_metadata_proto(
    output_proto: str, top_level_proto: str, config_metadata: dict,
):
    """Generates the .proto file containing all the protobuf representations
    of all dynamic parameter configs.

    :param output_proto: the name of the proto
    :param top_level_proto: the top level proto name
    :param config_metadata: the dictionary containing the config metadata

    """
    output_proto_contents = ""
    list_of_includes = []

    for config, config_definition in config_metadata.items():

        message_contents = ""
        entry_count = 1

        name = to_pascal_case(config.split(".")[0])
        list_of_includes.append(config)

        # generate includes
        if "include" in config_definition:
            for included_config in config_definition["include"]:

                # There is no way to forward declare messages in proto
                # so let's make use of google.protobuf.Any to store nested
                # configs
                #
                # Since we are autogenerating, we should know which index
                # corresponds to which type
                message_contents += PROTO_CONFIG_ENTRY.format(
                    name=included_config.split(".")[0], count=entry_count
                )
                entry_count += 1

        # generate parameters
        if "parameters" in config_definition:
            for param_entry in config_definition["parameters"]:
                for param_type, param_definition in param_entry.items():
                    message_contents += "".join(
                        PROTO_PARAM_ENTRY.format(
                            type=type_map.PROTO_TYPE_MAP[param_type],
                            name=param_definition["name"],
                            count=entry_count,
                        )
                    )
                    entry_count += 1

        # append to output
        output_proto_contents += PROTO_MESSAGE_DEFINITION.format(
            name=name, contents=message_contents,
        )

    # make the top level config
    top_level_config_contents = ""
    entry_count = 1

    for include in list(set(list_of_includes)):
        top_level_config_contents += PROTO_CONFIG_ENTRY.format(
            name=include.split(".")[0], count=entry_count
        )
        entry_count += 1

    output_proto_contents += PROTO_MESSAGE_DEFINITION.format(
        name=top_level_proto, contents=top_level_config_contents
    )

    # write the output
    with open(f"{output_proto}", "w") as proto_file:
        proto_file.write(
            CONFIG_PROTO.format(
                autogen_warning=AUTOGEN_WARNING, contents=output_proto_contents,
            )
        )
def generate_heatmap( x_bounds: tuple, y_bounds: tuple, grid_size: float, heatmap_function: Callable[[float, float], float], ) -> np.ndarray: """ Generate a heatmap by creating a grid where each grid cell has grid_size length and width and contains the value of the heatmap function called at its position. :param x_bounds: the x-axis boundaries of the heatmap :param y_bounds: the y-axis boundaries of the heatmap :param grid_size: the size of one grid cell on the heatmap :param heatmap_function: the function to generate the heatmap :return: a heatmap """ grid_dims = ( int((max(x_bounds) - min(x_bounds)) // grid_size), int((max(y_bounds) - min(y_bounds)) // grid_size), ) heatmap_grid = np.ndarray(grid_dims) xcoords = np.arange(min(x_bounds), max(x_bounds), grid_size) ycoords = np.arange(min(y_bounds), max(y_bounds), grid_size) for x_idx, x in enumerate(xcoords): for y_idx, y in enumerate(ycoords): if x_idx < grid_dims[0] and y_idx < grid_dims[1]: heatmap_grid[x_idx, y_idx] = heatmap_function(x, y) return np.flip(np.rot90(heatmap_grid), axis=0)
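# Example: evaluate a simple radial function over a 9 m x 6 m field with
# 0.5 m cells. The bounds and the function are illustrative only.
heatmap = generate_heatmap(
    x_bounds=(-4.5, 4.5),
    y_bounds=(-3.0, 3.0),
    grid_size=0.5,
    heatmap_function=lambda x, y: -(x ** 2 + y ** 2),
)
# heatmap.shape == (12, 18): after the rot90/flip, rows correspond to y and
# columns to x, which is the orientation the image plotter below expects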
def plot_heatmap(self, heatmap_function: Callable[[float, float], float]): """ Plot a heatmap for the given function. :param heatmap_function: a function to evaluate on a grid to generate a heatmap """ heatmap = generate_heatmap( self.x_bounds, self.y_bounds, self.grid_size, heatmap_function ) self.image_data_source.data.update(dict(image=[heatmap]))
def refresh(self): """Update the play info widget with new play information """ try: playinfo = self.log_buffer.get_nowait() except queue.Empty as empty: return play_info_dict = MessageToDict(playinfo) robot_ids = [] tactic_fsm_states = [] tactic_names = [] play_name = [] play_name.append(play_info_dict["play"]["playName"]) for robot_id in sorted(play_info_dict["robotTacticAssignment"]): robot_ids.append(robot_id) tactic_fsm_states.append( play_info_dict["robotTacticAssignment"][robot_id]["tacticFsmState"] ) tactic_names.append( play_info_dict["robotTacticAssignment"][robot_id]["tacticName"] ) self.set_data( { "Robot ID": robot_ids, "Tactic Name": tactic_names, "Tactic FSM State": tactic_fsm_states, "Play Name": play_name, } ) self.resizeColumnsToContents() self.resizeRowsToContents()
def dfs_helper(
    self, config: CppHeaderConfig, arg_prefix: str, load_dependency: str
):
    """A depth first search helper for adding the necessary prefix to
    accessing and setting parameters of included configs in the
    loadFromCommandLineArguments function

    :param config: the current CppHeaderConfig object
    :param arg_prefix: the prefix for accessing the arg struct
    :param load_dependency: the prefix for accessing the actual parameter

    """
    arg_prefix = (
        to_snake_case(config.config_name)
        if not arg_prefix
        else arg_prefix + "." + to_snake_case(config.config_name)
    )
    load_dependency = load_dependency + "getMutable{config_name}()->".format(
        config_name=config.config_name
    )

    mutable_param_gen = (
        param for param in config.parameters if not param.is_constant
    )

    for param in mutable_param_gen:
        self.included_config_command_line_arg_entries.append(
            param.command_line_option_entry_with_prefix(arg_prefix + ".")
        )
        self.included_config_load_command_line_args_into_config_contents.append(
            param.load_command_line_arg_into_config_with_dependencies(
                load_dependency, arg_prefix + "."
            )
        )

    # top level config has access to all configs, so no need to recursively add options
    for included_config in config.configs:
        self.dfs_helper(included_config, arg_prefix, load_dependency)
def add_disc_rew(seg, gamma): """ Discount the reward of the generated batch of trajectories. """ new = np.append(seg['new'], 1) rew = seg['rew'] n_ep = len(seg['ep_rets']) n_samp = len(rew) seg['ep_disc_ret'] = ep_disc_ret = np.empty(n_ep, 'float32') seg['disc_rew'] = disc_rew = np.empty(n_samp, 'float32') discounter = 0 ret = 0. i = 0 for t in range(n_samp): disc_rew[t] = rew[t] * gamma ** discounter ret += disc_rew[t] if new[t + 1]: discounter = 0 ep_disc_ret[i] = ret i += 1 ret = 0. else: discounter += 1
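# Worked example on a tiny batch: two episodes (lengths 2 and 1) with
# gamma = 0.5. Only the length of seg['ep_rets'] is used by add_disc_rew, so
# its values here are placeholders.
seg = {
    'new': np.array([1, 0, 1]),  # a new episode starts at t=0 and at t=2
    'rew': np.array([1.0, 1.0, 2.0]),
    'ep_rets': [2.0, 2.0],
}
add_disc_rew(seg, gamma=0.5)
# seg['disc_rew']    -> [1.0, 0.5, 2.0]
# seg['ep_disc_ret'] -> [1.5, 2.0]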
def cluster_rewards(ep_reward, reward_clustering='none'): """ Cluster the episode return with the provided strategy. """ if reward_clustering == 'none': pass elif reward_clustering == 'floor': ep_reward = np.floor(ep_reward) elif reward_clustering == 'ceil': ep_reward = np.ceil(ep_reward) elif reward_clustering == 'floor10': ep_reward = np.floor(ep_reward * 10) / 10 elif reward_clustering == 'ceil10': ep_reward = np.ceil(ep_reward * 10) / 10 elif reward_clustering == 'floor100': ep_reward = np.floor(ep_reward * 100) / 100 elif reward_clustering == 'ceil100': ep_reward = np.ceil(ep_reward * 100) / 100 return ep_reward
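# Example: clustering episode returns to one decimal place.
# cluster_rewards(np.array([1.234, -0.567]), 'floor10') -> array([ 1.2, -0.6])
# cluster_rewards(np.array([1.234, -0.567]), 'ceil10')  -> array([ 1.3, -0.5])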
def act(self, ob, resample=False): """ Sample weights for the actor network, then sample action(s) from the resulting actor depending on state(s) Params: ob: current state, or a list of states resample: whether to resample actor params before acting """ if resample: actor_param = self.resample() action = self._act(np.atleast_2d(ob))[0] return (action, actor_param) if resample else action
def eval_renyi(self, other, order=2):
    """Renyi divergence
    Special case: order=1 is the KL divergence

    Params:
        other: policy to evaluate the distance from
        order: order of the Renyi divergence
    """
    if other is not self._renyi_other:
        if self.verbose: print('Building graph')
        self._renyi_order = tf.placeholder(name='renyi_order', dtype=tf.float32, shape=[])
        self._renyi_other = other
        if order<1:
            raise ValueError('Order must be >= 1')
        else:
            renyi = self.pd.renyi(other.pd, alpha=self._renyi_order)
            self._get_renyi = U.function([self._renyi_order], [renyi])

    return self._get_renyi(order)[0]
def eval_gradient(self, actor_params, rets, use_baseline=True, behavioral=None):
    """
    Compute PGPE policy gradient given a batch of episodes

    Params:
        actor_params: list of actor parameters (arrays), one per episode
        rets: flat list of total [discounted] returns, one per episode
        use_baseline: whether to employ a variance-minimizing baseline (may be more efficient without)
        behavioral: higher-order policy used to collect data (off-policy case). If None, the present policy is assumed to be the behavioral (on-policy case)

    References:
        Optimal baseline for PGPE:
            Zhao, Tingting, et al. "Analysis and improvement of policy gradient estimation."
            Advances in Neural Information Processing Systems. 2011.
    """
    assert rets and len(actor_params)==len(rets)
    batch_size = len(rets)

    if not behavioral:
        #On policy
        if not use_baseline:
            #Without baseline (more efficient)
            pgpe_times_n = np.ravel(self._get_pgpe_times_n(actor_params, rets)[0])
            return pgpe_times_n/batch_size
        else:
            #With optimal baseline
            rets = np.array(rets)
            scores = np.zeros((batch_size, self._n_higher_params))
            score_norms = np.zeros(batch_size)
            for (theta, i) in zip(actor_params, range(batch_size)):
                scores[i] = self._get_score(theta)[0]
                score_norms[i] = self._get_score_norm(theta)[0]
            b = np.sum(rets * score_norms**2) / np.sum(score_norms**2)
            pgpe = np.mean(((rets - b).T * scores.T).T, axis=0)
            return pgpe
    else:
        #Off-policy
        if behavioral is not self._behavioral:
            self._build_iw_graph(behavioral)
            self._behavioral = behavioral
        if not use_baseline:
            #Without baseline (more efficient)
            off_pgpe_times_n = np.ravel(self._get_off_pgpe_times_n(actor_params, rets)[0])
            return off_pgpe_times_n/batch_size
        else:
            #With optimal baseline
            rets = np.array(rets)
            scores = np.zeros((batch_size, self._n_higher_params))
            score_norms = np.zeros(batch_size)
            for (theta, i) in zip(actor_params, range(batch_size)):
                scores[i] = self._get_score(theta)[0]
                score_norms[i] = self._get_score_norm(theta)[0]
            iws = np.ravel(self._get_iws(actor_params)[0])
            b = np.sum(rets * iws**2 * score_norms**2)/ np.sum(iws**2 * score_norms**2)
            pgpe = np.mean(((rets - b).T * scores.T).T, axis=0)
            return pgpe
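# For reference, the variance-minimizing baseline used in the on-policy branch
# above is (Zhao et al., 2011):
#
#   b = E[ R(theta) * ||grad_rho log p(theta | rho)||^2 ]
#       / E[ ||grad_rho log p(theta | rho)||^2 ]
#
# estimated here with batch averages; the off-policy branch additionally
# weights both expectations by the squared importance weights, matching the
# code above.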
def eval_natural_gradient(self, actor_params, rets, use_baseline=True, behavioral=None):
    """
    Compute the PGPE natural policy gradient given a batch of episodes

    Params:
        actor_params: list of actor parameters (arrays), one per episode
        rets: flat list of total [discounted] returns, one per episode
        use_baseline: whether to employ a variance-minimizing baseline (may be more efficient without)
        behavioral: higher-order policy used to collect data (off-policy case). If None, the present policy is assumed to be the behavioral (on-policy case)

    References:
        Optimal baseline for PGPE:
            Zhao, Tingting, et al. "Analysis and improvement of policy gradient estimation."
            Advances in Neural Information Processing Systems. 2011.
    """
    assert rets and len(actor_params)==len(rets)
    batch_size = len(rets)
    fisher = self.eval_fisher() + 1e-24

    if not behavioral:
        #On policy
        if not use_baseline:
            #Without baseline (more efficient)
            pgpe_times_n = np.ravel(self._get_pgpe_times_n(actor_params, rets)[0])
            grad = pgpe_times_n/batch_size
            if self.diagonal:
                return grad/fisher
            else:
                raise NotImplementedError #TODO: full on w/o baseline
        else:
            #With optimal baseline
            if self.diagonal:
                rets = np.array(rets)
                scores = np.zeros((batch_size, self._n_higher_params))
                score_norms = np.zeros(batch_size)
                for (theta, i) in zip(actor_params, range(batch_size)):
                    scores[i] = self._get_score(theta)[0]
                    score_norms[i] = np.linalg.norm(scores[i]/fisher)
                b = np.sum(rets * score_norms**2) / np.sum(score_norms**2)
                npgpe = np.mean(((rets - b).T * scores.T).T, axis=0)/fisher
                return npgpe
            else:
                raise NotImplementedError #TODO: full on with baseline
    else:
        #Off-policy
        if behavioral is not self._behavioral:
            self._build_iw_graph(behavioral)
            self._behavioral = behavioral
        if not use_baseline and self.diagonal:
            #Without baseline (more efficient)
            off_pgpe_times_n = np.ravel(self._get_off_pgpe_times_n(actor_params, rets)[0])
            grad = off_pgpe_times_n/batch_size
            return grad/fisher
        else:
            raise NotImplementedError
Python
def traj_segment_generator(pi, env, n_episodes, horizon, stochastic, gamma):
    """
    Returns a generator of complete rollouts. It needs to be fed a vectorized
    environment and a single policy.
    """
    policy_time = 0
    env_time = 0

    # Initialize state variables
    t = 0
    ac = np.array([env.action_space.sample()] * env.num_envs)

    _env_s = time.time()
    ob = env.reset()
    env_time += time.time() - _env_s
    zero_ob = np.zeros(ob.shape)

    current_indexes = np.arange(0, env.num_envs)

    def filter_indexes(idx_vector, t_vector):
        return map(list, zip(*[(i, v, t) for i,(v,t) in enumerate(zip(idx_vector, t_vector)) if v != -1]))

    def has_ended(idx_vector):
        return sum(idx_vector) == -len(idx_vector)

    # Iterate to make yield continuous
    while True:
        _tt = time.time()

        # Initialize history arrays
        obs = np.array([[zero_ob[0] for _t in range(horizon)] for _e in range(n_episodes)])
        rews = np.zeros((n_episodes, horizon), 'float32')
        vpreds = np.zeros((n_episodes, horizon), 'float32')
        news = np.zeros((n_episodes, horizon), 'int32')
        acs = np.array([[ac[0] for _t in range(horizon)] for _e in range(n_episodes)])
        prevacs = acs.copy()
        mask = np.zeros((n_episodes, horizon), 'int32')

        # Initialize indexes and timesteps
        current_indexes = np.arange(0, env.num_envs)
        current_timesteps = np.zeros((env.num_envs), dtype=np.int32)
        # Set to -1 indexes if njobs > num_episodes
        current_indexes[n_episodes:] = -1
        # Indexes log: remember which indexes have been completed
        indexes_log = list(current_indexes)

        while not has_ended(current_indexes):
            # Get the action and save the previous one
            prevac = ac
            _pi_s = time.time()
            ac, vpred = pi.act(stochastic, ob)
            policy_time += time.time() - _pi_s

            # Filter the current indexes
            ci_ob, ci_memory, ct = filter_indexes(current_indexes, current_timesteps)

            # Save the current properties
            obs[ci_memory, ct,:] = ob[ci_ob]
            #vpreds[ci_memory, ct] = np.reshape(np.array(vpred), (-1,))[ci_ob]
            acs[ci_memory, ct] = ac[ci_ob]
            prevacs[ci_memory, ct] = prevac[ci_ob]

            # Take the action
            _env_s = time.time()
            env.step_async(ac)
            ob, rew, done, _ = env.step_wait()
            env_time += time.time() - _env_s

            # Save the reward
            rews[ci_memory, ct] = rew[ci_ob]
            mask[ci_memory, ct] = 1
            news[ci_memory, ct] = np.reshape(np.array(done), (-1, ))[ci_ob]

            # Update the indexes and timesteps
            for i, d in enumerate(done):
                if not d and current_timesteps[i] < (horizon-1):
                    current_timesteps[i] += 1
                elif max(indexes_log) < n_episodes - 1:
                    current_timesteps[i] = 0 # Reset the timestep
                    current_indexes[i] = max(indexes_log) + 1 # Increment the index
                    indexes_log.append(current_indexes[i])
                else:
                    current_indexes[i] = -1 # Disabling

        # Add discounted reward (here is simpler)
        gamma_log = np.log(np.full((horizon), gamma, dtype='float32'))
        gamma_discounter = np.exp(np.cumsum(gamma_log))
        discounted_reward = rews * gamma_discounter

        total_time = time.time() - _tt

        # Reshape to flatten episodes and yield
        yield {'ob': np.reshape(obs, (n_episodes * horizon,)+obs.shape[2:]),
               'rew': np.reshape(rews, (n_episodes * horizon)),
               'vpred': np.reshape(vpreds, (n_episodes * horizon)),
               'ac': np.reshape(acs, (n_episodes * horizon,)+acs.shape[2:]),
               'prevac': np.reshape(prevacs, (n_episodes * horizon,)+prevacs.shape[2:]),
               'nextvpred': [], # FIXME: what is my goal?
               'ep_rets': np.sum(rews * mask, axis=1),
               'ep_lens': np.sum(mask, axis=1),
               'mask': np.reshape(mask, (n_episodes * horizon)),
               'new': np.reshape(news, (n_episodes * horizon)),
               'disc_rew': np.reshape(discounted_reward, (n_episodes * horizon)),
               'ep_disc_ret': np.sum(discounted_reward, axis=1),
               'total_time': total_time,
               'policy_time': policy_time,
               'env_time': env_time}

        # Reset time counters
        policy_time = 0
        env_time = 0
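A hedged sketch of consuming the generator above. The policy `pi` and vectorized environment `vec_env` are assumed to exist with the interfaces used in this module; the episode count, horizon, and discount are illustrative.

seg_gen = traj_segment_generator(pi, vec_env, n_episodes=20, horizon=500,
                                 stochastic=True, gamma=0.99)
seg = next(seg_gen)                    # one batch of complete rollouts
ep_disc_rets = seg['ep_disc_ret']      # one discounted return per episode
print('mean discounted return:', ep_disc_rets.mean())
print('policy/env time:', seg['policy_time'], seg['env_time'])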
Python
def route(self, path, method="GET"):
    """
    Decorator function for handler registration

    :param path: URL path to register the handler under
    :param method: HTTP method to match (default "GET")
    """
    def handle(func):
        self._router.add_route(method, path, func)
        return func

    return handle
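A hypothetical usage of the decorator on an application object `app` that exposes this method; the path and handler are illustrative only.

@app.route('/health', method='GET')
def health(request):
    return {'status': 'ok'}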
Python
def handle(self, *args, **options): """Check if the DB is ready first""" self.stdout.write('Waiting for the DB') db_conn = None while not db_conn: try: db_conn = connections['default'] except OperationalError: self.stdout.write('Database unavailable, waiting 1 second') time.sleep(1) self.stdout.write(self.style.SUCCESS('Database is available!'))
Python
def sample_recipe(user, **params): """Create and return a sample recipe""" defaults = { 'title': 'sample recipe', 'time_minutes': 10, 'price': 5 } defaults.update(params) return Recipe.objects.create(user=user, **defaults)
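A hedged example of how the helper is typically used in a test case; `self.user` is assumed to be a user created in setUp, and the overridden fields are illustrative.

recipe = sample_recipe(user=self.user, title='Avocado toast', price=7)
self.assertEqual(recipe.title, 'Avocado toast')
self.assertEqual(recipe.time_minutes, 10)   # unchanged default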
Python
def split_detector_sections(hits, phi_edges, eta_edges): """Split hits according to provided phi and eta boundaries.""" hits_sections = [] # Loop over sections for i in range(len(phi_edges) - 1): phi_min, phi_max = phi_edges[i], phi_edges[i+1] # Select hits in this phi section phi_hits = hits[(hits.phi > phi_min) & (hits.phi < phi_max)] # Center these hits on phi=0 centered_phi = phi_hits.phi - (phi_min + phi_max) / 2 phi_hits = phi_hits.assign(phi=centered_phi, phi_section=i) for j in range(len(eta_edges) - 1): eta_min, eta_max = eta_edges[j], eta_edges[j+1] # Select hits in this eta section eta = calc_eta(phi_hits.r, phi_hits.z) sec_hits = phi_hits[(eta > eta_min) & (eta < eta_max)] hits_sections.append(sec_hits.assign(eta_section=j)) return hits_sections
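A small usage sketch, assuming `hits` is a pandas DataFrame with phi, r and z columns as used above; the section edges are illustrative.

import numpy as np

phi_edges = np.linspace(-np.pi, np.pi, 5)   # 4 phi sections
eta_edges = np.array([-5., 0., 5.])         # 2 eta sections
sections = split_detector_sections(hits, phi_edges, eta_edges)
assert len(sections) == (len(phi_edges) - 1) * (len(eta_edges) - 1)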
Python
def main(rootdir, labeldir=None, classfile=None, port=5000): """Create image label annotations rootdir: path to directory containing images labeldir: if specified, alternative directory for labels classfile: if specified, alternative file containing class mapping """ rootdir = os.path.realpath(rootdir) labeldir = os.path.realpath(labeldir) if labeldir is not None else rootdir app.config.from_object(config) app.config['ROOT_DIR'] = rootdir app.config['LABEL_DIR'] = labeldir if classfile is None: app.config['CLASSFILE'] = '{}/classmap.json'.format( app.config['LABEL_DIR']) if not os.path.exists(app.config['CLASSFILE']): shutil.copyfile('defaultclassmap.json', app.config['CLASSFILE']) else: app.config['CLASSFILE'] = classfile app.run(host="0.0.0.0", port=port, debug=True)
Python
def identify_sources(people, sentences=None, corefs=None, people_to_quotes=None, people_to_verbs=None):
    """
    Given the people mentioned, identify which of them are sources.

    Sources are defined as people whom we have identified as being quoted,
    as well as people who are the subject of a verb that corresponds to
    saying something.

    For flexibility, we can either pass in sentences and corefs, in which
    case this function calls get_quotes and get_associated_verbs to compute
    people_to_quotes and people_to_verbs, or pass those two in directly if
    they have already been computed.
    """
    SPEAKING_LEMMAS = {'say', 'tell', 'speak', 'ask', 'mention', 'suggest',
                       'claim', 'question', 'tweet', 'write'}

    assert (sentences is not None and corefs is not None) or (
        people_to_quotes is not None and people_to_verbs is not None)
    if people_to_quotes is None:
        people_to_quotes = get_quotes(people, sentences, corefs)
    if people_to_verbs is None:
        people_to_verbs = get_associated_verbs(people, sentences, corefs)

    # Sources is a dictionary which contains only people who are sources,
    # and has, for each of them, a list of reasons why we classified them
    # as sources.
    sources = defaultdict(list)

    for p, quotes in people_to_quotes.iteritems():
        if len(quotes) > 0:
            sources[p].append('Quoted saying {} words'.format(len(quotes)))

    for p, verbs in people_to_verbs.iteritems():
        # Verbs is a list of (actual verb from text, lemma). For example,
        # [(said, say), (say, say), (spoke, speak)]
        verb_lemma_set = set([v[1] for v in verbs])
        speaking_verbs_used = verb_lemma_set.intersection(SPEAKING_LEMMAS)
        if len(speaking_verbs_used) > 0:
            sources[p].append('Subject of {}'.format(
                ', '.join(speaking_verbs_used)))

    return sources
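A hedged usage sketch, assuming `annotated` is CoreNLP-style output with 'sentences' and 'corefs' keys and `people` is the set of mentions found earlier; the printing is illustrative.

sources = identify_sources(people,
                           sentences=annotated['sentences'],
                           corefs=annotated['corefs'])
for person, reasons in sources.items():
    print(person, '->', '; '.join(reasons))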
Python
def add_flag_last_name_to_be_inferred(mentions_dictionary, sentences, corefs): """ Add a flag: Is this the first time we are seeing this name, and is this a single name? If yes, we are on the alert for a person who is related to another person, and whose last name is to be inferred from the text. For example, in the sentence "President Barack Obama and his wife Michelle spoke at the gathering," we have that Michelle's last name is to be inferred from her relationship with her husband. Then, a "Ms. Obama" in the text refers to Michelle, but this connection is not made explicit. This is, of course, just a rough heuristic. There are cases (e.g. Lorde) where a person is referred to exclusively by just one name. """ set_of_mentions = set() for key in sorted(mentions_dictionary): mention = mentions_dictionary[key]['text'] if len(mention.split()) == 1: first_time = True for el in set_of_mentions: if mention in el: first_time = False if first_time: mentions_dictionary[key]['flag_last_name_to_infer'] = True set_of_mentions.add(mention) for key in sorted(mentions_dictionary): if not mentions_dictionary[key].get('flag_last_name_to_infer'): continue related_mention_info = detect_relationships( mentions_dictionary, key, sentences, corefs) if related_mention_info is not None: related_mention = mentions_dictionary[related_mention_info['key']] if related_mention_info['rel'] == 'and_surname_sharing': mentions_dictionary[key]['potential_surname'] = \ related_mention['text'].split()[-1] elif related_mention_info['rel'] in RELATIONSHIP_WORDS: mentions_dictionary[key]['potential_surname'] = \ related_mention['text'].split()[-1]
Python
def is_mention_subset(small_mention_text, large_mention_text): """ Check if the smaller mention is a "subset" of the larger mention. We define "subset" in a very specific way: 1. Subsequence: Example: Barack is a subset of Barack Obama, John Kelly is a subset of John Kelly Smith, Kelly Smith is a subset of John Kelly Smith, etc. And, Barack is a subset of Barack. 2. The smaller string is equal to the larger string minus the words in the middle. Example: John Smith is a subset of John Jackson Smith. """ small_mention_tokens = small_mention_text.split() large_mention_tokens = large_mention_text.split() if small_mention_text in large_mention_text: return True elif len(large_mention_tokens) > 2: if small_mention_tokens == \ [large_mention_tokens[0], large_mention_tokens[-1]]: return True return False
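Illustrative checks of the two subset rules described in the docstring.

assert is_mention_subset('Barack', 'Barack Obama')            # subsequence
assert is_mention_subset('John Smith', 'John Jackson Smith')  # first + last name
assert not is_mention_subset('Jackson', 'John Smith')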
Python
def _get_mentions_coreferent_with_word(mentions_dictionary, corefs, word_sent_idx, word_idx): """ Given a particular word (which is identified by its sentence number and its word number, both 1-based, so as to match the numbering in corefs), returns the list of mentions from mentions_dictionary it is coreferent with, if any. Assumes that every entry in mentions_dictionary has the 'coref_mention_ids' field populated with a list of coref mention ids. NOTE: Only matches the word if the entry in corefs contains exactly the word -- the idea is, if the word is 'his', then it won't match a phrase containing 'his', like 'his mother'. """ keys_of_coreferent_mentions = set() coref_id_to_mention_key = {} for key, mentions_dict in mentions_dictionary.iteritems(): coref_ids_of_mention = mentions_dict.get('coref_mention_ids', []) for coref_id in coref_ids_of_mention: coref_id_to_mention_key[coref_id] = key for coref_id, coref_chain in corefs.iteritems(): chain_contains_word = False for coref_mention_dict in coref_chain: if coref_mention_dict['sentNum'] == word_sent_idx: if coref_mention_dict['startIndex'] == word_idx: if coref_mention_dict['endIndex'] == word_idx + 1: chain_contains_word = True break if chain_contains_word: ids_in_chain = [coref_mention_dict['id'] for coref_mention_dict in coref_chain] for _id in ids_in_chain: if _id in coref_id_to_mention_key: keys_of_coreferent_mentions.add( coref_id_to_mention_key[_id]) return list(keys_of_coreferent_mentions)
Python
def _add_mention_to_dict(mention, people_mentioned): """ Helps the get_people_mentioned function by adding this mention to the dictionary. Sees if the mention already existed. If it's a sub/super-string of another mention, then we fold the two together to keep the largest mention. """ sp_mention = tuple(mention.split()) # We find if this entity already exists in our dict of # people mentioned. We find out whether we should overwrite # that element, or just add one to its tally (our policy # is to keep the longest mention only.) existing_elem = None overwrite = False for pm in people_mentioned: if pm == sp_mention: existing_elem = pm break if len(set(pm).intersection(set(sp_mention))) > 0: existing_elem = pm if len(sp_mention) > len(pm): overwrite = True break if existing_elem: if overwrite: people_mentioned[sp_mention] = 1 + \ people_mentioned.pop(existing_elem) else: people_mentioned[existing_elem] += 1 else: people_mentioned[sp_mention] = 1
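A small trace of the folding behaviour: the longer mention wins and the tallies are merged.

people_mentioned = {}
_add_mention_to_dict('Barack', people_mentioned)
_add_mention_to_dict('Barack Obama', people_mentioned)
print(people_mentioned)   # {('Barack', 'Obama'): 2}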
Python
def _get_honorifics(sentences):
    '''
    Extract gender cues from annotated sentences: Mrs., Ms., Mr.

    For each of these gender cues, we have a list of associated names.
    For example, if our content was:
    'Mr. Barack Obama was the President. His wife Mrs. Michelle was the
    First Lady. Their daughter Ms. Sasha is in high school. Mr. Biden is
    the Vice President.',
    then honorifics should be:
    {'Mr.': set(['Barack Obama', 'Biden']),
     'Mrs.': set(['Michelle']),
     'Ms.': set(['Sasha'])}
    '''

    honorifics = {h: set() for h in HONORIFICS}

    for sentence in sentences:
        tokens = sentence['tokens']
        for token_i, token in enumerate(tokens):
            if token_i == 0:
                person_name = ''
                # saveAs is a flag of sorts: tells you whether
                # to be on the lookout for a name
                saveAs = ''
            if token['originalText'] in HONORIFICS:
                '''
                After seeing a gender cue ('Mr.'/'Mrs.'/'Ms.'), get ready to:
                1. store a person's name (which would logically follow this
                   token) as person_name (initialized to an empty string).
                2. save the gender cue we have just seen as saveAs.
                '''
                saveAs = token['originalText']
                person_name = ''
                continue
            if saveAs != '':
                if token['ner'] == 'PERSON':
                    if person_name == '':
                        person_name = token['originalText']
                    else:
                        person_name += ' ' + token['originalText']
                else:
                    if person_name != '':
                        honorifics[saveAs].add(person_name)
                    person_name = ''
                    saveAs = ''
    return honorifics
Python
def feature_duplicator(
    df: pd.DataFrame,
    columns_to_duplicate: Optional[List[str]] = None,
    columns_mapping: Optional[Dict[str, str]] = None,
    prefix: Optional[str] = None,
    suffix: Optional[str] = None
) -> LearnerReturnType:
    """
    Duplicates some columns in the dataframe.

    When encoding features, a good practice is to save the encoded version
    in a different column rather than replacing the original values. The
    purpose of this function is to duplicate the column to be encoded, to
    be later replaced by the encoded values. The duplication method is used
    to preserve the original behaviour (replace).

    Parameters
    ----------
    df: pandas.DataFrame
        A Pandas' DataFrame with columns_to_duplicate columns

    columns_to_duplicate: list of str
        List of column names to duplicate

    columns_mapping: dict of str to str (default None)
        Mapping of source columns to destination columns

    prefix: str (default None)
        Prefix to add to the duplicated column names

    suffix: str (default None)
        Suffix to add to the duplicated column names

    Returns
    ----------
    increased_dataset : pandas.DataFrame
        A dataset with repeated columns
    """
    columns_final_mapping = (
        columns_mapping
        if columns_mapping is not None
        else {
            col: (prefix or '') + str(col) + (suffix or '')
            for col in columns_to_duplicate
        } if columns_to_duplicate else dict()
    )

    def p(new_df: pd.DataFrame) -> pd.DataFrame:
        categ_columns = {dest_col: new_df[src_col] for src_col, dest_col in columns_final_mapping.items()}
        return new_df.assign(**categ_columns)

    p.__doc__ = feature_duplicator.__doc__

    log: LearnerLogType = {
        'feature_duplicator': {
            'columns_to_duplicate': columns_to_duplicate,
            'columns_mapping': columns_mapping,
            'prefix': prefix,
            'suffix': suffix,
            'columns_final_mapping': columns_final_mapping,
        }
    }

    return p, p(df.copy()), log
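A hedged usage sketch on a toy frame; the column names are illustrative.

import pandas as pd

toy_df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
duplicate_fn, duplicated_df, log = feature_duplicator(toy_df,
                                                      columns_to_duplicate=['a'],
                                                      prefix='raw_')
print(list(duplicated_df.columns))   # ['a', 'b', 'raw_a']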
Python
def column_duplicatable(columns_to_bind: str) -> Callable: """ Decorator to prepend the feature_duplicator learner. Identifies the columns to be duplicated and applies duplicator. Parameters ---------- columns_to_bind: str Sets feature_duplicator's "columns_to_duplicate" parameter equal to the `columns_to_bind` parameter from the decorated learner """ def _decorator(child: Callable) -> Callable: mixin = feature_duplicator def _init( *args: List[Any], **kwargs: Dict[str, Any] ) -> Union[Callable, LearnerReturnType]: mixin_spec = inspect.getfullargspec(mixin) mixin_named_args = set(mixin_spec.args) | set(mixin_spec.kwonlyargs) child_spec = inspect.getfullargspec(child) child_named_args = set(child_spec.args) | set(child_spec.kwonlyargs) def _learn(df: pd.DataFrame) -> LearnerReturnType: mixin_kwargs = { key: value for key, value in kwargs.items() if key in mixin_named_args } if 'prefix' in kwargs.keys() or 'suffix' in kwargs.keys(): columns_to_duplicate: Any = (kwargs[columns_to_bind] if columns_to_bind in kwargs.keys() else args[child_spec.args.index(columns_to_bind)]) mixin_kwargs['columns_to_duplicate'] = columns_to_duplicate mixin_fn, mixin_df, mixin_log = mixin(df, **mixin_kwargs) child_kwargs: Dict[str, Any] = { key: value for key, value in kwargs.items() if key in child_named_args } child_fn, child_df, child_log = child(mixin_df, *args[1:], **child_kwargs) child_kwargs[columns_to_bind] = \ list(mixin_log['feature_duplicator']['columns_final_mapping'].values()) return toolz.compose(child_fn, mixin_fn), child_df, {**mixin_log, **child_log} if not len(args): _learn.__doc__ = child.__doc__ return _learn else: return _learn(args[0]) callable_fn = functools.wraps(child)(_init) callable_fn.__doc__ = child.__doc__ return callable_fn return _decorator
Python
def detect_audio_begin(sound, offset, silence_threshold=-60.0): ''' sound is a pydub.AudioSegment silence_threshold in dB chunk_size in ms iterate over chunks until you find the first one with sound ''' sound_length = len(sound) chunk_size = 10 # ms audio_begin = offset # ms # print("audio_begin:", end="") while sound[audio_begin:audio_begin + chunk_size].dBFS < silence_threshold: audio_begin += chunk_size if audio_begin >= sound_length: return -1 return audio_begin
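A hedged usage example with pydub; the file path and threshold are illustrative.

from pydub import AudioSegment

sound = AudioSegment.from_file('clip.wav')
start_ms = detect_audio_begin(sound, offset=0, silence_threshold=-50.0)
if start_ms < 0:
    print('no audio above the threshold was found')
else:
    print('audio starts at', start_ms, 'ms')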
Python
def simulate(
    self, market_state: M, allocation: Array, wealth: Array, time_to_simulate: Array
) -> tuple[M, Array]:
    """Simulate the market state forward by a given amount of time, updating wealth
    according to the allocation among assets."""
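A minimal sketch of one possible implementation for a single risky asset plus cash, assuming log-normal asset returns; the market state here is just elapsed time, and all parameter values are illustrative rather than part of the interface above.

import numpy as np

class LogNormalMarket:
    def __init__(self, mu=0.05, sigma=0.2, rate=0.01, seed=0):
        self.mu, self.sigma, self.rate = mu, sigma, rate
        self.rng = np.random.default_rng(seed)

    def simulate(self, market_state, allocation, wealth, time_to_simulate):
        dt = time_to_simulate
        shock = self.rng.standard_normal(np.shape(wealth))
        # Growth factor of the risky asset over dt under log-normal dynamics.
        risky_growth = np.exp((self.mu - 0.5 * self.sigma ** 2) * dt
                              + self.sigma * np.sqrt(dt) * shock)
        # Growth factor of the riskless (cash) position.
        safe_growth = np.exp(self.rate * dt)
        new_wealth = wealth * (allocation * risky_growth + (1.0 - allocation) * safe_growth)
        return market_state + dt, new_wealth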
Python
def thermo_plots(pressure,temperature,mixing_ratio):
    """
    Plots of vertical profiles of temperature, dewpoint, mixing ratio and relative humidity.

    Parameters
    ----------
    pressure : array-like
        Atmospheric pressure profile (surface to TOA)
    temperature: array-like
        Atmospheric temperature profile (surface to TOA)
    mixing_ratio: array-like
        Atmospheric mixing-ratio profile (surface to TOA)

    Returns
    -------
    """
    p = pressure*units('mbar')
    q = mixing_ratio*units('kilogram/kilogram')
    T = temperature*units('degC')
    Td = mpcalc.dewpoint_from_specific_humidity(q,T,p) # dewpoint
    Tp = mpcalc.parcel_profile(p,T[0],Td[0]) # parcel

    plt.figure(figsize = (12,5))

    lev = find_nearest(p.magnitude,100)

    plt.subplot(1,3,1)
    plt.plot(T[:lev],p[:lev],'-ob')
    plt.plot(Td[:lev],p[:lev],'-og')
    plt.plot(Tp[:lev],p[:lev],'-or')
    plt.xlabel('Temperature [C]',fontsize=12)
    plt.ylabel('Pressure [hPa]',fontsize=12)
    plt.gca().invert_yaxis()
    plt.legend(['Temp','Temp_Dew','Temp_Parcel'],loc=1)
    plt.grid()

    qs = mpcalc.mixing_ratio(mpcalc.saturation_vapor_pressure(T),p) # saturation mixing ratio
    RH = q/qs*100 # relative humidity

    plt.subplot(1,3,2)
    plt.plot(q[:lev],p[:lev],'-og')
    plt.xlabel('Mixing ratio [kg/kg]',fontsize=12)
    plt.gca().invert_yaxis()
    plt.grid()

    plt.subplot(1,3,3)
    plt.plot(RH[:lev],p[:lev],'-og')
    plt.xlabel('Relative humidity [%]',fontsize=12)
    plt.gca().invert_yaxis()
    plt.grid()

    plt.tight_layout()
    return (plt)
Python
def theta_plots(pressure,temperature,mixing_ratio):
    """
    Plots of vertical profiles of potential temperature, equivalent potential temperature,
    and saturated equivalent potential temperature.
    """
    p = pressure*units('mbar')
    T = temperature*units('degC')
    q = mixing_ratio*units('kilogram/kilogram')

    lev = find_nearest(p.magnitude,100)
    Td = mpcalc.dewpoint(mpcalc.vapor_pressure(p,q)) # dewpoint
    theta = mpcalc.potential_temperature(p,T)
    theta_e = mpcalc.equivalent_potential_temperature(p,T,Td)
    theta_es = mpcalc.equivalent_potential_temperature(p,T,T)

    plt.figure(figsize=(7,7))
    plt.plot(theta[:lev],p[:lev],'-ok')
    plt.plot(theta_e[:lev],p[:lev],'-ob')
    plt.plot(theta_es[:lev],p[:lev],'-or')
    plt.xlabel('Temperature [K]',fontsize=12)
    plt.ylabel('Pressure [hPa]',fontsize=12)
    plt.gca().invert_yaxis()
    plt.legend(['$\\theta$','$\\theta_e$','$\\theta_{es}$'],loc=1)
    plt.grid()
    return (plt)
Python
def add_curves_Wyoming(ax,datetime,station,linewidth=1.0,LH_Tdepend=False):
    """
    Overlay new curves from Wyoming sounding datasets on an existing axis.

    date: using the datetime module, e.g. datetime(2018, 6, 6)
    station: station name, e.g. 'MFL' Miami, Florida
    """
    from siphon.simplewebservice.wyoming import WyomingUpperAir

    date = datetime
    station = station
    df = WyomingUpperAir.request_data(date, station)
    pressure = df['pressure'].values
    Temp = df['temperature'].values
    Temp_dew = df['dewpoint'].values
    altitude = df['height'].values

    q = mpcalc.mixing_ratio(mpcalc.saturation_vapor_pressure(Temp_dew*units('degC')),pressure*units('mbar'))
    q = mpcalc.specific_humidity_from_mixing_ratio(q)
    qs = mpcalc.mixing_ratio(mpcalc.saturation_vapor_pressure(Temp*units('degC')),pressure*units('mbar'))

    # specific energies
    if LH_Tdepend == False:
        mse = mpcalc.moist_static_energy(altitude*units('meter'),Temp*units('degC'),q)
        mse_s = mpcalc.moist_static_energy(altitude*units('meter'),Temp*units('degC'),qs)
        dse = mpcalc.dry_static_energy(altitude*units('meter'),Temp*units('degC'))
    else:
        # A Short Course in Cloud Physics, Rogers and Yau (1989)
        T = Temp*units('degC')           # attach units (Temp is a plain array here)
        altitude = altitude*units('meter')
        Lvt = (2500.8 - 2.36*T.magnitude + 0.0016*T.magnitude**2 - 0.00006*T.magnitude**3)*units('joule/gram') # latent heat of evaporation
        #Lf = 2834.1 - 0.29*T - 0.004*T**2 # latent heat of fusion
        mse = Cp_d*T + g*altitude + Lvt*q
        mse_s = Cp_d*T + g*altitude + Lvt*qs
        dse = mpcalc.dry_static_energy(altitude,T)

    # adding curves on the main axes
    ax.plot(dse.magnitude, pressure, 'k', linewidth=linewidth)
    ax.plot(mse.magnitude, pressure, 'b', linewidth=linewidth)
    ax.plot(mse_s.magnitude, pressure, 'r', linewidth=linewidth)
Python
def add_curves(ax,pressure,temperature,mixing_ratio,linewidth=1.0,LH_Tdepend=False):
    """
    Overlay new curves from sounding profiles on an existing axis.
    """
    p = pressure*units('mbar')
    T = temperature*units('degC')
    q = mixing_ratio*units('kilogram/kilogram')
    qs = mpcalc.mixing_ratio(mpcalc.saturation_vapor_pressure(T),p)
    Td = mpcalc.dewpoint(mpcalc.vapor_pressure(p,q)) # dewpoint
    Tp = mpcalc.parcel_profile(p,T[0],Td[0]).to('degC') # parcel profile

    # Altitude based on the hydrostatic eq.
    altitude = np.zeros((np.size(T)))*units('meter') # surface is 0 meter
    for i in range(np.size(T)):
        altitude[i] = mpcalc.thickness_hydrostatic(p[:i+1],T[:i+1]) # Hypsometric Eq. for height

    # specific energies
    if LH_Tdepend == False:
        mse = mpcalc.moist_static_energy(altitude,T,q)
        mse_s = mpcalc.moist_static_energy(altitude,T,qs)
        dse = mpcalc.dry_static_energy(altitude,T)
    else:
        # A Short Course in Cloud Physics, Rogers and Yau (1989)
        Lvt = (2500.8 - 2.36*T.magnitude + 0.0016*T.magnitude**2 - 0.00006*T.magnitude**3)*units('joule/gram') # latent heat of evaporation
        #Lf = 2834.1 - 0.29*T - 0.004*T**2 # latent heat of fusion
        mse = Cp_d*T + g*altitude + Lvt*q
        mse_s = Cp_d*T + g*altitude + Lvt*qs
        dse = mpcalc.dry_static_energy(altitude,T)

    ax.plot(dse, p, '--k', linewidth=linewidth)
    ax.plot(mse, p, '--b', linewidth=linewidth)
    ax.plot(mse_s, p, '--r', linewidth=linewidth)
Python
def run_apyori_apriori(df: pd.DataFrame, min_suppport_thr: float) -> List[Transaction]:
    """
    Takes a data frame and a support threshold and returns itemsets which satisfy the threshold.

    The idea is to basically
        1. make a list of strings out of the df
        2. run the apriori API on it
        3. return the frequent itemsets

    :param df: dataframe, where each row is viewed as a transaction
    :param min_suppport_thr: minimum support threshold for an itemset to be kept
    :return: the list of frequent itemsets
    """
    from mdrsl.rule_generation.association_rule_mining.apyori_impl.apyori import RelationRecord, apriori
    from mdrsl.rule_generation.association_rule_mining.apyori_impl.apyori_utils import print_relation_record

    dataset_transactions: List[Transaction] = dataframe_to_list_of_transactions(df)

    results: List[RelationRecord] = list(apriori(dataset_transactions, min_support=min_suppport_thr))

    for relation_record in results:
        print_relation_record(relation_record)
        print("=====================================")

    list_of_frequent_itemsets: List[Transaction] = []
    for relation_record in results:  # type: RelationRecord
        itemset: Transaction = []
        for pred in relation_record.items:
            itemset.append(pred)
        list_of_frequent_itemsets.append(itemset)

    return list_of_frequent_itemsets
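A hedged usage sketch on a toy frame; `dataframe_to_list_of_transactions` is assumed to turn each row into a list of attribute-value strings as used above, and the threshold is illustrative.

import pandas as pd

toy_df = pd.DataFrame({'weather': ['sunny', 'sunny', 'rainy'],
                       'play':    ['yes',   'yes',   'no']})
frequent_itemsets = run_apyori_apriori(toy_df, min_suppport_thr=0.5)
print(frequent_itemsets)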
Python
def createCARs(rules) -> List[ClassAssocationRule]: """Function for converting output from fim.arules or fim.apriori to a list of ClassAssociationRules Parameters ---------- rules : output from fim.arules or from generateCARs Returns ------- list of CARs """ CARs: List[ClassAssocationRule] = [] for rule in rules: con_tmp, ant_tmp, support, confidence = rule con = Consequent(*con_tmp.split(":=:")) ant_items = [Item(*i.split(":=:")) for i in ant_tmp] ant = Antecedent(ant_items) CAR = ClassAssocationRule(ant, con, support=support, confidence=confidence) CARs.append(CAR) CARs.sort(reverse=True) return CARs