Dataset columns:
- repo_id: string (205 distinct values)
- file_path: string (length 33 to 141)
- content: string (length 1 to 307k)
- __index_level_0__: int64 (all values 0)
/home/johnshepherd/drake/bindings/pydrake
/home/johnshepherd/drake/bindings/pydrake/systems/__init__.py
# Empty Python module.
0
/home/johnshepherd/drake/bindings/pydrake
/home/johnshepherd/drake/bindings/pydrake/systems/perception.py
import numpy as np

from pydrake.common.value import Value
from pydrake.math import RigidTransform
from pydrake.perception import BaseField, Fields, PointCloud
from pydrake.systems.framework import LeafSystem


def _TransformPoints(points_Ci, X_CiSi):
    # Make homogeneous copy of points.
    points_h_Ci = np.vstack((points_Ci,
                             np.ones((1, points_Ci.shape[1]))))

    return X_CiSi.dot(points_h_Ci)[:3, :]


def _TileColors(color, dim):
    # Need manual broadcasting.
    return np.tile(np.array([color]).T, (1, dim))


def _ConcatenatePointClouds(points_dict, colors_dict):
    scene_points = None
    scene_colors = None

    for id in points_dict:
        if scene_points is None:
            scene_points = points_dict[id]
        else:
            scene_points = np.hstack((points_dict[id], scene_points))

        if scene_colors is None:
            scene_colors = colors_dict[id]
        else:
            scene_colors = np.hstack((colors_dict[id], scene_colors))

    valid_indices = np.logical_not(np.isnan(scene_points))

    scene_points = scene_points[:, valid_indices[0, :]]
    scene_colors = scene_colors[:, valid_indices[0, :]]

    return scene_points, scene_colors


class PointCloudConcatenation(LeafSystem):
    """
    .. pydrake_system::

        name: PointCloudConcatenation
        input_ports:
        - point_cloud_CiSi_id0
        - X_FCi_id0
        - ...
        - point_cloud_CiSi_idN
        - X_FCi_idN
        output_ports:
        - point_cloud_FS
    """

    def __init__(self, id_list, default_rgb=[255., 255., 255.]):
        """
        A system that takes in N point clouds of points Si in frame Ci, and N
        RigidTransforms from frame Ci to F, to put each point cloud in a
        common frame F. The system returns one point cloud combining all of
        the transformed point clouds. Each point cloud must have XYZs. RGBs
        are optional. If absent, those points will be the provided default
        color.

        @param id_list A list containing the string IDs of all of the point
            clouds. This is often the serial number of the camera they came
            from, such as "1" for a simulated camera or "805212060373" for a
            real camera.
        @param default_rgb A list of length 3 containing the RGB values to
            use in the absence of PointCloud.rgbs. Values should be between 0
            and 255. The default is white.
        """
        LeafSystem.__init__(self)

        self._point_cloud_ports = {}
        self._transform_ports = {}

        self._id_list = id_list

        self._default_rgb = np.array(default_rgb)

        output_fields = Fields(BaseField.kXYZs | BaseField.kRGBs)

        for id in self._id_list:
            self._point_cloud_ports[id] = self.DeclareAbstractInputPort(
                "point_cloud_CiSi_{}".format(id),
                Value(PointCloud(fields=output_fields)))

            self._transform_ports[id] = self.DeclareAbstractInputPort(
                "X_FCi_{}".format(id),
                Value(RigidTransform.Identity()))

        self.DeclareAbstractOutputPort(
            "point_cloud_FS",
            lambda: Value(PointCloud(fields=output_fields)),
            self.DoCalcOutput)

    def _AlignPointClouds(self, context):
        points = {}
        colors = {}

        for id in self._id_list:
            point_cloud = self.EvalAbstractInput(
                context, self._point_cloud_ports[id].get_index()).get_value()
            X_CiSi = self.EvalAbstractInput(
                context, self._transform_ports[id].get_index()).get_value()

            points[id] = _TransformPoints(
                point_cloud.xyzs(), X_CiSi.GetAsMatrix4())

            if point_cloud.has_rgbs():
                colors[id] = point_cloud.rgbs()
            else:
                colors[id] = _TileColors(
                    self._default_rgb, point_cloud.xyzs().shape[1])

        return _ConcatenatePointClouds(points, colors)

    def DoCalcOutput(self, context, output):
        scene_points, scene_colors = self._AlignPointClouds(context)

        output.get_mutable_value().resize(scene_points.shape[1])
        output.get_mutable_value().mutable_xyzs()[:] = scene_points
        output.get_mutable_value().mutable_rgbs()[:] = scene_colors
0
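A minimal usage sketch for perception.py above (not part of the dataset; the two camera IDs and the one-point cloud are made up for illustration): fix both point-cloud inputs and both X_FCi poses, then evaluate the fused output.

import numpy as np

from pydrake.math import RigidTransform
from pydrake.perception import BaseField, Fields, PointCloud
from pydrake.systems.framework import DiagramBuilder
from pydrake.systems.perception import PointCloudConcatenation

builder = DiagramBuilder()
pc_concat = builder.AddSystem(PointCloudConcatenation(id_list=["0", "1"]))
diagram = builder.Build()
root_context = diagram.CreateDefaultContext()
context = diagram.GetMutableSubsystemContext(pc_concat, root_context)

# Each camera i supplies points Si in its frame Ci plus the pose X_FCi.
cloud = PointCloud(1, Fields(BaseField.kXYZs))
cloud.mutable_xyzs()[:] = np.array([[0.1], [0.2], [0.3]])
for cam_id in ["0", "1"]:
    pc_concat.GetInputPort(f"point_cloud_CiSi_{cam_id}").FixValue(
        context, cloud)
    pc_concat.GetInputPort(f"X_FCi_{cam_id}").FixValue(
        context, RigidTransform.Identity())

fused = pc_concat.GetOutputPort("point_cloud_FS").Eval(context)
assert fused.size() == 2  # One point from each "camera", now in frame F.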
/home/johnshepherd/drake/bindings/pydrake
/home/johnshepherd/drake/bindings/pydrake/systems/sensors_py_rgbd.cc
#include "drake/bindings/pydrake/documentation_pybind.h" #include "drake/bindings/pydrake/systems/sensors_py.h" #include "drake/systems/sensors/camera_info.h" #include "drake/systems/sensors/rgbd_sensor.h" #include "drake/systems/sensors/rgbd_sensor_async.h" #include "drake/systems/sensors/rgbd_sensor_discrete.h" namespace drake { namespace pydrake { namespace internal { using geometry::render::ColorRenderCamera; using geometry::render::DepthRenderCamera; using systems::Diagram; using systems::LeafSystem; void DefineSensorsRgbd(py::module m) { // NOLINTNEXTLINE(build/namespaces): Emulate placement in namespace. using namespace drake::systems::sensors; constexpr auto& doc = pydrake_doc.drake.systems.sensors; auto def_camera_ports = [](auto* ppy_class, auto cls_doc) { auto& py_class = *ppy_class; using PyClass = std::decay_t<decltype(py_class)>; using Class = typename PyClass::type; py_class .def("query_object_input_port", &Class::query_object_input_port, py_rvp::reference_internal, cls_doc.query_object_input_port.doc) .def("color_image_output_port", &Class::color_image_output_port, py_rvp::reference_internal, cls_doc.color_image_output_port.doc) .def("depth_image_32F_output_port", &Class::depth_image_32F_output_port, py_rvp::reference_internal, cls_doc.depth_image_32F_output_port.doc) .def("depth_image_16U_output_port", &Class::depth_image_16U_output_port, py_rvp::reference_internal, cls_doc.depth_image_16U_output_port.doc) .def("label_image_output_port", &Class::label_image_output_port, py_rvp::reference_internal, cls_doc.label_image_output_port.doc) .def("body_pose_in_world_output_port", &Class::body_pose_in_world_output_port, py_rvp::reference_internal, cls_doc.body_pose_in_world_output_port.doc) .def("image_time_output_port", &Class::image_time_output_port, py_rvp::reference_internal, cls_doc.image_time_output_port.doc); }; { using Class = CameraInfo; constexpr auto& cls_doc = doc.CameraInfo; py::class_<Class> cls(m, "CameraInfo", cls_doc.doc); cls // BR .def(py::init<int, int, double>(), py::arg("width"), py::arg("height"), py::arg("fov_y"), cls_doc.ctor.doc_3args_width_height_fov_y) .def(py::init<int, int, const Eigen::Matrix3d&>(), py::arg("width"), py::arg("height"), py::arg("intrinsic_matrix"), cls_doc.ctor.doc_3args_width_height_intrinsic_matrix) .def(py::init<int, int, double, double, double, double>(), py::arg("width"), py::arg("height"), py::arg("focal_x"), py::arg("focal_y"), py::arg("center_x"), py::arg("center_y"), cls_doc.ctor .doc_6args_width_height_focal_x_focal_y_center_x_center_y) .def("width", &Class::width, cls_doc.width.doc) .def("height", &Class::height, cls_doc.height.doc) .def("focal_x", &Class::focal_x, cls_doc.focal_x.doc) .def("focal_y", &Class::focal_y, cls_doc.focal_y.doc) .def("fov_x", &Class::fov_x, cls_doc.fov_x.doc) .def("fov_y", &Class::fov_y, cls_doc.fov_y.doc) .def("center_x", &Class::center_x, cls_doc.center_x.doc) .def("center_y", &Class::center_y, cls_doc.center_y.doc) .def("intrinsic_matrix", &Class::intrinsic_matrix, cls_doc.intrinsic_matrix.doc) .def(py::pickle( [](const Class& self) { return py::make_tuple(self.width(), self.height(), self.focal_x(), self.focal_y(), self.center_x(), self.center_y()); }, [](py::tuple t) { DRAKE_DEMAND(t.size() == 6); return Class(t[0].cast<int>(), t[1].cast<int>(), t[2].cast<double>(), t[3].cast<double>(), t[4].cast<double>(), t[5].cast<double>()); })); } py::class_<RgbdSensor, LeafSystem<double>> rgbd_sensor( m, "RgbdSensor", doc.RgbdSensor.doc); rgbd_sensor .def(py::init<geometry::FrameId, const 
math::RigidTransformd&, ColorRenderCamera, DepthRenderCamera>(), py::arg("parent_id"), py::arg("X_PB"), py::arg("color_camera"), py::arg("depth_camera"), doc.RgbdSensor.ctor.doc_individual_intrinsics) .def(py::init<geometry::FrameId, const math::RigidTransformd&, const DepthRenderCamera&, bool>(), py::arg("parent_id"), py::arg("X_PB"), py::arg("depth_camera"), py::arg("show_window") = false, doc.RgbdSensor.ctor.doc_combined_intrinsics) .def("color_camera_info", &RgbdSensor::color_camera_info, py_rvp::reference_internal, doc.RgbdSensor.color_camera_info.doc) .def("depth_camera_info", &RgbdSensor::depth_camera_info, py_rvp::reference_internal, doc.RgbdSensor.depth_camera_info.doc) .def("X_BC", &RgbdSensor::X_BC, doc.RgbdSensor.X_BC.doc) .def("X_BD", &RgbdSensor::X_BD, doc.RgbdSensor.X_BD.doc) .def("parent_frame_id", &RgbdSensor::parent_frame_id, py_rvp::reference_internal, doc.RgbdSensor.parent_frame_id.doc); def_camera_ports(&rgbd_sensor, doc.RgbdSensor); py::class_<RgbdSensorDiscrete, Diagram<double>> rgbd_camera_discrete( m, "RgbdSensorDiscrete", doc.RgbdSensorDiscrete.doc); rgbd_camera_discrete .def(py::init<std::unique_ptr<RgbdSensor>, double, bool>(), py::arg("sensor"), py::arg("period") = double{RgbdSensorDiscrete::kDefaultPeriod}, py::arg("render_label_image") = true, // Keep alive, ownership: `sensor` keeps `self` alive. py::keep_alive<2, 1>(), doc.RgbdSensorDiscrete.ctor.doc) // N.B. Since `camera` is already connected, we do not need additional // `keep_alive`s. .def("sensor", &RgbdSensorDiscrete::sensor, py_rvp::reference_internal, doc.RgbdSensorDiscrete.sensor.doc) .def("period", &RgbdSensorDiscrete::period, doc.RgbdSensorDiscrete.period.doc); def_camera_ports(&rgbd_camera_discrete, doc.RgbdSensorDiscrete); rgbd_camera_discrete.attr("kDefaultPeriod") = double{RgbdSensorDiscrete::kDefaultPeriod}; { using Class = RgbdSensorAsync; constexpr auto& cls_doc = doc.RgbdSensorAsync; py::class_<Class, LeafSystem<double>>(m, "RgbdSensorAsync", cls_doc.doc) .def(py::init<const geometry::SceneGraph<double>*, geometry::FrameId, const math::RigidTransformd&, double, double, double, std::optional<ColorRenderCamera>, std::optional<DepthRenderCamera>, bool>(), py::arg("scene_graph"), py::arg("parent_id"), py::arg("X_PB"), py::arg("fps"), py::arg("capture_offset"), py::arg("output_delay"), py::arg("color_camera"), py::arg("depth_camera") = std::nullopt, py::arg("render_label_image") = false, cls_doc.ctor.doc) .def("parent_id", &Class::parent_id, cls_doc.parent_id.doc) .def("X_PB", &Class::X_PB, cls_doc.X_PB.doc) .def("fps", &Class::fps, cls_doc.fps.doc) .def("capture_offset", &Class::capture_offset, cls_doc.capture_offset.doc) .def("output_delay", &Class::output_delay, cls_doc.output_delay.doc) .def("color_camera", &Class::color_camera, cls_doc.color_camera.doc) .def("depth_camera", &Class::depth_camera, cls_doc.depth_camera.doc) .def("color_image_output_port", &Class::color_image_output_port, py_rvp::reference_internal, cls_doc.color_image_output_port.doc) .def("depth_image_32F_output_port", &Class::depth_image_32F_output_port, py_rvp::reference_internal, cls_doc.depth_image_32F_output_port.doc) .def("depth_image_16U_output_port", &Class::depth_image_16U_output_port, py_rvp::reference_internal, cls_doc.depth_image_16U_output_port.doc) .def("label_image_output_port", &Class::label_image_output_port, py_rvp::reference_internal, cls_doc.label_image_output_port.doc) .def("body_pose_in_world_output_port", &Class::body_pose_in_world_output_port, py_rvp::reference_internal, 
cls_doc.body_pose_in_world_output_port.doc) .def("image_time_output_port", &Class::image_time_output_port, py_rvp::reference_internal, cls_doc.image_time_output_port.doc); } } } // namespace internal } // namespace pydrake } // namespace drake
0
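The file above binds CameraInfo (among others) into pydrake.systems.sensors. A small sketch (values arbitrary) of constructing intrinsics from a vertical field of view and cross-checking the pinhole accessors against intrinsic_matrix():

import numpy as np

from pydrake.systems.sensors import CameraInfo

intrinsics = CameraInfo(width=640, height=480, fov_y=np.pi / 4)
K = intrinsics.intrinsic_matrix()  # 3x3 pinhole matrix.
# For this constructor, the focal length is height / (2 * tan(fov_y / 2)).
assert np.isclose(intrinsics.focal_y(), 480 / (2 * np.tan(np.pi / 8)))
assert np.isclose(K[0, 0], intrinsics.focal_x())
assert np.isclose(K[1, 2], intrinsics.center_y())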
/home/johnshepherd/drake/bindings/pydrake
/home/johnshepherd/drake/bindings/pydrake/systems/sensors_py_camera_config.cc
#include "drake/bindings/pydrake/common/serialize_pybind.h" #include "drake/bindings/pydrake/documentation_pybind.h" #include "drake/bindings/pydrake/systems/sensors_py.h" #include "drake/systems/sensors/camera_config_functions.h" namespace drake { namespace pydrake { namespace internal { using systems::DiagramBuilder; void DefineSensorsCameraConfig(py::module m) { // NOLINTNEXTLINE(build/namespaces): Emulate placement in namespace. using namespace drake::systems::sensors; constexpr auto& doc = pydrake_doc.drake.systems.sensors; { // To bind nested serializable structs without errors, we declare the outer // struct first, then bind its inner structs, then bind the outer struct. constexpr auto& config_cls_doc = doc.CameraConfig; py::class_<CameraConfig> config_cls(m, "CameraConfig", config_cls_doc.doc); // Inner struct. constexpr auto& fov_degrees_doc = doc.CameraConfig.FovDegrees; py::class_<CameraConfig::FovDegrees> fov_class( config_cls, "FovDegrees", fov_degrees_doc.doc); fov_class // BR .def(ParamInit<CameraConfig::FovDegrees>()); DefAttributesUsingSerialize(&fov_class, fov_degrees_doc); DefReprUsingSerialize(&fov_class); DefCopyAndDeepCopy(&fov_class); // Inner struct. constexpr auto& focal_doc = doc.CameraConfig.FocalLength; py::class_<CameraConfig::FocalLength> focal_class( config_cls, "FocalLength", focal_doc.doc); focal_class // BR .def(ParamInit<CameraConfig::FocalLength>()); DefAttributesUsingSerialize(&focal_class, focal_doc); DefReprUsingSerialize(&focal_class); DefCopyAndDeepCopy(&focal_class); // Now we can bind the outer struct (see above). config_cls // BR .def(ParamInit<CameraConfig>()) .def("focal_x", &CameraConfig::focal_x, config_cls_doc.focal_x.doc) .def("focal_y", &CameraConfig::focal_y, config_cls_doc.focal_y.doc) .def("principal_point", &CameraConfig::principal_point, config_cls_doc.principal_point.doc) .def("MakeCameras", &CameraConfig::MakeCameras, config_cls_doc.MakeCameras.doc); DefAttributesUsingSerialize(&config_cls, config_cls_doc); DefReprUsingSerialize(&config_cls); DefCopyAndDeepCopy(&config_cls); } m.def("ApplyCameraConfig", py::overload_cast<const CameraConfig&, DiagramBuilder<double>*, const systems::lcm::LcmBuses*, const multibody::MultibodyPlant<double>*, geometry::SceneGraph<double>*, drake::lcm::DrakeLcmInterface*>( &ApplyCameraConfig), py::arg("config"), py::arg("builder"), py::arg("lcm_buses") = nullptr, py::arg("plant") = nullptr, py::arg("scene_graph") = nullptr, py::arg("lcm") = nullptr, // Keep alive, reference: `builder` keeps `lcm` alive. py::keep_alive<2, 6>(), doc.ApplyCameraConfig.doc); } } // namespace internal } // namespace pydrake } // namespace drake
0
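A sketch of exercising these CameraConfig bindings from Python (field values are arbitrary; this assumes a builder populated via AddMultibodyPlantSceneGraph so that ApplyCameraConfig can locate the plant and scene graph, with the remaining arguments left at their nullptr defaults):

from pydrake.multibody.plant import AddMultibodyPlantSceneGraph
from pydrake.systems.framework import DiagramBuilder
from pydrake.systems.sensors import ApplyCameraConfig, CameraConfig

config = CameraConfig()
config.name = "camera0"
config.width = 640
config.height = 480
# MakeCameras returns the (ColorRenderCamera, DepthRenderCamera) pair.
color_camera, depth_camera = config.MakeCameras()

builder = DiagramBuilder()
plant, scene_graph = AddMultibodyPlantSceneGraph(builder, time_step=0.01)
plant.Finalize()
# Adds an RgbdSensor (and LCM image publishing) for `config` to the diagram.
ApplyCameraConfig(config=config, builder=builder)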
/home/johnshepherd/drake/bindings/pydrake
/home/johnshepherd/drake/bindings/pydrake/systems/lcm_py_bind_cpp_serializers.cc
#include "drake/bindings/pydrake/systems/lcm_py_bind_cpp_serializers.h" #include "drake/bindings/pydrake/systems/lcm_pybind.h" #include "drake/experimental_lcmt_deformable_tri.hpp" #include "drake/experimental_lcmt_deformable_tri_mesh_init.hpp" #include "drake/experimental_lcmt_deformable_tri_mesh_update.hpp" #include "drake/experimental_lcmt_deformable_tri_meshes_init.hpp" #include "drake/experimental_lcmt_deformable_tri_meshes_update.hpp" #include "drake/lcmt_acrobot_u.hpp" #include "drake/lcmt_acrobot_x.hpp" #include "drake/lcmt_acrobot_y.hpp" #include "drake/lcmt_allegro_command.hpp" #include "drake/lcmt_allegro_status.hpp" #include "drake/lcmt_call_python.hpp" #include "drake/lcmt_call_python_data.hpp" #include "drake/lcmt_contact_results_for_viz.hpp" #include "drake/lcmt_drake_signal.hpp" #include "drake/lcmt_force_torque.hpp" #include "drake/lcmt_header.hpp" #include "drake/lcmt_hydroelastic_contact_surface_for_viz.hpp" #include "drake/lcmt_hydroelastic_quadrature_per_point_data_for_viz.hpp" #include "drake/lcmt_iiwa_command.hpp" #include "drake/lcmt_iiwa_status.hpp" #include "drake/lcmt_iiwa_status_telemetry.hpp" #include "drake/lcmt_image.hpp" #include "drake/lcmt_image_array.hpp" #include "drake/lcmt_jaco_command.hpp" #include "drake/lcmt_jaco_status.hpp" #include "drake/lcmt_panda_command.hpp" #include "drake/lcmt_panda_status.hpp" #include "drake/lcmt_planar_gripper_command.hpp" #include "drake/lcmt_planar_gripper_finger_command.hpp" #include "drake/lcmt_planar_gripper_finger_face_assignment.hpp" #include "drake/lcmt_planar_gripper_finger_face_assignments.hpp" #include "drake/lcmt_planar_gripper_finger_status.hpp" #include "drake/lcmt_planar_gripper_status.hpp" #include "drake/lcmt_planar_manipuland_status.hpp" #include "drake/lcmt_planar_plant_state.hpp" #include "drake/lcmt_point.hpp" #include "drake/lcmt_point_cloud.hpp" #include "drake/lcmt_point_cloud_field.hpp" #include "drake/lcmt_point_pair_contact_info_for_viz.hpp" #include "drake/lcmt_quaternion.hpp" #include "drake/lcmt_robot_plan.hpp" #include "drake/lcmt_robot_state.hpp" #include "drake/lcmt_schunk_wsg_command.hpp" #include "drake/lcmt_schunk_wsg_status.hpp" #include "drake/lcmt_scope.hpp" #include "drake/lcmt_viewer_command.hpp" #include "drake/lcmt_viewer_draw.hpp" #include "drake/lcmt_viewer_geometry_data.hpp" #include "drake/lcmt_viewer_link_data.hpp" #include "drake/lcmt_viewer_load_robot.hpp" namespace drake { namespace pydrake { namespace pysystems { namespace pylcm { void BindCppSerializers() { // N.B. These should be placed in the same order as the headers. 
BindCppSerializer<drake::experimental_lcmt_deformable_tri>("drake"); BindCppSerializer<drake::experimental_lcmt_deformable_tri_mesh_init>("drake"); BindCppSerializer<drake::experimental_lcmt_deformable_tri_mesh_update>( "drake"); BindCppSerializer<drake::experimental_lcmt_deformable_tri_meshes_init>( "drake"); BindCppSerializer<drake::experimental_lcmt_deformable_tri_meshes_update>( "drake"); BindCppSerializer<drake::lcmt_acrobot_u>("drake"); BindCppSerializer<drake::lcmt_acrobot_x>("drake"); BindCppSerializer<drake::lcmt_acrobot_y>("drake"); BindCppSerializer<drake::lcmt_allegro_command>("drake"); BindCppSerializer<drake::lcmt_allegro_status>("drake"); BindCppSerializer<drake::lcmt_call_python>("drake"); BindCppSerializer<drake::lcmt_call_python_data>("drake"); BindCppSerializer<drake::lcmt_contact_results_for_viz>("drake"); BindCppSerializer<drake::lcmt_drake_signal>("drake"); BindCppSerializer<drake::lcmt_force_torque>("drake"); BindCppSerializer<drake::lcmt_header>("drake"); BindCppSerializer<drake::lcmt_hydroelastic_contact_surface_for_viz>("drake"); BindCppSerializer<drake::lcmt_hydroelastic_quadrature_per_point_data_for_viz>( "drake"); BindCppSerializer<drake::lcmt_iiwa_command>("drake"); BindCppSerializer<drake::lcmt_iiwa_status>("drake"); BindCppSerializer<drake::lcmt_iiwa_status_telemetry>("drake"); BindCppSerializer<drake::lcmt_image>("drake"); BindCppSerializer<drake::lcmt_image_array>("drake"); BindCppSerializer<drake::lcmt_jaco_command>("drake"); BindCppSerializer<drake::lcmt_jaco_status>("drake"); BindCppSerializer<drake::lcmt_panda_command>("drake"); BindCppSerializer<drake::lcmt_panda_status>("drake"); BindCppSerializer<drake::lcmt_planar_gripper_command>("drake"); BindCppSerializer<drake::lcmt_planar_gripper_finger_command>("drake"); BindCppSerializer<drake::lcmt_planar_gripper_finger_face_assignment>("drake"); BindCppSerializer<drake::lcmt_planar_gripper_finger_face_assignments>( "drake"); BindCppSerializer<drake::lcmt_planar_gripper_finger_status>("drake"); BindCppSerializer<drake::lcmt_planar_gripper_status>("drake"); BindCppSerializer<drake::lcmt_planar_manipuland_status>("drake"); BindCppSerializer<drake::lcmt_planar_plant_state>("drake"); BindCppSerializer<drake::lcmt_point>("drake"); BindCppSerializer<drake::lcmt_point_cloud>("drake"); BindCppSerializer<drake::lcmt_point_cloud_field>("drake"); BindCppSerializer<drake::lcmt_point_pair_contact_info_for_viz>("drake"); BindCppSerializer<drake::lcmt_quaternion>("drake"); BindCppSerializer<drake::lcmt_robot_plan>("drake"); BindCppSerializer<drake::lcmt_robot_state>("drake"); BindCppSerializer<drake::lcmt_schunk_wsg_command>("drake"); BindCppSerializer<drake::lcmt_schunk_wsg_status>("drake"); BindCppSerializer<drake::lcmt_scope>("drake"); BindCppSerializer<drake::lcmt_viewer_command>("drake"); BindCppSerializer<drake::lcmt_viewer_draw>("drake"); BindCppSerializer<drake::lcmt_viewer_geometry_data>("drake"); BindCppSerializer<drake::lcmt_viewer_link_data>("drake"); BindCppSerializer<drake::lcmt_viewer_load_robot>("drake"); } } // namespace pylcm } // namespace pysystems } // namespace pydrake } // namespace drake
0
/home/johnshepherd/drake/bindings/pydrake
/home/johnshepherd/drake/bindings/pydrake/systems/pyplot_visualizer.py
import matplotlib
import numpy as np
from warnings import warn

from pydrake.systems.framework import LeafSystem, PublishEvent, TriggerType
from pydrake.systems.primitives import VectorLog
from pydrake.trajectories import Trajectory
from pydrake.systems._resample_interp1d import _resample_interp1d


class PyPlotVisualizer(LeafSystem):
    """
    Base class for planar visualization that relies on pyplot.

    In the configuration set up here, this visualizer provides one
    visualization window (self.fig) with axes (self.ax). The axes can
    optionally be supplied externally to allow other visualizers to overlay
    additional information.

    Subclasses must:
    - During initialization, set up the figure bounds and register the input
      port with the appropriate message type.
    - Override the draw method to parse the input and draw the robot in the
      appropriate state.
    """

    def __init__(self, draw_period=None, facecolor=[1, 1, 1],
                 figsize=None, ax=None, show=None):
        LeafSystem.__init__(self)

        # On Ubuntu the Debian package python3-tk is a recommended (but not
        # required) dependency of python3-matplotlib; help users understand
        # that by providing a nicer message upon a failure to import.
        try:
            import matplotlib.pyplot as plt
            self._plt = plt
        except ImportError as e:
            if e.name == 'tkinter':
                self._plt = None
            else:
                raise
        if self._plt is None:
            raise NotImplementedError(
                "On Ubuntu when using the default pyplot configuration (i.e.,"
                " the TkAgg backend) you must 'sudo apt install python3-tk'"
                " to obtain Tk support. Alternatively, you may set MPLBACKEND"
                " to something else (e.g., Qt5Agg).")

        # To help avoid small simulation time steps, we use a default period
        # that has an exact representation in binary floating point; see
        # drake#15021 for details.
        default_draw_period = 1./32

        self.set_name('pyplot_visualization')
        self.time_step = draw_period or default_draw_period
        self.DeclareForcedPublishEvent(self._on_any_publish)
        self.DeclarePeriodicPublishEvent(
            self.time_step, 0.0, self._on_any_publish)

        if ax is None:
            self.fig = self._plt.figure(facecolor=facecolor, figsize=figsize)
            self.ax = self._plt.axes()
            self.fig.add_axes(self.ax)
        else:
            self.ax = ax
            self.fig = ax.get_figure()

        if show is None:
            show = (matplotlib.get_backend().lower() != 'template')
        self._show = show

        self.ax.axis('equal')
        self.ax.axis('off')

        if not show:
            # This is the preferred way to support the jupyter notebook
            # animation workflow and the `inline` backend grabbing an
            # extraneous render of the figure.
            self._plt.close(self.fig)

        self._is_recording = False
        self._recorded_contexts = []

        def on_initialize(context):
            if self._show:
                self.fig.show()

        self.DeclareInitializationPublishEvent(on_initialize)
        self.DeclareInitializationPublishEvent(self._on_any_publish)

    def _on_any_publish(self, context):
        if self._show:
            self.draw(context)
            self.fig.canvas.draw()
            self._plt.pause(1e-10)
        if self._is_recording:
            snapshot = self.AllocateContext()
            snapshot.SetTimeStateAndParametersFrom(context)
            self.FixInputPortsFrom(self, context, snapshot)
            self._recorded_contexts.append(snapshot)

    def draw(self, context):
        """Draws a single frame.
        `context` can either be a Context object, or a raw vector (for ease
        of interpolation).
        """
        raise NotImplementedError

    def start_recording(self):
        self._is_recording = True

    def stop_recording(self):
        self._is_recording = False

    def reset_recording(self):
        self._recorded_contexts = []  # Reset recorded data.

    def _draw_recorded_frame(self, i):
        return self.draw(self._recorded_contexts[i])

    def get_recording_as_animation(self, **kwargs):
        # We defer this import to this call site to prevent the import
        # from hanging. See #18323.
        import matplotlib.animation as animation
        ani = animation.FuncAnimation(fig=self.fig,
                                      func=self._draw_recorded_frame,
                                      frames=len(self._recorded_contexts),
                                      interval=1000*self.time_step,
                                      **kwargs)
        return ani

    def animate(self, log, resample=True, **kwargs):
        """
        Args:
            log: A reference to a pydrake.systems.primitives.VectorLog, or a
                pydrake.trajectories.Trajectory that contains the plant state
                after running a simulation.
            resample: Whether we should do a resampling operation to make the
                samples more consistent in time. This can be disabled if you
                know the draw_period passed into the constructor exactly
                matches the sample time step of the log.
        Additional kwargs are passed through to FuncAnimation.
        """
        if isinstance(log, VectorLog):
            t = log.sample_times()
            x = log.data()

            if resample:
                t, x = _resample_interp1d(t, x, self.time_step)
        elif isinstance(log, Trajectory):
            t = np.arange(log.start_time(), log.end_time(), self.time_step)
            x = np.hstack([log.value(time) for time in t])

        def animate_update(i):
            self.draw(x[:, i])

        # We defer this import to this call site to prevent the import
        # from hanging. See #18323.
        import matplotlib.animation as animation
        ani = animation.FuncAnimation(fig=self.fig,
                                      func=animate_update,
                                      frames=t.shape[0],
                                      # Convert from s to ms.
                                      interval=1000*self.time_step,
                                      **kwargs)
        return ani
0
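A minimal subclass sketch for the base class above (the PointVisualizer name and the one-state input are illustrative, not part of the file): draw a marker whose x-position is the first element of the input.

import numpy as np

from pydrake.systems.pyplot_visualizer import PyPlotVisualizer


class PointVisualizer(PyPlotVisualizer):
    def __init__(self):
        PyPlotVisualizer.__init__(self)
        self.DeclareVectorInputPort("state", 1)
        self.ax.set_xlim(-2., 2.)
        self.ax.set_ylim(-1., 1.)
        self.marker = self.ax.plot([0.], [0.], 'bo')[0]

    def draw(self, context):
        # Per the base-class contract, `context` is either a Context or a
        # raw state vector (the latter when called from animate()).
        if isinstance(context, np.ndarray):
            x = context[0]
        else:
            x = self.get_input_port(0).Eval(context)[0]
        self.marker.set_data([x], [0.])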
/home/johnshepherd/drake/bindings/pydrake
/home/johnshepherd/drake/bindings/pydrake/systems/framework_py_systems.h
#pragma once

/// @file
/// Provides the Python bindings for the core system classes of the systems
/// framework.

#include "drake/bindings/pydrake/pydrake_pybind.h"

namespace drake {
namespace pydrake {

void DefineFrameworkPySystems(py::module m);

}  // namespace pydrake
}  // namespace drake
0
/home/johnshepherd/drake/bindings/pydrake
/home/johnshepherd/drake/bindings/pydrake/systems/_lcm_extra.py
# See `ExecuteExtraPythonCode` in `pydrake_pybind.h` for usage details and
# rationale.

from pydrake.common.value import AbstractValue as _AbstractValue


class PySerializer(SerializerInterface):
    """Provides a Python implementation of `SerializerInterface` for use
    with `LcmPublisherSystem` and `LcmSubscriberSystem` when the given
    `lcm_type` is a Python object (not a C++ object).
    """
    def __init__(self, lcm_type):
        SerializerInterface.__init__(self)
        self._lcm_type = lcm_type

    def __repr__(self):
        return f"PySerializer({self._lcm_type.__name__})"

    def CreateDefaultValue(self):
        return _AbstractValue.Make(self._lcm_type())

    def Deserialize(self, buffer, abstract_value):
        message = self._lcm_type.decode(buffer)
        abstract_value.set_value(message)

    def Serialize(self, abstract_value):
        assert isinstance(abstract_value, _AbstractValue)
        message = abstract_value.get_value()
        assert isinstance(message, self._lcm_type)
        return message.encode()


@staticmethod
def _make_lcm_subscriber(channel,
                         lcm_type,
                         lcm,
                         use_cpp_serializer=False,
                         *,
                         wait_for_message_on_initialization_timeout=0.0):
    """Convenience to create an LCM subscriber system with a concrete type.

    Args:
        channel: LCM channel name.
        lcm_type: Python class generated by lcmgen.
        lcm: LCM service instance.
        use_cpp_serializer: Use C++ serializer to interface with LCM
            converter systems that are implemented in C++. LCM types must be
            registered in C++ via ``BindCppSerializer``.
        wait_for_message_on_initialization_timeout: Configures the behavior
            of initialization events (see
            ``System.ExecuteInitializationEvents`` and
            ``Simulator.Initialize``) by specifying the number of seconds
            (wall-clock elapsed time) to wait for a new message. If this
            timeout is <= 0, initialization will copy any already-received
            messages into the Context but will not process any new messages.
            If this timeout is > 0, initialization will call
            ``lcm.HandleSubscriptions()`` until at least one message is
            received or until the timeout. Pass ∞ to wait indefinitely.
    """
    # TODO(eric.cousineau): Make `use_cpp_serializer` be kwarg-only.
    # N.B. This documentation is actually public, as it is assigned to
    # classes below as a static class method.
    if not use_cpp_serializer:
        serializer = PySerializer(lcm_type)
    else:
        serializer = _Serializer_[lcm_type]()
    return LcmSubscriberSystem(channel, serializer, lcm,
                               wait_for_message_on_initialization_timeout)


@staticmethod
def _make_lcm_publisher(
        channel,
        lcm_type,
        lcm,
        publish_period=0.0,
        use_cpp_serializer=False,
        *,
        publish_triggers=None,
        publish_offset=0.0):
    """Convenience to create an LCM publisher system with a concrete type.

    Args:
        channel: LCM channel name.
        lcm_type: Python class generated by lcmgen.
        lcm: LCM service instance.
        publish_period: System's publish period (in seconds). Default is 0.
        publish_offset: System's publish offset (in seconds). Default is 0.
        use_cpp_serializer: Use C++ serializer to interface with LCM
            converter systems that are implemented in C++. LCM types must be
            registered in C++ via `BindCppSerializer`.
    """
    # TODO(eric.cousineau): Make `use_cpp_serializer` be kwarg-only.
    if not use_cpp_serializer:
        serializer = PySerializer(lcm_type)
    else:
        serializer = _Serializer_[lcm_type]()
    if publish_triggers is not None:
        return LcmPublisherSystem(
            channel=channel,
            serializer=serializer,
            lcm=lcm,
            publish_triggers=publish_triggers,
            publish_period=publish_period,
            publish_offset=publish_offset)
    else:
        return LcmPublisherSystem(
            channel=channel,
            serializer=serializer,
            lcm=lcm,
            publish_period=publish_period,
            publish_offset=publish_offset)


LcmSubscriberSystem.Make = _make_lcm_subscriber
LcmPublisherSystem.Make = _make_lcm_publisher
0
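Usage sketch for the Make helpers attached above (the channel name is arbitrary): build a subscriber/publisher pair for a Python LCM message type.

from drake import lcmt_quaternion
from pydrake.lcm import DrakeLcm
from pydrake.systems.lcm import LcmPublisherSystem, LcmSubscriberSystem

lcm = DrakeLcm()
sub = LcmSubscriberSystem.Make(
    channel="QUAT", lcm_type=lcmt_quaternion, lcm=lcm)
pub = LcmPublisherSystem.Make(
    channel="QUAT", lcm_type=lcmt_quaternion, lcm=lcm, publish_period=0.01)
# Passing use_cpp_serializer=True instead selects the C++ serializer
# registered via BindCppSerializer (see lcm_py_bind_cpp_serializers.cc).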
/home/johnshepherd/drake/bindings/pydrake
/home/johnshepherd/drake/bindings/pydrake/systems/jupyter_widgets_examples.ipynb
running_as_notebook = False  # Manually set this to True if you are a human (see #13862).

from IPython.display import display
from ipywidgets import ToggleButton
import numpy as np

from pydrake.common.value import Value
from pydrake.geometry import DrakeVisualizer, FramePoseVector, SceneGraph
from pydrake.math import RigidTransform
from pydrake.multibody.plant import MultibodyPlant
from pydrake.multibody.parsing import Parser
from pydrake.multibody.tree import BodyIndex
from pydrake.systems.analysis import Simulator
from pydrake.systems.framework import DiagramBuilder, LeafSystem
from pydrake.systems.jupyter_widgets import PoseSliders


class PoseToFramePoseVector(LeafSystem):
    """
    Wraps a single pose input into a FramePoseVector.
    """
    def __init__(self, frame_id):
        LeafSystem.__init__(self)
        self.frame_id = frame_id
        self.DeclareAbstractInputPort(
            "pose", Value(RigidTransform.Identity()))
        self.DeclareAbstractOutputPort(
            "vector", lambda: Value(FramePoseVector()), self.CalcOutput)

    def CalcOutput(self, context, output):
        pose = self.EvalAbstractInput(context, 0).get_value()
        output.get_mutable_value().set_value(id=self.frame_id, value=pose)


builder = DiagramBuilder()

# Note: Don't use AddMultibodyPlantSceneGraph because we are only using
# MultibodyPlant for parsing, then wiring our sliders directly to SceneGraph.
scene_graph = builder.AddSystem(SceneGraph())
plant = MultibodyPlant(time_step=0.0)
plant.RegisterAsSourceForSceneGraph(scene_graph)
Parser(plant, scene_graph).AddModelsFromUrl(
    url="package://drake_models/ycb/006_mustard_bottle.sdf")
plant.Finalize()
frame_id = plant.GetBodyFrameIdOrThrow(BodyIndex(1))

# Add pose sliders.
sliders = builder.AddSystem(PoseSliders(
    visible=PoseSliders.Visible(),
    min_range=PoseSliders.MinRange(),
    max_range=PoseSliders.MaxRange(),
    value=PoseSliders.Value()))
to_vector = builder.AddSystem(PoseToFramePoseVector(frame_id))
builder.Connect(sliders.get_output_port(0), to_vector.get_input_port(0))
builder.Connect(
    to_vector.get_output_port(0),
    scene_graph.get_source_pose_port(plant.get_source_id()))

# Note: We can't use AddDefaultVisualization because there is no plant
# in the Diagram.
DrakeVisualizer.AddToBuilder(builder, scene_graph)

diagram = builder.Build()
simulator = Simulator(diagram)

if running_as_notebook:
    simulator.set_target_realtime_rate(1.0)
    stop_button = ToggleButton(value=False, description='Stop Simulation')
    display(stop_button)
    while not stop_button.value:
        simulator.AdvanceTo(simulator.get_context().get_time() + 5.0)
    stop_button.value = False
else:  # running as a test.
    simulator.AdvanceTo(0.1)

from ipywidgets import Checkbox, FloatSlider, FloatText, ToggleButtons
from pydrake.systems.jupyter_widgets import WidgetSystem

checkbox = Checkbox(value=False, description='Check box')
slider = FloatSlider(value=3.27, min=0, max=10.0, step=0.1,
                     description='Slider', continuous_update=True)
text = FloatText(value=7.5, description='Text')
wsg = ToggleButtons(value=0.107, description="SchunkWsg",
                    options=[('Open', 0.107), ('Close', 0.002)])
sys = WidgetSystem([checkbox], [slider], [text], [wsg],
                   [checkbox, slider, text, wsg])

context = sys.CreateDefaultContext()
for i in range(sys.num_output_ports()):
    print(sys.get_output_port(i).Eval(context))
0
/home/johnshepherd/drake/bindings/pydrake/systems
/home/johnshepherd/drake/bindings/pydrake/systems/test/test_util_py.cc
#include "drake/bindings/pydrake/common/value_pybind.h" #include "drake/bindings/pydrake/pydrake_pybind.h" #include "drake/systems/analysis/simulator.h" #include "drake/systems/framework/basic_vector.h" #include "drake/systems/framework/leaf_system.h" #include "drake/systems/framework/vector_system.h" using std::unique_ptr; namespace drake { using systems::BasicVector; using systems::LeafSystem; using systems::Simulator; namespace pydrake { namespace { using T = double; // Informs listener when this class is deleted. class DeleteListenerSystem : public LeafSystem<T> { public: explicit DeleteListenerSystem(std::function<void()> delete_callback) : LeafSystem<T>(), delete_callback_(delete_callback) {} ~DeleteListenerSystem() override { delete_callback_(); } private: std::function<void()> delete_callback_; }; class DeleteListenerVector : public BasicVector<T> { public: explicit DeleteListenerVector(std::function<void()> delete_callback) : BasicVector(VectorX<T>::Constant(1, 0.)), delete_callback_(delete_callback) {} ~DeleteListenerVector() override { delete_callback_(); } private: std::function<void()> delete_callback_; }; // A simple 2-dimensional subclass of BasicVector for testing. template <typename T> class MyVector2 : public BasicVector<T> { public: DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(MyVector2) explicit MyVector2(const Vector2<T>& data) : BasicVector<T>(data) {} private: MyVector2* DoClone() const override { return new MyVector2(this->get_value()); } }; } // namespace PYBIND11_MODULE(test_util, m) { // NOLINTNEXTLINE(build/namespaces): Emulate placement in namespace. using namespace systems; // Import dependencies. py::module::import("pydrake.systems.framework"); py::module::import("pydrake.systems.primitives"); py::class_<DeleteListenerSystem, LeafSystem<T>>(m, "DeleteListenerSystem") .def(py::init<std::function<void()>>()); py::class_<DeleteListenerVector, BasicVector<T>>(m, "DeleteListenerVector") .def(py::init<std::function<void()>>()); // A 2-dimensional subclass of BasicVector. py::class_<MyVector2<T>, BasicVector<T>>(m, "MyVector2") .def(py::init<const Eigen::Vector2d&>(), py::arg("data")); // Call overrides to ensure a custom Python class can override these methods. m.def("call_leaf_system_overrides", [](const LeafSystem<T>& system) { py::dict results; auto context = system.AllocateContext(); { // Leverage simulator to call initialization events. // TODO(eric.cousineau): Simplify as part of #10015. Simulator<T> simulator(system); // Do not publish at initialization because we want to track publishes // from only events of trigger type `kInitialization`. simulator.set_publish_at_initialization(false); simulator.Initialize(); } { // Call `Publish` to test `DoPublish`. auto events = LeafEventCollection<PublishEvent<T>>::MakeForcedEventCollection(); const EventStatus status = system.Publish(*context, *events); DRAKE_DEMAND(status.did_nothing()); } { // Call `HasDirectFeedthrough` to test `DoHasDirectFeedthrough`. results["has_direct_feedthrough"] = system.HasDirectFeedthrough(0, 0); } { // Call `CalcTimeDerivatives` to test `DoCalcTimeDerivatives` auto state_dot = system.AllocateTimeDerivatives(); system.CalcTimeDerivatives(*context, state_dot.get()); } { // Call `CalcForcedDiscreteVariableUpdate` to test // `DoCalcDiscreteVariableUpdates`. auto& state = context->get_mutable_discrete_state(); auto state_copy = state.Clone(); system.CalcForcedDiscreteVariableUpdate(*context, state_copy.get()); // From t=0, return next update time for testing discrete time. 
// If there is an abstract / unrestricted update, this assumes that // `dt_discrete < dt_abstract`. auto events = system.AllocateCompositeEventCollection(); results["discrete_next_t"] = system.CalcNextUpdateTime(*context, events.get()); } return results; }); m.def("call_vector_system_overrides", [](const VectorSystem<T>& system, Context<T>* context, bool is_discrete, double dt) { // While this is not convention, update state first to ensure that our // output incorporates it correctly, for testing purposes. // TODO(eric.cousineau): Add (Continuous|Discrete)State::Clone(). if (is_discrete) { auto& state = context->get_mutable_discrete_state(); auto state_copy = state.Clone(); system.CalcForcedDiscreteVariableUpdate(*context, state_copy.get()); state.SetFrom(*state_copy); } else { auto& state = context->get_mutable_continuous_state(); auto state_dot = system.AllocateTimeDerivatives(); system.CalcTimeDerivatives(*context, state_dot.get()); state.SetFromVector( state.CopyToVector() + dt * state_dot->CopyToVector()); } // Calculate output. auto output = system.AllocateOutput(); system.CalcOutput(*context, output.get()); return output; }); } } // namespace pydrake } // namespace drake
0
/home/johnshepherd/drake/bindings/pydrake/systems
/home/johnshepherd/drake/bindings/pydrake/systems/test/scalar_conversion_test.py
import pydrake.systems.scalar_conversion as mut

import copy
import unittest

from pydrake.autodiffutils import AutoDiffXd
from pydrake.symbolic import Expression
from pydrake.systems.framework import (
    DiagramBuilder,
    LeafSystem_,
    SystemScalarConverter,
)
from pydrake.common.cpp_template import TemplateClass


@mut.TemplateSystem.define("Example_")
def Example_(T):

    class Impl(LeafSystem_[T]):
        """Testing example."""

        def _construct(self, value, converter=None):
            LeafSystem_[T].__init__(self, converter=converter)
            self.value = value
            self.copied_from = None

        def _construct_copy(self, other, converter=None):
            Impl._construct(self, other.value, converter=converter)
            self.copied_from = other

    return Impl


Example = Example_[None]


class TestScalarConversion(unittest.TestCase):
    def test_converter_attributes(self):
        conversion_scalars = (
            float, AutoDiffXd, Expression,
        )
        self.assertTupleEqual(
            SystemScalarConverter.SupportedScalars,
            conversion_scalars)
        conversion_pairs = (
            (AutoDiffXd, float),
            (Expression, float),
            (float, AutoDiffXd),
            (Expression, AutoDiffXd),
            (float, Expression),
            (AutoDiffXd, Expression),
        )
        self.assertTupleEqual(
            SystemScalarConverter.SupportedConversionPairs,
            conversion_pairs)

    def test_example_system(self):
        """Tests the Example_ system."""
        # Test template.
        self.assertIsInstance(Example_, TemplateClass)
        self.assertEqual(
            str(Example_), f"<TemplateSystem {__name__}.Example_>")
        self.assertIs(Example_[float], Example)

        # Test parameters.
        param_list = [(T,) for T in SystemScalarConverter.SupportedScalars]
        self.assertListEqual(Example_.param_list, param_list)

        for T in SystemScalarConverter.SupportedScalars:
            system_T = Example_[T](0)
            self.assertEqual(
                system_T.GetSystemType(),
                f"{__name__}.Example_[{T.__name__}]")

        # Test private properties (do NOT use these in your code!).
        self.assertTupleEqual(
            tuple(Example_._T_list), SystemScalarConverter.SupportedScalars)
        self.assertTupleEqual(
            tuple(Example_._T_pairs),
            SystemScalarConverter.SupportedConversionPairs)
        converter = Example_._converter
        for T, U in SystemScalarConverter.SupportedConversionPairs:
            self.assertTrue(converter.IsConvertible[T, U]())

        # Test calls that we have available for scalar conversion.
        for T, U in SystemScalarConverter.SupportedConversionPairs:
            system_U = Example_[U](100)
            self.assertIs(system_U.copied_from, None)
            system_T = system_U.ToScalarType[T]()
            self.assertIsInstance(system_T, Example_[T])
            self.assertEqual(system_T.value, 100)
            self.assertIs(system_T.copied_from, system_U)
            if T == AutoDiffXd:
                system_ad = system_U.ToAutoDiffXd()
                self.assertIsInstance(system_ad, Example_[T])
                self.assertEqual(system_ad.value, 100)
                self.assertIs(system_ad.copied_from, system_U)
            if T == Expression:
                system_sym = system_U.ToSymbolic()
                self.assertIsInstance(system_sym, Example_[T])
                self.assertEqual(system_sym.value, 100)
                self.assertIs(system_sym.copied_from, system_U)

    def test_example_system_in_diagram(self):
        system_f = Example(value=10)
        system_f.set_name("example")
        builder_f = DiagramBuilder()
        builder_f.AddSystem(system_f)
        diagram_f = builder_f.Build()
        diagram_ad = diagram_f.ToAutoDiffXd()
        system_ad = diagram_ad.GetSubsystemByName(system_f.get_name())
        self.assertIsInstance(system_ad, Example_[AutoDiffXd])
        self.assertIs(system_ad.copied_from, system_f)

    def test_define_convertible_system_api(self):
        """Tests more advanced API of `TemplateSystem.define`, both positive
        and negative tests."""

        def generic_instantiation_func(T):

            class GenericInstantiation(LeafSystem_[T]):
                def _construct(self, converter=None):
                    LeafSystem_[T].__init__(self, converter)

                def _construct_copy(self, other, converter=None):
                    LeafSystem_[T].__init__(self, converter)

            return GenericInstantiation

        # Non-symbolic
        # - Implicit conversion pairs.
        T_list = [float, AutoDiffXd]
        T_pairs_full = [
            (AutoDiffXd, float),
            (float, AutoDiffXd),
        ]
        A = mut.TemplateSystem.define("A", T_list=T_list)(
            generic_instantiation_func)
        self.assertListEqual(A._T_list, T_list)
        self.assertListEqual(A._T_pairs, T_pairs_full)
        # - Explicit conversion pairs.
        T_pairs = [
            (float, AutoDiffXd),
        ]
        B = mut.TemplateSystem.define("B", T_list=T_list, T_pairs=T_pairs)(
            generic_instantiation_func)
        self.assertListEqual(B._T_list, T_list)
        self.assertListEqual(B._T_pairs, T_pairs)

        # Negative tests.
        # - Not a supported scalar.
        T_list_bad = [int, float]
        with self.assertRaises(AssertionError):
            mut.TemplateSystem.define("C", T_list=T_list_bad)
        # - Not in original `T_list`.
        T_pairs_bad = [
            (float, Expression),
        ]
        with self.assertRaises(AssertionError):
            mut.TemplateSystem.define(
                "C", T_list=T_list, T_pairs=T_pairs_bad)
        # - Unsupported conversion.
        T_pairs_unsupported = [
            (float, float),
        ]
        with self.assertRaises(AssertionError):
            mut.TemplateSystem.define("C", T_pairs=T_pairs_unsupported)

    def test_inheritance(self):
        @mut.TemplateSystem.define("Child_")
        def Child_(T):

            class Impl(Example_[T]):
                def _construct(self, converter=None):
                    Example_[T].__init__(self, 1000, converter=converter)

                def _construct_copy(self, other, converter=None):
                    Example_[T].__init__(self, other, converter=converter)

            return Impl

        c_float = Child_[float]()
        self.assertIsInstance(c_float, Child_[float])
        self.assertIsInstance(c_float, Example_[float])
        self.assertEqual(c_float.value, 1000)
        self.assertIs(c_float.copied_from, None)
        c_ad = c_float.ToAutoDiffXd()
        self.assertEqual(c_ad.value, 1000)
        self.assertIs(c_ad.copied_from, c_float)
        self.assertIsInstance(c_ad, Child_[AutoDiffXd])
        self.assertIsInstance(c_ad, Example_[AutoDiffXd])

    def test_bad_class_definitions(self):
        """Tests bad class definitions."""
        # Should not define `__init__`.

        @mut.TemplateSystem.define("NoInit_")
        def NoInit_(T):

            class NoInitInstantiation(LeafSystem_[T]):
                def __init__(self):
                    pass

                def _construct(self, converter=None):
                    pass

                def _construct_copy(self, converter=None):
                    pass

            return NoInitInstantiation

        with self.assertRaises(RuntimeError) as cm:
            NoInit_[float]
        self.assertIn(
            "NoInit_[float] defines `__init__`, but should not",
            str(cm.exception))

        # Should define `_construct_copy`.

        @mut.TemplateSystem.define("NoConstructCopy_")
        def NoConstructCopy_(T):

            class NoConstructCopyInstantiation(LeafSystem_[T]):
                def _construct(self, converter=None):
                    pass

            return NoConstructCopyInstantiation

        with self.assertRaises(RuntimeError) as cm:
            NoConstructCopy_[float]
        self.assertIn(
            "NoConstructCopy_[float] does not define `_construct_copy`",
            str(cm.exception))

        # Should inherit from `LeafSystem_[T]`.

        @mut.TemplateSystem.define("BadParenting_")
        def BadParenting_(T):

            class BadParentingInstantiation:
                def __init__(self):
                    pass

                def _construct(self, converter=None):
                    pass

                def _construct_copy(self, converter=None):
                    pass

            return BadParentingInstantiation

        with self.assertRaises(RuntimeError) as cm:
            BadParenting_[float]
        self.assertIn("BadParenting_[float]", str(cm.exception))
        self.assertIn("LeafSystem", str(cm.exception))

    def test_clone(self):
        """Tests the System.Clone bindings. This is most convenient to do in
        the scalar conversion test, because cloning uses scalar conversion
        under the hood.
        """
        for T in SystemScalarConverter.SupportedScalars:
            dut = Example_[T](0)
            dut.Clone()
            copy.copy(dut)
            copy.deepcopy(dut)
0
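The pattern under test, in miniature (the Gain_ system here is illustrative, mirroring the test's Example_): define a scalar-convertible system with @TemplateSystem.define, then convert it to AutoDiffXd.

from pydrake.autodiffutils import AutoDiffXd
from pydrake.systems.framework import LeafSystem_
from pydrake.systems.scalar_conversion import TemplateSystem


@TemplateSystem.define("Gain_")
def Gain_(T):

    class Impl(LeafSystem_[T]):
        def _construct(self, gain, converter=None):
            LeafSystem_[T].__init__(self, converter=converter)
            self.gain = gain

        def _construct_copy(self, other, converter=None):
            # Copy construction delegates to _construct, preserving `gain`.
            Impl._construct(self, other.gain, converter=converter)

    return Impl


gain_f = Gain_[float](gain=2.0)
gain_ad = gain_f.ToAutoDiffXd()
assert isinstance(gain_ad, Gain_[AutoDiffXd]) and gain_ad.gain == 2.0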
/home/johnshepherd/drake/bindings/pydrake/systems
/home/johnshepherd/drake/bindings/pydrake/systems/test/perception_test.py
# -*- coding: utf-8 -*-

import unittest

import numpy as np

from pydrake.math import RigidTransform, RollPitchYaw, RotationMatrix
from pydrake.perception import BaseField, Fields, PointCloud
from pydrake.systems.analysis import Simulator
from pydrake.systems.framework import DiagramBuilder
from pydrake.systems.perception import (
    PointCloudConcatenation, _ConcatenatePointClouds, _TileColors,
    _TransformPoints)


class TestConcatenatePointClouds(unittest.TestCase):
    def setUp(self):
        self.points_0 = np.array([[1.0], [2.0], [3.0]])
        self.colors_0 = np.array([[0], [128], [255]])

        self.points_1 = np.array([[4.0], [5.0], [6.0]])
        self.colors_1 = np.array([[50], [100], [200]])

        self.points_dict = {"0": self.points_0, "1": self.points_1}
        self.colors_dict = {"0": self.colors_0, "1": self.colors_1}

    def test_concatenation(self):
        scene_points, scene_colors = _ConcatenatePointClouds(
            self.points_dict, self.colors_dict)

        self.assertEqual(scene_points.shape, (3, len(self.points_dict)))
        self.assertEqual(scene_colors.shape, (3, len(self.colors_dict)))
        self.assertEqual(scene_points.shape, scene_colors.shape)

        for i, value in enumerate(self.points_0.flatten()):
            self.assertTrue(value in scene_points[i, :])

        for i, value in enumerate(self.points_1.flatten()):
            self.assertTrue(value in scene_points[i, :])

        for i, value in enumerate(self.colors_0.flatten()):
            self.assertTrue(value in scene_colors[i, :])

        for i, value in enumerate(self.colors_1.flatten()):
            self.assertTrue(value in scene_colors[i, :])


class TestTileColors(unittest.TestCase):
    def setUp(self):
        self.red = [255, 0, 0]
        self.blue = [0, 0, 255]

    def test_one_dim(self):
        tiled = _TileColors(self.red, 1)
        expected_tiled = np.array([[255], [0], [0]])
        self.assertTrue(np.allclose(tiled, expected_tiled))

    def test_three_dims(self):
        tiled = _TileColors(self.blue, 3)
        expected_tiled = np.array([[0, 0, 0], [0, 0, 0], [255, 255, 255]])
        self.assertTrue(np.allclose(tiled, expected_tiled))


class TestTransformPoints(unittest.TestCase):
    def setUp(self):
        self.points = np.array([[1, 1, 0], [2, 1, 0]]).T
        self.translation = RigidTransform(p=[1, 2, 3])
        self.rotation = RigidTransform(
            RotationMatrix(RollPitchYaw(0, 0, np.pi/2)))

    def test_translation(self):
        transformed_points = _TransformPoints(
            self.points, self.translation.GetAsMatrix4())
        expected_translated_points = np.array([[2, 3, 3], [3, 3, 3]]).T
        self.assertTrue(
            np.allclose(transformed_points, expected_translated_points))

    def test_rotation(self):
        transformed_points = _TransformPoints(
            self.points, self.rotation.GetAsMatrix4())
        expected_rotated_points = np.array([[-1, 1, 0], [-1, 2, 0]]).T
        self.assertTrue(
            np.allclose(transformed_points, expected_rotated_points))


class TestPointCloudConcatenation(unittest.TestCase):
    def setUp(self):
        builder = DiagramBuilder()

        X_WP_0 = RigidTransform.Identity()
        X_WP_1 = RigidTransform.Identity()
        X_WP_1.set_translation([1.0, 0, 0])

        id_list = ["0", "1"]

        self.pc_concat = builder.AddSystem(PointCloudConcatenation(id_list))

        self.num_points = 10000
        xyzs = np.random.uniform(-0.1, 0.1, (3, self.num_points))
        # Only go to 254 to distinguish between point clouds with and without
        # color.
        rgbs = np.random.uniform(0., 254.0, (3, self.num_points))

        self.pc = PointCloud(
            self.num_points, Fields(BaseField.kXYZs | BaseField.kRGBs))
        self.pc.mutable_xyzs()[:] = xyzs
        self.pc.mutable_rgbs()[:] = rgbs

        self.pc_no_rgbs = PointCloud(
            self.num_points, Fields(BaseField.kXYZs))
        self.pc_no_rgbs.mutable_xyzs()[:] = xyzs

        diagram = builder.Build()

        simulator = Simulator(diagram)

        self.context = diagram.GetMutableSubsystemContext(
            self.pc_concat, simulator.get_mutable_context())

        self.pc_concat.GetInputPort("X_FCi_0").FixValue(self.context, X_WP_0)
        self.pc_concat.GetInputPort("X_FCi_1").FixValue(self.context, X_WP_1)

    def test_no_rgb(self):
        self.pc_concat.GetInputPort("point_cloud_CiSi_0").FixValue(
            self.context, self.pc_no_rgbs)
        self.pc_concat.GetInputPort("point_cloud_CiSi_1").FixValue(
            self.context, self.pc_no_rgbs)

        fused_pc = self.pc_concat.GetOutputPort("point_cloud_FS").Eval(
            self.context)

        self.assertEqual(fused_pc.size(), 2 * self.num_points)

        # The first point cloud should be from [-0.1 to 0.1].
        # The second point cloud should be from [0.9 to 1.1].
        self.assertTrue(np.max(fused_pc.xyzs()[0, :]) >= 1.0)
        self.assertTrue(np.min(fused_pc.xyzs()[0, :]) <= 0.0)

        # Even if both input point clouds don't have rgbs, the fused point
        # cloud should contain rgbs of the default color.
        self.assertTrue(fused_pc.has_rgbs())
        self.assertTrue(
            np.all(fused_pc.rgbs()[:, 0] == np.array([255, 255, 255])))
        self.assertTrue(
            np.all(fused_pc.rgbs()[:, -1] == np.array([255, 255, 255])))

    def test_rgb(self):
        self.pc_concat.GetInputPort("point_cloud_CiSi_0").FixValue(
            self.context, self.pc)
        self.pc_concat.GetInputPort("point_cloud_CiSi_1").FixValue(
            self.context, self.pc)

        fused_pc = self.pc_concat.GetOutputPort("point_cloud_FS").Eval(
            self.context)

        self.assertEqual(fused_pc.size(), 2 * self.num_points)

        # The first point cloud should be from [-0.1 to 0.1].
        # The second point cloud should be from [0.9 to 1.1].
        self.assertTrue(np.max(fused_pc.xyzs()[0, :]) >= 1.0)
        self.assertTrue(np.min(fused_pc.xyzs()[0, :]) <= 0.0)

        self.assertTrue(fused_pc.has_rgbs())
        self.assertTrue(
            np.all(fused_pc.rgbs()[:, 0] != np.array([255, 255, 255])))
        self.assertTrue(
            np.all(fused_pc.rgbs()[:, -1] != np.array([255, 255, 255])))

    def test_mix_rgb(self):
        self.pc_concat.GetInputPort("point_cloud_CiSi_0").FixValue(
            self.context, self.pc)
        self.pc_concat.GetInputPort("point_cloud_CiSi_1").FixValue(
            self.context, self.pc_no_rgbs)

        fused_pc = self.pc_concat.GetOutputPort("point_cloud_FS").Eval(
            self.context)

        self.assertEqual(fused_pc.size(), 2 * self.num_points)

        # The first point cloud should be from [-0.1 to 0.1].
        # The second point cloud should be from [0.9 to 1.1].
        self.assertTrue(np.max(fused_pc.xyzs()[0, :]) >= 1.0)
        self.assertTrue(np.min(fused_pc.xyzs()[0, :]) <= 0.0)

        self.assertTrue(fused_pc.has_rgbs())

        # We don't know in what order the two point clouds will be combined.
        rgb_first = np.all(fused_pc.rgbs()[:, 0] != np.array([255, 255, 255]))
        rgb_last = np.all(fused_pc.rgbs()[:, -1] != np.array([255, 255, 255]))
        no_rgb_first = np.all(
            fused_pc.rgbs()[:, 0] == np.array([255, 255, 255]))
        no_rgb_last = np.all(
            fused_pc.rgbs()[:, -1] == np.array([255, 255, 255]))

        self.assertTrue(
            (rgb_first and no_rgb_last) or (no_rgb_first and rgb_last))
0
/home/johnshepherd/drake/bindings/pydrake/systems
/home/johnshepherd/drake/bindings/pydrake/systems/test/analysis_test.py
import copy

import numpy as np
import unittest

from pydrake.common import Parallelism
from pydrake.common.test_utilities import numpy_compare
from pydrake.math import isnan
from pydrake.symbolic import Variable, Expression
from pydrake.autodiffutils import AutoDiffXd
from pydrake.systems.primitives import (
    ConstantVectorSource,
    ConstantVectorSource_,
    FirstOrderLowPassFilter_,
    LinearSystem_,
    SymbolicVectorSystem,
    SymbolicVectorSystem_,
)
from pydrake.systems.framework import Context_, EventStatus
from pydrake.systems.analysis import (
    ApplySimulatorConfig,
    BatchEvalUniquePeriodicDiscreteUpdate,
    BatchEvalTimeDerivatives,
    ExtractSimulatorConfig,
    InitializeParams,
    IntegratorBase_,
    PrintSimulatorStatistics,
    ResetIntegratorFromFlags,
    RungeKutta2Integrator,
    RungeKutta2Integrator_,
    RungeKutta3Integrator,
    RungeKutta3Integrator_,
    RegionOfAttraction,
    RegionOfAttractionOptions,
    Simulator,
    Simulator_,
    SimulatorConfig,
    SimulatorStatus,
)
from pydrake.trajectories import PiecewisePolynomial, PiecewisePolynomial_


class TestAnalysis(unittest.TestCase):
    def test_region_of_attraction(self):
        x = Variable("x")
        sys = SymbolicVectorSystem(state=[x], dynamics=[-x+x**3])
        context = sys.CreateDefaultContext()
        options = RegionOfAttractionOptions()
        options.lyapunov_candidate = x*x
        options.state_variables = [x]
        numpy_compare.assert_equal(options.state_variables, [x])
        options.use_implicit_dynamics = False
        options.solver_id = None
        options.solver_options = None
        V = RegionOfAttraction(system=sys, context=context, options=options)
        self.assertIsInstance(V, Expression)
        self.assertGreater(len(repr(options)), 0)
        self.assertIn("use_implicit_dynamics", repr(options))

    def test_integrator_constructors(self):
        """Test all constructors for all integrator types."""
        system = ConstantVectorSource([1])
        context = system.CreateDefaultContext()
        RungeKutta2Integrator(system=system, max_step_size=0.01)
        RungeKutta2Integrator(
            system=system, max_step_size=0.01, context=context)
        RungeKutta3Integrator(system=system)
        RungeKutta3Integrator(system=system, context=context)

    @numpy_compare.check_nonsymbolic_types
    def test_batch_eval(self, T):
        A = np.matrix("[0.1, 0.2; 0.3, 0.4]")
        B = np.matrix("[0.5, 0.6; 0.7, 0.8]")
        dt_system = LinearSystem_[T](A, B, time_period=0.1)
        dt_context = dt_system.CreateDefaultContext()
        times = np.matrix("[1, 2, 3]")
        states = np.matrix("[1.2, 1.3, 1.4; 2.1, 2.2, 2.3]")
        inputs = np.matrix("[3.1, 3.2, 3.6; 4.6, 4.5, 4.2]")
        next_state = BatchEvalUniquePeriodicDiscreteUpdate(
            system=dt_system,
            context=dt_context,
            times=times,
            states=states,
            inputs=inputs,
            num_time_steps=1,
            input_port_index=dt_system.get_input_port().get_index(),
            parallelize=Parallelism(num_threads=2),
        )
        numpy_compare.assert_float_allclose(
            next_state, A @ states + B @ inputs)

        ct_system = LinearSystem_[T](A, B)
        ct_context = ct_system.CreateDefaultContext()
        derivatives = BatchEvalTimeDerivatives(
            system=ct_system,
            context=ct_context,
            times=times,
            states=states,
            inputs=inputs,
            input_port_index=ct_system.get_input_port().get_index(),
            parallelize=Parallelism(num_threads=2),
        )
        numpy_compare.assert_float_allclose(
            derivatives, A @ states + B @ inputs)

    @numpy_compare.check_nonsymbolic_types
    def test_integrator_api(self, T):
        system = FirstOrderLowPassFilter_[T](time_constant=1.0, size=1)
        context = system.CreateDefaultContext()
        system.get_input_port().FixValue(context, [1.0])

        integrator = RungeKutta3Integrator_[T](system=system)
        self.assertIsInstance(integrator, IntegratorBase_[T])

        # WARNING: IntegratorBase.get_context() could segfault if context is
        # not set.
        integrator.reset_context(context=context)
        self.assertIs(integrator.get_context(), context)
        self.assertIs(integrator.get_mutable_context(), context)

        target_accuracy = 1E-6
        integrator.set_target_accuracy(accuracy=target_accuracy)
        self.assertEqual(integrator.get_target_accuracy(), target_accuracy)

        maximum_step_size = 0.2
        integrator.set_maximum_step_size(max_step_size=maximum_step_size)
        self.assertEqual(integrator.get_maximum_step_size(),
                         maximum_step_size)

        minimum_step_size = 2E-2
        integrator.set_requested_minimum_step_size(
            min_step_size=minimum_step_size)
        self.assertEqual(
            integrator.get_requested_minimum_step_size(), minimum_step_size)

        integrator.set_throw_on_minimum_step_size_violation(throws=True)
        self.assertTrue(integrator.get_throw_on_minimum_step_size_violation())

        integrator.set_fixed_step_mode(flag=True)
        self.assertTrue(integrator.get_fixed_step_mode())

        integrator.Initialize()

        integrator.StartDenseIntegration()
        dense_output = integrator.get_dense_output()
        self.assertIsInstance(dense_output, PiecewisePolynomial_[T])
        self.assertIs(integrator.StopDenseIntegration(), dense_output)

        self.assertEqual(integrator.get_num_substep_failures(), 0)
        self.assertEqual(
            integrator.get_num_step_shrinkages_from_substep_failures(), 0)
        self.assertEqual(
            integrator.get_num_step_shrinkages_from_error_control(), 0)
        self.assertEqual(integrator.get_num_derivative_evaluations(), 0)
        self.assertTrue(
            isnan(integrator.get_actual_initial_step_size_taken()))
        self.assertTrue(
            isnan(integrator.get_smallest_adapted_step_size_taken()))
        self.assertTrue(isnan(integrator.get_largest_step_size_taken()))
        self.assertEqual(integrator.get_num_steps_taken(), 0)
        integrator.ResetStatistics()

        integrator.Reset()

    def test_symbolic_integrators(self):
        x = Variable("x")
        sys = SymbolicVectorSystem_[Expression](state=[x],
                                                dynamics=[-x+x**3])
        context = sys.CreateDefaultContext()

        max_h = 0.1
        RungeKutta2Integrator_[Expression](sys, max_h, context)

    def test_dense_integration(self):
        x = Variable("x")
        sys = SymbolicVectorSystem(state=[x], dynamics=[-x+x**3])
        simulator = Simulator(sys)
        integrator = simulator.get_mutable_integrator()
        self.assertIsNone(integrator.get_dense_output())
        integrator.StartDenseIntegration()
        pp = integrator.get_dense_output()
        self.assertIsInstance(pp, PiecewisePolynomial)
        simulator.AdvanceTo(1.0)
        self.assertIs(pp, integrator.StopDenseIntegration())
        self.assertEqual(pp.start_time(), 0.0)
        self.assertEqual(pp.end_time(), 1.0)
        self.assertIsNone(integrator.get_dense_output())

    @numpy_compare.check_nonsymbolic_types
    def test_simulator_api(self, T):
        """Tests basic Simulator API."""
        # TODO(eric.cousineau): Migrate tests from `general_test.py` to here.
        system = ConstantVectorSource_[T]([1.])
        simulator = Simulator_[T](system=system)
        simulator = Simulator_[T](
            system=system, context=system.CreateDefaultContext())

        simulator.Initialize()
        initialize_params = InitializeParams(
            suppress_initialization_events=True)
        self.assertEqual(
            repr(initialize_params),
            "InitializeParams(suppress_initialization_events=True)")
        copy.copy(initialize_params)
        simulator.Initialize(params=initialize_params)

        simulator.AdvanceTo(boundary_time=0.0, interruptible=False)
        simulator.AdvancePendingEvents()
        simulator.AdvanceTo(boundary_time=0.1, interruptible=True)

        monitor_called_count = 0

        def monitor(root_context):
            nonlocal monitor_called_count
            monitor_called_count += 1
            return EventStatus.DidNothing()

        simulator.set_monitor(monitor=monitor)
        # N.B. This will be round-trip wrapped via pybind11, but should be
        # the same function underneath.
        monitor_from_pybind = simulator.get_monitor()
        self.assertIsNot(monitor_from_pybind, monitor)
        self.assertEqual(monitor_called_count, 0)
        monitor_from_pybind(simulator.get_context())
        self.assertEqual(monitor_called_count, 1)
        simulator.clear_monitor()

        monitor_no_return_called_count = 0

        def monitor_no_return(root_context):
            nonlocal monitor_no_return_called_count
            monitor_no_return_called_count += 1

        simulator.set_monitor(monitor_no_return)
        monitor_from_pybind = simulator.get_monitor()
        status = monitor_from_pybind(simulator.get_context())
        self.assertEqual(status.severity(),
                         EventStatus.Severity.kDidNothing)
        self.assertEqual(monitor_no_return_called_count, 1)
        simulator.clear_monitor()

        self.assertIsInstance(simulator.get_context(), Context_[T])
        self.assertIs(simulator.get_context(),
                      simulator.get_mutable_context())
        self.assertTrue(simulator.has_context())
        self.assertIsInstance(
            simulator.get_integrator(), RungeKutta3Integrator_[T])
        self.assertIs(
            simulator.get_integrator(), simulator.get_mutable_integrator())
        simulator.reset_context(context=simulator.get_context().Clone())

        simulator.set_publish_every_time_step(publish=True)
        simulator.set_publish_at_initialization(publish=True)
        simulator.set_target_realtime_rate(realtime_rate=0.0)
        self.assertEqual(simulator.get_target_realtime_rate(), 0.0)
        self.assertIsInstance(simulator.get_actual_realtime_rate(), float)
        simulator.ResetStatistics()

        self.assertEqual(simulator.get_num_publishes(), 0)
        self.assertEqual(simulator.get_num_steps_taken(), 0)
        self.assertEqual(simulator.get_num_discrete_updates(), 0)
        self.assertEqual(simulator.get_num_unrestricted_updates(), 0)

        self.assertIs(simulator.get_system(), system)

    def test_simulator_status(self):
        SimulatorStatus.ReturnReason.kReachedBoundaryTime
        SimulatorStatus.ReturnReason.kReachedTerminationCondition
        SimulatorStatus.ReturnReason.kEventHandlerFailed

        system = ConstantVectorSource([1.])
        simulator = Simulator(system)
        status = simulator.AdvanceTo(1.)
        self.assertRegex(
            status.FormatMessage(),
            "^Simulator successfully reached the boundary time")
        self.assertTrue(status.succeeded())
        self.assertEqual(status.boundary_time(), 1.)
        self.assertEqual(status.return_time(), 1.)
        self.assertEqual(
            status.reason(),
            SimulatorStatus.ReturnReason.kReachedBoundaryTime)
        self.assertIsNone(status.system())
        self.assertEqual(status.message(), "")
        self.assertTrue(status.IsIdenticalStatus(other=status))
        PrintSimulatorStatistics(simulator)

    def test_reset_integrator_from_flags(self):
        for T in (float, AutoDiffXd):
            source = ConstantVectorSource_[T]([2, 3])
            simulator = Simulator_[T](source)
            new_integrator = ResetIntegratorFromFlags(
                simulator=simulator, scheme="runge_kutta2",
                max_step_size=0.001)
            self.assertIsInstance(new_integrator, RungeKutta2Integrator_[T])

    def test_simulator_config(self):
        SimulatorConfig()
        config = SimulatorConfig(target_realtime_rate=2.0)
        self.assertEqual(config.target_realtime_rate, 2.0)
        self.assertIn("target_realtime_rate", repr(config))
        copy.copy(config)

    def test_simulator_config_functions(self):
        for T in (float, AutoDiffXd):
            source = ConstantVectorSource_[T]([2, 3])
            simulator = Simulator_[T](source)
            config = ExtractSimulatorConfig(simulator)
            config.target_realtime_rate = 100.0
            ApplySimulatorConfig(config=config, simulator=simulator)
            self.assertEqual(simulator.get_target_realtime_rate(), 100.0)

    def test_system_monitor(self):
        x = Variable("x")
        sys = SymbolicVectorSystem(state=[x], dynamics=[-x+x**3])
        simulator = Simulator(sys)

        def monitor(root_context):
            context = sys.GetMyContextFromRoot(root_context)
            if context.get_time() >= 1.:
                return EventStatus.ReachedTermination(sys, "Time reached")
            # N.B. We suppress returning anything to test the binding's
            # ability to handle `None` return type.

        self.assertIsNone(simulator.get_monitor())
        simulator.set_monitor(monitor)
        self.assertIsNotNone(simulator.get_monitor())
        status = simulator.AdvanceTo(2.)
        self.assertEqual(
            status.reason(),
            SimulatorStatus.ReturnReason.kReachedTerminationCondition)
        self.assertLess(status.return_time(), 1.1)
        simulator.clear_monitor()
        self.assertIsNone(simulator.get_monitor())
0
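A minimal end-to-end sketch (not part of the test suite above) of the Simulator and error-controlled integrator API exercised by analysis_test.py. It assumes the default RungeKutta3 integrator that Simulator creates; the 1e-6 tolerance is an illustrative choice.

import numpy as np

from pydrake.symbolic import Variable
from pydrake.systems.analysis import Simulator
from pydrake.systems.primitives import SymbolicVectorSystem

# Scalar linear system xdot = -x; from x(0) = 1 the exact solution is exp(-t).
x = Variable("x")
system = SymbolicVectorSystem(state=[x], dynamics=[-x])
simulator = Simulator(system)
context = simulator.get_mutable_context()
context.SetContinuousState([1.0])

# Tighten the accuracy of the default error-controlled integrator.
integrator = simulator.get_mutable_integrator()
integrator.set_target_accuracy(accuracy=1e-8)

simulator.AdvanceTo(1.0)
x_final = context.get_continuous_state_vector().CopyToVector()[0]
assert abs(x_final - np.exp(-1.0)) < 1e-6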
/home/johnshepherd/drake/bindings/pydrake/systems
/home/johnshepherd/drake/bindings/pydrake/systems/test/controllers_test.py
import math import unittest import numpy as np from pydrake.common import FindResourceOrThrow from pydrake.examples import PendulumPlant from pydrake.multibody.tree import MultibodyForces from pydrake.multibody.plant import MultibodyPlant from pydrake.multibody.parsing import Parser from pydrake.systems.analysis import Simulator from pydrake.systems.controllers import ( DiscreteTimeLinearQuadraticRegulator, DynamicProgrammingOptions, FiniteHorizonLinearQuadraticRegulator, FiniteHorizonLinearQuadraticRegulatorOptions, FiniteHorizonLinearQuadraticRegulatorResult, FittedValueIteration, InverseDynamicsController, InverseDynamics, JointStiffnessController, LinearQuadraticRegulator, LinearProgrammingApproximateDynamicProgramming, MakeFiniteHorizonLinearQuadraticRegulator, PeriodicBoundaryCondition, PidControlledSystem, PidController, ) from pydrake.systems.framework import ( DiagramBuilder, InputPortSelection, InputPort, OutputPort, ) from pydrake.systems.primitives import Integrator, LinearSystem from pydrake.trajectories import Trajectory class TestControllers(unittest.TestCase): def test_fitted_value_iteration_pendulum(self): plant = PendulumPlant() simulator = Simulator(plant) def quadratic_regulator_cost(context): x = context.get_continuous_state_vector().CopyToVector() x[0] = x[0] - math.pi u = plant.EvalVectorInput(context, 0).CopyToVector() return x.dot(x) + u.dot(u) # Note: intentionally under-sampled to keep the problem small qbins = np.linspace(0., 2.*math.pi, 11) qdotbins = np.linspace(-10., 10., 11) state_grid = [set(qbins), set(qdotbins)] input_limit = 2. input_mesh = [set(np.linspace(-input_limit, input_limit, 5))] time_step = 0.01 num_callbacks = [0] def callback(iteration, mesh, cost_to_go, policy): # Drawing is slow, don't draw every frame. num_callbacks[0] += 1 options = DynamicProgrammingOptions() options.convergence_tol = 1. options.periodic_boundary_conditions = [ DynamicProgrammingOptions.PeriodicBoundaryCondition( state_index=0, low=0., high=2.*math.pi), ] self.assertIs( PeriodicBoundaryCondition, DynamicProgrammingOptions.PeriodicBoundaryCondition) options.visualization_callback = callback options.input_port_index = InputPortSelection.kUseFirstInputIfItExists options.assume_non_continuous_states_are_fixed = False policy, cost_to_go = FittedValueIteration(simulator, quadratic_regulator_cost, state_grid, input_mesh, time_step, options) self.assertGreater(num_callbacks[0], 0) def test_linear_programming_approximate_dynamic_programming(self): integrator = Integrator(1) simulator = Simulator(integrator) # minimum time cost function (1 for all non-zero states). def cost_function(context): x = context.get_continuous_state_vector().CopyToVector() if (math.fabs(x[0]) > 0.1): return 1. else: return 0. def cost_to_go_function(state, parameters): return parameters[0] * math.fabs(state[0]) state_samples = np.array([[-4., -3., -2., -1., 0., 1., 2., 3., 4.]]) input_samples = np.array([[-1., 0., 1.]]) time_step = 1.0 options = DynamicProgrammingOptions() options.discount_factor = 1. 
J = LinearProgrammingApproximateDynamicProgramming( simulator, cost_function, cost_to_go_function, 1, state_samples, input_samples, time_step, options) self.assertAlmostEqual(J[0], 1., delta=1e-6) def test_joint_stiffness_controller(self): url = ( "package://drake_models/iiwa_description/sdf/" + "iiwa14_no_collision.sdf") plant = MultibodyPlant(time_step=0.01) Parser(plant).AddModels(url=url) plant.WeldFrames(plant.world_frame(), plant.GetFrameByName("iiwa_link_0")) plant.Finalize() kp = np.ones((7,)) kd = 0.1*np.ones((7,)) controller = JointStiffnessController(plant=plant, kp=kp, kd=kd) self.assertEqual(controller.get_input_port_estimated_state().size(), 14) self.assertEqual(controller.get_input_port_desired_state().size(), 14) self.assertEqual(controller.get_output_port_generalized_force().size(), 7) self.assertIsInstance(controller.get_multibody_plant(), MultibodyPlant) def test_inverse_dynamics(self): url = ( "package://drake_models/iiwa_description/sdf/" + "iiwa14_no_collision.sdf") plant = MultibodyPlant(time_step=0.01) Parser(plant).AddModels(url=url) plant.WeldFrames(plant.world_frame(), plant.GetFrameByName("iiwa_link_0")) plant.Finalize() controller = InverseDynamics(plant=plant) self.assertIsInstance(controller.get_input_port_estimated_state(), InputPort) self.assertIsInstance(controller.get_input_port_desired_acceleration(), InputPort) self.assertIsInstance(controller.get_output_port_generalized_force(), OutputPort) self.assertFalse(controller.is_pure_gravity_compensation()) controller = InverseDynamics( plant=plant, mode=InverseDynamics.InverseDynamicsMode.kGravityCompensation, plant_context=plant.CreateDefaultContext()) self.assertIsInstance(controller.get_input_port_estimated_state(), InputPort) self.assertIsInstance(controller.get_output_port_generalized_force(), OutputPort) self.assertTrue(controller.is_pure_gravity_compensation()) def test_inverse_dynamics_controller(self): url = ( "package://drake_models/iiwa_description/sdf/" + "iiwa14_no_collision.sdf") plant = MultibodyPlant(time_step=0.01) Parser(plant).AddModels(url=url) plant.WeldFrames(plant.world_frame(), plant.GetFrameByName("iiwa_link_0")) plant.mutable_gravity_field().set_gravity_vector([0.0, 0.0, 0.0]) plant.Finalize() # We verify the (known) size of the model. kNumPositions = 7 kNumVelocities = 7 kNumActuators = 7 kStateSize = kNumPositions + kNumVelocities self.assertEqual(plant.num_positions(), kNumPositions) self.assertEqual(plant.num_velocities(), kNumVelocities) self.assertEqual(plant.num_actuators(), kNumActuators) kp = np.array([1., 2., 3., 4., 5., 6., 7.]) ki = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]) kd = np.array([.5, 1., 1.5, 2., 2.5, 3., 3.5]) controller = InverseDynamicsController( robot=plant, kp=kp, ki=ki, kd=kd, has_reference_acceleration=True, plant_context=plant.CreateDefaultContext()) context = controller.CreateDefaultContext() output = controller.AllocateOutput() estimated_state_port = controller.get_input_port(0) desired_state_port = controller.get_input_port(1) desired_acceleration_port = controller.get_input_port(2) control_port = controller.get_output_port(0) self.assertEqual(desired_acceleration_port.size(), kNumVelocities) self.assertEqual(estimated_state_port.size(), kStateSize) self.assertEqual(desired_state_port.size(), kStateSize) self.assertEqual(control_port.size(), kNumVelocities) self.assertIsInstance(controller.get_multibody_plant_for_control(), MultibodyPlant) # Current state. 
q = np.array([-0.3, -0.2, -0.1, 0.0, 0.1, 0.2, 0.3]) v = np.array([-0.9, -0.6, -0.3, 0.0, 0.3, 0.6, 0.9]) x = np.concatenate([q, v]) # Reference state and acceleration. q_r = q + 0.1*np.ones_like(q) v_r = v + 0.1*np.ones_like(v) x_r = np.concatenate([q_r, v_r]) vd_r = np.array([1., 2., 3., 4., 5., 6., 7.]) integral_term = np.array([-1., -2., -3., -4., -5., -6., -7.]) vd_d = vd_r + kp*(q_r-q) + kd*(v_r-v) + ki*integral_term estimated_state_port.FixValue(context, x) desired_state_port.FixValue(context, x_r) desired_acceleration_port.FixValue(context, vd_r) controller.set_integral_value(context, integral_term) # Set the plant's context. plant_context = plant.CreateDefaultContext() plant.SetPositionsAndVelocities(plant_context, x) # Compute the expected value of the generalized forces using # inverse dynamics. tau_id = plant.CalcInverseDynamics( plant_context, vd_d, MultibodyForces(plant)) # Verify the result. controller.CalcOutput(context, output) self.assertTrue(np.allclose(output.get_vector_data(0).CopyToVector(), tau_id)) def test_issue14355(self): """ DiagramBuilder.AddSystem() may not propagate keep alive relationships. We use this test to show resolution at a known concrete point of failure. https://github.com/RobotLocomotion/drake/issues/14355 """ def make_diagram(): # Use a nested function to ensure that all locals get garbage # collected quickly. # Construct a trivial plant and ID controller. # N.B. We explicitly do *not* add this plant to the diagram. controller_plant = MultibodyPlant(time_step=0.002) controller_plant.Finalize() builder = DiagramBuilder() controller = builder.AddSystem( InverseDynamicsController( controller_plant, kp=[], ki=[], kd=[], has_reference_acceleration=False, ) ) # Forward ports for ease of testing. builder.ExportInput( controller.get_input_port_estimated_state(), "x_estimated") builder.ExportInput( controller.get_input_port_desired_state(), "x_desired") builder.ExportOutput(controller.get_output_port_control(), "u") diagram = builder.Build() return diagram diagram = make_diagram() # N.B. Without the workaround for #14355, we get a segfault when # creating the context. 
context = diagram.CreateDefaultContext() diagram.GetInputPort("x_estimated").FixValue(context, []) diagram.GetInputPort("x_desired").FixValue(context, []) u = diagram.GetOutputPort("u").Eval(context) np.testing.assert_equal(u, []) def test_pid_controlled_system(self): controllers = [ PidControlledSystem(plant=PendulumPlant(), kp=1., ki=0., kd=2., state_output_port_index=0, plant_input_port_index=0), PidControlledSystem(plant=PendulumPlant(), kp=[0], ki=[1], kd=[2], state_output_port_index=0, plant_input_port_index=0), PidControlledSystem(plant=PendulumPlant(), feedback_selector=np.eye(2), kp=1., ki=0., kd=2., state_output_port_index=0, plant_input_port_index=0), PidControlledSystem(plant=PendulumPlant(), feedback_selector=np.eye(2), kp=[0], ki=[1], kd=[2], state_output_port_index=0, plant_input_port_index=0), ] for controller in controllers: self.assertIsNotNone(controller.get_control_input_port()) self.assertIsNotNone(controller.get_state_input_port()) self.assertIsNotNone(controller.get_state_output_port()) def test_pid_controller(self): controllers = [ PidController(kp=np.ones(3), ki=np.zeros(3), kd=[1, 2, 3]), PidController(state_projection=np.ones((6, 4)), kp=np.ones(3), ki=np.zeros(3), kd=[1, 2, 3]), PidController(state_projection=np.ones((6, 4)), output_projection=np.ones((4, 3)), kp=np.ones(3), ki=np.zeros(3), kd=[1, 2, 3]), ] for controller in controllers: self.assertEqual(controller.num_input_ports(), 2) self.assertEqual(len(controller.get_Kp_vector()), 3) self.assertEqual(len(controller.get_Ki_vector()), 3) self.assertEqual(len(controller.get_Kd_vector()), 3) self.assertIsNotNone(controller.get_input_port_estimated_state()) self.assertIsNotNone(controller.get_input_port_desired_state()) self.assertIsNotNone(controller.get_output_port_control()) def test_linear_quadratic_regulator(self): A = np.array([[0, 1], [0, 0]]) B = np.array([[0], [1]]) C = np.identity(2) D = np.array([[0], [0]]) double_integrator = LinearSystem(A, B, C, D) Q = np.identity(2) R = np.identity(1) K_expected = np.array([[1, math.sqrt(3.)]]) S_expected = np.array([[math.sqrt(3), 1.], [1., math.sqrt(3)]]) (K, S) = LinearQuadraticRegulator(A, B, Q, R) np.testing.assert_almost_equal(K, K_expected) np.testing.assert_almost_equal(S, S_expected) # Test with N and F. 
(K, S) = LinearQuadraticRegulator( A=A, B=B, Q=Q, R=R, N=np.array([[0.1], [0.2]]), F=np.array([[1, 2.]])) controller = LinearQuadraticRegulator(double_integrator, Q, R) np.testing.assert_almost_equal(controller.D(), -K_expected) context = double_integrator.CreateDefaultContext() double_integrator.get_input_port(0).FixValue(context, [0]) controller = LinearQuadraticRegulator( double_integrator, context, Q, R, input_port_index=double_integrator.get_input_port().get_index()) np.testing.assert_almost_equal(controller.D(), -K_expected) def test_discrete_time_linear_quadratic_regulator(self): A = np.array([[1, 1], [0, 1]]) B = np.array([[0], [1]]) Q = np.identity(2) R = np.identity(1) (K, S) = DiscreteTimeLinearQuadraticRegulator(A, B, Q, R) self.assertEqual(K.shape, (1, 2)) self.assertEqual(S.shape, (2, 2)) def test_finite_horizon_linear_quadratic_regulator(self): A = np.array([[0, 1], [0, 0]]) B = np.array([[0], [1]]) C = np.identity(2) D = np.array([[0], [0]]) double_integrator = LinearSystem(A, B, C, D) Q = np.identity(2) R = np.identity(1) options = FiniteHorizonLinearQuadraticRegulatorOptions() options.Qf = Q options.use_square_root_method = False options.simulator_config.max_step_size = 0.2 self.assertIsNone(options.N) self.assertIsNone(options.x0) self.assertIsNone(options.u0) self.assertIsNone(options.xd) self.assertIsNone(options.ud) self.assertEqual(options.input_port_index, InputPortSelection.kUseFirstInputIfItExists) self.assertRegex(repr(options), "".join([ r"FiniteHorizonLinearQuadraticRegulatorOptions\(", # Don't be particular about numpy's whitespace in Qf. r"Qf=\[\[ *1\. *0\.\]\s*\[ *0\. *1\.\]\], " r"N=None, ", r"input_port_index=", r"InputPortSelection.kUseFirstInputIfItExists, ", r"use_square_root_method=False, ", r"simulator_config=SimulatorConfig\(.*\)\)"])) context = double_integrator.CreateDefaultContext() double_integrator.get_input_port(0).FixValue(context, 0.0) result = FiniteHorizonLinearQuadraticRegulator( system=double_integrator, context=context, t0=0, tf=0.1, Q=Q, R=R, options=options) self.assertIsInstance(result, FiniteHorizonLinearQuadraticRegulatorResult) self.assertIsInstance(result.x0, Trajectory) self.assertEqual(result.x0.value(0).shape, (2, 1)) self.assertIsInstance(result.u0, Trajectory) self.assertEqual(result.u0.value(0).shape, (1, 1)) self.assertIsInstance(result.K, Trajectory) self.assertEqual(result.K.value(0).shape, (1, 2)) self.assertIsInstance(result.S, Trajectory) self.assertEqual(result.S.value(0).shape, (2, 2)) self.assertIsInstance(result.k0, Trajectory) self.assertEqual(result.k0.value(0).shape, (1, 1)) self.assertIsInstance(result.sx, Trajectory) self.assertEqual(result.sx.value(0).shape, (2, 1)) self.assertIsInstance(result.s0, Trajectory) self.assertEqual(result.s0.value(0).shape, (1, 1)) regulator = MakeFiniteHorizonLinearQuadraticRegulator( system=double_integrator, context=context, t0=0, tf=0.1, Q=Q, R=R, options=options) self.assertEqual(regulator.get_input_port(0).size(), 2) self.assertEqual(regulator.get_output_port(0).size(), 1)
0
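A standalone companion sketch to the LQR checks in controllers_test.py: it recomputes the double-integrator gains and adds a closed-loop stability check. The eigenvalue assertion is an extra illustration, not part of the original test.

import math

import numpy as np

from pydrake.systems.controllers import LinearQuadraticRegulator

# Double integrator: qddot = u, with state [q, qdot].
A = np.array([[0., 1.], [0., 0.]])
B = np.array([[0.], [1.]])
Q = np.eye(2)
R = np.eye(1)
K, S = LinearQuadraticRegulator(A, B, Q, R)

# For this system the optimal gain is known in closed form.
np.testing.assert_almost_equal(K, [[1., math.sqrt(3.)]])
# The closed-loop dynamics matrix A - B @ K must be Hurwitz (stable).
assert all(ev.real < 0 for ev in np.linalg.eigvals(A - B @ K))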
/home/johnshepherd/drake/bindings/pydrake/systems
/home/johnshepherd/drake/bindings/pydrake/systems/test/lifetime_test_notebook.ipynb
from pydrake.common.value import Value
from pydrake.math import RigidTransform
from pydrake.systems.primitives import ConstantValueSource


def _create_context_value_reference():
    """Returns a reference to a C++ object whose storage comes from inside a
    Context. The return value should keep the entire (otherwise-unused)
    context alive.
    """
    system = ConstantValueSource(Value(RigidTransform()))
    context = system.CreateDefaultContext()
    return system.get_output_port().Eval(context)


# Check that the object still retains its correct value.
X = _create_context_value_reference()
print(X)
assert X.IsExactlyIdentity()

# Re-check that the object still retains its correct value.
print(X)
assert X.IsExactlyIdentity()
0
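The notebook above checks that a value evaluated from an output port keeps its Context alive. This restatement (same ConstantValueSource setup, my own phrasing) makes the hazard explicit by forcing a garbage-collection pass before the value is read back.

import gc

from pydrake.common.value import Value
from pydrake.math import RigidTransform
from pydrake.systems.primitives import ConstantValueSource


def make_reference():
    system = ConstantValueSource(Value(RigidTransform()))
    context = system.CreateDefaultContext()
    # Both locals go out of scope on return; the pybind11 keep-alive
    # annotations must keep the C++ storage behind the return value valid.
    return system.get_output_port().Eval(context)


X = make_reference()
gc.collect()  # Even after a full collection, X must still be readable.
assert X.IsExactlyIdentity()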
/home/johnshepherd/drake/bindings/pydrake/systems
/home/johnshepherd/drake/bindings/pydrake/systems/test/monte_carlo_test.py
# -*- coding: utf-8 -*-

import copy
import time
import unittest
import warnings

import numpy as np

from pydrake.common import RandomGenerator
from pydrake.systems.analysis import (
    MonteCarloSimulation,
    RandomSimulationResult,
    RandomSimulation,
    Simulator
)
from pydrake.systems.primitives import ConstantVectorSource


class TestMonteCarlo(unittest.TestCase):
    def test_minimal_simulation(self):
        # Create a simple system.
        system = ConstantVectorSource([1.])

        def make_simulator(generator):
            simulator = Simulator(system)
            simulator.Initialize()
            simulator.set_target_realtime_rate(0)
            return simulator

        def calc_output(system, context):
            return 42.

        result = RandomSimulation(
            make_simulator=make_simulator, output=calc_output,
            final_time=1.0, generator=RandomGenerator())
        self.assertEqual(result, 42.)

        result = MonteCarloSimulation(
            make_simulator=make_simulator, output=calc_output,
            final_time=1.0, num_samples=10, generator=RandomGenerator())
        self.assertIsInstance(result, list)
        self.assertEqual(len(result), 10)
        self.assertIsInstance(result[0], RandomSimulationResult)
        self.assertIsInstance(result[0].generator_snapshot, RandomGenerator)
        self.assertEqual(result[0].output, 42.)
        for i in range(1, len(result)):
            self.assertIsNot(result[0].generator_snapshot,
                             result[i].generator_snapshot)
0
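The Monte Carlo test keeps its assertions trivial by returning a constant 42. Below is a hedged sketch of a slightly more realistic scoring callback that reads the rollout's root context; final_time and num_samples are arbitrary choices for illustration.

from pydrake.common import RandomGenerator
from pydrake.systems.analysis import MonteCarloSimulation, Simulator
from pydrake.systems.primitives import ConstantVectorSource

system = ConstantVectorSource([1.])


def make_simulator(generator):
    # `generator` could seed randomized initial conditions; this sketch
    # ignores it for simplicity.
    return Simulator(system)


def calc_output(system, context):
    # Score each rollout by the final time it reached.
    return context.get_time()


results = MonteCarloSimulation(
    make_simulator=make_simulator, output=calc_output,
    final_time=0.5, num_samples=3, generator=RandomGenerator())
assert all(abs(r.output - 0.5) < 1e-12 for r in results)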
/home/johnshepherd/drake/bindings/pydrake/systems
/home/johnshepherd/drake/bindings/pydrake/systems/test/planar_scenegraph_visualizer_test.py
import unittest from python.runfiles import Create as CreateRunfiles import numpy as np import os from pydrake.common import FindResourceOrThrow from pydrake.geometry import Box, Mesh from pydrake.math import RigidTransform from pydrake.multibody.parsing import Parser from pydrake.multibody.plant import ( AddMultibodyPlantSceneGraph, CoulombFriction) from pydrake.systems.analysis import Simulator from pydrake.systems.framework import DiagramBuilder from pydrake.systems.planar_scenegraph_visualizer import ( ConnectPlanarSceneGraphVisualizer, PlanarSceneGraphVisualizer) class TestPlanarSceneGraphVisualizer(unittest.TestCase): def test_cart_pole(self): """Cart-Pole with simple geometry.""" file_name = FindResourceOrThrow( "drake/examples/multibody/cart_pole/cart_pole.sdf") builder = DiagramBuilder() cart_pole, scene_graph = AddMultibodyPlantSceneGraph(builder, 0.0) Parser(plant=cart_pole).AddModels(file_name) cart_pole.Finalize() self.assertTrue(cart_pole.geometry_source_is_registered()) visualizer = builder.AddSystem(PlanarSceneGraphVisualizer(scene_graph)) builder.Connect(scene_graph.get_query_output_port(), visualizer.get_geometry_query_input_port()) diagram = builder.Build() diagram_context = diagram.CreateDefaultContext() cart_pole_context = diagram.GetMutableSubsystemContext( cart_pole, diagram_context) vis_context = diagram.GetMutableSubsystemContext( visualizer, diagram_context) cart_pole.get_actuation_input_port().FixValue(cart_pole_context, 0) cart_slider = cart_pole.GetJointByName("CartSlider") pole_pin = cart_pole.GetJointByName("PolePin") cart_slider.set_translation(context=cart_pole_context, translation=0.) pole_pin.set_angle(context=cart_pole_context, angle=2.) simulator = Simulator(diagram, diagram_context) simulator.set_publish_every_time_step(False) simulator.AdvanceTo(.1) visualizer.draw(vis_context) self.assertEqual(visualizer.ax.get_title(), "t = 0.1",) def test_kuka(self): """Kuka IIWA with mesh geometry.""" url = ( "package://drake_models/iiwa_description/sdf/" + "iiwa14_no_collision.sdf") builder = DiagramBuilder() kuka, scene_graph = AddMultibodyPlantSceneGraph(builder, 0.0) Parser(plant=kuka).AddModels(url=url) kuka.Finalize() # Make sure that the frames to visualize exist. iiwa = kuka.GetModelInstanceByName("iiwa14") kuka.GetFrameByName("iiwa_link_7", iiwa) kuka.GetFrameByName("iiwa_link_6", iiwa) visualizer = builder.AddSystem(PlanarSceneGraphVisualizer(scene_graph)) builder.Connect(scene_graph.get_query_output_port(), visualizer.get_geometry_query_input_port()) diagram = builder.Build() diagram_context = diagram.CreateDefaultContext() kuka_context = diagram.GetMutableSubsystemContext( kuka, diagram_context) vis_context = diagram.GetMutableSubsystemContext( visualizer, diagram_context) kuka_actuation_port = kuka.get_actuation_input_port() kuka_actuation_port.FixValue(kuka_context, np.zeros(kuka_actuation_port.size())) simulator = Simulator(diagram, diagram_context) simulator.set_publish_every_time_step(False) simulator.AdvanceTo(.1) visualizer.draw(vis_context) self.assertEqual(visualizer.ax.get_title(), "t = 0.1",) def test_procedural_geometry(self): """ This test ensures we can draw procedurally added primitive geometry that is added to the world model instance (which has a slightly different naming scheme than geometry with a non-default / non-world model instance). """ builder = DiagramBuilder() mbp, scene_graph = AddMultibodyPlantSceneGraph(builder, 0.0) world_body = mbp.world_body() box_shape = Box(1., 2., 3.) 
# This rigid body will be added to the world model instance since # the model instance is not specified. box_body = mbp.AddRigidBody("box") mbp.WeldFrames(world_body.body_frame(), box_body.body_frame(), RigidTransform()) mbp.RegisterVisualGeometry( box_body, RigidTransform.Identity(), box_shape, "ground_vis", np.array([0.5, 0.5, 0.5, 1.])) mbp.RegisterCollisionGeometry( box_body, RigidTransform.Identity(), box_shape, "ground_col", CoulombFriction(0.9, 0.8)) mbp.Finalize() visualizer = builder.AddSystem(PlanarSceneGraphVisualizer(scene_graph)) builder.Connect(scene_graph.get_query_output_port(), visualizer.get_geometry_query_input_port()) diagram = builder.Build() diagram_context = diagram.CreateDefaultContext() vis_context = diagram.GetMutableSubsystemContext( visualizer, diagram_context) simulator = Simulator(diagram, diagram_context) simulator.set_publish_every_time_step(False) simulator.AdvanceTo(.1) visualizer.draw(vis_context) self.assertEqual(visualizer.ax.get_title(), "t = 0.1",) def test_mesh_file_parsing(self): """ This test ensures we can load obj files or provide a reasonable error message. """ def scene_graph_with_mesh(filename, scale=1.0): builder = DiagramBuilder() mbp, scene_graph = AddMultibodyPlantSceneGraph(builder, 0.0) world_body = mbp.world_body() mesh_shape = Mesh(filename, scale=scale) mesh_body = mbp.AddRigidBody("mesh") mbp.WeldFrames(world_body.body_frame(), mesh_body.body_frame(), RigidTransform()) mbp.RegisterVisualGeometry( mesh_body, RigidTransform.Identity(), mesh_shape, "mesh_vis", np.array([0.5, 0.5, 0.5, 1.])) mbp.Finalize() return scene_graph # This mesh should load correctly. runfiles = CreateRunfiles() mesh_name = runfiles.Rlocation( "drake_models/iiwa_description/meshes/iiwa14/visual/" "link_0.gltf") scene_graph = scene_graph_with_mesh(mesh_name) PlanarSceneGraphVisualizer(scene_graph) # This should load correctly, too, by substituting the .gltf. mesh_name_wrong_ext = os.path.splitext(mesh_name)[0] + ".STL" scene_graph = scene_graph_with_mesh(mesh_name_wrong_ext) PlanarSceneGraphVisualizer(scene_graph) # This should report that the file does not exist: with self.assertRaises(FileNotFoundError): PlanarSceneGraphVisualizer( scene_graph, substitute_collocated_mesh_files=False) # This should report that the file does not exist. scene_graph = scene_graph_with_mesh("garbage.obj") with self.assertRaises(FileNotFoundError): PlanarSceneGraphVisualizer(scene_graph) # This should report that the extension was wrong and no .obj was # found. scene_graph = scene_graph_with_mesh("garbage.STL") with self.assertRaises(RuntimeError): PlanarSceneGraphVisualizer(scene_graph) # This should load correctly and yield a very large patch. scene_graph = scene_graph_with_mesh(mesh_name, 1e3) visualizer = PlanarSceneGraphVisualizer(scene_graph) _, _, width, height = visualizer.ax.dataLim.bounds self.assertTrue(width > 10.0) self.assertTrue(height > 10.0) def testConnectPlanarSceneGraphVisualizer(self): """Cart-Pole with simple geometry.""" file_name = FindResourceOrThrow( "drake/examples/multibody/cart_pole/cart_pole.sdf") builder = DiagramBuilder() cart_pole, scene_graph = AddMultibodyPlantSceneGraph(builder, 0.0) Parser(plant=cart_pole).AddModels(file_name) cart_pole.Finalize() # The function auto connects to the scene graph query object port. vis_auto_connect = ConnectPlanarSceneGraphVisualizer( builder=builder, scene_graph=scene_graph, xlim=[0.3, 1.2]) self.assertIsInstance(vis_auto_connect, PlanarSceneGraphVisualizer) # Confirm that arguments are passed through. 
self.assertEqual(vis_auto_connect.ax.get_xlim(), (0.3, 1.2)) # The function connects visualizer to provided query object port. vis_port_connect = ConnectPlanarSceneGraphVisualizer( builder=builder, scene_graph=scene_graph, output_port=scene_graph.get_query_output_port()) vis_port_connect.set_name("vis_port_connect") self.assertIsInstance(vis_port_connect, PlanarSceneGraphVisualizer) diagram = builder.Build() diagram_context = diagram.CreateDefaultContext() vis_auto_connect_context = diagram.GetMutableSubsystemContext( vis_auto_connect, diagram_context) vis_port_connect_context = diagram.GetMutableSubsystemContext( vis_port_connect, diagram_context) # Note: we can't simply call diagram.Publish(diagram_context) because # the visualizer isn't set to be visible; as such, no drawing work # will be done. vis_auto_connect.draw(vis_auto_connect_context) vis_port_connect.draw(vis_port_connect_context)
0
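A condensed sketch tying the visualizer tests together: register one procedural box on a MultibodyPlant, connect the visualizer with ConnectPlanarSceneGraphVisualizer, and draw a single frame explicitly (as the tests do, since the visualizer is not set visible). The box dimensions and the 0.1 s horizon are illustrative choices.

import numpy as np

from pydrake.geometry import Box
from pydrake.math import RigidTransform
from pydrake.multibody.plant import AddMultibodyPlantSceneGraph
from pydrake.systems.analysis import Simulator
from pydrake.systems.framework import DiagramBuilder
from pydrake.systems.planar_scenegraph_visualizer import (
    ConnectPlanarSceneGraphVisualizer)

builder = DiagramBuilder()
plant, scene_graph = AddMultibodyPlantSceneGraph(builder, 0.0)
box = plant.AddRigidBody("box")
plant.WeldFrames(plant.world_frame(), box.body_frame(), RigidTransform())
plant.RegisterVisualGeometry(
    box, RigidTransform.Identity(), Box(1., 2., 3.), "box_vis",
    np.array([0.5, 0.5, 0.5, 1.]))
plant.Finalize()

# Auto-connects to the scene_graph query output port.
visualizer = ConnectPlanarSceneGraphVisualizer(
    builder=builder, scene_graph=scene_graph)
diagram = builder.Build()
diagram_context = diagram.CreateDefaultContext()

simulator = Simulator(diagram, diagram_context)
simulator.AdvanceTo(0.1)
vis_context = diagram.GetMutableSubsystemContext(visualizer, diagram_context)
visualizer.draw(vis_context)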
/home/johnshepherd/drake/bindings/pydrake/systems
/home/johnshepherd/drake/bindings/pydrake/systems/test/general_test.py
# -*- coding: utf-8 -*- import pydrake.systems.framework import copy import gc from textwrap import dedent import unittest import numpy as np from pydrake.autodiffutils import AutoDiffXd from pydrake.common import RandomGenerator from pydrake.common.test_utilities import numpy_compare from pydrake.common.value import AbstractValue, Value from pydrake.examples import PendulumPlant, RimlessWheel from pydrake.symbolic import Expression from pydrake.systems.analysis import ( GetIntegrationSchemes, IntegratorBase, IntegratorBase_, PrintSimulatorStatistics, ResetIntegratorFromFlags, RungeKutta2Integrator, Simulator, Simulator_, ) from pydrake.systems.framework import ( BasicVector, BasicVector_, CacheEntry, ContextBase, Context, Context_, ContinuousState, ContinuousState_, Diagram, Diagram_, DiagramBuilder, DiagramBuilder_, DiscreteUpdateEvent, DiscreteUpdateEvent_, DiscreteValues, DiscreteValues_, Event, Event_, EventStatus, InputPort, InputPort_, InputPortIndex, kUseDefaultName, LeafContext, LeafContext_, LeafSystem, LeafSystem_, OutputPort, OutputPort_, OutputPortIndex, Parameters, Parameters_, PeriodicEventData, PublishEvent, PublishEvent_, State, State_, Subvector, Subvector_, Supervector, Supervector_, System, System_, SystemVisitor, SystemBase, SystemOutput, SystemOutput_, VectorBase, VectorBase_, TriggerType, VectorSystem, VectorSystem_, ) from pydrake.systems.primitives import ( Adder, Adder_, ConstantValueSource, ConstantVectorSource, ConstantVectorSource_, Integrator, LinearSystem, PassThrough, PassThrough_, ZeroOrderHold, ) # TODO(eric.cousineau): The scope of this test file and `custom_test.py` # is poor. Move these tests into `framework_test` and `analysis_test`, and # ensure that the tests reflect this, even if there is some coupling. class TestGeneral(unittest.TestCase): def _check_instantiations( self, template, default_cls, supports_symbolic=True): self.assertIs(template[None], default_cls) self.assertIs(template[float], default_cls) self.assertIsNot(template[AutoDiffXd], default_cls) if supports_symbolic: self.assertIsNot(template[Expression], default_cls) def _compare_system_instances(self, lhs, rhs): # Compares two different scalar type instantiation instances of a # system. self.assertEqual(lhs.num_input_ports(), rhs.num_input_ports()) self.assertEqual( lhs.num_output_ports(), rhs.num_output_ports()) for i in range(lhs.num_input_ports()): lhs_port = lhs.get_input_port(i) rhs_port = rhs.get_input_port(i) self.assertEqual(lhs_port.size(), rhs_port.size()) for i in range(lhs.num_output_ports()): lhs_port = lhs.get_output_port(i) rhs_port = rhs.get_output_port(i) self.assertEqual(lhs_port.size(), rhs_port.size()) def test_system_base_api(self): # Test a system with a different number of inputs from outputs. 
system = Adder(3, 10) self.assertIsInstance(system, SystemBase) self.assertEqual( system.GetSystemType(), "drake::systems::Adder<double>") system.set_name(name="adder") self.assertEqual(system.get_name(), "adder") self.assertEqual(system.GetSystemName(), "adder") self.assertEqual(system.GetSystemPathname(), "::adder") self.assertEqual(system.num_input_ports(), 3) self.assertEqual(system.num_output_ports(), 1) self.assertEqual(system.num_continuous_states(), 0) self.assertEqual(system.num_discrete_state_groups(), 0) self.assertEqual(system.num_abstract_states(), 0) self.assertEqual(system.implicit_time_derivatives_residual_size(), 0) self.assertTrue(system.HasInputPort("u1")) u1 = system.GetInputPort("u1") self.assertEqual(u1.get_name(), "u1") self.assertIn("u1", u1.GetFullDescription()) self.assertEqual(u1.get_index(), 1) self.assertEqual(u1.size(), 10) self.assertIsNotNone(u1.ticket()) self.assertIsInstance(u1.Allocate(), Value[BasicVector]) self.assertIs(u1.get_system(), system) self.assertTrue(system.HasOutputPort("sum")) y = system.GetOutputPort("sum") self.assertEqual(y.get_name(), "sum") self.assertEqual(y.get_index(), 0) self.assertIsInstance(y.Allocate(), Value[BasicVector]) self.assertIs(y.get_system(), system) cache_entry = y.cache_entry() self.assertFalse(cache_entry.is_disabled_by_default()) y.disable_caching_by_default() self.assertTrue(cache_entry.is_disabled_by_default()) self.assertEqual(y, system.get_output_port()) # TODO(eric.cousineau): Consolidate the main API tests for `System` # to this test point. def test_context_base_api(self): system = Adder(3, 10) context = system.AllocateContext() self.assertIsInstance(context, ContextBase) self.assertEqual(context.num_input_ports(), 3) self.assertEqual(context.num_output_ports(), 1) context.DisableCaching() context.EnableCaching() context.SetAllCacheEntriesOutOfDate() context.FreezeCache() self.assertTrue(context.is_cache_frozen()) context.UnfreezeCache() self.assertFalse(context.is_cache_frozen()) def test_context_api(self): system = Adder(3, 10) context = system.AllocateContext() self.assertIsInstance( context.get_continuous_state(), ContinuousState) self.assertIsInstance( context.get_mutable_continuous_state(), ContinuousState) self.assertIsInstance( context.get_continuous_state_vector(), VectorBase) self.assertIsInstance( context.get_mutable_continuous_state_vector(), VectorBase) system.SetDefaultContext(context=context) # Check random context method. system.SetRandomContext(context=context, generator=RandomGenerator()) context = system.CreateDefaultContext() self.assertIsInstance( context.get_continuous_state(), ContinuousState) self.assertIsInstance( context.get_mutable_continuous_state(), ContinuousState) self.assertIsInstance( context.get_continuous_state_vector(), VectorBase) self.assertIsInstance( context.get_mutable_continuous_state_vector(), VectorBase) self.assertTrue(context.is_stateless()) self.assertFalse(context.has_only_continuous_state()) self.assertFalse(context.has_only_discrete_state()) self.assertEqual(context.num_total_states(), 0) # TODO(eric.cousineau): Consolidate main API tests for `Context` here. # Test methods with two scalar types. 
for T in [float, AutoDiffXd, Expression]: systemT = Adder_[T](3, 10) contextT = systemT.CreateDefaultContext() for U in [float, AutoDiffXd, Expression]: systemU = Adder_[U](3, 10) contextU = systemU.CreateDefaultContext() contextU.SetTime(0.5) contextT.SetStateAndParametersFrom(contextU) contextT.SetTimeStateAndParametersFrom(contextU) if T == float: self.assertEqual(contextT.get_time(), 0.5) elif T == AutoDiffXd: self.assertEqual(contextT.get_time().value(), 0.5) else: self.assertEqual(contextT.get_time().Evaluate(), 0.5) pendulum = PendulumPlant() context = pendulum.CreateDefaultContext() self.assertEqual(context.num_numeric_parameter_groups(), 1) self.assertEqual(pendulum.num_numeric_parameter_groups(), 1) self.assertTrue( context.get_parameters().get_numeric_parameter(0) is context.get_numeric_parameter(index=0)) self.assertTrue( context.get_mutable_parameters().get_mutable_numeric_parameter( 0) is context.get_mutable_numeric_parameter(index=0)) self.assertEqual(context.num_abstract_parameters(), 0) self.assertEqual(pendulum.num_numeric_parameter_groups(), 1) # TODO(russt): Bind _Declare*Parameter or find an example with an # abstract parameter to actually call this method. self.assertTrue(hasattr(context, "get_abstract_parameter")) self.assertTrue(hasattr(context, "get_mutable_abstract_parameter")) x = np.array([0.1, 0.2]) context.SetContinuousState(x) np.testing.assert_equal( context.get_continuous_state().CopyToVector(), x) np.testing.assert_equal( context.get_continuous_state_vector().CopyToVector(), x) context.SetTimeAndContinuousState(0.3, 2*x) np.testing.assert_equal(context.get_time(), 0.3) np.testing.assert_equal( context.get_continuous_state_vector().CopyToVector(), 2*x) self.assertNotEqual(pendulum.EvalPotentialEnergy(context=context), 0) self.assertNotEqual(pendulum.EvalKineticEnergy(context=context), 0) # RimlessWheel has a single discrete variable and a bool abstract # variable. rimless = RimlessWheel() context = rimless.CreateDefaultContext() x = np.array([1.125]) context.SetDiscreteState(xd=2 * x) np.testing.assert_equal( context.get_discrete_state_vector().CopyToVector(), 2 * x) context.SetDiscreteState(group_index=0, xd=3 * x) np.testing.assert_equal( context.get_discrete_state_vector().CopyToVector(), 3 * x) # Just verify that the third overload is present. context.SetDiscreteState(context.get_discrete_state()) def check_abstract_value_zero(context, expected_value): # Check through Context, State, and AbstractValues APIs. self.assertEqual(context.get_abstract_state(index=0).get_value(), expected_value) self.assertEqual(context.get_abstract_state().get_value( index=0).get_value(), expected_value) self.assertEqual(context.get_state().get_abstract_state() .get_value(index=0).get_value(), expected_value) context.SetAbstractState(index=0, value=True) check_abstract_value_zero(context, True) context.SetAbstractState(index=0, value=False) check_abstract_value_zero(context, False) value = context.get_mutable_state().get_mutable_abstract_state()\ .get_mutable_value(index=0) value.set_value(True) check_abstract_value_zero(context, True) def test_event_api(self): # TriggerType - existence check. TriggerType.kUnknown TriggerType.kInitialization TriggerType.kForced TriggerType.kTimed TriggerType.kPeriodic TriggerType.kPerStep TriggerType.kWitness # PublishEvent. # TODO(eric.cousineau): Test other event types when it is useful to # expose them. 
def callback(context, event): pass event = PublishEvent(callback=callback) self.assertIsInstance(event, Event) event = PublishEvent( trigger_type=TriggerType.kInitialization, callback=callback) self.assertIsInstance(event, Event) self.assertEqual(event.get_trigger_type(), TriggerType.kInitialization) def system_callback(system, context, event): pass event = PublishEvent(system_callback=system_callback) self.assertIsInstance(event, Event) event = PublishEvent( trigger_type=TriggerType.kInitialization, system_callback=system_callback) self.assertIsInstance(event, Event) self.assertEqual(event.get_trigger_type(), TriggerType.kInitialization) # Simple discrete-time system. system1 = LinearSystem(A=[1], B=[1], C=[1], D=[1], time_period=0.1) periodic_data = system1.GetUniquePeriodicDiscreteUpdateAttribute() self.assertIsInstance(periodic_data, PeriodicEventData) periodic_data.period_sec() periodic_data.offset_sec() self.assertTrue(copy.copy(periodic_data) is not periodic_data) is_diff_eq, period = system1.IsDifferenceEquationSystem() self.assertTrue(is_diff_eq) self.assertFalse(system1.IsDifferentialEquationSystem()) self.assertEqual(period, periodic_data.period_sec()) context = system1.CreateDefaultContext() system1.get_input_port(0).FixValue(context, 0.0) updated_discrete = system1.EvalUniquePeriodicDiscreteUpdate(context) self.assertEqual(updated_discrete.num_groups(), context.get_discrete_state().num_groups()) # Simple continuous-time system. system2 = LinearSystem(A=[1], B=[1], C=[1], D=[1], time_period=0.0) periodic_data = system2.GetUniquePeriodicDiscreteUpdateAttribute() self.assertIsNone(periodic_data) is_diff_eq, period = system2.IsDifferenceEquationSystem() self.assertFalse(is_diff_eq) self.assertTrue(system2.IsDifferentialEquationSystem()) def test_continuous_state_api(self): self.assertEqual(ContinuousState().size(), 0) self.assertEqual(ContinuousState(state=BasicVector(2)).size(), 2) state = ContinuousState(state=BasicVector(np.arange(6)), num_q=3, num_v=2, num_z=1) state_clone = state.Clone() self.assertTrue(state_clone is not state) self.assertEqual(state.size(), 6) self.assertEqual(state.num_q(), 3) self.assertEqual(state.num_v(), 2) self.assertEqual(state.num_z(), 1) self.assertEqual(state[1], 1.0) state[1] = 11. self.assertEqual(state[1], 11.) 
self.assertEqual(state.get_vector().size(), 6) self.assertEqual(state.get_mutable_vector().size(), 6) self.assertEqual(state.get_generalized_position().size(), 3) self.assertEqual(state.get_mutable_generalized_position().size(), 3) self.assertEqual(state.get_generalized_velocity().size(), 2) self.assertEqual(state.get_mutable_generalized_velocity().size(), 2) self.assertEqual(state.get_misc_continuous_state().size(), 1) self.assertEqual(state.get_mutable_misc_continuous_state().size(), 1) state.SetFrom(ContinuousState(BasicVector(6), 3, 2, 1)) state.SetFromVector(value=3*np.arange(6)) self.assertEqual(len(state.CopyToVector()), 6) @numpy_compare.check_all_types def test_discrete_value_api(self, T): DiscreteValues = DiscreteValues_[T] BasicVector = BasicVector_[T] cast = np.vectorize(T) self.assertEqual(DiscreteValues().num_groups(), 0) discrete_values = DiscreteValues(data=[BasicVector(1), BasicVector(2)]) self.assertEqual(discrete_values.num_groups(), 2) x = cast(np.array([1.23, 4.56])) discrete_values.set_value(1, x) numpy_compare.assert_equal(discrete_values.get_value(index=1), x) if T == float: numpy_compare.assert_equal( discrete_values.get_mutable_value(index=1), x) else: with self.assertRaises(RuntimeError): discrete_values.get_mutable_value(index=1) discrete_values = DiscreteValues(datum=BasicVector(np.arange(3))) self.assertEqual(discrete_values.size(), 3) discrete_values_clone = discrete_values.Clone() self.assertTrue(discrete_values_clone is not discrete_values) self.assertEqual(len(discrete_values.get_data()), 1) self.assertEqual(discrete_values.get_vector(index=0).size(), 3) self.assertEqual(discrete_values.get_mutable_vector(index=0).size(), 3) x = cast(np.array([1., 3., 4.])) discrete_values.set_value(x) numpy_compare.assert_equal(discrete_values.value(index=0), x) numpy_compare.assert_equal(discrete_values.get_value(), x) if T == float: numpy_compare.assert_equal( discrete_values.get_mutable_value(), x) discrete_values[1] = 5. numpy_compare.assert_equal(discrete_values[1], T(5.)) if T == float: vector = discrete_values.get_mutable_value() vector[0] = 2.3 self.assertEqual(discrete_values[0], 2.3) discrete_values.SetFrom(DiscreteValues_[float](BasicVector_[float](3))) def test_instantiations(self): # Quick check of instantiations for given types. # N.B. These checks are ordered according to their binding definitions # in the corresponding source file. 
# `analysis_py.cc` self._check_instantiations(IntegratorBase_, IntegratorBase, False) self._check_instantiations(Simulator_, Simulator, False) # `framework_py_semantics.cc` self._check_instantiations(Context_, Context) for T in [float, AutoDiffXd, Expression]: self.assertTrue(issubclass(Context_[T], ContextBase), repr(T)) self._check_instantiations(LeafContext_, LeafContext) self._check_instantiations(Event_, Event) self._check_instantiations(PublishEvent_, PublishEvent) self._check_instantiations(DiscreteUpdateEvent_, DiscreteUpdateEvent) self._check_instantiations(DiagramBuilder_, DiagramBuilder) self._check_instantiations(OutputPort_, OutputPort) self._check_instantiations(SystemOutput_, SystemOutput) self._check_instantiations(InputPort_, InputPort) self._check_instantiations(Parameters_, Parameters) self._check_instantiations(State_, State) self._check_instantiations(ContinuousState_, ContinuousState) self._check_instantiations(DiscreteValues_, DiscreteValues) # `framework_py_systems.cc` self._check_instantiations(System_, System) self._check_instantiations(LeafSystem_, LeafSystem) self._check_instantiations(Diagram_, Diagram) self._check_instantiations(VectorSystem_, VectorSystem) # `framework_py_values.cc` self._check_instantiations(VectorBase_, VectorBase) self._check_instantiations(BasicVector_, BasicVector) self._check_instantiations(Supervector_, Supervector) self._check_instantiations(Subvector_, Subvector) def test_scalar_type_conversion(self): float_system = Adder(1, 1) float_context = float_system.CreateDefaultContext() float_system.get_input_port(0).FixValue(float_context, 1.) for T in [float, AutoDiffXd, Expression]: system = Adder_[T](1, 1) # N.B. Current scalar conversion does not permit conversion to and # from the same type. if T != float: methods = [Adder_[T].ToScalarType[float], Adder_[T].ToScalarTypeMaybe[float]] for method in methods: system_float = method(system) self.assertIsInstance(system_float, System_[float]) self._compare_system_instances(system, system_float) if T != AutoDiffXd: methods = [Adder_[T].ToAutoDiffXd, Adder_[T].ToAutoDiffXdMaybe, Adder_[T].ToScalarType[AutoDiffXd], Adder_[T].ToScalarTypeMaybe[AutoDiffXd]] for method in methods: system_ad = method(system) self.assertIsInstance(system_ad, System_[AutoDiffXd]) self._compare_system_instances(system, system_ad) if T != Expression: methods = [Adder_[T].ToSymbolic, Adder_[T].ToSymbolicMaybe, Adder_[T].ToScalarType[Expression], Adder_[T].ToScalarTypeMaybe[Expression]] for method in methods: system_sym = method(system) self.assertIsInstance(system_sym, System_[Expression]) self._compare_system_instances(system, system_sym) context = system.CreateDefaultContext() system.FixInputPortsFrom(other_system=float_system, other_context=float_context, target_context=context) u = system.get_input_port(0).Eval(context) self.assertEqual(len(u), 1) if T == float: self.assertEqual(u[0], 1.) elif T == AutoDiffXd: self.assertEqual(u[0].value(), 1.) else: self.assertEqual(u[0].Evaluate(), 1.) @numpy_compare.check_all_types def test_port_output(self, T): # TODO(eric.cousineau): Find better location for this testing. system = ConstantVectorSource_[T]([1.]) context = system.CreateDefaultContext() # Check number of output ports and value for a given context. 
output = system.AllocateOutput() self.assertEqual(output.num_ports(), 1) system.CalcOutput(context=context, outputs=output) if T == float: value = output.get_vector_data(0).get_value() self.assertTrue(np.allclose([1], value)) elif T == AutoDiffXd: value = output.get_vector_data(0)._get_value_copy() # TODO(eric.cousineau): Define `isfinite` ufunc, if # possible, to use for `np.allclose`. self.assertEqual(value.shape, (1,)) self.assertEqual(value[0], AutoDiffXd(1.)) def test_copy(self): # Copy a context using `deepcopy` or `clone`. system = ConstantVectorSource([1]) context = system.CreateDefaultContext() context_copies = [ copy.copy(context), copy.deepcopy(context), context.Clone(), ] # TODO(eric.cousineau): Compare copies. for context_copy in context_copies: self.assertTrue(context_copy is not context) def test_str(self): """ Tests str() methods. See ./value_test.py for testing str() and repr() specific to BasicVector. """ # Context. integrator = Integrator(3) integrator.set_name("integrator") context = integrator.CreateDefaultContext() # N.B. This is only to show behavior of C++ string formatting in # Python. It is OK to update this when the upstream C++ code changes. self.assertEqual( str(context), dedent("""\ ::integrator Context --------------------- Time: 0 States: 3 continuous states 0 0 0 """), ) # TODO(eric.cousineau): Add more. def test_diagram_simulation(self): # TODO(eric.cousineau): Move this to `analysis_test.py`. # Similar to: //systems/framework:diagram_test, ExampleDiagram size = 3 builder = DiagramBuilder() self.assertTrue(builder.empty()) adder0 = builder.AddSystem(Adder(2, size)) adder0.set_name("adder0") self.assertFalse(builder.empty()) adder1 = builder.AddSystem(Adder(2, size)) adder1.set_name("adder1") integrator = builder.AddSystem(Integrator(size)) integrator.set_name("integrator") self.assertEqual( builder.GetSystems(), [adder0, adder1, integrator]) self.assertEqual( builder.GetMutableSystems(), [adder0, adder1, integrator]) builder.Connect(adder0.get_output_port(0), adder1.get_input_port(0)) builder.Connect(adder1.get_output_port(0), integrator.get_input_port(0)) # Exercise naming variants. builder.ExportInput(adder0.get_input_port(0)) builder.ExportInput(adder0.get_input_port(1), kUseDefaultName) builder.ExportInput(adder1.get_input_port(1), "third_input") builder.ExportOutput(integrator.get_output_port(0), "result") diagram = builder.Build() self.assertEqual(adder0.get_name(), "adder0") self.assertTrue(diagram.HasSubsystemNamed("adder0")) self.assertEqual(diagram.GetSubsystemByName("adder0"), adder0) self.assertEqual( diagram.GetSystems(), [adder0, adder1, integrator]) # TODO(eric.cousineau): Figure out unicode handling if needed. # See //systems/framework/test/diagram_test.cc:349 (sha: bc84e73) # for an example name. diagram.set_name("test_diagram") simulator = Simulator(diagram) context = simulator.get_mutable_context() # Create and attach inputs. # TODO(eric.cousineau): Not seeing any assertions being printed if no # inputs are connected. Need to check this behavior. input0 = np.array([0.1, 0.2, 0.3]) diagram.get_input_port(0).FixValue(context, input0) input1 = np.array([0.02, 0.03, 0.04]) diagram.get_input_port(1).FixValue(context, input1) # Test the BasicVector overload. input2 = BasicVector([0.003, 0.004, 0.005]) diagram.get_input_port(2).FixValue(context, input2) # Initialize integrator states. 
integrator_xc = ( diagram.GetMutableSubsystemState(integrator, context) .get_mutable_continuous_state().get_vector()) integrator_xc.SetFromVector([0, 1, 2]) simulator.Initialize() # Simulate briefly, and take full-context snapshots at intermediate # points. n = 6 times = np.linspace(0, 1, n) context_log = [] for t in times: simulator.AdvanceTo(t) # Record snapshot of *entire* context. context_log.append(context.Clone()) # Test binding for PrintSimulatorStatistics PrintSimulatorStatistics(simulator) xc_initial = np.array([0, 1, 2]) xc_final = np.array([0.123, 1.234, 2.345]) for i, context_i in enumerate(context_log): t = times[i] self.assertEqual(context_i.get_time(), t) xc = context_i.get_continuous_state_vector().CopyToVector() xc_expected = (float(i) / (n - 1) * (xc_final - xc_initial) + xc_initial) self.assertTrue(np.allclose(xc, xc_expected)) def test_simulator_context_manipulation(self): # TODO(eric.cousineau): Move this to `analysis_test.py`. system = ConstantVectorSource([1]) # Use default-constructed context. simulator = Simulator(system) self.assertTrue(simulator.has_context()) context_default = simulator.get_mutable_context() self.assertIsInstance(context_default, Context) # WARNING: Once we call `simulator.reset_context()`, it will delete the # context it currently owns, which is `context_default` in this case. # BE CAREFUL IN SITUATIONS LIKE THIS! # TODO(eric.cousineau): Bind `release_context()`, or migrate context # usage to use `shared_ptr`. context = system.CreateDefaultContext() simulator.reset_context(context) self.assertIs(context, simulator.get_mutable_context()) # WARNING: This will also invalidate `context`. Be careful! simulator.reset_context(None) self.assertFalse(simulator.has_context()) def test_simulator_flags(self): # TODO(eric.cousineau): Move this to `analysis_test.py`. 
system = ConstantVectorSource([1]) simulator = Simulator(system) ResetIntegratorFromFlags(simulator, "runge_kutta2", 0.00123) integrator = simulator.get_integrator() self.assertEqual(type(integrator), RungeKutta2Integrator) self.assertEqual(integrator.get_maximum_step_size(), 0.00123) self.assertGreater(len(GetIntegrationSchemes()), 5) def test_abstract_output_port_eval(self): model_value = Value("Hello World") source = ConstantValueSource(copy.copy(model_value)) context = source.CreateDefaultContext() output_port = source.get_output_port(0) value = output_port.Eval(context) self.assertEqual(type(value), type(model_value.get_value())) self.assertEqual(value, model_value.get_value()) value_abs = output_port.EvalAbstract(context) self.assertEqual(type(value_abs), type(model_value)) self.assertEqual(value_abs.get_value(), model_value.get_value()) def test_vector_output_port_eval(self): np_value = np.array([1., 2., 3.]) model_value = Value(BasicVector(np_value)) source = ConstantVectorSource(np_value) context = source.CreateDefaultContext() output_port = source.get_output_port(0) value = output_port.Eval(context) self.assertEqual(type(value), np.ndarray) np.testing.assert_equal(value, np_value) value_abs = output_port.EvalAbstract(context) self.assertEqual(type(value_abs), type(model_value)) self.assertEqual(type(value_abs.get_value().get_value()), np.ndarray) np.testing.assert_equal(value_abs.get_value().get_value(), np_value) basic = output_port.EvalBasicVector(context) self.assertEqual(type(basic), BasicVector) self.assertEqual(type(basic.get_value()), np.ndarray) np.testing.assert_equal(basic.get_value(), np_value) def test_abstract_input_port_eval(self): model_value = Value("Hello World") system = PassThrough(copy.copy(model_value)) context = system.CreateDefaultContext() fixed = system.get_input_port(0).FixValue(context, copy.copy(model_value)) self.assertIsInstance(fixed.GetMutableData(), AbstractValue) input_port = system.get_input_port(0) self.assertTrue(input_port.HasValue(context)) value = input_port.Eval(context) self.assertEqual(type(value), type(model_value.get_value())) self.assertEqual(value, model_value.get_value()) value_abs = input_port.EvalAbstract(context) self.assertEqual(type(value_abs), type(model_value)) self.assertEqual(value_abs.get_value(), model_value.get_value()) def test_vector_input_port_eval(self): np_value = np.array([1., 2., 3.]) model_value = Value(BasicVector(np_value)) system = PassThrough(len(np_value)) context = system.CreateDefaultContext() system.get_input_port(0).FixValue(context, np_value) input_port = system.get_input_port(0) self.assertTrue(input_port.HasValue(context)) value = input_port.Eval(context) self.assertEqual(type(value), np.ndarray) np.testing.assert_equal(value, np_value) value_abs = input_port.EvalAbstract(context) self.assertEqual(type(value_abs), type(model_value)) self.assertEqual(type(value_abs.get_value().get_value()), np.ndarray) np.testing.assert_equal(value_abs.get_value().get_value(), np_value) basic = input_port.EvalBasicVector(context) self.assertEqual(type(basic), BasicVector) self.assertEqual(type(basic.get_value()), np.ndarray) np.testing.assert_equal(basic.get_value(), np_value) def test_abstract_input_port_fix_string(self): model_value = Value("") system = PassThrough(copy.copy(model_value)) context = system.CreateDefaultContext() input_port = system.get_input_port(0) # Fix to a literal. 
input_port.FixValue(context, "Alpha") value = input_port.Eval(context) self.assertEqual(type(value), type(model_value.get_value())) self.assertEqual(value, "Alpha") # Fix to a type-erased string. input_port.FixValue(context, Value("Bravo")) value = input_port.Eval(context) self.assertEqual(type(value), type(model_value.get_value())) self.assertEqual(value, "Bravo") # Fix to a non-string. with self.assertRaises(RuntimeError): # A RuntimeError occurs when the Context detects that the # type-erased Value objects are incompatible. input_port.FixValue(context, Value(1)) with self.assertRaises(TypeError): # A TypeError occurs when pybind Value.set_value cannot match any # overload for how to assign the argument into the erased storage. input_port.FixValue(context, 1) with self.assertRaises(TypeError): input_port.FixValue(context, np.array([2.])) def test_abstract_input_port_fix_object(self): # The port type is py::object, not any specific C++ type. model_value = Value(object()) system = PassThrough(copy.copy(model_value)) context = system.CreateDefaultContext() input_port = system.get_input_port(0) # Fix to a type-erased py::object. input_port.FixValue(context, Value(object())) # Fix to an int. input_port.FixValue(context, 1) value = input_port.Eval(context) self.assertEqual(type(value), int) self.assertEqual(value, 1) # Fixing to an explicitly-typed Value instantiation is an error ... with self.assertRaises(RuntimeError): input_port.FixValue(context, Value("string")) # ... but implicit typing works just fine. input_port.FixValue(context, "string") value = input_port.Eval(context) self.assertEqual(type(value), str) self.assertEqual(value, "string") @numpy_compare.check_all_types def test_vector_input_port_fix(self, T): np_zeros = np.array([0.]) system = PassThrough_[T](len(np_zeros)) context = system.CreateDefaultContext() input_port = system.get_input_port(0) # Fix to a scalar. input_port.FixValue(context, T(1.)) value = input_port.Eval(context) self.assertEqual(type(value), np.ndarray) numpy_compare.assert_equal(value, np.array([T(1.)])) # Fix to an ndarray. input_port.FixValue(context, np.array([T(2.)])) value = input_port.Eval(context) self.assertEqual(type(value), np.ndarray) numpy_compare.assert_equal(value, np.array([T(2.)])) # Fix to a BasicVector. input_port.FixValue(context, BasicVector_[T]([3.])) value = input_port.Eval(context) self.assertEqual(type(value), np.ndarray) numpy_compare.assert_equal(value, np.array([T(3.)])) # Fix to a type-erased BasicVector. input_port.FixValue(context, Value(BasicVector_[T]([4.]))) value = input_port.Eval(context) self.assertEqual(type(value), np.ndarray) numpy_compare.assert_equal(value, np.array([T(4.)])) # Fix to wrong-sized vector. with self.assertRaises(RuntimeError): input_port.FixValue(context, np.array([0., 1.])) with self.assertRaises(RuntimeError): input_port.FixValue(context, Value(BasicVector_[T]([0., 1.]))) # Fix to a non-vector. with self.assertRaises(TypeError): # A TypeError occurs when pybind Value.set_value cannot match any # overload for how to assign the argument into the erased storage. input_port.FixValue(context, "string") with self.assertRaises(RuntimeError): # A RuntimeError occurs when the Context detects that the # type-erased Value objects are incompatible. 
input_port.FixValue(context, Value("string")) @numpy_compare.check_all_types def test_allocate_input_vector(self, T): system = PassThrough_[T](1) value = system.AllocateInputVector(system.get_input_port()) self.assertIsInstance(value, BasicVector_[T]) @numpy_compare.check_all_types def test_allocate_input_abstract(self, T): system = PassThrough_[T](Value("a")) value = system.AllocateInputAbstract(system.get_input_port()) self.assertIsInstance(value, Value[str]) def test_event_status(self): system = ZeroOrderHold(period_sec=0.1, vector_size=1) # Existence check. EventStatus.Severity.kDidNothing EventStatus.Severity.kSucceeded EventStatus.Severity.kReachedTermination EventStatus.Severity.kFailed self.assertIsInstance(EventStatus.DidNothing(), EventStatus) self.assertIsInstance(EventStatus.Succeeded(), EventStatus) status = EventStatus.ReachedTermination(system=system, message="done") # Check API. self.assertIsInstance(status, EventStatus) self.assertEqual( status.severity(), EventStatus.Severity.kReachedTermination) self.assertIs(status.system(), system) self.assertEqual(status.message(), "done") self.assertIsInstance( status.KeepMoreSevere(candidate=status), EventStatus) status = EventStatus.Failed(system=system, message="failed") self.assertIsInstance(status, EventStatus) def test_diagram_builder_remove(self): builder = DiagramBuilder() source = builder.AddSystem(ConstantVectorSource([0.0])) adder = builder.AddSystem(Adder(1, 1)) builder.ExportOutput(source.get_output_port()) builder.RemoveSystem(adder) # N.B. Deletes 'adder'; don't use after! diagram = builder.Build() self.assertEqual(diagram.num_input_ports(), 0) self.assertEqual(diagram.num_output_ports(), 1) def test_diagram_fan_out(self): builder = DiagramBuilder() adder = builder.AddSystem(Adder(7, 1)) adder.set_name("adder") builder.ExportOutput(adder.get_output_port()) in0_index = builder.ExportInput(adder.get_input_port(0), "in0") in1_index = builder.ExportInput(adder.get_input_port(1), "in1") # Exercise ConnectInput overload bindings, with and without argument # names. builder.ConnectInput(in0_index, adder.get_input_port(2)) builder.ConnectInput("in1", adder.get_input_port(3)) builder.ConnectInput(diagram_port_name="in0", input=adder.get_input_port(4)) builder.ConnectInput(diagram_port_index=in1_index, input=adder.get_input_port(5)) builder.ConnectToSame(exemplar=adder.get_input_port(2), dest=adder.get_input_port(6)) diagram = builder.Build() # Check the desired input topology is in the graph. graph = diagram.GetGraphvizString() self.assertRegex(graph, ":0:e -> .*:u0:w") self.assertRegex(graph, ":1:e -> .*:u1:w") self.assertRegex(graph, ":0:e -> .*:u2:w") self.assertRegex(graph, ":1:e -> .*:u3:w") self.assertRegex(graph, ":0:e -> .*:u4:w") self.assertRegex(graph, ":1:e -> .*:u5:w") self.assertRegex(graph, ":0:e -> .*:u6:w") # Check that fragment struct is a subset of the whole graph. fragment_struct = diagram.GetGraphvizFragment() self.assertEqual(len(fragment_struct.input_ports), 2) self.assertEqual(len(fragment_struct.output_ports), 1) fragment_text = "".join(fragment_struct.fragments) self.assertGreater(len(fragment_text), 0) self.assertIn(fragment_text, graph) # Check that max_depth has an effect. 
small_graph = diagram.GetGraphvizString(max_depth=0) self.assertLess(len(small_graph), len(graph)) def test_diagram_api(self): def make_diagram(): builder = DiagramBuilder() self.assertTrue(builder.empty()) self.assertFalse(builder.already_built()) adder1 = builder.AddNamedSystem("adder1", Adder(2, 2)) adder2 = builder.AddNamedSystem("adder2", Adder(1, 2)) builder.Connect(adder1.get_output_port(), adder2.get_input_port()) self.assertTrue( builder.IsConnectedOrExported(port=adder2.get_input_port())) builder.ExportInput(adder1.get_input_port(0), "in0") builder.ExportInput(adder1.get_input_port(1), "in1") builder.ExportOutput(adder2.get_output_port(), "out") self.assertEqual(builder.num_input_ports(), 2) self.assertEqual(builder.num_output_ports(), 1) self.assertTrue(builder.HasSubsystemNamed("adder1")) builder.GetSubsystemByName(name="adder1") builder.GetMutableSubsystemByName(name="adder2") self.assertEqual(len(builder.connection_map()), 1) diagram = builder.Build() return adder1, adder2, diagram adder1, adder2, diagram = make_diagram() connections = diagram.connection_map() self.assertIn((adder2, InputPortIndex(0)), connections) self.assertEqual(connections[(adder2, InputPortIndex(0))], (adder1, OutputPortIndex(0))) del adder1, adder2, diagram # To test keep-alive logic gc.collect() self.assertEqual(list(connections.keys())[0][0].get_name(), "adder2") adder1, adder2, diagram = make_diagram() in0_locators = diagram.GetInputPortLocators( port_index=InputPortIndex(0)) in1_locators = diagram.GetInputPortLocators( port_index=InputPortIndex(1)) self.assertEqual(in0_locators, [(adder1, InputPortIndex(0))]) self.assertEqual(in1_locators, [(adder1, InputPortIndex(1))]) del adder1, adder2, diagram # To test keep-alive logic gc.collect() self.assertEqual(in0_locators[0][0].get_name(), "adder1") adder1, adder2, diagram = make_diagram() out_locators = diagram.get_output_port_locator( port_index=OutputPortIndex(0)) self.assertEqual(out_locators, (adder2, OutputPortIndex(0))) del adder1, adder2, diagram # To test keep-alive logic gc.collect() self.assertEqual(out_locators[0].get_name(), "adder2") def test_add_named_system(self): builder = DiagramBuilder() adder1 = builder.AddNamedSystem("adder1", Adder(2, 3)) self.assertEqual(adder1.get_name(), "adder1") adder2 = builder.AddNamedSystem(name="adder2", system=Adder(5, 8)) self.assertEqual(adder2.get_name(), "adder2") def test_module_constants(self): self.assertEqual(repr(kUseDefaultName), "kUseDefaultName") def test_system_visitor(self): builder = DiagramBuilder() builder.AddNamedSystem("adder1", Adder(2, 2)) builder.AddNamedSystem("adder2", Adder(2, 2)) system = builder.Build() system.set_name("diagram") visited_systems = [] visited_diagrams = [] class MyVisitor(SystemVisitor): def VisitSystem(self, system): visited_systems.append(system.get_name()) def VisitDiagram(self, diagram): visited_diagrams.append(diagram.get_name()) for sys in diagram.GetSystems(): sys.Accept(self) visitor = MyVisitor() system.Accept(v=visitor) self.assertEqual(visited_systems, ["adder1", "adder2"]) self.assertEqual(visited_diagrams, ["diagram"])
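
The FixValue/Eval round-trips exercised above are also the everyday workflow
for probing a system outside of a unittest harness. A minimal sketch, using
only primitives already imported by this test:

    import numpy as np
    from pydrake.systems.primitives import Adder

    adder = Adder(2, 3)  # Two input ports, each of size 3.
    context = adder.CreateDefaultContext()
    adder.get_input_port(0).FixValue(context, np.array([1., 2., 3.]))
    adder.get_input_port(1).FixValue(context, np.array([10., 20., 30.]))
    print(adder.get_output_port(0).Eval(context))  # [11. 22. 33.]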
0
/home/johnshepherd/drake/bindings/pydrake/systems
/home/johnshepherd/drake/bindings/pydrake/systems/test/custom_test.py
# -*- coding: utf-8 -*- import copy import sys from types import SimpleNamespace import unittest import warnings import numpy as np from pydrake.autodiffutils import AutoDiffXd from pydrake.common.value import Value from pydrake.symbolic import Expression from pydrake.systems.analysis import ( Simulator, ) from pydrake.systems.framework import ( AbstractParameterIndex, AbstractStateIndex, BasicVector, BasicVector_, CacheEntry, CacheEntryValue, CacheIndex, Context, ContinuousStateIndex, DependencyTicket, Diagram, DiagramBuilder, DiscreteStateIndex, DiscreteValues, EventStatus, InputPortIndex, LeafSystem, LeafSystem_, NumericParameterIndex, PortDataType, PublishEvent, State, System, TriggerType, UnrestrictedUpdateEvent, ValueProducer, VectorSystem, WitnessFunctionDirection, kUseDefaultName, ) from pydrake.systems.primitives import ( Adder, ZeroOrderHold, ) from pydrake.systems.test.test_util import ( call_leaf_system_overrides, call_vector_system_overrides, ) from pydrake.common.test_utilities import numpy_compare def noop(*args, **kwargs): # When a callback is required for an interface, but not useful for testing. pass class CustomAdder(LeafSystem): # Reimplements `Adder`. def __init__(self, num_inputs, size): LeafSystem.__init__(self) for i in range(num_inputs): self.DeclareVectorInputPort( "input{}".format(i), size) self.DeclareVectorOutputPort("sum", size, self._calc_sum) def _calc_sum(self, context, sum_data): # @note This will NOT work if the scalar type is AutoDiff or symbolic, # since they are not stored densely. sum = sum_data.get_mutable_value() sum[:] = 0 for i in range(context.num_input_ports()): input_vector = self.EvalVectorInput(context=context, port_index=i) sum += input_vector.get_value() def DoGetGraphvizFragment(self, params): # N.B. We cannot use `header_lines.append(...)` here; the property # getter returns a _copy_ of the lines, not a _reference_. params.header_lines += ["hello=world"] params.options |= {"split": "I/O"} return super().DoGetGraphvizFragment(params) # TODO(eric.cousineau): Make this class work with custom scalar types once # referencing with custom dtypes lands. # WARNING: At present, dtype=object matrices are NOT well supported, and may # produce unexpected results (e.g. references not actually being respected). class CustomVectorSystem(VectorSystem): def __init__(self, is_discrete): # VectorSystem only supports pure Continuous or pure Discrete. # Dimensions: # 1 Input, 2 States, 3 Outputs. VectorSystem.__init__(self, 1, 3) self._is_discrete = is_discrete if self._is_discrete: self.DeclareDiscreteState(2) else: self.DeclareContinuousState(2) # Record calls for testing. self.has_called = [] def DoCalcVectorOutput(self, context, u, x, y): self.ValidateContext(context=context) y[:] = np.hstack([u, x]) self.has_called.append("output") def DoCalcVectorTimeDerivatives(self, context, u, x, x_dot): self.ValidateContext(context) x_dot[:] = x + u self.has_called.append("continuous") def DoCalcVectorDiscreteVariableUpdates(self, context, u, x, x_n): self.ValidateContext(context) x_n[:] = x + 2*u self.has_called.append("discrete") # Wraps `Adder`. class CustomDiagram(Diagram): # N.B. The CustomDiagram is used to unit test the DiagramBuilder.BuildInto # method. For pydrake users, this is not a good example. The best way in # pydrake to create a Diagram is DiagramBuilder.Build (as seen in the test # case named test_adder_simulation). 
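    # N.B. DiagramBuilder.BuildInto(self) (at the end of __init__ below)
    # transfers the builder's systems and connections into this Diagram
    # subclass; the builder must not be reused afterward.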
def __init__(self, num_inputs, size): Diagram.__init__(self) builder = DiagramBuilder() adder = Adder(num_inputs, size) builder.AddSystem(adder) builder.ExportOutput(adder.get_output_port(0)) for i in range(num_inputs): builder.ExportInput(adder.get_input_port(i)) builder.BuildInto(self) def DoGetGraphvizFragment(self, params): # N.B. We cannot use `header_lines.append(...)` here; the property # getter returns a _copy_ of the lines, not a _reference_. params.header_lines += ["meaning_of_life=42"] return super().DoGetGraphvizFragment(params) class TestCustom(unittest.TestCase): def _create_adder_system(self): system = CustomAdder(2, 3) return system def _fix_adder_inputs(self, system, context): self.assertEqual(context.num_input_ports(), 2) system.get_input_port(0).FixValue(context, [1, 2, 3]) system.get_input_port(1).FixValue(context, [4, 5, 6]) def test_diagram_adder(self): system = CustomDiagram(2, 3) self.assertEqual(system.GetSystemType(), f"{__name__}.CustomDiagram") self.assertEqual(system.num_input_ports(), 2) self.assertEqual(system.get_input_port(0).size(), 3) self.assertEqual(system.num_output_ports(), 1) self.assertEqual(system.get_output_port(0).size(), 3) def test_adder_execution(self): system = self._create_adder_system() self.assertEqual(system.GetSystemType(), f"{__name__}.CustomAdder") context = system.CreateDefaultContext() self.assertEqual(context.num_output_ports(), 1) self._fix_adder_inputs(system, context) output = system.AllocateOutput() self.assertEqual(output.num_ports(), 1) system.CalcOutput(context, output) value = output.get_vector_data(0).get_value() self.assertTrue(np.allclose([5, 7, 9], value)) def test_adder_simulation(self): builder = DiagramBuilder() adder = builder.AddSystem(self._create_adder_system()) adder.set_name("custom_adder") # Add ZOH so we can easily extract state. zoh = builder.AddSystem(ZeroOrderHold(0.1, 3)) zoh.set_name("zoh") builder.ExportInput(adder.get_input_port(0)) builder.ExportInput(adder.get_input_port(1)) builder.Connect(adder.get_output_port(0), zoh.get_input_port(0)) diagram = builder.Build() context = diagram.CreateDefaultContext() self._fix_adder_inputs(diagram, context) simulator = Simulator(diagram, context) simulator.Initialize() simulator.AdvanceTo(1) # Ensure that we have the outputs we want. 
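        # The ZOH samples the adder's output every 0.1s, so after advancing
        # to t=1 its held discrete state is the most recent sum, [5, 7, 9].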
value = (diagram.GetMutableSubsystemContext(zoh, context) .get_discrete_state_vector().get_value()) self.assertTrue(np.allclose([5, 7, 9], value)) def test_adder_graphviz(self): system = CustomAdder(2, 3) graph = system.GetGraphvizString() self.assertIn("hello=world", graph) self.assertIn("(split)", graph) def test_diagram_graphviz(self): system = CustomDiagram(2, 3) graph = system.GetGraphvizString() self.assertIn("meaning_of_life=42", graph) def test_leaf_system_well_known_tickets(self): for func in [ LeafSystem.accuracy_ticket, LeafSystem.all_input_ports_ticket, LeafSystem.all_parameters_ticket, LeafSystem.all_sources_except_input_ports_ticket, LeafSystem.all_sources_ticket, LeafSystem.all_state_ticket, LeafSystem.configuration_ticket, LeafSystem.ke_ticket, LeafSystem.kinematics_ticket, LeafSystem.nothing_ticket, LeafSystem.pa_ticket, LeafSystem.pc_ticket, LeafSystem.pe_ticket, LeafSystem.pn_ticket, LeafSystem.pnc_ticket, LeafSystem.q_ticket, LeafSystem.time_ticket, LeafSystem.v_ticket, LeafSystem.xa_ticket, LeafSystem.xc_ticket, LeafSystem.xcdot_ticket, LeafSystem.xd_ticket, LeafSystem.z_ticket]: self.assertIsInstance(func(), DependencyTicket, func) def test_leaf_system_per_item_tickets(self): dut = LeafSystem() dut.DeclareAbstractParameter(model_value=Value(1)) dut.DeclareAbstractState(model_value=Value(1)) dut.DeclareDiscreteState(1) dut.DeclareVectorInputPort("u0", BasicVector(1)) self.assertEqual(dut.DeclareVectorInputPort("u1", 2).size(), 2) dut.DeclareNumericParameter(model_vector=BasicVector(1)) for func, arg in [ (dut.abstract_parameter_ticket, AbstractParameterIndex(0)), (dut.abstract_state_ticket, AbstractStateIndex(0)), (dut.cache_entry_ticket, CacheIndex(0)), (dut.discrete_state_ticket, DiscreteStateIndex(0)), (dut.input_port_ticket, InputPortIndex(0)), (dut.numeric_parameter_ticket, NumericParameterIndex(0)), ]: self.assertIsInstance(func(arg), DependencyTicket, func) def test_cache_entry(self): """Checks the existence of CacheEntry-related bindings.""" # Cover DeclareCacheEntry. dummy = LeafSystem() model_value = Value(SimpleNamespace()) def calc_cache(context, abstract_value): cache = abstract_value.get_mutable_value() self.assertIsInstance(cache, SimpleNamespace) cache.updated = True cache_entry = dummy.DeclareCacheEntry( description="scratch", value_producer=ValueProducer( allocate=model_value.Clone, calc=calc_cache), prerequisites_of_calc={dummy.nothing_ticket()}) self.assertIsInstance(cache_entry, CacheEntry) context = dummy.CreateDefaultContext() # Cover CacheEntry and get_cache_entry. self.assertIsInstance(cache_entry.prerequisites(), set) self.assertTrue(cache_entry.is_out_of_date(context)) self.assertFalse(cache_entry.is_cache_entry_disabled(context)) cache_entry.disable_caching(context) self.assertTrue(cache_entry.is_cache_entry_disabled(context)) cache_entry.enable_caching(context) self.assertFalse(cache_entry.is_cache_entry_disabled(context)) self.assertFalse(cache_entry.is_disabled_by_default()) cache_entry.disable_caching_by_default() self.assertTrue(cache_entry.is_disabled_by_default()) self.assertIsInstance(cache_entry.description(), str) cache_index = cache_entry.cache_index() self.assertIsInstance(cache_index, CacheIndex) self.assertIsInstance(cache_entry.ticket(), DependencyTicket) self.assertIs(dummy.get_cache_entry(cache_index), cache_entry) self.assertFalse(cache_entry.has_default_prerequisites()) # Cover CacheEntryValue. # WARNING: This is not the suggested workflow for proper bindings. See # below for proper workflow using .Eval(). 
cache_entry_value = cache_entry.get_mutable_cache_entry_value(context) self.assertIsInstance(cache_entry_value, CacheEntryValue) data = cache_entry_value.GetMutableValueOrThrow() self.assertIsInstance(data, SimpleNamespace) # This has not yet been updated. self.assertFalse(hasattr(data, "updated")) # Const flavor access. cache_entry_value_const = cache_entry.get_cache_entry_value(context) self.assertIs(cache_entry_value_const, cache_entry_value) # Const flavor is out of date. with self.assertRaises(RuntimeError) as cm: cache_entry_value_const.GetValueOrThrow() self.assertIn("the current value is out of date", str(cm.exception)) # Now properly update the cache entry. # Using .Eval() is the best workflow to follow. data_updated = cache_entry.Eval(context) # Ensure we didn't clone. self.assertIs(data, data_updated) # Mutated! self.assertTrue(data.updated) # Check abstract access. self.assertIs(cache_entry.EvalAbstract(context).get_value(), data) # Now check const aliasing. data_const = cache_entry_value_const.GetValueOrThrow() self.assertIs(data_const, data) def test_leaf_system_issue13792(self): """ Ensures that users get a better error when forgetting to explicitly call the C++ superclass's __init__. """ class Oops(LeafSystem): def __init__(self): pass with self.assertRaisesRegex(TypeError, "LeafSystem.*__init__"): Oops() def test_all_leaf_system_overrides(self): test = self class TrivialSystem(LeafSystem): def __init__(self): LeafSystem.__init__(self) self.called_continuous = False self.called_initialize = False self.called_per_step = False self.called_periodic = False self.called_initialize_publish = False self.called_initialize_discrete = False self.called_initialize_unrestricted = False self.called_periodic_publish = False self.called_periodic_discrete = False self.called_periodic_unrestricted = False self.called_per_step_publish = False self.called_per_step_discrete = False self.called_per_step_unrestricted = False self.called_forced_publish = False self.called_forced_discrete = False self.called_forced_unrestricted = False self.called_getwitness = False self.called_witness = False self.called_guard = False self.called_reset = False self.called_system_reset = False # Ensure we have desired overloads. 
self.DeclareInitializationPublishEvent( publish=self._on_initialize_publish) self.DeclareInitializationDiscreteUpdateEvent( update=self._on_initialize_discrete) self.DeclareInitializationUnrestrictedUpdateEvent( update=self._on_initialize_unrestricted) self.DeclareInitializationEvent( event=PublishEvent( trigger_type=TriggerType.kInitialization, callback=self._on_initialize)) self.DeclarePeriodicPublishEvent( period_sec=1.0, offset_sec=0, publish=self._on_periodic_publish) self.DeclarePeriodicDiscreteUpdateEvent( period_sec=1.0, offset_sec=0, update=self._on_periodic_discrete) self.DeclarePeriodicUnrestrictedUpdateEvent( period_sec=1.0, offset_sec=0, update=self._on_periodic_unrestricted) self.DeclarePerStepPublishEvent( publish=self._on_per_step_publish) self.DeclarePerStepDiscreteUpdateEvent( update=self._on_per_step_discrete) self.DeclarePerStepUnrestrictedUpdateEvent( update=self._on_per_step_unrestricted) self.DeclarePerStepEvent( event=PublishEvent( trigger_type=TriggerType.kPerStep, callback=self._on_per_step)) self.DeclareForcedPublishEvent( publish=self._on_forced_publish) self.DeclareForcedDiscreteUpdateEvent( update=self._on_forced_discrete) self.DeclareForcedUnrestrictedUpdateEvent( update=self._on_forced_unrestricted) self.DeclarePeriodicEvent( period_sec=1.0, offset_sec=0.0, event=PublishEvent( trigger_type=TriggerType.kPeriodic, callback=self._on_periodic)) self.DeclareContinuousState(2) self.DeclareDiscreteState(1) # Ensure that we have inputs / outputs to call direct # feedthrough. self.DeclareInputPort( kUseDefaultName, PortDataType.kVectorValued, 1) self.DeclareVectorInputPort( name="test_input", model_vector=BasicVector(1), random_type=None) self.DeclareVectorOutputPort( "noop", BasicVector(1), noop, prerequisites_of_calc=set([self.nothing_ticket()])) self.DeclareVectorOutputPort("noop2", 1, noop, prerequisites_of_calc=set( [self.nothing_ticket()])) self.witness = self.MakeWitnessFunction( "witness", WitnessFunctionDirection.kCrossesZero, self._witness) # Test bindings for both callback function signatures. self.reset_witness = self.MakeWitnessFunction( "reset", WitnessFunctionDirection.kCrossesZero, self._guard, UnrestrictedUpdateEvent(self._reset)) self.system_reset_witness = self.MakeWitnessFunction( "system reset", WitnessFunctionDirection.kCrossesZero, self._guard, UnrestrictedUpdateEvent( system_callback=self._system_reset)) self.witness_result = 1.0 self.getwitness_result = [ self.witness, self.reset_witness, self.system_reset_witness, ] def DoCalcTimeDerivatives(self, context, derivatives): # Note: Don't call base method here; it would abort because # derivatives.size() != 0. 
test.assertEqual(derivatives.get_vector().size(), 2) self.called_continuous = True def DoGetWitnessFunctions(self, context): self.called_getwitness = True return self.getwitness_result def _on_initialize(self, context, event): test.assertIsInstance(context, Context) test.assertIsInstance(event, PublishEvent) test.assertFalse(self.called_initialize) self.called_initialize = True def _on_per_step(self, context, event): test.assertIsInstance(context, Context) test.assertIsInstance(event, PublishEvent) self.called_per_step = True def _on_periodic(self, context, event): test.assertIsInstance(context, Context) test.assertIsInstance(event, PublishEvent) test.assertFalse(self.called_periodic) self.called_periodic = True def _on_initialize_publish(self, context): test.assertIsInstance(context, Context) test.assertFalse(self.called_initialize_publish) self.called_initialize_publish = True return EventStatus.Succeeded() def _on_initialize_discrete(self, context, discrete_state): test.assertIsInstance(context, Context) test.assertIsInstance(discrete_state, DiscreteValues) test.assertFalse(self.called_initialize_discrete) self.called_initialize_discrete = True return EventStatus.Succeeded() def _on_initialize_unrestricted(self, context, state): test.assertIsInstance(context, Context) test.assertIsInstance(state, State) test.assertFalse(self.called_initialize_unrestricted) self.called_initialize_unrestricted = True return EventStatus.Succeeded() def _on_periodic_publish(self, context): test.assertIsInstance(context, Context) test.assertFalse(self.called_periodic_publish) self.called_periodic_publish = True return EventStatus.Succeeded() def _on_periodic_discrete(self, context, discrete_state): test.assertIsInstance(context, Context) test.assertIsInstance(discrete_state, DiscreteValues) test.assertFalse(self.called_periodic_discrete) self.called_periodic_discrete = True return EventStatus.Succeeded() def _on_periodic_unrestricted(self, context, state): test.assertIsInstance(context, Context) test.assertIsInstance(state, State) test.assertFalse(self.called_periodic_unrestricted) self.called_periodic_unrestricted = True return EventStatus.Succeeded() def _on_per_step_publish(self, context): test.assertIsInstance(context, Context) self.called_per_step_publish = True return EventStatus.Succeeded() def _on_per_step_discrete(self, context, discrete_state): test.assertIsInstance(context, Context) test.assertIsInstance(discrete_state, DiscreteValues) self.called_per_step_discrete = True return EventStatus.Succeeded() def _on_per_step_unrestricted(self, context, state): test.assertIsInstance(context, Context) test.assertIsInstance(state, State) self.called_per_step_unrestricted = True return EventStatus.Succeeded() def _on_forced_publish(self, context): test.assertIsInstance(context, Context) test.assertFalse(self.called_forced_publish) self.called_forced_publish = True return EventStatus.Succeeded() def _on_forced_discrete(self, context, discrete_state): test.assertIsInstance(context, Context) test.assertIsInstance(discrete_state, DiscreteValues) test.assertFalse(self.called_forced_discrete) self.called_forced_discrete = True return EventStatus.Succeeded() def _on_forced_unrestricted(self, context, state): test.assertIsInstance(context, Context) test.assertIsInstance(state, State) test.assertFalse(self.called_forced_unrestricted) self.called_forced_unrestricted = True return EventStatus.Succeeded() def _witness(self, context): test.assertIsInstance(context, Context) self.called_witness = True return 
self.witness_result def _guard(self, context): test.assertIsInstance(context, Context) self.called_guard = True return context.get_time() - 0.5 def _reset(self, context, event, state): test.assertIsInstance(context, Context) test.assertIsInstance(event, UnrestrictedUpdateEvent) test.assertIsInstance(state, State) self.called_reset = True def _system_reset(self, system, context, event, state): test.assertIsInstance(system, System) test.assertIsInstance(context, Context) test.assertIsInstance(event, UnrestrictedUpdateEvent) test.assertIsInstance(state, State) self.called_system_reset = True system = TrivialSystem() self.assertFalse(system.called_continuous) self.assertFalse(system.called_initialize) results = call_leaf_system_overrides(system) self.assertFalse(results["has_direct_feedthrough"]) self.assertTrue(system.called_continuous) self.assertTrue(system.called_initialize) self.assertEqual(results["discrete_next_t"], 1.0) self.assertFalse(system.HasAnyDirectFeedthrough()) self.assertFalse(system.HasDirectFeedthrough(output_port=0)) self.assertFalse( system.HasDirectFeedthrough(input_port=0, output_port=0)) # Test explicit calls. system = TrivialSystem() context = system.CreateDefaultContext() system.ForcedPublish(context=context) self.assertTrue(system.called_forced_publish) context_update = context.Clone() system.CalcTimeDerivatives( context=context, derivatives=context_update.get_mutable_continuous_state()) self.assertTrue(system.called_continuous) system.called_continuous = False residual = system.AllocateImplicitTimeDerivativesResidual() system.CalcImplicitTimeDerivativesResidual( context=context, proposed_derivatives=context_update.get_continuous_state(), residual=residual) np.testing.assert_allclose(residual, 0, 1e-14) self.assertTrue(system.called_continuous) np.testing.assert_allclose( system.CalcImplicitTimeDerivativesResidual( context=context, proposed_derivatives=context_update.get_continuous_state()), 0, 1e-14) witnesses = system.GetWitnessFunctions(context) self.assertEqual(len(witnesses), 3) system.CalcForcedDiscreteVariableUpdate( context=context, discrete_state=context_update.get_mutable_discrete_state()) self.assertTrue(system.called_forced_discrete) system.CalcForcedUnrestrictedUpdate( context=context, state=context_update.get_mutable_state() ) self.assertTrue(system.called_forced_unrestricted) # Test per-step, periodic, and witness call backs system = TrivialSystem() simulator = Simulator(system) simulator.get_mutable_context().SetAccuracy(0.1) # Stepping to 0.99 so that we get exactly one periodic event. simulator.AdvanceTo(0.99) self.assertTrue(system.called_per_step) self.assertTrue(system.called_periodic) self.assertTrue(system.called_initialize_publish) self.assertTrue(system.called_initialize_discrete) self.assertTrue(system.called_initialize_unrestricted) self.assertTrue(system.called_periodic_publish) self.assertTrue(system.called_periodic_discrete) self.assertTrue(system.called_periodic_unrestricted) self.assertTrue(system.called_per_step_publish) self.assertTrue(system.called_per_step_discrete) self.assertTrue(system.called_per_step_unrestricted) self.assertTrue(system.called_getwitness) self.assertTrue(system.called_witness) self.assertTrue(system.called_guard) self.assertTrue(system.called_reset) self.assertTrue(system.called_system_reset) # Test ExecuteInitializationEvents. 
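        # ExecuteInitializationEvents dispatches only the initialization
        # events, so every per-step / periodic / witness flag checked below
        # should remain False.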
system = TrivialSystem() context = system.CreateDefaultContext() system.ExecuteInitializationEvents(context=context) self.assertFalse(system.called_per_step) self.assertFalse(system.called_periodic) self.assertTrue(system.called_initialize_publish) self.assertTrue(system.called_initialize_discrete) self.assertTrue(system.called_initialize_unrestricted) self.assertFalse(system.called_periodic_publish) self.assertFalse(system.called_periodic_discrete) self.assertFalse(system.called_periodic_unrestricted) self.assertFalse(system.called_per_step_publish) self.assertFalse(system.called_per_step_discrete) self.assertFalse(system.called_per_step_unrestricted) self.assertFalse(system.called_getwitness) self.assertFalse(system.called_witness) self.assertFalse(system.called_guard) self.assertFalse(system.called_reset) self.assertFalse(system.called_system_reset) # Test witness function error messages. system = TrivialSystem() system.getwitness_result = None simulator = Simulator(system) with self.assertRaisesRegex(TypeError, "NoneType"): simulator.AdvanceTo(0.1) self.assertTrue(system.called_getwitness) system = TrivialSystem() system.witness_result = None simulator = Simulator(system) with self.assertRaisesRegex(TypeError, "NoneType"): simulator.AdvanceTo(0.1) self.assertTrue(system.called_witness) def test_event_handler_returns_none(self): """Checks that a Python event handler callback function is allowed to (implicitly) return None, instead of an EventStatus. Because of all the setup boilerplate, we only test one specific event type and assume that the other event types (which are implemented similarly) will likewise behave the same. """ class PublishReturnsNoneSystem(LeafSystem): def __init__(self): LeafSystem.__init__(self) self.called_periodic_publish = False self.DeclarePeriodicPublishEvent( period_sec=1.0, offset_sec=0.0, publish=self._on_periodic_publish) def _on_periodic_publish(self, context): self.called_periodic_publish = True # There is no `return` statement here; Python implicitly treats # this like a `return None`. system = PublishReturnsNoneSystem() simulator = Simulator(system) simulator.AdvanceTo(0.25) self.assertTrue(system.called_periodic_publish) def test_state_output_port_declarations(self): """Checks that DeclareStateOutputPort is bound.""" dut = LeafSystem() xc_index = dut.DeclareContinuousState(2) xc_port = dut.DeclareStateOutputPort(name="xc", state_index=xc_index) self.assertEqual(xc_port.size(), 2) xd_index = dut.DeclareDiscreteState(3) xd_port = dut.DeclareStateOutputPort(name="xd", state_index=xd_index) self.assertEqual(xd_port.size(), 3) xa_index = dut.DeclareAbstractState(Value(1)) xa_port = dut.DeclareStateOutputPort(name="xa", state_index=xa_index) self.assertEqual(xa_port.get_name(), "xa") def test_vector_system_overrides(self): dt = 0.5 for is_discrete in [False, True]: system = CustomVectorSystem(is_discrete) self.assertEqual( system.GetSystemType(), f"{__name__}.CustomVectorSystem") context = system.CreateDefaultContext() u = np.array([1.]) system.get_input_port(0).FixValue(context, u) # Dispatch virtual calls from C++. output = call_vector_system_overrides( system, context, is_discrete, dt) self.assertTrue(system.HasAnyDirectFeedthrough()) # Check call order. update_type = is_discrete and "discrete" or "continuous" self.assertEqual( system.has_called, [update_type, "output"]) # Check values. state = context.get_state() x = (is_discrete and state.get_discrete_state() or state.get_continuous_state()).get_vector().get_value() x0 = [0., 0.] 
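            # Expected state after the dispatched update: the discrete update
            # gives x = x0 + 2*u, while the continuous path integrates for
            # one step of size dt, which with x0 = 0 lands at x = dt*u
            # (cf. the coefficient c below).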
c = is_discrete and 2 or 1*dt x_expected = x0 + c*u self.assertTrue(np.allclose(x, x_expected)) # Check output. y_expected = np.hstack([u, x]) y = output.get_vector_data(0).get_value() self.assertTrue(np.allclose(y, y_expected)) def test_context_api(self): # Capture miscellaneous functions not yet tested. model_value = Value("Hello") model_vector = BasicVector([1., 2.]) class TrivialSystem(LeafSystem): def __init__(self): LeafSystem.__init__(self) self.DeclareContinuousState(1) self.DeclareDiscreteState(2) self.DeclareAbstractState(model_value=model_value.Clone()) self.DeclareAbstractParameter(model_value=model_value.Clone()) self.DeclareNumericParameter(model_vector=model_vector.Clone()) system = TrivialSystem() context = system.CreateDefaultContext() self.assertTrue( context.get_state() is context.get_mutable_state()) self.assertEqual(context.num_continuous_states(), 1) self.assertTrue( context.get_continuous_state_vector() is context.get_mutable_continuous_state_vector()) self.assertEqual(context.num_discrete_state_groups(), 1) self.assertTrue( context.get_discrete_state_vector() is context.get_mutable_discrete_state_vector()) self.assertTrue( context.get_discrete_state(0) is context.get_discrete_state_vector()) self.assertTrue( context.get_discrete_state(0) is context.get_discrete_state().get_vector(0)) self.assertTrue( context.get_mutable_discrete_state(0) is context.get_mutable_discrete_state_vector()) self.assertTrue( context.get_mutable_discrete_state(0) is context.get_mutable_discrete_state().get_vector(0)) self.assertEqual(context.num_abstract_states(), 1) self.assertTrue( context.get_abstract_state() is context.get_mutable_abstract_state()) self.assertTrue( context.get_abstract_state(0) is context.get_mutable_abstract_state(0)) self.assertEqual( context.get_abstract_state(0).get_value(), model_value.get_value()) # Check state API. state = context.get_mutable_state() self.assertTrue( state.get_mutable_discrete_state(index=0) is state.get_mutable_discrete_state().get_vector(index=0)) self.assertTrue( state.get_abstract_state(index=0) is state.get_abstract_state().get_value(index=0)) self.assertTrue( state.get_mutable_abstract_state(index=0) is state.get_mutable_abstract_state().get_value(index=0)) # Check abstract state API (also test Values). values = context.get_abstract_state() self.assertEqual(values.size(), 1) self.assertEqual( values.get_value(0).get_value(), model_value.get_value()) self.assertEqual( values.get_mutable_value(0).get_value(), model_value.get_value()) values.SetFrom(values.Clone()) # Check parameter accessors. self.assertEqual(system.num_abstract_parameters(), 1) self.assertEqual( context.get_abstract_parameter(index=0).get_value(), model_value.get_value()) self.assertEqual(system.num_numeric_parameter_groups(), 1) np.testing.assert_equal( context.get_numeric_parameter(index=0).get_value(), model_vector.get_value()) # Check diagram context accessors. builder = DiagramBuilder() builder.AddSystem(system) diagram = builder.Build() context = diagram.CreateDefaultContext() # Existence check. 
self.assertIsNot( diagram.GetMutableSubsystemState(system, context), None) subcontext = diagram.GetMutableSubsystemContext(subsystem=system, context=context) self.assertIsNot(subcontext, None) self.assertIs( diagram.GetSubsystemContext(subsystem=system, context=context), subcontext) subcontext2 = system.GetMyMutableContextFromRoot(root_context=context) self.assertIsNot(subcontext2, None) self.assertIs(subcontext2, subcontext) self.assertIs(system.GetMyContextFromRoot(root_context=context), subcontext2) def test_continuous_state_api(self): # N.B. Since this has trivial operations, we can test all scalar types. for T in [float, AutoDiffXd, Expression]: class TrivialSystem(LeafSystem_[T]): def __init__(self, index): LeafSystem_[T].__init__(self) num_q = 2 num_v = 1 num_z = 3 num_state = num_q + num_v + num_z if index == 0: self.DeclareContinuousState( num_state_variables=num_state) elif index == 1: self.DeclareContinuousState( num_q=num_q, num_v=num_v, num_z=num_z) elif index == 2: self.DeclareContinuousState( BasicVector_[T](num_state)) elif index == 3: self.DeclareContinuousState( BasicVector_[T](num_state), num_q=num_q, num_v=num_v, num_z=num_z) def DoCalcTimeDerivatives(self, context, derivatives): derivatives.get_mutable_vector().SetZero() for index in range(4): system = TrivialSystem(index) context = system.CreateDefaultContext() self.assertEqual( context.get_continuous_state_vector().size(), 6) self.assertEqual(system.AllocateTimeDerivatives().size(), 6) self.assertEqual( system.EvalTimeDerivatives(context=context).size(), 6) def test_discrete_state_api(self): # N.B. Since this has trivial operations, we can test all scalar types. for T in [float, AutoDiffXd, Expression]: class TrivialSystem(LeafSystem_[T]): def __init__(self, index): LeafSystem_[T].__init__(self) num_states = 3 if index == 0: self.DeclareDiscreteState( num_state_variables=num_states) elif index == 1: self.DeclareDiscreteState([1, 2, 3]) elif index == 2: self.DeclareDiscreteState( BasicVector_[T](num_states)) for index in range(3): system = TrivialSystem(index) context = system.CreateDefaultContext() self.assertEqual( context.get_discrete_state(0).size(), 3) self.assertEqual(system.AllocateDiscreteVariables().size(), 3) def test_abstract_io_port(self): test = self def assert_value_equal(a, b): a_name, a_value = a b_name, b_value = b self.assertEqual(a_name, b_name) numpy_compare.assert_equal(a_value, b_value) # N.B. Since this has trivial operations, we can test all scalar types. 
for T in [float, AutoDiffXd, Expression]: default_value = ("default", T(0.)) expected_input_value = ("input", T(np.pi)) expected_output_value = ("output", 2*T(np.pi)) class CustomAbstractSystem(LeafSystem_[T]): def __init__(self): LeafSystem_[T].__init__(self) self.input_port = self.DeclareAbstractInputPort( "in", Value(default_value)) self.output_port = self.DeclareAbstractOutputPort( "out", lambda: Value(default_value), self.DoCalcAbstractOutput, prerequisites_of_calc=set([ self.input_port.ticket()])) def DoCalcAbstractOutput(self, context, y_data): input_value = self.EvalAbstractInput( context=context, port_index=0).get_value() # The allocator function will populate the output with # the "input" assert_value_equal(input_value, expected_input_value) y_data.set_value(expected_output_value) assert_value_equal( y_data.get_value(), expected_output_value) system = CustomAbstractSystem() context = system.CreateDefaultContext() self.assertEqual(context.num_input_ports(), 1) system.get_input_port(0).FixValue(context, expected_input_value) output = system.AllocateOutput() self.assertEqual(output.num_ports(), 1) system.CalcOutput(context, output) value = output.get_data(0) self.assertEqual(value.get_value(), expected_output_value) def assert_equal_but_not_aliased(self, a, b): self.assertEqual(a, b) self.assertIsNot(a, b) def test_context_and_value_object_set_from(self): """ Shows how `Value[object]` behaves in a context, especially in connection to `Context.SetTimeStateAndParametersFrom()`. Helps to highlight failure mode illustrated in #18653. """ arbitrary_object = {"key": "value"} class SystemWithCacheAndState(LeafSystem): def __init__(self): super().__init__() model_value = Value(arbitrary_object) self.state_index = self.DeclareAbstractState(model_value) def calc_cache_noop(context, abstract_value): pass self.cache_entry = self.DeclareCacheEntry( description="test", value_producer=ValueProducer( allocate=model_value.Clone, calc=calc_cache_noop, ), ) def eval_state(self, context): return context.get_abstract_state(self.state_index).get_value() system = SystemWithCacheAndState() context = system.CreateDefaultContext() context_init = context.Clone() cache = system.cache_entry.Eval(context) self.assert_equal_but_not_aliased(cache, arbitrary_object) state = system.eval_state(context) self.assert_equal_but_not_aliased(state, arbitrary_object) def check_set_from(): nonlocal cache, state context.SetTimeStateAndParametersFrom(context_init) # Ensure that we have cloned the object. old_state = state state = system.eval_state(context) self.assert_equal_but_not_aliased(state, old_state) # Warning: Cache objects are not cloned! old_cache = cache cache = system.cache_entry.Eval(context) self.assertIs(cache, old_cache) # Check twice. Per #18653, if we did not implement # Value[object].SetFrom() correctly, this would fail the second time. check_set_from() check_set_from()
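
The declaration patterns above compose into complete systems. As a hedged,
illustrative sketch (the Decay class below is invented for this example and
is not part of the test), a continuous system xdot = -x with a 0.1s periodic
publish looks like:

    from pydrake.systems.analysis import Simulator
    from pydrake.systems.framework import LeafSystem

    class Decay(LeafSystem):
        def __init__(self):
            LeafSystem.__init__(self)
            state_index = self.DeclareContinuousState(1)
            self.DeclareStateOutputPort(name="x", state_index=state_index)
            self.DeclarePeriodicPublishEvent(
                period_sec=0.1, offset_sec=0.0, publish=self._on_publish)

        def DoCalcTimeDerivatives(self, context, derivatives):
            x = context.get_continuous_state_vector().get_value()
            derivatives.get_mutable_vector().SetFromVector(-x)

        def _on_publish(self, context):
            # Returning None is fine; see test_event_handler_returns_none.
            print(context.get_time())

    system = Decay()
    simulator = Simulator(system)
    simulator.get_mutable_context().SetContinuousState([1.0])
    simulator.AdvanceTo(0.5)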
0
/home/johnshepherd/drake/bindings/pydrake/systems
/home/johnshepherd/drake/bindings/pydrake/systems/test/pyplot_visualizer_test.py
import unittest import matplotlib.animation as animation import matplotlib.pyplot as plt import numpy as np from pydrake.systems.analysis import Simulator from pydrake.systems.framework import ( Context, DiagramBuilder, PortDataType, VectorSystem, kUseDefaultName) from pydrake.systems.primitives import VectorLogSink from pydrake.systems.pyplot_visualizer import PyPlotVisualizer from pydrake.trajectories import PiecewisePolynomial # TODO(tehbelinda): Augment this test with a Jupyter notebook to make this # easier to visualize. class TestVisualizer(PyPlotVisualizer): # Set limits of view port. XLIM = (-20., 20.) YLIM = (-6., 6.) TICK_DIMS = (0.2, 0.8) PATCH_WIDTH = 5. PATCH_HEIGHT = 1. def __init__(self, size): PyPlotVisualizer.__init__(self) self.DeclareInputPort(kUseDefaultName, PortDataType.kVectorValued, size) self.ax.set_xlim(*self.XLIM) self.ax.set_ylim(*self.YLIM) self.ax.set_aspect('auto') self._make_background() self.patch = plt.Rectangle((0.0, 0.0), self.PATCH_WIDTH, self.PATCH_HEIGHT, fc='#A31F34', ec='k') self.patch.set_x(-self.PATCH_WIDTH / 2) # Center at x. def _make_background(self): # X-axis. plt.plot(self.XLIM, np.zeros_like(self.XLIM), 'k') # Tick mark centered at the origin. tick_pos = -0.5 * np.asarray(self.TICK_DIMS) self.ax.add_patch(plt.Rectangle(tick_pos, *self.TICK_DIMS, fc='k')) def draw(self, context): try: x = self.EvalVectorInput(context, 0).get_value()[0] except TypeError: x = context[0] self.patch.set_x(x - self.PATCH_WIDTH / 2) class SimpleContinuousTimeSystem(VectorSystem): def __init__(self): VectorSystem.__init__(self, 0, # Zero inputs. 1) # One output. self.DeclareContinuousState(1) # One state variable. # xdot(t) = -x(t) + x^3(t) def DoCalcVectorTimeDerivatives(self, context, u, x, xdot): xdot[:] = -x + x**3 # y(t) = x(t) def DoCalcVectorOutput(self, context, u, x, y): y[:] = x class TestPyplotVisualizer(unittest.TestCase): def test_simple_visualizer(self): builder = DiagramBuilder() system = builder.AddSystem(SimpleContinuousTimeSystem()) logger = builder.AddSystem(VectorLogSink(1)) builder.Connect(system.get_output_port(0), logger.get_input_port(0)) visualizer = builder.AddSystem(TestVisualizer(1)) builder.Connect(system.get_output_port(0), visualizer.get_input_port(0)) diagram = builder.Build() context = diagram.CreateDefaultContext() context.SetContinuousState([0.9]) simulator = Simulator(diagram, context) simulator.AdvanceTo(.1) ani = visualizer.animate(logger.FindLog(context), repeat=True) self.assertIsInstance(ani, animation.FuncAnimation) def test_trajectory(self): builder = DiagramBuilder() visualizer = builder.AddSystem(TestVisualizer(1)) ppt = PiecewisePolynomial.FirstOrderHold( [0., 1.], [[2., 3.], [2., 1.]]) ani = visualizer.animate(ppt) self.assertIsInstance(ani, animation.FuncAnimation) def test_recording(self): visualizer = PyPlotVisualizer() # Assert that we start with no recordings. This uses private API for # testing _recorded_contexts and should not be used publicly. self.assertEqual(len(visualizer._recorded_contexts), 0) visualizer.start_recording() # Artificially produce some specific contexts. times = [0.003, 0.2, 1.1, 1.12] context = visualizer.AllocateContext() for time in times: context.SetTime(time) visualizer.ForcedPublish(context) # Check that there are now recorded contexts with matching times. 
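        # stop_recording() only halts collection; the already-recorded
        # contexts must survive until reset_recording() clears them.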
        visualizer.stop_recording()
        self.assertEqual(len(visualizer._recorded_contexts), len(times))
        for i, time in enumerate(times):
            self.assertEqual(time,
                             visualizer._recorded_contexts[i].get_time())
        ani = visualizer.get_recording_as_animation()
        self.assertIsInstance(ani, animation.FuncAnimation)
        visualizer.reset_recording()
        self.assertEqual(len(visualizer._recorded_contexts), 0)
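
The same record-then-animate workflow applies to any PyPlotVisualizer
subclass. A sketch (the output filename is illustrative, and saving to disk
assumes a matplotlib movie writer such as ffmpeg is available):

    from pydrake.systems.pyplot_visualizer import PyPlotVisualizer

    viz = PyPlotVisualizer()
    viz.start_recording()
    context = viz.AllocateContext()
    for t in [0.0, 0.1, 0.2]:
        context.SetTime(t)
        viz.ForcedPublish(context)
    viz.stop_recording()
    ani = viz.get_recording_as_animation()
    ani.save("recording.mp4")  # Assumes a movie writer is installed.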
0
/home/johnshepherd/drake/bindings/pydrake/systems
/home/johnshepherd/drake/bindings/pydrake/systems/test/sensors_test.py
import pydrake.systems.sensors as mut import copy import gc import tempfile import unittest import numpy as np from pydrake.common import FindResourceOrThrow from pydrake.common.test_utilities.pickle_compare import assert_pickle from pydrake.common.value import AbstractValue, Value from pydrake.geometry import ( ClippingRange, ColorRenderCamera, DepthRange, DepthRenderCamera, FrameId, RenderCameraCore, ) from pydrake.lcm import DrakeLcm from pydrake.math import ( RigidTransform, RollPitchYaw, ) from pydrake.multibody.plant import ( AddMultibodyPlantSceneGraph, ) from pydrake.systems.framework import ( DiagramBuilder, InputPort, OutputPort, ) from pydrake.systems.lcm import LcmBuses, _Serializer_ from drake import ( lcmt_image, lcmt_image_array, ) # Shorthand aliases, to reduce verbosity. pt = mut.PixelType pf = mut.PixelFormat # Available image / pixel types. pixel_types = [ pt.kRgba8U, pt.kRgb8U, pt.kBgra8U, pt.kBgr8U, pt.kDepth16U, pt.kDepth32F, pt.kLabel16I, pt.kGrey8U, ] # Convenience aliases. image_type_aliases = [ mut.ImageRgba8U, mut.ImageRgb8U, mut.ImageBgra8U, mut.ImageBgr8U, mut.ImageDepth16U, mut.ImageDepth32F, mut.ImageLabel16I, mut.ImageGrey8U, ] class TestSensors(unittest.TestCase): def test_image_traits(self): # Ensure that we test all available enums. self.assertSetEqual( set(pixel_types), set(mut.PixelType.__members__.values())) # Spot-check specific instantiations of ImageTraits<>. t = mut.ImageTraits[pt.kRgba8U] self.assertEqual(t.kNumChannels, 4) self.assertEqual(t.ChannelType, np.uint8) self.assertEqual(t.kPixelFormat, pf.kRgba) t = mut.ImageTraits[pt.kDepth16U] self.assertEqual(t.kNumChannels, 1) self.assertEqual(t.ChannelType, np.uint16) self.assertEqual(t.kPixelFormat, pf.kDepth) t = mut.ImageTraits[pt.kDepth32F] self.assertEqual(t.kNumChannels, 1) self.assertEqual(t.ChannelType, np.float32) self.assertEqual(t.kPixelFormat, pf.kDepth) t = mut.ImageTraits[pt.kLabel16I] self.assertEqual(t.kNumChannels, 1) self.assertEqual(t.ChannelType, np.int16) self.assertEqual(t.kPixelFormat, pf.kLabel) # Smoke test all instantiations of ImageTraits<>. for pixel_type in pixel_types: t = mut.ImageTraits[pixel_type] self.assertGreaterEqual(t.kNumChannels, 1) self.assertIsNotNone(t.ChannelType) self.assertIn(t.kPixelFormat, mut.PixelFormat.__members__.values()) # Smoke test the pixel scalars. mut.PixelScalar.k8U mut.PixelScalar.k16I mut.PixelScalar.k16U mut.PixelScalar.k32F def test_image_types(self): # Test instantiations of Image<>. for pixel_type, image_type_alias in ( zip(pixel_types, image_type_aliases)): ImageT = mut.Image[pixel_type] self.assertEqual(ImageT.Traits, mut.ImageTraits[pixel_type]) self.assertEqual(ImageT, image_type_alias) ImageT() w = 640 h = 480 nc = ImageT.Traits.kNumChannels image = ImageT(width=w, height=h) self.assertEqual(image.width(), w) self.assertEqual(image.height(), h) self.assertEqual(image.size(), h * w * nc) # N.B. Since `shape` is a custom-Python extension, it's defined as # a property (not a function). self.assertEqual(image.shape, (h, w, nc)) self.assertEqual(image.data.shape, image.shape) self.assertEqual(image.data.dtype, ImageT.Traits.ChannelType) w //= 2 h //= 2 # WARNING: Resizing an image with an existing reference to # `image.data` will cause `image.data` + `image.mutable_data` to be # invalid. image.resize(w, h) self.assertEqual(image.shape, (h, w, nc)) def test_image_data(self): # Test data mapping. for pixel_type in pixel_types: # Use a trivial size for ease of debugging. 
w = 8 h = 6 channel_default = 1 ImageT = mut.Image[pixel_type] image = ImageT(w, h, channel_default) nc = ImageT.Traits.kNumChannels # Test default initialization. self.assertEqual(image.at(0, 0)[0], channel_default) self.assertTrue(np.allclose(image.data, channel_default)) # Test pixel / channel mutation and access. image.at(0, 0)[0] = 2 # - Also test named arguments. self.assertEqual(image.at(x=0, y=0)[0], 2) bad_coords = [ # Bad X (-1, 0, 0), (100, 0, 0), # Bad Y (0, -1, 0), (0, 100, 0), # Bad Channel (0, 0, -100), (0, 0, 100), ] for x, y, c in bad_coords: try: image.at(x, y)[c] self.assertTrue(False) except SystemExit: pass except IndexError: pass # Test numpy views, access and mutation. image.mutable_data[:] = 3 self.assertEqual(image.at(0, 0)[0], 3) self.assertTrue(np.allclose(image.data, 3)) self.assertTrue(np.allclose(image.mutable_data, 3)) # Ensure that each dimension of the image array is unique. self.assertEqual(len(set(image.shape)), 3) # Ensure indices match as expected. Fill each channel at each pixel # with unique values, and ensure that pixel / channels map # appropriately. data = image.mutable_data data[:] = np.arange(0, image.size()).reshape(image.shape) for iw in range(w): for ih in range(h): self.assertTrue( np.allclose(data[ih, iw, :], image.at(iw, ih))) # Ensure that keep alive works by using temporary objects. def check_keep_alive(): image = ImageT(w, h, channel_default) return (image.data, image.mutable_data) data, mutable_data = check_keep_alive() gc.collect() np.testing.assert_array_equal(data, channel_default) np.testing.assert_array_equal(mutable_data, channel_default) def test_depth_image_conversion(self): foo = mut.ImageDepth32F(width=3, height=4) bar = mut.ImageDepth16U() mut.ConvertDepth32FTo16U(input=foo, output=bar) self.assertEqual(bar.width(), 3) self.assertEqual(bar.height(), 4) foo = mut.ImageDepth16U(width=3, height=4) bar = mut.ImageDepth32F() mut.ConvertDepth16UTo32F(input=foo, output=bar) self.assertEqual(bar.width(), 3) self.assertEqual(bar.height(), 4) def test_camera_config(self): mut.CameraConfig() config = mut.CameraConfig( width=124, focal=mut.CameraConfig.FocalLength(x=10, y=20)) self.assertEqual(config.width, 124) self.assertIn("width", repr(config)) copy.copy(config) self.assertEqual(config.focal_x(), 10) self.assertEqual(config.focal_y(), 20) config.principal_point() (color, depth) = config.MakeCameras() self.assertIsInstance(color, ColorRenderCamera) self.assertIsInstance(depth, DepthRenderCamera) fov = mut.CameraConfig.FovDegrees(x=10, y=20) self.assertIn("x=10", repr(fov)) copy.copy(fov) focal = mut.CameraConfig.FocalLength(x=10, y=20) self.assertIn("x=10", repr(focal)) copy.copy(focal) builder = DiagramBuilder() plant, scene_graph = AddMultibodyPlantSceneGraph(builder, 0.0) system_count = len(builder.GetSystems()) lcm = DrakeLcm() mut.ApplyCameraConfig(config=config, plant=plant, builder=builder, scene_graph=scene_graph, lcm=lcm) # Systems have been added. self.assertGreater(len(builder.GetSystems()), system_count) def test_camera_config_lcm_buses(self): builder = DiagramBuilder() plant, scene_graph = AddMultibodyPlantSceneGraph(builder, 0.0) system_count = len(builder.GetSystems()) # We'll call the Apply function using lcm_buses= instead of lcm=. lcm_buses = LcmBuses() lcm_buses.Add("fancy", DrakeLcm()) config = mut.CameraConfig(lcm_bus="fancy") mut.ApplyCameraConfig(config=config, builder=builder, lcm_buses=lcm_buses) # Check that systems were added. 
self.assertGreater(len(builder.GetSystems()), system_count) def test_camera_info(self): width = 640 height = 480 fov_y = np.pi / 4 focal_y = height / 2 / np.tan(fov_y / 2) focal_x = focal_y center_x = width / 2 - 0.5 center_y = height / 2 - 0.5 intrinsic_matrix = np.array([ [focal_x, 0, center_x], [0, focal_y, center_y], [0, 0, 1]]) infos = [ mut.CameraInfo(width=width, height=height, fov_y=fov_y), mut.CameraInfo( width=width, height=height, intrinsic_matrix=intrinsic_matrix), mut.CameraInfo( width=width, height=height, focal_x=focal_x, focal_y=focal_y, center_x=center_x, center_y=center_y), ] for info in infos: self.assertEqual(info.width(), width) self.assertEqual(info.height(), height) self.assertEqual(info.focal_x(), focal_x) self.assertEqual(info.focal_y(), focal_y) self.assertEqual(info.center_x(), center_x) self.assertEqual(info.center_y(), center_y) self.assertIsInstance(info.fov_x(), float) self.assertIsInstance(info.fov_y(), float) self.assertTrue( (info.intrinsic_matrix() == intrinsic_matrix).all()) assert_pickle(self, info, mut.CameraInfo.intrinsic_matrix) def _check_input(self, value): self.assertIsInstance(value, InputPort) def _check_output(self, value): self.assertIsInstance(value, OutputPort) def test_image_to_lcm_image_array_basic(self): """Tests the nominal constructor.""" dut = mut.ImageToLcmImageArrayT( color_frame_name="color", depth_frame_name="depth", label_frame_name="label", do_compress=False) for port in ( dut.color_image_input_port(), dut.depth_image_input_port(), dut.label_image_input_port()): self._check_input(port) for port in ( dut.image_array_t_msg_output_port(),): self._check_output(port) def test_image_to_lcm_image_array_custom(self): """Tests the custom constructor and runtime functionality.""" # Declare ports using the custom constructor. dut = mut.ImageToLcmImageArrayT(do_compress=False) for pixel_type in pixel_types: name = str(pixel_type) dut.DeclareImageInputPort[pixel_type](name=name) # Populate the input images. context = dut.CreateDefaultContext() for pixel_type in pixel_types: name = str(pixel_type) port = dut.GetInputPort(name) self._check_input(port) image = mut.Image[pixel_type](width=1, height=1) port.FixValue(context, image) # Compute the C++ message as a Value<drake::lcmt_image_array>. output = dut.AllocateOutput() dut.CalcOutput(context, output) cxx_message = output.get_data(0) # We can't access that C++ message from Python (messages are not bound # into the Value[] template class), so to inspect it we'll need to # serialize down into raw bytes and then back into a Python message. serializer = _Serializer_[lcmt_image_array]() message = lcmt_image_array.decode(serializer.Serialize(cxx_message)) # Inspect the message for correctness. 
self.assertEqual(message.num_images, len(pixel_types)) for i, image in enumerate(message.images): pixel_type = pixel_types[i] with self.subTest(pixel_type=pixel_type): self.assertEqual(image.width, 1) self.assertEqual(image.height, 1) expected_format = { pt.kRgba8U: lcmt_image.PIXEL_FORMAT_RGBA, pt.kRgb8U: lcmt_image.PIXEL_FORMAT_RGB, pt.kBgra8U: lcmt_image.PIXEL_FORMAT_BGRA, pt.kBgr8U: lcmt_image.PIXEL_FORMAT_BGR, pt.kDepth16U: lcmt_image.PIXEL_FORMAT_DEPTH, pt.kDepth32F: lcmt_image.PIXEL_FORMAT_DEPTH, pt.kLabel16I: lcmt_image.PIXEL_FORMAT_LABEL, pt.kGrey8U: lcmt_image.PIXEL_FORMAT_GRAY, }[pixel_type] self.assertEqual(image.pixel_format, expected_format) def test_lcm_image_array_to_images_basic(self): """Tests all API calls as well as runtime functionality.""" dut = mut.LcmImageArrayToImages() for port in ( dut.image_array_t_input_port(),): self._check_input(port) for port in ( dut.color_image_output_port(), dut.depth_image_output_port(), dut.label_image_output_port()): self._check_output(port) # Create a one-pixel lcmt_image message. image_message = lcmt_image() image_message.pixel_format = lcmt_image.PIXEL_FORMAT_RGBA image_message.channel_type = lcmt_image.CHANNEL_TYPE_UINT8 image_message.width = 1 image_message.height = 1 image_message.row_stride = 4 image_message.size = 4 image_message.data = [0] * 4 # Wrap the single image message into an image_array message. array_message = lcmt_image_array() array_message.num_images = 1 array_message.images = [image_message] # Copy the message into the Context. This is a bit tricky because the # input port is Value<drake::lcmt_image_array> which is not bound into # the Value[] class template, so we need to encode/decode to get there. context = dut.CreateDefaultContext() serializer = _Serializer_[lcmt_image_array]() cxx_message = serializer.CreateDefaultValue() serializer.Deserialize(array_message.encode(), cxx_message) dut.image_array_t_input_port().FixValue(context, cxx_message) # Extract the message's color image using the dut. image = dut.color_image_output_port().Eval(context) self.assertEqual(image.width(), 1) self.assertEqual(image.height(), 1) @staticmethod def _make_render_camera_core(*, width=640, height=480): return RenderCameraCore( "renderer", mut.CameraInfo(width, height, np.pi/6), ClippingRange(0.1, 6.0), RigidTransform()) def test_rgbd_sensor(self): def check_ports(system): self.assertIsInstance(system.query_object_input_port(), InputPort) self.assertIsInstance(system.color_image_output_port(), OutputPort) self.assertIsInstance(system.depth_image_32F_output_port(), OutputPort) self.assertIsInstance(system.depth_image_16U_output_port(), OutputPort) self.assertIsInstance(system.label_image_output_port(), OutputPort) self.assertIsInstance(system.body_pose_in_world_output_port(), OutputPort) self.assertIsInstance(system.image_time_output_port(), OutputPort) # Use HDTV size. width = 1280 height = 720 # There are *two* variants of the constructor for each camera # representation: one with color and depth explicitly specified and one # with only depth. We enumerate all four here. 
def construct(parent_id, X_PB): color_camera = ColorRenderCamera( self._make_render_camera_core(width=width, height=height), False) depth_camera = DepthRenderCamera(color_camera.core(), DepthRange(0.1, 5.5)) return mut.RgbdSensor(parent_id=parent_id, X_PB=X_PB, color_camera=color_camera, depth_camera=depth_camera) def construct_single(parent_id, X_PB): depth_camera = DepthRenderCamera( self._make_render_camera_core(width=width, height=height), DepthRange(0.1, 5.5)) return mut.RgbdSensor(parent_id=parent_id, X_PB=X_PB, depth_camera=depth_camera) # Put it at the origin. X_WB = RigidTransform() # This id would fail if we tried to render; no such id exists. parent_id = FrameId.get_new_id() def check_info(camera_info): self.assertIsInstance(camera_info, mut.CameraInfo) self.assertEqual(camera_info.width(), width) self.assertEqual(camera_info.height(), height) for constructor in [construct, construct_single]: sensor = constructor(parent_id, X_WB) check_info(sensor.color_camera_info()) check_info(sensor.depth_camera_info()) self.assertIsInstance(sensor.X_BC(), RigidTransform) self.assertIsInstance(sensor.X_BD(), RigidTransform) self.assertEqual(sensor.parent_frame_id(), parent_id) check_ports(sensor) # Test discrete camera. We'll simply use the last sensor constructed. period = mut.RgbdSensorDiscrete.kDefaultPeriod discrete = mut.RgbdSensorDiscrete( sensor=sensor, period=period, render_label_image=True) self.assertTrue(discrete.sensor() is sensor) self.assertEqual(discrete.period(), period) check_ports(discrete) # That we can access the state as images. context = discrete.CreateDefaultContext() values = context.get_abstract_state() self.assertIsInstance(values.get_value(0), Value[mut.ImageRgba8U]) self.assertIsInstance(values.get_value(1), Value[mut.ImageDepth32F]) self.assertIsInstance(values.get_value(2), Value[mut.ImageDepth16U]) self.assertIsInstance(values.get_value(3), Value[mut.ImageLabel16I]) def test_rgbd_sensor_async(self): builder = DiagramBuilder() plant, scene_graph = AddMultibodyPlantSceneGraph(builder, 0.0) camera_core = self._make_render_camera_core() color_camera = ColorRenderCamera(camera_core) depth_camera = DepthRenderCamera(camera_core, DepthRange(0.1, 5.5)) dut = mut.RgbdSensorAsync(scene_graph=scene_graph, parent_id=FrameId.get_new_id(), X_PB=RigidTransform(), fps=1.0, capture_offset=0.1, output_delay=0.01, color_camera=color_camera, depth_camera=depth_camera, render_label_image=True) dut.parent_id() dut.X_PB() dut.fps() dut.capture_offset() dut.output_delay() dut.color_camera() dut.depth_camera() dut.color_image_output_port() dut.depth_image_32F_output_port() dut.depth_image_16U_output_port() dut.label_image_output_port() dut.body_pose_in_world_output_port() dut.image_time_output_port() def test_image_file_format(self): mut.ImageFileFormat.kJpeg mut.ImageFileFormat.kPng mut.ImageFileFormat.kTiff def test_image_io_metadata(self): dut = mut.ImageIo.Metadata(width=640) self.assertEqual(dut.width, 640) self.assertIn("width=640", repr(dut)) def test_image_io_using_buffer(self): orig_image = mut.ImageRgba8U(6, 4) format = mut.ImageFileFormat.kPng dut = mut.ImageIo() data = dut.Save(image=orig_image, format=format) self.assertIsInstance(data, bytes) self.assertGreater(len(data), 0) meta = dut.LoadMetadata(buffer=data) self.assertEqual((meta.width, meta.height), (6, 4)) new_image = dut.Load(buffer=data, format=format) self.assertEqual((new_image.width(), new_image.height()), (6, 4)) def test_image_io_using_file(self): orig_image = mut.ImageRgba8U(6, 4) with 
tempfile.TemporaryDirectory() as temp: path = f"{temp}/test_image_io_using_file.png" dut = mut.ImageIo() dut.Save(image=orig_image, path=path, format=None) meta = dut.LoadMetadata(path=path) self.assertEqual((meta.width, meta.height), (6, 4)) new_image = dut.Load(path=path, format=mut.ImageFileFormat.kPng) self.assertEqual((new_image.width(), new_image.height()), (6, 4)) def test_image_writer(self): writer = mut.ImageWriter() input_port = writer.DeclareImageInputPort( pixel_type=mut.PixelType.kRgba8U, port_name="color", file_name_format="/tmp/{port_name}-{time_usec}", publish_period=0.125, start_time=0.0) self.assertIsNotNone(input_port)
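
# A minimal standalone sketch (not part of the test above) of an ImageIo
# round trip through an in-memory buffer, using only the APIs exercised by
# test_image_io_using_buffer. It assumes `mut` above is
# pydrake.systems.sensors; the 2x2 image size is arbitrary.
import pydrake.systems.sensors as sensors

image = sensors.ImageRgba8U(2, 2)  # A blank RGBA image.
io = sensors.ImageIo()
data = io.Save(image=image, format=sensors.ImageFileFormat.kPng)
meta = io.LoadMetadata(buffer=data)
assert (meta.width, meta.height) == (2, 2)
restored = io.Load(buffer=data, format=sensors.ImageFileFormat.kPng)
assert (restored.width(), restored.height()) == (2, 2)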
0
/home/johnshepherd/drake/bindings/pydrake/systems
/home/johnshepherd/drake/bindings/pydrake/systems/test/_resample_log_interp1d_test.py
import unittest

import numpy as np
from numpy.testing import assert_allclose

from pydrake.systems.analysis import Simulator
from pydrake.systems._resample_interp1d import _resample_interp1d
from pydrake.systems.framework import DiagramBuilder, VectorSystem
from pydrake.systems.primitives import VectorLogSink


class SimpleContinuousTimeSystem(VectorSystem):
    def __init__(self):
        self.output_size = 1
        VectorSystem.__init__(self,
                              0,                 # Zero inputs.
                              self.output_size)  # One output.
        self.DeclareContinuousState(1)           # One state variable.

    # xdot(t) = -x(t) + x^3(t)
    def DoCalcVectorTimeDerivatives(self, context, u, x, xdot):
        xdot[:] = -x + x**3

    # y(t) = x(t)
    def DoCalcVectorOutput(self, context, u, x, y):
        y[:] = x


class MultiDimensionalTimeSystem(VectorSystem):
    def __init__(self):
        self.output_size = 3
        VectorSystem.__init__(self,
                              0,                 # Zero inputs.
                              self.output_size)  # Three outputs.
        self.DeclareContinuousState(1)           # One state variable.

    # dx/dt = 2 * t -> x(t) = t^2 + const.
    def DoCalcVectorTimeDerivatives(self, context, u, x, xdot):
        t = context.get_time()
        xdot[:] = 2 * t

    # y(t) = [x(t), 2*x(t), sqrt(x(t))]
    def DoCalcVectorOutput(self, context, u, x, y):
        y[:] = np.array([x[0], 2*x[0], np.sqrt(x[0])])


class TestResampleLogInterp1d(unittest.TestCase):
    def test_resample_log_oned_interp1d(self):
        vector_system = SimpleContinuousTimeSystem()
        simulator, log, context = self.create_log(vector_system)
        context.SetContinuousState([0.9])
        simulator.AdvanceTo(0.1)

        # Check data generated by the VectorSystem.
        expected_t = np.array([
            0.00000000000000, 0.00010000000000, 0.00060000000000,
            0.00310000000000, 0.01560000000000, 0.07810000000000,
            0.10000000000000])
        expected_y = np.array([
            [0.90000000000000, 0.89998289877732, 0.89989735597770,
             0.89946872408377, 0.89730252592194, 0.88588499474765,
             0.88164759214110]])
        self._check_input_data(log, expected_t, expected_y)

        # Resample [0, .1] by step=0.03125 => 4 data points.
        expected_t = np.array([0.00000000000000, 0.03125000000000,
                               0.06250000000000, 0.09375000000000])
        expected_y = np.array([[0.90000000000000, 0.89444357611590,
                                0.88873481052876, 0.88285689653795]])
        self._check_resample(log, 0.03125, expected_t, expected_y)

        # Resample [0, .1] by step=0.01 => 10 data points.
        expected_t = np.array([
            0.0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09])
        expected_y = np.array([[
            0.90000000000000, 0.89827298269844, 0.89649873172727,
            0.89467192673938, 0.89284512175150, 0.89101831676361,
            0.88919151177573, 0.88736470678784, 0.88551736621101,
            0.88358247917606]])
        self._check_resample(log, 0.01, expected_t, expected_y)

    def test_resample_log_multid_interp1d(self):
        vector_system = MultiDimensionalTimeSystem()
        simulator, log, context = self.create_log(vector_system)
        context.SetContinuousState([0.])
        simulator.AdvanceTo(0.1)

        # Check data generated by the VectorSystem.
        expected_t = np.array([
            0.0, 0.0001, 0.0006, 0.0031, 0.0156, 0.0781, 0.1])
        expected_y = np.array([
            [0.000000e+00, 1.000000e-08, 3.600000e-07, 9.610000e-06,
             2.433600e-04, 6.099610e-03, 1.000000e-02],
            [0.000000e+00, 2.000000e-08, 7.200000e-07, 1.922000e-05,
             4.867200e-04, 1.219922e-02, 2.000000e-02],
            [0.000000e+00, 1.000000e-04, 6.000000e-04, 3.100000e-03,
             1.560000e-02, 7.810000e-02, 1.000000e-01],
        ])
        self._check_input_data(log, expected_t, expected_y)

        # Resample [0, .1] by step=0.03125 => 4 data points.
        expected_t = np.array([0.0, 0.03125, 0.0625, 0.09375])
        expected_y = np.array([
            [0.0000000000, 0.0017097650, 0.0046378900, 0.0088868750],
            [0.0000000000, 0.0034195300, 0.0092757800, 0.0177737500],
            [0.0000000000, 0.0312500000, 0.0625000000, 0.0937500000],
        ])
        self._check_resample(log, 0.03125, expected_t, expected_y)

        # Resample [0, .1] by step=0.01 => 10 data points.
        expected_t = np.array([
            0.0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09])
        expected_y = np.array([
            [0.0000000000, 0.0001386400, 0.0006556400, 0.0015926400,
             0.0025296400, 0.0034666400, 0.0044036400, 0.0053406400,
             0.0064380000, 0.0082190000],
            [0.0000000000, 0.0002772800, 0.0013112800, 0.0031852800,
             0.0050592800, 0.0069332800, 0.0088072800, 0.0106812800,
             0.0128760000, 0.0164380000],
            [0.0000000000, 0.0100000000, 0.0200000000, 0.0300000000,
             0.0400000000, 0.0500000000, 0.0600000000, 0.0700000000,
             0.0800000000, 0.0900000000],
        ])
        self._check_resample(log, 0.01, expected_t, expected_y)

    def create_log(self, vector_system):
        builder = DiagramBuilder()
        system = builder.AddSystem(vector_system)
        logger = builder.AddSystem(VectorLogSink(vector_system.output_size))
        builder.Connect(system.get_output_port(0), logger.get_input_port(0))
        diagram = builder.Build()
        context = diagram.CreateDefaultContext()
        simulator = Simulator(diagram, context)
        # Get the log and make sure its original values are as expected.
        log = logger.FindLog(context)
        return simulator, log, context

    def _check_input_data(self, log, expected_t, expected_y):
        assert_allclose(
            log.sample_times(), expected_t,
            err_msg="Expected simulation input times not equivalent.")
        assert_allclose(
            log.data(), expected_y,
            err_msg="Expected simulation input data not equivalent.")

    def _check_resample(self, log, step, expected_t, expected_y):
        self.validate_resample(log, step, expected_t, expected_y)

        # Final test: make sure un-sorted data gets sorted. Use a proxy to
        # reverse the original log; only the sample_times and data methods
        # are needed.
        class ReverseLog:
            def sample_times(self):
                return np.flip(np.array(log.sample_times(), copy=True),
                               axis=0)

            def data(self):
                return np.flip(np.array(log.data(), copy=True), axis=1)

        # Re-use the previous test's expected t and x.
        r_log = ReverseLog()
        self.validate_resample(r_log, step, expected_t, expected_y)

    def validate_resample(self, log, time_step, t_expected, x_expected):
        """Perform the resampling and validate with the provided values."""
        t, x = log.sample_times(), log.data()
        t, x = _resample_interp1d(t, x, time_step)
        self.assertTrue(
            t.shape[0] == x.shape[1],
            msg=f"Expected t.shape={t.shape} to match x.shape={x.shape}.")
        assert_allclose(
            t, t_expected, err_msg="Resampled times are not as expected.")
        assert_allclose(
            x, x_expected, err_msg="Resampled data are not as expected.")
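
# A minimal standalone sketch (not part of the test above) of calling
# _resample_interp1d directly, the same way validate_resample does. The
# times and values here are made up for illustration; x has one row per
# logged signal and one column per sample.
import numpy as np
from pydrake.systems._resample_interp1d import _resample_interp1d

t = np.array([0.0, 0.1, 0.4, 1.0])
x = np.array([[0.0, 1.0, 4.0, 10.0]])  # One signal, len(t) samples.
t_new, x_new = _resample_interp1d(t, x, 0.2)
assert t_new.shape[0] == x_new.shape[1]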
0
/home/johnshepherd/drake/bindings/pydrake/systems
/home/johnshepherd/drake/bindings/pydrake/systems/test/lifetime_test.py
# -*- coding: utf-8 -*-

"""
@file
Captures limitations for the present state of the Python bindings for the
lifetime of objects; eventually we will lock down capabilities as they are
introduced.
"""

import unittest

import numpy as np

from pydrake.systems.analysis import (
    Simulator,
)
from pydrake.systems.framework import (
    DiagramBuilder,
)
from pydrake.systems.primitives import (
    Adder,
)
from pydrake.systems.test.test_util import (
    DeleteListenerSystem,
    DeleteListenerVector,
)


class Info:
    # Tracks if an instance has been deleted.
    def __init__(self):
        self.deleted = False

    def record_deletion(self):
        assert not self.deleted
        self.deleted = True


class TestLifetime(unittest.TestCase):
    def test_basic(self):
        info = Info()
        system = DeleteListenerSystem(info.record_deletion)
        self.assertFalse(info.deleted)
        del system
        self.assertTrue(info.deleted)

    def test_ownership_diagram(self):
        info = Info()
        system = DeleteListenerSystem(info.record_deletion)
        builder = DiagramBuilder()
        # `system` is now owned by `builder`.
        builder.AddSystem(system)
        # `system` is now owned by `diagram`.
        diagram = builder.Build()
        # Delete the builder. Should still be alive.
        del builder
        self.assertFalse(info.deleted)
        # Delete the diagram. Should be dead.
        del diagram
        # Using `py::keep_alive`, `system` will keep `builder` alive after
        # `.AddSystem` is called, and `builder` will keep `diagram` alive
        # after `.Build` is called.
        # Transitively, `system` will keep `builder` alive (as its old owner)
        # and `diagram` (as its new owner, which is kept alive by `builder`).
        self.assertFalse(info.deleted)
        self.assertTrue(system is not None)
        del system
        # Upon removing this reference, everything should have been cleared
        # up. However, since we work around #14355 by inducing a keep_alive
        # cycle, it will not be deleted.
        self.assertFalse(info.deleted)

    def test_ownership_multiple_containers(self):
        info = Info()
        system = DeleteListenerSystem(info.record_deletion)
        builder_1 = DiagramBuilder()
        builder_2 = DiagramBuilder()
        builder_1.AddSystem(system)
        # This is tested in our fork of `pybind11`, but echoed here for when
        # we decide to switch to use `shared_ptr`.
        with self.assertRaises(RuntimeError):
            # This should throw an error from `pybind11`, since two
            # containers are trying to own a unique_ptr-held object.
            builder_2.AddSystem(system)

    def test_ownership_simulator(self):
        info = Info()
        system = DeleteListenerSystem(info.record_deletion)
        simulator = Simulator(system)
        self.assertFalse(info.deleted)
        del simulator
        # Simulator does not own the system.
        self.assertFalse(info.deleted)
        self.assertTrue(system is not None)
        # Now ensure that keeping the simulator alive will keep the system
        # alive (using `py::keep_alive`).
        simulator = Simulator(system)
        del system
        self.assertFalse(info.deleted)
        del simulator
        self.assertTrue(info.deleted)

    def test_ownership_vector(self):
        system = Adder(1, 1)
        context = system.CreateDefaultContext()
        info = Info()
        vector = DeleteListenerVector(info.record_deletion)
        system.get_input_port(0).FixValue(context, vector)
        del context
        # Same as above applications, using `py::keep_alive`.
        self.assertFalse(info.deleted)
        self.assertTrue(vector is not None)
        # Ensure that we do not get segfault behavior when accessing /
        # mutating the values.
        self.assertTrue(np.allclose(vector.get_value(), [0.]))
        vector.get_mutable_value()[:] = [10.]
        self.assertTrue(np.allclose(vector.get_value(), [10.]))
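
# A minimal standalone sketch (not part of the test above) of the ownership
# pattern under test, shown with a stock Adder instead of
# DeleteListenerSystem. After AddSystem and Build, `py::keep_alive` ties the
# wrapper, builder, and diagram together, so dropping one reference does not
# invalidate the others.
from pydrake.systems.framework import DiagramBuilder
from pydrake.systems.primitives import Adder

builder = DiagramBuilder()
adder = builder.AddSystem(Adder(2, 3))  # 2 input ports of size 3.
diagram = builder.Build()
del builder
# Both `adder` and `diagram` remain usable here.
context = diagram.CreateDefaultContext()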
0
/home/johnshepherd/drake/bindings/pydrake/systems
/home/johnshepherd/drake/bindings/pydrake/systems/test/value_test.py
# -*- coding: utf-8 -*- import copy import unittest import numpy as np from pydrake.autodiffutils import AutoDiffXd from pydrake.common.value import Value from pydrake.common.test_utilities import numpy_compare from pydrake.symbolic import Expression from pydrake.systems.framework import ( BasicVector, BasicVector_, Parameters, VectorBase, ) def pass_through(x): return x # TODO(eric.cousineau): Add negative (or positive) test cases for AutoDiffXd # and Symbolic once they are in the bindings. class TestValue(unittest.TestCase): def test_basic_vector_double(self): # Test constructing vectors of sizes [0, 1, 2], and ensure that we can # construct from both lists and `np.array` objects with no ambiguity. for n in [0, 1, 2]: for wrap in [pass_through, np.array]: # Ensure that we can get vectors templated on double by # reference. expected_init = wrap([float(x) for x in range(n)]) expected_add = wrap([x + 1 for x in expected_init]) expected_set = wrap([x + 10 for x in expected_init]) expected_plus_eq = wrap([3*x for x in expected_set]) value_data = BasicVector(expected_init) value = value_data.get_mutable_value() self.assertTrue(np.allclose(value, expected_init)) # Add value directly. # TODO(eric.cousineau): Determine if there is a way to extract # the pointer referred to by the buffer (e.g. `value.data`). value[:] += 1 self.assertTrue(np.allclose(value, expected_add)) self.assertTrue( np.allclose(value_data.value(), expected_add)) self.assertTrue( np.allclose(value_data.get_value(), expected_add)) self.assertTrue( np.allclose(value_data.get_mutable_value(), expected_add)) # Set value from `BasicVector`. value_data.SetFromVector(value=expected_set) self.assertTrue(np.allclose(value, expected_set)) self.assertTrue( np.allclose(value_data.get_value(), expected_set)) self.assertTrue( np.allclose(value_data.get_mutable_value(), expected_set)) # Set value to zero. old_value = value_data.CopyToVector() value_data.SetZero() if n > 0: self.assertFalse(np.allclose(old_value, np.zeros(n))) self.assertTrue(np.allclose(value, np.zeros(n))) self.assertTrue( np.allclose(value_data.get_value(), np.zeros(n))) self.assertTrue( np.allclose(value_data.get_mutable_value(), np.zeros(n))) # Set value from `BasicVector`. value_data.set_value(expected_set) self.assertTrue(np.allclose(value, expected_set)) self.assertTrue( np.allclose(value_data.get_value(), expected_set)) self.assertTrue( np.allclose(value_data.get_mutable_value(), expected_set)) # Ensure we can construct from size. old_value_data = value_data value_data = BasicVector(n) self.assertEqual(value_data.size(), n) value_data.SetFrom(value=old_value_data) self.assertTrue( np.allclose(value_data.get_value(), expected_set)) new_value_data = value_data.PlusEqScaled(scale=2, rhs=old_value_data) self.assertTrue( np.allclose(value_data.get_value(), expected_plus_eq)) # Ensure we can clone. value_copies = [ value_data.Clone(), copy.copy(value_data), copy.deepcopy(value_data), ] for value_copy in value_copies: self.assertTrue(value_copy is not value_data) self.assertEqual(value_data.size(), n) def test_basic_vector_set_get(self): value = BasicVector(np.arange(3., 5.)) self.assertEqual(value.GetAtIndex(index=1), 4.) value.SetAtIndex(index=1, value=5.) self.assertEqual(value[1], 5.) value[1] = 6. self.assertEqual(value[1], 6.) 
def assert_basic_vector_equal(self, a, b): self.assertIs(type(a), type(b)) self.assertIsNot(a, b) np.testing.assert_equal(a.get_value(), b.get_value()) def test_str_and_repr(self): # T=float self.assertIs(BasicVector, BasicVector_[float]) vector_f = [1.] value_f = BasicVector_[float](vector_f) self.assertEqual(str(value_f), "[1.0]") self.assertEqual(repr(value_f), "BasicVector([1.0])") # Check repr() invariant. self.assert_basic_vector_equal(value_f, eval(repr(value_f))) # - Empty. value_f_empty = BasicVector_[float]([]) self.assertEqual(str(value_f_empty), "[]") self.assertEqual(repr(value_f_empty), "BasicVector([])") # - Multiple values. value_f_multi = BasicVector_[float]([1., 2.]) self.assertEqual(str(value_f_multi), "[1.0, 2.0]") self.assertEqual(repr(value_f_multi), "BasicVector([1.0, 2.0])") # TODO(eric.cousineau): Make repr() for AutoDiffXd and Expression be # semi-usable. # T=AutoDiffXd value_ad = BasicVector_[AutoDiffXd](vector_f) self.assertEqual(str(value_ad), "[<AutoDiffXd 1.0 nderiv=0>]") self.assertEqual( repr(value_ad), "BasicVector_[AutoDiffXd]([<AutoDiffXd 1.0 nderiv=0>])") # T=Expression value_sym = BasicVector_[Expression](vector_f) self.assertEqual(str(value_sym), "[<Expression \"1\">]") self.assertEqual( repr(value_sym), "BasicVector_[Expression]([<Expression \"1\">])") @numpy_compare.check_all_types def test_value_registration(self, T): Value[BasicVector_[T]] def test_parameters_api(self): def compare(actual, expected): self.assertEqual(type(actual), type(expected)) if isinstance(actual, VectorBase): self.assertTrue( np.allclose(actual.get_value(), expected.get_value())) else: self.assertEqual(actual.get_value(), expected.get_value()) model_numeric = BasicVector([0.]) model_abstract = Value("Hello") params = Parameters( numeric=[model_numeric.Clone()], abstract=[model_abstract.Clone()]) self.assertEqual(params.num_numeric_parameter_groups(), 1) self.assertEqual(params.num_abstract_parameters(), 1) # Numeric. compare(params.get_numeric_parameter(index=0), model_numeric) compare(params.get_mutable_numeric_parameter(index=0), model_numeric) # WARNING: This will invalidate old references! params.set_numeric_parameters(params.get_numeric_parameters().Clone()) # Abstract. compare(params.get_abstract_parameter(index=0), model_abstract) compare(params.get_mutable_abstract_parameter(index=0), model_abstract) # WARNING: This will invalidate old references! params.set_abstract_parameters( params.get_abstract_parameters().Clone()) # WARNING: This may invalidate old references! params.SetFrom(copy.deepcopy(params)) # Test alternative constructors. ctor_test = [ Parameters(), Parameters(numeric=[model_numeric.Clone()]), Parameters(abstract=[model_abstract.Clone()]), Parameters( numeric=[model_numeric.Clone()], abstract=[model_abstract.Clone()]), Parameters(vec=model_numeric.Clone()), Parameters(value=model_abstract.Clone()), ]
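
# A minimal standalone sketch (not part of the test above) of the basic
# BasicVector and Value round trips that the tests exercise in more depth.
from pydrake.common.value import Value
from pydrake.systems.framework import BasicVector

vec = BasicVector([1., 2., 3.])
vec.SetAtIndex(index=0, value=10.)
assert vec.GetAtIndex(index=0) == 10.
vec[1] = 20.  # Same effect via __setitem__.
assert vec[1] == 20.
abstract = Value("Hello")
abstract.set_value("world")
assert abstract.get_value() == "world"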
0
/home/johnshepherd/drake/bindings/pydrake/systems
/home/johnshepherd/drake/bindings/pydrake/systems/test/primitives_test.py
import gc import unittest import numpy as np from pydrake.autodiffutils import AutoDiffXd from pydrake.common import RandomDistribution, RandomGenerator from pydrake.common.test_utilities import numpy_compare from pydrake.common.value import Value from pydrake.symbolic import Expression, Variable from pydrake.systems.framework import ( BasicVector, DiagramBuilder, DiagramBuilder_, InputPort, TriggerType, VectorBase, ) from pydrake.systems.test.test_util import ( MyVector2, ) from pydrake.systems.primitives import ( Adder, Adder_, AddRandomInputs, AffineSystem, AffineSystem_, ConstantValueSource, ConstantValueSource_, ConstantVectorSource, ConstantVectorSource_, ControllabilityMatrix, Demultiplexer, Demultiplexer_, DiscreteDerivative, DiscreteDerivative_, DiscreteTimeDelay, DiscreteTimeDelay_, DiscreteTimeIntegrator_, FirstOrderLowPassFilter, FirstOrderTaylorApproximation, Gain, Gain_, Integrator, Integrator_, IsControllable, IsDetectable, IsObservable, IsStabilizable, Linearize, LinearSystem, LinearSystem_, LinearTransformDensity, LinearTransformDensity_, LogVectorOutput, MatrixGain, Multiplexer, Multiplexer_, MultilayerPerceptron, MultilayerPerceptron_, ObservabilityMatrix, PassThrough, PassThrough_, PerceptronActivationType, PortSwitch, PortSwitch_, RandomSource, Saturation, Saturation_, SharedPointerSystem, SharedPointerSystem_, Sine, Sine_, StateInterpolatorWithDiscreteDerivative, StateInterpolatorWithDiscreteDerivative_, SymbolicVectorSystem, SymbolicVectorSystem_, TrajectoryAffineSystem, TrajectoryAffineSystem_, TrajectoryLinearSystem, TrajectoryLinearSystem_, TrajectorySource, TrajectorySource_, VectorLog, VectorLogSink, VectorLogSink_, WrapToSystem, WrapToSystem_, ZeroOrderHold, ZeroOrderHold_, ) from pydrake.trajectories import PiecewisePolynomial def compare_value(test, a, b): # Compares a vector or abstract value. if isinstance(a, VectorBase): test.assertTrue(np.allclose(a.get_value(), b.get_value())) else: test.assertEqual(type(a.get_value()), type(b.get_value())) test.assertEqual(a.get_value(), b.get_value()) class TestGeneral(unittest.TestCase): def _check_instantiations(self, template, supports_symbolic=True): default_cls = template[None] self.assertTrue(template[float] is default_cls) self.assertTrue(template[AutoDiffXd] is not default_cls) if supports_symbolic: self.assertTrue(template[Expression] is not default_cls) def test_instantiations(self): # TODO(eric.cousineau): Refine tests once NumPy functionality is # resolved for dtype=object, or dtype=custom is used. 
self._check_instantiations(Adder_) self._check_instantiations(AffineSystem_) self._check_instantiations(ConstantValueSource_) self._check_instantiations(ConstantVectorSource_) self._check_instantiations(Demultiplexer_) self._check_instantiations(DiscreteDerivative_) self._check_instantiations(DiscreteTimeDelay_) self._check_instantiations(DiscreteTimeIntegrator_) self._check_instantiations(Gain_) self._check_instantiations(Integrator_) self._check_instantiations(LinearSystem_) self._check_instantiations(LinearTransformDensity_, supports_symbolic=False) self._check_instantiations(Multiplexer_) self._check_instantiations(MultilayerPerceptron_) self._check_instantiations(PassThrough_) self._check_instantiations(PortSwitch_) self._check_instantiations(Saturation_) self._check_instantiations(SharedPointerSystem_) self._check_instantiations(Sine_) self._check_instantiations(StateInterpolatorWithDiscreteDerivative_) self._check_instantiations(SymbolicVectorSystem_) self._check_instantiations(TrajectoryAffineSystem_, supports_symbolic=False) self._check_instantiations(TrajectoryLinearSystem_, supports_symbolic=False) self._check_instantiations(TrajectorySource_) self._check_instantiations(VectorLogSink_) self._check_instantiations(WrapToSystem_) self._check_instantiations(ZeroOrderHold_) @numpy_compare.check_all_types def test_discrete_time_integrator(self, T): time_step = 0.1 integrator = DiscreteTimeIntegrator_[T](size=2, time_step=time_step) self.assertEqual(integrator.time_step(), time_step) context = integrator.CreateDefaultContext() x = np.array([1., 2.]) integrator.set_integral_value(context=context, value=x) u = np.array([3., 4.]) integrator.get_input_port(0).FixValue(context, u) x_next = integrator.EvalUniquePeriodicDiscreteUpdate( context).get_vector()._get_value_copy() numpy_compare.assert_float_equal(x_next, x + time_step * u) def test_linear_affine_system(self): # Just make sure linear system is spelled correctly. A = np.identity(2) B = np.array([[0], [1]]) f0 = np.array([[0], [0]]) C = np.array([[0, 1]]) D = [1] y0 = [0] system = LinearSystem(A, B, C, D) context = system.CreateDefaultContext() self.assertEqual(system.get_input_port(0).size(), 1) self.assertEqual(context .get_mutable_continuous_state_vector().size(), 2) self.assertEqual(system.get_output_port(0).size(), 1) self.assertTrue((system.A() == A).all()) self.assertTrue((system.B() == B).all()) self.assertTrue((system.f0() == f0).all()) self.assertTrue((system.C() == C).all()) self.assertEqual(system.D(), D) self.assertEqual(system.y0(), y0) self.assertEqual(system.time_period(), 0.) 
x0 = np.array([1, 2]) system.configure_default_state(x0=x0) system.SetDefaultContext(context) np.testing.assert_equal( context.get_continuous_state_vector().CopyToVector(), x0) generator = RandomGenerator() system.SetRandomContext(context, generator) np.testing.assert_equal( context.get_continuous_state_vector().CopyToVector(), x0) system.configure_random_state(covariance=np.eye(2)) system.SetRandomContext(context, generator) self.assertNotEqual( context.get_continuous_state_vector().CopyToVector()[1], x0[1]) Co = ControllabilityMatrix(system) self.assertEqual(Co.shape, (2, 2)) self.assertFalse(IsControllable(system)) self.assertFalse(IsControllable(system, 1e-6)) self.assertFalse(IsStabilizable(sys=system)) self.assertFalse(IsStabilizable(sys=system, threshold=1e-6)) Ob = ObservabilityMatrix(system) self.assertEqual(Ob.shape, (2, 2)) self.assertFalse(IsObservable(system)) self.assertFalse(IsDetectable(sys=system)) self.assertFalse(IsDetectable(sys=system, threshold=1e-6)) system = AffineSystem(A, B, f0, C, D, y0, .1) self.assertEqual(system.get_input_port(0), system.get_input_port()) self.assertEqual(system.get_output_port(0), system.get_output_port()) context = system.CreateDefaultContext() self.assertEqual(system.get_input_port(0).size(), 1) self.assertEqual(context.get_discrete_state_vector().size(), 2) self.assertEqual(system.get_output_port(0).size(), 1) self.assertTrue((system.A() == A).all()) self.assertTrue((system.B() == B).all()) self.assertTrue((system.f0() == f0).all()) self.assertTrue((system.C() == C).all()) self.assertEqual(system.D(), D) self.assertEqual(system.y0(), y0) self.assertEqual(system.time_period(), .1) system.get_input_port(0).FixValue(context, 0) linearized = Linearize(system, context) self.assertTrue((linearized.A() == A).all()) taylor = FirstOrderTaylorApproximation(system, context) self.assertTrue((taylor.y0() == y0).all()) new_A = np.array([[1, 2], [3, 4]]) new_B = np.array([[5], [6]]) new_f0 = np.array([[7], [8]]) new_C = np.array([[9, 10]]) new_D = np.array([[11]]) new_y0 = np.array([12]) system.UpdateCoefficients( A=new_A, B=new_B, f0=new_f0, C=new_C, D=new_D, y0=new_y0 ) np.testing.assert_equal(new_A, system.A()) np.testing.assert_equal(new_B, system.B()) np.testing.assert_equal(new_f0.flatten(), system.f0()) np.testing.assert_equal(new_C, system.C()) np.testing.assert_equal(new_D, system.D()) np.testing.assert_equal(new_y0, system.y0()) system = MatrixGain(D=A) self.assertTrue((system.D() == A).all()) system = TrajectoryAffineSystem( PiecewisePolynomial(A), PiecewisePolynomial(B), PiecewisePolynomial(f0), PiecewisePolynomial(C), PiecewisePolynomial(D), PiecewisePolynomial(y0), .1) self.assertEqual(system.get_input_port(0), system.get_input_port()) self.assertEqual(system.get_output_port(0), system.get_output_port()) context = system.CreateDefaultContext() self.assertEqual(system.get_input_port(0).size(), 1) self.assertEqual(context.get_discrete_state_vector().size(), 2) self.assertEqual(system.get_output_port(0).size(), 1) for t in np.linspace(0., 1., 5): self.assertTrue((system.A(t) == A).all()) self.assertTrue((system.B(t) == B).all()) self.assertTrue((system.f0(t) == f0).all()) self.assertTrue((system.C(t) == C).all()) self.assertEqual(system.D(t), D) self.assertEqual(system.y0(t), y0) self.assertEqual(system.time_period(), .1) x0 = np.array([1, 2]) system.configure_default_state(x0=x0) system.SetDefaultContext(context) np.testing.assert_equal( context.get_discrete_state_vector().CopyToVector(), x0) generator = RandomGenerator() 
system.SetRandomContext(context, generator) np.testing.assert_equal( context.get_discrete_state_vector().CopyToVector(), x0) system.configure_random_state(covariance=np.eye(2)) system.SetRandomContext(context, generator) self.assertNotEqual( context.get_discrete_state_vector().CopyToVector()[1], x0[1]) system = TrajectoryLinearSystem( A=PiecewisePolynomial(A), B=PiecewisePolynomial(B), C=PiecewisePolynomial(C), D=PiecewisePolynomial(D), time_period=0.1) self.assertEqual(system.time_period(), .1) system.configure_default_state(x0=np.array([1, 2])) system.configure_random_state(covariance=np.eye(2)) def test_linear_affine_system_empty_matrices(self): # Confirm the default values for the system matrices in the # constructor. def CheckSizes(system, num_states, num_inputs, num_outputs): self.assertEqual(system.num_continuous_states(), num_states) self.assertEqual(system.num_inputs(), num_inputs) self.assertEqual(system.num_outputs(), num_outputs) # A constant vector system. system = AffineSystem(y0=[2, 1]) CheckSizes(system, num_states=0, num_inputs=0, num_outputs=2) # A matrix gain. system = AffineSystem(D=np.eye(2)) CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2) system = LinearSystem(D=np.eye(2)) CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2) # Add an offset. system = AffineSystem(D=np.eye(2), y0=[1, 2]) CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2) # An integrator. system = LinearSystem(B=np.eye(2)) CheckSizes(system, num_states=2, num_inputs=2, num_outputs=0) def test_linear_system_zero_size(self): # Explicitly test #12633. num_x = 0 num_y = 2 num_u = 2 A = np.zeros((num_x, num_x)) B = np.zeros((num_x, num_u)) C = np.zeros((num_y, num_x)) D = np.zeros((num_y, num_u)) self.assertIsNotNone(LinearSystem(A, B, C, D)) @numpy_compare.check_nonsymbolic_types def test_linear_transform_density(self, T): dut = LinearTransformDensity_[T]( distribution=RandomDistribution.kGaussian, input_size=3, output_size=3) w_in = np.array([T(0.5), T(0.1), T(1.5)]) context = dut.CreateDefaultContext() dut.get_input_port_w_in().FixValue(context, w_in) self.assertEqual(dut.get_input_port_A().size(), 9) self.assertEqual(dut.get_input_port_b().size(), 3) self.assertEqual(dut.get_distribution(), RandomDistribution.kGaussian) A = np.array([ [T(0.5), T(1), T(2)], [T(1), T(2), T(3)], [T(3), T(4), T(5)]]) dut.FixConstantA(context=context, A=A) b = np.array([T(1), T(2), T(3)]) dut.FixConstantB(context=context, b=b) dut.CalcDensity(context=context) self.assertEqual(dut.get_output_port_w_out().size(), 3) self.assertEqual(dut.get_output_port_w_out_density().size(), 1) def test_vector_pass_through(self): model_value = BasicVector([1., 2, 3]) system = PassThrough(vector_size=model_value.size()) context = system.CreateDefaultContext() system.get_input_port(0).FixValue(context, model_value) output = system.AllocateOutput() input_eval = system.EvalVectorInput(context, 0) compare_value(self, input_eval, model_value) system.CalcOutput(context, output) output_value = output.get_vector_data(0) compare_value(self, output_value, model_value) def test_default_vector_pass_through(self): model_value = [1., 2, 3] system = PassThrough(value=model_value) context = system.CreateDefaultContext() np.testing.assert_array_equal( model_value, system.get_output_port().Eval(context)) def test_abstract_pass_through(self): model_value = Value("Hello world") system = PassThrough(abstract_model_value=model_value) context = system.CreateDefaultContext() system.get_input_port(0).FixValue(context, model_value) 
output = system.AllocateOutput() input_eval = system.EvalAbstractInput(context, 0) compare_value(self, input_eval, model_value) system.CalcOutput(context, output) output_value = output.get_data(0) compare_value(self, output_value, model_value) def test_port_switch(self): system = PortSwitch(vector_size=2) a = system.DeclareInputPort(name="a") system.DeclareInputPort(name="b") context = system.CreateDefaultContext() self.assertIsInstance(a, InputPort) system.get_port_selector_input_port().FixValue(context, a.get_index()) def test_first_order_low_pass_filter(self): filter1 = FirstOrderLowPassFilter(time_constant=3.0, size=4) self.assertEqual(filter1.get_time_constant(), 3.0) alpha = np.array([1, 2, 3]) filter2 = FirstOrderLowPassFilter(time_constants=alpha) np.testing.assert_array_equal(filter2.get_time_constants_vector(), alpha) context = filter2.CreateDefaultContext() filter2.set_initial_output_value(context, [0., -0.2, 0.4]) def test_gain(self): k = 42. input_size = 10 systems = [Gain(k=k, size=input_size), Gain(k=k*np.ones(input_size))] for system in systems: context = system.CreateDefaultContext() output = system.AllocateOutput() def mytest(input, expected): system.get_input_port(0).FixValue(context, input) system.CalcOutput(context, output) self.assertTrue(np.allclose(output.get_vector_data( 0).CopyToVector(), expected)) test_input = np.arange(input_size) mytest(np.arange(input_size), k*np.arange(input_size)) def test_saturation(self): system = Saturation((0., -1., 3.), (1., 2., 4.)) context = system.CreateDefaultContext() output = system.AllocateOutput() def mytest(input, expected): system.get_input_port(0).FixValue(context, input) system.CalcOutput(context, output) self.assertTrue(np.allclose(output.get_vector_data( 0).CopyToVector(), expected)) mytest((-5., 5., 4.), (0., 2., 4.)) mytest((.4, 0., 3.5), (.4, 0., 3.5)) def test_trajectory_source(self): ppt = PiecewisePolynomial.FirstOrderHold( [0., 1.], [[2., 3.], [2., 1.]]) system = TrajectorySource(trajectory=ppt, output_derivative_order=0, zero_derivatives_beyond_limits=True) context = system.CreateDefaultContext() output = system.AllocateOutput() def mytest(input, expected): context.SetTime(input) system.CalcOutput(context, output) self.assertTrue(np.allclose(output.get_vector_data( 0).CopyToVector(), expected)) mytest(0.0, (2.0, 2.0)) mytest(0.5, (2.5, 1.5)) mytest(1.0, (3.0, 1.0)) ppt2 = PiecewisePolynomial.FirstOrderHold( [0., 1.], [[4., 6.], [4., 2.]]) system.UpdateTrajectory(trajectory=ppt2) mytest(0.0, (4.0, 4.0)) mytest(0.5, (5.0, 3.0)) mytest(1.0, (6.0, 2.0)) def test_symbolic_vector_system(self): t = Variable("t") x = [Variable("x0"), Variable("x1")] u = [Variable("u0"), Variable("u1")] system = SymbolicVectorSystem(time=t, state=x, input=u, dynamics=[x[0] + x[1], t], output=[u[1]], time_period=0.0) context = system.CreateDefaultContext() self.assertEqual(context.num_continuous_states(), 2) self.assertEqual(context.num_discrete_state_groups(), 0) self.assertEqual(system.get_input_port(0).size(), 2) self.assertEqual(system.get_output_port(0).size(), 1) self.assertEqual(context.num_abstract_parameters(), 0) self.assertEqual(context.num_numeric_parameter_groups(), 0) self.assertTrue(system.dynamics_for_variable(x[0]) .EqualTo(x[0] + x[1])) self.assertTrue(system.dynamics_for_variable(x[1]) .EqualTo(t)) def test_symbolic_vector_system_parameters(self): t = Variable("t") x = [Variable("x0"), Variable("x1")] u = [Variable("u0"), Variable("u1")] p = [Variable("p0"), Variable("p1")] system = SymbolicVectorSystem(time=t, state=x, 
input=u, parameter=p, dynamics=[p[0] * x[0] + x[1] + p[1], t], output=[u[1]], time_period=0.0) context = system.CreateDefaultContext() self.assertEqual(context.num_continuous_states(), 2) self.assertEqual(context.num_discrete_state_groups(), 0) self.assertEqual(system.get_input_port(0).size(), 2) self.assertEqual(system.get_output_port(0).size(), 1) self.assertEqual(context.num_abstract_parameters(), 0) self.assertEqual(context.num_numeric_parameter_groups(), 1) self.assertEqual(context.get_numeric_parameter(0).size(), 2) self.assertTrue(system.dynamics_for_variable(x[0]) .EqualTo(p[0] * x[0] + x[1] + p[1])) self.assertTrue(system.dynamics_for_variable(x[1]) .EqualTo(t)) def test_wrap_to_system(self): system = WrapToSystem(2) system.set_interval(1, 1., 2.) context = system.CreateDefaultContext() output = system.AllocateOutput() def mytest(input, expected): system.get_input_port(0).FixValue(context, input) system.CalcOutput(context, output) self.assertTrue(np.allclose(output.get_vector_data( 0).CopyToVector(), expected)) mytest((-1.5, 0.5), (-1.5, 1.5)) mytest((.2, .3), (.2, 1.3)) def test_demultiplexer(self): # Test demultiplexer with scalar outputs. demux = Demultiplexer(size=4) context = demux.CreateDefaultContext() self.assertEqual(demux.num_input_ports(), 1) self.assertEqual(demux.num_output_ports(), 4) numpy_compare.assert_equal(demux.get_output_ports_sizes(), [1, 1, 1, 1]) input_vec = np.array([1., 2., 3., 4.]) demux.get_input_port(0).FixValue(context, input_vec) output = demux.AllocateOutput() demux.CalcOutput(context, output) for i in range(4): self.assertTrue( np.allclose(output.get_vector_data(i).get_value(), input_vec[i])) # Test demultiplexer with vector outputs. demux = Demultiplexer(size=4, output_ports_size=2) context = demux.CreateDefaultContext() self.assertEqual(demux.num_input_ports(), 1) self.assertEqual(demux.num_output_ports(), 2) numpy_compare.assert_equal(demux.get_output_ports_sizes(), [2, 2]) demux.get_input_port(0).FixValue(context, input_vec) output = demux.AllocateOutput() demux.CalcOutput(context, output) for i in range(2): self.assertTrue( np.allclose(output.get_vector_data(i).get_value(), input_vec[2*i:2*i+2])) # Test demultiplexer with different output port sizes. 
output_ports_sizes = np.array([1, 2, 1]) num_output_ports = output_ports_sizes.size input_vec = np.array([1., 2., 3., 4.]) demux = Demultiplexer(output_ports_sizes=output_ports_sizes) context = demux.CreateDefaultContext() self.assertEqual(demux.num_input_ports(), 1) self.assertEqual(demux.num_output_ports(), num_output_ports) numpy_compare.assert_equal(demux.get_output_ports_sizes(), output_ports_sizes) demux.get_input_port(0).FixValue(context, input_vec) output = demux.AllocateOutput() demux.CalcOutput(context, output) output_port_start = 0 for i in range(num_output_ports): output_port_size = output.get_vector_data(i).size() self.assertTrue( np.allclose(output.get_vector_data(i).get_value(), input_vec[output_port_start: output_port_start+output_port_size])) output_port_start += output_port_size def test_multiplexer(self): my_vector = MyVector2(data=[1., 2.]) test_cases = [ dict(has_vector=False, mux=Multiplexer(num_scalar_inputs=4), data=[[5.], [3.], [4.], [2.]]), dict(has_vector=False, mux=Multiplexer(input_sizes=[2, 3]), data=[[8., 4.], [3., 6., 9.]]), dict(has_vector=True, mux=Multiplexer(model_vector=my_vector), data=[[42.], [3.]]), ] for case in test_cases: mux = case['mux'] port_size = sum([len(vec) for vec in case['data']]) self.assertEqual(mux.get_output_port(0).size(), port_size) context = mux.CreateDefaultContext() output = mux.AllocateOutput() num_ports = len(case['data']) self.assertEqual(context.num_input_ports(), num_ports) for j, vec in enumerate(case['data']): mux.get_input_port(j).FixValue(context, vec) mux.CalcOutput(context, output) self.assertTrue( np.allclose(output.get_vector_data(0).get_value(), [elem for vec in case['data'] for elem in vec])) if case['has_vector']: # Check the type matches MyVector2. value = output.get_vector_data(0) self.assertTrue(isinstance(value, MyVector2)) def test_multilayer_perceptron(self): mlp = MultilayerPerceptron( layers=[1, 2, 3], activation_type=PerceptronActivationType.kReLU) self.assertEqual(mlp.get_input_port().size(), 1) self.assertEqual(mlp.get_output_port().size(), 3) context = mlp.CreateDefaultContext() params = np.zeros((mlp.num_parameters(), 1)) self.assertEqual(mlp.num_parameters(), 13) self.assertEqual(mlp.layers(), [1, 2, 3]) self.assertEqual(mlp.activation_type(layer=0), PerceptronActivationType.kReLU) self.assertEqual(len(mlp.GetParameters(context=context)), mlp.num_parameters()) mlp.SetWeights(context=context, layer=0, W=np.array([[1], [2]])) mlp.SetBiases(context=context, layer=0, b=[3, 4]) np.testing.assert_array_equal( mlp.GetWeights(context=context, layer=0), np.array([[1], [2]])) np.testing.assert_array_equal( mlp.GetBiases(context=context, layer=0), np.array([3, 4])) params = np.zeros(mlp.num_parameters()) mlp.SetWeights(params=params, layer=0, W=np.array([[1], [2]])) mlp.SetBiases(params=params, layer=0, b=[3, 4]) np.testing.assert_array_equal( mlp.GetWeights(params=params, layer=0), np.array([[1], [2]])) np.testing.assert_array_equal( mlp.GetBiases(params=params, layer=0), np.array([3, 4])) mutable_params = mlp.GetMutableParameters(context=context) mutable_params[:] = 3.0 np.testing.assert_array_equal(mlp.GetParameters(context), np.full(mlp.num_parameters(), 3.0)) global called_loss called_loss = False def silly_loss(Y, dloss_dY): global called_loss called_loss = True # We must be careful to update the dloss in place, rather than bind # a new matrix to the same variable name. dloss_dY[:] = 1 # dloss_dY = np.array(...etc...) 
# <== wrong return Y.sum() dloss_dparams = np.zeros((13,)) generator = RandomGenerator(23) mlp.SetRandomContext(context, generator) mlp.Backpropagation(context=context, X=np.array([1, 3, 4]).reshape((1, 3)), loss=silly_loss, dloss_dparams=dloss_dparams) self.assertTrue(called_loss) self.assertTrue(dloss_dparams.any()) # No longer all zero. dloss_dparams = np.zeros((13,)) mlp.BackpropagationMeanSquaredError(context=context, X=np.array([1, 3, 4]).reshape( (1, 3)), Y_desired=np.eye(3), dloss_dparams=dloss_dparams) self.assertTrue(dloss_dparams.any()) # No longer all zero. Y = np.asfortranarray(np.eye(3)) mlp.BatchOutput(context=context, X=np.array([[0.1, 0.3, 0.4]]), Y=Y) self.assertFalse(np.allclose(Y, np.eye(3))) Y2 = mlp.BatchOutput(context=context, X=np.array([[0.1, 0.3, 0.4]])) np.testing.assert_array_equal(Y, Y2) mlp2 = MultilayerPerceptron(layers=[3, 2, 1], activation_types=[ PerceptronActivationType.kReLU, PerceptronActivationType.kTanh ]) self.assertEqual(mlp2.activation_type(0), PerceptronActivationType.kReLU) self.assertEqual(mlp2.activation_type(1), PerceptronActivationType.kTanh) Y = np.asfortranarray(np.full((1, 3), 2.4)) dYdX = np.asfortranarray(np.full((3, 3), 5.3)) context2 = mlp2.CreateDefaultContext() mlp2.BatchOutput(context=context2, X=np.eye(3), Y=Y, dYdX=dYdX) # The default context sets the weights and biases to zero, so the # output (and gradients) should be zero. np.testing.assert_array_almost_equal(Y, np.zeros((1, 3))) np.testing.assert_array_almost_equal(dYdX, np.zeros((3, 3))) mlp = MultilayerPerceptron(use_sin_cos_for_input=[True, False], remaining_layers=[3, 2], activation_types=[ PerceptronActivationType.kReLU, PerceptronActivationType.kTanh ]) self.assertEqual(mlp.get_input_port().size(), 2) np.testing.assert_array_equal(mlp.layers(), [3, 3, 2]) def test_random_source(self): source = RandomSource(distribution=RandomDistribution.kUniform, num_outputs=2, sampling_interval_sec=0.01) self.assertEqual(source.get_output_port(0).size(), 2) builder = DiagramBuilder() # Note: There are no random inputs to add to the empty diagram, but it # confirms the API works. AddRandomInputs(sampling_interval_sec=0.01, builder=builder) builder_ad = DiagramBuilder_[AutoDiffXd]() AddRandomInputs(sampling_interval_sec=0.01, builder=builder_ad) def test_constant_vector_source(self): source = ConstantVectorSource(source_value=[1., 2.]) context = source.CreateDefaultContext() source.get_source_value(context) source.get_mutable_source_value(context) def test_ctor_api(self): """Tests construction of systems for systems whose executions semantics are not tested above. 
""" ConstantValueSource(Value("Hello world")) DiscreteTimeDelay(update_sec=0.1, delay_time_steps=5, vector_size=2) DiscreteTimeDelay( update_sec=0.1, delay_time_steps=5, abstract_model_value=Value("Hello world")) ZeroOrderHold(period_sec=0.1, offset_sec=0.0, vector_size=2) dut = ZeroOrderHold(period_sec=1.0, offset_sec=0.25, abstract_model_value=Value("Hello world")) self.assertEqual(dut.period(), 1.0) self.assertEqual(dut.offset(), 0.25) def test_shared_pointer_system_ctor(self): dut = SharedPointerSystem(value_to_hold=[1, 2, 3]) readback = dut.get() self.assertListEqual(readback, [1, 2, 3]) del dut self.assertListEqual(readback, [1, 2, 3]) def test_shared_pointer_system_builder(self): builder = DiagramBuilder() self.assertListEqual( SharedPointerSystem.AddToBuilder( builder=builder, value_to_hold=[1, 2, 3]), [1, 2, 3]) diagram = builder.Build() del builder readback = diagram.GetSystems()[0].get() self.assertListEqual(readback, [1, 2, 3]) del diagram self.assertListEqual(readback, [1, 2, 3]) def test_sine(self): # Test scalar output. sine_source = Sine(amplitude=1, frequency=2, phase=3, size=1, is_time_based=True) self.assertEqual(sine_source.get_output_port(0).size(), 1) self.assertEqual(sine_source.get_output_port(1).size(), 1) self.assertEqual(sine_source.get_output_port(2).size(), 1) # Test vector output. sine_source = Sine(amplitude=1, frequency=2, phase=3, size=3, is_time_based=True) self.assertEqual(sine_source.get_output_port(0).size(), 3) self.assertEqual(sine_source.get_output_port(1).size(), 3) self.assertEqual(sine_source.get_output_port(2).size(), 3) sine_source = Sine(amplitudes=np.ones(2), frequencies=np.ones(2), phases=np.ones(2), is_time_based=True) self.assertEqual(sine_source.get_output_port(0).size(), 2) self.assertEqual(sine_source.get_output_port(1).size(), 2) self.assertEqual(sine_source.get_output_port(2).size(), 2) def test_discrete_derivative(self): discrete_derivative = DiscreteDerivative(num_inputs=5, time_step=0.5) self.assertEqual(discrete_derivative.get_input_port(0).size(), 5) self.assertEqual(discrete_derivative.get_output_port(0).size(), 5) self.assertEqual(discrete_derivative.time_step(), 0.5) self.assertTrue(discrete_derivative.suppress_initial_transient()) discrete_derivative = DiscreteDerivative( num_inputs=5, time_step=0.5, suppress_initial_transient=False) self.assertFalse(discrete_derivative.suppress_initial_transient()) def test_state_interpolator_with_discrete_derivative(self): state_interpolator = StateInterpolatorWithDiscreteDerivative( num_positions=5, time_step=0.4) self.assertEqual(state_interpolator.get_input_port(0).size(), 5) self.assertEqual(state_interpolator.get_output_port(0).size(), 10) self.assertTrue(state_interpolator.suppress_initial_transient()) # test set_initial_position using context context = state_interpolator.CreateDefaultContext() state_interpolator.set_initial_position( context=context, position=5*[1.1]) np.testing.assert_array_equal( context.get_discrete_state(0).CopyToVector(), np.array(5*[1.1])) np.testing.assert_array_equal( context.get_discrete_state(1).CopyToVector(), np.array(5*[1.1])) # test set_initial_position using state context = state_interpolator.CreateDefaultContext() state_interpolator.set_initial_position( state=context.get_state(), position=5*[1.3]) np.testing.assert_array_equal( context.get_discrete_state(0).CopyToVector(), np.array(5*[1.3])) np.testing.assert_array_equal( context.get_discrete_state(1).CopyToVector(), np.array(5*[1.3])) state_interpolator = StateInterpolatorWithDiscreteDerivative( 
num_positions=5, time_step=0.4, suppress_initial_transient=True) self.assertTrue(state_interpolator.suppress_initial_transient()) @numpy_compare.check_nonsymbolic_types def test_log_vector_output(self, T): # Add various redundant loggers to a system, to exercise the # LogVectorOutput bindings. builder = DiagramBuilder_[T]() kSize = 1 integrator = builder.AddSystem(Integrator_[T](kSize)) port = integrator.get_output_port(0) loggers = [] loggers.append(LogVectorOutput(port, builder)) loggers.append(LogVectorOutput(src=port, builder=builder)) loggers.append(LogVectorOutput(port, builder, 0.125)) loggers.append(LogVectorOutput( src=port, builder=builder, publish_period=0.125)) loggers.append(LogVectorOutput(port, builder, {TriggerType.kForced})) loggers.append(LogVectorOutput( src=port, builder=builder, publish_triggers={TriggerType.kForced})) loggers.append(LogVectorOutput( port, builder, {TriggerType.kPeriodic}, 0.125)) loggers.append(LogVectorOutput( src=port, builder=builder, publish_triggers={TriggerType.kPeriodic}, publish_period=0.125)) # Check the returned loggers by calling some trivial methods. diagram = builder.Build() context = diagram.CreateDefaultContext() self.assertTrue(all(logger.FindLog(context).num_samples() == 0 for logger in loggers)) @numpy_compare.check_nonsymbolic_types def test_vector_log(self, T): kSize = 1 dut = VectorLog(kSize) self.assertEqual(dut.get_input_size(), kSize) dut.AddData(0.1, [22.22]) self.assertEqual(dut.num_samples(), 1) self.assertEqual(dut.sample_times(), [0.1]) self.assertEqual(dut.data(), [22.22]) dut.Clear() self.assertEqual(dut.num_samples(), 0) # There is no good way from python to test the semantics of Reserve(), # but test the binding anyway. dut.Reserve(VectorLog.kDefaultCapacity * 3) @numpy_compare.check_nonsymbolic_types def test_vector_log_sink(self, T): # Add various redundant loggers to a system, to exercise the # VectorLog constructor bindings. builder = DiagramBuilder_[T]() kSize = 1 constructors = [VectorLogSink_[T]] loggers = [] if T == float: constructors.append(VectorLogSink) for constructor in constructors: loggers.append(builder.AddSystem(constructor(kSize))) loggers.append(builder.AddSystem(constructor(input_size=kSize))) loggers.append(builder.AddSystem(constructor(kSize, 0.125))) loggers.append(builder.AddSystem( constructor(input_size=kSize, publish_period=0.125))) loggers.append(builder.AddSystem( constructor(kSize, {TriggerType.kForced}))) loggers.append(builder.AddSystem( constructor(input_size=kSize, publish_triggers={TriggerType.kForced}))) loggers.append(builder.AddSystem( constructor(kSize, {TriggerType.kPeriodic}, 0.125))) loggers.append(builder.AddSystem( constructor(input_size=kSize, publish_triggers={TriggerType.kPeriodic}, publish_period=0.125))) # Exercise all of the log access methods. diagram = builder.Build() context = diagram.CreateDefaultContext() # FindLog and FindMutableLog find the same object. self.assertTrue( all(logger.FindLog(context) == logger.FindMutableLog(context) for logger in loggers)) # Build a list of pairs of loggers and their local contexts. loggers_and_contexts = [(x, x.GetMyContextFromRoot(context)) for x in loggers] # GetLog and GetMutableLog find the same object. self.assertTrue( all(logger.GetLog(logger_context) == logger.GetMutableLog(logger_context) for logger, logger_context in loggers_and_contexts)) # GetLog and FindLog find the same object, given the proper contexts. 
self.assertTrue( all(logger.GetLog(logger_context) == logger.FindLog(context) for logger, logger_context in loggers_and_contexts))
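
# A minimal standalone sketch (not part of the tests above) wiring a few of
# the primitives tested here into a diagram, then reading a log back the
# same way the logger tests do. The constant [1., 2.] and gain 3. are
# arbitrary example values.
import numpy as np

from pydrake.systems.analysis import Simulator
from pydrake.systems.framework import DiagramBuilder
from pydrake.systems.primitives import (
    ConstantVectorSource,
    Gain,
    LogVectorOutput,
)

builder = DiagramBuilder()
source = builder.AddSystem(ConstantVectorSource(source_value=[1., 2.]))
gain = builder.AddSystem(Gain(k=3., size=2))
builder.Connect(source.get_output_port(0), gain.get_input_port(0))
logger = LogVectorOutput(gain.get_output_port(0), builder)
diagram = builder.Build()

simulator = Simulator(diagram)
simulator.AdvanceTo(0.1)
log = logger.FindLog(simulator.get_context())
assert np.allclose(log.data()[:, -1], [3., 6.])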
0
/home/johnshepherd/drake/bindings/pydrake/systems
/home/johnshepherd/drake/bindings/pydrake/systems/test/rendering_test.py
# -*- coding: utf-8 -*-

from pydrake.systems.rendering import (
    MultibodyPositionToGeometryPose,
)

import copy
import unittest

import numpy as np

from pydrake.common import FindResourceOrThrow
from pydrake.common.value import AbstractValue
from pydrake.geometry import SceneGraph
from pydrake.math import RigidTransform
from pydrake.multibody.plant import MultibodyPlant
from pydrake.multibody.math import (
    SpatialVelocity,
)
from pydrake.multibody.parsing import Parser
from pydrake.systems.framework import (
    BasicVector,
    PortDataType,
)
from pydrake.common.eigen_geometry import Quaternion


def normalized(x):
    return x / np.linalg.norm(x)


class TestRendering(unittest.TestCase):
    def testMultibodyPositionToGeometryPose(self):
        file_name = FindResourceOrThrow(
            "drake/multibody/benchmarks/acrobot/acrobot.sdf")
        plant = MultibodyPlant(time_step=0.01)
        Parser(plant).AddModels(file_name)
        scene_graph = SceneGraph()
        plant.RegisterAsSourceForSceneGraph(scene_graph)
        plant.Finalize()
        to_pose = MultibodyPositionToGeometryPose(
            plant=plant, input_multibody_state=False)

        # Check the size of the input.
        self.assertEqual(to_pose.get_input_port().size(), 2)

        # Just check the spelling of the output port (size is not meaningful
        # for Abstract-valued ports).
        to_pose.get_output_port()
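
# A minimal standalone sketch (not part of the test above) of using the
# converter inside a diagram: feed it a position vector and route its pose
# output into a SceneGraph. The plant setup mirrors the test; the
# get_source_pose_port() wiring is an assumption about the usual SceneGraph
# hookup, not something the test itself exercises.
import numpy as np

from pydrake.common import FindResourceOrThrow
from pydrake.geometry import SceneGraph
from pydrake.multibody.parsing import Parser
from pydrake.multibody.plant import MultibodyPlant
from pydrake.systems.framework import DiagramBuilder
from pydrake.systems.primitives import ConstantVectorSource
from pydrake.systems.rendering import MultibodyPositionToGeometryPose

builder = DiagramBuilder()
scene_graph = builder.AddSystem(SceneGraph())
plant = MultibodyPlant(time_step=0.01)
Parser(plant).AddModels(FindResourceOrThrow(
    "drake/multibody/benchmarks/acrobot/acrobot.sdf"))
plant.RegisterAsSourceForSceneGraph(scene_graph)
plant.Finalize()
to_pose = builder.AddSystem(MultibodyPositionToGeometryPose(
    plant=plant, input_multibody_state=False))
source = builder.AddSystem(ConstantVectorSource(np.zeros(2)))
builder.Connect(source.get_output_port(0), to_pose.get_input_port())
builder.Connect(to_pose.get_output_port(),
                scene_graph.get_source_pose_port(plant.get_source_id()))
diagram = builder.Build()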
0
/home/johnshepherd/drake/bindings/pydrake/systems
/home/johnshepherd/drake/bindings/pydrake/systems/test/lcm_test.py
""" Test bindings of LCM integration with the Systems framework. """ import pydrake.systems.lcm as mut import collections import time import unittest import numpy as np from drake import lcmt_header, lcmt_quaternion import drake as drake_lcmtypes from pydrake.common.value import Value from pydrake.lcm import DrakeLcm, DrakeLcmParams, Subscriber from pydrake.systems.analysis import Simulator from pydrake.systems.framework import ( BasicVector, DiagramBuilder, LeafSystem, TriggerType, ) from pydrake.systems.primitives import ConstantVectorSource # TODO(eric.cousieau): Move this to more generic code when another piece of # code uses it. def lcm_to_json(message): def helper(thing): if type(thing) in (int, float, np.float64, str): return thing if type(thing) in (list,): return list(map(helper, [x for x in thing])) result = collections.OrderedDict() for field in thing.__slots__: value = getattr(thing, field) result[field] = helper(value) return result return helper(message) class TestSystemsLcm(unittest.TestCase): def _model_message(self): message = lcmt_quaternion() message.w, message.x, message.y, message.z = (1, 2, 3, 4) return message def _model_value_cpp(self): serializer = mut._Serializer_[lcmt_quaternion]() model_message = self._model_message() model_value = serializer.CreateDefaultValue() serializer.Deserialize(model_message.encode(), model_value) return model_value def _cpp_value_to_py_message(self, value): serializer = mut._Serializer_[lcmt_quaternion]() raw = serializer.Serialize(value) return lcmt_quaternion.decode(raw) def assert_lcm_equal(self, actual, expected): self.assertIsInstance(actual, type(expected)) self.assertDictEqual(lcm_to_json(actual), lcm_to_json(expected)) def assert_lcm_not_equal(self, actual, expected): self.assertIsInstance(actual, type(expected)) self.assertNotEqual(lcm_to_json(actual), lcm_to_json(expected)) def test_serializer(self): dut = mut.PySerializer(lcmt_quaternion) self.assertEqual(repr(dut), "PySerializer(lcmt_quaternion)") model_message = self._model_message() value = dut.CreateDefaultValue() self.assert_lcm_not_equal(value.get_value(), model_message) # Check deserialization. dut.Deserialize(model_message.encode(), value) self.assert_lcm_equal(value.get_value(), model_message) # Check serialization. raw = dut.Serialize(value) reconstruct = lcmt_quaternion.decode(raw) self.assert_lcm_equal(reconstruct, model_message) def test_serializer_cpp(self): # Tests relevant portions of API. model_message = self._model_message() model_value = self._model_value_cpp() self.assert_lcm_equal( self._cpp_value_to_py_message(model_value), model_message) def test_all_serializers_exist(self): """Checks that all of Drake's Python LCM messages have a matching C++ serializer bound for use by LcmPublisherSystem. """ # The drake_lcm_py_library() in drake/lcmtypes/BUILD.bazel generates # a module __init__.py that enumerates all Drake Python LCM messages. # Fetch that module's list of message classes. all_message_classes = [ getattr(drake_lcmtypes, name) for name in dir(drake_lcmtypes) if any([name.startswith("lcmt_"), name.startswith("experimental_lcmt_")]) ] self.assertGreater(len(all_message_classes), 1) # Confirm that each message class is partnered with the definition of a # C++ serializer in lcm_py_bind_cpp_serializers.cc. lcm = DrakeLcm() for message_class in all_message_classes: # Check that the Python message class is a valid template value. 
serializer = mut._Serializer_[message_class] self.assertIsNotNone(serializer) # Confirm that we can actually instantiate a publisher that takes # the matching C++ message on its input port. mut.LcmPublisherSystem.Make( channel="TEST_CHANNEL", lcm_type=message_class, lcm=lcm, use_cpp_serializer=True) def test_buses(self): self.assertIsInstance(mut.LcmBuses.kLcmUrlMemqNull, str) dut = mut.LcmBuses() dut.Add("default", DrakeLcm()) self.assertEqual(dut.size(), 1) self.assertIsInstance(dut.Find("Basic test", "default"), DrakeLcm) self.assertEqual(len(dut.GetAllBusNames()), 1) def test_bus_config(self): bus_config = { "foo": DrakeLcmParams(), "bar": DrakeLcmParams(), "quux": None, } bus_config["foo"].lcm_url = "memq://1" bus_config["bar"].lcm_url = "memq://2" builder = DiagramBuilder() buses = mut.ApplyLcmBusConfig(bus_config, builder) self.assertEqual(buses.size(), 3) self.assertIsInstance(buses.Find("Config test", "foo"), mut.LcmInterfaceSystem) def _process_event(self, dut): # Use a Simulator to invoke the update event on `dut`. (Wouldn't it be # nice if the Systems API was simple enough that we could apply events # without calling a Simulator!) simulator = Simulator(dut) simulator.AdvanceTo(0.00025) # Arbitrary positive value. return simulator.get_context().Clone() def test_subscriber(self): lcm = DrakeLcm() dut = mut.LcmSubscriberSystem.Make( channel="TEST_CHANNEL", lcm_type=lcmt_quaternion, lcm=lcm, wait_for_message_on_initialization_timeout=0.0) model_message = self._model_message() lcm.Publish(channel="TEST_CHANNEL", buffer=model_message.encode()) lcm.HandleSubscriptions(0) context = self._process_event(dut) actual_message = dut.get_output_port(0).Eval(context) self.assert_lcm_equal(actual_message, model_message) # Test LcmInterfaceSystem overloads lcm_system = mut.LcmInterfaceSystem(lcm=lcm) dut = mut.LcmSubscriberSystem.Make( channel="TEST_CHANNEL", lcm_type=lcmt_quaternion, lcm=lcm_system) lcm.Publish(channel="TEST_CHANNEL", buffer=model_message.encode()) lcm.HandleSubscriptions(0) context = self._process_event(dut) actual_message = dut.get_output_port(0).Eval(context) self.assert_lcm_equal(actual_message, model_message) def test_subscriber_cpp(self): lcm = DrakeLcm() dut = mut.LcmSubscriberSystem.Make( channel="TEST_CHANNEL", lcm_type=lcmt_quaternion, lcm=lcm, use_cpp_serializer=True, wait_for_message_on_initialization_timeout=0.0) model_message = self._model_message() lcm.Publish(channel="TEST_CHANNEL", buffer=model_message.encode()) lcm.HandleSubscriptions(0) context = self._process_event(dut) abstract = dut.get_output_port(0).EvalAbstract(context) actual_message = self._cpp_value_to_py_message(abstract) self.assert_lcm_equal(actual_message, model_message) def test_subscriber_wait_for_message(self): """Checks how `WaitForMessage` works without Python threads.""" lcm = DrakeLcm() sub = mut.LcmSubscriberSystem.Make("TEST_LOOP", lcmt_header, lcm) value = Value(lcmt_header()) for old_message_count in range(3): message = lcmt_header() message.utime = old_message_count + 1 lcm.Publish("TEST_LOOP", message.encode()) for attempt in range(10): new_count = sub.WaitForMessage( old_message_count, value, timeout=0.02) if new_count > old_message_count: break lcm.HandleSubscriptions(0) self.assertEqual(value.get_value().utime, old_message_count + 1) def _fix_and_publish(self, dut, value): context = dut.CreateDefaultContext() dut.get_input_port(0).FixValue(context, value) dut.ForcedPublish(context) def test_publisher(self): lcm = DrakeLcm() dut = mut.LcmPublisherSystem.Make( 
channel="TEST_CHANNEL", lcm_type=lcmt_quaternion, lcm=lcm, publish_period=0.1, publish_offset=0.01) subscriber = Subscriber(lcm, "TEST_CHANNEL", lcmt_quaternion) model_message = self._model_message() self._fix_and_publish(dut, Value(model_message)) lcm.HandleSubscriptions(0) self.assert_lcm_equal(subscriber.message, model_message) # Test `publish_triggers` overload. mut.LcmPublisherSystem.Make( channel="TEST_CHANNEL", lcm_type=lcmt_quaternion, lcm=lcm, publish_period=0.1, publish_offset=0.01, publish_triggers={TriggerType.kPeriodic}) # Test LcmInterfaceSystem overloads lcm_system = mut.LcmInterfaceSystem(lcm=lcm) dut = mut.LcmPublisherSystem.Make( channel="TEST_CHANNEL", lcm_type=lcmt_quaternion, lcm=lcm_system, publish_period=0.1, publish_offset=0.01) self._fix_and_publish(dut, Value(model_message)) lcm.HandleSubscriptions(0) self.assert_lcm_equal(subscriber.message, model_message) # Test `publish_triggers` overload. mut.LcmPublisherSystem.Make( channel="TEST_CHANNEL", lcm_type=lcmt_quaternion, lcm=lcm_system, publish_period=0.1, publish_offset=0.01, publish_triggers={TriggerType.kPeriodic}) def test_publisher_cpp(self): lcm = DrakeLcm() dut = mut.LcmPublisherSystem.Make( channel="TEST_CHANNEL", lcm_type=lcmt_quaternion, lcm=lcm, use_cpp_serializer=True) subscriber = Subscriber(lcm, "TEST_CHANNEL", lcmt_quaternion) model_message = self._model_message() model_value = self._model_value_cpp() self._fix_and_publish(dut, model_value) lcm.HandleSubscriptions(0) self.assert_lcm_equal(subscriber.message, model_message) class PythonMessageSource(LeafSystem): """A source system whose output port contains a Python lcmt_header.""" def __init__(self): LeafSystem.__init__(self) self.DeclareAbstractOutputPort( "output", self.AllocateOutput, self.CalcOutput) def AllocateOutput(self): return Value(lcmt_header()) def CalcOutput(self, context, output): message = output.get_mutable_value() message.utime = int(context.get_time() * 1e6) message.frame_name = "frame_name" def test_diagram_publisher(self): """Acceptance tests that a Python LeafSystem is able to output LCM messages for LcmPublisherSystem to transmit. """ lcm = DrakeLcm() builder = DiagramBuilder() source = builder.AddSystem(TestSystemsLcm.PythonMessageSource()) publisher = builder.AddSystem( mut.LcmPublisherSystem.Make( channel="LCMT_HEADER", lcm_type=lcmt_header, lcm=lcm, publish_period=0.05)) builder.Connect(source.get_output_port(), publisher.get_input_port()) diagram = builder.Build() diagram.ForcedPublish(diagram.CreateDefaultContext()) def test_lcm_scope(self): builder = DiagramBuilder() source = builder.AddSystem(ConstantVectorSource(np.zeros(4))) scope, publisher = mut.LcmScopeSystem.AddToBuilder( builder=builder, lcm=DrakeLcm(), signal=source.get_output_port(0), channel="TEST_CHANNEL", publish_period=0.001) self.assertIsInstance(scope, mut.LcmScopeSystem) self.assertIsInstance(publisher, mut.LcmPublisherSystem) def test_lcm_interface_system_getters(self): lcm = DrakeLcm() lcm_system = mut.LcmInterfaceSystem(lcm=lcm) self.assertIsInstance(lcm_system.get_lcm_url(), str) self.assertEqual(lcm_system.HandleSubscriptions(timeout_millis=10), 0) def test_lcm_interface_system_diagram(self): # First, check the class doc. self.assertIn( "only inherits from LeafSystem", mut.LcmInterfaceSystem.__doc__) # Next, construct a diagram and add both the interface system and # a subscriber. builder = DiagramBuilder() lcm = DrakeLcm() lcm_system = builder.AddSystem(mut.LcmInterfaceSystem(lcm=lcm)) # Create subscriber in the diagram. 
subscriber = builder.AddSystem(mut.LcmSubscriberSystem.Make( channel="TEST_CHANNEL", lcm_type=lcmt_quaternion, lcm=lcm)) diagram = builder.Build() simulator = Simulator(diagram) simulator.Initialize() # Publish test message. model_message = self._model_message() lcm.Publish("TEST_CHANNEL", model_message.encode()) # Simulate to a non-zero time to ensure the subscriber picks up the # message. eps = np.finfo(float).eps simulator.AdvanceTo(eps) # Ensure that we have what we want. context = subscriber.GetMyContextFromRoot( simulator.get_mutable_context()) actual_message = subscriber.get_output_port(0).Eval(context) self.assert_lcm_equal(actual_message, model_message)
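# ---------------------------------------------------------------------------
# A minimal publish round-trip sketch distilled from test_publisher above,
# guarded so the test runner never executes it. It reuses only names this
# module already imports (DrakeLcm, Subscriber, Value, lcmt_quaternion, and
# pydrake.systems.lcm as `mut`); the channel name is arbitrary.
if __name__ == "__main__":
    example_lcm = DrakeLcm()
    example_pub = mut.LcmPublisherSystem.Make(
        channel="EXAMPLE_CHANNEL", lcm_type=lcmt_quaternion, lcm=example_lcm)
    example_sub = Subscriber(example_lcm, "EXAMPLE_CHANNEL", lcmt_quaternion)
    # Fix a message on the input port, force one publish, then pump the
    # subscription queue so `example_sub.message` is populated.
    example_context = example_pub.CreateDefaultContext()
    example_pub.get_input_port(0).FixValue(
        example_context, Value(lcmt_quaternion()))
    example_pub.ForcedPublish(example_context)
    example_lcm.HandleSubscriptions(0)
    print(example_sub.message)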
0
/home/johnshepherd/drake/bindings/pydrake
/home/johnshepherd/drake/bindings/pydrake/autodiffutils/autodiffutils_py_everything.cc
#include <Eigen/Core> #include <unsupported/Eigen/AutoDiff> #include "drake/bindings/pydrake/autodiff_types_pybind.h" #include "drake/bindings/pydrake/documentation_pybind.h" #include "drake/bindings/pydrake/math_operators_pybind.h" #include "drake/bindings/pydrake/pydrake_pybind.h" #include "drake/common/drake_throw.h" #include "drake/math/autodiff.h" #include "drake/math/autodiff_gradient.h" using Eigen::AutoDiffScalar; using Eigen::VectorXd; using std::cos; using std::sin; namespace drake { namespace pydrake { namespace internal { void DefineAutodiffutils(py::module m) { // NOLINTNEXTLINE(build/namespaces): Emulate placement in namespace. using namespace drake::math; constexpr auto& doc = pydrake_doc.drake.math; // TODO(m-chaturvedi) Add Pybind11 documentation. py::class_<AutoDiffXd> autodiff(m, "AutoDiffXd"); autodiff // BR .def(py::init<double>(), py::arg("value"), "Constructs a value with empty derivatives.") .def(py::init<const double&, const VectorXd&>(), py::arg("value"), py::arg("derivatives"), "Constructs a value with the given derivatives.") .def(py::init<double, Eigen::Index, Eigen::Index>(), py::arg("value"), py::arg("size"), py::arg("offset"), "Constructs a value with a single partial derivative of 1.0 at the " "given `offset` in a vector of `size` otherwise-zero derivatives.") .def("value", [](const AutoDiffXd& self) { return self.value(); }) .def("derivatives", [](const AutoDiffXd& self) { return self.derivatives(); }) .def("__str__", [](const AutoDiffXd& self) { return py::str("AD{{{}, nderiv={}}}") .format(self.value(), self.derivatives().size()); }) .def("__repr__", [](const AutoDiffXd& self) { return py::str("<AutoDiffXd {} nderiv={}>") .format(self.value(), self.derivatives().size()); }) // Arithmetic .def(-py::self) .def(py::self + py::self) .def(py::self + double()) .def(double() + py::self) .def(py::self - py::self) .def(py::self - double()) .def(double() - py::self) .def(py::self * py::self) .def(py::self * double()) .def(double() * py::self) .def(py::self / py::self) .def(py::self / double()) .def(double() / py::self) // Logical comparison .def(py::self == py::self) .def(py::self == double()) .def(py::self != py::self) .def(py::self != double()) .def(py::self < py::self) .def(py::self < double()) .def(py::self <= py::self) .def(py::self <= double()) .def(py::self > py::self) .def(py::self > double()) .def(py::self >= py::self) .def(py::self >= double()) // Additional math .def( "__pow__", [](const AutoDiffXd& base, double exponent) { return pow(base, exponent); }, py::is_operator()) .def("__abs__", [](const AutoDiffXd& x) { return abs(x); }) .def(py::pickle( [](const AutoDiffXd& self) { return py::make_tuple(self.value(), self.derivatives()); }, [](py::tuple t) { DRAKE_THROW_UNLESS(t.size() == 2); return AutoDiffXd(t[0].cast<double>(), t[1].cast<VectorXd>()); })); DefCopyAndDeepCopy(&autodiff); py::implicitly_convertible<double, AutoDiffXd>(); py::implicitly_convertible<int, AutoDiffXd>(); pydrake::internal::BindMathOperators<AutoDiffXd>(&autodiff); // Mirror for numpy. 
autodiff.attr("arcsin") = autodiff.attr("asin"); autodiff.attr("arccos") = autodiff.attr("acos"); autodiff.attr("arctan2") = autodiff.attr("atan2"); m.def( "InitializeAutoDiff", [](const Eigen::MatrixXd& value, std::optional<int> num_derivatives, std::optional<int> deriv_num_start) { return InitializeAutoDiff(value, num_derivatives, deriv_num_start); }, py::arg("value"), py::arg("num_derivatives") = std::nullopt, py::arg("deriv_num_start") = std::nullopt, doc.InitializeAutoDiff.doc_just_value); m.def( "InitializeAutoDiff", [](const Eigen::MatrixXd& value, const Eigen::MatrixXd& gradient) { return InitializeAutoDiff(value, gradient); }, py::arg("value"), py::arg("gradient"), doc.InitializeAutoDiff.doc_value_and_gradient); m.def( "ExtractValue", [](const MatrixX<AutoDiffXd>& auto_diff_matrix) { return ExtractValue(auto_diff_matrix); }, py::arg("auto_diff_matrix"), doc.ExtractValue.doc); m.def( "ExtractGradient", [](const MatrixX<AutoDiffXd>& auto_diff_matrix) { return ExtractGradient(auto_diff_matrix); }, py::arg("auto_diff_matrix"), doc.ExtractGradient.doc); } } // namespace internal } // namespace pydrake } // namespace drake
0
/home/johnshepherd/drake/bindings/pydrake
/home/johnshepherd/drake/bindings/pydrake/autodiffutils/BUILD.bazel
load("//bindings/pydrake:pydrake.bzl", "add_lint_tests_pydrake") load("//tools/install:install.bzl", "install") load( "//tools/skylark:drake_cc.bzl", "drake_cc_library", ) load( "//tools/skylark:drake_py.bzl", "drake_py_library", "drake_py_unittest", ) load( "//tools/skylark:pybind.bzl", "get_pybind_package_info", ) package(default_visibility = ["//visibility:private"]) # This determines how `PYTHONPATH` is configured, and how to install the # bindings. PACKAGE_INFO = get_pybind_package_info("//bindings") # N.B. The `pydrake.autodiffutils` module is part of the root module dependency # cycle. Refer to bindings/pydrake/common/module_cycle.md for details. drake_cc_library( name = "autodiffutils_py", srcs = [ "autodiffutils_py_everything.cc", ], hdrs = [ "autodiffutils_py.h", ], declare_installed_headers = False, visibility = [ "//bindings/pydrake/common:__pkg__", ], deps = [ "//bindings/pydrake:autodiff_types_pybind", "//bindings/pydrake:documentation_pybind", "//bindings/pydrake:math_operators_pybind", "//bindings/pydrake:pydrake_pybind", "@pybind11", ], ) drake_py_library( name = "autodiffutils_extra", srcs = ["_autodiffutils_extra.py"], visibility = [ "//bindings/pydrake/common:__pkg__", ], ) install( name = "install", targets = [":autodiffutils_extra"], py_dest = PACKAGE_INFO.py_dest, visibility = ["//bindings/pydrake:__pkg__"], ) drake_py_unittest( name = "autodiffutils_test", deps = [ "//bindings/pydrake/common/test_utilities:algebra_test_util_py", "//bindings/pydrake/common/test_utilities:autodiffutils_test_util_py", "//bindings/pydrake/common/test_utilities:numpy_compare_py", "//bindings/pydrake/common/test_utilities:pickle_compare_py", ], ) add_lint_tests_pydrake()
0
/home/johnshepherd/drake/bindings/pydrake
/home/johnshepherd/drake/bindings/pydrake/autodiffutils/autodiffutils_py.h
#pragma once #include "drake/bindings/pydrake/pydrake_pybind.h" namespace drake { namespace pydrake { namespace internal { /* Defines all bindings for the pydrake.autodiffutils module. */ void DefineAutodiffutils(py::module m); } // namespace internal } // namespace pydrake } // namespace drake
0
/home/johnshepherd/drake/bindings/pydrake
/home/johnshepherd/drake/bindings/pydrake/autodiffutils/_autodiffutils_extra.py
# See `ExecuteExtraPythonCode` in `pydrake_pybind.h` for usage details and # rationale. import numpy as np def InitializeAutoDiffTuple(*args): """Given a series of array_like input arguments, create a tuple of corresponding AutoDiff matrices with values equal to the input matrices and properly initialized derivative vectors. The size of the derivative vector of each element of the matrices in the output tuple will be the same, and will equal the sum of the number of elements of the matrices in args. The 0th element of the derivative vectors will correspond to the derivative with respect to the 0th element of the first argument. Subsequent derivative vector elements correspond first to subsequent elements of the first input argument (traversed first by row, then by column), and so on for subsequent arguments. This is a pythonic implementation of drake::math::InitializeAutoDiffTuple in C++. """ num_derivatives = 0 for arg in args: num_derivatives += np.asarray(arg).size autodiff_tuple = [] deriv_num_start = 0 for arg in args: autodiff_tuple.append(InitializeAutoDiff(arg, num_derivatives, deriv_num_start)) deriv_num_start += np.asarray(arg).size return tuple(autodiff_tuple) @np.vectorize def autodiff_equal_to(a, b, *, semantic=False): """ Provides a structural equality check for arrays of AutoDiffXd scalars, i.e. returns True if both the values and derivates are equal. Arguments: a, b: Arrays to compare. semantic: If False, performs *literal* comparison, meaning the value and derivatives must match in both value and shape. If True, performs *semantic* comparison, meaning that empty derivatives is equivalent to purely zero-valued derivatives. Note: Zero-valued derivatives of different size are *not* equivalent. """ assert isinstance(a, AutoDiffXd), type(a) assert isinstance(b, AutoDiffXd), type(b) if a.value() == b.value(): da = a.derivatives() db = b.derivatives() if da.shape == db.shape and (da == db).all(): return True da_empty = da.size == 0 db_empty = db.size == 0 if semantic and (da_empty or db_empty): da_zero = (da == 0.0).all() db_zero = (db == 0.0).all() if (da_zero and db_empty) or (da_empty and db_zero): return True return False
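# ---------------------------------------------------------------------------
# A small usage sketch, kept entirely in comments so nothing extra runs when
# pydrake executes this file during module import. With q of size 2 and v of
# size 1, each output carries derivative vectors of size 3, and the identity
# gradient is spread across the tuple in argument order:
#
#   from pydrake.autodiffutils import ExtractGradient, InitializeAutoDiffTuple
#
#   q_ad, v_ad = InitializeAutoDiffTuple([1.0, 2.0], [3.0])
#   ExtractGradient(q_ad)  # => [[1., 0., 0.], [0., 1., 0.]]
#   ExtractGradient(v_ad)  # => [[0., 0., 1.]]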
0
/home/johnshepherd/drake/bindings/pydrake/autodiffutils
/home/johnshepherd/drake/bindings/pydrake/autodiffutils/test/autodiffutils_test.py
import pydrake.autodiffutils as mut from pydrake.autodiffutils import ( AutoDiffXd, ExtractGradient, ExtractValue, InitializeAutoDiff, InitializeAutoDiffTuple, ) import copy import itertools import unittest import numpy as np import pydrake.math as drake_math from pydrake.common.test_utilities import numpy_compare from pydrake.common.test_utilities.algebra_test_util import ( ScalarAlgebra, VectorizedAlgebra, ) from pydrake.common.test_utilities.autodiffutils_test_util import ( autodiff_scalar_pass_through, autodiff_vector_pass_through, autodiff_vector3_pass_through, ) from pydrake.common.test_utilities.pickle_compare import assert_pickle # Use convenience abbreviation. AD = AutoDiffXd def check_logical(func, a, b, expected): # Checks logical operations, with broadcasting, checking that `a` and `b` # (of type `T`) have compatible logical operators when the left or right # operands are `float`s. Specifically, tests: # - f(T, T) # - f(T, float) # - f(float, T) numpy_compare.assert_equal(func(a, b), expected) af = numpy_compare.to_float(a) bf = numpy_compare.to_float(b) numpy_compare.assert_equal(func(a, bf), expected) numpy_compare.assert_equal(func(af, b), expected) class TestAutoDiffXd(unittest.TestCase): def test_scalar_api(self): # Test the unit vector constructor. a = AD(value=1, size=2, offset=1) self.assertEqual(a.value(), 1.0) numpy_compare.assert_equal(a.derivatives(), [0.0, 1.0]) # Test the dense derivatives vector constructor. a = AD(value=1, derivatives=[1.0, 2.0]) self.assertEqual(a.value(), 1.0) numpy_compare.assert_equal(a.derivatives(), [1.0, 2.0]) self.assertEqual(str(a), "AD{1.0, nderiv=2}") self.assertEqual(repr(a), "<AutoDiffXd 1.0 nderiv=2>") numpy_compare.assert_equal(a, a) # Test construction from `float` and `int`. numpy_compare.assert_equal(AD(1), AD(1., [])) numpy_compare.assert_equal(AD(1.), AD(1., [])) # Test implicit conversion from a simple dtype to AutoDiff. numpy_compare.assert_equal( autodiff_scalar_pass_through(1), # int AD(1., [])) numpy_compare.assert_equal( autodiff_scalar_pass_through(1.), # float AD(1., [])) # Test explicit conversion to float. with self.assertRaises(TypeError) as cm: float(a) self.assertIn( "not 'pydrake.autodiffutils.AutoDiffXd'", str(cm.exception)) a_scalar = np.array(a) with self.assertRaises(TypeError) as cm: float(a_scalar) if np.lib.NumpyVersion(np.__version__) < "1.14.0": self.assertEqual( "don't know how to convert scalar number to float", str(cm.exception)) else: self.assertRegex( str(cm.exception), r"float\(\) argument must be a string or a (real )?number, " r"not 'pydrake\.autodiffutils\.AutoDiffXd'") # Test multi-element pass-through. x = np.array([AD(1.), AD(2.), AD(3.)]) numpy_compare.assert_equal(autodiff_vector_pass_through(x), x) # Ensure fixed-size vectors are correctly converted (#9886). numpy_compare.assert_equal(autodiff_vector3_pass_through(x), x) # Ensure we can copy. numpy_compare.assert_equal(copy.copy(a), a) numpy_compare.assert_equal(copy.deepcopy(a), a) # Ensure that we can pickle. assert_pickle(self, a, lambda x: x) def test_array_api(self): a = AD(1, [1., 0]) b = AD(2, [0, 1.]) x = np.array([a, b]) self.assertEqual(x.dtype, object) # Idempotent check. numpy_compare.assert_equal(x, x) # Conversion. with self.assertRaises(TypeError): # Avoid implicit coercion, as this will imply information loss. xf = np.zeros(2, dtype=float) xf[:] = x with self.assertRaises(TypeError): # We could define `__float__` to allow this, but then that will # enable implicit coercion, which we should avoid. 
xf = x.astype(dtype=float) # Presently, does not convert. x = np.zeros((3, 3), dtype=AD) self.assertFalse(isinstance(x[0, 0], AD)) x = np.eye(3).astype(AD) self.assertFalse(isinstance(x[0, 0], AD)) # Test implicit conversion. numpy_compare.assert_equal( autodiff_vector_pass_through([1, 2]), # int [AD(1., []), AD(2., [])]) numpy_compare.assert_equal( autodiff_vector_pass_through([1., 2.]), # float [AD(1., []), AD(2., [])]) def _check_algebra(self, algebra): a_scalar = AD(1, [1., 0]) b_scalar = AD(2, [0, 1.]) c_scalar = AD(0, [1., 0]) d_scalar = AD(1, [0, 1.]) a, b, c, d = map( algebra.to_algebra, (a_scalar, b_scalar, c_scalar, d_scalar)) # Arithmetic numpy_compare.assert_equal(-a, AD(-1, [-1., 0])) numpy_compare.assert_equal(a + b, AD(3, [1, 1])) numpy_compare.assert_equal(a + 1, AD(2, [1, 0])) numpy_compare.assert_equal(1 + a, AD(2, [1, 0])) numpy_compare.assert_equal(a - b, AD(-1, [1, -1])) numpy_compare.assert_equal(a - 1, AD(0, [1, 0])) numpy_compare.assert_equal(1 - a, AD(0, [-1, 0])) numpy_compare.assert_equal(a * b, AD(2, [2, 1])) numpy_compare.assert_equal(a * 2, AD(2, [2, 0])) numpy_compare.assert_equal(2 * a, AD(2, [2, 0])) numpy_compare.assert_equal(a / b, AD(1./2, [1./2, -1./4])) numpy_compare.assert_equal(a / 2, AD(0.5, [0.5, 0])) numpy_compare.assert_equal(2 / a, AD(2, [-2, 0])) # Logical check_logical(lambda x, y: x == y, a, a, True) check_logical(algebra.eq, a, a, True) check_logical(lambda x, y: x != y, a, a, False) check_logical(algebra.ne, a, a, False) check_logical(lambda x, y: x < y, a, b, True) check_logical(algebra.lt, a, b, True) check_logical(lambda x, y: x <= y, a, b, True) check_logical(algebra.le, a, b, True) check_logical(lambda x, y: x > y, a, b, False) check_logical(algebra.gt, a, b, False) check_logical(lambda x, y: x >= y, a, b, False) check_logical(algebra.ge, a, b, False) # Additional math # - See `math_overloads_test` for scalar overloads. numpy_compare.assert_equal(a**2, AD(1, [2., 0])) numpy_compare.assert_equal(algebra.log(a), AD(0, [1., 0])) numpy_compare.assert_equal(algebra.abs(-a), AD(1, [1., 0])) numpy_compare.assert_equal(algebra.exp(a), AD(np.e, [np.e, 0])) numpy_compare.assert_equal(algebra.sqrt(a), AD(1, [0.5, 0])) numpy_compare.assert_equal(algebra.pow(a, 2), AD(1, [2., 0])) numpy_compare.assert_equal(algebra.pow(a, 0.5), AD(1, [0.5, 0])) numpy_compare.assert_equal(algebra.sin(c), AD(0, [1, 0])) numpy_compare.assert_equal(algebra.cos(c), AD(1, [0, 0])) numpy_compare.assert_equal(algebra.tan(c), AD(0, [1, 0])) numpy_compare.assert_equal(algebra.arcsin(c), AD(0, [1, 0])) numpy_compare.assert_equal(algebra.arccos(c), AD(np.pi / 2, [-1, 0])) numpy_compare.assert_equal(algebra.arctan2(c, d), AD(0, [1, 0])) numpy_compare.assert_equal(algebra.sinh(c), AD(0, [1, 0])) numpy_compare.assert_equal(algebra.cosh(c), AD(1, [0, 0])) numpy_compare.assert_equal(algebra.tanh(c), AD(0, [1, 0])) numpy_compare.assert_equal(algebra.min(a, b), a_scalar) numpy_compare.assert_equal(algebra.max(a, b), b_scalar) # Because `ceil` and `floor` return `double`, we have to special case # this comparison since the matrix is `dtype=object`, even though the # elements are all doubles. We must cast it to float. # N.B. This would be fixed if we registered a UFunc for these # methods, so NumPy would have already returned a `float` array. 
ceil_a = algebra.ceil(a) floor_a = algebra.floor(a) if isinstance(algebra, VectorizedAlgebra): self.assertEqual(ceil_a.dtype, object) self.assertIsInstance(ceil_a[0], float) ceil_a = ceil_a.astype(float) floor_a = floor_a.astype(float) numpy_compare.assert_equal(ceil_a, a_scalar.value()) numpy_compare.assert_equal(floor_a, a_scalar.value()) # Return value so it can be inspected. return a def test_scalar_algebra(self): a = self._check_algebra(ScalarAlgebra()) self.assertEqual(type(a), AD) def test_array_algebra(self): a = self._check_algebra(VectorizedAlgebra()) self.assertEqual(type(a), np.ndarray) self.assertEqual(a.shape, (2,)) def test_linear_algebra(self): a_scalar = AD(1, [1., 0]) b_scalar = AD(2, [0, 1.]) A = np.array([[a_scalar, a_scalar]]) B = np.array([[b_scalar, b_scalar]]).T C = np.dot(A, B) numpy_compare.assert_equal(C, [[AD(4, [4., 2])]]) # Before NumPy 1.17, `matmul` was not supported for `dtype=object` # (#11332), and `np.dot` should be used instead. if np.lib.NumpyVersion(np.__version__) < "1.17.0": with self.assertRaises(TypeError): C2 = np.matmul(A, B) else: C2 = np.matmul(A, B) numpy_compare.assert_equal(C2, C) # Type mixing Bf = np.array([[2., 2]]).T C2 = np.dot(A, Bf) # Leverages implicit casting. numpy_compare.assert_equal(C2, [[AD(4, [4., 0])]]) # Other methods. X = np.array([[a_scalar, b_scalar], [b_scalar, a_scalar]]) numpy_compare.assert_equal(np.trace(X), AD(2, [2., 0])) # `inv` is a ufunc that we must implement, if possible. However, given # that this is currently `dtype=object`, it would be extremely unwise # to do so. See #8116 for alternative. with self.assertRaises(TypeError): Y = np.linalg.inv(X) # Use workaround for inverse. For now, just check values. X_float = numpy_compare.to_float(X) Xinv_float = np.linalg.inv(X_float) Xinv = drake_math.inv(X) np.testing.assert_equal(numpy_compare.to_float(Xinv), Xinv_float) def test_math_utils(self): a = InitializeAutoDiff(value=[1, 2, 3], num_derivatives=3, deriv_num_start=0) np.testing.assert_array_equal(ExtractValue(auto_diff_matrix=a), np.array([[1, 2, 3]]).T) np.testing.assert_array_equal(ExtractGradient(auto_diff_matrix=a), np.eye(3)) a, b = InitializeAutoDiffTuple([1], [2, 3]) np.testing.assert_array_equal(ExtractValue(a), np.array([[1]])) np.testing.assert_array_equal(ExtractValue(b), np.array([[2, 3]]).T) np.testing.assert_array_equal(ExtractGradient(a), np.eye(1, 3)) np.testing.assert_array_equal(ExtractGradient(b), np.hstack((np.zeros((2, 1)), np.eye(2)))) c_grad = [[2, 4, 5], [1, -1, 0]] c = InitializeAutoDiff(value=[2, 3], gradient=c_grad) np.testing.assert_array_equal(ExtractValue(c), np.array([2, 3]).reshape((2, 1))) np.testing.assert_array_equal(ExtractGradient(c), np.array(c_grad)) def test_autodiff_equal_to(self): a = AD(1.0, [1.0, 2.0]) b = AD(1.0, [3.0, 4.0]) # Test expected contract in terms of `np.vectorize` and derivatives. np.testing.assert_array_equal( mut.autodiff_equal_to(a, a), True, ) np.testing.assert_array_equal( mut.autodiff_equal_to([a, a], a), [True, True], ) np.testing.assert_array_equal( mut.autodiff_equal_to(a, b), False, ) np.testing.assert_array_equal( mut.autodiff_equal_to([a], [b]), [False], ) with self.assertRaises(AssertionError): mut.autodiff_equal_to(a, 1.0) # Test contract for `semantic=True`. 
empty_deriv = AD(1.0) zero_deriv1 = AD(1.0, [0.0]) zero_deriv2 = AD(1.0, [0.0, 0.0]) nonzero_deriv2 = AD(1.0, [0.0, 1e-8]) self.assertTrue(mut.autodiff_equal_to(empty_deriv, empty_deriv)) self.assertFalse(mut.autodiff_equal_to(empty_deriv, zero_deriv1)) self.assertTrue( mut.autodiff_equal_to(empty_deriv, zero_deriv1, semantic=True) ) self.assertTrue( mut.autodiff_equal_to(empty_deriv, zero_deriv2, semantic=True) ) self.assertFalse( mut.autodiff_equal_to(zero_deriv1, zero_deriv2, semantic=True) ) self.assertFalse( mut.autodiff_equal_to(empty_deriv, nonzero_deriv2, semantic=True) ) def test_vectorized_binary_operator_type_combinatorics(self): """ Tests vectorized binary operator via brute-force combinatorics per #15549. This complements test with the same name in ``symbolic_test.py``. """ def expand_values(value): return ( # Scalar. value, # Scalar array. np.array(value), # Size-1 array. np.array([value]), # Size-2 array. np.array([value, value]), ) operators = drake_math._OPERATORS operators_reverse = drake_math._OPERATORS_REVERSE x = AD(2.0, [1.0, 2.0]) y = AD(0.0, [3.0, 4.0]) T_operands_x = expand_values(x) T_operands_y = expand_values(y) numeric_operands = ( # Float. # - Native. expand_values(1.0) # - np.generic + expand_values(np.float64(1.0)) # Int. # - Native. + expand_values(1) # - np.generic + expand_values(np.int64(1.0)) ) def check_operands(op, lhs_operands, rhs_operands): operand_combinatorics_iter = itertools.product( lhs_operands, rhs_operands ) op_reverse = operators_reverse[op] for lhs, rhs in operand_combinatorics_iter: hint_for_error = f"{op.__doc__}: {repr(lhs)}, {repr(rhs)}" with numpy_compare.soft_sub_test(hint_for_error): value = op(lhs, rhs) reverse_value = op_reverse(rhs, lhs) numpy_compare.assert_equal(value, reverse_value) # Combinations (unordered) that we're interested in. operand_combinations = ( (T_operands_x, T_operands_y), (T_operands_x, numeric_operands), ) for op in operators: for (op_a, op_b) in operand_combinations: check_operands(op, op_a, op_b) check_operands(op, op_b, op_a)
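# ---------------------------------------------------------------------------
# Illustrative sketch, guarded so it only runs if this file is executed
# directly (the Bazel test runner should never reach it): the same
# InitializeAutoDiff / ExtractGradient pair exercised in test_math_utils
# doubles as a tiny forward-mode Jacobian computation.
if __name__ == "__main__":
    x = InitializeAutoDiff(value=[1.0, 2.0])  # 2x1 values, seeded with eye(2)
    y = x * x  # elementwise square propagates the derivatives
    print(ExtractValue(y))     # [[1.], [4.]]
    print(ExtractGradient(y))  # [[2., 0.], [0., 4.]]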
0
/home/johnshepherd/drake
/home/johnshepherd/drake/tools/ubuntu-noble.bazelrc
# Options for explicitly using Clang. # Keep this in sync with doc/_pages/from_source.md. common:clang --repo_env=CC=clang-15 common:clang --repo_env=CXX=clang++-15 build:clang --action_env=CC=clang-15 build:clang --action_env=CXX=clang++-15 build:clang --host_action_env=CC=clang-15 build:clang --host_action_env=CXX=clang++-15 build --define=UBUNTU_VERSION=24.04
0
/home/johnshepherd/drake
/home/johnshepherd/drake/tools/BUILD.bazel
load("//tools/lint:lint.bzl", "add_lint_tests") load("//tools/skylark:drake_py.bzl", "drake_py_binary", "drake_py_library") load( "//tools/skylark:drake_runfiles_binary.bzl", "drake_runfiles_binary", ) package(default_visibility = ["//visibility:public"]) drake_py_library( name = "module_py", srcs = ["__init__.py"], visibility = [":__subpackages__"], ) alias( name = "meldis", actual = "//bindings/pydrake/visualization:meldis", ) alias( name = "model_visualizer", actual = "//bindings/pydrake/visualization:model_visualizer", ) drake_py_binary( name = "model_visualizer_private", testonly = True, srcs = ["model_visualizer_private.py"], data = ["//:some_test_models"], visibility = ["//visibility:private"], deps = ["//bindings/pydrake/visualization:model_visualizer"], ) alias( name = "mesh_to_model", actual = "//bindings/pydrake/multibody:mesh_to_model", ) alias( name = "fix_inertia", actual = "//bindings/pydrake/multibody:fix_inertia", ) # === config_setting rules === # When this is set, a Drake build will promote some warnings to errors. # See drake/tools/cc_toolchain/bazel.rc for details. config_setting( name = "drake_werror", values = {"define": "DRAKE_WERROR=ON"}, ) config_setting( name = "with_gurobi", values = {"define": "WITH_GUROBI=ON"}, ) config_setting( name = "with_mosek", values = {"define": "WITH_MOSEK=ON"}, ) config_setting( name = "with_snopt", values = {"define": "WITH_SNOPT=ON"}, ) config_setting( name = "using_sanitizer", values = {"define": "USING_SANITIZER=ON"}, ) config_setting( name = "using_memcheck", values = {"define": "USING_MEMCHECK=ON"}, ) # Clarabel is an open-source solver, and is included in the Drake build by # default. The Clarabel solver is irrelevant to some users of # MathematicalProgram, so we provide a hidden switch to shut it off for # developers who don't actually need it. This is not a supported configuration. # Use at your own risk: --define=NO_CLARABEL=ON config_setting( name = "no_clarabel", values = {"define": "NO_CLARABEL=ON"}, ) # CLP is an open-source solver, and is included in the Drake build by # default. The CLP solver is irrelevant to some users of MathematicalProgram, # so we provide a hidden switch to shut it off for developers who don't # actually need it. This is not a supported configuration. Use at your own # risk: --define=NO_CLP=ON config_setting( name = "no_clp", values = {"define": "NO_CLP=ON"}, ) # CSDP is an open-source solver, and is included in the Drake build by default. # The CSDP solver is irrelevant to some users of MathematicalProgram, so we # provide a hidden switch to shut it off for developers who don't actually need # it. This is not a supported configuration. Use at your own risk: # --define=NO_CSDP=ON config_setting( name = "no_csdp", values = {"define": "NO_CSDP=ON"}, ) # IPOPT is an open-source solver, and is included in the Drake build by # default. The IPOPT solver is irrelevant to some users of MathematicalProgram, # so we provide a hidden switch to shut it off for developers who don't # actually need it. This is not a supported configuration. Use at your own # risk: --define=NO_IPOPT=ON config_setting( name = "no_ipopt", values = {"define": "NO_IPOPT=ON"}, ) # NLOPT is an open-source solver, and is included in the Drake build by # default. The NLOPT solver is irrelevant to some users of MathematicalProgram, # so we provide a hidden switch to shut it off for developers who don't # actually need it. This is not a supported configuration. 
Use at your own # risk: --define=NO_NLOPT=ON config_setting( name = "no_nlopt", values = {"define": "NO_NLOPT=ON"}, ) # OSQP is an open-source solver, and is included in the Drake build by # default. The OSQP solver is irrelevant to some users of MathematicalProgram, # so we provide a hidden switch to shut it off for developers who don't # actually need it. This is not a supported configuration. Use at your own # risk: --define=NO_OSQP=ON config_setting( name = "no_osqp", values = {"define": "NO_OSQP=ON"}, ) # SCS is an open-source solver, and is included in the Drake build by default. # The SCS solver is irrelevant to some users of MathematicalProgram, so we # provide a hidden switch to shut it off for developers who don't actually need # it. This is not a supported configuration. Use at your own risk: # --define=NO_SCS=ON config_setting( name = "no_scs", values = {"define": "NO_SCS=ON"}, ) # We are exploring adding USD support to Drake. For the moment, it is opt-in. # See https://github.com/PixarAnimationStudios/OpenUSD for details. # Use `--define=WITH_USD=ON` on the bazel command line to enable it. # (In CI, --config=everything also enables it on Ubuntu.) config_setting( name = "with_usd", values = {"define": "WITH_USD=ON"}, ) config_setting( name = "ubuntu_jammy", values = {"define": "UBUNTU_VERSION=22.04"}, ) config_setting( name = "ubuntu_noble", values = {"define": "UBUNTU_VERSION=24.04"}, ) add_lint_tests()
0
/home/johnshepherd/drake
/home/johnshepherd/drake/tools/model_visualizer_private.py
"""(Internal use only) This program allows Drake developers to visualize model files used by tests, e.g.: bazel run //tools:model_visualizer_private -- \ package://drake/geometry/render/test/box.sdf When using a Drake URI (package://drake), the filegroup with the model to be visualized must be added to either `//:all_models` or `//:some_test_models` in the top-level BUILD.bazel file. """ from pydrake.visualization.model_visualizer import _main _main()
0
/home/johnshepherd/drake
/home/johnshepherd/drake/tools/ubuntu.bazelrc
# Common options for Ubuntu, no matter the version. build --force_pic build --fission=dbg build --features=per_object_debug_info # Configure ${PATH} for actions. # N.B. Ensure this is consistent with `execute.bzl`. build --action_env=PATH=/usr/bin:/bin # Disable python imports from ~/.local (pip -U) during build and test. # https://github.com/bazelbuild/bazel/issues/4939 # https://github.com/RobotLocomotion/drake/issues/8475 build --action_env=PYTHONNOUSERSITE=1 build --test_env=PYTHONNOUSERSITE=1 # Enable OpenMP (when requested via --config=omp or --config=everything). build:omp --copt=-DEIGEN_DONT_PARALLELIZE build:omp --copt=-fopenmp build:omp --linkopt=-fopenmp build:packaging --config=omp build:everything --config=omp # Options for explicitly using GCC. common:gcc --repo_env=CC=gcc common:gcc --repo_env=CXX=g++ build:gcc --action_env=CC=gcc build:gcc --action_env=CXX=g++ build:gcc --host_action_env=CC=gcc build:gcc --host_action_env=CXX=g++
0
/home/johnshepherd/drake
/home/johnshepherd/drake/tools/macos-arch-i386.bazelrc
# Options for macOS when running on i386 as reported by `arch` (i.e., x86_64). # N.B. Ensure this is consistent with `execute.bzl`. build --action_env=PATH=/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin # Releases on macOS x86_64 no longer contain IPOPT. build:packaging --define=NO_IPOPT=ON
0
/home/johnshepherd/drake
/home/johnshepherd/drake/tools/README.md
This directory contains files related to the Bazel build system. https://bazel.build/ See the `README.md` files in sub-directories for additional details. See `//doc/bazel.rst` for additional Drake-specific information.
0
/home/johnshepherd/drake
/home/johnshepherd/drake/tools/ubuntu-jammy.bazelrc
# Options for explicitly using Clang. # Keep this in sync with doc/_pages/from_source.md. common:clang --repo_env=CC=clang-15 common:clang --repo_env=CXX=clang++-15 build:clang --action_env=CC=clang-15 build:clang --action_env=CXX=clang++-15 build:clang --host_action_env=CC=clang-15 build:clang --host_action_env=CXX=clang++-15 # In CI (but only in CI), we want to test the opt-in USD build support. # TODO(jwnimmer-tri) We should try to enable USD by default on all platforms. build:everything --define=WITH_USD=ON build --define=UBUNTU_VERSION=22.04
0
/home/johnshepherd/drake
/home/johnshepherd/drake/tools/macos-arch-arm64.bazelrc
# Options for macOS when running on arm64 as reported by `arch`. # N.B. Ensure this is consistent with `execute.bzl`. build --action_env=PATH=/opt/homebrew/bin:/usr/bin:/bin:/usr/sbin:/sbin
0
/home/johnshepherd/drake
/home/johnshepherd/drake/tools/bazel.rc
# Don't use bzlmod yet. common --enable_bzlmod=false # Default to an optimized build. build -c opt # Default build options. build --strip=never build --strict_system_includes # Use C++20 by default. build --cxxopt=-std=c++20 build --host_cxxopt=-std=c++20 # The legacy __init__.py causes runfiles test flakes with Bazel >= 7, and # in any case we don't want this feature in the first place. We should # create all __init__.py files ourselves by hand anywhere we need them. build --incompatible_default_to_explicit_init_py # Default test options. build --test_output=errors build --test_summary=terse # By default, disable execution of tests that require proprietary software. # Individual targets that use proprietary software are responsible for ensuring # they can also build without it, typically by using a select statement. # config_setting rules for proprietary software are provided in //tools. build --test_tag_filters=-gurobi,-mosek,-snopt # Allow user environment variables relating to host graphics configuration to # flow through to tests that use X. build --test_env=DISPLAY build --test_env=XAUTHORITY # Location of the Gurobi license key file, typically named "gurobi.lic". # Setting this --test_env for all configurations is deliberate to improve # remote caching performance. build --test_env=GRB_LICENSE_FILE # Location of the MOSEK license file, typically named "mosek.lic". Setting # this --test_env for all configurations is deliberate to improve remote # caching performance. build --test_env=MOSEKLM_LICENSE_FILE # Prevent LCM messages from leaking outside of tests. # See documentation: https://lcm-proj.github.io/group__LcmC__lcm__t.html#gaf29963ef43edadf45296d5ad82c18d4b # noqa # WARNING: If your LCM test depends on communication between separate LCM # instances (and you cannot easily mock the LCM instances), make sure that each # instance has a consistent, but non-default, LCM URL. build --test_env=LCM_DEFAULT_URL=memq:// # Prevent matplotlib from showing windows (#11029). build --test_env=MPLBACKEND=Template ### A configuration that enables all optional dependencies. ### build:everything --test_tag_filters=-no_everything ### A configuration that enables Gurobi. ### # -- To use this config, the GRB_LICENSE_FILE environment variable must be set # -- to the location of the Gurobi license key file. On Ubuntu, the GUROBI_HOME # -- environment variable must be set to the linux64 directory of the extracted # -- archive downloaded from gurobi.com when the installation was not at the # -- default location of /opt/gurobi1002/linux64. # # -- To run tests where Gurobi is used, ensure that you include # -- "gurobi_test_tags()" from //tools/skylark:test_tags.bzl. # -- If Gurobi is optional, set gurobi_required=False. build:gurobi --define=WITH_GUROBI=ON build:everything --define=WITH_GUROBI=ON # N.B. The build:packaging configuration does NOT use Gurobi (yet). # See https://github.com/RobotLocomotion/drake/issues/10804. build:packaging --test_tag_filters=-gurobi ### A configuration that enables MOSEK™. ### # -- To use this config, the MOSEKLM_LICENSE_FILE environment variable must be # -- set to the location of the MOSEK license file. # # -- To run tests where MOSEK™ is used, ensure that you include # -- "mosek_test_tags()" from //tools/skylark:test_tags.bzl. # -- If MOSEK™ is optional, set mosek_required=False. build:mosek --define=WITH_MOSEK=ON build:packaging --define=WITH_MOSEK=ON build:everything --define=WITH_MOSEK=ON ### A configuration that enables SNOPT. 
### # -- To use this config, you must have access to the private repository # -- RobotLocomotion/snopt on GitHub, and your local git must be configured # -- with SSH keys as documented at https://drake.mit.edu/from_source.html. # # -- To run tests that require SNOPT, also specify a set of test_tag_filters # -- that does not exclude the "snopt" tag. build:snopt --define=WITH_SNOPT=ON build:packaging --define=WITH_SNOPT=ON build:everything --define=WITH_SNOPT=ON
0
/home/johnshepherd/drake
/home/johnshepherd/drake/tools/macos.bazelrc
# Common options for macOS, no matter the arch (x86 or arm). # Suppress numerous "'_FORTIFY_SOURCE' macro redefined" warnings when using # sanitizers. build:asan --copt=-Wno-macro-redefined build:asan_everything --copt=-Wno-macro-redefined build:tsan --copt=-Wno-macro-redefined build:tsan_everything --copt=-Wno-macro-redefined build:ubsan --copt=-Wno-macro-redefined build:ubsan_everything --copt=-Wno-macro-redefined # https://github.com/bazelbuild/bazel/issues/14294 build --notrim_test_configuration # Options for explicitly using Clang. common:clang --repo_env=CC=clang common:clang --repo_env=CXX=clang++ build:clang --action_env=CC=clang build:clang --action_env=CXX=clang++ build:clang --host_action_env=CC=clang build:clang --host_action_env=CXX=clang++
0
/home/johnshepherd/drake
/home/johnshepherd/drake/tools/__init__.py
# Empty Python module `__init__`, required to make this a module.
0
/home/johnshepherd/drake
/home/johnshepherd/drake/tools/prstat
#!/bin/bash # prstat -- Report the lines of code added or changed in the current tree vs # upstream/master, excluding files that are known not to count against the line # limit for platform review. set -e # Exclude data files and files that are in a /dev/ folder. excludes="" excludes="$excludes --exclude=*.mtl" excludes="$excludes --exclude=*.obj" excludes="$excludes --exclude=*.sdf" excludes="$excludes --exclude=*.urdf" excludes="$excludes --exclude=*.xml" excludes="$excludes --exclude=*/dev/*" # Also exclude files that contain the reviewable.io phrase that indicates # generated code. See https://github.com/Reviewable/Reviewable/wiki/FAQ. marker="GENERATED FILE ""DO NOT EDIT" toplevel=$(git rev-parse --show-toplevel) git_base_path=$(git merge-base upstream/master HEAD) for relpath in $(git diff $git_base_path | lsdiff $excludes --strip=1); do if fgrep -s -q -e"$marker" $toplevel/$relpath; then excludes="$excludes --exclude=$relpath" fi done # Display a final summary. git diff $git_base_path | filterdiff --strip-match=1 --strip=1 $excludes | diffstat -p 0
0
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/lcm_gen/BUILD.bazel
load( "@drake//tools/skylark:drake_cc.bzl", "drake_cc_googletest", "drake_cc_library", ) load( "@drake//tools/skylark:drake_py.bzl", "drake_py_binary", "drake_py_library", "drake_py_unittest", ) load("//tools/lint:lint.bzl", "add_lint_tests") # The library target for this tool. drake_py_library( name = "module_py", srcs = ["__init__.py"], deps = ["//tools:module_py"], ) # The command-line target for this tool. drake_py_binary( name = "lcm_gen", srcs = ["__init__.py"], tags = [ # The "module_py" handles the linting for "__init__.py"; we suppress # it here to avoid duplicate linter complaints. "nolint", ], visibility = ["//:__subpackages__"], deps = [":module_py"], ) drake_py_unittest( name = "lcm_gen_test", data = [ "test/goal/lima.hpp", "test/goal/mike.hpp", "test/goal/november.hpp", "test/lima.lcm", "test/mike.lcm", "test/november.lcm", ], deps = [ ":module_py", "@rules_python//python/runfiles", ], ) # We'll run the upstream reference implementation of lcm-gen, for comparison. # When doing that, we'll use the LCM package name "romeo" to distinguish the # upstream reference output from our tool's output, which is in package "papa". # That way, we can safely include both in the same test program for comparison. # This rule generates the package-renamed `*.lcm` source files. genrule( name = "gen_romeo_lcm_sources", testonly = True, srcs = [ "test/lima.lcm", "test/mike.lcm", "test/november.lcm", ], outs = [ "test/romeo/lima.lcm", "test/romeo/mike.lcm", "test/romeo/november.lcm", ], cmd = " && ".join([ " ".join([ # Replace 'papa' with 'romeo'. "sed -e 's#papa#romeo#g;'", "$(execpath test/{}.lcm)".format(name), " > ", "$(RULEDIR)/test/romeo/{}.lcm".format(name), ]) for name in [ "lima", "mike", "november", ] ]), ) # Run the upstream reference implementation of lcm-gen. genrule( name = "gen_romeo_hpp", testonly = True, srcs = [ ":test/romeo/lima.lcm", ":test/romeo/mike.lcm", ":test/romeo/november.lcm", ], outs = [ "test/romeo/lima.hpp", "test/romeo/mike.hpp", "test/romeo/november.hpp", ], cmd = " ".join([ "$(execpath @lcm//:lcm-gen)", "--cpp", "--cpp-std=c++11", "--use-quotes-for-includes", "--cpp-hpath=$(RULEDIR)/test", "$(execpath :test/romeo/lima.lcm)", "$(execpath :test/romeo/mike.lcm)", "$(execpath :test/romeo/november.lcm)", ]), tools = [ "@lcm//:lcm-gen", ], ) # Compile the reference implementation's C++ output (i.e., headers). cc_library( name = "romeo", testonly = True, hdrs = [ ":test/romeo/lima.hpp", ":test/romeo/mike.hpp", ":test/romeo/november.hpp", ], includes = ["test"], tags = ["nolint"], deps = [ "@lcm", ], ) # Compile our tools' generated headers. Here we use the _goal_ header files # instead of _auto-generated_ header files so we can separate the questions of # "does the tool generate the expected headers" (via the lcm_gen_test) vs "do # the headers encode/decode correctly" (via the functional_test). cc_library( name = "papa", testonly = True, hdrs = [ "test/goal/lima.hpp", "test/goal/mike.hpp", "test/goal/november.hpp", ], include_prefix = "papa", strip_include_prefix = "test/goal", tags = ["nolint"], ) drake_cc_googletest( name = "functional_test", deps = [ ":papa", ":romeo", "//lcm:lcm_messages", ], ) # TODO(jwnimmer-tri) Add a unit test that checks our claims about an upstream # lcm_gen message being able to depend on our lcm_gen message as a nested # sub-struct. At the moment we have no test coverage of the legacy API's # nesting support (_encodeNoHash, _decodeNoHash, _computeHash). add_lint_tests()
0
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/lcm_gen/__init__.py
"""A portable re-implementation of lcm-gen (see lcm-proj.github.io) using only the Python 3 standard library. Currently, the only output language supported is C++. We match the convention of upstream lcm-gen that the output header filename is the same as the message struct name. # Details For the LCM syntax and encoding specification, refer to: https://lcm-proj.github.io/lcm/content/lcm-type-ref.html In the generated C++ classes, two similar APIs are offered: - The "legacy" API matches the upstream lcm-gen tool, e.g., the `encode()` and `decode()` functions operate on `void*` data and return an offset counter. - The "new" API is `_encode()` and `_decode()` functions that operate on `uint8_t*` data via a cursor, and return a success bool. These functions take advantage of modern C++ 17 (e.g., `constexpr` for the hash functions). Messages generated by the upstream lcm-gen tool can successfully refer to sub-struct messages generated by this tool. Messages generated by this tool can NOT refer to sub-struct messages generated by the upstream lcm-gen tool. In other words, for any message that is generated by this version of lcm-gen, all of its nested structs must also be generated by this version of lcm-gen. """ import argparse import dataclasses import enum import io import os import pathlib import re import struct import token import tokenize from typing import Optional, List, Union # A brief summary of LCM's grammar. # # Productions: # root -> package_decl? struct_decl* ; # package_decl -> PACKAGE identifier SEMI ; # struct_decl -> STRUCT identifier LCURL struct_statement* RCURL ; # struct_statement -> ( const_statement | field_statement ) ; # const_statement -> CONST primitive_type const_definition # ( COMMA const_definition )* SEMI ; # const_definition -> identifier EQ value # field_statement -> qualified_identifier identifier array_dims? SEMI ; # array_dims -> ( LSQUARE array_dim RSQUARE )* ; # array_dim -> ( integer | identifier ); # qualified_identifier -> ( identifier DOT )? identifier ; # # Where primitive_type, identifier, integer, and value are primitive lexemes. # # This tool does not support the deprecated[1] and undocumented "enum" keyword. # [1] https://github.com/lcm-proj/lcm/commit/d9dcf8e3 PrimitiveType = enum.Enum("PrimitiveType", " ".join([ "boolean", "byte", "double", "float", "int8_t", "int16_t", "int32_t", "int64_t", "string", ])) PrimitiveType.__str__ = lambda self: self.name @dataclasses.dataclass(frozen=True) class UserType: """A struct name from an LCM message definition, e.g., "foo" or "foo.bar". 
""" package: Optional[str] name: str def __str__(self): if self.package is None: return self.name return f"{self.package}.{self.name}" @dataclasses.dataclass(frozen=True) class StructField: """A field within an LCM message definition.""" name: str typ: Union[PrimitiveType, UserType] array_dims: List[Union[int, str]] = dataclasses.field(default_factory=list) def __str__(self): result = f"{self.typ} {self.name}" for dim in self.array_dims: result += f"[{dim}]" return result @dataclasses.dataclass(frozen=True) class StructConstant: """A constant within an LCM message definition.""" name: str typ: PrimitiveType value: Union[int, float] value_str: str def __str__(self): return f"const {self.typ} {self.name} = {self.value_str}" @dataclasses.dataclass(frozen=True) class Struct: """The parse tree for an LCM message definition.""" typ: UserType fields: List[StructField] = dataclasses.field(default_factory=list) constants: List[StructConstant] = dataclasses.field(default_factory=list) def __str__(self): result = f"struct {self.typ} {{\n" for c in self.constants: result += f" {c};\n" for f in self.fields: result += f" {f};\n" result += "}\n" return result class Parser: """A basic recursive descent parser for the LCM message grammar. This parser only accepts files that have exactly one struct_decl. Having no structs (or more than one struct) is a parse error. """ @staticmethod def parse(*, filename): """Returns a parsed Struct for the given filename.""" return Parser(filename=filename)._root() def __init__(self, *, filename): """(Internal use only.)""" self._filename = filename self._result = None # Load the file. data = pathlib.Path(filename).read_text(encoding="utf-8") # Remove comments. data = self._remove_c_comments(data) data = self._remove_cpp_comments(data) # Tokenize. bytes_io = io.BytesIO(bytes(data, encoding="utf-8")) self._tokens = list(tokenize.tokenize(bytes_io.readline)) self._i = 0 self._consume(token.ENCODING) @staticmethod def _remove_c_comments(data): """Returns data with its C-style comments replaced with whitespace (so that column numbers in error messages still make sense). """ while True: m = re.search(r"/\*.*?\*/", data, flags=re.DOTALL) if not m: break replacement = "".join([ ch if ch == "\n" else " " for ch in m.group() ]) start, end = m.span() data = data[:start] + replacement + data[end:] return data @staticmethod def _remove_cpp_comments(data): """Returns data stripped of its C++-style comments.""" return re.sub(r"//.*$", "", data, flags=re.MULTILINE) def _current_type(self): """Returns the type of the current token. The enumerated types are per the Python `token` module. """ return self._tokens[self._i][0] def _current_value(self): """Returns string value of the current token.""" return self._tokens[self._i][1] def _syntax_error_details(self): """Provides the detail attribute of a SyntaxError.""" return ( self._filename, self._tokens[self._i][2][0], None, None) def _expect(self, expected_type, expected_value=None): """Raises a syntax error unless the current token matches the expected type and value (if given). 
""" actual_value = self._current_value() actual_type = self._current_type() actual_typename = token.tok_name[actual_type] expected_typename = token.tok_name[expected_type] if expected_value is not None and actual_value != expected_value: raise SyntaxError( f"Expected '{expected_value}' " f"but got '{actual_value}'", self._syntax_error_details()) if actual_type != expected_type: raise SyntaxError( f"Expected a token.{expected_typename}" f" but got a token.{actual_typename} ('{actual_value}')", self._syntax_error_details()) def _advance(self): """Advances the parser to the next token, skipping whitespace.""" self._i += 1 while self._current_type() in (token.NEWLINE, token.NL): self._i += 1 def _consume(self, token_type, token_value=None): """Does expect() then advance(). Returns the prior value (i.e., the consumed value) as a string. """ result = self._current_value() self._expect(expected_type=token_type, expected_value=token_value) if token_type != token.ENDMARKER: self._advance() return result def _root(self): """Parses a root production.""" package = None if self._current_value() == "package": package = self._package_decl() self._struct_decl(package=package) self._consume(token.ENDMARKER) return self._result def _package_decl(self): """Parses a package_decl production.""" self._consume(token.NAME, "package") package = self._consume(token.NAME) self._consume(token.OP, ";") return package def _struct_decl(self, package): """Parses a struct_decl production.""" self._consume(token.NAME, "struct") name = self._consume(token.NAME) self._result = Struct(typ=UserType(package=package, name=name)) self._consume(token.OP, "{") while True: if self._current_type() != token.NAME: break elif self._current_value() == "const": self._const_statement() else: self._field_statement() self._consume(token.OP, "}") def _const_statement(self): """Parses a const_statement production.""" self._consume(token.NAME, "const") typ_str = self._consume(token.NAME) typ = None try: typ = PrimitiveType[typ_str] except KeyError: pass if typ is None or typ == PrimitiveType.string: self._i -= 1 raise SyntaxError( f"Expected a primitive type name but got '{typ_str}'", self._syntax_error_details()) self._const_definition(typ=typ) while self._current_value() == ",": self._consume(token.OP, ",") self._const_definition(typ=typ) self._consume(token.OP, ";") def _const_definition(self, *, typ): """Parses a const_definition production.""" name = self._consume(token.NAME) self._consume(token.OP, "=") value_sign = "" if self._current_value() in ["+", "-"]: value_sign = self._consume(token.OP) value_str = value_sign + self._consume(token.NUMBER) try: if typ.name in ["float", "double"]: value = float(value_str) else: value = int(value_str) except ValueError: value = None if value is None: self._i -= 1 raise SyntaxError( f"Invalid constant value '{value_str}' for {typ.name}", self._syntax_error_details()) self._result.constants.append(StructConstant( name=name, typ=typ, value=value, value_str=value_str)) def _field_statement(self): """Parses a field_statement production.""" typ = self._qualified_identifier() name = self._consume(token.NAME) array_dims = [] while self._current_value() == "[": self._consume(token.OP, "[") if self._current_type() == token.NAME: dim = self._consume(token.NAME) else: dim = int(self._consume(token.NUMBER)) assert dim >= 0 self._consume(token.OP, "]") array_dims.append(dim) self._consume(token.OP, ";") self._result.fields.append(StructField( name=name, typ=typ, array_dims=array_dims)) def 
_qualified_identifier(self): """Parses a qualified_identifier production.""" name1 = self._consume(token.NAME) try: return PrimitiveType[name1] except KeyError: pass if self._current_value() == ".": self._consume(token.OP, ".") name2 = self._consume(token.NAME) return UserType(package=name1, name=name2) current_package = self._result.typ.package return UserType(package=current_package, name=name1) _CPP_TEMPLATE = """\ #pragma once #include <array> #include <cstddef> #include <cstdint> #include <cstring> #include <string> #include <tuple> #include <utility> #include <vector> @@SUBSTRUCT_INCLUDES@@ @@NAMESPACE_BEGIN@@ class @@STRUCT_NAME@@ { public: @@MEMBER_CONSTANTS@@ @@MEMBER_FIELDS@@ // These functions match the expected API from the legacy lcm-gen tool, // but note that we use `int64_t` instead of `int` for byte counts. //@{ static const char* getTypeName() { return "@@STRUCT_NAME@@"; } int64_t getEncodedSize() const { return 8 + _getEncodedSizeNoHash(); } int64_t _getEncodedSizeNoHash() const { int64_t _result = 0; @@GET_ENCODED_SIZE_NO_HASH@@ return _result; } template <bool with_hash = true> int64_t encode(void* buf, int64_t offset, int64_t maxlen) const { uint8_t* const _begin = static_cast<uint8_t*>(buf); uint8_t* const _start = _begin + offset; uint8_t* const _end = _begin + maxlen; uint8_t* _cursor = _start; return this->_encode<with_hash>(&_cursor, _end) ? (_cursor - _start) : -1; } int64_t _encodeNoHash(void* buf, int64_t offset, int64_t maxlen) const { return encode<false>(buf, offset, maxlen); } template <bool with_hash = true> int64_t decode(const void* buf, int64_t offset, int64_t maxlen) { const uint8_t* const _begin = static_cast<const uint8_t*>(buf); const uint8_t* const _start = _begin + offset; const uint8_t* const _end = _begin + maxlen; const uint8_t* _cursor = _start; return this->_decode<with_hash>(&_cursor, _end) ? (_cursor - _start) : -1; } int64_t _decodeNoHash(const void* buf, int64_t offset, int64_t maxlen) { return decode<false>(buf, offset, maxlen); } static constexpr int64_t getHash() { return static_cast<int64_t>(_get_hash_impl()); } template <typename Parents> static uint64_t _computeHash(const Parents*) { return getHash(); } //@} // New-style (constexpr) hashing. template <size_t N = 0> static constexpr uint64_t _get_hash_impl( const std::array<uint64_t, N>& parents = {}) { const uint64_t base_hash = @@BASE_HASH@@; @@GET_HASH_DECLARE_NEW_PARENTS@@ for (size_t n = 0; n < N; ++n) { if (parents[n] == base_hash) { // Special case for recursive message definition. return 0; } @@GET_HASH_UPDATE_NEW_PARENT@@ } @@COMPOSITE_HASH@@ return (composite_hash << 1) + ((composite_hash >> 63) & 1); } // New-style encoding. template <bool with_hash = true> bool _encode(uint8_t** _cursor, uint8_t* _end) const { constexpr int64_t _hash = _get_hash_impl(); return // true iff success @@ENCODE@@ } // New-style decoding. template <bool with_hash = true> bool _decode(const uint8_t** _cursor, const uint8_t* _end) { constexpr int64_t _expected_hash = _get_hash_impl(); int64_t _hash = _expected_hash; return // true iff success @@DECODE@@ } private: // Given an N-byte integer at `_input` in network byte order, returns it as // a host unsigned integer using the matching unsigned integer type. (This // is also used to convert host to network order; it's the same operation.) 
template <size_t N> static auto _byteswap(const void* _input) { // clang-format off using result_t = std::conditional_t< N == 1, uint8_t, std::conditional_t< N == 2, uint16_t, std::conditional_t< N == 4, uint32_t, std::conditional_t< N == 8, uint64_t, void>>>>; // clang-format on result_t _result; std::memcpy(&_result, _input, N); // TODO(jwnimmer-tri) Don't bswap on PowerPC. if constexpr (N == 1) { return _result; } else if constexpr (N == 2) { return __builtin_bswap16(_result); } else if constexpr (N == 4) { return __builtin_bswap32(_result); } else if constexpr (N == 8) { return __builtin_bswap64(_result); } } // The dimensions of an array, for use during encoding / decoding, e.g., for // a message field `int8_t image[6][4]` we'd use `ArrayDims<2>{6, 4}`. template <size_t ndims> using ArrayDims = std::array<int64_t, ndims>; // Returns the second and following elements of _dims (i.e., _dims[1:]). // https://en.wikipedia.org/wiki/CAR_and_CDR template <size_t ndims> static ArrayDims<ndims - 1> _cdr(const std::array<int64_t, ndims>& _dims) { static_assert(ndims > 0); ArrayDims<ndims - 1> _result; for (size_t i = 1; i < ndims; ++i) { _result[i - 1] = _dims[i]; } return _result; } // Given a field (or child element within a field), encodes it into the given // byte cursor and advances the cursor, returning true on success. Arrays are // passed with `_input` as vector-like container and `_dims` as the list of // multi-dimensional vector sizes, e.g., `int8_t image[6][4]` would be called // like `_encode_field(image.at(0), &cursor, end, ArrayDims<2>{6, 4})`. In // LCM messages, multi-dimensional arrays are encoded using C's memory layout // (i.e., with the last dimension as the most tightly packed.) template <typename T, size_t ndims = 0> static bool _encode_field(const T& _input, uint8_t** _cursor, uint8_t* _end, const ArrayDims<ndims>& _dims = ArrayDims<0>{}) { static_assert(!std::is_pointer_v<T>); if constexpr (ndims == 0) { // With no array dimensions, just decode the field directly. if constexpr (std::is_fundamental_v<T>) { // POD input. constexpr size_t N = sizeof(T); if (*_cursor + N > _end) { return false; } auto _swapped = _byteswap<N>(&_input); std::memcpy(*_cursor, &_swapped, N); *_cursor += N; return true; } else if constexpr (std::is_same_v<T, std::string>) { // String input. const int32_t _size = _input.size() + 1; const bool ok = (_input.size() < INT32_MAX) && (*_cursor + sizeof(_size) + _size <= _end) && _encode_field(_size, _cursor, _end); if (ok) { std::memcpy(*_cursor, _input.c_str(), _size); } *_cursor += _size; return ok; } else { // Struct input. return _input.template _encode<false>(_cursor, _end); } } else { // Cross-check the container size vs the size specified in the message's // size field. (For fixed-size containers this is a no-op.) if (static_cast<int64_t>(_input.size()) != _dims[0]) { return false; } // Encode each sub-item in turn, forwarding all the _dims but the first. for (const auto& _child : _input) { if (!_encode_field(_child, _cursor, _end, _cdr(_dims))) { return false; } } return true; } } // Given a pointer to a field (or child element within a field), decodes it // from the given byte cursor and advances the cursor, returning true on // success. The array `_dims` and storage order follow the same pattern as in // _encode_field(); refer to those docs for details. 
template <typename T, size_t ndims = 0> static bool _decode_field(T* _output, const uint8_t** _cursor, const uint8_t* _end, const ArrayDims<ndims>& _dims = {}) { static_assert(!std::is_pointer_v<T>); if constexpr (ndims == 0) { // With no array dimensions, just decode the field directly. if constexpr (std::is_fundamental_v<T>) { // POD output. constexpr size_t N = sizeof(T); if (*_cursor + N > _end) { return false; } auto _swapped = _byteswap<N>(*_cursor); std::memcpy(_output, &_swapped, N); *_cursor += N; return true; } else if constexpr (std::is_same_v<T, std::string>) { // String output. int32_t _size{}; const bool ok = _decode_field(&_size, _cursor, _end) && (_size > 0) && (*_cursor + _size <= _end); if (ok) { _output->replace(_output->begin(), _output->end(), *_cursor, *_cursor + _size - 1); } *_cursor += _size; return ok; } else { // Struct output. return _output->template _decode<false>(_cursor, _end); } } else { // In case of a variable-size dimension, resize our storage to match. if constexpr (std::is_same_v<T, std::vector<typename T::value_type>>) { _output->resize(_dims[0]); } // Decode each sub-item in turn. for (auto& _child : *_output) { if (!_decode_field(&_child, _cursor, _end, _cdr(_dims))) { return false; } } return true; } } }; @@NAMESPACE_END@@ """ class CppGen: """Produces C++ message code for an LCM message definition.""" _FIXED_SIZE = { PrimitiveType.boolean: 1, PrimitiveType.byte: 1, PrimitiveType.double: 8, PrimitiveType.float: 4, PrimitiveType.int8_t: 1, PrimitiveType.int16_t: 2, PrimitiveType.int32_t: 4, PrimitiveType.int64_t: 8, } def __init__(self, struct): self._struct = struct self._result = None # Fields used as array sizes will need special treatment. We'll collect # a list of their names up-front. self._size_variables = [] for field in self._struct.fields: for dim in field.array_dims: if isinstance(dim, str): if dim not in self._size_variables: self._size_variables.append(dim) def generate(self): """Returns the C++ text for the message provided in the constructor.""" self._result = _CPP_TEMPLATE self._fill_includes() self._fill_names() self._fill_member_constants() self._fill_member_fields() self._fill_encoded_size() self._fill_encode() self._fill_decode() self._fill_base_hash() self._fill_get_hash() return self._result def _replace(self, old, new): updated = self._result.replace(old, new) assert updated != self._result self._result = updated def _fill_includes(self): filenames = [ f"{field.typ.package}/{field.typ.name}.hpp" for field in self._struct.fields if isinstance(field.typ, UserType) ] includes = "\n".join([ f'#include "{filename}"\n' for filename in sorted(set(filenames)) ]) if includes: includes += "\n" self._replace("@@SUBSTRUCT_INCLUDES@@\n\n", includes) def _fill_names(self): """Updates the namespace and struct names for this message.""" namespace_begin, namespace_end = self._namespace_begin_end() self._replace("@@NAMESPACE_BEGIN@@\n\n", namespace_begin) self._replace("\n@@NAMESPACE_END@@\n", namespace_end) self._replace("@@STRUCT_NAME@@", self._struct.typ.name) def _namespace_begin_end(self): """Returns the NAMESPACE_{BEGIN,END} substitutions.""" package = self._struct.typ.package if package is None: return ("", "") return (f"namespace {package} {{\n\n", f"\n}} // namespace {package}\n") def _fill_member_constants(self): """Updates member constants for this message.""" content = "".join([ " static constexpr {typ} {name} = {value};\n".format( typ=self._full_typename(const.typ), name=const.name, value=const.value_str, ) for const in 
self._struct.constants ]) if content: content += "\n" self._replace("@@MEMBER_CONSTANTS@@\n", content) def _full_typename(self, typ): """Returns the C++ typename for the given Parser typ.""" if typ == PrimitiveType.boolean: return "bool" if typ == PrimitiveType.byte: return "uint8_t" if typ == PrimitiveType.string: return "std::string" if isinstance(typ, UserType) and typ.package is not None: return f"{typ.package}::{typ.name}" return typ.name def _fill_member_fields(self): """Updates member fields for this message.""" content = "".join([ f" {self._to_member_field_type(field)} {field.name};\n" for field in self._struct.fields ]) if content: content += "\n" self._replace("@@MEMBER_FIELDS@@\n", content) def _to_member_field_type(self, field): """Returns the C++ type for a member field declaration.""" result = self._full_typename(field.typ) for dim in reversed(field.array_dims): if isinstance(dim, int): result = f"std::array<{result}, {dim}>" else: result = f"std::vector<{result}>" return result def _fill_encoded_size(self): """Updates the getEncodedSize() implementation for this message.""" content = "" pad = " " * 4 for name in self._size_variables: content += f"{pad}if ({name} < 0) {{\n" content += f"{pad} return _result;\n" content += f"{pad}}}\n" for field in self._struct.fields: for line in self._fill_one_encoded_size(field).splitlines(): content += f"{pad}{line}\n" self._replace("@@GET_ENCODED_SIZE_NO_HASH@@\n", content) def _fill_one_encoded_size(self, field): """Returns the getEncodedSize() stanza for one member field.""" # For fixed-size elements, we can compute the byte size directly. known_encoded_size = self._known_encoded_size(field) if known_encoded_size is not None: return f"_result += {known_encoded_size}; // {field.name}\n" # For variable-size elements, we need to loop in case of arrays. content = "" pad = "" var = field.name for i in range(len(field.array_dims)): new_var = f"_{field.name}_{i}" content += f"{pad}for (const auto& {new_var} : {var}) {{\n" var = new_var pad += " " * 2 if field.typ == PrimitiveType.string: content += f"{pad}_result += sizeof(int32_t) + {var}.size() + 1;\n" else: assert isinstance(field.typ, UserType) content += f"{pad}_result += {var}._getEncodedSizeNoHash();\n" for _ in field.array_dims: pad = pad[:-2] content += f"{pad}}}\n" return content def _known_encoded_size(self, field): """If field will have a known encoded size at runtime, returns a string expression for that size; otherwise None. """ primitive_size = self._FIXED_SIZE.get(field.typ) if primitive_size is None: return None result = f"{primitive_size}" for dim in field.array_dims: result += f" * {dim}" return result def _fill_encode(self): """Updates the encode() implementation for this message.""" # Each string in `operations` is one call to a bool-valued helper that # encodes one field (or the hash) or guards out-of-bounds values. operations = [] # Check that all variable-length sizes are valid. operations.extend([ f"({dim} >= 0)" for dim in self._size_variables ]) # Encode the hash. operations.extend([ "(with_hash ? _encode_field(_hash, _cursor, _end) : true)", ]) # Encode the fields. for item in self._struct.fields: operations.extend(self._fill_one_encode(item)) # Format the sequence of operations as a C++ short-circuit expression. 
content = " &&\n".join([ " " * 8 + item for item in operations ]) + ";\n" self._replace("@@ENCODE@@\n", content) def _fill_one_encode(self, field): """Returns the encode() stanzas for one member field.""" extra_array_dims = "" if field.array_dims: dims = [str(dim) for dim in field.array_dims] extra_array_dims = f", ArrayDims<{len(dims)}>{{{', '.join(dims)}}}" return [ f"_encode_field({field.name}, _cursor, _end{extra_array_dims})", ] def _fill_decode(self): """Updates the decode() implementation for this message.""" # Each string in `operations` is one call to a bool-valued helper that # decodes one field (or the hash) or guards out-of-bounds values. operations = [] # Decode the hash. operations.extend([ "(with_hash ? _decode_field(&_hash, _cursor, _end) : true)", "(_hash == _expected_hash)", ]) # Decode the fields. for item in self._struct.fields: operations.extend(self._fill_one_decode(item)) # Format the sequence of operations as a C++ short-circuit expression. content = " &&\n".join([ " " * 8 + item for item in operations ]) + ";\n" self._replace("@@DECODE@@\n", content) def _fill_one_decode(self, field): """Returns the decode() stanzas for one member field.""" extra_array_dims = "" if field.array_dims: dims = [str(dim) for dim in field.array_dims] extra_array_dims = f", ArrayDims<{len(dims)}>{{{', '.join(dims)}}}" operations = [ f"_decode_field(&{field.name}, _cursor, _end{extra_array_dims})", ] if field.name in self._size_variables: # When we decode a field that specifies the size of a vector<...>, # we'll immediately check that it was sane. operations.append(f"({field.name} >= 0)") return operations def _fill_base_hash(self): """Updates the 'base hash' constant for this message, following the specs from https://lcm-proj.github.io/lcm/content/lcm-type-ref.html. """ # Collect the list of data to be hashed (int or str). data = [] for item in self._struct.fields: data.append(item.name) if isinstance(item.typ, PrimitiveType): data.append(item.typ.name) data.append(len(item.array_dims)) for dim in item.array_dims: data.append(1 if isinstance(dim, str) else 0) data.append(str(dim)) # Consolidate the data to be hashed into a uniform sequence of bytes. # Integers are truncated to one byte. chars = bytearray() for x in data: if isinstance(x, int): chars.append(x % 256) else: assert isinstance(x, str) chars.append(len(x) % 256) chars.extend([ord(ch) for ch in x]) # Hashify the bytes, interpreting them as an int8_t sequence. value = 0x12345678 for (c,) in struct.iter_unpack("<b", chars): # The mixing arithmetic uses signed integers. value = ((value << 8) ^ (value >> 55)) + c # Truncate as unsigned (i.e., uint64_t). value %= 2**64 # Cast back to signed (i.e., int64_t). if value >= 2**63: value -= 2**64 # Cast back to a unsigned (i.e., uint64_t). 
value %= 2**64 self._replace("@@BASE_HASH@@", f"0x{value:016x}ull") def _fill_get_hash(self): """Fills in the _get_hash_impl substitutions for this message.""" pad = " " * 4 content = f"{pad}const uint64_t composite_hash = base_hash" has_any_user_types = False for field in self._struct.fields: if isinstance(field.typ, UserType): child_type = self._full_typename(field.typ) child_hash = f"{child_type}::_get_hash_impl(new_parents)" content += f"\n{pad} + {child_hash}" has_any_user_types = True content += ";" self._replace("@@COMPOSITE_HASH@@", content) if has_any_user_types: self._replace( "@@GET_HASH_DECLARE_NEW_PARENTS@@", pad + "std::array<uint64_t, N + 1> new_parents{base_hash};") self._replace( "@@GET_HASH_UPDATE_NEW_PARENT@@", pad + " new_parents[n + 1] = parents[n];") else: self._replace("@@GET_HASH_DECLARE_NEW_PARENTS@@\n", "") self._replace("@@GET_HASH_UPDATE_NEW_PARENT@@\n", "") def main(): description, _ = __doc__.split("# Details") parser = argparse.ArgumentParser(description=description) parser.add_argument( "src", nargs="+", help="*.lcm source file(s)") parser.add_argument( "--outdir", required=True, type=pathlib.Path, help="Directory where output files should be written") args = parser.parse_args() # If we were invoked via `bazel run`, we must be careful to interpret # args.src relative to the cwd of the user, not our runfiles. real_cwd = os.environ.get("BUILD_WORKING_DIRECTORY") if real_cwd is not None: os.chdir(real_cwd) returncode = 0 for src in args.src: struct = Parser.parse(filename=src) generator = CppGen(struct=struct) content = generator.generate() path = args.outdir / f"{struct.typ.name}.hpp" path.write_text(content, encoding="utf-8") if __name__ == "__main__": main()
0
/home/johnshepherd/drake/tools/lcm_gen
/home/johnshepherd/drake/tools/lcm_gen/test/functional_test.cc
#include <gmock/gmock.h> #include <gtest/gtest.h> // The "papa" (i.e, "probed") messages are generated by our lcm_gen tool. // These are the "classes under test" for this file. // The "romeo" (i.e., "reference") messages are generated by the upstream // lcm-gen tool. Here, we use those as an "oracle" to compare against. // clang-format off #include "papa/lima.hpp" #include "papa/mike.hpp" #include "papa/november.hpp" #include "romeo/lima.hpp" #include "romeo/mike.hpp" #include "romeo/november.hpp" // clang-format on #include "drake/lcm/lcm_messages.h" namespace { // We use vector<int> here so that googletest doesn't print the bytes as // `char`s -- we want to see the data as numbers, not ASCII goop. template <typename Message> std::vector<int> Encode(const Message& message) { std::vector<uint8_t> chars = drake::lcm::EncodeLcmMessage(message); return std::vector<int>(chars.begin(), chars.end()); } // Compares the encoded bytes for two messages. The messages are same type in // the sense that they are the same LCM message definition, but the C++ types // will actually be different -- one for our own lcm_gen tool and one for the // upstream lcm_gen tool. template <typename ActualMessage, typename ExpectedMessage> bool CompareEncode(const ActualMessage& actual, const ExpectedMessage& expected) { const auto actual_bytes = Encode(actual); const auto expected_bytes = Encode(expected); // As a convenience, provide a specific wrong-size failure message. // The default output for EXPECT_EQ on the two arrays is a bit hard to grok. EXPECT_EQ(actual_bytes.size(), expected_bytes.size()); // As a convenience, provide an acute summary of the first differing byte. // The default output for EXPECT_EQ on the two arrays is a bit hard to grok. for (size_t i = 0; i < actual_bytes.size() && i < expected_bytes.size(); ++i) { if (actual_bytes[i] != expected_bytes[i]) { EXPECT_EQ(actual_bytes[i], expected_bytes[i]) << i << "th message byte"; break; } } // If this fails, the both array's entire contents will be printed. EXPECT_EQ(actual_bytes, expected_bytes); return actual_bytes == expected_bytes; } // Check that our encoded 'lima' message is identical to upstream. GTEST_TEST(RomeoTest, LimaEncode) { // The hash is the same. ASSERT_EQ(papa::lima::getHash(), romeo::lima::getHash()); // Default-constructed messages encode the same. papa::lima papa{}; romeo::lima romeo{}; ASSERT_TRUE(CompareEncode(papa, romeo)); // We'll change the struct fields one at a time, and keep checking that both // papa and romeo encode the same as we do so. // clang-format off auto mutations = std::make_tuple( [](auto* message) { message->golf = true; }, [](auto* message) { message->bravo = 22; }, [](auto* message) { message->delta = 2.25; }, [](auto* message) { message->foxtrot = 22.125; }, [](auto* message) { message->india8 = 22; }, [](auto* message) { message->india16 = 22222; }, [](auto* message) { message->india32 = 22222222; }, [](auto* message) { message->india64 = 222222222222222222; }); // clang-format on const auto check_one = [&](const auto& mutation) { EXPECT_NO_THROW(mutation(&papa)); EXPECT_NO_THROW(mutation(&romeo)); ASSERT_TRUE(CompareEncode(papa, romeo)); }; std::apply( [&](const auto&... mutation) { (check_one(mutation), ...); }, mutations); } // Check that our generated code for `struct lima` can send and receive data. 
GTEST_TEST(PapaTest, LimaRoundTrip) { papa::lima send{}; send.golf = true; send.bravo = 22; send.delta = 2.25; send.foxtrot = 22.125; send.india8 = 22; send.india16 = 22222; send.india32 = 22222222; send.india64 = 222222222222222222; auto data = drake::lcm::EncodeLcmMessage(send); auto receive = drake::lcm::DecodeLcmMessage<papa::lima>(data); EXPECT_EQ(receive.golf, true); EXPECT_EQ(receive.bravo, 22); EXPECT_EQ(receive.delta, 2.25); EXPECT_EQ(receive.foxtrot, 22.125); EXPECT_EQ(receive.india8, 22); EXPECT_EQ(receive.india16, 22222); EXPECT_EQ(receive.india32, 22222222); EXPECT_EQ(receive.india64, 222222222222222222); // When the received data is cut short, the message detects the error. data.resize(data.size() - 1); EXPECT_EQ(receive.decode(data.data(), 0, data.size()), -1); // When the send buffer isn't large enough, the message detects the error. EXPECT_EQ(send.encode(data.data(), 0, data.size()), -1); } // With the old lcm-gen, partially-fixed-size arrays are typed as std::vector so // need manual size bookkeeping. template <class T> void MaybeResizeLegacyLcmGenArray(std::vector<T>* array, int new_size) { DRAKE_DEMAND(array != nullptr); array->resize(new_size); } // With our new lcm-gen, partially-fixed-size arrays are typed as std::array so // don't need any manual bookkeeping. template <class T, std::size_t N> void MaybeResizeLegacyLcmGenArray(const std::array<T, N>*, int new_size) { DRAKE_DEMAND(new_size == N); } // Check that our encoded 'mike' message is identical to upstream. GTEST_TEST(RomeoTest, MikeEncode) { ASSERT_EQ(papa::mike::getHash(), romeo::mike::getHash()); papa::mike papa{}; romeo::mike romeo{}; ASSERT_TRUE(CompareEncode(papa, romeo)); // We'll change the struct fields one at a time, and keep checking that both // papa and romeo encode the same as we do so. // clang-format off auto mutations = std::make_tuple( [](auto* message) { message->delta[0] = 2.5; }, [](auto* message) { message->foxtrot[3][4] = 1.25; }, [](auto* message) { message->alpha.india8 = 22; }, [](auto* message) { message->sierra = "sierra"; }, [](auto* message) { const int rows = 2; const int cols = 3; message->rows = rows; message->cols = cols; message->bravo.resize(rows); message->india8.resize(rows); for (auto& item : message->india8) { item.resize(cols); } MaybeResizeLegacyLcmGenArray(&message->india16, 7); for (auto& item : message->india16) { item.resize(cols); } message->india32.resize(rows); for (auto& item : message->india32) { MaybeResizeLegacyLcmGenArray(&item, 11); } message->yankee.resize(rows); message->zulu.resize(rows); for (auto& item : message->zulu) { MaybeResizeLegacyLcmGenArray(&item, 2); } }, [](auto* message) { message->xray[1].india8 = 22; }, [](auto* message) { message->yankee[1].india16 = 2222; }, [](auto* message) { message->zulu[1][0].india32 = 22222; }); // clang-format on const auto check_one = [&](const auto& mutation) { EXPECT_NO_THROW(mutation(&papa)); EXPECT_NO_THROW(mutation(&romeo)); ASSERT_TRUE(CompareEncode(papa, romeo)); }; std::apply( [&](const auto&... mutation) { (check_one(mutation), ...); }, mutations); } // Checks that our generated code for `struct mike` can send and receive data. 
GTEST_TEST(PapaTest, MikeRoundTrip) { papa::mike send{}; send.delta = {2.5, 22.25, 222.125}; send.foxtrot[3] = {2222}; send.alpha.india8 = 22; send.sierra = "sierra"; send.rows = 2; send.cols = 3; send.bravo.resize(send.rows); send.india8.resize(send.rows); for (auto& item : send.india8) { item.resize(send.cols); } for (auto& item : send.india16) { item.resize(send.cols); } send.india32.resize(send.rows); send.xray[1].india8 = 22; send.yankee.resize(send.rows); send.yankee[1].india16 = 2222; send.zulu.resize(send.rows); send.zulu[1][0].india32 = 22222; auto data = drake::lcm::EncodeLcmMessage(send); auto receive = drake::lcm::DecodeLcmMessage<papa::mike>(data); EXPECT_EQ(receive.delta.at(2), 222.125); EXPECT_EQ(receive.foxtrot.at(3).at(0), 2222); EXPECT_EQ(receive.alpha.india8, 22); EXPECT_EQ(receive.sierra, "sierra"); EXPECT_EQ(receive.rows, 2); EXPECT_EQ(receive.cols, 3); EXPECT_EQ(receive.xray.at(1).india8, 22); EXPECT_EQ(receive.yankee.at(1).india16, 2222); EXPECT_EQ(receive.zulu.at(1).at(0).india32, 22222); // When the message is internally inconsistent, encoding detects the error. send.india8.at(1).resize(send.cols - 1); EXPECT_EQ(send.encode(data.data(), 0, data.size()), -1); send.india8.at(1).resize(send.cols); // When the received data is cut short, the message detects the error. data.resize(data.size() - 1); EXPECT_EQ(receive.decode(data.data(), 0, data.size()), -1); // When the send buffer isn't large enough, the message detects the error. EXPECT_EQ(send.encode(data.data(), 0, data.size()), -1); } // Check that our encoded 'november' message is identical to upstream. GTEST_TEST(RomeoTest, NovemberEncode) { // The hash is the same. ASSERT_EQ(papa::november::getHash(), romeo::november::getHash()); // Default-constructed messages encode the same. papa::november papa{}; romeo::november romeo{}; ASSERT_TRUE(CompareEncode(papa, romeo)); // We'll change the struct fields one at a time, and keep checking that both // papa and romeo encode the same as we do so. // clang-format off auto mutations = std::make_tuple( [](auto* message) { message->alpha.india8 = 22; }, [](auto* message) { message->bravo.india16 = 2222; }, [](auto* message) { message->charlie = 22222222; }); // clang-format on const auto check_one = [&](const auto& mutation) { EXPECT_NO_THROW(mutation(&papa)); EXPECT_NO_THROW(mutation(&romeo)); ASSERT_TRUE(CompareEncode(papa, romeo)); }; std::apply( [&](const auto&... mutation) { (check_one(mutation), ...); }, mutations); } // Check that our generated code for `struct november` can send and receive // data. GTEST_TEST(PapaTest, NovemberRoundTrip) { papa::november send{}; send.alpha.india8 = 22; send.bravo.india16 = 2222; send.charlie = 22222222; auto data = drake::lcm::EncodeLcmMessage(send); auto receive = drake::lcm::DecodeLcmMessage<papa::november>(data); EXPECT_EQ(receive.alpha.india8, 22); EXPECT_EQ(receive.bravo.india16, 2222); EXPECT_EQ(receive.charlie, 22222222); // When the received data is cut short, the message detects the error. data.resize(data.size() - 1); EXPECT_EQ(receive.decode(data.data(), 0, data.size()), -1); // When the send buffer isn't large enough, the message detects the error. EXPECT_EQ(send.encode(data.data(), 0, data.size()), -1); } // TODO(jwnimmer-tri) We're currently missing unit test coverage for messages // that can nest inside themselves (i.e., recursive message definitions). See // "We avoid recursions by setting C([list]) = 0 if [list] contains C." 
from // https://lcm-proj.github.io/lcm/content/lcm-type-ref.html#fingerprint-computation // Our codegen probably does this correctly already, but we should test it. } // namespace
0
/home/johnshepherd/drake/tools/lcm_gen
/home/johnshepherd/drake/tools/lcm_gen/test/lima.lcm
// A sample LCM message definition for unit testing. package papa; /** This is multi-line C-style comment. * It is veeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeery long. */ struct lima { // All fixed-size primitive types (i.e., no string). boolean golf; byte bravo; double delta; float foxtrot; int8_t india8; int16_t india16; int32_t india32; int64_t india64; // Constants. const double charlie_delta = 3.25e1; const float charlie_foxtrot = 4.5e2; const int8_t charlie_india8 = -8; const int16_t charlie_india16 = 16; const int32_t charlie_india32 = 32; const int64_t charlie_india64 = 64; }
0
/home/johnshepherd/drake/tools/lcm_gen
/home/johnshepherd/drake/tools/lcm_gen/test/lcm_gen_test.py
from pathlib import Path import tempfile import unittest from python import runfiles from tools.lcm_gen import ( CppGen, Parser, PrimitiveType, Struct, UserType, ) class BaseTest(unittest.TestCase): def setUp(self): self.maxDiff = None self._manifest = runfiles.Create() self._lima_path = Path(self._manifest.Rlocation( "drake/tools/lcm_gen/test/lima.lcm")) self._lima_hpp_path = Path(self._manifest.Rlocation( "drake/tools/lcm_gen/test/goal/lima.hpp")) self._mike_path = Path(self._manifest.Rlocation( "drake/tools/lcm_gen/test/mike.lcm")) self._mike_hpp_path = Path(self._manifest.Rlocation( "drake/tools/lcm_gen/test/goal/mike.hpp")) self._november_path = Path(self._manifest.Rlocation( "drake/tools/lcm_gen/test/november.lcm")) self._november_hpp_path = Path(self._manifest.Rlocation( "drake/tools/lcm_gen/test/goal/november.hpp")) assert self._lima_path.exists() assert self._lima_hpp_path.exists() assert self._mike_path.exists() assert self._mike_hpp_path.exists() assert self._november_path.exists() assert self._november_hpp_path.exists() class TestParser(BaseTest): """Tests for the Parser class.""" @staticmethod def _join_lines(lines): return "\n".join(lines) + "\n" def test_remove_c_comments(self): """C comments are replaced by whitespace (to preserve line numbers in error messages). """ self.assertMultiLineEqual( Parser._remove_c_comments(self._join_lines([ "foo", "bar /* comment", "still comment ", "comment */ eol", "bar/*quux*/baz", ])), self._join_lines([ "foo", "bar ", " ", " eol", "bar baz", ])) def test_remove_cpp_comments(self): """C++ comments are snipped to end-of-line.""" self.assertMultiLineEqual( Parser._remove_cpp_comments(self._join_lines([ "foo", "bar // eol", "// line", "next", ])), self._join_lines([ "foo", "bar ", "", "next" ])) def test_parse_lima(self): """Checks the parse tree for `lima.lcm`.""" lima = Parser.parse(filename=self._lima_path) self.assertEqual(str(lima), """\ struct papa.lima { const double charlie_delta = 3.25e1; const float charlie_foxtrot = 4.5e2; const int8_t charlie_india8 = -8; const int16_t charlie_india16 = 16; const int32_t charlie_india32 = 32; const int64_t charlie_india64 = 64; boolean golf; byte bravo; double delta; float foxtrot; int8_t india8; int16_t india16; int32_t india32; int64_t india64; } """) # Check the value <=> value_str equivalence. for c in lima.constants: if c.typ in (PrimitiveType.float, PrimitiveType.double): self.assertIsInstance(c.value, float) self.assertEqual(c.value, float(c.value_str)) else: self.assertIsInstance(c.value, int) self.assertEqual(c.value, int(c.value_str)) def test_parse_mike(self): """Checks the parse tree for `mike.lcm`.""" mike = Parser.parse(filename=self._mike_path) self.assertEqual(str(mike), """\ struct papa.mike { double delta[3]; float foxtrot[4][5]; papa.lima alpha; string sierra; int32_t rows; int32_t cols; byte bravo[rows]; int8_t india8[rows][cols]; int16_t india16[7][cols]; int32_t india32[rows][11]; papa.lima xray[2]; papa.lima yankee[rows]; papa.lima zulu[rows][2]; } """) # Check one field carefully for its exact representation. 
(zulu,) = [f for f in mike.fields if f.name == "zulu"] self.assertEqual(zulu.typ.package, "papa") self.assertEqual(zulu.typ.name, "lima") self.assertEqual(zulu.array_dims[0], "rows") self.assertEqual(zulu.array_dims[1], 2) def test_parse_november(self): """Checks the parse tree for `november.lcm`.""" november = Parser.parse(filename=self._november_path) self.assertEqual(str(november), """\ struct papa.november { papa.lima alpha; papa.lima bravo; int32_t charlie; } """) def test_type_str(self): """Tests UserType.__str__. The end-to-end tests of the parser wouldn't usually hit these corner cases because most types are immediately resolved to use fully-qualified names (i.e., with a non-None package). """ dut = UserType(package=None, name="bar") self.assertEqual(str(dut), "bar") dut = UserType(package="foo", name="bar") self.assertEqual(str(dut), "foo.bar") def _parse_str(self, content): with tempfile.NamedTemporaryFile( mode="w", prefix="lcm_gen_test_", encoding="utf-8") as f: f.write(content) f.flush() return Parser.parse(filename=f.name) def test_missing_package_semi(self): with self.assertRaisesRegex(SyntaxError, "Expected ';'.*got.*struct"): self._parse_str('package foo /*;*/ struct empty { }') def test_multiline_const(self): foo = self._parse_str('struct foo { const int8_t x = 1, y = 2; }') self.assertEqual(foo.constants[0].name, "x") self.assertEqual(foo.constants[0].value, 1) self.assertEqual(foo.constants[1].name, "y") self.assertEqual(foo.constants[1].value, 2) def test_bad_const_type(self): with self.assertRaisesRegex(SyntaxError, "Expected.*primitive type"): self._parse_str('struct bad { const string name = "foo"; }') with self.assertRaisesRegex(SyntaxError, "Expected.*primitive type"): self._parse_str('struct bad { const bignum x = 2222; }') def test_bad_const_value(self): with self.assertRaisesRegex(SyntaxError, "Invalid.*value.*0x7f"): self._parse_str('struct bad { const int8_t x = 0x7f; }') def test_missing_const_name(self): with self.assertRaisesRegex(SyntaxError, "Expected.*NAME"): self._parse_str('struct bad { const double /* name */ = 1; }') class TestCppGen(BaseTest): """Tests for the CppGen class. For the most part, these merely compare the generated code to a checked-in goal file. Testing that the generated code works as intended happens in the C++ unit test `functional_test.cc`. 
""" _HELP = """ =========================================================================== To replace the goal files with newly-regenerated copies, run this command: bazel run //tools/lcm_gen -- \ tools/lcm_gen/test/*.lcm --outdir=tools/lcm_gen/test/goal =========================================================================== """ def test_lima_text(self): """The generated text for lima.h exactly matches the goal file.""" lima = Parser.parse(filename=self._lima_path) expected_text = self._lima_hpp_path.read_text(encoding="utf-8") actual_text = CppGen(struct=lima).generate() self.assertMultiLineEqual(expected_text, actual_text, self._HELP) def test_mike_text(self): """The generated text for mike.h exactly matches the goal file.""" mike = Parser.parse(filename=self._mike_path) expected_text = self._mike_hpp_path.read_text(encoding="utf-8") actual_text = CppGen(struct=mike).generate() self.assertMultiLineEqual(expected_text, actual_text, self._HELP) def test_november_text(self): """The generated text for november.h exactly matches the goal file.""" november = Parser.parse(filename=self._november_path) expected_text = self._november_hpp_path.read_text(encoding="utf-8") actual_text = CppGen(struct=november).generate() self.assertMultiLineEqual(expected_text, actual_text, self._HELP) def test_no_package(self): """Sanity test for a message without any LCM package specified.""" empty = Struct(typ=UserType(package=None, name="empty")) actual_text = CppGen(struct=empty).generate() lines = actual_text.splitlines() while lines[0] == "" or lines[0][0] == "#": # Skip over blank lines and preprocessor lines. lines.pop(0) # The first real line of code should be the class opener. self.assertEqual(lines[0], "class empty {") # The last real line of code should be the class closer. self.assertEqual(lines[-1], "};")
0
/home/johnshepherd/drake/tools/lcm_gen
/home/johnshepherd/drake/tools/lcm_gen/test/november.lcm
// A sample LCM message definition for unit testing. package papa; struct november { // Sub-struct (non-array), using unqualified (no package) name. lima alpha; // Sub-struct (non-array), using qualified name. papa.lima bravo; // Tack a primitive onto the end. int32_t charlie; }
0
/home/johnshepherd/drake/tools/lcm_gen
/home/johnshepherd/drake/tools/lcm_gen/test/mike.lcm
// A sample LCM message definition for unit testing. package papa; struct mike { // Fixed-size arrays. double delta[3]; float foxtrot[4][5]; // Sub-struct (non-array), using unqualified (no package) name. lima alpha; // Variable-length string. string sierra; // Variable-sized arrays. int32_t rows; int32_t cols; byte bravo[rows]; int8_t india8[rows][cols]; int16_t india16[7][cols]; int32_t india32[rows][11]; // Arrays of sub-structs, using package-qualified name. papa.lima xray[2]; papa.lima yankee[rows]; papa.lima zulu[rows][2]; }
0
/home/johnshepherd/drake/tools/lcm_gen/test
/home/johnshepherd/drake/tools/lcm_gen/test/goal/mike.hpp
#pragma once #include <array> #include <cstddef> #include <cstdint> #include <cstring> #include <string> #include <tuple> #include <utility> #include <vector> #include "papa/lima.hpp" namespace papa { class mike { public: std::array<double, 3> delta; std::array<std::array<float, 5>, 4> foxtrot; papa::lima alpha; std::string sierra; int32_t rows; int32_t cols; std::vector<uint8_t> bravo; std::vector<std::vector<int8_t>> india8; std::array<std::vector<int16_t>, 7> india16; std::vector<std::array<int32_t, 11>> india32; std::array<papa::lima, 2> xray; std::vector<papa::lima> yankee; std::vector<std::array<papa::lima, 2>> zulu; // These functions match the expected API from the legacy lcm-gen tool, // but note that we use `int64_t` instead of `int` for byte counts. //@{ static const char* getTypeName() { return "mike"; } int64_t getEncodedSize() const { return 8 + _getEncodedSizeNoHash(); } int64_t _getEncodedSizeNoHash() const { int64_t _result = 0; if (rows < 0) { return _result; } if (cols < 0) { return _result; } _result += 8 * 3; // delta _result += 4 * 4 * 5; // foxtrot _result += alpha._getEncodedSizeNoHash(); _result += sizeof(int32_t) + sierra.size() + 1; _result += 4; // rows _result += 4; // cols _result += 1 * rows; // bravo _result += 1 * rows * cols; // india8 _result += 2 * 7 * cols; // india16 _result += 4 * rows * 11; // india32 for (const auto& _xray_0 : xray) { _result += _xray_0._getEncodedSizeNoHash(); } for (const auto& _yankee_0 : yankee) { _result += _yankee_0._getEncodedSizeNoHash(); } for (const auto& _zulu_0 : zulu) { for (const auto& _zulu_1 : _zulu_0) { _result += _zulu_1._getEncodedSizeNoHash(); } } return _result; } template <bool with_hash = true> int64_t encode(void* buf, int64_t offset, int64_t maxlen) const { uint8_t* const _begin = static_cast<uint8_t*>(buf); uint8_t* const _start = _begin + offset; uint8_t* const _end = _begin + maxlen; uint8_t* _cursor = _start; return this->_encode<with_hash>(&_cursor, _end) ? (_cursor - _start) : -1; } int64_t _encodeNoHash(void* buf, int64_t offset, int64_t maxlen) const { return encode<false>(buf, offset, maxlen); } template <bool with_hash = true> int64_t decode(const void* buf, int64_t offset, int64_t maxlen) { const uint8_t* const _begin = static_cast<const uint8_t*>(buf); const uint8_t* const _start = _begin + offset; const uint8_t* const _end = _begin + maxlen; const uint8_t* _cursor = _start; return this->_decode<with_hash>(&_cursor, _end) ? (_cursor - _start) : -1; } int64_t _decodeNoHash(const void* buf, int64_t offset, int64_t maxlen) { return decode<false>(buf, offset, maxlen); } static constexpr int64_t getHash() { return static_cast<int64_t>(_get_hash_impl()); } template <typename Parents> static uint64_t _computeHash(const Parents*) { return getHash(); } //@} // New-style (constexpr) hashing. template <size_t N = 0> static constexpr uint64_t _get_hash_impl( const std::array<uint64_t, N>& parents = {}) { const uint64_t base_hash = 0xd2dc16c61113f6b3ull; std::array<uint64_t, N + 1> new_parents{base_hash}; for (size_t n = 0; n < N; ++n) { if (parents[n] == base_hash) { // Special case for recursive message definition. return 0; } new_parents[n + 1] = parents[n]; } const uint64_t composite_hash = base_hash + papa::lima::_get_hash_impl(new_parents) + papa::lima::_get_hash_impl(new_parents) + papa::lima::_get_hash_impl(new_parents) + papa::lima::_get_hash_impl(new_parents); return (composite_hash << 1) + ((composite_hash >> 63) & 1); } // New-style encoding. 
template <bool with_hash = true> bool _encode(uint8_t** _cursor, uint8_t* _end) const { constexpr int64_t _hash = _get_hash_impl(); return // true iff success (rows >= 0) && (cols >= 0) && (with_hash ? _encode_field(_hash, _cursor, _end) : true) && _encode_field(delta, _cursor, _end, ArrayDims<1>{3}) && _encode_field(foxtrot, _cursor, _end, ArrayDims<2>{4, 5}) && _encode_field(alpha, _cursor, _end) && _encode_field(sierra, _cursor, _end) && _encode_field(rows, _cursor, _end) && _encode_field(cols, _cursor, _end) && _encode_field(bravo, _cursor, _end, ArrayDims<1>{rows}) && _encode_field(india8, _cursor, _end, ArrayDims<2>{rows, cols}) && _encode_field(india16, _cursor, _end, ArrayDims<2>{7, cols}) && _encode_field(india32, _cursor, _end, ArrayDims<2>{rows, 11}) && _encode_field(xray, _cursor, _end, ArrayDims<1>{2}) && _encode_field(yankee, _cursor, _end, ArrayDims<1>{rows}) && _encode_field(zulu, _cursor, _end, ArrayDims<2>{rows, 2}); } // New-style decoding. template <bool with_hash = true> bool _decode(const uint8_t** _cursor, const uint8_t* _end) { constexpr int64_t _expected_hash = _get_hash_impl(); int64_t _hash = _expected_hash; return // true iff success (with_hash ? _decode_field(&_hash, _cursor, _end) : true) && (_hash == _expected_hash) && _decode_field(&delta, _cursor, _end, ArrayDims<1>{3}) && _decode_field(&foxtrot, _cursor, _end, ArrayDims<2>{4, 5}) && _decode_field(&alpha, _cursor, _end) && _decode_field(&sierra, _cursor, _end) && _decode_field(&rows, _cursor, _end) && (rows >= 0) && _decode_field(&cols, _cursor, _end) && (cols >= 0) && _decode_field(&bravo, _cursor, _end, ArrayDims<1>{rows}) && _decode_field(&india8, _cursor, _end, ArrayDims<2>{rows, cols}) && _decode_field(&india16, _cursor, _end, ArrayDims<2>{7, cols}) && _decode_field(&india32, _cursor, _end, ArrayDims<2>{rows, 11}) && _decode_field(&xray, _cursor, _end, ArrayDims<1>{2}) && _decode_field(&yankee, _cursor, _end, ArrayDims<1>{rows}) && _decode_field(&zulu, _cursor, _end, ArrayDims<2>{rows, 2}); } private: // Given an N-byte integer at `_input` in network byte order, returns it as // a host unsigned integer using the matching unsigned integer type. (This // is also used to convert host to network order; it's the same operation.) template <size_t N> static auto _byteswap(const void* _input) { // clang-format off using result_t = std::conditional_t< N == 1, uint8_t, std::conditional_t< N == 2, uint16_t, std::conditional_t< N == 4, uint32_t, std::conditional_t< N == 8, uint64_t, void>>>>; // clang-format on result_t _result; std::memcpy(&_result, _input, N); // TODO(jwnimmer-tri) Don't bswap on PowerPC. if constexpr (N == 1) { return _result; } else if constexpr (N == 2) { return __builtin_bswap16(_result); } else if constexpr (N == 4) { return __builtin_bswap32(_result); } else if constexpr (N == 8) { return __builtin_bswap64(_result); } } // The dimensions of an array, for use during encoding / decoding, e.g., for // a message field `int8_t image[6][4]` we'd use `ArrayDims<2>{6, 4}`. template <size_t ndims> using ArrayDims = std::array<int64_t, ndims>; // Returns the second and following elements of _dims (i.e., _dims[1:]). 
// https://en.wikipedia.org/wiki/CAR_and_CDR template <size_t ndims> static ArrayDims<ndims - 1> _cdr(const std::array<int64_t, ndims>& _dims) { static_assert(ndims > 0); ArrayDims<ndims - 1> _result; for (size_t i = 1; i < ndims; ++i) { _result[i - 1] = _dims[i]; } return _result; } // Given a field (or child element within a field), encodes it into the given // byte cursor and advances the cursor, returning true on success. Arrays are // passed with `_input` as vector-like container and `_dims` as the list of // multi-dimensional vector sizes, e.g., `int8_t image[6][4]` would be called // like `_encode_field(image.at(0), &cursor, end, ArrayDims<2>{6, 4})`. In // LCM messages, multi-dimensional arrays are encoded using C's memory layout // (i.e., with the last dimension as the most tightly packed.) template <typename T, size_t ndims = 0> static bool _encode_field(const T& _input, uint8_t** _cursor, uint8_t* _end, const ArrayDims<ndims>& _dims = ArrayDims<0>{}) { static_assert(!std::is_pointer_v<T>); if constexpr (ndims == 0) { // With no array dimensions, just decode the field directly. if constexpr (std::is_fundamental_v<T>) { // POD input. constexpr size_t N = sizeof(T); if (*_cursor + N > _end) { return false; } auto _swapped = _byteswap<N>(&_input); std::memcpy(*_cursor, &_swapped, N); *_cursor += N; return true; } else if constexpr (std::is_same_v<T, std::string>) { // String input. const int32_t _size = _input.size() + 1; const bool ok = (_input.size() < INT32_MAX) && (*_cursor + sizeof(_size) + _size <= _end) && _encode_field(_size, _cursor, _end); if (ok) { std::memcpy(*_cursor, _input.c_str(), _size); } *_cursor += _size; return ok; } else { // Struct input. return _input.template _encode<false>(_cursor, _end); } } else { // Cross-check the container size vs the size specified in the message's // size field. (For fixed-size containers this is a no-op.) if (static_cast<int64_t>(_input.size()) != _dims[0]) { return false; } // Encode each sub-item in turn, forwarding all the _dims but the first. for (const auto& _child : _input) { if (!_encode_field(_child, _cursor, _end, _cdr(_dims))) { return false; } } return true; } } // Given a pointer to a field (or child element within a field), decodes it // from the given byte cursor and advances the cursor, returning true on // success. The array `_dims` and storage order follow the same pattern as in // _encode_field(); refer to those docs for details. template <typename T, size_t ndims = 0> static bool _decode_field(T* _output, const uint8_t** _cursor, const uint8_t* _end, const ArrayDims<ndims>& _dims = {}) { static_assert(!std::is_pointer_v<T>); if constexpr (ndims == 0) { // With no array dimensions, just decode the field directly. if constexpr (std::is_fundamental_v<T>) { // POD output. constexpr size_t N = sizeof(T); if (*_cursor + N > _end) { return false; } auto _swapped = _byteswap<N>(*_cursor); std::memcpy(_output, &_swapped, N); *_cursor += N; return true; } else if constexpr (std::is_same_v<T, std::string>) { // String output. int32_t _size{}; const bool ok = _decode_field(&_size, _cursor, _end) && (_size > 0) && (*_cursor + _size <= _end); if (ok) { _output->replace(_output->begin(), _output->end(), *_cursor, *_cursor + _size - 1); } *_cursor += _size; return ok; } else { // Struct output. return _output->template _decode<false>(_cursor, _end); } } else { // In case of a variable-size dimension, resize our storage to match. 
if constexpr (std::is_same_v<T, std::vector<typename T::value_type>>) { _output->resize(_dims[0]); } // Decode each sub-item in turn. for (auto& _child : *_output) { if (!_decode_field(&_child, _cursor, _end, _cdr(_dims))) { return false; } } return true; } } }; } // namespace papa
0
/home/johnshepherd/drake/tools/lcm_gen/test
/home/johnshepherd/drake/tools/lcm_gen/test/goal/lima.hpp
#pragma once #include <array> #include <cstddef> #include <cstdint> #include <cstring> #include <string> #include <tuple> #include <utility> #include <vector> namespace papa { class lima { public: static constexpr double charlie_delta = 3.25e1; static constexpr float charlie_foxtrot = 4.5e2; static constexpr int8_t charlie_india8 = -8; static constexpr int16_t charlie_india16 = 16; static constexpr int32_t charlie_india32 = 32; static constexpr int64_t charlie_india64 = 64; bool golf; uint8_t bravo; double delta; float foxtrot; int8_t india8; int16_t india16; int32_t india32; int64_t india64; // These functions match the expected API from the legacy lcm-gen tool, // but note that we use `int64_t` instead of `int` for byte counts. //@{ static const char* getTypeName() { return "lima"; } int64_t getEncodedSize() const { return 8 + _getEncodedSizeNoHash(); } int64_t _getEncodedSizeNoHash() const { int64_t _result = 0; _result += 1; // golf _result += 1; // bravo _result += 8; // delta _result += 4; // foxtrot _result += 1; // india8 _result += 2; // india16 _result += 4; // india32 _result += 8; // india64 return _result; } template <bool with_hash = true> int64_t encode(void* buf, int64_t offset, int64_t maxlen) const { uint8_t* const _begin = static_cast<uint8_t*>(buf); uint8_t* const _start = _begin + offset; uint8_t* const _end = _begin + maxlen; uint8_t* _cursor = _start; return this->_encode<with_hash>(&_cursor, _end) ? (_cursor - _start) : -1; } int64_t _encodeNoHash(void* buf, int64_t offset, int64_t maxlen) const { return encode<false>(buf, offset, maxlen); } template <bool with_hash = true> int64_t decode(const void* buf, int64_t offset, int64_t maxlen) { const uint8_t* const _begin = static_cast<const uint8_t*>(buf); const uint8_t* const _start = _begin + offset; const uint8_t* const _end = _begin + maxlen; const uint8_t* _cursor = _start; return this->_decode<with_hash>(&_cursor, _end) ? (_cursor - _start) : -1; } int64_t _decodeNoHash(const void* buf, int64_t offset, int64_t maxlen) { return decode<false>(buf, offset, maxlen); } static constexpr int64_t getHash() { return static_cast<int64_t>(_get_hash_impl()); } template <typename Parents> static uint64_t _computeHash(const Parents*) { return getHash(); } //@} // New-style (constexpr) hashing. template <size_t N = 0> static constexpr uint64_t _get_hash_impl( const std::array<uint64_t, N>& parents = {}) { const uint64_t base_hash = 0x35fef8dfc801b95eull; for (size_t n = 0; n < N; ++n) { if (parents[n] == base_hash) { // Special case for recursive message definition. return 0; } } const uint64_t composite_hash = base_hash; return (composite_hash << 1) + ((composite_hash >> 63) & 1); } // New-style encoding. template <bool with_hash = true> bool _encode(uint8_t** _cursor, uint8_t* _end) const { constexpr int64_t _hash = _get_hash_impl(); return // true iff success (with_hash ? _encode_field(_hash, _cursor, _end) : true) && _encode_field(golf, _cursor, _end) && _encode_field(bravo, _cursor, _end) && _encode_field(delta, _cursor, _end) && _encode_field(foxtrot, _cursor, _end) && _encode_field(india8, _cursor, _end) && _encode_field(india16, _cursor, _end) && _encode_field(india32, _cursor, _end) && _encode_field(india64, _cursor, _end); } // New-style decoding. template <bool with_hash = true> bool _decode(const uint8_t** _cursor, const uint8_t* _end) { constexpr int64_t _expected_hash = _get_hash_impl(); int64_t _hash = _expected_hash; return // true iff success (with_hash ? 
_decode_field(&_hash, _cursor, _end) : true) && (_hash == _expected_hash) && _decode_field(&golf, _cursor, _end) && _decode_field(&bravo, _cursor, _end) && _decode_field(&delta, _cursor, _end) && _decode_field(&foxtrot, _cursor, _end) && _decode_field(&india8, _cursor, _end) && _decode_field(&india16, _cursor, _end) && _decode_field(&india32, _cursor, _end) && _decode_field(&india64, _cursor, _end); } private: // Given an N-byte integer at `_input` in network byte order, returns it as // a host unsigned integer using the matching unsigned integer type. (This // is also used to convert host to network order; it's the same operation.) template <size_t N> static auto _byteswap(const void* _input) { // clang-format off using result_t = std::conditional_t< N == 1, uint8_t, std::conditional_t< N == 2, uint16_t, std::conditional_t< N == 4, uint32_t, std::conditional_t< N == 8, uint64_t, void>>>>; // clang-format on result_t _result; std::memcpy(&_result, _input, N); // TODO(jwnimmer-tri) Don't bswap on PowerPC. if constexpr (N == 1) { return _result; } else if constexpr (N == 2) { return __builtin_bswap16(_result); } else if constexpr (N == 4) { return __builtin_bswap32(_result); } else if constexpr (N == 8) { return __builtin_bswap64(_result); } } // The dimensions of an array, for use during encoding / decoding, e.g., for // a message field `int8_t image[6][4]` we'd use `ArrayDims<2>{6, 4}`. template <size_t ndims> using ArrayDims = std::array<int64_t, ndims>; // Returns the second and following elements of _dims (i.e., _dims[1:]). // https://en.wikipedia.org/wiki/CAR_and_CDR template <size_t ndims> static ArrayDims<ndims - 1> _cdr(const std::array<int64_t, ndims>& _dims) { static_assert(ndims > 0); ArrayDims<ndims - 1> _result; for (size_t i = 1; i < ndims; ++i) { _result[i - 1] = _dims[i]; } return _result; } // Given a field (or child element within a field), encodes it into the given // byte cursor and advances the cursor, returning true on success. Arrays are // passed with `_input` as vector-like container and `_dims` as the list of // multi-dimensional vector sizes, e.g., `int8_t image[6][4]` would be called // like `_encode_field(image.at(0), &cursor, end, ArrayDims<2>{6, 4})`. In // LCM messages, multi-dimensional arrays are encoded using C's memory layout // (i.e., with the last dimension as the most tightly packed.) template <typename T, size_t ndims = 0> static bool _encode_field(const T& _input, uint8_t** _cursor, uint8_t* _end, const ArrayDims<ndims>& _dims = ArrayDims<0>{}) { static_assert(!std::is_pointer_v<T>); if constexpr (ndims == 0) { // With no array dimensions, just decode the field directly. if constexpr (std::is_fundamental_v<T>) { // POD input. constexpr size_t N = sizeof(T); if (*_cursor + N > _end) { return false; } auto _swapped = _byteswap<N>(&_input); std::memcpy(*_cursor, &_swapped, N); *_cursor += N; return true; } else if constexpr (std::is_same_v<T, std::string>) { // String input. const int32_t _size = _input.size() + 1; const bool ok = (_input.size() < INT32_MAX) && (*_cursor + sizeof(_size) + _size <= _end) && _encode_field(_size, _cursor, _end); if (ok) { std::memcpy(*_cursor, _input.c_str(), _size); } *_cursor += _size; return ok; } else { // Struct input. return _input.template _encode<false>(_cursor, _end); } } else { // Cross-check the container size vs the size specified in the message's // size field. (For fixed-size containers this is a no-op.) 
if (static_cast<int64_t>(_input.size()) != _dims[0]) { return false; } // Encode each sub-item in turn, forwarding all the _dims but the first. for (const auto& _child : _input) { if (!_encode_field(_child, _cursor, _end, _cdr(_dims))) { return false; } } return true; } } // Given a pointer to a field (or child element within a field), decodes it // from the given byte cursor and advances the cursor, returning true on // success. The array `_dims` and storage order follow the same pattern as in // _encode_field(); refer to those docs for details. template <typename T, size_t ndims = 0> static bool _decode_field(T* _output, const uint8_t** _cursor, const uint8_t* _end, const ArrayDims<ndims>& _dims = {}) { static_assert(!std::is_pointer_v<T>); if constexpr (ndims == 0) { // With no array dimensions, just decode the field directly. if constexpr (std::is_fundamental_v<T>) { // POD output. constexpr size_t N = sizeof(T); if (*_cursor + N > _end) { return false; } auto _swapped = _byteswap<N>(*_cursor); std::memcpy(_output, &_swapped, N); *_cursor += N; return true; } else if constexpr (std::is_same_v<T, std::string>) { // String output. int32_t _size{}; const bool ok = _decode_field(&_size, _cursor, _end) && (_size > 0) && (*_cursor + _size <= _end); if (ok) { _output->replace(_output->begin(), _output->end(), *_cursor, *_cursor + _size - 1); } *_cursor += _size; return ok; } else { // Struct output. return _output->template _decode<false>(_cursor, _end); } } else { // In case of a variable-size dimension, resize our storage to match. if constexpr (std::is_same_v<T, std::vector<typename T::value_type>>) { _output->resize(_dims[0]); } // Decode each sub-item in turn. for (auto& _child : *_output) { if (!_decode_field(&_child, _cursor, _end, _cdr(_dims))) { return false; } } return true; } } }; } // namespace papa
0
/home/johnshepherd/drake/tools/lcm_gen/test
/home/johnshepherd/drake/tools/lcm_gen/test/goal/november.hpp
#pragma once #include <array> #include <cstddef> #include <cstdint> #include <cstring> #include <string> #include <tuple> #include <utility> #include <vector> #include "papa/lima.hpp" namespace papa { class november { public: papa::lima alpha; papa::lima bravo; int32_t charlie; // These functions match the expected API from the legacy lcm-gen tool, // but note that we use `int64_t` instead of `int` for byte counts. //@{ static const char* getTypeName() { return "november"; } int64_t getEncodedSize() const { return 8 + _getEncodedSizeNoHash(); } int64_t _getEncodedSizeNoHash() const { int64_t _result = 0; _result += alpha._getEncodedSizeNoHash(); _result += bravo._getEncodedSizeNoHash(); _result += 4; // charlie return _result; } template <bool with_hash = true> int64_t encode(void* buf, int64_t offset, int64_t maxlen) const { uint8_t* const _begin = static_cast<uint8_t*>(buf); uint8_t* const _start = _begin + offset; uint8_t* const _end = _begin + maxlen; uint8_t* _cursor = _start; return this->_encode<with_hash>(&_cursor, _end) ? (_cursor - _start) : -1; } int64_t _encodeNoHash(void* buf, int64_t offset, int64_t maxlen) const { return encode<false>(buf, offset, maxlen); } template <bool with_hash = true> int64_t decode(const void* buf, int64_t offset, int64_t maxlen) { const uint8_t* const _begin = static_cast<const uint8_t*>(buf); const uint8_t* const _start = _begin + offset; const uint8_t* const _end = _begin + maxlen; const uint8_t* _cursor = _start; return this->_decode<with_hash>(&_cursor, _end) ? (_cursor - _start) : -1; } int64_t _decodeNoHash(const void* buf, int64_t offset, int64_t maxlen) { return decode<false>(buf, offset, maxlen); } static constexpr int64_t getHash() { return static_cast<int64_t>(_get_hash_impl()); } template <typename Parents> static uint64_t _computeHash(const Parents*) { return getHash(); } //@} // New-style (constexpr) hashing. template <size_t N = 0> static constexpr uint64_t _get_hash_impl( const std::array<uint64_t, N>& parents = {}) { const uint64_t base_hash = 0x86ad239bfc105cc3ull; std::array<uint64_t, N + 1> new_parents{base_hash}; for (size_t n = 0; n < N; ++n) { if (parents[n] == base_hash) { // Special case for recursive message definition. return 0; } new_parents[n + 1] = parents[n]; } const uint64_t composite_hash = base_hash + papa::lima::_get_hash_impl(new_parents) + papa::lima::_get_hash_impl(new_parents); return (composite_hash << 1) + ((composite_hash >> 63) & 1); } // New-style encoding. template <bool with_hash = true> bool _encode(uint8_t** _cursor, uint8_t* _end) const { constexpr int64_t _hash = _get_hash_impl(); return // true iff success (with_hash ? _encode_field(_hash, _cursor, _end) : true) && _encode_field(alpha, _cursor, _end) && _encode_field(bravo, _cursor, _end) && _encode_field(charlie, _cursor, _end); } // New-style decoding. template <bool with_hash = true> bool _decode(const uint8_t** _cursor, const uint8_t* _end) { constexpr int64_t _expected_hash = _get_hash_impl(); int64_t _hash = _expected_hash; return // true iff success (with_hash ? _decode_field(&_hash, _cursor, _end) : true) && (_hash == _expected_hash) && _decode_field(&alpha, _cursor, _end) && _decode_field(&bravo, _cursor, _end) && _decode_field(&charlie, _cursor, _end); } private: // Given an N-byte integer at `_input` in network byte order, returns it as // a host unsigned integer using the matching unsigned integer type. (This // is also used to convert host to network order; it's the same operation.) 
template <size_t N> static auto _byteswap(const void* _input) { // clang-format off using result_t = std::conditional_t< N == 1, uint8_t, std::conditional_t< N == 2, uint16_t, std::conditional_t< N == 4, uint32_t, std::conditional_t< N == 8, uint64_t, void>>>>; // clang-format on result_t _result; std::memcpy(&_result, _input, N); // TODO(jwnimmer-tri) Don't bswap on PowerPC. if constexpr (N == 1) { return _result; } else if constexpr (N == 2) { return __builtin_bswap16(_result); } else if constexpr (N == 4) { return __builtin_bswap32(_result); } else if constexpr (N == 8) { return __builtin_bswap64(_result); } } // The dimensions of an array, for use during encoding / decoding, e.g., for // a message field `int8_t image[6][4]` we'd use `ArrayDims<2>{6, 4}`. template <size_t ndims> using ArrayDims = std::array<int64_t, ndims>; // Returns the second and following elements of _dims (i.e., _dims[1:]). // https://en.wikipedia.org/wiki/CAR_and_CDR template <size_t ndims> static ArrayDims<ndims - 1> _cdr(const std::array<int64_t, ndims>& _dims) { static_assert(ndims > 0); ArrayDims<ndims - 1> _result; for (size_t i = 1; i < ndims; ++i) { _result[i - 1] = _dims[i]; } return _result; } // Given a field (or child element within a field), encodes it into the given // byte cursor and advances the cursor, returning true on success. Arrays are // passed with `_input` as vector-like container and `_dims` as the list of // multi-dimensional vector sizes, e.g., `int8_t image[6][4]` would be called // like `_encode_field(image.at(0), &cursor, end, ArrayDims<2>{6, 4})`. In // LCM messages, multi-dimensional arrays are encoded using C's memory layout // (i.e., with the last dimension as the most tightly packed.) template <typename T, size_t ndims = 0> static bool _encode_field(const T& _input, uint8_t** _cursor, uint8_t* _end, const ArrayDims<ndims>& _dims = ArrayDims<0>{}) { static_assert(!std::is_pointer_v<T>); if constexpr (ndims == 0) { // With no array dimensions, just decode the field directly. if constexpr (std::is_fundamental_v<T>) { // POD input. constexpr size_t N = sizeof(T); if (*_cursor + N > _end) { return false; } auto _swapped = _byteswap<N>(&_input); std::memcpy(*_cursor, &_swapped, N); *_cursor += N; return true; } else if constexpr (std::is_same_v<T, std::string>) { // String input. const int32_t _size = _input.size() + 1; const bool ok = (_input.size() < INT32_MAX) && (*_cursor + sizeof(_size) + _size <= _end) && _encode_field(_size, _cursor, _end); if (ok) { std::memcpy(*_cursor, _input.c_str(), _size); } *_cursor += _size; return ok; } else { // Struct input. return _input.template _encode<false>(_cursor, _end); } } else { // Cross-check the container size vs the size specified in the message's // size field. (For fixed-size containers this is a no-op.) if (static_cast<int64_t>(_input.size()) != _dims[0]) { return false; } // Encode each sub-item in turn, forwarding all the _dims but the first. for (const auto& _child : _input) { if (!_encode_field(_child, _cursor, _end, _cdr(_dims))) { return false; } } return true; } } // Given a pointer to a field (or child element within a field), decodes it // from the given byte cursor and advances the cursor, returning true on // success. The array `_dims` and storage order follow the same pattern as in // _encode_field(); refer to those docs for details. 
template <typename T, size_t ndims = 0> static bool _decode_field(T* _output, const uint8_t** _cursor, const uint8_t* _end, const ArrayDims<ndims>& _dims = {}) { static_assert(!std::is_pointer_v<T>); if constexpr (ndims == 0) { // With no array dimensions, just decode the field directly. if constexpr (std::is_fundamental_v<T>) { // POD output. constexpr size_t N = sizeof(T); if (*_cursor + N > _end) { return false; } auto _swapped = _byteswap<N>(*_cursor); std::memcpy(_output, &_swapped, N); *_cursor += N; return true; } else if constexpr (std::is_same_v<T, std::string>) { // String output. int32_t _size{}; const bool ok = _decode_field(&_size, _cursor, _end) && (_size > 0) && (*_cursor + _size <= _end); if (ok) { _output->replace(_output->begin(), _output->end(), *_cursor, *_cursor + _size - 1); } *_cursor += _size; return ok; } else { // Struct output. return _output->template _decode<false>(_cursor, _end); } } else { // In case of a variable-size dimension, resize our storage to match. if constexpr (std::is_same_v<T, std::vector<typename T::value_type>>) { _output->resize(_dims[0]); } // Decode each sub-item in turn. for (auto& _child : *_output) { if (!_decode_field(&_child, _cursor, _end, _cdr(_dims))) { return false; } } return true; } } }; } // namespace papa
0
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/workspace/BUILD.bazel
load("//tools/install:check_licenses.bzl", "check_licenses") load("//tools/install:install.bzl", "install") load("//tools/lint:lint.bzl", "add_lint_tests") load( "//tools/skylark:drake_py.bzl", "drake_py_binary", "drake_py_library", "drake_py_test", ) drake_py_library( name = "module_py", srcs = ["__init__.py"], visibility = [":__subpackages__"], deps = ["//tools:module_py"], ) drake_py_binary( name = "mirror_to_s3", srcs = [ "metadata.py", "mirror_to_s3.py", ], visibility = ["//visibility:private"], deps = [":module_py"], ) drake_py_binary( name = "new_release", srcs = [ "metadata.py", "new_release.py", ], visibility = ["//visibility:private"], deps = [ ":module_py", "@github3_py_internal//:github3_py", ], ) drake_py_binary( name = "vendor_cxx", srcs = ["vendor_cxx.py"], visibility = [ # These should all be of the form "@foo_internal//:__subpackages__". "@clarabel_cpp_internal//:__subpackages__", "@clp_internal//:__subpackages__", "@coinutils_internal//:__subpackages__", "@conex_internal//:__subpackages__", "@fcl_internal//:__subpackages__", "@gz_math_internal//:__subpackages__", "@gz_utils_internal//:__subpackages__", "@ipopt_internal_fromsource//:__subpackages__", "@msgpack_internal//:__subpackages__", "@nlopt_internal//:__subpackages__", "@qhull_internal//:__subpackages__", "@sdformat_internal//:__subpackages__", "@tinyobjloader_internal//:__subpackages__", "@yaml_cpp_internal//:__subpackages__", ], deps = [":module_py"], ) drake_py_test( name = "vendor_cxx_test", srcs = [ "vendor_cxx.py", "vendor_cxx_test.py", ], allow_import_unittest = True, deps = [":module_py"], ) drake_py_binary( name = "cmake_configure_file", srcs = ["cmake_configure_file.py"], visibility = ["//visibility:public"], ) _DRAKE_EXTERNAL_PACKAGE_INSTALLS = ["@%s//:install" % p for p in [ "ccd_internal", "clp_internal", "coinutils_internal", "com_jidesoft_jide_oss", "common_robotics_utilities", "commons_io", "curl_internal", "fcl_internal", "fmt", "gz_math_internal", "gz_utils_internal", "ipopt", "lcm", "libpng_internal", "libtiff_internal", "meshcat", "msgpack_internal", "net_sf_jchart2d", "nanoflann_internal", "nlohmann_internal", "nlopt_internal", "org_apache_xmlgraphics_commons", "picosha2_internal", "pybind11", "qhull_internal", "sdformat_internal", "spdlog", "statsjs", "stduuid_internal", "suitesparse_internal", "tinyobjloader_internal", "tinyxml2_internal", "usockets_internal", "uwebsockets_internal", "vtk_internal", "yaml_cpp_internal", ]] + ["//tools/workspace/%s:install" % p for p in [ "abseil_cpp_internal", "conex_internal", "gflags", "libjpeg_turbo_internal", ]] + select({ "//conditions:default": ["@csdp_internal//:install"], "//tools:no_csdp": [], }) + select({ "//conditions:default": [ "//tools/workspace/crate_universe:install", "@clarabel_cpp_internal//:install", ], "//tools:no_clarabel": [], }) + select({ "//tools:with_gurobi": ["@gurobi//:install"], "//conditions:default": [], }) + select({ "//tools:with_mosek": ["@mosek//:install"], "//conditions:default": [], }) + select({ "//tools:with_snopt": ["//tools/workspace/snopt:install"], "//conditions:default": [], }) + select({ "//conditions:default": [ "@osqp_internal//:install", "@qdldl_internal//:install", ], "//tools:no_osqp": [], }) + select({ "//conditions:default": ["@scs_internal//:install"], "//tools:no_scs": [], }) + select({ "//tools:with_usd": ["@onetbb_internal//:install"], "//conditions:default": [], }) install( name = "install_external_packages", visibility = ["//:__pkg__"], deps = _DRAKE_EXTERNAL_PACKAGE_INSTALLS, ) check_licenses( 
_DRAKE_EXTERNAL_PACKAGE_INSTALLS, ignore_labels = [ # Jar files have their license notices embedded into the jar already; # there is no need for them to install a separate license file. "@com_jidesoft_jide_oss//:install", "@commons_io//:install", "@net_sf_jchart2d//:install", "@org_apache_xmlgraphics_commons//:install", ], ) add_lint_tests()
0
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/workspace/pkg_config.BUILD.tpl
# -*- bazel -*-

# %{topcomment}

load("@drake//tools/skylark:cc.bzl", "cc_library")

licenses(%{licenses})

package(default_visibility = ["//visibility:public"])

cc_library(
    name = %{name},
    srcs = %{srcs},
    hdrs = %{hdrs},
    copts = %{copts},
    defines = %{defines},
    includes = %{includes},
    linkopts = %{linkopts},
    deps = %{deps},
    deprecation = %{extra_deprecation},
)

%{build_epilog}
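For orientation, a filled-in copy of this template might look roughly like the sketch below. The module name, include path, and linkopts are invented placeholders, not output from a real pkg-config run; the exact expansion depends on the substitutions computed in pkg_config.bzl.

```
# -*- bazel -*-

# DO NOT EDIT: generated by pkg_config_repository()

load("@drake//tools/skylark:cc.bzl", "cc_library")

licenses(["notice"])

package(default_visibility = ["//visibility:public"])

cc_library(
    name = "libfoo",
    srcs = [],
    hdrs = glob(["include/**"]) + [],
    copts = [],
    defines = [],
    includes = ["include/usr_include_foo"],
    linkopts = ["-lfoo"],
    deps = [],
    deprecation = None,
)
```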
0
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/workspace/mirror_to_s3.py
"""Mirrors source archives used by repository rules to the drake-mirror bucket on Amazon S3. Unless either --no-download or --no-upload option is specified, needs suitable AWS credentials to be configured per https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html. This script is neither called during the build nor expected to be called by most developers or users of the project. It is only supported when run under Python 3 on macOS or Ubuntu. To run: bazel build //tools/workspace:mirror_to_s3 bazel-bin/tools/workspace/mirror_to_s3 [--no-download] [--no-upload] The --no-download option implies --no-upload. """ import hashlib import os import sys import tempfile import boto3 import botocore import requests from tools.workspace.metadata import read_repository_metadata BUCKET_NAME = 'drake-mirror' BUCKET_URL = 'https://s3.amazonaws.com/drake-mirror/' CLOUDFRONT_URL = 'https://drake-mirror.csail.mit.edu/' CHUNK_SIZE = 65536 def main(argv): transformed_metadata = [] for key, value in read_repository_metadata().items(): if 'downloads' in value: downloads = value['downloads'] else: downloads = [value] for download in downloads: transformed_value = {'sha256': download['sha256']} for url in download['urls']: if url.startswith(BUCKET_URL): transformed_value['object_key'] = url[len(BUCKET_URL):] elif not url.startswith(CLOUDFRONT_URL): if 'url' in transformed_value: raise Exception( f'Multiple non-mirror urls for @{key}. Verify ' f'BUCKET_URL {BUCKET_URL} and CLOUDFRONT_URL ' f'{CLOUDFRONT_URL} are correct and check for ' f'duplicate url values.') transformed_value['url'] = url if 'object_key' not in transformed_value: raise Exception( f'Could NOT determine S3 object key for @{key}. Verify ' f'BUCKET_URL {BUCKET_URL} is correct and check for ' f'missing url value with prefix {BUCKET_URL}.') if 'url' not in transformed_value: raise Exception( f'Missing non-mirror url for @{key}. 
Verify BUCKET_URL ' f'{BUCKET_URL} is correct and check for missing url value ' f'with prefix {BUCKET_URL}.') transformed_metadata.append(transformed_value) s3_resource = boto3.resource('s3') for value in transformed_metadata: object_key = value['object_key'] sha256 = value['sha256'] url = value['url'] if '--no-download' in argv: print(f'NOT querying S3 object key {object_key} because ' f'--no-download was specified') continue s3_object = s3_resource.Object(BUCKET_NAME, object_key) try: s3_object.load() print(f'S3 object key {object_key} already exists') except botocore.exceptions.ClientError as exception: # https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html#rest-object-head-permissions if exception.response['Error']['Code'] in ['403', '404']: print(f'S3 object key {object_key} does NOT exist') with tempfile.TemporaryDirectory() as directory: filename = os.path.join(directory, os.path.basename(object_key)) print(f'Downloading from URL {url}...') with requests.get(url, stream=True) as response: with open(filename, 'wb') as file_object: for chunk in response.iter_content( chunk_size=CHUNK_SIZE): file_object.write(chunk) print(f'Computing and verifying SHA-256 checksum of ' f'file {filename}...') hash_object = hashlib.sha256() with open(filename, 'rb') as file_object: buffer = file_object.read(CHUNK_SIZE) while buffer: hash_object.update(buffer) buffer = file_object.read(CHUNK_SIZE) hexdigest = hash_object.hexdigest() if hexdigest != sha256: raise Exception( f'Expected SHA-256 checksum of file {filename} to ' f'be {sha256}, but actual checksum was computed ' f'to be {hexdigest}') if '--no-upload' in argv: print(f'NOT uploading file {filename} to S3 object ' f'key {object_key} because --no-upload was ' f'specified') else: print(f'Uploading file {filename} to S3 object key ' f'{object_key}...') s3_object.upload_file(filename) else: raise if __name__ == '__main__': main(sys.argv)
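The chunked-hashing idiom in the script above also works as a standalone helper, hashing the file incrementally so that multi-gigabyte archives never need to fit in memory. This is an illustrative sketch only; the `sha256_of_file` name is ours, not part of the script.

```
import hashlib

CHUNK_SIZE = 65536


def sha256_of_file(filename):
    # Feed the file to the hash object one chunk at a time.
    hash_object = hashlib.sha256()
    with open(filename, 'rb') as file_object:
        buffer = file_object.read(CHUNK_SIZE)
        while buffer:
            hash_object.update(buffer)
            buffer = file_object.read(CHUNK_SIZE)
    return hash_object.hexdigest()
```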
0
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/workspace/vendor_cxx.py
"""Build system tool that transforms C++ source code for easy vendoring. Rewrites the include statements, namespace, and symbol visibility with the goal of producing a completely independent build of some upstream library, even when statically linking other versions of the library into the same DSO. Note that this works only on C++ code, not plain C code. """ import argparse from enum import Enum import re def _designate_wrapped_lines(lines): """Given a list[str] of the lines in a C++ source file, returns a list[bool] that is True iff the corresponding line should be part of the inline hidden namespace. We MUST wrap all C/C++ code. We must NOT wrap #include statements. Blank lines and other non-C++ lines such as comments or non-#include preprocessor directives can go either way; we'll group them into the wrapping status of their neighbors in order to minimize the number of added lines. """ class Flag(Enum): WRAP = 1 NO_WRAP = -1 DONT_CARE = 0 # Regexs to match various kinds of code patterns. is_include = re.compile(r'^\s*#\s*include\s*["<].*$') is_preprocessor = re.compile(r'^\s*#.*$') is_blank = re.compile(r'^\s*$') is_blank_cpp_comment = re.compile(r'^\s*//.*$') is_blank_c_comment_begin = re.compile(r'^\s*/\*.*$') is_c_comment_end = re.compile(r'^.*\*/\s*(.*)$') # Loop over all lines and determine each one's flag. flags = [None] * len(lines) i = 0 while i < len(lines): line = lines[i] # When the prior line has continuation, this line inherits its Flag. if i > 0 and lines[i - 1].endswith('\\'): flags[i] = flags[i - 1] i += 1 continue # We must NOT wrap #include statements. if is_include.match(line): flags[i] = Flag.NO_WRAP i += 1 continue # Other preprocessor directives can go either way. if is_preprocessor.match(line): flags[i] = Flag.DONT_CARE i += 1 continue # Blank lines (or lines that are blank other than their comments) # can go either way. if is_blank.match(line) or is_blank_cpp_comment.match(line): flags[i] = Flag.DONT_CARE i += 1 continue # For C-style comments, consume the entire comment block immediately. if is_blank_c_comment_begin.match(line): first_c_comment_line = i while True: line = lines[i] match = is_c_comment_end.match(line) flags[i] = Flag.DONT_CARE i += 1 if match: break # If the close-comment marker had code after it, we need to go back # and set the entire C-style comment to WRAP. (trailing,) = match.groups() if trailing: for fixup in range(first_c_comment_line, i): flags[fixup] = Flag.WRAP continue # We MUST wrap all C/C++ code. flags[i] = Flag.WRAP i += 1 # We want to insert inline namespaces such that: # # - all WRAP lines are enclosed; # - no NO_WRAP lines are enclosed; # - the only DONT_CARE lines enclosed are surrouneded by WRAP. # # We'll do that by growing the NO_WRAP spans as large as possible. # Grow the start-of-file run of NO_WRAP: for i in range(len(flags)): if flags[i] == Flag.DONT_CARE: flags[i] = Flag.NO_WRAP else: break # Grow the end-of-file run of NO_WRAP: for i in range(len(flags) - 1, -1, -1): if flags[i] == Flag.DONT_CARE: flags[i] = Flag.NO_WRAP else: break # Grow any interior regions of NO_WRAP: for i in range(len(flags)): if flags[i] == Flag.NO_WRAP: # Change all of the immediately prior and subsequent homogeneous # runs of DONT_CARE to NO_WRAP. for j in range(i - 1, -1, -1): if flags[j] == Flag.DONT_CARE: flags[j] = Flag.NO_WRAP else: break for j in range(i + 1, len(flags)): if flags[j] == Flag.DONT_CARE: flags[j] = Flag.NO_WRAP else: break # Anything remaining is DONT_CARE bookended by WRAP, so we'll WRAP it. 
for i in range(len(flags)): if flags[i] == Flag.DONT_CARE: flags[i] = Flag.WRAP # Return True only for the wrapped lines. return [x == Flag.WRAP for x in flags] def _rewrite_one_text(*, text, inline_namespace): """Rewrites the C++ file contents in `text` with specific alterations: - Wraps an inline namespace "drake_vendor" with hidden symbol visibility around all of the code in file (but not any #include statements). - Or when inline_namespace is False, simply marks all of the existing namespaces as hidden without any extra inline namespace wrapping. This does not hide the vendored library as thoroughly (it's still a potential ODR conflict during static linking) but has the benefit of working on more complicated projects that our wrapping heuristics cannot handle. Returns the new C++ contents. These changes should suffice for the most typical flavors of C++ code. Tricks like including non-standalone files (`#include "helpers.inc"`) may not work. """ # If the file is a mixed C/C++ header, then we need to leave it alone. if '\nextern "C" {\n' in text: return text # Prepare to edit one line at a time. lines = text.split('\n') if lines[-1] == '': lines.pop() hidden = '__attribute__ ((visibility ("hidden")))' # If we are only changing namespaces (not adding new ones), do that now: if not inline_namespace: # Match either 'namespace foo' or 'namespace foo {'. regex = re.compile(r'^\s*namespace\s+([^{]+?)(\s*{)?$') for i, line in enumerate(lines): match = regex.match(line) if not match: continue name, brace = match.groups() lines[i] = f'namespace {name} {hidden}{brace or ""}' text = '\n'.join(lines) + '\n' return text # We'll add an inline namespace around the C++ code in this file. # Designate each line of the file for whether it should be wrapped. should_wrap = _designate_wrapped_lines(lines) # Anytime the sense of wrapping switches, we'll insert a line. # Do this in reverse order so that the indices into lines[] are stable. open_inline = ' '.join([ 'inline namespace drake_vendor', hidden, '{']) close_inline = '} /* inline namespace drake_vendor */' for i in range(len(lines), -1, -1): this_wrap = should_wrap[i] if i < len(lines) else False prior_wrap = should_wrap[i - 1] if i > 1 else False if this_wrap == prior_wrap: continue insertion = open_inline if this_wrap else close_inline lines.insert(i, insertion) text = '\n'.join(lines) + '\n' return text def _rewrite_one_file(*, old_filename, new_filename, inline_namespace): """Reads in old_filename and write into new_filename with specific alterations as described by _rewrite_one_string(). """ # Read the original. with open(old_filename, 'r', encoding='utf-8') as in_file: old_text = in_file.read() new_text = _rewrite_one_text(text=old_text, inline_namespace=inline_namespace) # Write out the altered file. with open(new_filename, 'w', encoding='utf-8') as out_file: out_file.write(new_text) def _split_pair(arg): """Helper function to split ':'-delimited pairs on the command line. 
""" old, new = arg.split(':') return (old, new) def _main(): parser = argparse.ArgumentParser() parser.add_argument( '--no-inline-namespace', dest='inline_namespace', action='store_false', help='Set visibility directly without an inline namespace wrapper') parser.add_argument( 'rewrite', nargs='+', type=_split_pair, help='Filename pairs to rewrite, given as IN:OUT') args = parser.parse_args() for old_filename, new_filename in args.rewrite: _rewrite_one_file( inline_namespace=args.inline_namespace, old_filename=old_filename, new_filename=new_filename) if __name__ == '__main__': _main()
0
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/workspace/generate_file.bzl
def _generate_file_impl(ctx):
    out = ctx.actions.declare_file(ctx.label.name)
    ctx.actions.write(out, ctx.attr.content, ctx.attr.is_executable)
    return [DefaultInfo(
        files = depset([out]),
        data_runfiles = ctx.runfiles(files = [out]),
    )]

generate_file = rule(
    attrs = {
        "content": attr.string(mandatory = True),
        "is_executable": attr.bool(default = False),
    },
    output_to_genfiles = True,
    implementation = _generate_file_impl,
)

"""Generate a file with specified content.

This creates a rule to generate a file with specified content (which is
either static or has been previously computed).

Args:
    content (:obj:`str`): Desired content of the generated file.
"""
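As a usage sketch, a BUILD.bazel file could write out a small generated file like so; the target name and content here are hypothetical.

```
load("//tools/workspace:generate_file.bzl", "generate_file")

generate_file(
    name = "hello.txt",
    content = "Hello, world!\n",
)
```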
0
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/workspace/github.bzl
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "patch") load("//tools/workspace:metadata.bzl", "generate_repository_metadata") def github_archive( name, repository = None, commit = None, commit_pin = None, sha256 = "0" * 64, build_file = None, patches = None, extra_strip_prefix = "", local_repository_override = None, mirrors = None, upgrade_advice = "", **kwargs): """A macro to be called in the WORKSPACE that adds an external from GitHub using a workspace rule. This rule downloads the source code. To download attachments instead, see github_release_attachments(). Args: name: required name is the rule name and so is used for @name//... labels when referring to this archive from BUILD files. repository: required GitHub repository name in the form organization/project. commit: required commit is the tag name or git commit sha to download. commit_pin: optional boolean, set to True iff the archive should remain at the same version indefinitely, eschewing automated upgrades to newer versions. sha256: required sha256 is the expected SHA-256 checksum of the downloaded archive. When unsure, you can omit this argument (or comment it out) and then the checksum-mismatch error message will offer a suggestion. build_file: optional build file is the BUILD file label to use for building this external. As a Drake-specific abbreviation, when provided as a relative label (e.g., ":package.BUILD.bazel"), it will be taken as relative to the "@drake//tools/workspace/{name}/" package. When no build_file is provided, the BUILD file(s) within the archive will be used. patches: optional list of patches to apply, matching what's described at https://bazel.build/rules/lib/repo/git#git_repository-patches. As a Drake-specific abbreviation, when provided using relative labels (e.g., ":patches/foo.patch"), they will be taken as relative to the "@drake//tools/workspace/{name}/" package. extra_strip_prefix: optional path to strip from the downloaded archive, e.g., "src" to root the repository at "./src/" instead of "./". local_repository_override: optional local repository override can be used for temporary local testing; instead of retrieving the code from GitHub, the code is retrieved from the local filesystem path given in the argument. mirrors: required mirrors is a dict from string to list-of-string with key "github", where the list-of-strings are URLs to use, formatted using {repository} and {commit} string substitutions. The mirrors.bzl file in this directory provides a reasonable default value. upgrade_advice: optional string that describes extra steps that should be taken when upgrading to a new version. Used by //tools/workspace:new_release. """ if repository == None: fail("Missing repository=") if commit == None: fail("Missing commit=") if mirrors == None: fail("Missing mirrors=; see mirrors.bzl") build_file = _resolve_drake_abbreviation(name, build_file) patches = [ _resolve_drake_abbreviation(name, one_patch) for one_patch in (patches or []) ] if local_repository_override != None: path = local_repository_override if extra_strip_prefix: path += "/" + extra_strip_prefix if build_file == None: native.local_repository( name = name, path = path, ) else: native.new_local_repository( name = name, build_file = build_file, path = path, ) return # Once we've handled the "local_repository_override" sidestep, we delegate # to a rule (not a macro) so that we have more leeway in the actions we can # take (i.e., so we can do more than just a simple download-and-extract). 
    _github_archive_real(
        name = name,
        repository = repository,
        commit = commit,
        commit_pin = commit_pin,
        sha256 = sha256,
        build_file = build_file,
        patches = patches,
        extra_strip_prefix = extra_strip_prefix,
        mirrors = mirrors,
        upgrade_advice = upgrade_advice,
        **kwargs
    )

def _resolve_drake_abbreviation(name, label_str):
    """De-abbreviates the given label_str as a Drake tools/workspace label.
    If the label_str is None, returns None. If the label_str is relative,
    interprets it relative to the "@drake//tools/workspace/{name}/" package
    and returns an absolute label. Otherwise, returns the label_str
    unchanged.
    """
    if label_str == None:
        return None
    if label_str.startswith(":"):
        return "@drake//tools/workspace/" + name + label_str
    return label_str

# Helper stub to implement a repository_rule in terms of a setup() function.
def _github_archive_real_impl(repository_ctx):
    result = setup_github_repository(repository_ctx)
    if result.error != None:
        fail("Unable to complete setup for " +
             "@{} repository: {}".format(
                 repository_ctx.name,
                 result.error,
             ))

_github_archive_real = repository_rule(
    implementation = _github_archive_real_impl,
    attrs = {
        "repository": attr.string(
            mandatory = True,
        ),
        "commit": attr.string(
            mandatory = True,
        ),
        "commit_pin": attr.bool(),
        "sha256": attr.string(
            mandatory = False,
            default = "0" * 64,
        ),
        "build_file": attr.label(
            default = None,
        ),
        "extra_strip_prefix": attr.string(
            default = "",
        ),
        "mirrors": attr.string_list_dict(
            mandatory = True,
            allow_empty = False,
        ),
        "patches": attr.label_list(
            default = [],
        ),
        "patch_tool": attr.string(
            default = "patch",
        ),
        "patch_args": attr.string_list(
            default = ["-p0"],
        ),
        "patch_cmds": attr.string_list(
            default = [],
        ),
        "upgrade_advice": attr.string(
            default = "",
        ),
    },
)
"""This is a rule() formulation of the github_archive() macro. It is
identical to the macro except that it does not support
local_repository_override. Consult the macro documentation for full API
details.
"""

def setup_github_repository(repository_ctx):
    """This is a reusable formulation of the github_archive() macro. It is
    identical to the macro except that (1) it does not support local
    repository override, and (2) it returns a status struct instead of
    failing internally. The result struct has a field `error` that will be
    non-None iff there were any errors. Consult the macro documentation for
    additional API details.
    """

    # Do the download step first. (This also writes the metadata.)
    github_download_and_extract(
        repository_ctx,
        repository = repository_ctx.attr.repository,
        commit = repository_ctx.attr.commit,
        commit_pin = getattr(repository_ctx.attr, "commit_pin", None),
        mirrors = repository_ctx.attr.mirrors,
        sha256 = repository_ctx.attr.sha256,
        extra_strip_prefix = repository_ctx.attr.extra_strip_prefix,
        upgrade_advice = getattr(repository_ctx.attr, "upgrade_advice", ""),
    )

    # Optionally apply source patches, using Bazel's utility helper. Here we
    # use getattr as a guard, in case the originating repository_rule didn't
    # want to declare attr support for the patchfile-related settings.
    patch_triggers = ["patches", "patch_cmds"]
    if any([getattr(repository_ctx.attr, a, None) for a in patch_triggers]):
        patch(repository_ctx)

    # We re-implement Bazel's workspace_and_buildfile utility, so that options
    # we don't care about (e.g., build_file_content) do not have to be declared
    # as attrs on all of our own repository rules.
# # Unlike workspace_and_buildfile, we create WORKSPACE.bazel and BUILD.bazel # (rather than WORKSPACE and BUILD) because when the "*.bazel" flavor is # present, it always takes precedence. files_to_be_created = ["WORKSPACE.bazel"] if repository_ctx.attr.build_file: files_to_be_created.append("BUILD.bazel") for name in files_to_be_created: if repository_ctx.path(name).exists: repository_ctx.execute(["/bin/mv", name, name + ".ignored"]) repository_ctx.file( "WORKSPACE.bazel", "workspace(name = \"{name}\")\n".format( name = repository_ctx.name, ), ) if repository_ctx.attr.build_file: repository_ctx.symlink(repository_ctx.attr.build_file, "BUILD.bazel") return struct(error = None) def github_download_and_extract( repository_ctx, repository, commit, mirrors, output = "", sha256 = "0" * 64, extra_strip_prefix = "", upgrade_advice = "", commit_pin = None): """Download an archive of the provided GitHub repository and commit to the output path and extract it. Args: repository_ctx: context of a Bazel repository rule. repository: GitHub repository name in the form organization/project. commit: git revision for which the archive should be downloaded. mirrors: dictionary of mirrors, see mirrors.bzl in this directory for an example. output: path to the directory where the archive will be unpacked, relative to the Bazel repository directory. sha256: expected SHA-256 hash of the archive downloaded. Fallback to an incorrect default value to prevent the hash check from being disabled, but allow the first download attempt to fail and print the correct SHA-256 hash. extra_strip_prefix: optional path to strip from the downloaded archive, e.g., "src" to root the repository at "./src/" instead of "./". commit_pin: set to True iff the archive should remain at the same version indefinitely, eschewing automated upgrades to newer versions. upgrade_advice: optional string that describes extra steps that should be taken when upgrading to a new version. Used by //tools/workspace:new_release. """ urls = _urls( repository = repository, commit = commit, mirrors = mirrors, ) strip_prefix = _strip_prefix(repository, commit, extra_strip_prefix) repository_ctx.download_and_extract( urls, output = output, sha256 = _sha256(sha256), type = "tar.gz", stripPrefix = strip_prefix, ) upgrade_advice = "\n".join( [line.strip() for line in upgrade_advice.strip().split("\n")], ).replace("\\\n", "\\\n ") # Create a summary file for Drake maintainers. generate_repository_metadata( repository_ctx, repository_rule_type = "github", repository = repository, commit = commit, version_pin = commit_pin, sha256 = sha256, urls = urls, strip_prefix = strip_prefix, upgrade_advice = upgrade_advice, ) def _sha256(sha256): """Fallback to an incorrect default value of SHA-256 hash to prevent the hash check from being disabled, but allow the first download attempt of an archive to fail and print the correct hash. Args: sha256: expected SHA-256 hash of the archive to be downloaded. """ if not sha256: sha256 = "0" * 64 return sha256 def _strip_prefix(repository, commit, extra_strip_prefix): """Compute the strip prefix for a downloaded archive of the provided GitHub repository and commit. Args: repository: GitHub repository name in the form organization/project. commit: git revision for which the archive was downloaded. """ repository_split = repository.split("/") if len(repository_split) != 2: fail("repository must be formatted as organization/project") _, project = repository_split # GitHub archives omit the "v" in version tags, for some reason. 
if commit[0] == "v": strip_commit = commit[1:] else: strip_commit = commit result = project + "-" + strip_commit.replace("/", "-") if extra_strip_prefix: result += "/" + extra_strip_prefix return result def _is_commit_sha(commit): """Returns true iff the commit is a hexadecimal string of length 40.""" return len(commit) == 40 and all([ ch.isdigit() or (ch >= "a" and ch <= "f") for ch in commit.elems() ]) def _format_url(*, pattern, repository, commit): """Given a URL pattern for github.com or a Drake-specific mirror, substitutes in the given repository and commit (tag or git sha). The URL pattern accepts the following substitutions: The {repository} is always substituted with `repository`. The {commit} is always substituted with `commit`. If `commit` refers to a git tag, then {tag_name} will be substituted. If `commit` refers to a git branch, then {branch_name} will be substituted. If `commit` refers to a git sha, then {commit_sha} will be substituted. Patterns that contain a substitution which does not apply to the given `commit` (e.g., {commit_sha} when `commit` is a tag) will return None. The pattern must contain exactly one of {commit}, {tag_name}, {branch_name}, or {commit_sha}. """ is_commit_sha = _is_commit_sha(commit) is_tag = not is_commit_sha substitutions = { "repository": repository, "commit": commit, "tag_name": commit if is_tag else None, "commit_sha": commit if is_commit_sha else None, } for name, value in substitutions.items(): if value == None: needle = "{" + name + "}" if needle in pattern: # If the pattern uses a substitution that we do not have, # report that to our caller as "None"; don't return a URL # string with a literal "None" in it! return None return pattern.format(**substitutions) def _urls(*, repository, commit, mirrors): """Compute the urls from which an archive of the provided GitHub repository and commit may be downloaded. Args: repository: GitHub repository name in the form organization/project. commit: git revision for which the archive should be downloaded. mirrors: dictionary of mirrors, see mirrors.bzl in this directory for an example. """ result_with_nulls = [ _format_url( pattern = x, repository = repository, commit = commit, ) for x in mirrors.get("github") ] return [ url for url in result_with_nulls if url != None ] def github_release_attachments( name, repository = None, commit = None, commit_pin = None, attachments = None, extract = None, strip_prefix = None, build_file = None, mirrors = None, upgrade_advice = None, **kwargs): """A macro to be called in the WORKSPACE that adds an external from GitHub using a workspace rule. This rule downloads attachments from posted releases (e.g., precompiled binaries). To download a git source archive instead, see github_archive(). Args: name: required name is the rule name and so is used for @name//... labels when referring to this archive from BUILD files. repository: required GitHub repository name in the form organization/project. commit: required commit is the tag name to download. commit_pin: optional boolean, set to True iff the release should remain at the same version indefinitely, eschewing automated upgrades to newer versions. attachments: required dict whose keys are the filenames (attachment names) to download and values are the expected SHA-256 checksums. extract: optional list of the filenames (attachment names) that should be downloaded and extracted (e.g., `*.tgz` files), as opposed to only downloaded. 
strip_prefix: optional dict whose keys are the filenames (attachment names) and values indicate a prefix to strip during extraction. Note that this is only relevant for filenames that are also listed under `extract`. build_file: required build file is the BUILD file label to use for building this external. As a Drake-specific abbreviation, when provided as a relative label (e.g., ":package.BUILD.bazel"), it will be taken as relative to the "@drake//tools/workspace/{name}/" package. mirrors: required mirrors is a dict from string to list-of-string with key "github_release_attachments", where the list-of-strings are URLs to use, formatted using {repository}, {commit}, and {filename} string substitutions. The mirrors.bzl file in this directory provides a reasonable default value. upgrade_advice: optional string that describes extra steps that should be taken when upgrading to a new version. Used by //tools/workspace:new_release. """ if not repository: fail("Missing repository=") if not commit: fail("Missing commit=") if not attachments: fail("Missing attachments=") if not build_file: fail("Missing build_file=") if not mirrors: fail("Missing mirrors=; see mirrors.bzl") build_file = _resolve_drake_abbreviation(name, build_file) _github_release_attachments_real( name = name, repository = repository, commit = commit, commit_pin = commit_pin, attachments = attachments, extract = extract, strip_prefix = strip_prefix, build_file = build_file, mirrors = mirrors, upgrade_advice = upgrade_advice, **kwargs ) # Helper stub to implement a repository_rule in terms of a setup() function. def _github_release_attachments_real_impl(repository_ctx): result = setup_github_release_attachments(repository_ctx) if result.error != None: fail("Unable to complete setup for " + "@{} repository: {}".format( repository_ctx.name, result.error, )) _github_release_attachments_real = repository_rule( implementation = _github_release_attachments_real_impl, attrs = { "repository": attr.string( mandatory = True, ), "commit": attr.string( mandatory = True, ), "commit_pin": attr.bool(), "attachments": attr.string_dict( mandatory = True, ), "extract": attr.string_list(), "strip_prefix": attr.string_dict(), "build_file": attr.label( mandatory = True, ), "mirrors": attr.string_list_dict( mandatory = True, allow_empty = False, ), "upgrade_advice": attr.string( default = "", ), }, ) def setup_github_release_attachments(repository_ctx): """This is a reusable formulation of the github_release_attachments macro. It is identical to the macro except that it returns a status struct instead of failing internally. The result struct has a field `error` that will be non-None iff there were any errors. Consult the macro documentation for additional API details. """ repository = repository_ctx.attr.repository commit = repository_ctx.attr.commit commit_pin = repository_ctx.attr.commit_pin attachments = repository_ctx.attr.attachments extract = getattr(repository_ctx.attr, "extract", list()) strip_prefix = getattr(repository_ctx.attr, "strip_prefix", dict()) mirrors = repository_ctx.attr.mirrors upgrade_advice = getattr(repository_ctx.attr, "upgrade_advice", "") patterns = mirrors.get("github_release_attachments") # Download everything. 
downloads = [] for filename, sha256 in attachments.items(): urls = [ pattern.format( repository = repository, commit = commit, filename = filename, ) for pattern in patterns ] if filename in extract: maybe_strip_prefix = strip_prefix.get(filename, None) if maybe_strip_prefix != None: repository_ctx.download_and_extract( urls, stripPrefix = maybe_strip_prefix, sha256 = _sha256(sha256), ) else: repository_ctx.download_and_extract( urls, sha256 = _sha256(sha256), ) else: repository_ctx.download( urls, output = filename, sha256 = _sha256(sha256), ) downloads.append(dict( urls = urls, sha256 = sha256, )) # Add the BUILD file. repository_ctx.symlink(repository_ctx.attr.build_file, "BUILD.bazel") # Create a summary file for Drake maintainers. generate_repository_metadata( repository_ctx, repository_rule_type = "github_release_attachments", repository = repository, commit = commit, version_pin = commit_pin, attachments = attachments, strip_prefix = strip_prefix, downloads = downloads, upgrade_advice = upgrade_advice, ) return struct(error = None)
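For orientation, a repository.bzl that pulls precompiled release assets via github_release_attachments() might look like the sketch below; the repository name, filename, and checksum are placeholders for illustration, not a real external.

```
load("//tools/workspace:github.bzl", "github_release_attachments")

def foo_binaries_repository(name, mirrors = None):
    github_release_attachments(
        name = name,
        repository = "example/foo",
        commit = "v1.2.3",
        attachments = {
            # Placeholder checksum; the real value comes from the release.
            "foo-v1.2.3.tar.gz": "0" * 64,
        },
        extract = ["foo-v1.2.3.tar.gz"],
        strip_prefix = {"foo-v1.2.3.tar.gz": "foo-v1.2.3"},
        build_file = ":package.BUILD.bazel",
        mirrors = mirrors,
    )
```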
0
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/workspace/metadata.py
"""Reports summary data related to Drake's repository rules. This implementation uses Bazel command-line actions so is suitable only for manual use, not any build rules or test automation. """ import glob import json import os import subprocess def _check_output(args): return subprocess.check_output(args).decode("utf8") def read_repository_metadata(repositories=None): """If no repositories are given, returns data for all repositories. Returns a dict of {repository_name => details_dict}. """ result = {} # Ask where the repository rules write their output. output_base = _check_output( ["bazel", "info", "output_base"]).strip() assert os.path.isdir(output_base), output_base if not repositories: # Obtain a list of known repositories. package_lines = _check_output( ["bazel", "query", "deps(//...)", "--output", "package"]) repositories = set() for line in package_lines.split("\n"): if not line.startswith("@"): continue name = line[1:].split("/")[0] repositories.add(name) # The bazel query only finds build-time dependencies. Drake also # requires some load-time dependencies such as starlark libraries, # compilers, etc. Here, we add by hand those we want to be archived # and upgraded. # # NOTE: At this time, we are skipping the rust_toolchain repositories; # see TODO in tools/workspace/rust_toolchain/repository.bzl. repositories.add("bazel_skylib") repositories.add("com_github_nelhage_rules_boost_internal") # Make sure all of the repository_rule results are up-to-date. subprocess.check_call(["bazel", "fetch", "//..."]) # Read the metadata. for name in sorted(repositories): json_path = os.path.join( output_base, "external", name, "drake_repository_metadata.json") try: with open(json_path, "r") as f: data = json.load(f) result[data["name"]] = data except IOError: pass # Add 'magic' metadata for repositories that don't/can't generate it the # usual way. result["crate_universe"] = { "repository_rule_type": "scripted", "upgrade_script": "upgrade.sh", # Downloads are associated with individual "crate__..." repositories. "downloads": {}, } result["rust_toolchain"] = { "repository_rule_type": "scripted", "upgrade_script": "upgrade.py", # Downloads are associated with individual "rust_..." repositories. "downloads": {}, } return result
0
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/workspace/README.md
This `//tools/workspace/...` package tree contains files related to Drake's
Bazel build system, specifically files relating to downloading and/or
compiling third-party software, as cited by Drake's top-level `/WORKSPACE`
file.

# File layout

Files directly in the `//tools/workspace` package are generic helpers,
unrelated to any one third-party library. Files in sub-packages such as
`//tools/workspace/eigen` are specific to their third-party software; the
sub-package is named to match that software's corresponding name in the
`//tools/workspace/default.bzl` file.

Files named `BUILD.bazel` denote the package structure within our
sub-folders; in the case of the `//tools/workspace/...` packages, these are
largely just visibility declarations.

Files named `package.BUILD.bazel` are Drake-specific build rules for external
libraries or tools that do not natively support Bazel:
https://docs.bazel.build/versions/master/external.html#depending-on-non-bazel-projects

Files named `repository.bzl` are repository rules, and intended to be a
stable entry point for other Bazel projects to refer to the same dependencies
that Drake is using:
https://docs.bazel.build/versions/master/skylark/concepts.html
https://docs.bazel.build/versions/master/be/workspace.html
https://docs.bazel.build/versions/master/skylark/repository_rules.html

Per the [Stability Guidelines](https://drake.mit.edu/stable.html), externals
named as "internal" or otherwise documented to be "internal use only" are not
subject to any deprecation guarantees.

# Semi-automated monthly upgrades

Drake maintainers will use the ``bazel-bin/tools/workspace/new_release`` tool
to report and upgrade any out-of-date externals. The process is as follows:

Begin from an up-to-date checkout of Drake ``master``.

Read the documentation in ``//tools/workspace:new_release``. In particular,
note that only certain operating systems are supported by that tool. If you
haven't yet created a GitHub API token per those docs, do that now.

Open a new branch:

```
git checkout -b upgrades
```

Run the "upgrades needed" report. Copy its output into a temporary text file
so that you can easily refer back to it as you proceed.

```
bazel run //tools/workspace:new_release
```

For each external in the report, add a commit that upgrades it. Typically,
this can be done by running the script to perform one upgrade (for some
external "foo"):

```
bazel run //tools/workspace:new_release -- --lint --commit foo
```

If the automated update doesn't succeed, then you'll need to make the edits
manually. Ask for help in the drake developers ``#build`` slack channel.

If the automated update succeeded, check the output of ``new_release`` for
any additional steps that need to be manually performed to complete the
upgrade. Follow any advice that is given.

If you didn't use ``--lint`` earlier, or need to re-test, run
``bazel test --config lint //...`` as a sanity check of the changes.

If any edits are needed, stage the changes and amend the commit using
``git commit --amend``.

Repeat this process for all upgrades. You can re-run the ``new_release``
report anytime, to get the remaining items that need attention. You can also
list several externals to try to update at once, although this will
complicate making changes to those commits if needed.

Note that some externals are reported as "may need upgrade". This means that
``new_release`` is not able to automatically determine whether an upgrade is
needed; therefore, these should always be upgraded. (If no upgrade is needed,
the upgrade will do nothing and will not create a commit.)

Each external being upgraded should have exactly one commit that does the
upgrade, and each commit should either a) only impact exactly one external,
or b) impact exactly those externals of a cohort which need to be upgraded.
If we find any problem with an upgrade, we need to be able to revert the
commit for just that one external upgrade, leaving the other upgrades intact.
The ``new_release`` tool will automatically upgrade all externals of a cohort
in a single operation.

Once all upgrades are ready, open a Drake pull request and label it
``status: commits are properly curated``. Open the Reviewable page and change
the drop-down that says "Combine commits for review" to choose "Review each
commit separately" instead.

Once all of the Jenkins builds of the pull request have passed, assign the
pull request for review. If the pull request contains no especially
complicated changes, it may be assigned to the on-call platform reviewer and
labelled ``status: single reviewer ok``.

For any non-trivial changes (i.e., changes that go beyond changing version
numbers, checksums, or trivial fixups to patch files or code spelling), do
not attempt to fix the problems just because you are accountable for the
routine upgrade procedure every month. As a rule of thumb, if you need to
spend more than 5-10 minutes on an upgrade, you should defer the work to a
separate pull request:

* open a pull request with the WIP patch for that one specific external;
* ensure that the Jenkins output shows the problem (e.g., trigger any extra
  non-default builds that failed);
* assign it to the feature owner associated with that external (to find out
  who that is, ask for help in the drake developers ``#build`` slack
  channel); and
* omit it from the monthly upgrade pull request.

The main objective of the monthly upgrade is to ensure that we stay on top of
problematic changes from upstream. If we discover such problems, we want to
bring them to the attention of the feature owner; their steering should
provide the most efficient path to resolve the problem.

If an external required non-trivial changes, even if you were able to make
the changes yourself, consider separating that external into its own pull
request and assigning it to the associated feature owner.

# Changing the version of third-party software manually

The instructions for updating third-party software differ depending on how
Drake is obtaining that software.

Most third-party software used by Drake will be incorporated via files named
`//tools/workspace/foo:repository.bzl` where `foo` is the name of the
software (`boost`, `eigen`, `vtk`, etc.). Consult that file to check which
download or installation helper is used; find the helper in the list below to
continue.

## Updating github_archive software versions

For software downloaded from github.com and compiled from source, there are
two choices, depending on whether the purpose is exploration from a local
clone vs pushing to Drake master.

### Exploring github_archive changes from a local clone

This allows for easy editing and debugging (e.g., adding logging)
temporarily.

To use a local clone of a `github_archive`, first clone the software using
`git` commands manually. Then, within the relevant
`//tools/workspace/foo:repository.bzl` file add a `local_repository_override`
argument to the `github_archive` macro call pointing at a local checkout,
e.g.:

```
github_archive(
    name = "foobar",
    local_repository_override = "/path/to/local/foo/bar",
    repository = "foo/bar",
    commit = "0123456789abcdef0123456789abcdef01234567",
    sha256 = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",  # noqa
)
```

Now, `bazel build` and `test` etc. will use the local clone. When
`local_repository_override` is present, the `repository` and `commit` and
`sha256` arguments are ignored. Removing the `local_repository_override`
reverts to using the given `commit` and ignores the local checkout.

### Finalizing github_archive changes

To lock in a new upstream revision, edit the `github_archive` macro call
within the relevant `//tools/workspace/foo:repository.bzl` file:

- remove the `local_repository_override` (if it exists),
- change the `commit` argument to refer to a different revision,
- comment out the `sha256` argument, and then
- run `bazel build`.

Bazel's fetch step will attempt to download the new version but then complain
about a checksum mismatch. Paste the new checksum into the `sha256` argument
and remove its commenting-out. Then, `bazel build` should succeed.

Briefly check that `//tools/workspace/foo:package.BUILD.bazel` still seems
appropriate; for example, if there are hard-coded version numbers that need
to match the `commit=` tag, they should be updated (this is rare).

Commit and pull-request the changed lines to Drake as usual. Many changes
like this will be susceptible to Ubuntu vs macOS differences, so please
opt-in to the macOS build(s) in Jenkins before merging, using the
instructions at
https://drake.mit.edu/jenkins.html#running-an-on-demand-build.

## Updating pkg_config_repository software versions

Most `pkg_config_repository` calls refer to libraries provided by the host
operating system (Ubuntu, macOS, etc.). In most cases, we are stuck with the
host version in order to remain compatible with the wider software ecosystem.
If the host version is problematic, contact the Drake developers for advice.

# Adding new third-party software

The best guide for incorporating new third-party software is to mimic what
Drake does for other third-party software it already uses. There are roughly
three general approaches, in order of preference:

- Use a library or tool from the host operating system;
- Download a library or tool as source code and compile it;
- Download a library or tool as binaries.

When the host operating system (macOS, Ubuntu) offers a version of the
software, it's best to use that version in order to remain compatible with
the wider software ecosystem. If the host version is problematic, contact the
Drake developers for advice.

When the host doesn't offer the software, compiling from source is preferred.
Downloading binaries is a last resort, because they are difficult to patch
and difficult to support on multiple platforms.

## Common steps to add new third-party software

TODO(jwnimmer-tri) Add documentation here about how to validate that the new
software's license is acceptable to use within Drake.

When adding a new external, decide whether it will be covered by our
[Stability Guidelines](https://drake.mit.edu/stable.html). Broadly speaking,
dependencies that come from the host operating system can be covered as
stable, but dependencies that we compile from source code should be internal.
If the new dependency should be internal, name it like "foo_internal" (not
just "foo") throughout all of the below.

Referring to some new third-party software as "foo", the steps to incorporate
it into Drake are roughly:

- Create a new sub-directory `tools/workspace/foo`.
- Create `tools/workspace/foo/BUILD.bazel` that calls `add_lint_tests()`.
- Create `tools/workspace/foo/repository.bzl` that declares a
  `foo_repository()` macro or rule. The details are given below.
- Edit `tools/workspace/default.bzl` to load and conditionally call the new
  `foo_repository()` macro or rule.

When indicating licenses in the source, use the identifier from the
[SPDX License List](https://spdx.org/licenses/).

## When using a library from the host operating system

See `glib` for an example. Update the package setup lists to mention the new
package:

- `setup/ubuntu/binary_distribution/packages-DIST.txt` with the `libfoo0`
  runtime library;
- `setup/ubuntu/source_distribution/packages-DIST.txt` with the `libfoo-dev`
  library;
- `setup/mac/binary_distribution/Brewfile` if used in Drake's installed copy;
- `setup/mac/source_distribution/Brewfile` if only used during development
  (not install).

In `tools/workspace/foo/repository.bzl`, use `pkg_config_repository` to
locate a library from the host.

## When downloading a library or tool as source code

For choosing the version or commit to use in `repository.bzl`:

* When upstream provides numbered releases, pin Drake to use the most recent
  stable release. Drake maintainers will automatically upgrade to a more
  recent stable release on a monthly basis.
* Otherwise, pin Drake to use the most recent commit of the upstream mainline
  branch. Drake maintainers will automatically upgrade to a more recent
  mainline commit on a monthly basis.
* If the pin policy is unsatisfactory for the case of some specific external,
  consult Drake's build system maintainers for advice.

Mimic an existing example to complete the process, e.g., look at
`//tools/workspace/tinyobjloader` and mimic the `repository.bzl` and
`package.BUILD.bazel` files.
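For a source-built external, the accompanying `package.BUILD.bazel` often starts out as small as the hypothetical sketch below; the target name, license, and globs are placeholders, and real externals usually need more care with license files, visibility, and file lists.

```
# A minimal, hypothetical package.BUILD.bazel for an external named "foo".
load("@drake//tools/skylark:cc.bzl", "cc_library")

licenses(["notice"])  # e.g., BSD-3-Clause per the SPDX License List

package(default_visibility = ["//visibility:public"])

cc_library(
    name = "foo",
    srcs = glob(["src/*.cc"]),
    hdrs = glob(["include/**/*.h"]),
    includes = ["include"],
)
```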
0
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/workspace/check_lists_consistency.bzl
def check_lists_consistency(
        *,
        files,
        glob_include,
        glob_exclude = None):
    """Checks that a hard-coded list of files fully covers a glob expression.
    When a package.BUILD.bazel file hard-codes a list of files (e.g., a list
    of headers or source files), we would like to fail-fast when upstream
    adds new files so that we can refresh our list. This is especially
    important when the version of the external is updated.

    Args:
        files (:obj:`list` of :obj:`str`): List of expected file names that
            will be matched by the glob expressions.
        glob_include (:obj:`list` of :obj:`str`): List of glob patterns to
            search for, per native.glob(include = ...).
        glob_exclude (:obj:`list` of :obj:`str`): List of glob patterns to
            exclude from the search, per native.glob(exclude = ...).
    """
    all_files = native.glob(glob_include, exclude = (glob_exclude or []))
    uncovered_files = sorted([x for x in all_files if x not in files])
    if len(uncovered_files) != 0:
        fail("The following files matched a glob of upstream sources, but " +
             "were not covered by the package.BUILD.bazel file: {}".format(
                 uncovered_files,
             ))
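A typical call site inside some external's package.BUILD.bazel might look like this; the `_HDRS` list and glob pattern are illustrative placeholders.

```
load(
    "@drake//tools/workspace:check_lists_consistency.bzl",
    "check_lists_consistency",
)

_HDRS = [
    "include/foo/bar.h",
    "include/foo/baz.h",
]

# Fails fast if upstream adds a header that _HDRS does not cover.
check_lists_consistency(
    files = _HDRS,
    glob_include = ["include/**/*.h"],
)
```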
0
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/workspace/os.bzl
"""A collection of OS-related utilities intended for use in repository rules, i.e., rules used by WORKSPACE files, not BUILD files. To opt-in to the "manylinux" or "macos_wheel" build variants, set the environment variable (e.g.) `DRAKE_OS=manylinux` before running the build. The most precise way to do this is to add a `user.bazelrc` file to the root of the Drake source tree with the following content: common --repo_env=DRAKE_OS=manylinux Alternatively, you may pass `--repo_env=DRAKE_OS=manylinux` on the bazel command line. (Replace "manylinux" with "macos_wheel" as appropriate.) """ load("//tools/workspace:execute.bzl", "which") def exec_using_which(repository_ctx, command): """Run the given command (a list), using the which() function in execute.bzl to locate the executable named by the zeroth index of `command`. Return struct with attributes: - error (None when success, or else str message) - stdout (str command output, possibly empty) """ # Find the executable. fullpath = which(repository_ctx, command[0]) if fullpath == None: return struct( stdout = "", error = "could not find which '%s'" % command[0], ) # Run the executable. result = repository_ctx.execute([fullpath] + command[1:]) if result.return_code != 0: error = "error %d running %r (command %r, stdout %r, stderr %r)" % ( result.return_code, command[0], command, result.stdout, result.stderr, ) return struct(stdout = result.stdout, error = error) # Success. return struct(stdout = result.stdout, error = None) def is_wheel_build(repository_ctx): """ Returns true iff this build is a Python wheel flavor. """ drake_os = repository_ctx.os.environ.get("DRAKE_OS", "") return drake_os in ["manylinux", "macos_wheel"] def os_specific_alias(repository_ctx, mapping): """ A repository_rule helper function that creates a BUILD file with alias() declarations based on which supported OS version we are targeting. Argument: repository_ctx: The context passed to the repository_rule calling this. mapping: dict(str, list(str)) where the keys match the OS (which must be either "linux" or "osx", and the list of values are of the form name=actual as in alias(name, actual). """ key = repository_ctx.os.name if key == "mac os x": key = "osx" if key not in mapping: fail("Unsupported os.name " + key) items = mapping[key] # Emit the list of aliases. file_content = """\ # DO NOT EDIT: generated by os_specific_alias_repository() package(default_visibility = ["//visibility:public"]) """ for item in items: name, actual = item.split("=") file_content += 'alias(name = "{}", actual = "{}")\n'.format( name, actual, ) repository_ctx.file( "BUILD.bazel", content = file_content, executable = False, ) def _os_specific_alias_impl(repository_ctx): os_specific_alias(repository_ctx, repository_ctx.attr.mapping) os_specific_alias_repository = repository_rule( attrs = { "mapping": attr.string_list_dict(mandatory = True), }, implementation = _os_specific_alias_impl, )
0
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/workspace/pkg_config.bzl
load("//tools/workspace:execute.bzl", "path", "which") load("//tools/workspace:os.bzl", "is_wheel_build") _DEFAULT_TEMPLATE = Label("@drake//tools/workspace:pkg_config.BUILD.tpl") _DEFAULT_STATIC = False def _run_pkg_config(repository_ctx, command_line, pkg_config_paths): """Run command_line with PKG_CONFIG_PATH = pkg_config_paths and return its tokenized output.""" pkg_config_path = ":".join(pkg_config_paths) result = repository_ctx.execute( command_line, environment = { "PKG_CONFIG_PATH": pkg_config_path, }, ) if result.return_code != 0: return struct(error = "error {} from {}: {}{}".format( result.return_code, command_line, result.stdout, result.stderr, )) tokens = [x for x in result.stdout.strip().split(" ") if x] return struct(tokens = tokens, error = None) def setup_pkg_config_repository(repository_ctx): """This is the macro form of the pkg_config_repository() rule below. Refer to that rule's API documentation for details. This flavor of this rule is intended to be called by other repository_rule implementation functions. The pkg_config_repository flavor of this rule is intended to be called directly from the WORKSPACE file, or from a macro that was called by the WORKSPACE file. """ # First locate pkg-config. tool_path = which(repository_ctx, "pkg-config") if not tool_path: return struct(error = "Could not find pkg-config on PATH={}".format( path(repository_ctx), )) args = [tool_path, repository_ctx.attr.modname] # Grab any extra paths requested by the user. pkg_config_paths = list(getattr( repository_ctx.attr, "pkg_config_paths", [], )) if is_wheel_build(repository_ctx): # TODO(jwnimmer-tri) Ultimately, we want the wheel build to use Bazel # to compile all dependencies. At the moment, however, some are built # using CMake files at drake/tools/wheel/image/dependencies. To find # the libraries installed by those builds, we need to add some custom # paths when calling pkg-config. pkg_config_paths.insert(0, "/opt/drake-dependencies/share/pkgconfig") pkg_config_paths.insert(0, "/opt/drake-dependencies/lib/pkgconfig") # Check if we can find the required *.pc file of any version. result = _run_pkg_config(repository_ctx, args, pkg_config_paths) if result.error != None: defer_error_os_names = getattr( repository_ctx.attr, "defer_error_os_names", [], ) if repository_ctx.os.name in defer_error_os_names: repository_ctx.file( "BUILD.bazel", """ load("@drake//tools/skylark:cc.bzl", "cc_library") cc_library( name = {name}, srcs = ["pkg_config_failed.cc"], visibility = ["//visibility:public"], ) """.format( name = repr(repository_ctx.name), ), ) return struct(value = True, error = None) return result # If we have a minimum version, enforce that. atleast_version = getattr(repository_ctx.attr, "atleast_version", "") if atleast_version: result = _run_pkg_config(repository_ctx, args + [ "--atleast-version", atleast_version, ], pkg_config_paths) if result.error != None: return struct(error = result.error + "during version check") # Determine linkopts. static = getattr(repository_ctx.attr, "static", _DEFAULT_STATIC) libs_args = args + ["--libs"] if static: libs_args = libs_args + ["--static"] result = _run_pkg_config(repository_ctx, libs_args, pkg_config_paths) if result.error != None: return result linkopts = result.tokens # Bazel "linkopts=" must be either switches ("-foo"), variables ("$(FOO)"), # or labels ("foo"). We should only get switches from `pkg-config --libs`. 
    # However, sometimes it produces "-framework CoreFoundation" or similar,
    # which is *supposed* to be a single switch, but our split heuristic
    # chopped it up. We recombine non-switch args with their preceding arg as
    # a repair. We process args in reverse order to keep our loop index
    # unchanged by a pop.
    for i in reversed(range(len(linkopts))):
        linkopt = linkopts[i]

        # Absolute system paths to *.dylib and *.so files get turned into -l
        # instead.
        if linkopt.endswith(".dylib") or linkopt.endswith(".so"):
            if linkopt.endswith(".dylib"):
                possible_libdirs = [
                    "/usr/lib",
                    "/usr/local/lib",
                ]
                suffix = ".dylib"
            elif linkopt.endswith(".so"):
                possible_libdirs = [
                    "/usr/lib",
                    "/usr/lib/x86_64-linux-gnu",
                ]
                suffix = ".so"
            else:
                return struct(error = ("expected linkopt {} to end with " +
                                       ".dylib or .so").format(linkopt))
            for dir in possible_libdirs:
                prefix = dir + "/lib"
                if linkopt.startswith(prefix):
                    name = linkopt[len(prefix):-len(suffix)]
                    if "/" not in name:
                        linkopt = "-l" + name
                        linkopts[i] = linkopt
                        break

        # Add `-Wl,-rpath,<path>` for `-L<path>`.
        # See https://github.com/RobotLocomotion/drake/issues/7387#issuecomment-359952616  # noqa
        if linkopt.startswith("-L"):
            linkopts.insert(i, "-Wl,-rpath," + linkopt[2:])
            continue

        # Switches stay put.
        if linkopt.startswith("-"):
            continue

        # A non-switch arg should be recombined with the preceding arg.
        non_switch_arg = linkopts.pop(i)
        if i == 0:
            return struct(error = "malformed linkopts: " + repr(linkopts))
        linkopts[i - 1] += " " + non_switch_arg

    # Determine cflags; we'll split into includes and defines in a moment.
    result = _run_pkg_config(
        repository_ctx,
        args + ["--cflags"],
        pkg_config_paths,
    )
    if result.error != None:
        return result
    cflags = result.tokens

    # Split cflags into includes and defines. The -I paths from pkg-config
    # will be absolute paths; we'll make them relative in a moment.
    absolute_includes = []
    defines = []
    unknown_cflags = []

    # Blacklist various system include paths on macOS.
    blacklisted_includes = [
        "/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include",  # noqa
        "/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX11.0.sdk/usr/include",  # noqa
        "/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/usr/include",  # noqa
        "/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include",  # noqa
        "/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/include",
        "/Library/Developer/CommandLineTools/SDKs/MacOSX11.0.sdk/usr/include",
        "/Library/Developer/CommandLineTools/SDKs/MacOSX12.0.sdk/usr/include",
        "/Library/Developer/CommandLineTools/usr/include",
    ]

    for cflag in cflags:
        if cflag.startswith("-I"):
            value = cflag[2:]
            if value in blacklisted_includes:
                continue
            if value not in absolute_includes:
                absolute_includes.append(value)
        elif cflag.startswith("-D"):
            value = cflag[2:]
            if value not in defines:
                defines.append(value)
        elif cflag == "-pthread":
            # The pkg-config output has told us to use -pthread when compiling.
            # When compiling the typical effect of -pthread is to -D_REENTRANT;
            # when linking the typical effect of -pthread is to -lpthread. In
            # Bazel, we can't pass -pthread in a cc_library's defines (it's not
            # a preprocessor definition), and we shouldn't pass -pthread only
            # in a cc_library's copts (i.e., non-transitively), since
            # respecting transitivity might be important for some toolchains.
            # Instead, when compiling our code that uses this library, we'll
            # decide to just ignore pkg-config's advice to use -pthread when
            # compiling and instead apply -pthread only when linking.
            if "-pthread" not in linkopts:
                linkopts.append("-pthread")
        elif cflag in [
            "-frounding-math",
            "-ffloat-store",
            "-msse",
            "-msse2",
            "-msse3",
            "-msse4",
            "-msse4.1",
            "-msse4.2",
            "-mfpmath",
        ]:
            # We know these are okay to ignore.
            pass
        else:
            unknown_cflags += [cflag]
    if unknown_cflags:
        print("pkg-config of {} returned flags that we will ignore: {}".format(
            repository_ctx.attr.modname,
            unknown_cflags,
        ))

    # Symlink the absolute include paths into our repository, to obtain
    # relative paths for them as required by cc_library's attributes.
    includes = []
    hdrs_path = repository_ctx.path("include")
    for item in absolute_includes:
        if item == "/usr/include" or item == "/usr/local/include":
            print(("pkg-config of {} returned an include path that " +
                   "contains {} that may contain unrelated headers").format(
                repository_ctx.attr.modname,
                item,
            ))
        symlink_dest = item.replace("/", "_")
        repository_ctx.symlink(
            repository_ctx.path(item),
            hdrs_path.get_child(symlink_dest),
        )
        includes += ["include/" + symlink_dest]
    hdrs_prologue = "glob([\"include/**\"]) + "

    extra_deprecation = getattr(
        repository_ctx.attr,
        "extra_deprecation",
        "",
    )
    if extra_deprecation == "":
        extra_deprecation = None

    # Write out the BUILD.bazel file.
    substitutions = {
        "%{topcomment}": "DO NOT EDIT: generated by pkg_config_repository()",
        "%{licenses}": repr(
            getattr(repository_ctx.attr, "licenses", []),
        ),
        "%{name}": repr(
            repository_ctx.name,
        ),
        "%{srcs}": repr(
            getattr(repository_ctx.attr, "extra_srcs", []),
        ),
        "%{hdrs}": (
            hdrs_prologue + repr(
                getattr(repository_ctx.attr, "extra_hdrs", []),
            )
        ),
        "%{copts}": repr(
            getattr(repository_ctx.attr, "extra_copts", []),
        ),
        "%{defines}": repr(
            defines + getattr(repository_ctx.attr, "extra_defines", []),
        ),
        "%{includes}": repr(
            includes + getattr(repository_ctx.attr, "extra_includes", []),
        ),
        "%{linkopts}": repr(
            linkopts + getattr(repository_ctx.attr, "extra_linkopts", []),
        ),
        "%{deps}": repr(
            getattr(repository_ctx.attr, "extra_deps", []),
        ),
        "%{build_epilog}": getattr(repository_ctx.attr, "build_epilog", ""),
        "%{extra_deprecation}": repr(extra_deprecation),
    }
    template = getattr(
        repository_ctx.attr,
        "build_file_template",
        _DEFAULT_TEMPLATE,
    )
    repository_ctx.template("BUILD.bazel", template, substitutions)

    return struct(value = True, error = None)

def _impl(repository_ctx):
    result = setup_pkg_config_repository(repository_ctx)
    if result.error != None:
        fail("Unable to complete pkg-config setup for " +
             "@{} repository: {}".format(
                 repository_ctx.name,
                 result.error,
             ))

_do_pkg_config_repository = repository_rule(
    # TODO(jamiesnape): Make licenses mandatory.
    # TODO(jamiesnape): Use of this rule may cause additional transitive
    # dependencies to be linked and their licenses must also be enumerated.
    attrs = {
        "licenses": attr.string_list(),
        "modname": attr.string(mandatory = True),
        "atleast_version": attr.string(),
        "static": attr.bool(default = _DEFAULT_STATIC),
        "build_file_template": attr.label(
            default = _DEFAULT_TEMPLATE,
            allow_files = True,
        ),
        "extra_srcs": attr.string_list(),
        "extra_hdrs": attr.string_list(),
        "extra_copts": attr.string_list(),
        "extra_defines": attr.string_list(),
        "extra_includes": attr.string_list(),
        "extra_linkopts": attr.string_list(),
        "extra_deps": attr.string_list(),
        "build_epilog": attr.string(),
        "pkg_config_paths": attr.string_list(),
        "extra_deprecation": attr.string(),
        "defer_error_os_names": attr.string_list(),
    },
    local = True,
    configure = True,
    implementation = _impl,
)

def pkg_config_repository(**kwargs):
    """Creates a repository that contains a single library target, based on
    the results of invoking pkg-config.

    The pkg_config_repository flavor of this rule is intended to be called
    directly from the WORKSPACE file, or from a macro that was called by the
    WORKSPACE file. The setup_pkg_config_repository flavor of this rule is
    intended to be called by other repository_rule implementation functions.

    Example:
        WORKSPACE:
            load("@drake//tools/workspace:pkg_config.bzl", "pkg_config_repository")  # noqa
            pkg_config_repository(
                name = "foo",
                modname = "foo-2.0",
            )

        BUILD:
            cc_library(
                name = "foobar",
                deps = ["@foo"],
                srcs = ["bar.cc"],
            )

    Args:
        name: A unique name for this rule.
        licenses: Licenses of the library. Valid license types include
            restricted, reciprocal, notice, permissive, and unencumbered. See
            https://docs.bazel.build/versions/master/be/functions.html#licenses_args
            for more information.
        modname: The library name as known to pkg-config.
        atleast_version: (Optional) The --atleast-version to pkg-config.
        static: (Optional) Add linkopts for static linking to the library
            target.
        build_file_template: (Optional) (Advanced) Override the BUILD
            template.
        extra_srcs: (Optional) Extra items to add to the library target.
        extra_hdrs: (Optional) Extra items to add to the library target.
        extra_copts: (Optional) Extra items to add to the library target.
        extra_defines: (Optional) Extra items to add to the library target.
        extra_includes: (Optional) Extra items to add to the library target.
        extra_linkopts: (Optional) Extra items to add to the library target.
        extra_deps: (Optional) Extra items to add to the library target.
        build_epilog: (Optional) Extra text to add to the generated
            BUILD.bazel.
        pkg_config_paths: (Optional) Paths to find pkg-config files (*.pc).
            Note that we ignore the environment variable PKG_CONFIG_PATH set
            by the user.
        extra_deprecation: (Optional) Add a deprecation message to the
            library BUILD target.
        defer_error_os_names: (Optional) On these operating systems (as named
            by repository_ctx.os.name), failure to find the *.pc file will
            yield a link-time error, not a fetch-time error. This is useful
            for externals that are guarded by select() statements.
    """
    if "deprecation" in kwargs:
        fail("When calling pkg_config_repository, don't use deprecation=str " +
             "to deprecate a library; instead use extra_deprecation=str.")
    _do_pkg_config_repository(**kwargs)
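
# For illustration only (a minimal sketch, not used anywhere in Drake): the
# docstring above says setup_pkg_config_repository() may be called by other
# repository_rule implementation functions. The "bar" module name and the
# _example_bar_repository rule below are hypothetical placeholders.
def _example_bar_impl(repository_ctx):
    result = setup_pkg_config_repository(repository_ctx)
    if result.error != None:
        fail("Unable to complete pkg-config setup for " +
             "@{} repository: {}".format(
                 repository_ctx.name,
                 result.error,
             ))

_example_bar_repository = repository_rule(
    attrs = {
        # Attrs not listed here fall back to the getattr() defaults above.
        "modname": attr.string(default = "bar"),
        "pkg_config_paths": attr.string_list(),
    },
    local = True,
    configure = True,
    implementation = _example_bar_impl,
)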
0
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/workspace/mirrors.bzl
# This constant contains Drake's default lists of mirrors. It is keyed by the
# repository type using magic strings ("github", etc.), and has values of type
# list-of-string; each string is a pattern for a mirror URL.
#
# When calling a Drake workspace rule that requires a mirror= argument, this
# constant is a reasonable default value.
#
# Each repository type has its own keyword string substitutions within its
# pattern string; these will vary from one repository type to another; consult
# the specific rules (e.g., github_archive()) for details.
#
# The first item in each list is the authoritative source (e.g., the upstream
# server), if there is one.
#
# For Drake's defaults, packages are mirrored from upstream (GitHub, Bitbucket,
# PyPI, etc.) to CloudFront backed by an S3 bucket.
#
DEFAULT_MIRRORS = {
    "crate_universe": [
        # This pattern instructs us to allow the crates.io URL.
        "{default_url}",
        # These patterns are made available as additional backups.
        "https://drake-mirror.csail.mit.edu/crates.io/{archive}",
        "https://s3.amazonaws.com/drake-mirror/crates.io/{archive}",
    ],
    "doxygen": [
        "https://drake-packages.csail.mit.edu/doxygen/{archive}",
        "https://s3.amazonaws.com/drake-packages/doxygen/{archive}",
    ],
    "github": [
        # For github.com, we choose a pattern based on the kind of commit.
        "https://github.com/{repository}/archive/refs/tags/{tag_name}.tar.gz",  # noqa
        "https://github.com/{repository}/archive/{commit_sha}.tar.gz",
        # For Drake's mirrors, we use a single pattern no matter the commit.
        "https://drake-mirror.csail.mit.edu/github/{repository}/{commit}.tar.gz",  # noqa
        "https://s3.amazonaws.com/drake-mirror/github/{repository}/{commit}.tar.gz",  # noqa
    ],
    "github_release_attachments": [
        "https://github.com/{repository}/releases/download/{commit}/{filename}",  # noqa
        "https://drake-mirror.csail.mit.edu/github/{repository}/{commit}/{filename}",  # noqa
        "https://s3.amazonaws.com/drake-mirror/github/{repository}/{commit}/{filename}",  # noqa
    ],
    "maven": [
        "https://jcenter.bintray.com/{fulljar}",
        "https://repo1.maven.org/maven2/{fulljar}",
        # N.B. ibiblio doesn't offer https.
        "http://maven.ibiblio.org/maven2/{fulljar}",
    ],
    "mosek": [
        "https://download.mosek.com/{path}",
        "https://drake-mirror.csail.mit.edu/mosek/{path}",
        "https://s3.amazonaws.com/drake-mirror/mosek/{path}",
    ],
}
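
# For illustration only (shown as comments because load() statements must
# appear at the top of a .bzl file): a workspace rule that takes mirrors=
# can be handed DEFAULT_MIRRORS directly. The "foo" repository, commit, and
# checksum below are hypothetical placeholders; github_archive() is defined
# in //tools/workspace:github.bzl.
#
#   load("//tools/workspace:github.bzl", "github_archive")
#   load("//tools/workspace:mirrors.bzl", "DEFAULT_MIRRORS")
#
#   def foo_repository(name):
#       github_archive(
#           name = name,
#           repository = "example/foo",
#           commit = "v1.0.0",
#           sha256 = "0" * 64,  # Placeholder; use the real checksum.
#           mirrors = DEFAULT_MIRRORS,
#       )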
0
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/workspace/java.bzl
load(
    "@bazel_tools//tools/build_defs/repo:java.bzl",
    "java_import_external",
)
load("//tools/skylark:pathutils.bzl", "basename")

def _impl(repo_ctx):
    os_name = repo_ctx.os.name
    if os_name == "mac os x":
        os_name = "osx"

    # Create the /jar/BUILD.bazel file.
    build_content = """\
package(default_visibility = ["//visibility:public"])
"""
    if os_name in repo_ctx.attr.local_os_targets:
        is_local = True
        filename = basename(repo_ctx.attr.local_jar)
        repo_ctx.symlink(
            repo_ctx.attr.local_jar,
            "jar/{}".format(filename),
        )
        name = "jar"
        jars = [filename]
        build_content += "java_import(name = {name}, jars = {jars})\n".format(
            name = repr(name),
            jars = repr(jars),
        )
    else:
        is_local = False
        name = "jar"
        actual = "@_maven_{}//jar".format(repo_ctx.name)
        build_content += "alias(name = {name}, actual = {actual})\n".format(
            name = repr(name),
            actual = repr(actual),
        )
    repo_ctx.file("jar/BUILD.bazel", build_content)

    # Create the /BUILD.bazel file.
    name = "install"
    targets = [] if is_local else ["//jar"]
    install_content = """\
package(default_visibility = ["//visibility:public"])

load("@drake//tools/install:install.bzl", "install")

install(
    name = {name},
    targets = {targets},
    java_strip_prefix = ["**/"],
    allowed_externals = {targets},
)
"""
    repo_ctx.file("BUILD.bazel", install_content.format(
        name = repr(name),
        targets = repr(targets),
    ))

_internal_drake_java_import = repository_rule(
    attrs = {
        "licenses": attr.string_list(mandatory = True),
        "local_os_targets": attr.string_list(mandatory = True),
        "local_jar": attr.string(mandatory = True),
    },
    implementation = _impl,
)

def drake_java_import(
        name,
        *,
        licenses,
        local_os_targets,
        local_jar,
        maven_jar,
        maven_jar_sha256,
        mirrors):
    """A repository rule to bring in a Java dependency, either from the
    host's OS distribution, or else Maven.

    The list of local_os_targets indicates which OSs provide this jar; for
    those, the local_jar is the full path to the jar. Otherwise, the
    maven_jar will be used. The recognized values for OSs in the list of
    targets are either "linux" or "osx".
    """
    java_import_external(
        name = "_maven_{}".format(name),
        licenses = licenses,
        jar_urls = [
            x.format(fulljar = maven_jar)
            for x in mirrors.get("maven")
        ],
        jar_sha256 = maven_jar_sha256,
    )
    _internal_drake_java_import(
        name = name,
        licenses = licenses,
        local_os_targets = local_os_targets,
        local_jar = local_jar,
    )
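
# For illustration only (a minimal usage sketch, never invoked): the jar
# name, local path, Maven coordinates, and checksum below are hypothetical
# placeholders, following the drake_java_import() docstring above.
def _example_drake_java_import(mirrors):
    drake_java_import(
        name = "net_example_widget",
        licenses = ["notice"],
        local_os_targets = ["linux"],
        local_jar = "/usr/share/java/widget.jar",
        maven_jar = "net/example/widget/1.0/widget-1.0.jar",
        maven_jar_sha256 = "0" * 64,  # Placeholder, not a real checksum.
        mirrors = mirrors,
    )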
0
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/workspace/new_release.py
"""Reports on which of Drake's external dependencies can be updated to a more recent version. This is intended for use by Drake maintainers (only). This program is only supported on Ubuntu Jammy 22.04. To query GitHub APIs, you'll need to authenticate yourself first. There are two ways to do this: (1) Type in your password each time you run this program: bazel run //tools/workspace:new_release -- --use_password (2) Use a GitHub API token: bazel run //tools/workspace:new_release To create the ~/.config/readonly_github_api_token.txt file used by (2), open a browser to https://github.com/settings/tokens and create a new token (it does not need any extra permissions; the default "no checkboxes are set" is good), and save the plaintext hexadecimal token to that file. This program can also automatically prepare upgrades for our GitHub externals by passing the name(s) of package(s) to upgrade as additional arguments: bazel run //tools/workspace:new_release -- --lint --commit rules_python Note that this program runs `bazel` as a subprocess, without any special command line flags. If you do need to use any flags when you run bazel, then those must be added to an rcfile; they cannot be provided on the command line. """ import argparse import getpass import hashlib import json import logging import os import re import shlex import subprocess import time import urllib from dataclasses import dataclass from tempfile import TemporaryDirectory from typing import Optional, Set import git import github3 from tools.workspace.metadata import read_repository_metadata logger = logging.getLogger('new_release') logger.setLevel(logging.INFO) warn = logger.warning info = logger.info # Repository rules that fetch from GitHub. _GITHUB_RULE_TYPES = [ "github", "github_release_attachments" ] # Repository rule that uses an external upgrade script. _SCRIPTED_RULE_TYPE = "scripted" # We'll skip these repositories when making suggestions. _IGNORED_REPOSITORIES = [ "clang_cindex_python3_internal", # Uses a non-default branch. "mosek", # Requires special, non-automated care during upgrades. "pybind11", # Uses a non-default branch. "usockets_internal", # Pinned due to upstream regression. "uwebsockets_internal", # Pinned due to upstream regression. ] # For these repositories, ignore any tags that match the specified regex. _IGNORED_TAGS = { "libpng_internal": r"v[0-9.]+(alpha|beta)[0-9]+", "sdformat_internal": r"sdformat-prerelease_[0-9.]+", } # For these repositories, we only look at tags, not releases. For the dict # value, use a blank value to match the latest tag or a regex to only select # tags that share the match with the tag currently in use; the parentheses # group in the regex denotes the portion of the tag to lock as invariant. # (This can be used to pin to a given major or major.minor release series.) _OVERLOOK_RELEASE_REPOSITORIES = { "github3_py_internal": r"^(\d+.)", "gz_math_internal": r"^(gz)", "gz_utils_internal": r"^(gz)", "petsc": r"^(v)", "pycodestyle": "", "qhull_internal": r"^(2)", "sdformat_internal": "", "xmlrunner_py": "", } # Packages in these cohorts should be upgraded together (in a single commit). _COHORTS = ( # clarabel_cpp uses crate_universe; be sure to keep them aligned. {"clarabel_cpp_internal", "crate_universe"}, # mypy uses mypy_extensions; be sure to keep them aligned. {"mypy_internal", "mypy_extensions_internal"}, # rules_rust uses rust_toolchain; be sure to keep them aligned. {"rules_rust", "rust_toolchain"}, # sdformat depends on both gz libraries; be sure to keep them aligned. 
{"sdformat_internal", "gz_math_internal", "gz_utils_internal"}, # uwebsockets depends on usockets; be sure to keep them aligned. {"uwebsockets_internal", "usockets_internal"}, ) @dataclass class UpgradeResult: was_upgraded: bool can_be_committed: bool = False modified_paths: Optional[Set[str]] = None commit_message: Optional[str] = None def _str_replace_forced(original, old, new): if old == new: return original result = original.replace(old, new) if result == original: raise RuntimeError(f"Could not find '{old}' to substitute") return result def _rewrite_file_contents(path, new_content): """Atomically replace the contents of path with new_content.""" with open(f"{path}.new", "w", encoding="utf-8") as f: f.write(new_content) os.rename(f"{path}.new", path) def _check_output(args): return subprocess.check_output(args).decode("utf8") def _get_default_username(): origin_url = _check_output( ["git", "config", "--get", "remote.origin.url"]).strip() # Match one of these two cases: # [email protected]:user/drake.git # https://[email protected]/user/drake.git match = re.search(r"(github.com:(.*?)/|/(.*?)@github.com)", origin_url) if not match: return None _, git_user, http_user = match.groups() return git_user or http_user def _smells_like_a_git_commit(revision): """Returns true iff revision seems to be a git commit (as opposed to a version number tag name). This might produce false positives for very long version numbers, but we've never seen that in practice. """ return len(revision) == 40 def _is_ignored_tag(commit, workspace): """Returns true iff commit matches an ignore rule or seems to be a pre-release. """ ignore_re = _IGNORED_TAGS.get(workspace) if ignore_re and re.match(ignore_re, commit): # Matches the regex of tag names to definitely ignore; do so quietly so # we don't spam the user. return True development_stages = ["alpha", "beta", "rc", "pre"] prerelease = any(stage in commit for stage in development_stages) if prerelease: # Heuristically looks like a pre-release; ignore it, but log it for the # user to check. warn(f"Skipping prerelease {commit} for {workspace}") return prerelease def _latest_tag(gh_repo, workspace): for tag in gh_repo.tags(): if _is_ignored_tag(tag.name, workspace): continue return tag.name warn(f"Could not find any matching tags for {workspace}") return None def _handle_github(workspace_name, gh, data): time.sleep(0.2) # Don't make github angry. old_commit = data["commit"] new_commit = None owner, repo_name = data["repository"].split("/") gh_repo = gh.repository(owner, repo_name) # If we're tracking via git commit, then upgrade to the newest commit. if _smells_like_a_git_commit(old_commit): new_commit = gh_repo.commit("HEAD").sha return old_commit, new_commit # Sometimes prefer checking only tags, not releases. tags_pattern = _OVERLOOK_RELEASE_REPOSITORIES.get(workspace_name) if tags_pattern == "": new_commit = _latest_tag(gh_repo, workspace_name) return old_commit, new_commit # Sometimes limit candidate tags to those matching a regex. if tags_pattern is not None: match = re.search(tags_pattern, old_commit) assert match, f"No {tags_pattern} in {old_commit}" (old_hit,) = match.groups() for tag in gh_repo.tags(): match = re.search(tags_pattern, tag.name) if match: (new_hit,) = match.groups() if old_hit == new_hit: if _is_ignored_tag(tag.name, workspace_name): continue new_commit = tag.name break return old_commit, new_commit # By default, use the latest release if there is one. Otherwise, use the # latest tag. 
    try:
        new_commit = gh_repo.latest_release().tag_name
        if _is_ignored_tag(new_commit, workspace_name):
            new_commit = _latest_tag(gh_repo, workspace_name)
    except github3.exceptions.NotFoundError:
        new_commit = _latest_tag(gh_repo, workspace_name)
    return old_commit, new_commit


def _check_for_upgrades(gh, args, metadata):
    for workspace_name, data in sorted(metadata.items()):
        if workspace_name in _IGNORED_REPOSITORIES:
            continue
        if data.get("version_pin"):
            continue
        rule_type = data["repository_rule_type"]
        if rule_type in _GITHUB_RULE_TYPES:
            old_commit, new_commit = _handle_github(workspace_name, gh, data)
        elif rule_type == "crate_universe":
            # "crate_universe" repositories are individual *output*
            # repositories generated by the master repository rule
            # crate_universe_repositories(). Upgrades happen via the
            # "scripted"-rule-type upgrade of "crate_universe".
            continue
        elif rule_type == _SCRIPTED_RULE_TYPE:
            info(f"{workspace_name} may need upgrade")
            continue
        elif rule_type == "manual":
            warn(f"{workspace_name} version "
                 f"{data.get('version', '???')} needs manual inspection")
            continue
        else:
            raise RuntimeError(
                f"Bad rule type {rule_type} in {workspace_name}")
        if old_commit == new_commit:
            continue
        elif new_commit is not None:
            info(f"{workspace_name} needs upgrade"
                 f" from {old_commit} to {new_commit}")
        else:
            warn(f"{workspace_name} version {old_commit}"
                 " needs manual inspection")


def _modified_paths(repo, root):
    """Returns the set of paths under `root` which are added, removed or
    altered.
    """
    assert os.path.isdir(os.path.join(repo.working_tree_dir, root))
    if not root.endswith('/'):
        root += '/'
    result = set()
    for item in repo.untracked_files:
        if item.startswith(root):
            result.add(item)
    for other in [None, 'HEAD']:
        for item in repo.index.diff(other):
            if item.a_path.startswith(root):
                result.add(item.a_path)
            if item.b_path.startswith(root):
                result.add(item.b_path)
    return result


def _is_unmodified(repo, path):
    """Returns true iff the given `path` is unmodified in the working tree
    of the given `git.Repo`, `repo`. If repo is None, returns False.
    """
    if repo is None:
        return False
    if os.path.isdir(os.path.join(repo.working_tree_dir, path)):
        return len(_modified_paths(repo, path)) == 0
    else:
        for other in [None, 'HEAD']:
            if path in [item.b_path for item in repo.index.diff(other)]:
                return False
        return True


def _do_commit(local_drake_checkout, actually_commit, workspace_names, paths,
               message):
    if actually_commit:
        names = ", ".join(workspace_names)
        local_drake_checkout.git.add('-A', *paths)
        local_drake_checkout.git.commit(
            '-o', *paths, '-m', "[workspace] " + message)
        info("")
        info("*" * 72)
        info(f"Done. Changes for {names} were committed.")
        info("Be sure to review the changes and amend the commit if needed.")
        info("*" * 72)
        info("")
    else:
        info("")
        info("*" * 72)
        info("Done. Be sure to review and commit the changes:")
        info(f"  git add {' '.join([shlex.quote(p) for p in paths])}")
        info(f"  git commit -m{shlex.quote('[workspace] ' + message)}")
        info("*" * 72)
        info("")


def _download(url, local_filename):
    """Given a url, downloads it to the local_filename (overwriting anything
    that was there previously). Returns the sha256 checksum.
    """
    hasher = hashlib.sha256()
    with open(local_filename, "wb") as f:
        with urllib.request.urlopen(url) as response:
            while True:
                data = response.read(4096)
                if not data:
                    break
                hasher.update(data)
                f.write(data)
    return hasher.hexdigest()


def _do_upgrade_github_archive(
        *,
        temp_dir,
        old_commit,
        new_commit,
        bzl_filename,
        repository):
    # Slurp the file we're supposed to modify.
with open(bzl_filename, "r", encoding="utf-8") as f: lines = f.readlines() # Locate the two hexadecimal lines we need to edit. commit_line_re = re.compile( r'(?<= commit = ")(' + re.escape(old_commit) + r')(?=",)') checksum_line_re = re.compile( r'(?<= sha256 = ")([0-9a-f]{64})(?=",)') commit_line_num = None checksum_line_num = None for i, line in enumerate(lines): match = commit_line_re.search(line) if match: assert commit_line_num is None commit_line_num = i match = checksum_line_re.search(line) if match: assert checksum_line_num is None checksum_line_num = i assert commit_line_num is not None assert checksum_line_num is not None # Download the new source archive. info("Downloading new archive...") new_url = f"https://github.com/{repository}/archive/{new_commit}.tar.gz" new_filename = new_commit.replace("/", "_") new_checksum = _download(new_url, f"{temp_dir}/{new_filename}.tar.gz") # Update the repository.bzl contents and then write it out. lines[commit_line_num] = commit_line_re.sub( new_commit, lines[commit_line_num]) lines[checksum_line_num] = checksum_line_re.sub( new_checksum, lines[checksum_line_num]) _rewrite_file_contents(bzl_filename, ''.join(lines)) def _do_upgrade_github_release_attachments( *, temp_dir, old_commit, new_commit, bzl_filename, repository, old_attachments): # Slurp the file we're supposed to modify. with open(bzl_filename, "r", encoding="utf-8") as f: bzl_content = f.read() # Download the new attachments. info("Downloading new attachments...") new_attachments = {} for filename in old_attachments.keys(): new_url = (f"https://github.com/{repository}/" f"releases/download/{new_commit}/{filename}") new_checksum = _download(new_url, f"{temp_dir}/{filename}") new_attachments[filename] = new_checksum # Update the repository.bzl contents and then write it out. bzl_content = _str_replace_forced( bzl_content, f'commit = "{old_commit}"', f'commit = "{new_commit}"') for filename, old_checksum in old_attachments.items(): new_checksum = new_attachments[filename] bzl_content = _str_replace_forced( bzl_content, f'"{old_checksum}"', f'"{new_checksum}"') _rewrite_file_contents(bzl_filename, bzl_content) def _do_upgrade_scripted( *, temp_dir, local_drake_checkout, workspace_root, script): # Run the upgrade script. repo_root = local_drake_checkout.working_tree_dir subprocess.check_call([os.path.join(repo_root, workspace_root, script)]) # Look for modified paths. return _modified_paths(local_drake_checkout, workspace_root) def _do_upgrade(temp_dir, gh, local_drake_checkout, workspace_name, metadata): """Returns an `UpgradeResult` describing what (if anything) was done.""" if workspace_name not in metadata: raise RuntimeError(f"Unknown repository {workspace_name}") data = metadata[workspace_name] rule_type = data["repository_rule_type"] bzl_filename = f"tools/workspace/{workspace_name}/repository.bzl" if rule_type == _SCRIPTED_RULE_TYPE: # Determine if we should and can commit the changes made. workspace_root = f"tools/workspace/{workspace_name}/" can_commit = _is_unmodified(local_drake_checkout, workspace_root) if local_drake_checkout and not can_commit: warn(f"{workspace_root} has local changes.") warn(f"Changes made for {workspace_name} will NOT be committed.") # Do the upgrade. 
        new_commit = None
        modified_paths = _do_upgrade_scripted(
            temp_dir=temp_dir,
            local_drake_checkout=local_drake_checkout,
            workspace_root=workspace_root,
            script=data["upgrade_script"],
        )
        if not len(modified_paths):
            return UpgradeResult(False)
    else:
        if rule_type not in _GITHUB_RULE_TYPES:
            raise RuntimeError(f"Cannot auto-upgrade {workspace_name}")

        # Sanity check that an upgrade is possible.
        old_commit, new_commit = _handle_github(workspace_name, gh, data)
        if old_commit == new_commit:
            return UpgradeResult(False)
        elif new_commit is None:
            raise RuntimeError(f"Cannot auto-upgrade {workspace_name}")
        info(f"Upgrading {workspace_name} from {old_commit} to {new_commit}")

        # Determine if we should and can commit the changes made.
        can_commit = _is_unmodified(local_drake_checkout, bzl_filename)
        if local_drake_checkout and not can_commit:
            warn(f"{bzl_filename} has local changes.")
            warn(f"Changes made for {workspace_name} will NOT be committed.")

        # Do the upgrade.
        if rule_type == "github":
            _do_upgrade_github_archive(
                temp_dir=temp_dir,
                old_commit=old_commit,
                new_commit=new_commit,
                bzl_filename=bzl_filename,
                repository=data["repository"],
            )
        else:
            assert rule_type == "github_release_attachments"
            _do_upgrade_github_release_attachments(
                temp_dir=temp_dir,
                old_commit=old_commit,
                new_commit=new_commit,
                bzl_filename=bzl_filename,
                repository=data["repository"],
                old_attachments=data["attachments"],
            )
        modified_paths = {bzl_filename}

        # Copy the downloaded tarball into the repository cache.
        info("Populating repository cache ...")
        subprocess.check_call(
            ["bazel", "fetch", "//...", f"--distdir={temp_dir}"])

    # Check for additional instructions.
    upgrade_advice = data.get("upgrade_advice", "")
    if len(upgrade_advice):
        warn("")
        warn("*" * 72)
        warn(upgrade_advice)
        warn("*" * 72)
        warn("")

    # Finalize the result.
    message = f"Upgrade {workspace_name} to latest"
    if new_commit:
        if _smells_like_a_git_commit(new_commit):
            message += " commit"
        else:
            message += f" release {new_commit}"
    return UpgradeResult(True, can_commit, modified_paths, message)


def _do_upgrades(temp_dir, gh, local_drake_checkout, workspace_names,
                 metadata):
    # Make sure there are workspaces to update.
    if len(workspace_names) == 0:
        return
    can_commit = True
    modified_paths = []
    commit_messages = []
    modified_workspace_names = []
    for workspace_name in workspace_names:
        result = _do_upgrade(temp_dir, gh, local_drake_checkout,
                             workspace_name, metadata)
        if result.was_upgraded:
            can_commit = can_commit and result.can_be_committed
            modified_paths += result.modified_paths
            commit_messages.append(result.commit_message)
            modified_workspace_names.append(workspace_name)
        elif len(workspace_names) == 1:
            raise RuntimeError(f"No upgrade needed for {workspace_name}")

    # Determine if we should and can commit the changes made.
    if len(modified_workspace_names) == 1:
        _do_commit(local_drake_checkout,
                   actually_commit=can_commit,
                   workspace_names=modified_workspace_names,
                   paths=modified_paths,
                   message=commit_messages[0])
    else:
        cohort = ', '.join(modified_workspace_names)
        if not can_commit:
            warn(f"Changes made for {cohort} will NOT be committed.")
        message = f"Upgrade {cohort} to latest\n\n"
        message += "- " + "\n- ".join(commit_messages)
        _do_commit(local_drake_checkout,
                   actually_commit=can_commit,
                   workspace_names=modified_workspace_names,
                   paths=modified_paths,
                   message=message)


def main():
    parser = argparse.ArgumentParser(
        prog="new_release",
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        "--commit", action="store_true", default=False,
        help="When upgrading repositories, automatically commit the changes.")
    parser.add_argument(
        "--lint", action="store_true", default=False,
        help="Also run some sanity tests on the repository, after all other"
             " operations have completed successfully.")
    parser.add_argument(
        "--use_password", action="store_true", default=False,
        help="Prompt for the GitHub password, instead of using an API token.")
    parser.add_argument(
        "--token_file", default="~/.config/readonly_github_api_token.txt",
        help="Uses an API token read from this filename, unless "
             "--use_password was given (default: %(default)s)")
    parser.add_argument(
        "--user", metavar="USER", type=str, default=_get_default_username(),
        help="GitHub username (default: %(default)s)")
    parser.add_argument(
        "--verbose", action="store_true", default=False)
    parser.add_argument(
        "workspace", nargs="*", metavar="WORKSPACES_NAME", type=str,
        help="(Optional) Instead of reporting on possible upgrades,"
             " download new archives for the given externals"
             " and edit their bzl rules to match.")
    args = parser.parse_args()
    if 'BUILD_WORKSPACE_DIRECTORY' in os.environ:
        os.chdir(os.environ['BUILD_WORKING_DIRECTORY'])
    if not os.path.exists('WORKSPACE'):
        parser.error("Couldn't find WORKSPACE; this script must be run"
                     " from the root of your Drake checkout.")

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(format="%(message)s")

    if args.use_password and not args.user:
        parser.error("Couldn't guess github username; you must supply"
                     " --user.")

    # Log in to github.
    if args.use_password:
        prompt = f"Password for https://{args.user}@github.com: "
        gh = github3.login(
            username=args.user,
            password=getpass.getpass(prompt))
    else:
        with open(os.path.expanduser(args.token_file), "r") as f:
            token = f.read().strip()
        gh = github3.login(token=token)

    # Are we operating on all repositories, or just one?
    if len(args.workspace):
        workspaces = set(args.workspace)
        # Grow the set of specified repositories to cover cohorts.
        for workspace in args.workspace:
            for cohort in _COHORTS:
                if workspace in cohort:
                    workspaces.update(cohort)
    else:
        if args.commit:
            parser.error("--commit requires one or more workspaces.")
        # (None denotes "all".)
        workspaces = None

    if args.commit:
        local_drake_checkout = git.Repo(os.path.realpath("."))
    else:
        local_drake_checkout = None

    # Grab the workspace metadata.
    info("Collecting bazel repository details...")
    metadata = read_repository_metadata(repositories=workspaces)
    logging.debug(json.dumps(metadata, sort_keys=True, indent=2))

    if workspaces is not None:
        visited_workspaces = set()
        for workspace in workspaces:
            # If we already did this as part of a tandem upgrade, skip it.
            if workspace in visited_workspaces:
                continue

            # Determine if this workspace is part of a cohort.
            cohort_workspaces = {workspace}
            for cohort in _COHORTS:
                if workspace in cohort:
                    cohort_workspaces = cohort

            # Actually do the upgrade(s).
            with TemporaryDirectory(prefix='drake_new_release_') as temp_dir:
                _do_upgrades(temp_dir, gh, local_drake_checkout,
                             cohort_workspaces, metadata)
                visited_workspaces.update(cohort_workspaces)
    else:
        # Run our report of what's available.
        info("Checking for new releases...")
        _check_for_upgrades(gh, args, metadata)

    if args.lint:
        subprocess.check_call(["bazel", "test", "--config=lint", "//..."])


if __name__ == '__main__':
    main()
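
# For illustration only (comments, not part of the original script): the
# cohort expansion in main() means that naming any one member of a cohort
# upgrades the whole cohort. For example, a hypothetical session
#
#   bazel run //tools/workspace:new_release -- --commit mypy_internal
#
# starts with workspaces = {"mypy_internal"} and, because of the
# {"mypy_internal", "mypy_extensions_internal"} entry in _COHORTS, grows it:
#
#   workspaces = {"mypy_internal"}
#   for cohort in _COHORTS:
#       if "mypy_internal" in cohort:
#           workspaces.update(cohort)
#   # workspaces == {"mypy_internal", "mypy_extensions_internal"}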
0
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/workspace/deb.bzl
load(
    "//tools/workspace:execute.bzl",
    "execute_and_return",
)

def setup_new_deb_archive(repo_ctx):
    """Behaves like new_deb_archive, except that (1) this is a macro instead
    of a rule and (2) this macro returns an error status instead of
    fail()ing. The return value is a struct with a field `error` that will
    be None on success or else a detailed message on any failure.
    """
    name = repo_ctx.attr.name
    filenames = repo_ctx.attr.filenames
    mirrors = repo_ctx.attr.mirrors
    sha256s = repo_ctx.attr.sha256s
    build_file = repo_ctx.attr.build_file

    # Download and unpack all of the debs.
    for i in range(len(filenames)):
        filename = filenames[i]
        if i == len(sha256s):
            sha256s = sha256s + [""]
        sha256 = sha256s[i]
        if not sha256:
            # We do not permit an empty checksum; empty means "don't care".
            sha256 = "0" * 64
        repo_ctx.download(
            url = [mirror + "/" + filename for mirror in mirrors],
            output = filename,
            sha256 = sha256,
        )
        result = execute_and_return(
            repo_ctx,
            ["dpkg-deb", "-x", filename, "."],
        )
        if result.error:
            return result

    # Add in the build file.
    repo_ctx.symlink(build_file, "BUILD.bazel")

    # Success.
    return struct(error = None)

def _impl(repo_ctx):
    result = setup_new_deb_archive(repo_ctx)
    if result.error != None:
        fail("Unable to complete setup for @{} repository: {}".format(
            repo_ctx.name,
            result.error,
        ))

new_deb_archive = repository_rule(
    attrs = {
        "filenames": attr.string_list(
            doc = """
            Base filenames of the debs, e.g., ["libfoo-dev_123_amd64.deb"].
            When multiple files are listed, they will all be extracted atop
            each other (within our sandbox), as is typical for Debian install.
            """,
            mandatory = True,
            allow_empty = False,
        ),
        "mirrors": attr.string_list(
            doc = """
            List of URLs to download from, without the filename portion,
            e.g., ["https://example.com/archives"].
            """,
            mandatory = True,
            allow_empty = False,
        ),
        "sha256s": attr.string_list(
            doc = """
            Checksums of the files. When unsure, you may set it to an empty
            string or list; the checksum error will offer a suggestion. The
            sha256s and filenames are matched ordering (i.e., parallel lists).
            """,
        ),
        "build_file": attr.label(
            doc = """
            Label for BUILD.bazel file to add into the repository. This
            should contain the rules that expose the archive contents for
            consumers. The *.deb file contents will appear at ".", so paths
            are like, e.g., `hdrs = glob(["usr/include/foo/**/*.h"]),`.
            """,
            mandatory = True,
            allow_files = True,
        ),
    },
    implementation = _impl,
)
"""A repository rule that downloads and unpacks one or more *.deb files."""
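
# For illustration only (a minimal usage sketch, never invoked): the deb
# filename, mirror URL, and build_file label below are hypothetical
# placeholders. A WORKSPACE macro would make a call like this one.
def _example_new_deb_archive():
    new_deb_archive(
        name = "foo_debs",
        filenames = ["libfoo-dev_1.2.3_amd64.deb"],
        mirrors = ["https://example.com/archives"],
        sha256s = [""],  # Empty here; the checksum error offers a suggestion.
        build_file = "@drake//tools/workspace/foo:package.BUILD.bazel",
    )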
0
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/workspace/default.bzl
load("//tools/workspace:mirrors.bzl", "DEFAULT_MIRRORS") load("//tools/workspace/abseil_cpp_internal:repository.bzl", "abseil_cpp_internal_repository") # noqa load("//tools/workspace/bazel_skylib:repository.bzl", "bazel_skylib_repository") # noqa load("//tools/workspace/bazelisk:repository.bzl", "bazelisk_repository") load("//tools/workspace/blas:repository.bzl", "blas_repository") load("//tools/workspace/build_bazel_apple_support:repository.bzl", "build_bazel_apple_support_repository") # noqa load("//tools/workspace/buildifier:repository.bzl", "buildifier_repository") load("//tools/workspace/cc:repository.bzl", "cc_repository") load("//tools/workspace/ccd_internal:repository.bzl", "ccd_internal_repository") # noqa load("//tools/workspace/clang_cindex_python3_internal:repository.bzl", "clang_cindex_python3_internal_repository") # noqa load("//tools/workspace/clarabel_cpp_internal:repository.bzl", "clarabel_cpp_internal_repository") # noqa load("//tools/workspace/clp_internal:repository.bzl", "clp_internal_repository") # noqa load("//tools/workspace/coinutils_internal:repository.bzl", "coinutils_internal_repository") # noqa load("//tools/workspace/com_jidesoft_jide_oss:repository.bzl", "com_jidesoft_jide_oss_repository") # noqa load("//tools/workspace/common_robotics_utilities:repository.bzl", "common_robotics_utilities_repository") # noqa load("//tools/workspace/commons_io:repository.bzl", "commons_io_repository") load("//tools/workspace/conex_internal:repository.bzl", "conex_internal_repository") # noqa load("//tools/workspace/crate_universe:repository.bzl", "crate_universe_repositories") # noqa load("//tools/workspace/csdp_internal:repository.bzl", "csdp_internal_repository") # noqa load("//tools/workspace/curl_internal:repository.bzl", "curl_internal_repository") # noqa load("//tools/workspace/dm_control_internal:repository.bzl", "dm_control_internal_repository") # noqa load("//tools/workspace/doxygen:repository.bzl", "doxygen_repository") load("//tools/workspace/drake_models:repository.bzl", "drake_models_repository") # noqa load("//tools/workspace/eigen:repository.bzl", "eigen_repository") load("//tools/workspace/fcl_internal:repository.bzl", "fcl_internal_repository") # noqa load("//tools/workspace/fmt:repository.bzl", "fmt_repository") load("//tools/workspace/gflags:repository.bzl", "gflags_repository") load("//tools/workspace/gfortran:repository.bzl", "gfortran_repository") load("//tools/workspace/github3_py_internal:repository.bzl", "github3_py_internal_repository") # noqa load("//tools/workspace/glib:repository.bzl", "glib_repository") load("//tools/workspace/glx:repository.bzl", "glx_repository") load("//tools/workspace/googlebenchmark:repository.bzl", "googlebenchmark_repository") # noqa load("//tools/workspace/gtest:repository.bzl", "gtest_repository") load("//tools/workspace/gurobi:repository.bzl", "gurobi_repository") load("//tools/workspace/gymnasium_py:repository.bzl", "gymnasium_py_repository") # noqa load("//tools/workspace/gz_math_internal:repository.bzl", "gz_math_internal_repository") # noqa load("//tools/workspace/gz_utils_internal:repository.bzl", "gz_utils_internal_repository") # noqa load("//tools/workspace/ipopt:repository.bzl", "ipopt_repository") load("//tools/workspace/ipopt_internal_fromsource:repository.bzl", "ipopt_internal_fromsource_repository") # noqa load("//tools/workspace/ipopt_internal_pkgconfig:repository.bzl", "ipopt_internal_pkgconfig_repository") # noqa load("//tools/workspace/lapack:repository.bzl", "lapack_repository") 
load("//tools/workspace/lcm:repository.bzl", "lcm_repository") load("//tools/workspace/libblas:repository.bzl", "libblas_repository") load("//tools/workspace/libjpeg_turbo_internal:repository.bzl", "libjpeg_turbo_internal_repository") # noqa load("//tools/workspace/liblapack:repository.bzl", "liblapack_repository") load("//tools/workspace/libpfm:repository.bzl", "libpfm_repository") load("//tools/workspace/libpng_internal:repository.bzl", "libpng_internal_repository") # noqa load("//tools/workspace/libtiff_internal:repository.bzl", "libtiff_internal_repository") # noqa load("//tools/workspace/meshcat:repository.bzl", "meshcat_repository") load("//tools/workspace/mosek:repository.bzl", "mosek_repository") load("//tools/workspace/mpmath_py_internal:repository.bzl", "mpmath_py_internal_repository") # noqa load("//tools/workspace/msgpack_internal:repository.bzl", "msgpack_internal_repository") # noqa load("//tools/workspace/mumps_internal:repository.bzl", "mumps_internal_repository") # noqa load("//tools/workspace/mypy_extensions_internal:repository.bzl", "mypy_extensions_internal_repository") # noqa load("//tools/workspace/mypy_internal:repository.bzl", "mypy_internal_repository") # noqa load("//tools/workspace/nanoflann_internal:repository.bzl", "nanoflann_internal_repository") # noqa load("//tools/workspace/nasm:repository.bzl", "nasm_repository") load("//tools/workspace/net_sf_jchart2d:repository.bzl", "net_sf_jchart2d_repository") # noqa load("//tools/workspace/nlohmann_internal:repository.bzl", "nlohmann_internal_repository") # noqa load("//tools/workspace/nlopt_internal:repository.bzl", "nlopt_internal_repository") # noqa load("//tools/workspace/onetbb_internal:repository.bzl", "onetbb_internal_repository") # noqa load("//tools/workspace/openblas:repository.bzl", "openblas_repository") load("//tools/workspace/opencl:repository.bzl", "opencl_repository") load("//tools/workspace/opengl:repository.bzl", "opengl_repository") load("//tools/workspace/openusd_internal:repository.bzl", "openusd_internal_repository") # noqa load("//tools/workspace/org_apache_xmlgraphics_commons:repository.bzl", "org_apache_xmlgraphics_commons_repository") # noqa load("//tools/workspace/osqp_internal:repository.bzl", "osqp_internal_repository") # noqa load("//tools/workspace/picosha2_internal:repository.bzl", "picosha2_internal_repository") # noqa load("//tools/workspace/platforms:repository.bzl", "platforms_repository") load("//tools/workspace/pybind11:repository.bzl", "pybind11_repository") load("//tools/workspace/pycodestyle:repository.bzl", "pycodestyle_repository") load("//tools/workspace/python:repository.bzl", "python_repository") load("//tools/workspace/qdldl_internal:repository.bzl", "qdldl_internal_repository") # noqa load("//tools/workspace/qhull_internal:repository.bzl", "qhull_internal_repository") # noqa load("//tools/workspace/ros_xacro_internal:repository.bzl", "ros_xacro_internal_repository") # noqa load("//tools/workspace/rules_license:repository.bzl", "rules_license_repository") # noqa load("//tools/workspace/rules_python:repository.bzl", "rules_python_repository") # noqa load("//tools/workspace/rules_rust:repository.bzl", "rules_rust_repository") load("//tools/workspace/rules_rust_tinyjson:repository.bzl", "rules_rust_tinyjson_repository") # noqa load("//tools/workspace/rust_toolchain:repository.bzl", "register_rust_toolchains", "rust_toolchain_repositories") # noqa load("//tools/workspace/scs_internal:repository.bzl", "scs_internal_repository") # noqa 
load("//tools/workspace/sdformat_internal:repository.bzl", "sdformat_internal_repository") # noqa load("//tools/workspace/snopt:repository.bzl", "snopt_repository") load("//tools/workspace/spdlog:repository.bzl", "spdlog_repository") load("//tools/workspace/stable_baselines3_internal:repository.bzl", "stable_baselines3_internal_repository") # noqa load("//tools/workspace/statsjs:repository.bzl", "statsjs_repository") load("//tools/workspace/stduuid_internal:repository.bzl", "stduuid_internal_repository") # noqa load("//tools/workspace/styleguide:repository.bzl", "styleguide_repository") load("//tools/workspace/suitesparse_internal:repository.bzl", "suitesparse_internal_repository") # noqa load("//tools/workspace/sympy_py_internal:repository.bzl", "sympy_py_internal_repository") # noqa load("//tools/workspace/tinyobjloader_internal:repository.bzl", "tinyobjloader_internal_repository") # noqa load("//tools/workspace/tinyxml2_internal:repository.bzl", "tinyxml2_internal_repository") # noqa load("//tools/workspace/tomli_internal:repository.bzl", "tomli_internal_repository") # noqa load("//tools/workspace/typing_extensions_internal:repository.bzl", "typing_extensions_internal_repository") # noqa load("//tools/workspace/uritemplate_py_internal:repository.bzl", "uritemplate_py_internal_repository") # noqa load("//tools/workspace/usockets_internal:repository.bzl", "usockets_internal_repository") # noqa load("//tools/workspace/uwebsockets_internal:repository.bzl", "uwebsockets_internal_repository") # noqa load("//tools/workspace/voxelized_geometry_tools:repository.bzl", "voxelized_geometry_tools_repository") # noqa load("//tools/workspace/vtk_internal:repository.bzl", "vtk_internal_repository") # noqa load("//tools/workspace/x11:repository.bzl", "x11_repository") load("//tools/workspace/xmlrunner_py:repository.bzl", "xmlrunner_py_repository") # noqa load("//tools/workspace/yaml_cpp_internal:repository.bzl", "yaml_cpp_internal_repository") # noqa load("//tools/workspace/zlib:repository.bzl", "zlib_repository") def add_default_repositories(excludes = [], mirrors = DEFAULT_MIRRORS): """Declares workspace repositories for all externals needed by drake (other than those built into Bazel, of course). This is intended to be loaded and called from a WORKSPACE file. Args: excludes: list of string names of repositories to exclude; this can be useful if a WORKSPACE file has already supplied its own external of a given name. 
""" if "abseil_cpp_internal" not in excludes: abseil_cpp_internal_repository(name = "abseil_cpp_internal", mirrors = mirrors) # noqa if "bazelisk" not in excludes: bazelisk_repository(name = "bazelisk", mirrors = mirrors) if "bazel_skylib" not in excludes: bazel_skylib_repository(name = "bazel_skylib", mirrors = mirrors) if "blas" not in excludes: blas_repository(name = "blas") if "build_bazel_apple_support" not in excludes: build_bazel_apple_support_repository(name = "build_bazel_apple_support", mirrors = mirrors) # noqa if "buildifier" not in excludes: buildifier_repository(name = "buildifier", mirrors = mirrors) if "cc" not in excludes: cc_repository(name = "cc") if "ccd_internal" not in excludes: ccd_internal_repository(name = "ccd_internal", mirrors = mirrors) if "clang_cindex_python3_internal" not in excludes: clang_cindex_python3_internal_repository(name = "clang_cindex_python3_internal", mirrors = mirrors) # noqa if "clarabel_cpp_internal" not in excludes: clarabel_cpp_internal_repository(name = "clarabel_cpp_internal", mirrors = mirrors) # noqa if "clp_internal" not in excludes: clp_internal_repository(name = "clp_internal", mirrors = mirrors) if "coinutils_internal" not in excludes: coinutils_internal_repository(name = "coinutils_internal", mirrors = mirrors) # noqa if "com_jidesoft_jide_oss" not in excludes: com_jidesoft_jide_oss_repository(name = "com_jidesoft_jide_oss", mirrors = mirrors) # noqa if "common_robotics_utilities" not in excludes: common_robotics_utilities_repository(name = "common_robotics_utilities", mirrors = mirrors) # noqa if "commons_io" not in excludes: commons_io_repository(name = "commons_io", mirrors = mirrors) if "conex_internal" not in excludes: conex_internal_repository(name = "conex_internal", mirrors = mirrors) if "crate_universe" not in excludes: crate_universe_repositories(mirrors = mirrors, excludes = excludes) if "csdp_internal" not in excludes: csdp_internal_repository(name = "csdp_internal", mirrors = mirrors) if "curl_internal" not in excludes: curl_internal_repository(name = "curl_internal", mirrors = mirrors) if "doxygen" not in excludes: doxygen_repository(name = "doxygen", mirrors = mirrors) if "dm_control_internal" not in excludes: dm_control_internal_repository(name = "dm_control_internal", mirrors = mirrors) # noqa if "drake_models" not in excludes: drake_models_repository(name = "drake_models", mirrors = mirrors) if "eigen" not in excludes: eigen_repository(name = "eigen") if "fcl_internal" not in excludes: fcl_internal_repository(name = "fcl_internal", mirrors = mirrors) if "fmt" not in excludes: fmt_repository(name = "fmt", mirrors = mirrors) if "gflags" not in excludes: gflags_repository(name = "gflags", mirrors = mirrors) if "gfortran" not in excludes: gfortran_repository(name = "gfortran") if "github3_py_internal" not in excludes: github3_py_internal_repository(name = "github3_py_internal", mirrors = mirrors) # noqa if "glib" not in excludes: glib_repository(name = "glib") if "glx" not in excludes: glx_repository(name = "glx") if "googlebenchmark" not in excludes: googlebenchmark_repository(name = "googlebenchmark", mirrors = mirrors) if "gtest" not in excludes: gtest_repository(name = "gtest", mirrors = mirrors) if "gurobi" not in excludes: gurobi_repository(name = "gurobi") if "gz_math_internal" not in excludes: gz_math_internal_repository(name = "gz_math_internal", mirrors = mirrors) # noqa if "gz_utils_internal" not in excludes: gz_utils_internal_repository(name = "gz_utils_internal", mirrors = mirrors) # noqa if 
"gymnasium_py" not in excludes: gymnasium_py_repository(name = "gymnasium_py", mirrors = mirrors) if "ipopt" not in excludes: ipopt_repository(name = "ipopt") if "ipopt_internal_fromsource" not in excludes: ipopt_internal_fromsource_repository(name = "ipopt_internal_fromsource", mirrors = mirrors) # noqa if "ipopt_internal_pkgconfig" not in excludes: ipopt_internal_pkgconfig_repository(name = "ipopt_internal_pkgconfig") if "lapack" not in excludes: lapack_repository(name = "lapack") if "lcm" not in excludes: lcm_repository(name = "lcm", mirrors = mirrors) if "libblas" not in excludes: libblas_repository(name = "libblas") if "libjpeg_turbo_internal" not in excludes: libjpeg_turbo_internal_repository(name = "libjpeg_turbo_internal", mirrors = mirrors) # noqa if "liblapack" not in excludes: liblapack_repository(name = "liblapack") if "libpfm" not in excludes: libpfm_repository(name = "libpfm") if "libpng_internal" not in excludes: libpng_internal_repository(name = "libpng_internal", mirrors = mirrors) if "libtiff_internal" not in excludes: libtiff_internal_repository(name = "libtiff_internal", mirrors = mirrors) # noqa if "meshcat" not in excludes: meshcat_repository(name = "meshcat", mirrors = mirrors) if "mosek" not in excludes: mosek_repository(name = "mosek", mirrors = mirrors) if "mpmath_py_internal" not in excludes: mpmath_py_internal_repository(name = "mpmath_py_internal", mirrors = mirrors) # noqa if "msgpack_internal" not in excludes: msgpack_internal_repository(name = "msgpack_internal", mirrors = mirrors) # noqa if "mumps_internal" not in excludes: mumps_internal_repository(name = "mumps_internal") if "mypy_extensions_internal" not in excludes: mypy_extensions_internal_repository(name = "mypy_extensions_internal", mirrors = mirrors) # noqa if "mypy_internal" not in excludes: mypy_internal_repository(name = "mypy_internal", mirrors = mirrors) if "nanoflann_internal" not in excludes: nanoflann_internal_repository(name = "nanoflann_internal", mirrors = mirrors) # noqa if "nasm" not in excludes: nasm_repository(name = "nasm") if "net_sf_jchart2d" not in excludes: net_sf_jchart2d_repository(name = "net_sf_jchart2d", mirrors = mirrors) if "nlohmann_internal" not in excludes: nlohmann_internal_repository(name = "nlohmann_internal", mirrors = mirrors) # noqa if "nlopt_internal" not in excludes: nlopt_internal_repository(name = "nlopt_internal", mirrors = mirrors) if "onetbb_internal" not in excludes: onetbb_internal_repository(name = "onetbb_internal", mirrors = mirrors) if "openblas" not in excludes: # The @openblas external is deprecated in Drake's WORKSPACE and will be # removed on or after 2024-09-01. 
        openblas_repository(name = "openblas")
    if "opencl" not in excludes:
        opencl_repository(name = "opencl")
    if "opengl" not in excludes:
        opengl_repository(name = "opengl")
    if "openusd_internal" not in excludes:
        openusd_internal_repository(name = "openusd_internal", mirrors = mirrors)  # noqa
    if "org_apache_xmlgraphics_commons" not in excludes:
        org_apache_xmlgraphics_commons_repository(name = "org_apache_xmlgraphics_commons", mirrors = mirrors)  # noqa
    if "osqp_internal" not in excludes:
        osqp_internal_repository(name = "osqp_internal", mirrors = mirrors)
    if "picosha2_internal" not in excludes:
        picosha2_internal_repository(name = "picosha2_internal", mirrors = mirrors)  # noqa
    if "platforms" not in excludes:
        platforms_repository(name = "platforms", mirrors = mirrors)
    if "pybind11" not in excludes:
        pybind11_repository(name = "pybind11", mirrors = mirrors)
    if "pycodestyle" not in excludes:
        pycodestyle_repository(name = "pycodestyle", mirrors = mirrors)
    if "python" not in excludes:
        python_repository(name = "python")
    if "qdldl_internal" not in excludes:
        qdldl_internal_repository(name = "qdldl_internal", mirrors = mirrors)
    if "qhull_internal" not in excludes:
        qhull_internal_repository(name = "qhull_internal", mirrors = mirrors)
    if "ros_xacro_internal" not in excludes:
        ros_xacro_internal_repository(name = "ros_xacro_internal", mirrors = mirrors)  # noqa
    if "rules_license" not in excludes:
        rules_license_repository(name = "rules_license", mirrors = mirrors)
    if "rules_python" not in excludes:
        rules_python_repository(name = "rules_python", mirrors = mirrors)
    if "rules_rust" not in excludes:
        rules_rust_repository(name = "rules_rust", mirrors = mirrors)
    if "rules_rust_tinyjson" not in excludes:
        rules_rust_tinyjson_repository(name = "rules_rust_tinyjson", mirrors = mirrors)  # noqa
    if "rust_toolchain" not in excludes:
        rust_toolchain_repositories(mirrors = mirrors, excludes = excludes)
    if "scs_internal" not in excludes:
        scs_internal_repository(name = "scs_internal", mirrors = mirrors)
    if "sdformat_internal" not in excludes:
        sdformat_internal_repository(name = "sdformat_internal", mirrors = mirrors)  # noqa
    if "snopt" not in excludes:
        snopt_repository(name = "snopt")
    if "spdlog" not in excludes:
        spdlog_repository(name = "spdlog", mirrors = mirrors)
    if "stable_baselines3_internal" not in excludes:
        stable_baselines3_internal_repository(name = "stable_baselines3_internal", mirrors = mirrors)  # noqa
    if "statsjs" not in excludes:
        statsjs_repository(name = "statsjs", mirrors = mirrors)
    if "stduuid_internal" not in excludes:
        stduuid_internal_repository(name = "stduuid_internal", mirrors = mirrors)  # noqa
    if "styleguide" not in excludes:
        styleguide_repository(name = "styleguide", mirrors = mirrors)
    if "suitesparse_internal" not in excludes:
        suitesparse_internal_repository(name = "suitesparse_internal", mirrors = mirrors)  # noqa
    if "sympy_py_internal" not in excludes:
        sympy_py_internal_repository(name = "sympy_py_internal", mirrors = mirrors)  # noqa
    if "tinyobjloader_internal" not in excludes:
        tinyobjloader_internal_repository(name = "tinyobjloader_internal", mirrors = mirrors)  # noqa
    if "tinyxml2_internal" not in excludes:
        tinyxml2_internal_repository(name = "tinyxml2_internal", mirrors = mirrors)  # noqa
    if "tomli_internal" not in excludes:
        tomli_internal_repository(name = "tomli_internal", mirrors = mirrors)
    if "typing_extensions_internal" not in excludes:
        typing_extensions_internal_repository(name = "typing_extensions_internal", mirrors = mirrors)  # noqa
    if "uritemplate_py_internal" not in excludes:
        uritemplate_py_internal_repository(name = "uritemplate_py_internal", mirrors = mirrors)  # noqa
    if "usockets_internal" not in excludes:
        usockets_internal_repository(name = "usockets_internal", mirrors = mirrors)  # noqa
    if "uwebsockets_internal" not in excludes:
        uwebsockets_internal_repository(name = "uwebsockets_internal", mirrors = mirrors)  # noqa
    if "voxelized_geometry_tools" not in excludes:
        voxelized_geometry_tools_repository(name = "voxelized_geometry_tools", mirrors = mirrors)  # noqa
    if "vtk_internal" not in excludes:
        vtk_internal_repository(name = "vtk_internal", mirrors = mirrors)
    if "x11" not in excludes:
        x11_repository(name = "x11")
    if "xmlrunner_py" not in excludes:
        xmlrunner_py_repository(name = "xmlrunner_py", mirrors = mirrors)
    if "yaml_cpp_internal" not in excludes:
        yaml_cpp_internal_repository(name = "yaml_cpp_internal", mirrors = mirrors)  # noqa
    if "zlib" not in excludes:
        zlib_repository(name = "zlib")

def add_default_toolchains(excludes = []):
    """Register toolchains for each language (e.g., "py") not explicitly
    excluded and/or not using an automatically generated toolchain.

    Args:
        excludes: List of languages for which a toolchain should not be
            registered.
    """
    if "py" not in excludes:
        native.register_toolchains(
            "//tools/py_toolchain:toolchain",
        )
    if "rust" not in excludes:
        register_rust_toolchains()

def add_default_workspace(
        repository_excludes = [],
        toolchain_excludes = [],
        mirrors = DEFAULT_MIRRORS):
    """Declare repositories in this WORKSPACE for each dependency of @drake
    (e.g., "eigen") that is not explicitly excluded, and register toolchains
    for each language (e.g., "py") not explicitly excluded and/or not using
    an automatically generated toolchain.

    Args:
        repository_excludes: List of repositories that should not be declared
            in this WORKSPACE.
        toolchain_excludes: List of languages for which a toolchain should
            not be registered.
        mirrors: Dictionary of mirrors from which to download repository
            files. See mirrors.bzl file in this directory for the file
            format and default values.
    """
    add_default_repositories(excludes = repository_excludes, mirrors = mirrors)
    add_default_toolchains(excludes = toolchain_excludes)
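
# For illustration only (shown as comments because load() statements must
# appear at the top of a file): a downstream WORKSPACE would typically call
# the macro above like so. The "snopt" exclusion is just a hypothetical
# example of skipping an external that the caller supplies itself.
#
#   load("@drake//tools/workspace:default.bzl", "add_default_workspace")
#
#   add_default_workspace(
#       repository_excludes = ["snopt"],
#   )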
0
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/workspace/vendor_cxx_test.py
import unittest

from tools.workspace.vendor_cxx import _rewrite_one_text


class TestVendorCxx(unittest.TestCase):

    def setUp(self):
        # Display all test output.
        self.maxDiff = None

        # These are the boilerplate lines that vendor_cxx injects.
        self._open = 'inline namespace drake_vendor __attribute__ ((visibility ("hidden"))) {'  # noqa
        self._close = '} /* inline namespace drake_vendor */'

    def _check(self, old_lines, expected_new_lines, inline_namespace=True):
        """Tests one call to _rewrite_one_text for expected output."""
        old_text = '\n'.join(old_lines) + '\n'
        new_text = _rewrite_one_text(text=old_text,
                                     inline_namespace=inline_namespace)
        expected_new_text = '\n'.join(expected_new_lines) + '\n'
        self.assertMultiLineEqual(expected_new_text, new_text)

    def test_without_inline_namespace(self):
        self._check([
            'namespace foo',
            '{',
            '}',
            ' namespace bar {',
            '}',
        ], [
            'namespace foo __attribute__ ((visibility ("hidden")))',
            '{',
            '}',
            'namespace bar __attribute__ ((visibility ("hidden"))) {',
            '}',
        ], inline_namespace=False)

    def test_comments(self):
        self._check([
            ' // file comment',
            ' /* block comment',
            ' continues */',
            'int foo(); // eol comment',
            'int bar(); /* eol comment */',
            ' /* intro comment */ class Baz;',
            ' /* single line c style */',
            '#include <something.h>',
            ' /* intro comment that spans multiple lines',
            ' but continues with code after */ class Quux;',
        ], [
            ' // file comment',
            ' /* block comment',
            ' continues */',
            self._open,
            'int foo(); // eol comment',
            'int bar(); /* eol comment */',
            ' /* intro comment */ class Baz;',
            self._close,
            ' /* single line c style */',
            '#include <something.h>',
            self._open,
            ' /* intro comment that spans multiple lines',
            ' but continues with code after */ class Quux;',
            self._close,
        ])

    def test_preprocessor(self):
        self._check([
            '#define FOO \\',
            ' foo',  # This line looks like code, but it isn't.
            'int foo();',
            '// A no-op preprocessor directive:',
            '#',
        ], [
            '#define FOO \\',
            ' foo',
            self._open,
            'int foo();',
            self._close,
            '// A no-op preprocessor directive:',
            '#',
        ])

    def test_include_guard(self):
        self._check([
            '#ifndef FOO_HH',
            '#define FOO_HH',
            '#include <something.h>',
            'int foo();',
            '#endif FOO_HH',
        ], [
            '#ifndef FOO_HH',
            '#define FOO_HH',
            '#include <something.h>',
            self._open,
            'int foo();',
            self._close,
            '#endif FOO_HH',
        ])

    def test_pragma_once(self):
        self._check([
            '#pragma once',
            '#include <something.h>',
            'int foo();',
        ], [
            '#pragma once',
            '#include <something.h>',
            self._open,
            'int foo();',
            self._close,
        ])

    def test_extern_c(self):
        """No namespaces are added for 'extern C' files."""
        content = [
            '#include "somelib/somefile.h"',
            '#include <unrelated/thing.h>',
            'extern "C" {',
            'int foo();',
            '} // extern C',
        ]
        self._check(content, content)


if __name__ == '__main__':
    unittest.main()
0
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/workspace/cmake_configure_file.bzl
load( "//tools/skylark:python_env.bzl", "hermetic_python_env", ) # Defines the implementation actions to cmake_configure_file. def _cmake_configure_file_impl(ctx): if len(ctx.files.srcs) == 0: fail("There must be at least one srcs") if len(ctx.files.srcs) != len(ctx.outputs.outs): fail("The number of srcs and outs must be congruent") arguments = [] for src in ctx.files.srcs: arguments += ["--input", src.path] for out in ctx.outputs.outs: arguments += ["--output", out.path] for item in ctx.attr.defines: arguments += ["-D" + item] for item in ctx.attr.undefines: arguments += ["-U" + item] for item in ctx.files.cmakelists: arguments += ["--cmakelists", item.path] if ctx.attr.autoconf: arguments += ["--autoconf"] if ctx.attr.strict: arguments += ["--strict"] ctx.actions.run( inputs = ctx.files.srcs + ctx.files.cmakelists, outputs = ctx.outputs.outs, arguments = arguments, env = ctx.attr.env, executable = ctx.executable.cmake_configure_file_py, ) return [] # Defines the rule to cmake_configure_file. _cmake_configure_file_gen = rule( attrs = { "srcs": attr.label_list(allow_files = True, mandatory = True), "outs": attr.output_list(mandatory = True), "defines": attr.string_list(), "undefines": attr.string_list(), "cmakelists": attr.label_list(allow_files = True), "autoconf": attr.bool(default = False), "strict": attr.bool(default = False), "cmake_configure_file_py": attr.label( cfg = "host", executable = True, default = Label("//tools/workspace:cmake_configure_file"), ), "env": attr.string_dict( mandatory = True, allow_empty = True, ), }, output_to_genfiles = True, implementation = _cmake_configure_file_impl, ) def cmake_configure_file( name, src = None, out = None, defines = None, undefines = None, cmakelists = None, strict = None, **kwargs): """Creates a rule to generate an out= file from a src= file, using CMake's configure_file substitution semantics. This implementation is incomplete, and may not produce the same result as CMake in all cases. Definitions optionally can be passed in directly as defines= strings (with the usual defines= convention of either a name-only "HAVE_FOO", or a key-value "MYSCALAR=DOUBLE"). Definitions optionally can be read from simple CMakeLists files that contain statements of the form "set(FOO_MAJOR_VERSION 1)" and similar. Variables that are known substitutions but which should be undefined can be passed as undefines= strings. When strict is True, any substitution found in src that is not mentioned by either defines, undefines, or cmakelists is an error. When False, anything not mentioned is silently presumed to be undefined. See cmake_configure_file.py for our implementation of the configure_file substitution rules. The CMake documentation of the configure_file macro is: https://cmake.org/cmake/help/latest/command/configure_file.html """ _cmake_configure_file_gen( name = name, srcs = [src], outs = [out], defines = defines, undefines = undefines, cmakelists = cmakelists, strict = strict, env = hermetic_python_env(), **kwargs ) def cmake_configure_files( name, srcs = None, outs = None, defines = None, undefines = None, cmakelists = None, strict = None, **kwargs): """Like cmake_configure_file(), but with itemwise pairs of srcs => outs, instead of just one pair of src => out. When in strict mode, the defines / undefines must be used by *at least one* of the srcs; only a definition that is unused by all srcs is an error. 
""" _cmake_configure_file_gen( name = name, srcs = srcs, outs = outs, defines = defines, undefines = undefines, cmakelists = cmakelists, strict = strict, env = hermetic_python_env(), **kwargs ) def autoconf_configure_file( name, src = None, out = None, defines = None, undefines = None, strict = None, **kwargs): """Creates a rule to generate an out= file from a src= file, using autoconf substitution semantics. This implementation is incomplete, and may not produce the same result as autoconf in all cases. Definitions are passed as defines= strings (with the usual convention of either a name-only "HAVE_FOO", or a key-value "MYSCALAR=DOUBLE"). Variables that are known substitutions but which should be undefined can be passed as undefines= strings. When strict is True, any substitution found in src that is not mentioned by either defines or undefines is an error. When False, anything not mentioned is silently presumed to be undefined. """ _cmake_configure_file_gen( name = name, srcs = [src], outs = [out], defines = defines, undefines = undefines, strict = strict, autoconf = True, env = hermetic_python_env(), **kwargs )
0
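A minimal sketch of invoking the macro above from a BUILD file; the file names and variable names here are hypothetical.

load("@drake//tools/workspace:cmake_configure_file.bzl", "cmake_configure_file")

# Generates config.h from config.h.in. With strict = True, any substitution
# token in the input not covered by defines= or undefines= is an error.
cmake_configure_file(
    name = "config",
    src = "config.h.in",
    out = "config.h",
    defines = [
        "HAVE_FOO",  # Name-only define.
        "VERSION=1.2.3",  # Key-value define; '@VERSION@' becomes '1.2.3'.
    ],
    undefines = ["HAVE_BAR"],
    strict = True,
)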
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/workspace/__init__.py
# Empty Python module `__init__`, required to make this a module.
0
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/workspace/forward_files.bzl
def forward_files(
        srcs = [],
        strip_prefix = None,
        dest_prefix = None,
        visibility = None,
        tags = []):
    """Forwards files in `srcs` to be physically present in the current
    package. Present implementation simply copies the files.

    @param srcs
        List of string, pointing *directly* to files as absolute Bazel target
        paths. This does NOT resolve relative targets, nor does it unpack
        filegroup targets `$(locations ...)`.
    @param strip_prefix
        String to be stripped from source files. Should include trailing
        slash.
    @param dest_prefix
        String to be prepended to target.
    @param tags
        Must always include "manual" at a minimum. Files should only be
        copied when needed, not as part of `:all`.
    """
    if strip_prefix == None or dest_prefix == None:
        fail("Must supply `strip_prefix` and `dest_prefix`.")
    if "manual" not in (tags or []):
        fail("Must set `tags = [\"manual\"]`")
    outs = []
    for src in srcs:
        if not src.startswith(strip_prefix):
            fail("'{}' not under '{}'".format(src, strip_prefix))
        out = dest_prefix + src[len(strip_prefix):]
        native.genrule(
            name = out + ".forward",
            srcs = [src],
            outs = [out],
            cmd = "cp $< $@",
            tags = tags,
            visibility = visibility,
        )
        outs.append(out)
    return outs
0
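A usage sketch for forward_files(); the external repository and file names are hypothetical.

load("@drake//tools/workspace:forward_files.bzl", "forward_files")

# Copies @some_external//:data/{foo,bar}.obj into this package as
# models/foo.obj and models/bar.obj; returns the list of copied files.
_MODELS = forward_files(
    srcs = [
        "@some_external//:data/foo.obj",
        "@some_external//:data/bar.obj",
    ],
    strip_prefix = "@some_external//:data/",
    dest_prefix = "models/",
    tags = ["manual"],
)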
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/workspace/metadata.bzl
def generate_repository_metadata(repository_ctx, **kwargs):
    """Helper function to archive load-phase metadata.

    Writes a file named drake_repository_metadata.json to the root of an
    external repository. The contents of the file will be a JSON dict of the
    `kwargs`, with one additional dict entry of name="" using the
    repository_ctx.name. This information can be loaded later using the
    tools/workspace/metadata.py library.
    """
    repository_ctx.file(
        "drake_repository_metadata.json",
        content = struct(
            name = repository_ctx.name,
            **kwargs
        ).to_json(),
        executable = False,
    )
0
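A sketch of how a repository rule implementation might call this helper; the rule name and the particular key-value pairs below are hypothetical, chosen only to illustrate the kwargs-to-JSON behavior.

load("@drake//tools/workspace:metadata.bzl", "generate_repository_metadata")

def _my_repository_impl(repository_ctx):
    # ... fetch and unpack the archive first ...
    # Record which upstream commit was fetched so that
    # tools/workspace/metadata.py can report it later.
    generate_repository_metadata(
        repository_ctx,
        repository_rule_type = "github",
        repository = "someorg/somelib",
        commit = "v1.2.3",
    )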
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/workspace/deprecation.bzl
def _impl(repo_ctx):
    name = repo_ctx.attr.name
    date = repo_ctx.attr.date
    cc_aliases = repo_ctx.attr.cc_aliases
    py_aliases = repo_ctx.attr.py_aliases
    aliases = repo_ctx.attr.aliases
    build = "load(\"@drake//tools/skylark:cc.bzl\", \"cc_library\")\n"
    build += "load(\"@drake//tools/skylark:py.bzl\", \"py_library\")\n"
    build += "package(default_visibility = [\"//visibility:public\"])\n"
    deprecation = "".join([
        "DRAKE DEPRECATED: The @{} external is deprecated".format(name),
        " and will be removed from Drake on or after {}.".format(date),
    ])
    for label, actual in cc_aliases.items():
        build += "cc_library({})\n".format(", ".join([
            "name = " + repr(label),
            "deps = [" + repr(actual) + "]",
            "deprecation = " + repr(deprecation),
        ]))
    for label, actual in py_aliases.items():
        build += "py_library({})\n".format(", ".join([
            "name = " + repr(label),
            "deps = [" + repr(actual) + "]",
            "deprecation = " + repr(deprecation),
        ]))
    for label, actual in aliases.items():
        # Unfortunately, Bazel does not obey `deprecation = ...` on an alias().
        build += "alias({})\n".format(", ".join([
            "name = " + repr(label),
            "actual = " + repr(actual),
        ]))
    if aliases or (not cc_aliases and not py_aliases):
        # If there are any targets without a deprecation attribute, or if
        # there are no targets in the first place, then we must deprecate the
        # entire BUILD file.
        build += "print(" + repr(deprecation) + ")\n"
    repo_ctx.file("BUILD.bazel", build)

add_deprecation = repository_rule(
    doc = """Adds a repository rule with deprecated aliases to other targets.
    This is particularly useful when renaming an external repository.

    Example:
        add_deprecation(
            name = "qhull",
            date = "2038-01-19",
            cc_aliases = {"qhull": "@qhull_internal//:qhull"},
        )
    """,
    attrs = {
        "date": attr.string(
            doc = "Scheduled removal date of the deprecated target(s).",
            mandatory = True,
        ),
        "cc_aliases": attr.string_dict(
            doc = """
            Optional mapping for cc_library deprecations. The keys are
            deprecated target names, the values are the non-deprecated labels.
            """,
        ),
        "py_aliases": attr.string_dict(
            doc = """
            Optional mapping for py_library deprecations. The keys are
            deprecated target names, the values are the non-deprecated labels.
            """,
        ),
        "aliases": attr.string_dict(
            doc = """
            Optional mapping for any other deprecations. The keys are
            deprecated target names, the values are the non-deprecated labels.
            Note that (in contrast to the cc or py aliases) these labels do
            NOT generate deprecation warnings when they are used. Instead, the
            BUILD file will print a warning when it's loaded, even if none of
            its targets are used as dependencies.
            """,
        ),
    },
    implementation = _impl,
)
0
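Tracing _impl through the qhull example in the docstring above, the generated @qhull BUILD.bazel would contain roughly the following (a sketch; the real output is emitted as single-line statements built with repr()).

load("@drake//tools/skylark:cc.bzl", "cc_library")
load("@drake//tools/skylark:py.bzl", "py_library")
package(default_visibility = ["//visibility:public"])

cc_library(
    name = "qhull",
    deps = ["@qhull_internal//:qhull"],
    deprecation = "DRAKE DEPRECATED: The @qhull external is deprecated and will be removed from Drake on or after 2038-01-19.",
)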
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/workspace/generate_export_header.bzl
def _make_identifier(s): result = "" for i in range(len(s)): result += s[i] if s[i].isalnum() else "_" return result # Defines the implementation actions to generate_export_header. def _generate_export_header_impl(ctx): output = ctx.outputs.out guard = _make_identifier(output.basename.upper()) content = [ "#ifndef %s" % guard, "#define %s" % guard, "", "#ifdef %s" % ctx.attr.static_define, "# define %s" % ctx.attr.export_macro_name, "# define %s" % ctx.attr.no_export_macro_name, "#else", "# define %s __attribute__((visibility(\"default\")))" % ctx.attr.export_macro_name, # noqa "# define %s __attribute__((visibility(\"hidden\")))" % ctx.attr.no_export_macro_name, # noqa "#endif", "", "#ifndef %s" % ctx.attr.deprecated_macro_name, "# define %s __attribute__ ((__deprecated__))" % ctx.attr.deprecated_macro_name, # noqa "#endif", "", "#ifndef %s" % ctx.attr.export_deprecated_macro_name, "# define %s %s %s" % (ctx.attr.export_deprecated_macro_name, ctx.attr.export_macro_name, ctx.attr.deprecated_macro_name), # noqa "#endif", "", "#ifndef %s" % ctx.attr.no_export_deprecated_macro_name, "# define %s %s %s" % (ctx.attr.no_export_deprecated_macro_name, ctx.attr.no_export_macro_name, ctx.attr.deprecated_macro_name), # noqa "#endif", "", "#endif", ] ctx.actions.write(output = output, content = "\n".join(content) + "\n") # Defines the rule to generate_export_header. _generate_export_header_gen = rule( attrs = { "out": attr.output(mandatory = True), "export_macro_name": attr.string(), "deprecated_macro_name": attr.string(), "export_deprecated_macro_name": attr.string(), "no_export_macro_name": attr.string(), "no_export_deprecated_macro_name": attr.string(), "static_define": attr.string(), }, output_to_genfiles = True, implementation = _generate_export_header_impl, ) def generate_export_header( lib = None, name = None, out = None, export_macro_name = None, deprecated_macro_name = None, export_deprecated_macro_name = None, no_export_macro_name = None, no_export_deprecated_macro_name = None, static_define = None, **kwargs): """Creates a rule to generate an export header for a named library. This is an incomplete implementation of CMake's generate_export_header. (In particular, it assumes a platform that uses __attribute__((visibility("default"))) to decorate exports.) By default, the rule will have a mangled name related to the library name, and will produce "<lib>_export.h". The CMake documentation of the generate_export_header macro is: https://cmake.org/cmake/help/latest/module/GenerateExportHeader.html """ if name == None: name = "__%s_export_h" % lib if out == None: out = "%s_export.h" % lib if export_macro_name == None: export_macro_name = "%s_EXPORT" % lib.upper() if deprecated_macro_name == None: deprecated_macro_name = "%s_DEPRECATED" % lib.upper() if export_deprecated_macro_name == None: export_deprecated_macro_name = "%s_DEPRECATED_EXPORT" % lib.upper() if no_export_macro_name == None: no_export_macro_name = "%s_NO_EXPORT" % lib.upper() if no_export_deprecated_macro_name == None: no_export_deprecated_macro_name = \ "%s_DEPRECATED_NO_EXPORT" % lib.upper() if static_define == None: static_define = "%s_STATIC_DEFINE" % lib.upper() _generate_export_header_gen( name = name, out = out, export_macro_name = export_macro_name, deprecated_macro_name = deprecated_macro_name, export_deprecated_macro_name = export_deprecated_macro_name, no_export_macro_name = no_export_macro_name, no_export_deprecated_macro_name = no_export_deprecated_macro_name, static_define = static_define, **kwargs )
0
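A usage sketch; for a hypothetical library named "foo", the defaults expand as noted in the comments.

load(
    "@drake//tools/workspace:generate_export_header.bzl",
    "generate_export_header",
)

# Writes foo_export.h, defining FOO_EXPORT, FOO_NO_EXPORT, FOO_DEPRECATED,
# FOO_DEPRECATED_EXPORT, and FOO_DEPRECATED_NO_EXPORT (all switched on
# FOO_STATIC_DEFINE).
generate_export_header(
    lib = "foo",
)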
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/workspace/which.bzl
load("//tools/workspace:execute.bzl", "path", "which") def _impl(repository_ctx): command = repository_ctx.attr.command additional_paths = repository_ctx.attr.additional_search_paths found_command = which(repository_ctx, command, additional_paths) if found_command: repository_ctx.symlink(found_command, command) else: error_message = "Could not find {} on PATH={}".format( command, path(repository_ctx, additional_paths), ) if repository_ctx.attr.allow_missing: repository_ctx.file(command, "\n".join([ "#!/bin/sh", "echo 'ERROR: {}' 1>&2".format(error_message), "false", ]), executable = True) else: fail(error_message) build_file_content = """# DO NOT EDIT: generated by which_repository() # A symlink to {}. exports_files(["{}"]) {} """.format(found_command, command, repository_ctx.attr.build_epilog) repository_ctx.file( "BUILD.bazel", content = build_file_content, executable = False, ) which_repository = repository_rule( attrs = { "command": attr.string(mandatory = True), "additional_search_paths": attr.string_list(), "allow_missing": attr.bool(default = False), "build_epilog": attr.string(), }, local = True, configure = True, implementation = _impl, ) """Alias the result of $(which $command) into a label @$name//:$command (or @$command if name and command match). The PATH is set according to the path() function in execute.bzl. The value of the user's PATH environment variable is ignored. Changes to any WORKSPACE or BUILD.bazel file will cause this rule to be re-evaluated because it sets its local attribute. However, note that if neither WORKSPACE nor **/BUILD.bazel change, then this rule will not be re-evaluated. This means that adding or removing the presence of `command` on some entry in the PATH (as defined above) will not be accounted for until something else changes. Args: command (:obj:`str`): Short name of command, e.g., "cat". additional_search_paths (:obj:`list` of :obj:`str`): List of additional search paths. allow_missing (:obj:`bool`): When True, errors will end up deferred to build time instead of fetch time -- a failure to find the command will still result in a BUILD.bazel target that provides the command, but the target will be missing. build_epilog: (Optional) Extra text to add to the generated BUILD.bazel. """
0
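A usage sketch from a WORKSPACE file; the particular command is hypothetical.

load("@drake//tools/workspace:which.bzl", "which_repository")

# Makes @gfortran//:gfortran a symlink to the host gfortran binary, or (since
# allow_missing is set) a stub that fails at build time when it is absent.
which_repository(
    name = "gfortran",
    command = "gfortran",
    allow_missing = True,
)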
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/workspace/cmake_configure_file.py
"""A re-implementation of CMake's configure_file substitution semantics. This implementation is incomplete, and may not produce the same result as CMake in all (or even many) cases. The CMake documentation of the configure_file macro is: https://cmake.org/cmake/help/latest/command/configure_file.html """ import argparse import os import re import sys from collections import OrderedDict # Looks like "#cmakedefine VAR ..." or "#cmakedefine01 VAR". _cmakedefine = re.compile(r'^(\s*)#cmakedefine(01)? ([^ \r\n]+)(.*?)([\r\n]+)') # Looks like "@VAR@" or "${VAR}". _varsubst = re.compile(r'^(.*?)(@[^ ]+?@|\$\{[^ ]+?\})(.*)([\r\n]*)') # Transform a source code line per CMake's configure_file semantics. # # The 'definitions' provides values for CMake variables. The dict's keys are # the variable names to substitute, and the dict's values are the values to # substitute. (The values can be None, for known-but-undefined variable keys.) # # The configuration semantics are as follows: # # - An input line 'cmakedefine VAR' turns into '#define VAR VALUE' if and only # if the 'definitions' dict has a non-None value VALUE for VAR, otherwise it # turns into '/* #undef VAR */'. When in strict mode, it is an error if # there is no such key in the dict. # # - An input line 'cmakedefine01 VAR' turns into '#define VAR 1' if and only if # the 'definitions' dict has a non-None value for VAR, otherwise it turns # into '#define VAR 0'. When in strict mode, it is an error if there is no # such key in the dict. # # - An input line with a substitution '@VAR@' or '${VAR}' replaces the # substitution token with the value in 'definitions' dict for that VAR, or # else the empty string if the value is None. It is an error if there is no # such key in the dict. def _transform_cmake(*, line, definitions, strict): used_vars = set() # Replace define statements. match = _cmakedefine.match(line) if match: blank, maybe01, var, rest, newline = match.groups() if var not in definitions: defined = False if strict: raise KeyError(var) else: defined = definitions[var] is not None used_vars.add(var) if maybe01: line = blank + '#define ' + var + [' 0', ' 1'][defined] + newline return line, used_vars elif defined: line = blank + '#define ' + var + rest + newline else: line = blank + '/* #undef ' + var + ' */' + newline return line, used_vars # Replace variable substitutions. while True: match = _varsubst.match(line) if not match: break before, xvarx, after, newline = match.groups() if xvarx[0] == '$': assert len(xvarx) >= 4 assert xvarx[1] == '{' assert xvarx[-1] == '}' var = xvarx[2:-1] elif xvarx[0] == '@': assert len(xvarx) >= 3 assert xvarx[-1] == '@' var = xvarx[1:-1] assert len(var) > 0 if var not in definitions: raise KeyError(var) used_vars.add(var) value = definitions.get(var) if value is None: value = '' line = before + value + after + newline return line, used_vars # Looks like "#undef VAR". _autoconf_undef = re.compile(r'^(\s*)#undef +([^ \r\n]+)([\r\n]+)') # Transform a source code line using autoconf format. # The 'definitions' provides variable values, just like _transform_cmake above. 
def _transform_autoconf(*, line, definitions, strict):
    used_vars = set()
    match = _autoconf_undef.match(line)
    if match:
        blank, var, newline = match.groups()
        if var in definitions:
            used_vars.add(var)
            value = definitions[var]
            if value is not None:
                line = blank + f'#define {var} {value}' + newline
            else:
                line = blank + f'/* undef {var} */' + newline
        elif strict:
            raise KeyError(f"Missing define or undefine decision for {var}"
                           " when running in strict=True mode")
        else:
            line = blank + f'/* missing {var} */' + newline
    return line, used_vars


# Looks like "set(VAR value)", maybe with an end-of-line comment.
_set_var = re.compile(r'^\s*set\s*\(\s*(.+)\s+(.+)\s*\)\s*(#.*)?$')


# From a line of CMakeLists.txt, return a set(...) key-value pair, if found.
def _extract_definition(line, prior_definitions):
    match = _set_var.match(line)
    if not match:
        return dict()
    var, value, _ = match.groups()
    try:
        value, _ = _transform_cmake(
            line=value, definitions=prior_definitions, strict=False)
    except KeyError:
        return dict()
    if value.startswith('"'):
        assert value.endswith('"')
        value = value[1:-1]
    return {var: value}


# Load our definitions dict, given the command-line args:
# - A command-line '-Dfoo' will add ('foo', '1') to the result.
# - A command-line '-Dfoo=bar' will add ('foo', 'bar') to the result.
# - A command-line '-Ufoo' will add ('foo', None) to the result.
def _setup_definitions(args):
    result = OrderedDict()
    for item in args.defines:
        if '=' in item:
            key, value = item.split('=', 1)
            result[key] = value
        else:
            result[item] = '1'
    for item in args.undefines:
        result[item] = None
    cmakelist_keys = set()
    for filename in args.cmakelists:
        with open(filename, 'r') as cmakelist:
            for line in cmakelist.readlines():
                definition = _extract_definition(line, result)
                result.update(definition)
                cmakelist_keys.update(definition.keys())
    return result, cmakelist_keys


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--input', metavar='FILE', action='append', default=[])
    parser.add_argument(
        '--output', metavar='FILE', action='append', default=[])
    parser.add_argument(
        '-D', metavar='NAME', dest='defines', action='append', default=[])
    parser.add_argument(
        '-U', metavar='NAME', dest='undefines', action='append', default=[])
    parser.add_argument(
        '--autoconf', action='store_true',
        help='The input file is in autoconf format, not cmake format.')
    parser.add_argument(
        '--cmakelists', action='append', default=[])
    parser.add_argument(
        '--strict', action='store_true')
    args = parser.parse_args()
    if len(args.input) == 0:
        parser.error("There must be at least one --input")
    if len(args.input) != len(args.output):
        parser.error("The number of --input and --output must be congruent")
    definitions, cmakelist_keys = _setup_definitions(args)
    transformer = _transform_autoconf if args.autoconf else _transform_cmake
    total_used_vars = set()
    missing_vars = set()
    for input_path, output_path in zip(args.input, args.output):
        with open(input_path, 'r') as input_file:
            with open(output_path + '.tmp', 'w') as output_file:
                for input_line in input_file.readlines():
                    try:
                        output_line, used_vars = transformer(
                            line=input_line, definitions=definitions,
                            strict=args.strict)
                        output_file.write(output_line)
                        total_used_vars |= used_vars
                    except KeyError as e:
                        missing_vars.add(e.args[0])
    if missing_vars:
        raise RuntimeError(f"The definitions of {sorted(missing_vars)} were"
                           " required, but missing.")
    unused_vars = definitions.keys() - cmakelist_keys - total_used_vars
    if unused_vars:
        raise RuntimeError(f"The definitions of {sorted(unused_vars)} were"
                           " ignored and therefore seem like dead code;"
                           " remove them from defines= or undefines=.")
    for output_path in args.output:
        os.rename(output_path + '.tmp', output_path)


if __name__ == '__main__':
    main()
0
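To make the autoconf mode above concrete, here is a sketch of one invocation (hypothetical variable and file names) annotated with the line rewrites it performs.

load(
    "@drake//tools/workspace:cmake_configure_file.bzl",
    "autoconf_configure_file",
)

# In autoconf mode the tool rewrites '#undef VAR' lines:
#   #undef HAVE_FOO   =>   #define HAVE_FOO 1    (from defines below)
#   #undef HAVE_BAR   =>   /* undef HAVE_BAR */  (from undefines below)
autoconf_configure_file(
    name = "config",
    src = "config.h.in",
    out = "config.h",
    defines = ["HAVE_FOO"],
    undefines = ["HAVE_BAR"],
)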
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/workspace/execute.bzl
def homebrew_prefix(repo_ctx):
    """Returns the prefix where Homebrew is expected to be found.

    Fails when called on a non-macOS platform.
    """
    if repo_ctx.os.name != "mac os x":
        fail("Not a homebrew OS: " + repo_ctx.os.name)
    if repo_ctx.os.arch == "x86_64":
        return "/usr/local"
    else:
        return "/opt/homebrew"

def path(repo_ctx, additional_search_paths = []):
    """Return the value of the PATH environment variable that would be used
    by the which() command."""
    search_paths = additional_search_paths

    # N.B. Ensure ${PATH} in each platform `tools/*.bazelrc` matches these
    # paths.
    if repo_ctx.os.name == "mac os x":
        search_paths = search_paths + [homebrew_prefix(repo_ctx) + "/bin"]
    search_paths = search_paths + ["/usr/bin", "/bin"]
    if repo_ctx.os.name == "mac os x":
        search_paths = search_paths + ["/usr/sbin", "/sbin"]
    return ":".join(search_paths)

def which(repo_ctx, program, additional_search_paths = []):
    """Return the path of the given program or None if there is no such
    program in the PATH as defined by the path() function above. The value of
    the user's PATH environment variable is ignored.
    """
    exec_result = repo_ctx.execute(["which", program], environment = {
        "PATH": path(repo_ctx, additional_search_paths),
    })
    if exec_result.return_code != 0:
        return None
    return repo_ctx.path(exec_result.stdout.strip())

def execute_and_return(
        repo_ctx,
        command,
        additional_search_paths = [],
        **kwargs):
    """Runs the `command` (list) and returns a status value. The return value
    is a struct with a field `error` that will be None on success or else a
    detailed message on command failure.
    """
    if "/" in str(command[0]):
        program = command[0]
    else:
        program = which(repo_ctx, command[0], additional_search_paths)
    if not program:
        error = "Could not find a program named '{}'".format(
            command[0],
        )
        return struct(error = error)
    exec_result = repo_ctx.execute([program] + command[1:], **kwargs)
    if exec_result.return_code == 0:
        error = None
    else:
        error = "Failure running " + (
            " ".join(["'{}'".format(x) for x in command])
        )
        if exec_result.stdout:
            error += "\n" + exec_result.stdout
        if exec_result.stderr:
            error += "\n" + exec_result.stderr
    return struct(
        error = error,
        stdout = exec_result.stdout,
    )

def execute_or_fail(repo_ctx, command, **kwargs):
    """Runs the `command` (list) and immediately fails on any error.
    Returns a struct with the stdout value."""
    result = execute_and_return(repo_ctx, command, **kwargs)
    if result.error:
        fail("Unable to complete setup for @{} repository: {}".format(
            repo_ctx.name,
            result.error,
        ))
    return result
0
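A sketch of calling these helpers from a repository rule implementation; the rule name and commands are hypothetical.

load("@drake//tools/workspace:execute.bzl", "execute_or_fail", "which")

def _my_repository_impl(repo_ctx):
    # Resolve a tool against the fixed search path (the user's PATH
    # environment variable is ignored).
    patch_tool = which(repo_ctx, "patch")

    # Run a command; a non-zero exit fails the fetch with the command's
    # stdout and stderr folded into the error message.
    result = execute_or_fail(repo_ctx, ["git", "--version"])
    print(result.stdout.strip())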
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/workspace/vendor_cxx.bzl
load("//tools/skylark:cc.bzl", "cc_library") load("//tools/workspace:generate_file.bzl", "generate_file") def cc_library_vendored( name, hdrs = None, hdrs_vendored = None, srcs = None, srcs_vendored = None, vendor_tool_args = None, **kwargs): """ Compiles a third-party C++ library using altered include paths and namespaces so that it will not interfere with co-habitating builds of the same library by others. The lists of hdrs and hdrs_vendored paths must be equal in length and correspond as elementwise pairs. The hdrs gives the list of library header file paths as found in the third-party source layout; the hdrs_vendored gives the list of header file paths to use for Drake's vendored build. Typically we will prefix "drake_vendor/" to the path. The lists of srcs and srcs_vendored paths must be equal in length and correspond as elementwise pairs. The srcs gives the list of library source file paths as found in the third-party source layout; the srcs_vendored gives the list of source file paths to use for Drake's vendored build. """ hdrs = hdrs or [] hdrs_vendored = hdrs_vendored or [] srcs = srcs or [] srcs_vendored = srcs_vendored or [] if len(hdrs) != len(hdrs_vendored): fail("The hdrs= and hdrs_vendored= list lengths must match") if len(srcs) != len(srcs_vendored): fail("The srcs= and srcs_vendored= list lengths must match") native.genrule( name = "_{}_vendoring".format(name), srcs = hdrs + srcs, outs = hdrs_vendored + srcs_vendored, cmd = " ".join([ "$(execpath @drake//tools/workspace:vendor_cxx)", ] + (vendor_tool_args or []) + [ "$(execpath {}):$(execpath {})".format(old, new) for old, new in (zip(hdrs, hdrs_vendored) + zip(srcs, srcs_vendored)) ]), tools = ["@drake//tools/workspace:vendor_cxx"], tags = ["manual"], visibility = ["//visibility:private"], ) cc_library( name = name, hdrs = hdrs_vendored, srcs = srcs_vendored, **kwargs ) def generate_vendor_patch(name, srcs, srcs_vendored, extra_prologue = None): """ Creates a patch file named `name` that encompasses the differences beteween `srcs` and `srcs_vendored`. """ if len(srcs) != len(srcs_vendored): fail("The srcs= and srcs_vendored= list lengths must match") for upstream_src, vendor_src in zip(srcs, srcs_vendored): native.genrule( name = "_generate_{}.patch".format(vendor_src), srcs = [upstream_src, vendor_src], outs = [vendor_src + ".patch"], cmd = " ".join([ "(diff -U0", "--label={upstream_src} $(execpath {upstream_src})", "--label={vendor_src} $(execpath {vendor_src})", "> $@ || [[ $$? == 1 ]])", ]).format( upstream_src = upstream_src, vendor_src = vendor_src, ), ) prologue = """ This patch was automatically generated by Drake's vendor_cxx.py tool. It shows the edits made by Drake's build system to adjust the namespace of the software. """ + (extra_prologue or "") generate_file( name = "_prologue_{}".format(name), content = prologue.strip() + "\n\n", ) native.genrule( name = "_generate_{}".format(name), srcs = [ ":_prologue_{}".format(name), ] + [ vendor_src + ".patch" for vendor_src in srcs_vendored ], outs = [name], cmd = "cat $(SRCS) > $@", )
0
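A usage sketch of cc_library_vendored() from a package.BUILD.bazel; the library and file names are hypothetical.

load("@drake//tools/workspace:vendor_cxx.bzl", "cc_library_vendored")

# Each hdrs/srcs entry pairs elementwise with its *_vendored counterpart;
# the vendor_cxx tool rewrites the copies into a hidden inline namespace.
cc_library_vendored(
    name = "foo",
    hdrs = ["include/foo.h"],
    hdrs_vendored = ["drake_hdr/include/foo.h"],
    includes = ["drake_hdr"],
    srcs = ["src/foo.cc"],
    srcs_vendored = ["drake_src/src/foo.cc"],
    linkstatic = True,
)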
/home/johnshepherd/drake/tools
/home/johnshepherd/drake/tools/workspace/generate_include_header.bzl
load("//tools/skylark:pathutils.bzl", "output_path") # Generate a header that includes a set of other headers def _generate_include_header_impl(ctx): # Collect list of headers hdrs = [] for h in ctx.attr.hdrs: for f in h.files.to_list(): hdrs.append(output_path(ctx, f, ctx.attr.strip_prefix)) # Generate include header content = "#pragma once\n" content = content + "\n".join(["#include <%s>" % h for h in hdrs]) ctx.actions.write(output = ctx.outputs.out, content = content) drake_generate_include_header = rule( attrs = { "hdrs": attr.label_list(allow_files = True), "strip_prefix": attr.string_list(default = ["**/include/"]), "out": attr.output(mandatory = True), }, output_to_genfiles = True, implementation = _generate_include_header_impl, ) """Generate a header that includes a set of other headers. This creates a rule to generate a header that includes a list of other headers. The generated file will be of the form:: #include <hdr> #include <hdr> Args: hdrs (:obj:`str`): List of files or file labels of headers that the generated header will include. strip_prefix (:obj:`list` of :obj:`str`): List of prefixes to strip from the header names when forming the ``#include`` directives. """
0
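A usage sketch; with the default strip_prefix of "**/include/", the two headers below are emitted as '#include <foo/bar.h>' and '#include <foo/baz.h>'.

load(
    "@drake//tools/workspace:generate_include_header.bzl",
    "drake_generate_include_header",
)

# Generates foo.h containing '#pragma once' plus one #include per header.
drake_generate_include_header(
    name = "foo_include_header",
    out = "foo.h",
    hdrs = [
        "include/foo/bar.h",
        "include/foo/baz.h",
    ],
)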
/home/johnshepherd/drake/tools/workspace
/home/johnshepherd/drake/tools/workspace/googlebenchmark/BUILD.bazel
# This file exists to make our directory into a Bazel package, so that our # neighboring *.bzl file can be loaded elsewhere. load("//tools/lint:lint.bzl", "add_lint_tests") add_lint_tests()
0
/home/johnshepherd/drake/tools/workspace
/home/johnshepherd/drake/tools/workspace/googlebenchmark/repository.bzl
load("//tools/workspace:github.bzl", "github_archive") def googlebenchmark_repository( name, mirrors = None): github_archive( name = name, repository = "google/benchmark", commit = "v1.8.3", sha256 = "6bc180a57d23d4d9515519f92b0c83d61b05b5bab188961f36ac7b06b0d9e9ce", # noqa mirrors = mirrors, patches = [ ":patches/console_allocs.patch", ":patches/remove_overloaded_fixture_set_up.patch", ":patches/string_precision.patch", ], )
0
/home/johnshepherd/drake/tools/workspace/googlebenchmark
/home/johnshepherd/drake/tools/workspace/googlebenchmark/patches/console_allocs.patch
If a memory manager is installed then shows allocs in the summary table. --- src/console_reporter.cc +++ src/console_reporter.cc @@ -53,9 +53,11 @@ bool ConsoleReporter::ReportContext(const Context& context) { } void ConsoleReporter::PrintHeader(const Run& run) { + const bool show_allocs = (run.memory_result != nullptr); std::string str = - FormatString("%-*s %13s %15s %12s", static_cast<int>(name_field_width_), - "Benchmark", "Time", "CPU", "Iterations"); + FormatString("%-*s %13s %15s %*s%12s", static_cast<int>(name_field_width_), + "Benchmark", "Time", "CPU", show_allocs ? 10 : 0, + show_allocs ? "Allocs " : "", "Iterations"); if (!run.counters.empty()) { if (output_options_ & OO_Tabular) { for (auto const& c : run.counters) { @@ -163,6 +165,11 @@ void ConsoleReporter::PrintRunData(const Run& result) { } if (!result.report_big_o && !result.report_rms) { + const bool show_allocs = (result.memory_result != nullptr); + if (show_allocs) { + const std::string s = HumanReadableNumber(result.allocs_per_iter, Counter::kIs1000); + printer(Out, COLOR_YELLOW, "%7s ", s.c_str()); + } printer(Out, COLOR_CYAN, "%10lld", result.iterations); }
0
/home/johnshepherd/drake/tools/workspace/googlebenchmark
/home/johnshepherd/drake/tools/workspace/googlebenchmark/patches/string_precision.patch
Actually respect the requested human-readable precision. --- src/string_util.cc +++ src/string_util.cc @@ -55,6 +61,8 @@ void ToExponentAndMantissa(double val, double thresh, int precision, for (size_t i = 0; i < arraysize(kBigSIUnits); ++i) { scaled /= one_k; if (scaled <= big_threshold) { + mantissa_stream << std::fixed; + mantissa_stream.precision(precision); mantissa_stream << scaled; *exponent = i + 1; *mantissa = mantissa_stream.str();
0
/home/johnshepherd/drake/tools/workspace/googlebenchmark
/home/johnshepherd/drake/tools/workspace/googlebenchmark/patches/remove_overloaded_fixture_set_up.patch
Avoid GCC warnings about overloaded virtuals We only need the mutable spelling for Drake code. --- include/benchmark/benchmark.h +++ include/benchmark/benchmark.h @@ -1425,12 +1425,8 @@ this->TearDown(st); } - // These will be deprecated ... - virtual void SetUp(const State&) {} - virtual void TearDown(const State&) {} - // ... In favor of these. - virtual void SetUp(State& st) { SetUp(const_cast<const State&>(st)); } - virtual void TearDown(State& st) { TearDown(const_cast<const State&>(st)); } + virtual void SetUp(State&) {} + virtual void TearDown(State&) {} protected: virtual void BenchmarkCase(State&) = 0;
0
/home/johnshepherd/drake/tools/workspace
/home/johnshepherd/drake/tools/workspace/osqp_internal/BUILD.bazel
load("//tools/lint:lint.bzl", "add_lint_tests") add_lint_tests()
0
/home/johnshepherd/drake/tools/workspace
/home/johnshepherd/drake/tools/workspace/osqp_internal/package.BUILD.bazel
# -*- bazel -*- load("@drake//tools/install:install.bzl", "install") load("@drake//tools/skylark:cc.bzl", "cc_library") load( "@drake//tools/workspace:cmake_configure_file.bzl", "cmake_configure_file", ) licenses(["notice"]) # Apache-2.0 package( default_visibility = ["//visibility:public"], ) # Generates osqp_configure.h based on the defines= we want in Drake. cmake_configure_file( name = "configure_file", src = "configure/osqp_configure.h.in", out = "include/osqp_configure.h", defines = [ "PRINTING", "PROFILING", # Keep the default primitive size of `double` and `int`. Don't define # 'DFLOAT' nor 'DLONG' unless @qdldl_internal and @scs_internal are # also changed to use those primitive sizes. # See drake/tools/workspace/qdldl_internal/README.md. ] + select({ "@drake//tools/cc_toolchain:apple": [ "IS_MAC", ], "@drake//tools/cc_toolchain:linux": [ "IS_LINUX", ], "//conditions:default": [], }), undefines = [ "OSQP_CUSTOM_MEMORY", ], visibility = ["//visibility:private"], ) cc_library( name = "osqp", # Our hdrs match OSQP's ./include/CMakeLists.txt variable `osqp_headers`. hdrs = [ # These headers are always enabled in CMakeLists. "include/auxil.h", "include/constants.h", "include/error.h", "include/glob_opts.h", "include/lin_alg.h", "include/osqp.h", "include/osqp_configure.h", "include/proj.h", "include/scaling.h", "include/types.h", "include/util.h", "include/version.h", # These headers are enabled in CMakeLists only when building in normal # mode. "include/kkt.h", # These headers are enabled in CMakeLists only when building in # non-embedded mode. "include/cs.h", "include/polish.h", "include/lin_sys.h", ], srcs = [ # From ./lin_sys/direct/qdldl/CMakeLists.txt at `qdldl_interface_src`. "lin_sys/direct/qdldl/qdldl_interface.h", "lin_sys/direct/qdldl/qdldl_interface.c", # From ./src/CMakeLists.txt at `osqp_src`. # These sources are always enabled in CMakeLists. "src/auxil.c", "src/error.c", "src/lin_alg.c", "src/osqp.c", "src/proj.c", "src/scaling.c", "src/util.c", # These sources are enabled in CMakeLists only when building in normal # mode. "src/kkt.c", # These sources are enabled in CMakeLists only when building in # non-embedded mode. "src/cs.c", "src/polish.c", "src/lin_sys.c", ], includes = [ "include", "lin_sys/direct/qdldl", ], copts = [ "-fvisibility=hidden", "-w", "-Werror=incompatible-pointer-types", ], linkstatic = 1, deps = [ "@qdldl_internal//:qdldl", "@suitesparse_internal//:amd", ], ) install( name = "install", docs = ["LICENSE"], doc_strip_prefix = ["lin_sys/direct/qdldl"], )
0
/home/johnshepherd/drake/tools/workspace
/home/johnshepherd/drake/tools/workspace/osqp_internal/repository.bzl
load("//tools/workspace:github.bzl", "github_archive") def osqp_internal_repository( name, mirrors = None): github_archive( name = name, repository = "osqp/osqp", upgrade_advice = """ When updating this commit, see drake/tools/workspace/qdldl/README.md. """, commit = "v0.6.3", sha256 = "a6b4148019001f87489c27232e2bdbac37c94f38fa37c1b4ee11eaa5654756d2", # noqa build_file = ":package.BUILD.bazel", mirrors = mirrors, )
0
/home/johnshepherd/drake/tools/workspace
/home/johnshepherd/drake/tools/workspace/clarabel_cpp_internal/BUILD.bazel
load("//tools/lint:lint.bzl", "add_lint_tests") load("//tools/skylark:drake_cc.bzl", "drake_cc_library") load("//tools/skylark:drake_py.bzl", "drake_py_binary", "drake_py_unittest") drake_cc_library( name = "serialize", hdrs = [":serialize.h"], internal = True, visibility = ["//solvers:__pkg__"], deps = [ "//common:name_value", ], ) drake_py_binary( name = "gen_serialize", srcs = ["gen_serialize.py"], data = [ "@clarabel_cpp_internal//:include/cpp/DefaultSettings.h", ], tags = ["manual"], deps = [ "@rules_python//python/runfiles", ], ) genrule( name = "gen_serialize_h", outs = ["gen_serialize.h"], cmd = "$(execpath :gen_serialize) --output=$@", tags = ["manual"], tools = [":gen_serialize"], ) drake_py_unittest( name = "lint_test", data = [ ":gen_serialize.h", ":serialize.h", ], tags = ["lint"], ) add_lint_tests()
0
/home/johnshepherd/drake/tools/workspace
/home/johnshepherd/drake/tools/workspace/clarabel_cpp_internal/package.BUILD.bazel
# -*- bazel -*- load("@drake//tools/install:install.bzl", "install") load("@drake//tools/skylark:cc.bzl", "cc_library") load("@drake//tools/skylark:cc_hidden.bzl", "cc_wrap_static_archive_hidden") load("@drake//tools/workspace:vendor_cxx.bzl", "cc_library_vendored") load("@drake//tools/workspace/crate_universe:defs.bzl", "all_crate_deps") load("@rules_rust//rust:defs.bzl", "rust_static_library") package(default_visibility = ["//visibility:private"]) licenses(["notice"]) # Apache-2.0 exports_files([ "LICENSE.md", "include/cpp/DefaultSettings.h", "rust_wrapper/Cargo.toml", ]) _HDRS = glob(["include/**"], allow_empty = False) cc_library_vendored( name = "hdrs", hdrs = _HDRS, hdrs_vendored = [ x.replace("include/", "drake_hdr/") for x in _HDRS ], defines = [ "FEATURE_SDP", ], includes = ["drake_hdr"], linkstatic = True, ) rust_static_library( name = "clarabel_cpp_rust_wrapper", edition = "2021", srcs = glob(["rust_wrapper/src/**/*.rs"], allow_empty = False), crate_features = ["sdp"], deps = all_crate_deps(), ) cc_wrap_static_archive_hidden( name = "clarabel_cpp_rust_wrapper_hidden", static_archive_name = "clarabel_cpp_rust_wrapper", ) # Combine the public headers with the object code. cc_library( name = "clarabel_cpp", linkstatic = True, deps = [ ":hdrs", ":clarabel_cpp_rust_wrapper_hidden", "@blas", "@eigen", "@lapack", ], visibility = ["//visibility:public"], ) install( name = "install", docs = ["LICENSE.md"], visibility = ["//visibility:public"], )
0
/home/johnshepherd/drake/tools/workspace
/home/johnshepherd/drake/tools/workspace/clarabel_cpp_internal/repository.bzl
load("@drake//tools/workspace:github.bzl", "github_archive") def clarabel_cpp_internal_repository( name, mirrors = None): github_archive( name = name, # This dependency is part of a "cohort" defined in # drake/tools/workspace/new_release.py. When practical, all members # of this cohort should be updated at the same time. repository = "oxfordcontrol/Clarabel.cpp", commit = "v0.6.0", sha256 = "281b1cbbe7e15520ad17a74d91f0ef9d83161ee79c0ff1954187f78c4516c8ec", # noqa build_file = ":package.BUILD.bazel", patches = [ ":patches/extern_c.patch", ":patches/git_submodule.patch", ":patches/sdp.patch", ":patches/unicode.patch", ], mirrors = mirrors, )
0
/home/johnshepherd/drake/tools/workspace
/home/johnshepherd/drake/tools/workspace/clarabel_cpp_internal/gen_serialize.py
""" Generates the serialize.h header file, containing Clarabel's settings names. """ import argparse from pathlib import Path from python import runfiles _PROLOGUE = """\ #pragma once #include "drake/common/name_value.h" // This file helps Drake's solvers/clarabel_solver.cc set the Clarabel options. // // It is committed to source control to simplify the build process, but can be // mechanically regenerated by running the `gen_serialize` program. A linter // checks that the committed code matches what would be regenerated. namespace clarabel { template <typename Archive> // NOLINTNEXTLINE(runtime/references) void Serialize(Archive* a, DefaultSettings<double>& settings) { #define DRAKE_VISIT(x) a->Visit(drake::MakeNameValue(#x, &(settings.x))) """ _EPILOGUE = """\ #undef DRAKE_VISIT } } // namespace clarabel """ def _settings_names(): """Returns the list of names of Clarabel.cpp's settings.""" # Read the DefaultSettings.h header. manifest = runfiles.Create() headers_dir = "clarabel_cpp_internal/include/cpp" header = manifest.Rlocation(f"{headers_dir}/DefaultSettings.h") with open(header) as f: text = f.read() # Strip away the parts we don't need. needle = "struct DefaultSettings\n{" index = text.find(needle) assert index > 0 text = text[index + len(needle):] needle = "}" index = text.find(needle) assert index > 0 text = text[:index] # Parse the contents of the struct. for line in text.splitlines(): line = line.strip() if not line: continue if line.startswith("static"): continue assert line.endswith(";") line = line[:-1] assert line.count(" ") == 1 _, name = line.split() yield name def _create_header_text(): result = _PROLOGUE for name in _settings_names(): result += f" DRAKE_VISIT({name});\n" result += _EPILOGUE return result def _main(): parser = argparse.ArgumentParser() parser.add_argument("--output", metavar="FILE", required=True) args = parser.parse_args() text = _create_header_text() with open(args.output, "w", encoding="utf-8") as f: f.write(text) assert __name__ == "__main__" _main()
0
/home/johnshepherd/drake/tools/workspace
/home/johnshepherd/drake/tools/workspace/clarabel_cpp_internal/serialize.h
#pragma once #include "drake/common/name_value.h" // This file helps Drake's solvers/clarabel_solver.cc set the Clarabel options. // // It is committed to source control to simplify the build process, but can be // mechanically regenerated by running the `gen_serialize` program. A linter // checks that the committed code matches what would be regenerated. namespace clarabel { template <typename Archive> // NOLINTNEXTLINE(runtime/references) void Serialize(Archive* a, DefaultSettings<double>& settings) { #define DRAKE_VISIT(x) a->Visit(drake::MakeNameValue(#x, &(settings.x))) DRAKE_VISIT(max_iter); DRAKE_VISIT(time_limit); DRAKE_VISIT(verbose); DRAKE_VISIT(max_step_fraction); DRAKE_VISIT(tol_gap_abs); DRAKE_VISIT(tol_gap_rel); DRAKE_VISIT(tol_feas); DRAKE_VISIT(tol_infeas_abs); DRAKE_VISIT(tol_infeas_rel); DRAKE_VISIT(tol_ktratio); DRAKE_VISIT(reduced_tol_gap_abs); DRAKE_VISIT(reduced_tol_gap_rel); DRAKE_VISIT(reduced_tol_feas); DRAKE_VISIT(reduced_tol_infeas_abs); DRAKE_VISIT(reduced_tol_infeas_rel); DRAKE_VISIT(reduced_tol_ktratio); DRAKE_VISIT(equilibrate_enable); DRAKE_VISIT(equilibrate_max_iter); DRAKE_VISIT(equilibrate_min_scaling); DRAKE_VISIT(equilibrate_max_scaling); DRAKE_VISIT(linesearch_backtrack_step); DRAKE_VISIT(min_switch_step_length); DRAKE_VISIT(min_terminate_step_length); DRAKE_VISIT(direct_kkt_solver); DRAKE_VISIT(direct_solve_method); DRAKE_VISIT(static_regularization_enable); DRAKE_VISIT(static_regularization_constant); DRAKE_VISIT(static_regularization_proportional); DRAKE_VISIT(dynamic_regularization_enable); DRAKE_VISIT(dynamic_regularization_eps); DRAKE_VISIT(dynamic_regularization_delta); DRAKE_VISIT(iterative_refinement_enable); DRAKE_VISIT(iterative_refinement_reltol); DRAKE_VISIT(iterative_refinement_abstol); DRAKE_VISIT(iterative_refinement_max_iter); DRAKE_VISIT(iterative_refinement_stop_ratio); DRAKE_VISIT(presolve_enable); #undef DRAKE_VISIT } } // namespace clarabel
0
/home/johnshepherd/drake/tools/workspace/clarabel_cpp_internal
/home/johnshepherd/drake/tools/workspace/clarabel_cpp_internal/patches/unicode.patch
[Clarabel.cpp] Remove Unicode characters Cherry-pick of https://github.com/oxfordcontrol/Clarabel.cpp/pull/29. We can drop this Drake patch when upstream merges it. diff --git a/include/c/DefaultInfo.h b/include/c/DefaultInfo.h --- include/c/DefaultInfo.h +++ include/c/DefaultInfo.h @@ -9,7 +9,7 @@ // ClarabelDefaultInfo types typedef struct ClarabelDefaultInfo_f64 { - double μ; + double mu; double sigma; double step_length; uint32_t iterations; @@ -28,7 +28,7 @@ typedef struct ClarabelDefaultInfo_f64 typedef struct ClarabelDefaultInfo_f32 { - float μ; + float mu; float sigma; float step_length; uint32_t iterations; --- include/cpp/DefaultInfo.h +++ include/cpp/DefaultInfo.h @@ -13,7 +13,7 @@ struct DefaultInfo { static_assert(std::is_same<T, float>::value || std::is_same<T, double>::value, "T must be float or double"); - T μ; + T mu; T sigma; T step_length; uint32_t iterations;
0
/home/johnshepherd/drake/tools/workspace/clarabel_cpp_internal
/home/johnshepherd/drake/tools/workspace/clarabel_cpp_internal/patches/extern_c.patch
[Clarabel.cpp] Use per-function extern annotations (instead of grouping) Drake's vendor_cxx tool doesn't know how to parse `extern "C" { ... }` groupings. Work around that by marking all of the individual functions one by one. We should improve vendor_cxx to handle the original file (e.g., rather than trying to upstream this patch). --- include/cpp/DefaultSettings.h +++ include/cpp/DefaultSettings.h @@ -300,10 +300,10 @@ class DefaultSettingsBuilder } }; -extern "C" { +extern "C" DefaultSettings<double> clarabel_DefaultSettings_f64_default(); +extern "C" DefaultSettings<float> clarabel_DefaultSettings_f32_default(); -} template<> inline DefaultSettings<double> DefaultSettings<double>::default_settings() --- include/cpp/DefaultSolver.h +++ include/cpp/DefaultSolver.h @@ -130,7 +130,7 @@ struct DefaultSolver<T>::ConvertedCscMatrix } }; -extern "C" { +extern "C" RustDefaultSolverHandle_f64 clarabel_DefaultSolver_f64_new(const CscMatrix<double> *P, const double *q, const CscMatrix<double> *A, @@ -139,6 +139,7 @@ RustDefaultSolverHandle_f64 clarabel_DefaultSolver_f64_new(const CscMatrix<doubl const SupportedConeT<double> *cones, const DefaultSettings<double> *settings); +extern "C" RustDefaultSolverHandle_f32 clarabel_DefaultSolver_f32_new(const CscMatrix<float> *P, const float *q, const CscMatrix<float> *A, @@ -147,23 +148,30 @@ RustDefaultSolverHandle_f32 clarabel_DefaultSolver_f32_new(const CscMatrix<float const SupportedConeT<float> *cones, const DefaultSettings<float> *settings); +extern "C" void clarabel_DefaultSolver_f64_solve(RustDefaultSolverHandle_f64 solver); +extern "C" void clarabel_DefaultSolver_f32_solve(RustDefaultSolverHandle_f32 solver); +extern "C" void clarabel_DefaultSolver_f64_free(RustDefaultSolverHandle_f64 solver); +extern "C" void clarabel_DefaultSolver_f32_free(RustDefaultSolverHandle_f32 solver); +extern "C" DefaultSolution<double>::ClarabelDefaultSolution clarabel_DefaultSolver_f64_solution(RustDefaultSolverHandle_f64 solver); +extern "C" DefaultSolution<float>::ClarabelDefaultSolution clarabel_DefaultSolver_f32_solution(RustDefaultSolverHandle_f32 solver); +extern "C" DefaultInfo<double> clarabel_DefaultSolver_f64_info(RustDefaultSolverHandle_f64 solver); +extern "C" DefaultInfo<float> clarabel_DefaultSolver_f32_info(RustDefaultSolverHandle_f32 solver); -} // Convert unique_ptr P, A to CscMatrix objects, then init the solver // The CscMatrix objects are only used to pass the information needed to Rust.
0
/home/johnshepherd/drake/tools/workspace/clarabel_cpp_internal
/home/johnshepherd/drake/tools/workspace/clarabel_cpp_internal/patches/git_submodule.patch
[Clarabel.cpp] Pin Clarabel to the Crate (not the git submodule) --- rust_wrapper/Cargo.toml +++ rust_wrapper/Cargo.toml @@ -5,4 +5,4 @@ [dependencies] -clarabel = { path = "../Clarabel.rs" } +clarabel = "0.7.1"
0
/home/johnshepherd/drake/tools/workspace/clarabel_cpp_internal
/home/johnshepherd/drake/tools/workspace/clarabel_cpp_internal/patches/sdp.patch
[Clarabel.cpp] Enable the SDP feature by default --- rust_wrapper/Cargo.toml +++ rust_wrapper/Cargo.toml @@ -14,9 +14,10 @@ opt-level = 3 lto = true codegen-units = 1 [features] +default = ["sdp"] # Define features for SDP support in Clarabel.rs -sdp = [] +sdp = ["clarabel/sdp"] sdp-accelerate = ["sdp", "clarabel/sdp", "clarabel/sdp-accelerate"] sdp-netlib = ["sdp", "clarabel/sdp", "clarabel/sdp-netlib"] sdp-openblas = ["sdp", "clarabel/sdp", "clarabel/sdp-openblas"]
0
/home/johnshepherd/drake/tools/workspace/clarabel_cpp_internal
/home/johnshepherd/drake/tools/workspace/clarabel_cpp_internal/test/lint_test.py
import unittest from pathlib import Path class ClarabelCppInternalLintTest(unittest.TestCase): def _read(self, filename): """Returns the contents of the given filename.""" with open(filename, encoding="utf-8") as f: return f.read() def test_serialize_header(self): """Checks that serialize.h matches the desired gen_serialize.h.""" mydir = Path("tools/workspace/clarabel_cpp_internal") actual = self._read(mydir / "serialize.h") expected = self._read(mydir / "gen_serialize.h") self.maxDiff = None # If the following check fails, run this command (from the root of a # Drake checkout) to re-generate the header file: # # bazel run //tools/workspace/clarabel_cpp_internal:gen_serialize -- \ # --output=$(pwd)/tools/workspace/clarabel_cpp_internal/serialize.h # self.assertMultiLineEqual(expected, actual)
0