From 27bef09a67afa6a6ffd49a66c6b4da153162d0f6 Mon Sep 17 00:00:00 2001 From: Stone Tao Date: Thu, 11 Jan 2024 16:51:54 -0800 Subject: [PATCH] merge updates --- README.md | 2 +- docs/source/concepts/observation.md | 2 +- examples/profile_speed/README.md | 22 -- examples/profile_speed/profile_dmcontrol.py | 34 --- examples/profile_speed/profile_sapien.py | 270 ------------------ examples/submission/Dockerfile | 8 - examples/submission/user_solution.py | 14 - mani_skill2/envs/misc/avoid_obstacles.py | 3 +- .../envs/pick_and_place/pick_single.py | 95 +----- mani_skill2/envs/sapien_env.py | 1 + mani_skill2/examples/motionplanning/README.md | 4 +- .../motionplanning/grasp_pose/gen_ycb.py | 72 +++++ .../motionplanning/grasp_pose/utils.py | 258 +++++++++++++++++ .../examples/motionplanning/motionplanner.py | 76 +++-- .../motionplanning/ms2_tasks/lift_cube.py | 84 ++++++ .../ms2_tasks/panda_avoid_obstacles.py | 82 ++++++ .../peg_insertion_side.py} | 57 ++-- .../motionplanning/ms2_tasks/pick_clutter.py | 159 +++++++++++ .../motionplanning/ms2_tasks/pick_cube.py | 84 ++++++ .../motionplanning/ms2_tasks/plug_charger.py | 109 +++++++ .../motionplanning/ms2_tasks/stack_cube.py | 117 ++++++++ .../examples/motionplanning/record_demos.py | 118 ++++++++ mani_skill2/utils/download_asset.py | 11 +- mani_skill2/utils/download_demo.py | 8 - .../utils/scene_builder/ai2thor/constants.py | 4 + .../scene_builder/ai2thor/scene_builder.py | 20 +- mani_skill2/utils/wrappers/record.py | 1 + setup.py | 2 +- 28 files changed, 1200 insertions(+), 517 deletions(-) delete mode 100644 examples/profile_speed/README.md delete mode 100644 examples/profile_speed/profile_dmcontrol.py delete mode 100644 examples/profile_speed/profile_sapien.py delete mode 100644 examples/submission/Dockerfile delete mode 100644 examples/submission/user_solution.py create mode 100644 mani_skill2/examples/motionplanning/grasp_pose/gen_ycb.py create mode 100644 mani_skill2/examples/motionplanning/grasp_pose/utils.py create 
mode 100644 mani_skill2/examples/motionplanning/ms2_tasks/lift_cube.py create mode 100644 mani_skill2/examples/motionplanning/ms2_tasks/panda_avoid_obstacles.py rename mani_skill2/examples/motionplanning/{mplib_peg_insertion_side.py => ms2_tasks/peg_insertion_side.py} (68%) create mode 100644 mani_skill2/examples/motionplanning/ms2_tasks/pick_clutter.py create mode 100644 mani_skill2/examples/motionplanning/ms2_tasks/pick_cube.py create mode 100644 mani_skill2/examples/motionplanning/ms2_tasks/plug_charger.py create mode 100644 mani_skill2/examples/motionplanning/ms2_tasks/stack_cube.py create mode 100644 mani_skill2/examples/motionplanning/record_demos.py diff --git a/README.md b/README.md index b625bbc2..5e733a15 100644 --- a/README.md +++ b/README.md @@ -31,7 +31,7 @@ From pip: ```bash # NOTE: This is a beta build at the moment. To install the beta build run -pip install mani-skill2==0.6.0.dev2 +pip install mani-skill2==0.6.0.dev3 ``` From github: diff --git a/docs/source/concepts/observation.md b/docs/source/concepts/observation.md index 8da3b4ab..3d440f5b 100644 --- a/docs/source/concepts/observation.md +++ b/docs/source/concepts/observation.md @@ -96,7 +96,7 @@ For `obs_mode="pointcloud"`: An "actor" is a fundamental object that represents a physical entity (rigid body) that can be simulated in SAPIEN (the backend of ManiSkill2). An articulated object is a collection of links interconnected by joints, and each link is also an actor. In SAPIEN, `scene.get_all_actors()` will return all the actors that are not links of articulated objects. The examples are the ground, the cube in [PickCube](./environments.md#pickcube-v0), and the YCB objects in [PickSingleYCB](./environments.md#picksingleycb-v0). `scene.get_all_articulations()` will return all the articulations. The examples are the robots, the cabinets in [OpenCabinetDoor](./environments.md#opencabinetdoor-v1), and the chairs in [PushChair](./environments.md#pushchair-v1). 
Below is an example of how to get actors and articulations in SAPIEN. ```python -import sapien.core as sapien +import sapien scene: sapien.Scene = ... actors = scene.get_all_actors() # actors excluding links diff --git a/examples/profile_speed/README.md b/examples/profile_speed/README.md deleted file mode 100644 index e3956841..00000000 --- a/examples/profile_speed/README.md +++ /dev/null @@ -1,22 +0,0 @@ -``` -CPU: Intel(R) Core(TM) i9-7960X CPU @ 2.80GHz -GPU: NVIDIA GeForce GTX 1080 Ti -``` - -``` -python profile_sapien.py - -Num steps: 10000 -Simulation time: 0.8207485675811768 FPS: 12183.999333036842 -Render time: 4.390702962875366 FPS: 2277.53962965676 -``` - -To install DMControl, run `pip install dm-control==1.0.4` - -``` -python profile_dmcontrol.py - -Num steps: 10000 -Simulation time: 1.160520315170288 FPS: 8616.82459951824 -Render time: 19.261722803115845 FPS: 519.1643604372899 -``` \ No newline at end of file diff --git a/examples/profile_speed/profile_dmcontrol.py b/examples/profile_speed/profile_dmcontrol.py deleted file mode 100644 index 330f395b..00000000 --- a/examples/profile_speed/profile_dmcontrol.py +++ /dev/null @@ -1,34 +0,0 @@ -import time - -import matplotlib.pyplot as plt -import numpy as np -import PIL.Image as Image -import tqdm -from dm_control import suite - -num_steps = 10000 - -width = 84 -height = 84 -images = [] - -env = suite.load(domain_name="cartpole", task_name="swingup_sparse") - -action_spec = env.action_spec() -time_step = env.reset() - -sim_time, render_time = 0, 0 -for i in range(num_steps): - start = time.time() - action = np.random.uniform( - action_spec.minimum, action_spec.maximum, size=action_spec.shape - ) - sim_time += time.time() - start - - start = time.time() - images.append(env.physics.render(height, width, camera_id=0)) - render_time += time.time() - start - -print("Num steps:", num_steps) -print("Simulation time:", sim_time, "FPS:", num_steps / sim_time) -print("Render time:", render_time, "FPS:", num_steps / 
render_time) diff --git a/examples/profile_speed/profile_sapien.py b/examples/profile_speed/profile_sapien.py deleted file mode 100644 index 79842bd5..00000000 --- a/examples/profile_speed/profile_sapien.py +++ /dev/null @@ -1,270 +0,0 @@ -"""CartPole-Swingup.""" - -import cv2 -import gymnasium as gym -import numpy as np -import PIL.Image as Image -import sapien.core as sapien -from gymnasium import spaces -from gymnasium.utils import seeding -from sapien.core import Pose -from sapien.utils.viewer import Viewer -from transforms3d.euler import euler2quat -from transforms3d.quaternions import axangle2quat - - -class TestSapienEnv(gym.Env): - """Superclass for Sapien environments.""" - - def __init__(self, control_freq, timestep): - self.control_freq = control_freq # alias: frame_skip in mujoco_py - self.timestep = timestep - - self._engine = sapien.Engine() - self._renderer = sapien.VulkanRenderer(offscreen_only=False) - self._engine.set_renderer(self._renderer) - self._scene = self._engine.create_scene() - self._scene.set_timestep(timestep) - self._scene.add_ground(-1.0) - self._build_world() - self.viewer = None - self.seed() - - def _build_world(self): - raise NotImplementedError() - - def _setup_viewer(self): - raise NotImplementedError() - - # ---------------------------------------------------------------------------- # - # Override gym functions - # ---------------------------------------------------------------------------- # - def seed(self, seed=None): - self.np_random, seed = seeding.np_random(seed) - return [seed] - - def close(self): - if self.viewer is not None: - self.viewer.close() # release viewer - - def render(self, mode="human"): - if self.viewer is None: - self._setup_viewer() - if mode == "human": - self._scene.update_render() - self.viewer.render() - else: - raise NotImplementedError("Unsupported render mode {}.".format(mode)) - - # ---------------------------------------------------------------------------- # - # Utilities - # 
---------------------------------------------------------------------------- # - def get_actor(self, name) -> sapien.ArticulationBase: - all_actors = self._scene.get_all_actors() - print(all_actors) - actor = [x for x in all_actors if x.name == name] - if len(actor) > 1: - raise RuntimeError(f"Not a unique name for actor: {name}") - elif len(actor) == 0: - raise RuntimeError(f"Actor not found: {name}") - return actor[0] - - def get_articulation(self, name) -> sapien.ArticulationBase: - all_articulations = self._scene.get_all_articulations() - articulation = [x for x in all_articulations if x.name == name] - if len(articulation) > 1: - raise RuntimeError(f"Not a unique name for articulation: {name}") - elif len(articulation) == 0: - raise RuntimeError(f"Articulation not found: {name}") - return articulation[0] - - @property - def dt(self): - return self.timestep * self.control_freq - - -def create_cartpole(scene: sapien.Scene) -> sapien.Articulation: - builder = scene.create_articulation_builder() - - base = builder.create_link_builder() - base.set_name("base") - - cart = builder.create_link_builder(base) - cart.set_name("cart") - cart.set_joint_name("cart_joint") - cart_half_size = np.array([0.04, 0.25, 0.125]) - cart.add_box_collision(half_size=cart_half_size, density=100) - cart.add_box_visual(half_size=cart_half_size, color=[0.8, 0.6, 0.4]) - - cart.set_joint_properties( - "prismatic", - limits=[[-5, 5]], - pose_in_parent=sapien.Pose( - p=[0, 0, 0], q=axangle2quat([0, 0, 1], np.deg2rad(90)) - ), - pose_in_child=sapien.Pose( - p=[0, 0, 0], q=axangle2quat([0, 0, 1], np.deg2rad(90)) - ), - ) - - rod = builder.create_link_builder(cart) - rod.set_name("rod") - rod.set_joint_name("rod_joint") - rod_half_size = np.array([0.016, 0.016, 0.5]) - rod.add_box_collision(half_size=rod_half_size, density=100) - rod.add_box_visual(half_size=rod_half_size, color=[0.8, 0.6, 0.4]) - - # The x-axis of the joint frame is the rotation axis of a revolute joint. 
- rod.set_joint_properties( - "revolute", - limits=[[-np.inf, np.inf]], - pose_in_parent=sapien.Pose( - p=[-(cart_half_size[0] + 1e-3), 0, (cart_half_size[2])], - q=[1, 0, 0, 0], - ), - pose_in_child=sapien.Pose( - p=[rod_half_size[0] + 1e-3, 0.0, -rod_half_size[2]], - q=[1, 0, 0, 0], - ), - ) - - cartpole = builder.build(fix_root_link=True) - cartpole.set_name("cartpole") - - return cartpole - - -class CartPoleSwingUpEnv(TestSapienEnv): - def __init__(self): - super().__init__(control_freq=1, timestep=0.01) - - self.cartpole = self.get_articulation("cartpole") - self.cart = self.cartpole.get_links()[1] - self.pole = self.cartpole.get_links()[2] - - self.observation_space = spaces.Box( - low=-np.inf, high=np.inf, shape=[4], dtype=np.float32 - ) - self.action_space = spaces.Box( - low=-10.0, high=10.0, shape=[1], dtype=np.float32 - ) - - self.theta_threshold = np.deg2rad(12) - self._setup_camera() - - # ---------------------------------------------------------------------------- # - # Simulation world - # ---------------------------------------------------------------------------- # - def _build_world(self): - # frictionless - phy_mtl = self._scene.create_physical_material(0.0, 0.0, 0.0) - self._scene.default_physical_material = phy_mtl - create_cartpole(self._scene) - self._setup_lighting() - - # ---------------------------------------------------------------------------- # - # RL - # ---------------------------------------------------------------------------- # - def step(self, action): - for _ in range(self.control_freq): - self.cartpole.set_qf([action[0], 0]) - self._scene.step() - - obs = self._get_obs() - - x, theta = obs[0], obs[1] - - reward = "__TODO17__" - - if theta < -np.pi: - theta += np.pi * 2 - if theta > np.pi: - theta -= np.pi * 2 - success = -self.theta_threshold <= theta <= self.theta_threshold - - done = False - - return obs, reward, done, {"success": success} - - def reset(self): - self.cartpole.set_qpos([0, np.pi]) - self.cartpole.set_qvel([0, 
0]) - self._scene.step() - return self._get_obs(), {} - - def _get_obs(self): - qpos = self.cartpole.get_qpos() - qvel = self.cartpole.get_qvel() - return np.hstack([qpos, qvel]) - - # ---------------------------------------------------------------------------- # - # Visualization - # ---------------------------------------------------------------------------- # - - def _setup_lighting(self): - self._scene.set_ambient_light([0.4, 0.4, 0.4]) - self._scene.add_directional_light([0, 1, -1], [0.5, 0.5, 0.5], shadow=True) - self._scene.add_point_light([2, 2, 2], [1, 1, 1]) - self._scene.add_point_light([2, -2, 2], [1, 1, 1]) - self._scene.add_point_light([-2, 0, 2], [1, 1, 1]) - - def _setup_viewer(self): - self.viewer = Viewer(self._renderer) - self.viewer.set_scene(self._scene) - self.viewer.set_camera_xyz(-4, 0, 4) - self.viewer.set_camera_rpy(0, -0.7, 0) - self.viewer.window.set_camera_parameters(near=0.01, far=100, fovy=1) - self.viewer.focus_entity(self.cart) - - def _setup_camera(self): - self.camera = self._scene.add_mounted_camera( - name="main_camera", - actor=self.cart, - pose=sapien.Pose([-3, 0, 0]), - width=84, - height=84, - fovy=1, - near=0.01, - far=100, - ) - - def render(self, mode="human"): - if mode == "human": - super().render() - self.camera.take_picture() - rgba = self.camera.get_float_texture("Color") - else: - self._scene.update_render() - self.camera.take_picture() - rgba = self.camera.get_float_texture("Color") - - -def main(): - import time - - env = CartPoleSwingUpEnv() - env.reset() - sim_time = 0.0 - render_time = 0.0 - num_steps = 10000 - for step in range(num_steps): - # env.render(mode="human") - action = env.action_space.sample() - t = time.time() - obs, reward, done, info = env.step(action) - sim_time += time.time() - t - t = time.time() - env.render("rgbd") - render_time += time.time() - t - if done: - obs, _ = env.reset() - - print("Num steps:", num_steps) - print("Simulation time:", sim_time, "FPS:", num_steps / sim_time) - 
print("Render time:", render_time, "FPS:", num_steps / render_time) - - env.close() - - -if __name__ == "__main__": - main() diff --git a/examples/submission/Dockerfile b/examples/submission/Dockerfile deleted file mode 100644 index ada64b4e..00000000 --- a/examples/submission/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -FROM haosulab/mani-skill2:latest - -# Install additional python packages you need -RUN pip install torch==1.12.1 - -# Copy your codes (including user_solution.py) and model weights -COPY ${YOUR_CODES_AND_WEIGHTS} /root/ -ENV PYTHONPATH ${YOUR_CODES_AND_WEIGHTS}:$PYTHONPATH diff --git a/examples/submission/user_solution.py b/examples/submission/user_solution.py deleted file mode 100644 index 73c9d882..00000000 --- a/examples/submission/user_solution.py +++ /dev/null @@ -1,14 +0,0 @@ -from mani_skill2.evaluation.solution import BasePolicy - - -class UserPolicy(BasePolicy): - def act(self, observations): - return self.action_space.sample() - - @classmethod - def get_obs_mode(cls, env_id: str) -> str: - return "rgbd" - - @classmethod - def get_control_mode(cls, env_id: str) -> str: - return "pd_joint_delta_pos" diff --git a/mani_skill2/envs/misc/avoid_obstacles.py b/mani_skill2/envs/misc/avoid_obstacles.py index 8d9ad999..0d6ce4b4 100644 --- a/mani_skill2/envs/misc/avoid_obstacles.py +++ b/mani_skill2/envs/misc/avoid_obstacles.py @@ -8,7 +8,6 @@ from mani_skill2 import format_path from mani_skill2.agents.robots.panda import PandaRealSensed435 -from mani_skill2.agents.robots.panda.panda import Panda from mani_skill2.envs.sapien_env import BaseEnv from mani_skill2.sensors.camera import CameraConfig from mani_skill2.utils.building.ground import build_tesselated_square_floor @@ -171,7 +170,7 @@ def _get_obs_extra(self) -> OrderedDict: def evaluate(self, **kwargs) -> dict: tcp_pose_at_goal = self.goal_pose.inv() * self.tcp.pose pos_dist = np.linalg.norm(tcp_pose_at_goal.p) - ang_dist = np.arccos(tcp_pose_at_goal.q[0]) * 2 + ang_dist = 
np.arccos(np.clip(tcp_pose_at_goal.q[0], -1, 1)) * 2 if ang_dist > np.pi: # [0, 2 * pi] -> [-pi, pi] ang_dist = ang_dist - 2 * np.pi ang_dist = np.abs(ang_dist) diff --git a/mani_skill2/envs/pick_and_place/pick_single.py b/mani_skill2/envs/pick_and_place/pick_single.py index dcc83906..9e2700d5 100644 --- a/mani_skill2/envs/pick_and_place/pick_single.py +++ b/mani_skill2/envs/pick_and_place/pick_single.py @@ -432,13 +432,12 @@ def build_actor_ycb( builder = scene.create_actor_builder() model_dir = Path(root_dir) / "models" / model_id - collision_file = str(model_dir / "collision.obj") + collision_file = str(model_dir / "collision.ply") builder.add_multiple_convex_collisions_from_file( filename=collision_file, scale=[scale] * 3, material=physical_material, density=density, - decomposition="coacd", ) visual_file = str(model_dir / "textured.obj") @@ -464,10 +463,10 @@ def _check_assets(self): "`python -m mani_skill2.utils.download_asset ycb`." ) - collision_file = model_dir / "collision.obj" + collision_file = model_dir / "collision.ply" if not collision_file.exists(): raise FileNotFoundError( - "convex.obj has been renamed to collision.obj. " + "convex.obj and collision.obj has been renamed to collision.ply. " "Please re-download YCB models." 
) @@ -491,91 +490,3 @@ def _initialize_agent(self): return super()._initialize_agent_v1() else: return super()._initialize_agent() - - -# ---------------------------------------------------------------------------- # -# EGAD -# ---------------------------------------------------------------------------- # -def build_actor_egad( - model_id: str, - scene: sapien.Scene, - scale: float = 1.0, - physical_material: physx.PhysxMaterial = None, - density=100, - render_material: sapien.render.RenderMaterial = None, - root_dir=ASSET_DIR / "mani_skill2_egad", -): - builder = scene.create_actor_builder() - # A heuristic way to infer split - split = "train" if "_" in model_id else "eval" - - collision_file = Path(root_dir) / f"egad_{split}_set_coacd" / f"{model_id}.obj" - builder.add_multiple_convex_collisions_from_file( - filename=str(collision_file), - scale=[scale] * 3, - material=physical_material, - decomposition="coacd", - density=density, - ) - - visual_file = Path(root_dir) / f"egad_{split}_set" / f"{model_id}.obj" - builder.add_visual_from_file( - filename=str(visual_file), scale=[scale] * 3, material=render_material - ) - - actor = builder.build() - return actor - - -@register_env("PickSingleEGAD-v0", max_episode_steps=200, obj_init_rot=0.2) -class PickSingleEGADEnv(PickSingleEnv): - DEFAULT_ASSET_ROOT = "{ASSET_DIR}/mani_skill2_egad" - DEFAULT_MODEL_JSON = "info_pick_train_v0.json" - - def _check_assets(self): - splits = set() - for model_id in self.model_ids: - split = "train" if "_" in model_id else "eval" - splits.add(split) - - for split in splits: - collision_dir = self.asset_root / f"egad_{split}_set_coacd" - visual_dir = self.asset_root / f"egad_{split}_set" - if not (collision_dir.exists() and visual_dir.exists()): - raise FileNotFoundError( - f"{collision_dir} or {visual_dir} is not found. " - "Please download (ManiSkill2) EGAD models:" - "`python -m mani_skill2.utils.download_asset egad`." 
- ) - - def _load_model(self): - mat = self._renderer.create_material() - color = self._episode_rng.uniform(0.2, 0.8, 3) - color = np.hstack([color, 1.0]) - mat.set_base_color(color) - mat.metallic = 0.0 - mat.roughness = 0.1 - - self.obj = build_actor_egad( - self.model_id, - self._scene, - scale=self.model_scale, - render_material=mat, - density=100, - root_dir=self.asset_root, - ) - self.obj.name = self.model_id - - def _get_init_z(self): - bbox_min = self.model_db[self.model_id]["bbox"]["min"] - return -bbox_min[2] * self.model_scale + 0.05 - - def _initialize_actors(self): - super()._initialize_actors() - - # Some objects need longer time to settle - obj_comp = self.obj.find_component_by_type(physx.PhysxRigidDynamicComponent) - lin_vel = np.linalg.norm(obj_comp.linear_velocity) - ang_vel = np.linalg.norm(obj_comp.angular_velocity) - if lin_vel > 1e-3 or ang_vel > 1e-2: - self._settle(0.5) diff --git a/mani_skill2/envs/sapien_env.py b/mani_skill2/envs/sapien_env.py index 2da846e2..cd741466 100644 --- a/mani_skill2/envs/sapien_env.py +++ b/mani_skill2/envs/sapien_env.py @@ -558,6 +558,7 @@ def _clear_sim_state(self): component.set_linear_velocity([0, 0, 0]) component.set_angular_velocity([0, 0, 0]) for articulation in self._scene.get_all_articulations(): + articulation: physx.PhysxArticulation articulation.set_qvel(np.zeros(articulation.dof)) articulation.set_root_velocity([0, 0, 0]) articulation.set_root_angular_velocity([0, 0, 0]) diff --git a/mani_skill2/examples/motionplanning/README.md b/mani_skill2/examples/motionplanning/README.md index d932fc98..3486691a 100644 --- a/mani_skill2/examples/motionplanning/README.md +++ b/mani_skill2/examples/motionplanning/README.md @@ -1,6 +1,8 @@ # Motion Planning Examples -First install the maniskill2 motion planning library +This folder has example code for running motion planning to solve various ManiSkill tasks. 
These are also used to generate some of the demonstration datasets + +To get started, after installing ManiSkill, then install the following motion planning library ``` pip install mplib ``` \ No newline at end of file diff --git a/mani_skill2/examples/motionplanning/grasp_pose/gen_ycb.py b/mani_skill2/examples/motionplanning/grasp_pose/gen_ycb.py new file mode 100644 index 00000000..3cb4e9a3 --- /dev/null +++ b/mani_skill2/examples/motionplanning/grasp_pose/gen_ycb.py @@ -0,0 +1,72 @@ +import numpy as np +from utils import compute_grasp_poses + +from mani_skill2 import ASSET_DIR, PACKAGE_ASSET_DIR +from mani_skill2.envs.pick_and_place.pick_single import load_json + +YCB_DIR = ASSET_DIR / "mani_skill2_ycb" + + +def main(): + model_db = load_json(YCB_DIR / "info_pick_v0.json") + model_ids = [ + "019_pitcher_base", + "024_bowl", + "025_mug", + "035_power_drill", + "052_extra_large_clamp", + "065-f_cups", + "065-g_cups", + "065-h_cups", + "065-i_cups", + "065-j_cups", + "072-a_toy_airplane", + "072-b_toy_airplane", + ] + output_dir = YCB_DIR / "grasp_poses_info_pick_v0_panda_v2" + output_dir.mkdir(exist_ok=True) + + gripper_urdf = str(PACKAGE_ASSET_DIR / "robots/panda/panda_v2_gripper.urdf") + gripper_srdf = str(PACKAGE_ASSET_DIR / "robots/panda/panda_v2.srdf") + + for model_id in model_ids: + scales = model_db[model_id]["scales"] + assert len(scales) == 1 + mesh_path = str(YCB_DIR / f"models/{model_id}/collision.obj") + + n_pts = 2048 + n_angles = 12 + if model_id == "019_pitcher_base": + n_angles = 24 + if model_id == "072-a_toy_airplane": + n_angles = 24 + + while True: + grasp_poses = compute_grasp_poses( + mesh_path, + int(n_pts), + gripper_urdf, + gripper_srdf, + "panda_hand_tcp", + n_angles=n_angles, + mesh_scale=scales[0], + octree_resolution=0.005, + add_ground=True, + # vis=True, + ) + if len(grasp_poses) < 16: + n_pts *= 2 + elif len(grasp_poses) > 100: + n_pts /= 2 + else: + break + print(n_pts) + if n_pts > 8096 or n_pts < 512: + break + + print(model_id, 
len(grasp_poses)) + np.save(output_dir / f"{model_id}.npy", grasp_poses) + + +if __name__ == "__main__": + main() diff --git a/mani_skill2/examples/motionplanning/grasp_pose/utils.py b/mani_skill2/examples/motionplanning/grasp_pose/utils.py new file mode 100644 index 00000000..80ca10dd --- /dev/null +++ b/mani_skill2/examples/motionplanning/grasp_pose/utils.py @@ -0,0 +1,258 @@ +import numpy as np +import open3d as o3d +import tqdm +import transforms3d +from pymp.robot import RobotWrapper +from pymp.utils import toSE3 +from scipy.spatial.distance import cdist +from scipy.spatial.transform import Rotation + + +def norm_vec(x, eps=1e-6): + norm = np.linalg.norm(x, axis=-1, keepdims=True) + return np.where(norm < eps, 0, x / np.maximum(norm, eps)) + + +def rotate_transform(R): + out = np.zeros(R.shape[:-2] + (4, 4)) + out[..., :3, :3] = R + out[..., 3, 3] = 1 + return out + + +def compute_antipodal_contact_points(points, normals, max_width, score_thresh): + dist = cdist(points, points) + + # Distance between two contact points should be larger than gripper width. + mask1 = dist <= max_width + + # [n, n, 3], direction between two surface points + direction = norm_vec(points[None, :] - points[:, None]) + # [n, n] + cos_angle = np.squeeze(direction @ normals[..., None], -1) + + # Heuristic from S4G + score = np.abs(cos_angle * cos_angle.T) + mask2 = score >= score_thresh + # print(score[0:5, 0:5]) + + row, col = np.nonzero(np.triu(np.logical_and(mask1, mask2), k=1)) + return row, col, score[row, col] + + +def initialize_grasp_poses(points, row, col): + # Assume the grasp frame is approaching (x), closing (y), ortho (z). + # The origin is the center of two contact points. + # Please convert to the gripper you use. + + # The closing vector is segment between two contact points + displacement = points[col] - points[row] + closing_vec = norm_vec(displacement) # [m, 3] + # Approaching and orthogonal vectors should be searched later. 
+ U, _, _ = np.linalg.svd(closing_vec[..., None]) # [m, 3, 3] + approaching_vec = U[..., 1] # [m, 3] + assert np.all(np.einsum("nd,nd->n", approaching_vec, closing_vec) <= 1e-6) + center = (points[col] + points[row]) * 0.5 + + grasp_frames = np.tile(np.eye(4), [len(row), 1, 1]) + grasp_frames[:, 0:3, 0] = approaching_vec + grasp_frames[:, 0:3, 1] = closing_vec + grasp_frames[:, 0:3, 2] = np.cross(approaching_vec, closing_vec) + grasp_frames[:, 0:3, 3] = center + return grasp_frames, np.linalg.norm(displacement, axis=-1) * 0.5 + + +def augment_grasp_poses(grasp_poses, angles): + Rs = Rotation.from_euler("y", angles).as_matrix() # [A, 3, 3] + Ts = rotate_transform(Rs) # [A, 4, 4] + out = np.einsum("nij,mjk->nmik", grasp_poses, Ts) + return out + + +def compute_grasp_poses( + mesh_path, + n_pts, + gripper_urdf, + gripper_srdf, + tcp_link_name, + mesh_scale=1.0, + gripper_width=0.08, + open_ratio=0.95, + score_thresh=0.97, + n_angles=36, + octree_resolution=0.001, + open_widths=None, + add_ground=False, + seed=0, + vis=False, +): + # Load shape + mesh = o3d.io.read_triangle_mesh(str(mesh_path)) + mesh.compute_vertex_normals() + mesh.scale(mesh_scale, center=(0, 0, 0)) + min_bound = mesh.get_min_bound() + # Open3d interpolates normals for sampled points. + pcd = mesh.sample_points_uniformly(number_of_points=n_pts) + points, normals = np.array(pcd.points), np.array(pcd.normals) + + # 1. Generate contact point pairs + row, col, score = compute_antipodal_contact_points( + points, normals, gripper_width * open_ratio, score_thresh + ) + print("#contact points", len(row)) + + # 2. Build initial grasp poses + grasp_poses, closing_dists = initialize_grasp_poses(points, row, col) + + # 3. 
Search feasible grasp poses + angles = np.linspace(0, 2 * np.pi, n_angles) + grasp_poses = augment_grasp_poses(grasp_poses, angles) + print(grasp_poses.shape, closing_dists.shape) + grasp_poses = np.reshape(grasp_poses, [-1, 4, 4]) + closing_dists = np.repeat(closing_dists, n_angles, axis=0) + + # Load gripper urdf + gripper = RobotWrapper.loadFromURDF(str(gripper_urdf), floating=True) + gripper.initCollisionPairs() + gripper.removeCollisionPairsFromSRDF(str(gripper_srdf)) + gripper.addOctree(points, octree_resolution) + + if add_ground: + gripper.addBox( + [1, 1, octree_resolution], + toSE3([0, 0, min_bound[2] - octree_resolution * 2]), + name="ground", + ) + + if vis: + gripper.addMeshVisual(np.array(mesh.vertices), np.array(mesh.triangles)) + + # TCP frame + tcp_frame = gripper.get_frame(tcp_link_name) + root_to_tcp = np.array(tcp_frame.placement.inverse()) + + # 4. Search + valid_grasp_poses = [] + valid_grasp_qpos = [] + pbar = tqdm.tqdm(total=len(grasp_poses)) + if open_widths is None: + open_widths = [0.01, 0.02, 0.03, 0.04] + + for grasp_pose, closing_dist in zip(grasp_poses, closing_dists): + root_pose = grasp_pose @ root_to_tcp + pos = root_pose[:3, 3] + quat = Rotation.from_matrix(root_pose[:3, :3]).as_quat() + for w in open_widths: + if w < closing_dist: + continue + grasp_qpos = np.hstack([pos, quat, w, w]) + if gripper.isCollisionFree(grasp_qpos): + valid_grasp_qpos.append(grasp_qpos) + # sapien format + pq = np.hstack( + [ + grasp_pose[:3, 3], + transforms3d.quaternions.mat2quat(grasp_pose[:3, :3]), + ] + ) + valid_grasp_poses.append(np.hstack([pq, w])) + break + pbar.update() + + valid_grasp_poses = np.array(valid_grasp_poses) + valid_grasp_qpos = np.array(valid_grasp_qpos) + print("#valid grasp poses", len(valid_grasp_poses)) + + if len(valid_grasp_poses) > 0 and vis: + gripper.initMeshcatDisplay(None) + gripper.play(valid_grasp_qpos, dt=0.5) + + return valid_grasp_poses + + +def main(): + from mani_skill2 import DESCRIPTION_DIR + from 
mani_skill2.envs.pick_and_place.pick_single import YCB_DIR + + # Load shape + # mesh_path = str(YCB_DIR / "models/002_master_chef_can/convex.obj") + # mesh_path = str(YCB_DIR / "models/065-h_cups/convex.obj") + mesh_path = str(YCB_DIR / "models/072-a_toy_airplane/convex.obj") + mesh = o3d.io.read_triangle_mesh(mesh_path) + mesh.compute_vertex_normals() + # Open3d interpolates normals for sampled points. + pcd = mesh.sample_points_uniformly(number_of_points=2048, seed=0) + points, normals = np.array(pcd.points), np.array(pcd.normals) + + # vis_pcd = o3d.geometry.PointCloud() + # vis_pcd.points = o3d.utility.Vector3dVector(points) + # vis_pcd.normals = o3d.utility.Vector3dVector(normals) + # o3d.visualization.draw_geometries([vis_pcd]) + + # 1. Generate contact point pairs + row, col, score = compute_antipodal_contact_points( + points, normals, 0.08 * 0.95, 0.97 + ) + print("#contact points", len(row)) + + # 2. Build initial grasp poses + grasp_poses, closing_dists = initialize_grasp_poses(points, row, col) + + # 3. 
Search feasible grasp poses + n_angles = 36 + angles = np.linspace(0, 2 * np.pi, n_angles) + grasp_poses = augment_grasp_poses(grasp_poses, angles) + print(grasp_poses.shape) + grasp_poses = np.reshape(grasp_poses, [-1, 4, 4]) + closing_dists = np.repeat(closing_dists, n_angles, axis=0) + + gripper_urdf = str(DESCRIPTION_DIR / "panda_v2_gripper.urdf") + gripper_srdf = str(DESCRIPTION_DIR / "panda_v2.srdf") + gripper = RobotWrapper.loadFromURDF(gripper_urdf, floating=True) + # print(gripper.nq, gripper.nv) + # print(gripper.model.names) + # print(gripper.model.frames) + + gripper.initCollisionPairs() + gripper.removeCollisionPairsFromSRDF(gripper_srdf) + gripper.addOctree(points, 0.001) + gripper.addMeshVisual(np.array(mesh.vertices), np.array(mesh.triangles)) + # # Add a ground + # gripper.addBox([1, 1, 0.005], toSE3([0, 0, -0.005]), name="ground") + + # TCP frame + tcp_frame = gripper.get_frame("panda_hand_tcp") + root_to_tcp = np.array(tcp_frame.placement.inverse()) + + # 4. Search + valid_grasp_poses = [] + pbar = tqdm.tqdm(total=len(grasp_poses)) + w_step = 0.005 + widths = np.arange(0.01, 0.04 + w_step * 0.5, w_step) + + for grasp_pose, closing_dist in zip(grasp_poses, closing_dists): + root_pose = grasp_pose @ root_to_tcp + pos = root_pose[:3, 3] + quat = Rotation.from_matrix(root_pose[:3, :3]).as_quat() + for w in widths: + if w < closing_dist: + continue + grasp_qpos = np.hstack([pos, quat, w, w]) + if gripper.isCollisionFree(grasp_qpos): + valid_grasp_poses.append(grasp_qpos) + break + pbar.update() + + valid_grasp_poses = np.array(valid_grasp_poses) + print("#valid grasp poses", len(valid_grasp_poses)) + + if len(valid_grasp_poses) == 0: + exit(0) + + # Visualize + gripper.initMeshcatDisplay(None) + gripper.play(np.array(valid_grasp_poses).T, dt=0.5) + + +if __name__ == "__main__": + main() diff --git a/mani_skill2/examples/motionplanning/motionplanner.py b/mani_skill2/examples/motionplanning/motionplanner.py index c0256ae0..aba423ef 100644 --- 
a/mani_skill2/examples/motionplanning/motionplanner.py +++ b/mani_skill2/examples/motionplanning/motionplanner.py @@ -1,8 +1,8 @@ import mplib import numpy as np import sapien +import trimesh -from mani_skill2 import PACKAGE_ASSET_DIR from mani_skill2.agents.base_agent import BaseAgent from mani_skill2.envs.sapien_env import BaseEnv @@ -23,13 +23,16 @@ def __init__( joint_acc_limits=0.9, ): self.env = env - self.env_agent: BaseAgent = self.env.unwrapped - self.robot = self.env.agent.robot + self.env_agent: BaseAgent = self.env.unwrapped.agent + self.robot = self.env_agent.robot self.joint_vel_limits = joint_vel_limits self.joint_acc_limits = joint_acc_limits + + self.base_pose = base_pose + self.planner = self.setup_planner() self.control_mode = self.env.unwrapped.control_mode - self.base_pose = base_pose + self.debug = debug self.vis = vis self.print_env_info = print_env_info @@ -43,6 +46,10 @@ def __init__( self.grasp_pose_visual.set_pose(env.unwrapped.agent.tcp.entity_pose) self.elapsed_steps = 0 + self.use_point_cloud = False + self.collision_pts_changed = False + self.all_collision_pts = None + def render_wait(self): if not self.vis or not self.debug: return @@ -57,21 +64,23 @@ def setup_planner(self): link_names = [link.get_name() for link in self.robot.get_links()] joint_names = [joint.get_name() for joint in self.robot.get_active_joints()] planner = mplib.Planner( - urdf=self.env.agent.urdf_path, + urdf=self.env_agent.urdf_path, + srdf=self.env_agent.urdf_path.replace(".urdf", ".srdf"), user_link_names=link_names, user_joint_names=joint_names, move_group="panda_hand_tcp", joint_vel_limits=np.ones(7) * self.joint_acc_limits, joint_acc_limits=np.ones(7) * self.joint_vel_limits, ) + planner.set_base_pose(np.hstack([self.base_pose.p, self.base_pose.q])) return planner - def follow_path(self, result): + def follow_path(self, result, refine_steps: int = 0): n_step = result["position"].shape[0] - for i in range(n_step): - qpos = result["position"][i] + for i in 
range(n_step + refine_steps): + qpos = result["position"][min(i, n_step - 1)] if self.control_mode == "pd_joint_pos_vel": - qvel = result["velocity"][i] + qvel = result["velocity"][(i, n_step - 1)] action = np.hstack([qpos, qvel, self.gripper_state]) else: action = np.hstack([qpos, self.gripper_state]) @@ -85,23 +94,31 @@ def follow_path(self, result): self.env.unwrapped.render_human() return obs, reward, terminated, truncated, info - def move_to_pose_with_RRTConnect(self, pose): + def move_to_pose_with_RRTConnect( + self, pose: sapien.Pose, dry_run: bool = False, refine_steps: int = 0 + ): if self.grasp_pose_visual is not None: self.grasp_pose_visual.set_pose(pose) - pose = sapien.Pose(p=pose.p - self.base_pose.p, q=pose.q) - result = self.planner.plan( + pose = sapien.Pose(p=pose.p, q=pose.q) + result = self.planner.plan_qpos_to_pose( np.concatenate([pose.p, pose.q]), self.robot.get_qpos(), time_step=self.env.unwrapped.control_timestep, + use_point_cloud=self.use_point_cloud, + wrt_world=True, ) if result["status"] != "Success": print(result["status"]) self.render_wait() return -1 self.render_wait() - return self.follow_path(result) + if dry_run: + return result + return self.follow_path(result, refine_steps=refine_steps) - def move_to_pose_with_screw(self, pose: sapien.Pose): + def move_to_pose_with_screw( + self, pose: sapien.Pose, dry_run: bool = False, refine_steps: int = 0 + ): # try screw two times before giving up if self.grasp_pose_visual is not None: self.grasp_pose_visual.set_pose(pose) @@ -110,19 +127,23 @@ def move_to_pose_with_screw(self, pose: sapien.Pose): np.concatenate([pose.p, pose.q]), self.robot.get_qpos(), time_step=self.env.unwrapped.control_timestep, + use_point_cloud=self.use_point_cloud, ) if result["status"] != "Success": - result = self.planner.plan( - np.concatenate([pose.p, pose.q]), + result = self.planner.plan_screw( + np.concatenate([pose.p - self.base_pose.p, pose.q]), self.robot.get_qpos(), 
time_step=self.env.unwrapped.control_timestep, + use_point_cloud=self.use_point_cloud, ) if result["status"] != "Success": print(result["status"]) self.render_wait() return -1 self.render_wait() - return self.follow_path(result) + if dry_run: + return result + return self.follow_path(result, refine_steps=refine_steps) def open_gripper(self): self.gripper_state = OPEN @@ -160,6 +181,27 @@ def close_gripper(self, t=6): self.env.unwrapped.render_human() return obs, reward, terminated, truncated, info + def add_box_collision(self, extents: np.ndarray, pose: sapien.Pose): + self.use_point_cloud = True + box = trimesh.creation.box(extents, transform=pose.to_transformation_matrix()) + pts, _ = trimesh.sample.sample_surface(box, 256) + if self.all_collision_pts is None: + self.all_collision_pts = pts + else: + self.all_collision_pts = np.vstack([self.all_collision_pts, pts]) + self.planner.update_point_cloud(self.all_collision_pts) + + def add_collision_pts(self, pts: np.ndarray): + if self.all_collision_pts is None: + self.all_collision_pts = pts + else: + self.all_collision_pts = np.vstack([self.all_collision_pts, pts]) + self.planner.update_point_cloud(self.all_collision_pts) + + def clear_collisions(self): + self.all_collision_pts = None + self.use_point_cloud = False + def close(self): if self.grasp_pose_visual is not None: self.grasp_pose_visual.remove_from_scene() diff --git a/mani_skill2/examples/motionplanning/ms2_tasks/lift_cube.py b/mani_skill2/examples/motionplanning/ms2_tasks/lift_cube.py new file mode 100644 index 00000000..efe15c55 --- /dev/null +++ b/mani_skill2/examples/motionplanning/ms2_tasks/lift_cube.py @@ -0,0 +1,84 @@ +import gymnasium as gym +import numpy as np +import sapien.core as sapien +from tqdm import tqdm + +from mani_skill2.envs.pick_and_place.pick_cube import LiftCubeEnv +from mani_skill2.examples.motionplanning.motionplanner import ( + PandaArmMotionPlanningSolver, +) +from mani_skill2.examples.motionplanning.utils import ( + 
compute_grasp_info_by_obb, + get_actor_obb, +) + + +def main(): + env: LiftCubeEnv = gym.make( + "LiftCube-v0", + obs_mode="none", + control_mode="pd_joint_pos", + render_mode="rgb_array", + reward_mode="sparse", + # shader_dir="rt-fast", + ) + for seed in tqdm(range(100)): + res = solve(env, seed=seed, debug=False, vis=True) + print(res[-1]) + env.close() + + +def solve(env: LiftCubeEnv, seed=None, debug=False, vis=False): + env.reset(seed=seed) + assert env.unwrapped.control_mode in [ + "pd_joint_pos", + "pd_joint_pos_vel", + ], env.unwrapped.control_mode + planner = PandaArmMotionPlanningSolver( + env, + debug=debug, + vis=vis, + base_pose=env.unwrapped.agent.robot.pose, + visualize_target_grasp_pose=vis, + print_env_info=False, + ) + + FINGER_LENGTH = 0.025 + env = env.unwrapped + obb = get_actor_obb(env.obj) + + approaching = np.array([0, 0, -1]) + target_closing = env.agent.tcp.entity_pose.to_transformation_matrix()[:3, 1] + grasp_info = compute_grasp_info_by_obb( + obb, + approaching=approaching, + target_closing=target_closing, + depth=FINGER_LENGTH, + ) + closing, center = grasp_info["closing"], grasp_info["center"] + grasp_pose = env.agent.build_grasp_pose(approaching, closing, center) + + # -------------------------------------------------------------------------- # + # Reach + # -------------------------------------------------------------------------- # + reach_pose = grasp_pose * sapien.Pose([0, 0, -0.05]) + planner.move_to_pose_with_screw(reach_pose) + + # -------------------------------------------------------------------------- # + # Grasp + # -------------------------------------------------------------------------- # + planner.move_to_pose_with_screw(grasp_pose) + planner.close_gripper() + + # -------------------------------------------------------------------------- # + # Move to goal pose + # -------------------------------------------------------------------------- # + goal_pose = sapien.Pose(env.goal_pos + [0, 0, 0.01], grasp_pose.q) + res = 
planner.move_to_pose_with_screw(goal_pose) + + planner.close() + return res + + +if __name__ == "__main__": + main() diff --git a/mani_skill2/examples/motionplanning/ms2_tasks/panda_avoid_obstacles.py b/mani_skill2/examples/motionplanning/ms2_tasks/panda_avoid_obstacles.py new file mode 100644 index 00000000..71a6a04c --- /dev/null +++ b/mani_skill2/examples/motionplanning/ms2_tasks/panda_avoid_obstacles.py @@ -0,0 +1,82 @@ +from collections import OrderedDict + +import gymnasium as gym +import numpy as np +import sapien.core as sapien +from tqdm import tqdm + +from mani_skill2.envs.misc.avoid_obstacles import PandaAvoidObstaclesEnv +from mani_skill2.examples.motionplanning.motionplanner import ( + CLOSED, + PandaArmMotionPlanningSolver, +) +from mani_skill2.examples.motionplanning.utils import ( + compute_grasp_info_by_obb, + get_actor_obb, +) + + +def main(): + env: PandaAvoidObstaclesEnv = gym.make( + "PandaAvoidObstacles-v0", + obs_mode="none", + control_mode="pd_joint_pos", + render_mode="rgb_array", + reward_mode="sparse", + # shader_dir="rt-fast", + ) + for seed in tqdm(range(100)): + res = solve(env, seed=seed, debug=False, vis=True) + print(res[-1]) + env.close() + + +def solve(env: PandaAvoidObstaclesEnv, seed=None, debug=False, vis=False): + env.reset(seed=seed) + orig_env = env + assert env.unwrapped.control_mode in [ + "pd_joint_pos", + "pd_joint_pos_vel", + ], env.unwrapped.control_mode + planner = PandaArmMotionPlanningSolver( + env, + debug=debug, + vis=vis, + base_pose=env.unwrapped.agent.robot.pose, + visualize_target_grasp_pose=vis, + print_env_info=False, + joint_vel_limits=0.4, + joint_acc_limits=0.4, + ) + env = env.unwrapped + + episode_config = env.episode_config + for i, cfg in enumerate(episode_config["obstacles"]): + planner.add_box_collision( + np.array(cfg["half_size"]) * 2 + 0.01, + pose=sapien.Pose(cfg["pose"][:3], cfg["pose"][3:]), + ) + planner.add_box_collision([1, 1, 1], sapien.Pose(p=[0, 0, -0.51])) + planner.add_box_collision( + 
np.array(episode_config["wall"]["half_size"]) * 2 + 0.01, + sapien.Pose( + p=episode_config["wall"]["pose"][:3], q=episode_config["wall"]["pose"][3:] + ), + ) + + # -------------------------------------------------------------------------- # + # Reach + # -------------------------------------------------------------------------- # + planner.gripper_state = CLOSED + for i in range(10): + res = planner.move_to_pose_with_RRTConnect(env.goal_pose, refine_steps=5) + if res != -1: + break + planner.close() + if res == -1: + return OrderedDict(), 0, False, False, {"success": False, "elapsed_steps": 0} + return res + + +if __name__ == "__main__": + main() diff --git a/mani_skill2/examples/motionplanning/mplib_peg_insertion_side.py b/mani_skill2/examples/motionplanning/ms2_tasks/peg_insertion_side.py similarity index 68% rename from mani_skill2/examples/motionplanning/mplib_peg_insertion_side.py rename to mani_skill2/examples/motionplanning/ms2_tasks/peg_insertion_side.py index 8a494091..36a5bab0 100644 --- a/mani_skill2/examples/motionplanning/mplib_peg_insertion_side.py +++ b/mani_skill2/examples/motionplanning/ms2_tasks/peg_insertion_side.py @@ -1,43 +1,29 @@ import gymnasium as gym import numpy as np -import pymp import sapien.core as sapien -from mani_skill2 import ASSET_DIR from mani_skill2.envs.assembly.peg_insertion_side import PegInsertionSideEnv - -# isort: off +from mani_skill2.examples.motionplanning.motionplanner import ( + PandaArmMotionPlanningSolver, +) from mani_skill2.examples.motionplanning.utils import ( compute_grasp_info_by_obb, get_actor_obb, ) -from mani_skill2.examples.motionplanning.motionplanner import ( - PandaArmMotionPlanningSolver, -) -from mani_skill2.utils.wrappers import RecordEpisode -import trimesh.creation -import trimesh.sample def main(): env: PegInsertionSideEnv = gym.make( "PegInsertionSide-v0", obs_mode="none", - control_mode="pd_joint_pos_vel", + control_mode="pd_joint_pos", render_mode="rgb_array", reward_mode="dense", - 
shader_dir="rt-fast", + # shader_dir="rt-fast", ) - env = RecordEpisode( - env, - output_dir="videos/peg_insertion_side", - trajectory_name="trajectory", - video_fps=60, - info_on_video=True, - ) - for seed in range(5, 10): - solve(env, seed=seed, debug=False, vis=False) - + for seed in range(100): + res = solve(env, seed=seed, debug=False, vis=True) + print(res[-1]) env.close() @@ -52,6 +38,7 @@ def solve(env: PegInsertionSideEnv, seed=None, debug=False, vis=False): debug=debug, vis=vis, base_pose=env.unwrapped.agent.robot.pose, + print_env_info=False, joint_vel_limits=0.5, joint_acc_limits=0.5, ) @@ -61,11 +48,8 @@ def solve(env: PegInsertionSideEnv, seed=None, debug=False, vis=False): obb = get_actor_obb(env.peg) approaching = np.array([0, 0, -1]) target_closing = env.agent.tcp.pose.to_transformation_matrix()[:3, 1] - peg_size = np.array(env.peg_half_size) * 2 - import copy - - peg_init_pose = copy.deepcopy(env.peg.pose) + peg_init_pose = env.peg.pose grasp_info = compute_grasp_info_by_obb( obb, approaching=approaching, target_closing=target_closing, depth=FINGER_LENGTH @@ -75,10 +59,22 @@ def solve(env: PegInsertionSideEnv, seed=None, debug=False, vis=False): offset = sapien.Pose([-max(0.05, env.peg_half_size[0] / 2 + 0.01), 0, 0]) grasp_pose = grasp_pose * (offset) + # -------------------------------------------------------------------------- # + # Reach + # -------------------------------------------------------------------------- # reach_pose = grasp_pose * (sapien.Pose([0, 0, -0.05])) planner.move_to_pose_with_screw(reach_pose) + + # -------------------------------------------------------------------------- # + # Grasp + # -------------------------------------------------------------------------- # planner.move_to_pose_with_screw(grasp_pose) planner.close_gripper() + + # -------------------------------------------------------------------------- # + # Align Peg + # -------------------------------------------------------------------------- # + # align the peg 
def solve(env: PickClutterEnv, seed=None, debug=False, vis=False):
    """Motion-plan one PickClutterYCB episode.

    Tries precomputed grasp poses for the target object (falling back to an
    OBB-derived grasp), grasps it, and carries it to the goal position,
    randomizing the approach if the direct motion fails.

    Returns -1 when planning fails outright, otherwise the final
    ``(obs, reward, terminated, truncated, info)`` tuple.
    """
    env.reset(seed=seed)
    assert env.unwrapped.control_mode in [
        "pd_joint_pos",
        "pd_joint_pos_vel",
    ], env.unwrapped.control_mode
    planner = PandaArmMotionPlanningSolver(
        env,
        debug=debug,
        vis=vis,
        base_pose=env.unwrapped.agent.robot.pose,
        visualize_target_grasp_pose=vis,
        print_env_info=False,
        joint_vel_limits=0.5,
        joint_acc_limits=0.5,
    )

    FINGER_LENGTH = 0.025
    env = env.unwrapped
    obb = get_actor_obb(env.obj)

    approaching = np.array([0, 0, -1])
    target_closing = env.agent.tcp.entity_pose.to_transformation_matrix()[:3, 1]
    grasp_info = compute_grasp_info_by_obb(
        obb,
        approaching=approaching,
        target_closing=target_closing,
        depth=FINGER_LENGTH,
    )
    closing, center = grasp_info["closing"], grasp_info["center"]
    orig_grasp_pose = env.agent.build_grasp_pose(approaching, closing, center)

    # -------------------------------------------------------------------- #
    # Search grasp poses
    # -------------------------------------------------------------------- #
    def search_grasp_poses(grasp_poses, n=None):
        """Try up to n precomputed grasps in random order.

        Returns ``(world_grasp_pose, finger_width, plan)`` for the first
        grasp with a feasible, short-enough screw plan, or
        ``(None, None, None)`` when none qualifies.
        """
        rng = np.random.RandomState(seed)
        inds = rng.permutation(len(grasp_poses))[:n]
        for grasp_pose in grasp_poses[inds]:
            p = grasp_pose[:3]
            q = grasp_pose[3:7]
            w = grasp_pose[7]
            grasp_pose = env.obj.pose * sapien.Pose(p, q)

            # Symmetry: flip 180 deg about z so the closing axis agrees
            # with the current TCP closing direction.
            grasp_closing = grasp_pose.to_transformation_matrix()[:3, 1]
            if (grasp_closing @ target_closing) < 0:
                grasp_pose = grasp_pose * sapien.Pose(q=euler2quat(0, 0, np.pi))

            res = planner.move_to_pose_with_screw(grasp_pose, dry_run=True)
            # If plan succeeded and is not too long, proceed
            if res != -1 and len(res["position"]) < 200:
                return grasp_pose, w, res
        # FIX: was `return None, None` -- the caller unpacks three values,
        # so an unsuccessful search raised ValueError instead of falling
        # back to the OBB grasp.
        return None, None, None

    model_id = env.obj.name
    grasp_poses_path = (
        ASSET_DIR
        / "mani_skill2_ycb/grasp_poses_info_pick_v0_panda_v2"
        / f"{model_id}.npy"
    )
    if not grasp_poses_path.exists():
        grasp_poses_path = None

    # -------------------------------------------------------------------- #
    # Compute usable grasp pose and reach
    # -------------------------------------------------------------------- #
    grasp_pose2 = None
    if grasp_poses_path is not None:
        grasp_poses = np.load(grasp_poses_path)
        grasp_pose2, w, res = search_grasp_poses(grasp_poses, 512)
    if grasp_pose2 is not None:
        grasp_pose = grasp_pose2
        # hardcode: map finger width + clearance into the normalized
        # gripper command range [-1, 1] over the 0.05 m gripper span.
        CLEARANCE = 0.002
        OPEN_GRIPPER_POS = ((w + 0.01 + CLEARANCE) / 0.05) * 2 - 1
        planner.gripper_state = OPEN_GRIPPER_POS
        reach_pose = grasp_pose * (sapien.Pose([0, 0, -0.05]))
        planner.follow_path(res)
    else:
        grasp_pose = orig_grasp_pose
        reach_pose = grasp_pose * (sapien.Pose([0, 0, -0.05]))
        planner.move_to_pose_with_screw(reach_pose)

    # -------------------------------------------------------------------- #
    # Grasp
    # -------------------------------------------------------------------- #
    planner.move_to_pose_with_screw(grasp_pose)
    planner.close_gripper()

    # -------------------------------------------------------------------- #
    # Move to goal
    # -------------------------------------------------------------------- #
    # NOTE(jigu): The goal position is defined by center of mass.
    offset = env.goal_pos - env.obj_pose.p
    goal_pose = sapien.Pose(grasp_pose.p + offset, grasp_pose.q)
    res = planner.move_to_pose_with_screw(goal_pose)
    if res == -1:
        print("Try to search goal pose")
        rng = np.random.RandomState(seed)
        for _ in range(20):
            # Rotate the goal pose about the goal point by a random tilt/yaw.
            center_pose = sapien.Pose(p=env.goal_pos)
            q = euler2quat(
                0, rng.uniform(-np.pi / 2, 0), rng.uniform(-np.pi / 3, np.pi / 3)
            )
            delta_pose = sapien.Pose(q=q)
            goal_pose = center_pose * delta_pose * center_pose.inv() * goal_pose
            res = planner.move_to_pose_with_screw(goal_pose)
            if res != -1:
                break
        else:
            print("Fail to find a goal pose")

    # refine a bit more
    res = planner.move_to_pose_with_screw(goal_pose)

    planner.close()
    return res
PandaArmMotionPlanningSolver( + env, + debug=debug, + vis=vis, + base_pose=env.unwrapped.agent.robot.pose, + visualize_target_grasp_pose=vis, + print_env_info=False, + ) + + FINGER_LENGTH = 0.025 + env = env.unwrapped + obb = get_actor_obb(env.obj) + + approaching = np.array([0, 0, -1]) + target_closing = env.agent.tcp.entity_pose.to_transformation_matrix()[:3, 1] + grasp_info = compute_grasp_info_by_obb( + obb, + approaching=approaching, + target_closing=target_closing, + depth=FINGER_LENGTH, + ) + closing, center = grasp_info["closing"], grasp_info["center"] + grasp_pose = env.agent.build_grasp_pose(approaching, closing, center) + + # -------------------------------------------------------------------------- # + # Reach + # -------------------------------------------------------------------------- # + reach_pose = grasp_pose * sapien.Pose([0, 0, -0.05]) + planner.move_to_pose_with_screw(reach_pose) + + # -------------------------------------------------------------------------- # + # Grasp + # -------------------------------------------------------------------------- # + planner.move_to_pose_with_screw(grasp_pose) + planner.close_gripper() + + # -------------------------------------------------------------------------- # + # Move to goal pose + # -------------------------------------------------------------------------- # + goal_pose = sapien.Pose(env.goal_pos, grasp_pose.q) + res = planner.move_to_pose_with_screw(goal_pose) + + planner.close() + return res + + +if __name__ == "__main__": + main() diff --git a/mani_skill2/examples/motionplanning/ms2_tasks/plug_charger.py b/mani_skill2/examples/motionplanning/ms2_tasks/plug_charger.py new file mode 100644 index 00000000..f99a69d5 --- /dev/null +++ b/mani_skill2/examples/motionplanning/ms2_tasks/plug_charger.py @@ -0,0 +1,109 @@ +import gymnasium as gym +import numpy as np +import sapien.core as sapien +import trimesh +from tqdm import tqdm +from transforms3d.euler import euler2quat + +from 
mani_skill2.envs.assembly.plug_charger import PlugChargerEnv +from mani_skill2.examples.motionplanning.motionplanner import ( + PandaArmMotionPlanningSolver, +) +from mani_skill2.examples.motionplanning.utils import ( + compute_grasp_info_by_obb, + get_actor_obb, +) + + +def main(): + env: PlugChargerEnv = gym.make( + "PlugCharger-v0", + obs_mode="none", + control_mode="pd_joint_pos", + render_mode="rgb_array", + reward_mode="sparse", + # shader_dir="rt-fast", + ) + for seed in tqdm(range(100)): + res = solve(env, seed=seed, debug=False, vis=True) + print(res[-1]) + env.close() + + +def solve(env: PlugChargerEnv, seed=None, debug=False, vis=False): + env.reset(seed=seed) + assert env.unwrapped.control_mode in [ + "pd_joint_pos", + "pd_joint_pos_vel", + ], env.unwrapped.control_mode + planner = PandaArmMotionPlanningSolver( + env, + debug=debug, + vis=vis, + base_pose=env.unwrapped.agent.robot.pose, + visualize_target_grasp_pose=vis, + print_env_info=False, + joint_vel_limits=0.5, + joint_acc_limits=0.5, + ) + + FINGER_LENGTH = 0.025 + env = env.unwrapped + charger_base_pose = env.charger_base_pose + charger_base_size = np.array(env.unwrapped._base_size) * 2 + + obb = trimesh.primitives.Box( + extents=charger_base_size, + transform=charger_base_pose.to_transformation_matrix(), + ) + + approaching = np.array([0, 0, -1]) + target_closing = env.agent.tcp.entity_pose.to_transformation_matrix()[:3, 1] + grasp_info = compute_grasp_info_by_obb( + obb, + approaching=approaching, + target_closing=target_closing, + depth=FINGER_LENGTH, + ) + closing, center = grasp_info["closing"], grasp_info["center"] + grasp_pose = env.agent.build_grasp_pose(approaching, closing, center) + + # add a angle to grasp + grasp_angle = np.deg2rad(15) + grasp_pose = grasp_pose * sapien.Pose(q=euler2quat(0, grasp_angle, 0)) + + # -------------------------------------------------------------------------- # + # Reach + # -------------------------------------------------------------------------- # + 
reach_pose = grasp_pose * sapien.Pose([0, 0, -0.05]) + planner.move_to_pose_with_screw(reach_pose) + + # -------------------------------------------------------------------------- # + # Grasp + # -------------------------------------------------------------------------- # + planner.move_to_pose_with_screw(grasp_pose) + planner.close_gripper() + + # -------------------------------------------------------------------------- # + # Align + # -------------------------------------------------------------------------- # + pre_insert_pose = ( + env.goal_pose + * sapien.Pose([-0.05, 0.0, 0.0]) + * env.charger.pose.inv() + * env.agent.tcp.entity_pose + ) + insert_pose = env.goal_pose * env.charger.pose.inv() * env.agent.tcp.entity_pose + planner.move_to_pose_with_screw(pre_insert_pose, refine_steps=0) + planner.move_to_pose_with_screw(pre_insert_pose, refine_steps=5) + # -------------------------------------------------------------------------- # + # Insert + # -------------------------------------------------------------------------- # + res = planner.move_to_pose_with_screw(insert_pose) + + planner.close() + return res + + +if __name__ == "__main__": + main() diff --git a/mani_skill2/examples/motionplanning/ms2_tasks/stack_cube.py b/mani_skill2/examples/motionplanning/ms2_tasks/stack_cube.py new file mode 100644 index 00000000..0e16f336 --- /dev/null +++ b/mani_skill2/examples/motionplanning/ms2_tasks/stack_cube.py @@ -0,0 +1,117 @@ +import gymnasium as gym +import numpy as np +import sapien.core as sapien +from tqdm import tqdm +from transforms3d.euler import euler2quat + +from mani_skill2.envs.pick_and_place.stack_cube import StackCubeEnv +from mani_skill2.examples.motionplanning.motionplanner import ( + PandaArmMotionPlanningSolver, +) +from mani_skill2.examples.motionplanning.utils import ( + compute_grasp_info_by_obb, + get_actor_obb, +) +from mani_skill2.utils.wrappers import RecordEpisode + + +def main(): + env: StackCubeEnv = gym.make( + "StackCube-v0", + 
obs_mode="none", + control_mode="pd_joint_pos", + render_mode="rgb_array", + reward_mode="sparse", + # shader_dir="rt-fast", + ) + env = RecordEpisode( + env, + output_dir=f"videos/StackCube-v0", + trajectory_name="trajectory", + save_video=False, + info_on_video=True, + ) + for seed in tqdm(range(100)): + res = solve(env, seed=seed, debug=False, vis=True) + print(res[-1]) + env.close() + + +def solve(env: StackCubeEnv, seed=None, debug=False, vis=False): + env.reset(seed=seed) + assert env.unwrapped.control_mode in [ + "pd_joint_pos", + "pd_joint_pos_vel", + ], env.unwrapped.control_mode + planner = PandaArmMotionPlanningSolver( + env, + debug=debug, + vis=vis, + base_pose=env.unwrapped.agent.robot.pose, + visualize_target_grasp_pose=vis, + print_env_info=False, + ) + + FINGER_LENGTH = 0.025 + env = env.unwrapped + obb = get_actor_obb(env.cubeA) + + approaching = np.array([0, 0, -1]) + target_closing = env.agent.tcp.entity_pose.to_transformation_matrix()[:3, 1] + grasp_info = compute_grasp_info_by_obb( + obb, + approaching=approaching, + target_closing=target_closing, + depth=FINGER_LENGTH, + ) + closing, center = grasp_info["closing"], grasp_info["center"] + grasp_pose = env.agent.build_grasp_pose(approaching, closing, center) + + # Search a valid pose + angles = np.arange(0, np.pi * 2 / 3, np.pi / 2) + angles = np.repeat(angles, 2) + angles[1::2] *= -1 + for angle in angles: + delta_pose = sapien.Pose(q=euler2quat(0, 0, angle)) + grasp_pose2 = grasp_pose * delta_pose + res = planner.move_to_pose_with_screw(grasp_pose2, dry_run=True) + if res == -1: + continue + grasp_pose = grasp_pose2 + break + else: + print("Fail to find a valid grasp pose") + + # -------------------------------------------------------------------------- # + # Reach + # -------------------------------------------------------------------------- # + reach_pose = grasp_pose * sapien.Pose([0, 0, -0.05]) + planner.move_to_pose_with_screw(reach_pose) + + # 
-------------------------------------------------------------------------- # + # Grasp + # -------------------------------------------------------------------------- # + planner.move_to_pose_with_screw(grasp_pose) + planner.close_gripper() + + # -------------------------------------------------------------------------- # + # Lift + # -------------------------------------------------------------------------- # + lift_pose = sapien.Pose([0, 0, 0.1]) * grasp_pose + planner.move_to_pose_with_screw(lift_pose) + + # -------------------------------------------------------------------------- # + # Stack + # -------------------------------------------------------------------------- # + goal_pos = env.cubeB.pose.p + [0, 0, env.box_half_size[2] * 2] + offset = goal_pos - env.cubeA.pose.p + align_pose = sapien.Pose(lift_pose.p + offset, lift_pose.q) + planner.move_to_pose_with_screw(align_pose) + + res = planner.open_gripper() + planner.close() + return res + + +if __name__ == "__main__": + main() diff --git a/mani_skill2/examples/motionplanning/record_demos.py b/mani_skill2/examples/motionplanning/record_demos.py new file mode 100644 index 00000000..c3990c91 --- /dev/null +++ b/mani_skill2/examples/motionplanning/record_demos.py @@ -0,0 +1,118 @@ +""" +Code to mass generate demonstrations via motion planning solutions for various tasks + +Run python mani_skill2/examples/motionplanning/record_demos.py to test it out, which will generate one successful video per task with a MP solution + +Run python mani_skill2/examples/motionplanning/record_demos.py --no-video -n 1000 to mass generate a demo dataset + +""" + + +import argparse +import os.path as osp + +import gymnasium as gym +from tqdm import tqdm + +from mani_skill2.examples.motionplanning.ms2_tasks.lift_cube import ( + solve as lift_cube_solve, +) +from mani_skill2.examples.motionplanning.ms2_tasks.panda_avoid_obstacles import ( + solve as panda_avoid_obstacles_solve, +) +from 
def record_ms2_motion_planned_demonstrations(args):
    """Mass-generate motion-planned demonstrations for the MS2 tasks.

    For each task, runs that task's motion-planning ``solve`` with
    increasing seeds until ``args.num_episodes`` successful episodes have
    been recorded, saving videos and/or trajectories per the CLI flags.
    """
    MS2_TASKS = [
        ("PickCube-v0", pick_cube_solve),
        ("StackCube-v0", stack_cube_solve),
        ("LiftCube-v0", lift_cube_solve),
        ("PandaAvoidObstacles-v0", panda_avoid_obstacles_solve),
        ("PegInsertionSide-v0", peg_insertion_side_solve),
        ("PlugCharger-v0", plug_charger_solve),
    ]
    for task, solve in MS2_TASKS:
        env = gym.make(
            task,
            obs_mode=args.obs_mode,
            control_mode=args.control_mode,
            render_mode=args.render_mode,
            reward_mode="sparse",
            shader_dir=args.shader_dir,
        )
        env = RecordEpisode(
            env,
            output_dir=osp.join(args.record_dir, "rigid_body", task),
            trajectory_name="trajectory",
            save_video=(not args.no_video),
            save_on_reset=False,
        )
        print(f"Generating demonstrations for {task}")
        pbar = tqdm(total=args.num_episodes)
        n_success = 0
        n = 0
        seed = args.start_seed
        while n_success < args.num_episodes:
            res = solve(env, seed=seed, debug=False, vis=False)
            if res == -1:
                # FIX: advance the seed before retrying; previously a seed
                # whose plan failed was retried forever (infinite loop).
                seed += 1
                continue
            final_obs, reward, terminated, truncated, info = res
            success = info["success"]
            if not args.allow_timeout:
                success = success and (not truncated)
            # Save video
            if not args.no_video and (success or args.allow_failed_videos):
                elapsed_steps = info["elapsed_steps"]
                suffix = "seed={}-success={}-steps={}".format(
                    seed, success, elapsed_steps
                )
                env.flush_video(suffix, verbose=True)
            # Save trajectory
            # FIX: honor --no-traj, which was parsed but never consulted.
            if not args.no_traj and (success or args.allow_failure):
                env.flush_trajectory(verbose=False)
            pbar.update()

            n_success += int(success)
            n += 1
            seed += 1
        print(f"{task} Success Rate:", n_success / n)
        env.close()


def main():
    """Parse CLI options and run demonstration generation."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-o", "--obs-mode", type=str, default="none")
    parser.add_argument("-c", "--control-mode", type=str, default="pd_joint_pos")
    parser.add_argument("-r", "--record-dir", type=str, default="videos")
    parser.add_argument("-n", "--num-episodes", type=int, default=1)
    parser.add_argument("--render-mode", type=str, default="cameras")
    parser.add_argument("--no-traj", action="store_true")
    parser.add_argument("--no-video", action="store_true")
    parser.add_argument("--allow-failure", action="store_true")
    parser.add_argument("--allow-failed-videos", action="store_true")
    parser.add_argument("--allow-timeout", action="store_true")
    parser.add_argument("--start-seed", type=int, default=0)
    parser.add_argument("--shader-dir", type=str, default="default")
    args = parser.parse_args()
    record_ms2_motion_planned_demonstrations(args)


if __name__ == "__main__":
    main()
target_path="mani_skill2_egad", - checksum="4b5d841256e0151c2a615a98d4e92afa12ad6d795e2565b364586e3940d3aa36", - ) - DATA_GROUPS["PickSingleEGAD-v0"] = ["egad"] - DATA_SOURCES["pick_clutter_ycb"] = dict( url="https://storage1.ucsd.edu/datasets/ManiSkill2022-assets/pick_clutter/ycb_train_5k.json.gz", output_dir=ASSET_DIR / "pick_clutter", diff --git a/mani_skill2/utils/download_demo.py b/mani_skill2/utils/download_demo.py index 2f6eddc2..99a821c2 100644 --- a/mani_skill2/utils/download_demo.py +++ b/mani_skill2/utils/download_demo.py @@ -46,14 +46,6 @@ ], latest_version=0, ) -DATASET_SOURCES["PickSingleEGAD-v0"] = dict( - env_type="rigid_body", - object_paths=[ - "rigid_body/PickSingleEGAD-v0/trajectory.h5", - "rigid_body/PickSingleEGAD-v0/trajectory.json", - ], - latest_version=0, -) DATASET_SOURCES["PickSingleYCB-v0"] = dict( env_type="rigid_body", object_paths=[ diff --git a/mani_skill2/utils/scene_builder/ai2thor/constants.py b/mani_skill2/utils/scene_builder/ai2thor/constants.py index 2948194e..a465798d 100644 --- a/mani_skill2/utils/scene_builder/ai2thor/constants.py +++ b/mani_skill2/utils/scene_builder/ai2thor/constants.py @@ -6,6 +6,7 @@ from mani_skill2 import ASSET_DIR + @dataclass class SceneConfig: config_file: str @@ -16,6 +17,8 @@ class SceneConfig: class SceneDataset: metadata_path: str dataset_path: str + + def load_ai2thor_metadata(): with open( str( @@ -137,6 +140,7 @@ def load_ai2thor_metadata(): ] return OBJECT_SEMANTIC_ID_MAPPING, SEMANTIC_ID_OBJECT_MAPPING, MOVEABLE_OBJECT_IDS + # This maps a scene set e.g. ProcTHOR to an adapter, metadata, and where the scenes are saved to. 
The adapter is a class that can load the scene set SCENE_SOURCE_TO_DATASET: Dict[str, SceneDataset] = { "ProcTHOR": SceneDataset( diff --git a/mani_skill2/utils/scene_builder/ai2thor/scene_builder.py b/mani_skill2/utils/scene_builder/ai2thor/scene_builder.py index a0be52f4..ce7989e1 100644 --- a/mani_skill2/utils/scene_builder/ai2thor/scene_builder.py +++ b/mani_skill2/utils/scene_builder/ai2thor/scene_builder.py @@ -19,11 +19,7 @@ from mani_skill2 import ASSET_DIR from mani_skill2.utils.scene_builder import SceneBuilder -from .constants import ( - load_ai2thor_metadata, - SCENE_SOURCE_TO_DATASET, - SceneConfig, -) +from .constants import SCENE_SOURCE_TO_DATASET, SceneConfig, load_ai2thor_metadata DATASET_CONFIG_DIR = osp.join(osp.dirname(__file__), "metadata") @@ -31,7 +27,13 @@ dict() ) # cached results mapping scene dataset ID to a list of scene configurations -OBJECT_SEMANTIC_ID_MAPPING, SEMANTIC_ID_OBJECT_MAPPING, MOVEABLE_OBJECT_IDS = None, None, None +OBJECT_SEMANTIC_ID_MAPPING, SEMANTIC_ID_OBJECT_MAPPING, MOVEABLE_OBJECT_IDS = ( + None, + None, + None, +) + + class AI2THORBaseSceneBuilder(SceneBuilder): """ The Base AI2THOR scene builder class. 
Subclasses @@ -41,7 +43,11 @@ class AI2THORBaseSceneBuilder(SceneBuilder): def __init__(self): global OBJECT_SEMANTIC_ID_MAPPING, SEMANTIC_ID_OBJECT_MAPPING, MOVEABLE_OBJECT_IDS - OBJECT_SEMANTIC_ID_MAPPING, SEMANTIC_ID_OBJECT_MAPPING, MOVEABLE_OBJECT_IDS = load_ai2thor_metadata() + ( + OBJECT_SEMANTIC_ID_MAPPING, + SEMANTIC_ID_OBJECT_MAPPING, + MOVEABLE_OBJECT_IDS, + ) = load_ai2thor_metadata() self._scene_configs: List[SceneConfig] = [] if self.scene_dataset not in ALL_SCENE_CONFIGS: dataset_path = SCENE_SOURCE_TO_DATASET[self.scene_dataset].metadata_path diff --git a/mani_skill2/utils/wrappers/record.py b/mani_skill2/utils/wrappers/record.py index c5077cfa..1786aa05 100644 --- a/mani_skill2/utils/wrappers/record.py +++ b/mani_skill2/utils/wrappers/record.py @@ -360,6 +360,7 @@ def close(self) -> None: self.flush_trajectory(ignore_empty_transition=True) if self.clean_on_close: clean_trajectories(self._h5_file, self._json_data) + dump_json(self._json_path, self._json_data, indent=2) self._h5_file.close() if self.save_video: if self.save_on_reset: diff --git a/setup.py b/setup.py index f662796a..622c0740 100644 --- a/setup.py +++ b/setup.py @@ -6,7 +6,7 @@ setup( name="mani_skill2", - version="0.6.0.dev2", + version="0.6.0.dev3", description="ManiSkill2: A Unified Benchmark for Generalizable Manipulation Skills", long_description=long_description, long_description_content_type="text/markdown",