diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..0aa0b03 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +.env +AlohaMini.pem \ No newline at end of file diff --git a/docs/remote_simulation_workflow.md b/docs/remote_simulation_workflow.md index ef32fc7..bfaf88c 100644 --- a/docs/remote_simulation_workflow.md +++ b/docs/remote_simulation_workflow.md @@ -5,7 +5,7 @@ This guide outlines how to develop, train, and test AlohaMini using a remote clo ## Architecture * **Cloud Server (The "Lab")**: Runs NVIDIA Isaac Sim. Handles physics, rendering, and training. -* **Local Machine (The "Mission Control")**: Runs the Dashboard and Teleoperation scripts. Connects to the cloud via SSH. +* **Local Machine (The "Mission Control")**: Runs the Teleoperation script with Rerun visualization. Connects to the cloud via SSH. ## Prerequisites @@ -51,23 +51,22 @@ This guide outlines how to develop, train, and test AlohaMini using a remote clo ssh -L 5555:localhost:5555 -L 5556:localhost:5556 ubuntu@ ``` -3. **Launch Dashboard (Local)** - Start the web dashboard to see what the robot sees. +3. **Launch Teleop & Visualization (Local)** + Start the teleop script which includes Rerun for visualization. ```bash # On Local Mac - python software/dashboard/app.py + python software/examples/alohamini/remote_teleop_rerun.py ``` - Open `http://localhost:5001` in your browser. + This will open the Rerun viewer and a terminal interface for control. 4. **Teleoperate & Record** - * Use the Dashboard to see the camera feed. - * Run the teleop script in another terminal to control the robot with your keyboard: - ```bash - python software/examples/alohamini/standalone_teleop.py --ip 127.0.0.1 - ``` - * **To Record**: Click the **"Start Recording"** button on the Dashboard. + * **Controls**: + * WASD: Move Base + * Q/E: Rotate Base + * U/J: Lift Up/Down + * **To Record**: Press **'r'** in the terminal window to toggle recording. * Perform the task (e.g., pick up the object). 
- * Click **"Stop Recording"**. + * Press **'r'** again to stop recording. * Repeat 50-100 times. The data is saved to `AlohaMini/data_sim/` on the **Cloud Server**. ### Phase 2: Training @@ -102,7 +101,7 @@ Test the trained model in the simulator to see if it works. * *Coming Soon: `eval_sim.py` which loads the safetensor and drives the ZMQ robot.* 3. **Watch (Local)**: - Use the Dashboard to watch the robot perform the task autonomously. + Use `software/examples/alohamini/remote_teleop_rerun.py` (without touching keys) to watch the robot perform the task autonomously. ## Troubleshooting diff --git a/scripts/launch_remote_sim.py b/scripts/launch_remote_sim.py new file mode 100755 index 0000000..9227d5c --- /dev/null +++ b/scripts/launch_remote_sim.py @@ -0,0 +1,188 @@ +#!/usr/bin/env python3 +import subprocess +import sys +import time +import threading +import os +import signal +import re + +# Configuration +PEM_FILE = "AlohaMini.pem" +REMOTE_HOST = "ubuntu@64.181.241.126" +REMOTE_IP = "64.181.241.126" + +# Commands +CMD_SIM_DOCKER = [ + "ssh", "-i", PEM_FILE, REMOTE_HOST, "-t", + 'sudo docker run --name isaac-sim --privileged --entrypoint bash -it --gpus all ' + '-e "ACCEPT_EULA=Y" -e "NVIDIA_VISIBLE_DEVICES=all" -e "NVIDIA_DRIVER_CAPABILITIES=all" ' + '--rm --network=host -v ~/AlohaMini:/isaac-sim/AlohaMini nvcr.io/nvidia/isaac-sim:4.2.0 ' + '-c "cd /isaac-sim && ./python.sh -m pip install \\"numpy<2.0.0\\" opencv-python-headless pyzmq && ' + './python.sh AlohaMini/software/examples/alohamini/isaac_sim/isaac_alohamini_env.py ' + '--/app/livestream/enabled=true --/app/livestream/proto=ws"' +] + +CMD_PORT_FWD = [ + "ssh", "-i", PEM_FILE, "-N", + "-L", "5555:localhost:5555", + "-L", "5556:localhost:5556", + "-L", "8211:localhost:8211", + REMOTE_HOST +] + +CMD_LOCAL_TELEOP = [ + sys.executable, "software/examples/alohamini/remote_teleop_rerun.py" +] + +def stream_reader(pipe, prefix, stop_event, on_match=None, match_pattern=None): + """Reads from a pipe and prints 
to stdout, optionally triggering a callback on match.""" + try: + for line in iter(pipe.readline, ''): + if stop_event.is_set(): + break + if not line: + break + # Print output with prefix + # Clean up the line: handle \r and whitespace + # Use \r\n to ensure we move to the next line properly + clean_line = line.replace('\r', '').strip() + + if clean_line: + print(f"[{prefix}] {clean_line}\r") + + if match_pattern and on_match: + if match_pattern in line: + on_match() + # We only trigger once + on_match = None + except Exception: + pass + +def cleanup_remote(): + """Stops the remote docker container and ensures no lingering processes.""" + print("Cleaning up remote environment...") + cmd = ["ssh", "-i", PEM_FILE, REMOTE_HOST, "sudo docker stop isaac-sim || true && sudo docker rm isaac-sim || true"] + try: + subprocess.run(cmd, check=False, timeout=10, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + except Exception as e: + print(f"Warning: Remote cleanup failed: {e}") + +def sync_code(): + print("Syncing local code to remote server...") + # Sync software folder to ensure remote has latest changes + # This creates ~/AlohaMini/software if it doesn't exist, or updates it + cmd = ["scp", "-i", PEM_FILE, "-r", "software", f"{REMOTE_HOST}:~/AlohaMini/"] + try: + subprocess.run(cmd, check=True) + print("Sync complete.") + except subprocess.CalledProcessError as e: + print(f"Error syncing code: {e}") + sys.exit(1) + +def main(): + if not os.path.exists(PEM_FILE): + print(f"Error: {PEM_FILE} not found in current directory.") + print("Please run this script from the project root where the .pem file is located.") + sys.exit(1) + + print("Starting Remote Isaac Sim Workflow...") + print("-------------------------------------") + + # Initial cleanup to ensure fresh state + cleanup_remote() + + # Sync code + sync_code() + + # Events and State + stop_event = threading.Event() + sim_ready_event = threading.Event() + + processes = [] + + def on_sim_ready(): + print("\n>>> 
SIMULATION READY DETECTED! Starting Port Forwarding... <<<\n") + sim_ready_event.set() + + # 1. Start Simulation (Remote Docker) + print(f"Step 1: Launching Isaac Sim container on {REMOTE_HOST}...") + sim_process = subprocess.Popen( + CMD_SIM_DOCKER, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, + bufsize=1 + ) + processes.append(sim_process) + + # Start monitoring thread for Sim output + sim_thread = threading.Thread( + target=stream_reader, + args=(sim_process.stdout, "REMOTE", stop_event, on_sim_ready, "Isaac Sim AlohaMini running") + ) + sim_thread.daemon = True + sim_thread.start() + + try: + # Wait for Sim to be ready + print("Waiting for simulation to initialize (this may take a minute)...") + while not sim_ready_event.is_set(): + if sim_process.poll() is not None: + print("Error: Simulation process exited prematurely.") + sys.exit(1) + time.sleep(1) + + # 2. Start Port Forwarding + print("Step 2: Starting SSH Port Forwarding...") + fwd_process = subprocess.Popen( + CMD_PORT_FWD, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) + processes.append(fwd_process) + + # Give it a moment to establish + time.sleep(3) + if fwd_process.poll() is not None: + print("Error: Port forwarding failed to start.") + # Check stderr + print(fwd_process.stderr.read().decode()) + # Continue anyway? No, teleop needs it. + sys.exit(1) + else: + print("Port forwarding established.") + + # 3. 
Start Local Teleop + print("Step 3: Starting Local Teleop...") + print("-------------------------------------") + print("Use the keyboard commands below to control the robot.") + + teleop_process = subprocess.Popen(CMD_LOCAL_TELEOP) + processes.append(teleop_process) + + # Wait for teleop to finish (user quits) + teleop_process.wait() + + except KeyboardInterrupt: + print("\nStopping workflow...") + finally: + stop_event.set() + print("Cleaning up processes...") + + # Kill all started processes + for p in processes: + if p.poll() is None: + p.terminate() + try: + p.wait(timeout=2) + except subprocess.TimeoutExpired: + p.kill() + + # Explicit remote cleanup + cleanup_remote() + + print("Done.") + +if __name__ == "__main__": + main() diff --git a/scripts/setup_remote_sim.sh b/scripts/setup_remote_sim.sh new file mode 100644 index 0000000..1739c3e --- /dev/null +++ b/scripts/setup_remote_sim.sh @@ -0,0 +1,58 @@ +#!/bin/bash +set -e + +# Configuration +REPO_URL="https://github.com/blankey1337/AlohaMini.git" +ISAAC_IMAGE="nvcr.io/nvidia/isaac-sim:2023.1.1" + +echo ">>> Starting Remote Setup..." + +# 1. Install Docker & NVIDIA Container Toolkit if missing +if ! command -v docker > /dev/null 2>&1; then + echo ">>> Installing Docker..." + curl -fsSL https://get.docker.com -o get-docker.sh + sudo sh get-docker.sh + sudo usermod -aG docker "$USER" +fi + +if ! dpkg -l | grep -q nvidia-container-toolkit; then + echo ">>> Installing NVIDIA Container Toolkit..." 
+ curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \ + && curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \ + sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \ + sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list + sudo apt-get update + sudo apt-get install -y nvidia-container-toolkit + sudo nvidia-ctk runtime configure --runtime=docker + sudo systemctl restart docker +fi + +# 2. Docker Login +if [ -z "$NVIDIA_API_KEY" ]; then + echo ">>> Please export NVIDIA_API_KEY before running this script." + exit 1 +fi + +echo ">>> Logging into NGC..." +echo "$NVIDIA_API_KEY" | sudo docker login nvcr.io -u '$oauthtoken' --password-stdin + +# 3. Clone Repo +if [ -d "AlohaMini" ]; then + if [ -d "AlohaMini/.git" ]; then + echo ">>> AlohaMini already cloned. Pulling latest..." + cd AlohaMini && git pull && cd .. + else + echo ">>> AlohaMini directory exists but is not a git repo. Removing and cloning..." + rm -rf AlohaMini + git clone "$REPO_URL" + fi +else + echo ">>> Cloning AlohaMini..." + git clone "$REPO_URL" +fi + +# 4. Pull Isaac Sim Image +echo ">>> Pulling Isaac Sim Image ($ISAAC_IMAGE)..." +sudo docker pull "$ISAAC_IMAGE" + +echo ">>> Setup Complete!" 
diff --git a/software/examples/alohamini/__pycache__/remote_teleop_rerun.cpython-310.pyc b/software/examples/alohamini/__pycache__/remote_teleop_rerun.cpython-310.pyc new file mode 100644 index 0000000..d6b8501 Binary files /dev/null and b/software/examples/alohamini/__pycache__/remote_teleop_rerun.cpython-310.pyc differ diff --git a/software/examples/alohamini/evaluate_bi.py b/software/examples/alohamini/evaluate_bi.py index fc8bfa3..0a2e4ca 100644 --- a/software/examples/alohamini/evaluate_bi.py +++ b/software/examples/alohamini/evaluate_bi.py @@ -1,5 +1,12 @@ #!/usr/bin/env python3 +import sys +import os + +# Add repo root to path +repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../..")) +sys.path.append(repo_root) + from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets.utils import hw_to_dataset_features from lerobot.policies.act.modeling_act import ACTPolicy diff --git a/software/examples/alohamini/isaac_sim/__pycache__/isaac_alohamini_env.cpython-310.pyc b/software/examples/alohamini/isaac_sim/__pycache__/isaac_alohamini_env.cpython-310.pyc new file mode 100644 index 0000000..d44f032 Binary files /dev/null and b/software/examples/alohamini/isaac_sim/__pycache__/isaac_alohamini_env.cpython-310.pyc differ diff --git a/software/examples/alohamini/isaac_sim/isaac_alohamini_env.py b/software/examples/alohamini/isaac_sim/isaac_alohamini_env.py index 91a912f..5782c85 100644 --- a/software/examples/alohamini/isaac_sim/isaac_alohamini_env.py +++ b/software/examples/alohamini/isaac_sim/isaac_alohamini_env.py @@ -17,9 +17,10 @@ "height": 720, "window_width": 1920, "window_height": 1080, - "headless": False, - "renderer": "RayTracedLighting", + "headless": True, + # "renderer": "RayTracedLighting", # Disable explicit RT to fallback to default (Raster) for stability "display_options": 3286, # Show Grid + "livesync_usd": None, } # Start SimulationApp @@ -32,8 +33,11 @@ from omni.isaac.core import World # noqa: E402 from 
omni.isaac.core.articulations import ArticulationSubset # noqa: E402 from omni.isaac.core.robots import Robot # noqa: E402 +from omni.isaac.core.prims import XFormPrim from omni.isaac.core.utils.rotations import euler_angles_to_quat # noqa: E402 +from omni.isaac.core.utils.nucleus import get_assets_root_path # noqa: E402 from omni.isaac.sensor import Camera # noqa: E402 +from omni.isaac.core.objects import DynamicCuboid, DynamicSphere, DynamicCylinder, FixedCuboid # noqa: E402 from pxr import Gf, UsdGeom # noqa: F401 # Add repo root to path @@ -98,28 +102,166 @@ def __init__(self, world, urdf_path): self.robot_prim_path = "/World/AlohaMini" self.robot = None self.cameras = {} + self.camera_prims = {} + self.camera_pitch = 0.0 # deg self.setup_scene() + self.setup_environment() - def setup_scene(self): - # Import URDF - from omni.kit.commands import execute - - success, prim_path = execute( - "URDFParseAndImportFile", - urdf_path=self.urdf_path, - import_config={ - "merge_fixed_joints": False, - "fix_base": False, - "make_default_prim": False, - "create_physics_scene": True, - }, + def setup_environment(self): + # Try to load a realistic environment from Nucleus + assets_root_path = get_assets_root_path() + env_loaded = False + + if assets_root_path: + # Common realistic environments + # simple_room_path = assets_root_path + "/Isaac/Environments/Simple_Room/simple_room.usd" + # Or a modern office + env_path = assets_root_path + "/Isaac/Environments/Simple_Room/simple_room.usd" + + try: + # We use add_reference_to_stage to load the USD into the current stage + stage_utils.add_reference_to_stage(env_path, "/World/Environment") + print(f"Loaded environment from {env_path}") + env_loaded = True + + # Move the environment so the robot (at 0,0) isn't inside a table + # The Simple Room often has a table at the center + env_prim = XFormPrim("/World/Environment") + env_prim.set_world_pose(translation=np.array([0.0, 2.0, 0.0])) # Move room 2m to the side + + except Exception 
as e: + print(f"Failed to load environment asset: {e}") + + if not env_loaded: + print("Nucleus assets not found or failed to load. Using procedural environment.") + # Fallback: Add ground plane and walls + self.world.scene.add_default_ground_plane() + + wall_back = FixedCuboid( + prim_path="/World/WallBack", + name="wall_back", + position=np.array([-0.5, 0.0, 1.0]), + scale=np.array([0.1, 3.0, 2.0]), + color=np.array([0.9, 0.9, 0.9]) + ) + self.world.scene.add(wall_back) + + # Always add the manipulation task elements (Desk + Objects) + # We might need to adjust their position if the room has walls at 0,0 + + # 1. Desk (In front of robot) + # Shift desk forward (positive x) so robot doesn't spawn inside it or too close + # Lower the desk to ~15cm height so it's visible to the low-mounted camera + desk_height = 0.15 + desk_z = desk_height / 2.0 + + desk = FixedCuboid( + prim_path="/World/Desk", + name="desk", + position=np.array([1.5, 0.0, desk_z]), # Moved from 1.0 to 1.5 + scale=np.array([0.5, 1.0, desk_height]), + color=np.array([0.4, 0.3, 0.2]) + ) + self.world.scene.add(desk) + + # 2. 
Objects on Desk + # Ball + ball = DynamicSphere( + prim_path="/World/Ball", + name="ball", + position=np.array([1.5, 0.2, desk_height + 0.04]), # Adjusted z to sit on desk + radius=0.04, + color=np.array([0.8, 0.1, 0.1]), + mass=0.1 + ) + self.world.scene.add(ball) + + # Notepad + notepad = DynamicCuboid( + prim_path="/World/Notepad", + name="notepad", + position=np.array([1.5, -0.2, desk_height + 0.01]), # Adjusted z + scale=np.array([0.15, 0.2, 0.02]), + color=np.array([0.9, 0.9, 0.9]), + mass=0.1 + ) + self.world.scene.add(notepad) + + # Pencil + pencil = DynamicCylinder( + prim_path="/World/Pencil", + name="pencil", + position=np.array([1.5, -0.2, desk_height + 0.02]), # Adjusted z + radius=0.005, + height=0.15, + color=np.array([0.9, 0.8, 0.1]), + mass=0.02, + orientation=euler_angles_to_quat(np.radians(np.array([0, 90, 0]))) # Rotate to lay flat ) + self.world.scene.add(pencil) + + # 3. Walls (Moved to fallback) + + # Add a DistantLight to ensure the scene is lit even if Environment fails or is dark + # This is critical for headless rendering without RayTracing + from omni.isaac.core.prims import XFormPrim + from pxr import UsdLux + + # Create a distant light + stage = self.world.stage + light_prim_path = "/World/defaultLight" + if not stage.GetPrimAtPath(light_prim_path): + light = UsdLux.DistantLight.Define(stage, light_prim_path) + light.CreateIntensityAttr(3000) # High intensity for raster + light.CreateAngleAttr(0.53) + light.CreateColorAttr(Gf.Vec3f(1.0, 1.0, 1.0)) + + # Orient it to shine down and forward + xform = UsdGeom.Xformable(light) + # Rotate around X to shine down + xform.AddRotateXOp().Set(-60) + + + def setup_scene(self): + # Import URDF using omni.importer.urdf API directly for 4.x compatibility + # The command interface changed between 2023.x and 4.x + + import omni.kit.commands + + # In Isaac Sim 4.0+, we can often use the URDFImporter extension directly + # or use the command with updated arguments. 
+ # But to be robust, let's use the extension interface if possible, or try the command with correct args. + + # Try the command with the new signature if known, or use the lower level API. + # The error showed: import_robot(self, assetRoot, assetName, robot, importConfig, ...) + # We need a UrdfRobot object if we use that low level function. + + # Easiest path: Use the high-level 'URDFParseAndImportFile' command but ensure arguments are correct. + # The error suggests the command wrapper is failing to map arguments correctly. + + # Let's try direct Extension API usage which bypasses the Command wrapper ambiguity. + from omni.importer.urdf import _urdf + + urdf_interface = _urdf.acquire_urdf_interface() + + import_config = _urdf.ImportConfig() + import_config.merge_fixed_joints = False + import_config.fix_base = False + import_config.make_default_prim = False + import_config.create_physics_scene = True + + root_path = os.path.dirname(self.urdf_path) + file_name = os.path.basename(self.urdf_path) - if success: - # Move to correct location - # The importer might put it at /alohamini, we want it at /World/AlohaMini usually or just reference it - # Let's assume it imported to the stage root with the robot name + # 1. Parse + imported_robot = urdf_interface.parse_urdf(root_path, file_name, import_config) + + # 2. Import + dest_path = "" # Import to root or default prim + prim_path = urdf_interface.import_robot(root_path, file_name, imported_robot, import_config, dest_path) + + if prim_path: pass else: print(f"Failed to import URDF from {self.urdf_path}") @@ -128,18 +270,86 @@ def setup_scene(self): # Find the robot prim (assuming name 'alohamini' from URDF) # We wrap it in an Articulation self.robot = Robot(prim_path="/alohamini", name="alohamini") + + # Explicitly set the robot position to avoid floor clipping + # Wheels are at Z=0.05 with radius 0.05, so Z=0 should be fine, but let's lift it 1cm to be safe. 
+ self.robot.set_world_pose(position=np.array([0.0, 0.0, 0.01])) + self.world.scene.add(self.robot) # Add Cameras - self.add_camera("head_front", "/alohamini/base_link/front_cam", np.array([0.2, 0, 0.2]), np.array([0, 0, 0])) - self.add_camera("head_top", "/alohamini/base_link/top_cam", np.array([0, 0, 0.5]), np.array([0, 90, 0])) + # 1. Robot-mounted + self.add_camera("head_front", "/alohamini/base_link/front_cam", np.array([0.2, 0, 0.2]), np.array([0., 0., 0.])) + # Top cam: Positioned higher (0.8m) and looking down (90 deg pitch) + self.add_camera("head_top", "/alohamini/base_link/top_cam", np.array([0, 0, 0.8]), np.array([0., 90., 0.])) + + # 2. Third-person (World fixed) + # Back view (Driver's view) + self.add_camera("cam_high_back", "/World/CamBack", np.array([-1.2, 0, 1.2]), np.array([0., 25., 0.]), resolution=(640, 480)) + # Front view (Looking at robot) + self.add_camera("cam_front_view", "/World/CamFront", np.array([2.5, 0, 1.0]), np.array([0., 15., 180.]), resolution=(640, 480)) + + def configure_controllers(self): + # Configure joint drives (stiffness/damping) + # Especially for lift_axis to prevent falling + # This must be done after the robot is added to the scene and reset + + # Initialize the robot view to ensure internal handles are created + self.robot.initialize() + + # We need dof indices to set gains correctly + # Initialize indices immediately + # Check if num_dof is valid (it might be 0 if not initialized) + if self.robot.num_dof > 0: + if hasattr(self.robot, "dof_names"): + # Isaac Sim 4.x + self.dof_names = self.robot.dof_names + else: + # Older versions + self.dof_names = [self.robot.get_dof_name(i) for i in range(self.robot.num_dof)] + + self.dof_indices = {name: i for i, name in enumerate(self.dof_names)} + + # Default gains + kps = np.ones(self.robot.num_dof) * 1000.0 + kds = np.ones(self.robot.num_dof) * 100.0 + + # Higher gains for lift to fight gravity + if "lift_axis" in self.dof_indices: + lift_idx = self.dof_indices["lift_axis"] 
+ kps[lift_idx] = 10000.0 + kds[lift_idx] = 1000.0 + + # Try to set gains using the ArticulationController (standard way) + try: + controller = self.robot.get_articulation_controller() + controller.set_gains(kps, kds) + except Exception as e: + print(f"Failed to set gains via ArticulationController: {e}") + print("Available methods on Robot:", dir(self.robot)) + # Attempt direct property setting if controller fails (fallback for some versions) + try: + # Check for direct attribute access (PhysX bindings) + # This is highly version dependent + pass + except: + pass + else: + print("Warning: Robot has 0 DOFs or is not initialized correctly.") + self.dof_names = [] + self.dof_indices = {} - def add_camera(self, name, prim_path, translation, rotation_euler_deg): + def add_camera(self, name, prim_path, translation, rotation_euler_deg, resolution=(640, 480)): + # Ensure float type for in-place addition + rotation_euler_deg = rotation_euler_deg.astype(float) + # Apply domain randomization to camera position # Small random perturbation to translation (+- 2cm) and rotation (+- 2 deg) # This helps the model become robust to slight calibration errors in the real world - translation += np.random.uniform(-0.02, 0.02, size=3) - rotation_euler_deg += np.random.uniform(-2, 2, size=3) + # Only randomize if it's a robot camera (heuristic: name starts with head) + if name.startswith("head"): + translation += np.random.uniform(-0.02, 0.02, size=3) + rotation_euler_deg += np.random.uniform(-2, 2, size=3) # rotation in sim is usually quaternion # rotation_euler_deg: [x, y, z] @@ -149,11 +359,24 @@ def add_camera(self, name, prim_path, translation, rotation_euler_deg): prim_path=prim_path, position=translation, frequency=30, - resolution=(640, 480), + resolution=resolution, orientation=rot_quat ) camera.initialize() + + # Set clipping range to avoid near plane clipping objects + # Especially critical for the front camera which is close to the desk + # Default is often 0.1 or 1.0 which is 
too large for manipulation + camera.set_clipping_range(0.01, 100.0) + self.cameras[name] = camera + + # We used to store XFormPrim here for cheating camera pose, but we removed it + # because the real robot has fixed cameras. + # self.camera_prims[name] = XFormPrim(prim_path) + + # Removed set_camera_pose because real robot cameras are fixed. + # To fix orientation issues, we should adjust the rotation in add_camera. def set_joint_positions(self, joint_positions: dict): # joint_positions: dict of joint_name -> position @@ -166,7 +389,10 @@ def set_joint_positions(self, joint_positions: dict): current_joint_pos = self.robot.get_joint_positions() # This requires known order. Let's build a map once initialized if not hasattr(self, "dof_indices"): - self.dof_names = [self.robot.get_dof_name(i) for i in range(self.robot.num_dof)] + if hasattr(self.robot, "dof_names"): + self.dof_names = self.robot.dof_names + else: + self.dof_names = [self.robot.get_dof_name(i) for i in range(self.robot.num_dof)] self.dof_indices = {name: i for i, name in enumerate(self.dof_names)} # Construct target array @@ -181,9 +407,32 @@ def set_joint_positions(self, joint_positions: dict): self.robot.set_joint_positions(target_pos) def set_base_velocity(self, vx, vy, vtheta): - # Set root velocity - # chassis frame: x forward, y left - self.robot.set_linear_velocity(np.array([vx, vy, 0])) + # Set root velocity in ROBOT frame + # We need to rotate vx, vy by the robot's current yaw + + # 1. Get current pose + pose = self.robot.get_world_pose() + # pose[0] is position, pose[1] is quaternion [w, x, y, z] + quat = pose[1] + + # 2. Convert to rotation matrix or just get yaw + # A simple way for 2D rotation: + # q = [w, x, y, z] + w, x, y, z = quat + # Yaw (rotation around Z) calculation from quaternion + # yaw = atan2(2(wz + xy), 1 - 2(y^2 + z^2)) + siny_cosp = 2 * (w * z + x * y) + cosy_cosp = 1 - 2 * (y * y + z * z) + yaw = np.arctan2(siny_cosp, cosy_cosp) + + # 3. 
Rotate velocity vector + # World VX = vx * cos(yaw) - vy * sin(yaw) + # World VY = vx * sin(yaw) + vy * cos(yaw) + + world_vx = vx * np.cos(yaw) - vy * np.sin(yaw) + world_vy = vx * np.sin(yaw) + vy * np.cos(yaw) + + self.robot.set_linear_velocity(np.array([world_vx, world_vy, 0])) self.robot.set_angular_velocity(np.array([0, 0, vtheta])) def get_observations(self): @@ -192,7 +441,12 @@ def get_observations(self): # Joints joint_pos = self.robot.get_joint_positions() if not hasattr(self, "dof_names"): - self.dof_names = [self.robot.get_dof_name(i) for i in range(self.robot.num_dof)] + # For Isaac Sim 4.x, dof_names is a property, usually on the Articulation view + if hasattr(self.robot, "dof_names"): + self.dof_names = self.robot.dof_names + else: + # Fallback for older versions or if wrapped differently + self.dof_names = [self.robot.get_dof_name(i) for i in range(self.robot.num_dof)] for i, name in enumerate(self.dof_names): obs[f"{name}.pos"] = float(joint_pos[i]) @@ -205,23 +459,31 @@ def get_observations(self): # Cameras for name, cam in self.cameras.items(): - if cam.is_new_frame_available(): + # In Isaac Sim 4.x, just try to get the frame. + # Some older methods like is_new_frame_available are deprecated/removed. + + # Use get_rgba() directly. It should return None or old frame if not ready. + # Or better, check render_product availability if needed, but simple access is usually safe. + try: rgba = cam.get_rgba()[:, :, :3] # Drop alpha - # Convert to BGR for compatibility with cv2/existing pipeline if needed, - # but existing pipeline seems to just encode to jpg. - # cv2 uses BGR, isaac returns RGB. 
bgr = cv2.cvtColor(rgba, cv2.COLOR_RGB2BGR) obs[name] = bgr + except Exception: + # Frame might not be ready yet + pass return obs def main(): world = World(stage_units_in_meters=1.0) - world.scene.add_default_ground_plane() + # world.scene.add_default_ground_plane() # Moved to setup_environment to avoid conflict with loaded rooms aloha = IsaacAlohaMini(world, URDF_PATH) world.reset() + + # Configure after reset to ensure physics handles are valid + aloha.configure_controllers() # ZMQ Setup # Ports from standalone_sim.py @@ -231,19 +493,23 @@ def main(): context = zmq.Context() socket_pub = context.socket(zmq.PUB) socket_pub.setsockopt(zmq.CONFLATE, 1) - socket_pub.bind(f"tcp://*:{PORT_OBS}") + socket_pub.bind(f"tcp://0.0.0.0:{PORT_OBS}") socket_sub = context.socket(zmq.PULL) socket_sub.setsockopt(zmq.CONFLATE, 1) - socket_sub.bind(f"tcp://*:{PORT_CMD}") + socket_sub.bind(f"tcp://0.0.0.0:{PORT_CMD}") print(f"Isaac Sim AlohaMini running. Ports: OBS={PORT_OBS}, CMD={PORT_CMD}") + sys.stdout.flush() recorder = DatasetRecorder(root_dir="data_sim") while simulation_app.is_running(): world.step(render=True) + if world.current_time_step_index % 100 == 0: + print(f"Sim Step: {world.current_time_step_index}, Time: {world.current_time}") + if not world.is_playing(): continue @@ -269,6 +535,7 @@ def main(): # Reset print("Resetting robot...") world.reset() + aloha.configure_controllers() # Re-apply gains joint_cmds = {name: 0.0 for name in aloha.dof_names} aloha.set_joint_positions(joint_cmds) continue @@ -276,6 +543,12 @@ def main(): if k.endswith(".pos"): joint_name = k.replace(".pos", "") joint_cmds[joint_name] = v + + # Mirror gripper commands for the second finger + if joint_name == "arm_left_gripper": + joint_cmds["arm_left_gripper_2"] = v + elif joint_name == "arm_right_gripper": + joint_cmds["arm_right_gripper_2"] = v elif k == "x.vel": vx = v elif k == "y.vel": @@ -283,7 +556,8 @@ def main(): elif k == "theta.vel": vth = v elif k == "lift_axis.height_mm": - 
joint_cmds["lift_axis"] = v + # Convert mm to meters + joint_cmds["lift_axis"] = v / 1000.0 aloha.set_joint_positions(joint_cmds) aloha.set_base_velocity(vx, vy, vth) @@ -315,6 +589,10 @@ def main(): for k, v in obs.items(): if isinstance(v, np.ndarray): # Is image + # Debug print for black image check + if np.max(v) == 0: + print(f"Warning: Camera {k} is returning all black pixels!") + ret, buffer = cv2.imencode(".jpg", v, [int(cv2.IMWRITE_JPEG_QUALITY), 90]) if ret: encoded_obs[k] = base64.b64encode(buffer).decode("utf-8") diff --git a/software/examples/alohamini/raise_the_roof.py b/software/examples/alohamini/raise_the_roof.py new file mode 100644 index 0000000..1a15c65 --- /dev/null +++ b/software/examples/alohamini/raise_the_roof.py @@ -0,0 +1,215 @@ +import argparse +import time +import math +import xml.etree.ElementTree as ET +import numpy as np +import rerun as rr +from scipy.spatial.transform import Rotation as R +import os + +def parse_urdf(urdf_path): + tree = ET.parse(urdf_path) + root = tree.getroot() + + links = {} + joints = [] + + for link in root.findall('link'): + name = link.get('name') + visual = link.find('visual') + geometry_info = None + color_info = None + origin_info = None + + if visual is not None: + geo = visual.find('geometry') + if geo is not None: + box = geo.find('box') + cylinder = geo.find('cylinder') + if box is not None: + geometry_info = {'type': 'box', 'size': [float(x) for x in box.get('size').split()]} + elif cylinder is not None: + geometry_info = {'type': 'cylinder', 'length': float(cylinder.get('length')), 'radius': float(cylinder.get('radius'))} + + mat = visual.find('material') + if mat is not None: + color_info = mat.get('name') # Simplified + + orig = visual.find('origin') + if orig is not None: + xyz = [float(x) for x in orig.get('xyz', '0 0 0').split()] + rpy = [float(x) for x in orig.get('rpy', '0 0 0').split()] + origin_info = {'xyz': xyz, 'rpy': rpy} + else: + origin_info = {'xyz': [0,0,0], 'rpy': [0,0,0]} + + 
links[name] = { + 'visual': geometry_info, + 'color': color_info, + 'visual_origin': origin_info + } + + for joint in root.findall('joint'): + name = joint.get('name') + type_ = joint.get('type') + parent = joint.find('parent').get('link') + child = joint.find('child').get('link') + + origin = joint.find('origin') + xyz = [float(x) for x in origin.get('xyz', '0 0 0').split()] if origin is not None else [0,0,0] + rpy = [float(x) for x in origin.get('rpy', '0 0 0').split()] if origin is not None else [0,0,0] + + axis_elem = joint.find('axis') + axis = [float(x) for x in axis_elem.get('xyz', '1 0 0').split()] if axis_elem is not None else [1,0,0] + + limit = joint.find('limit') + limits = None + if limit is not None: + limits = (float(limit.get('lower', -3.14)), float(limit.get('upper', 3.14))) + + joints.append({ + 'name': name, + 'type': type_, + 'parent': parent, + 'child': child, + 'xyz': xyz, + 'rpy': rpy, + 'axis': axis, + 'limits': limits + }) + + return links, joints + +def get_transform(xyz, rpy): + rot = R.from_euler('xyz', rpy).as_matrix() + T = np.eye(4) + T[:3, :3] = rot + T[:3, 3] = xyz + return T + +def visualize(urdf_path): + print("Starting visualize...") + rr.init("raise_the_roof", spawn=True) + print("Rerun initialized.") + + links, joints = parse_urdf(urdf_path) + + # Build tree structure + joint_map = {} + for j in joints: + p = j['parent'] + if p not in joint_map: + joint_map[p] = [] + joint_map[p].append(j) + + t = 0.0 + + while True: + # "Raise the roof" motion + # Cycle time: 1 second up, 1 second down + cycle = math.sin(t * 8) # Speed up a bit more + + # Lift Axis: Oscillate between 0.1 and 0.5 + # Range of motion: 0.4 + # Center: 0.3 + lift_height = 0.3 + 0.2 * cycle + + # Static Hands Up Pose ("Goal Post" / "Surrender" pose) + # Arms out to the sides, forearms pointing up. 
def visualize(urdf_path):
    """Animate the URDF in Rerun with a looping "raise the roof" motion.

    The prismatic lift axis oscillates while the arms hold a static
    "goal post" pose; link poses come from forward kinematics over the
    joint tree, and each visual is logged to Rerun as a box.
    """
    print("Starting visualize...")
    rr.init("raise_the_roof", spawn=True)
    print("Rerun initialized.")

    links, joints = parse_urdf(urdf_path)

    # Map parent link -> outgoing joints so FK can walk the tree.
    joint_map = {}
    for j in joints:
        joint_map.setdefault(j['parent'], []).append(j)

    t = 0.0
    while True:
        # Oscillation driver for the "raise the roof" motion.
        cycle = math.sin(t * 8)

        # Lift axis sweeps 0.1..0.5 m (center 0.3, amplitude 0.2).
        lift_height = 0.3 + 0.2 * cycle

        # Static hands-up pose: shoulders panned out +/-90 deg, upper arms
        # horizontal (0.0), elbows bent 90 deg so the forearms point up.
        joint_values = {}
        for j in joints:
            name = j['name']
            value = 0.0  # wrists, grippers, and anything unmatched stay at zero
            if name == 'lift_axis':
                value = lift_height
            elif 'elbow_flex' in name:
                value = 1.57
            elif 'shoulder_pan' in name:
                value = 1.57 if 'left' in name else -1.57
            joint_values[name] = value

        # Forward kinematics: depth-first walk from the base link.
        stack = [('base_link', np.eye(4))]
        while stack:
            link_name, T_parent = stack.pop()

            link_data = links.get(link_name)
            if link_data and link_data['visual']:
                v_orig = link_data['visual_origin']
                T_visual = T_parent @ get_transform(v_orig['xyz'], v_orig['rpy'])

                rr.set_time_seconds("sim_time", t)

                trans = T_visual[:3, 3]
                rot = R.from_matrix(T_visual[:3, :3]).as_quat()  # xyzw order

                entity_path = f"robot/{link_name}"
                geo = link_data['visual']
                # NOTE(review): cylinders are approximated by their bounding box.
                if geo['type'] == 'box':
                    rr.log(entity_path, rr.Boxes3D(half_sizes=[s/2 for s in geo['size']], centers=[0,0,0]), rr.Transform3D(translation=trans, rotation=rr.Quaternion(xyzw=rot)))
                elif geo['type'] == 'cylinder':
                    rr.log(entity_path, rr.Boxes3D(half_sizes=[geo['radius'], geo['radius'], geo['length']/2], centers=[0,0,0]), rr.Transform3D(translation=trans, rotation=rr.Quaternion(xyzw=rot)))

            # Apply each child joint: fixed offset first, then the joint motion.
            for j in joint_map.get(link_name, []):
                T_static = get_transform(j['xyz'], j['rpy'])

                T_joint = np.eye(4)
                if j['type'] in ['revolute', 'continuous']:
                    axis = np.array(j['axis'])
                    T_joint[:3, :3] = R.from_rotvec(axis * joint_values.get(j['name'], 0)).as_matrix()
                elif j['type'] == 'prismatic':
                    axis = np.array(j['axis'])
                    T_joint[:3, 3] = axis * joint_values.get(j['name'], 0)

                stack.append((j['child'], T_parent @ T_static @ T_joint))

        t += 0.05
        time.sleep(0.05)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Default URDF path resolved relative to this script's location.
    default_urdf = os.path.join(os.path.dirname(__file__), "../../src/lerobot/robots/alohamini/alohamini.urdf")
    parser.add_argument("--urdf", default=default_urdf)
    args = parser.parse_args()

    visualize(args.urdf)
# Key mappings: key -> (command attribute, per-press increment).
MOVE_BINDINGS = {
    'w': ('x.vel', 0.05),
    's': ('x.vel', -0.05),
    'a': ('y.vel', 0.05),
    'd': ('y.vel', -0.05),
    'q': ('theta.vel', 1.0),  # Reduced rotation speed for better control
    'e': ('theta.vel', -1.0),
}

LIFT_BINDINGS = {
    'u': ('lift_axis.height_mm', 2.0),
    'j': ('lift_axis.height_mm', -2.0),
}

# Arm control (left arm only for now).
# Joint names from the URDF: arm_left_shoulder_pan, arm_left_shoulder_lift,
# arm_left_elbow_flex, arm_left_wrist_flex, arm_left_wrist_roll, arm_left_gripper
ARM_BINDINGS = {
    'z': ('arm_left_shoulder_pan.pos', 0.1),
    'x': ('arm_left_shoulder_pan.pos', -0.1),
    'c': ('arm_left_shoulder_lift.pos', 0.1),
    'v': ('arm_left_shoulder_lift.pos', -0.1),
    'b': ('arm_left_elbow_flex.pos', 0.1),
    'n': ('arm_left_elbow_flex.pos', -0.1),
    'm': ('arm_left_gripper.pos', 0.1),
    ',': ('arm_left_gripper.pos', -0.1),
}

STOP_KEYS = [' ', 'k']

def getKey():
    """Read one key from stdin without blocking (0.1 s timeout).

    Puts the terminal into raw mode for the read and restores the saved
    ``settings`` (module-level, captured in main) afterwards. Returns ''
    when no key was pressed within the timeout.
    """
    tty.setraw(sys.stdin.fileno())
    key = ''
    ready, _, _ = select.select([sys.stdin], [], [], 0.1)
    if ready:
        key = sys.stdin.read(1)
    termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
    return key

def limit(val, min_val, max_val):
    """Clamp *val* into [min_val, max_val]."""
    return max(min_val, min(val, max_val))
def zmq_listener(ip, port, stop_event):
    """Background thread: subscribe to robot observations and stream to Rerun.

    Each message is a JSON dict; long string values are assumed to be
    base64-encoded JPEG images, which are decoded, converted to RGB, and
    re-keyed under ``camera/`` for the Rerun hierarchy. Runs until
    *stop_event* is set, then closes the socket and context.
    """
    context = zmq.Context()
    socket = context.socket(zmq.SUB)
    socket.setsockopt(zmq.SUBSCRIBE, b"")
    # BUG FIX: ZMQ_CONFLATE must be set *before* connect() — per the ZeroMQ
    # docs it only applies to subsequently created connections, so setting it
    # after connect (as before) had no effect and stale frames could queue up.
    socket.setsockopt(zmq.CONFLATE, 1)
    socket.connect(f"tcp://{ip}:{port}")

    print(f"Listening for observations on {ip}:{port}...")

    while not stop_event.is_set():
        try:
            if socket.poll(timeout=100):
                msg = socket.recv_string()
                data = json.loads(msg)

                obs_decoded = {}
                for k, v in data.items():
                    # Heuristic: strings longer than 1000 chars are treated as
                    # base64-encoded JPEG images.
                    if isinstance(v, str) and len(v) > 1000:
                        try:
                            img_bytes = base64.b64decode(v)
                            nparr = np.frombuffer(img_bytes, np.uint8)
                            img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
                            # If decode fails (img is None) the key is dropped.
                            if img is not None:
                                # OpenCV decodes BGR; Rerun expects RGB.
                                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

                                if not k.startswith("camera/"):
                                    # Re-key e.g. 'head_top' -> 'camera/head_top' and
                                    # clear the stale entity so it doesn't linger in
                                    # the viewer (Rerun persists entities).
                                    obs_decoded[f"camera/{k}"] = img
                                    rr.log(f"observation.{k}", rr.Clear(recursive=False))
                                else:
                                    obs_decoded[k] = img
                        except Exception:
                            # Not actually an image: pass the raw value through.
                            obs_decoded[k] = v
                    else:
                        obs_decoded[k] = v

                log_rerun_data(observation=obs_decoded)

        except Exception:
            # Best-effort: drop malformed messages and keep listening.
            pass

    socket.close()
    context.term()
def main():
    """Keyboard teleop loop: read keys, update target state, publish over ZMQ.

    Starts a daemon thread that decodes incoming observations and logs them to
    Rerun; the foreground loop maps keypresses to base/lift/arm commands,
    clamps them to safe ranges, and streams them to the robot's command
    socket. On exit, zeroes the base velocities (keeping the lift height) and
    restores the terminal.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--ip", default=DEFAULT_IP, help="IP address of the robot/simulation")
    args = parser.parse_args()

    # Saved terminal settings, restored by getKey() after each raw-mode read.
    global settings
    settings = termios.tcgetattr(sys.stdin)

    # Init Rerun
    init_rerun(session_name="alohamini_remote_teleop")

    # Start ZMQ listener thread (observations -> Rerun).
    stop_event = threading.Event()
    listener_thread = threading.Thread(target=zmq_listener, args=(args.ip, OBS_PORT, stop_event), daemon=True)
    listener_thread.start()

    # Command socket (PUSH with CONFLATE: only the latest command is kept).
    context = zmq.Context()
    cmd_socket = context.socket(zmq.PUSH)
    cmd_socket.setsockopt(zmq.CONFLATE, 1)
    cmd_socket.connect(f"tcp://{args.ip}:{CMD_PORT}")

    print(f"Connected to command port {CMD_PORT} at {args.ip}")

    def _zero_state():
        # Fresh all-zero command state: base velocities, lift height, left-arm joints.
        return {
            "x.vel": 0.0,
            "y.vel": 0.0,
            "theta.vel": 0.0,
            "lift_axis.height_mm": 0.0,
            "arm_left_shoulder_pan.pos": 0.0,
            "arm_left_shoulder_lift.pos": 0.0,
            "arm_left_elbow_flex.pos": 0.0,
            "arm_left_wrist_flex.pos": 0.0,
            "arm_left_wrist_roll.pos": 0.0,
            "arm_left_gripper.pos": 0.0,
        }

    target_state = _zero_state()
    is_recording = False

    try:
        print(msg)
        while True:
            key = getKey()

            if key == '\x03':  # CTRL-C
                break

            # Recording toggle. BUG FIX: the on-screen help advertises 'r',
            # but only 'p' was handled ('r' was bound to nothing); accept both.
            if key in ('r', 'p'):
                is_recording = not is_recording
                cmd_key = "start_recording" if is_recording else "stop_recording"
                cmd_socket.send_string(json.dumps({cmd_key: True}))
                print(f"Recording: {is_recording}")
                # NOTE(review): rr.Scalars is the newer API; older rerun SDKs
                # use rr.Scalar — confirm against the installed version.
                rr.log("is_recording", rr.Scalars(1.0 if is_recording else 0.0))
                continue

            # Reset the remote sim AND the local command state.
            if key == '0':
                print("Sending RESET command...")
                cmd_socket.send_string(json.dumps({"reset": True}))
                target_state = _zero_state()
                continue

            # Movement / lift / arm increments.
            if key in MOVE_BINDINGS:
                attr, val = MOVE_BINDINGS[key]
                target_state[attr] += val
                print(f"Cmd: {attr} = {target_state[attr]:.2f}")

            elif key in LIFT_BINDINGS:
                attr, val = LIFT_BINDINGS[key]
                target_state[attr] += val
                print(f"Lift: {target_state[attr]:.2f}")

            elif key in ARM_BINDINGS:
                attr, val = ARM_BINDINGS[key]
                target_state[attr] += val
                print(f"Arm: {attr} = {target_state[attr]:.2f}")

            elif key in STOP_KEYS:
                target_state["x.vel"] = 0.0
                target_state["y.vel"] = 0.0
                target_state["theta.vel"] = 0.0
                print("STOPPED")

            # Clamp base velocities and lift height to safe ranges
            # (lift: 0 to 600 mm).
            target_state["x.vel"] = limit(target_state["x.vel"], -0.5, 0.5)
            target_state["y.vel"] = limit(target_state["y.vel"], -0.5, 0.5)
            target_state["lift_axis.height_mm"] = limit(target_state["lift_axis.height_mm"], 0.0, 600.0)

            # Send command and mirror it to Rerun as the action.
            cmd_socket.send_string(json.dumps(target_state))
            log_rerun_data(action=target_state)

    except Exception as e:
        print(e)

    finally:
        stop_event.set()
        # Zero all velocities but hold the lift so the robot doesn't drop.
        final_stop = {k: 0.0 for k in target_state}
        final_stop["lift_axis.height_mm"] = target_state["lift_axis.height_mm"]
        cmd_socket.send_string(json.dumps(final_stop))

        cmd_socket.close()
        context.term()
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)


if __name__ == "__main__":
    main()
+ + + + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + \ No newline at end of file diff --git a/software/src/lerobot/robots/alohamini/meshes/gripper_base.stl b/software/src/lerobot/robots/alohamini/meshes/gripper_base.stl new file mode 100644 index 0000000..5e3ff90 Binary files /dev/null and b/software/src/lerobot/robots/alohamini/meshes/gripper_base.stl differ diff --git a/software/src/lerobot/robots/alohamini/meshes/gripper_finger.stl b/software/src/lerobot/robots/alohamini/meshes/gripper_finger.stl new file mode 100644 index 0000000..78d8cc4 Binary files /dev/null and b/software/src/lerobot/robots/alohamini/meshes/gripper_finger.stl differ diff --git a/software/src/lerobot/robots/alohamini/meshes/wrist_flex.stl b/software/src/lerobot/robots/alohamini/meshes/wrist_flex.stl new file mode 100644 index 0000000..076ed76 Binary files /dev/null and b/software/src/lerobot/robots/alohamini/meshes/wrist_flex.stl differ