diff --git a/.gitignore b/.gitignore index 73cfe4b8..e1da9df7 100644 --- a/.gitignore +++ b/.gitignore @@ -132,8 +132,8 @@ data kujiale_data interiornav_data images/ -internutopia -internutopia_extension +# internutopia +# internutopia_extension *.pyc pre-commit* diff --git a/internnav/agent/base.py b/internnav/agent/base.py index 44b28ff7..42bf6c70 100644 --- a/internnav/agent/base.py +++ b/internnav/agent/base.py @@ -22,6 +22,8 @@ def register(cls, agent_type: str): """ def decorator(agent_class): + if agent_type in cls.agents: + raise ValueError(f"Agent {agent_type} already registered.") cls.agents[agent_type] = agent_class return decorator diff --git a/internnav/agent/cma_agent.py b/internnav/agent/cma_agent.py index d77ad5b8..d7c8788e 100644 --- a/internnav/agent/cma_agent.py +++ b/internnav/agent/cma_agent.py @@ -5,10 +5,9 @@ from gym import spaces from internnav.agent.base import Agent +from internnav.agent.utils.common import batch_obs, set_seed_model from internnav.configs.agent import AgentCfg from internnav.configs.model.base_encoders import ModelCfg -from internnav.evaluator.utils.common import set_seed_model -from internnav.evaluator.utils.models import batch_obs from internnav.model import get_config, get_policy @@ -22,6 +21,7 @@ class CmaAgent(Agent): ) def __init__(self, agent_config: AgentCfg): + super().__init__(agent_config) self._model_settings = ModelCfg(**agent_config.model_settings) model_settings = self._model_settings @@ -119,7 +119,7 @@ def inference(self, obs): dtype=torch.bool, ) end = time.time() - print(f'CmaAgent step time: {round(end-start,4)}s') + print(f'CmaAgent step time: {round(end-start, 4)}s') return actions.cpu().numpy().tolist() def step(self, obs): @@ -127,5 +127,12 @@ def step(self, obs): start = time.time() action = self.inference(obs) end = time.time() - print(f'Time: {round(end-start,4)}s') - return action + print(f'Time: {round(end-start, 4)}s') + + # convert from [[x],[y]] to [{'action': [x],'ideal_flag':True}, {'action': [y],'ideal_flag':True}] + actions = [] + for a in action: + if not isinstance(a, list): + a = [a] + actions.append({'action': a, 'ideal_flag': True}) + return actions diff --git a/internnav/agent/rdp_agent.py b/internnav/agent/rdp_agent.py index bb2dd542..c3dc6678 100644 --- a/internnav/agent/rdp_agent.py +++ b/internnav/agent/rdp_agent.py @@ -1,4 +1,3 @@ -import random import time import numpy as np @@ -6,9 +5,9 @@ from gym import spaces from internnav.agent.base import Agent +from internnav.agent.utils.common import batch_obs, set_seed_model from internnav.configs.agent import AgentCfg from internnav.configs.model.base_encoders import ModelCfg -from internnav.evaluator.utils.models import batch_obs from internnav.model import get_config, get_policy from internnav.model.basemodel.LongCLIP.model import longclip from internnav.model.basemodel.rdp.utils import ( @@ -25,17 +24,9 @@ extract_image_features, extract_instruction_tokens, ) -from internnav.utils import common_log_util from internnav.utils.common_log_util import common_logger as log -def set_random_seed(seed): - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - - @Agent.register('rdp') class RdpAgent(Agent): observation_space = spaces.Box( @@ -47,7 +38,7 @@ class RdpAgent(Agent): def __init__(self, config: AgentCfg): super().__init__(config) - set_random_seed(0) + set_seed_model(0) self._model_settings = self.config.model_settings self._model_settings = ModelCfg(**self._model_settings) env_num = 
getattr(self._model_settings, 'env_num', 1) @@ -348,5 +339,12 @@ def step(self, obs): start = time.time() action = self.inference(obs) end = time.time() - print(f'总时间: {round(end-start,4)}s') - return action + print(f'总时间: {round(end-start, 4)}s') + + # convert from [[a1],[a2]] to [{'action': [a1],'ideal_flag':True}, {'action': [a2],'ideal_flag':True}] + actions = [] + for a in action: + if not isinstance(a, list): + a = [a] + actions.append({'action': a, 'ideal_flag': True}) + return actions diff --git a/internnav/evaluator/utils/models.py b/internnav/agent/utils/common.py similarity index 76% rename from internnav/evaluator/utils/models.py rename to internnav/agent/utils/common.py index 0239fc3b..872ecd8c 100644 --- a/internnav/evaluator/utils/models.py +++ b/internnav/agent/utils/common.py @@ -1,11 +1,25 @@ from collections import defaultdict from typing import DefaultDict, List, Optional +import numpy as np import torch from .tensor_dict import TensorDict +def set_seed_model(seed): + import random + + import torch + + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.backends.cudnn.benchmark = False + torch.backends.cudnn.deterministic = False + + def batch_obs( observations, device: Optional[torch.device] = None, diff --git a/internnav/evaluator/utils/tensor_dict.py b/internnav/agent/utils/tensor_dict.py similarity index 100% rename from internnav/evaluator/utils/tensor_dict.py rename to internnav/agent/utils/tensor_dict.py diff --git a/internnav/configs/evaluator/__init__.py b/internnav/configs/evaluator/__init__.py index 8017c327..ab770c50 100644 --- a/internnav/configs/evaluator/__init__.py +++ b/internnav/configs/evaluator/__init__.py @@ -56,11 +56,12 @@ class EvalDatasetCfg(BaseModel): class EvalCfg(BaseModel): + eval_type: Optional[str] = None + eval_settings: Optional[Dict[str, Any]] = {} agent: Optional[AgentCfg] = None env: EnvCfg task: TaskCfg dataset: EvalDatasetCfg - eval_settings: Optional[Dict[str, Any]] = {} __all__ = [ diff --git a/internnav/dist.py b/internnav/dist.py deleted file mode 100644 index 0519ecba..00000000 --- a/internnav/dist.py +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/internnav/env/__init__.py b/internnav/env/__init__.py index e4c22ee7..798723ec 100644 --- a/internnav/env/__init__.py +++ b/internnav/env/__init__.py @@ -1,5 +1,4 @@ from internnav.env.base import Env -from internnav.env.vln_pe_env import VlnPeEnv -from internnav.env.vln_multi_env import VlnMultiEnv +from internnav.env.internutopia_env import InternutopiaEnv -__all__ = ['Env', 'VlnPeEnv', 'VlnMultiEnv'] +__all__ = ['Env', 'InternutopiaEnv'] diff --git a/internnav/env/base.py b/internnav/env/base.py index 05a32825..a5b9d6ee 100644 --- a/internnav/env/base.py +++ b/internnav/env/base.py @@ -39,6 +39,8 @@ def register(cls, env_type: str): """ def decorator(env_class): + if env_type in cls.envs: + raise ValueError(f"Env {env_type} already registered.") cls.envs[env_type] = env_class return decorator diff --git a/internnav/env/vln_pe_env.py b/internnav/env/internutopia_env.py similarity index 63% rename from internnav/env/vln_pe_env.py rename to internnav/env/internutopia_env.py index fc6645e1..ad7fcf1d 100644 --- a/internnav/env/vln_pe_env.py +++ b/internnav/env/internutopia_env.py @@ -1,20 +1,24 @@ from typing import Any, Dict, List -from internutopia.core.config import Config, SimConfig -from internutopia.core.config.distribution import RayDistributionCfg -from internutopia.core.vec_env import Env - from 
internnav.configs.evaluator import EnvCfg, TaskCfg
 from internnav.env import base
-from internnav.projects.internutopia_vln_extension.configs.tasks.vln_eval_task import (
-    VLNEvalTaskCfg,
-)
-from internnav.projects.internutopia_vln_extension import import_extensions
-@base.Env.register('vln_pe')
-class VlnPeEnv(base.Env):
+@base.Env.register('internutopia')
+class InternutopiaEnv(base.Env):
     def __init__(self, env_config: EnvCfg, task_config: TaskCfg):
+        try:
+            from internutopia.core.config import Config, SimConfig
+            from internutopia.core.config.distribution import RayDistributionCfg
+            from internutopia.core.vec_env import Env
+
+            from internnav.env.utils.internutopia_extension import import_extensions
+        except ImportError as e:
+            raise RuntimeError(
+                "InternUtopia modules could not be imported. "
+                "Make sure both repositories are installed and on PYTHONPATH."
+            ) from e
+
         super().__init__(env_config, task_config)
         env_settings = self.env_config.env_settings
         task_settings = self.task_config.task_settings
@@ -25,8 +29,10 @@ def __init__(self, env_config: EnvCfg, task_config: TaskCfg):
             task_configs=task_settings['episodes'],
         )
         if 'distribution_config' in env_settings:
-            distribution_config=RayDistributionCfg(**env_settings['distribution_config'])
+            distribution_config = RayDistributionCfg(**env_settings['distribution_config'])
             config = config.distribute(distribution_config)
+
+        # register all extensions
         import_extensions()
         self.env = Env(config)
diff --git a/internnav/env/realworld_agilex_env.py b/internnav/env/realworld_agilex_env.py
new file mode 100644
index 00000000..d00c2435
--- /dev/null
+++ b/internnav/env/realworld_agilex_env.py
@@ -0,0 +1,85 @@
+import threading
+import time
+
+from internnav.env import Env
+from internnav.env.utils.agilex_extensions.cam import AlignedRealSense
+from internnav.env.utils.agilex_extensions.control import DiscreteRobotController
+
+
+@Env.register('realworld')
+class RealWorldEnv(Env):
+    def __init__(
+        self,
+        fps: int = 30,
+        duration: float = 0.5,
+        distance: float = 0.25,
+        angle: int = 15,
+        turn_speed: float = 0.5,
+        move_speed: float = 0.3,
+    ):
+
+        self.node = DiscreteRobotController()
+        self.cam = AlignedRealSense()
+        self.latest_obs = None
+        self.lock = threading.Lock()
+        self.stop_flag = threading.Event()
+        self.fps = fps
+
+        # start the camera
+        self.cam.start()
+        # start the capture thread
+        self.thread = threading.Thread(target=self._capture_loop, daemon=True)
+        self.thread.start()
+
+        # control setting
+        self.duration = duration
+        self.distance = distance
+        self.angle = angle
+        self.turn_speed = turn_speed  # rad/s
+        self.move_speed = move_speed  # m/s
+
+    def reverse(self):
+        self.distance = -self.distance
+
+    def _capture_loop(self):
+        """keep capturing frames"""
+        interval = 1.0 / self.fps
+        while not self.stop_flag.is_set():
+            t0 = time.time()
+            try:
+                obs = self.cam.get_observation(timeout_ms=1000)
+                with self.lock:
+                    self.latest_obs = obs
+            except Exception as e:
+                print("Camera capture failed:", e)
+                time.sleep(0.05)
+            dt = time.time() - t0
+            if dt < interval:
+                time.sleep(interval - dt)
+
+    def get_observation(self):
+        """return the most recent frame"""
+        with self.lock:
+            return self.latest_obs
+
+    def step(self, action: int):
+        """
+        action:
+            0: stand still
+            1: move forward
+            2: turn left
+            3: turn right
+        """
+        if action == 0:
+            self.node.stand_still(self.duration)
+        elif action == 1:
+            self.node.move_feedback(self.distance, self.move_speed)
+        elif action == 2:
+            self.node.turn(self.angle, self.turn_speed)
+        elif action == 3:
+            self.node.turn(self.angle, -self.turn_speed)
+
+    def close(self):
+        self.stop_flag.set()
+        self.thread.join(timeout=1.0)
+        self.cam.stop()
diff --git a/internnav/projects/internutopia_vln_extension/configs/__init__.py b/internnav/env/utils/__init__.py
similarity index 100%
rename from internnav/projects/internutopia_vln_extension/configs/__init__.py
rename to internnav/env/utils/__init__.py
diff --git a/internnav/env/utils/agilex_extensions/cam.py b/internnav/env/utils/agilex_extensions/cam.py
new file mode 100644
index 00000000..b76caa68
--- /dev/null
+++ b/internnav/env/utils/agilex_extensions/cam.py
@@ -0,0 +1,122 @@
+# aligned_realsense.py
+import time
+from typing import Dict, Optional, Tuple
+
+import cv2
+import numpy as np
+import pyrealsense2 as rs
+from save_obs import save_obs
+
+
+class AlignedRealSense:
+    def __init__(
+        self,
+        serial_no: Optional[str] = None,
+        color_res: Tuple[int, int, int] = (640, 480, 30),  # (w,h,fps)
+        depth_res: Tuple[int, int, int] = (640, 480, 30),
+        warmup_frames: int = 15,
+    ):
+        self.serial_no = serial_no
+        self.color_res = color_res
+        self.depth_res = depth_res
+        self.warmup_frames = warmup_frames
+
+        self.pipeline: Optional[rs.pipeline] = None
+        self.align: Optional[rs.align] = None
+        self.depth_scale: Optional[float] = None
+        self.started = False
+
+    def start(self):
+        if self.started:
+            return
+        self.pipeline = rs.pipeline()
+        cfg = rs.config()
+        if self.serial_no:
+            cfg.enable_device(self.serial_no)
+
+        cw, ch, cfps = self.color_res
+        dw, dh, dfps = self.depth_res
+
+        # open stream for color and depth
+        cfg.enable_stream(rs.stream.color, cw, ch, rs.format.bgr8, cfps)
+        cfg.enable_stream(rs.stream.depth, dw, dh, rs.format.z16, dfps)
+
+        profile = self.pipeline.start(cfg)
+
+        # depth scale (converts raw z16 values to meters)
+        depth_sensor = profile.get_device().first_depth_sensor()
+        self.depth_scale = float(depth_sensor.get_depth_scale())
+
+        # align to color
+        self.align = rs.align(rs.stream.color)
+
+        # warm up
+        for _ in range(self.warmup_frames):
+            self.pipeline.wait_for_frames()
+
+        # align check
+        frames = self.pipeline.wait_for_frames()
+        frames = self.align.process(frames)
+        color = frames.get_color_frame()
+        depth = frames.get_depth_frame()
+        assert color and depth, "warm-up alignment failed"
+        rgb = np.asanyarray(color.get_data())
+        depth_raw = np.asanyarray(depth.get_data())
+        if depth_raw.shape != rgb.shape[:2]:
+            depth_raw = cv2.resize(depth_raw, (rgb.shape[1], rgb.shape[0]), interpolation=cv2.INTER_NEAREST)
+        self.started = True
+
+    def stop(self):
+        if self.pipeline:
+            self.pipeline.stop()
+            self.pipeline = None
+        self.started = False
+
+    def __enter__(self):
+        self.start()
+        return self
+
+    def __exit__(self, et, ev, tb):
+        self.stop()
+
+    def get_observation(self, timeout_ms: int = 1000) -> Dict:
+        """
+        Returns:
+            {
+                "rgb": uint8[H,W,3] (RGB),
+                "depth": float32[H,W] (meters),
+                "timestamp_s": float
+            }
+        """
+        if not self.started:
+            self.start()
+
+        frames = self.pipeline.wait_for_frames(timeout_ms)
+        frames = self.align.process(frames)
+
+        color = frames.get_color_frame()
+        depth = frames.get_depth_frame()
+        if not color or not depth:
+            raise RuntimeError("cannot align color/depth frames")
+
+        bgr = np.asanyarray(color.get_data())  # HxWx3, uint8 (BGR)
+        rgb = bgr[..., ::-1]  # HxWx3, uint8 (convert to RGB)
+        depth_raw = np.asanyarray(depth.get_data())  # HxW, uint16
+        if depth_raw.shape != rgb.shape[:2]:
+            # Extreme fallback (theoretically should be consistent after alignment).
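+            # INTER_NEAREST keeps depth edges sharp; bilinear interpolation would blend foreground and background depths.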
+            depth_raw = cv2.resize(depth_raw, (rgb.shape[1], rgb.shape[0]), interpolation=cv2.INTER_NEAREST)
+
+        depth_m = depth_raw.astype(np.float32) * float(self.depth_scale)
+        ts_ms = color.get_timestamp() or frames.get_timestamp()
+        ts_s = float(ts_ms) / 1000.0 if ts_ms is not None else time.time()
+
+        return {"rgb": rgb, "depth": depth_m, "timestamp_s": ts_s}
+
+
+if __name__ == "__main__":
+    with AlignedRealSense(serial_no=None) as cam:
+        obs = cam.get_observation()
+        print("RGB:", obs["rgb"].shape, obs["rgb"].dtype)
+        print("Depth:", obs["depth"].shape, obs["depth"].dtype, "(meters)")
+        meta = save_obs(obs, outdir="./captures", prefix="rs")
+        print("Saved:", meta)
diff --git a/internnav/env/utils/agilex_extensions/control.py b/internnav/env/utils/agilex_extensions/control.py
new file mode 100644
index 00000000..062a13c4
--- /dev/null
+++ b/internnav/env/utils/agilex_extensions/control.py
@@ -0,0 +1,214 @@
+#!/usr/bin/env python3
+import math
+
+import rospy
+from geometry_msgs.msg import Twist
+from nav_msgs.msg import Odometry
+from tf.transformations import euler_from_quaternion
+
+
+class Turn90Degrees:
+    def __init__(self):
+        rospy.init_node('turn_90_degrees_node', anonymous=True)
+
+        self.cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)
+
+        self.odom_sub = rospy.Subscriber('/ranger_base_node/odom', Odometry, self.odom_callback)
+
+        self.current_yaw = 0.0
+        self.start_yaw = None
+        self.turning = False
+        self.current_xy = None  # set by odom_callback once the first /odom message arrives
+        self.turn_angle = math.radians(90)  # angle
+        self.angular_speed = -0.2  # direction and speed
+        self.rate = rospy.Rate(10)  # 10Hz
+
+    def odom_callback(self, msg):
+        # Get the yaw angle from a quaternion.
+        orientation = msg.pose.pose.orientation
+        quaternion = [orientation.x, orientation.y, orientation.z, orientation.w]
+        _, _, yaw = euler_from_quaternion(quaternion)
+
+        self.current_yaw = yaw
+
+        # initialize the start yaw
+        if self.start_yaw is None and not self.turning:
+            self.start_yaw = yaw
+            rospy.loginfo(f"start yaw: {math.degrees(self.start_yaw):.2f}")
+
+        # --- position (new)
+        p = msg.pose.pose.position
+        self.current_xy = (p.x, p.y)
+
+    def execute_turn(self):
+        if self.start_yaw is None:
+            rospy.loginfo("waiting for initial yaw state...")
+            return False
+
+        if not self.turning:
+            self.turning = True
+            rospy.loginfo("start to turn")
+
+        # compute turned angle
+        current_angle = self.current_yaw - self.start_yaw
+
+        # normalize the angle
+        if current_angle > math.pi:
+            current_angle -= 2 * math.pi
+        elif current_angle < -math.pi:
+            current_angle += 2 * math.pi
+
+        # compute remaining angle
+        remaining_angle = self.turn_angle - abs(current_angle)
+
+        # create Twist msg
+        twist = Twist()
+
+        # keep turning until the goal angle is reached
+        if remaining_angle > 0.05:  # allow some tolerance (about 2.86 degrees)
+            twist.angular.z = self.angular_speed * min(1.0, remaining_angle * 6)
+            print(f"twist.angular.z {twist.angular.z} remaining_angle {remaining_angle}")
+            self.cmd_vel_pub.publish(twist)
+            return False
+        else:
+            twist.angular.z = 0.0
+            self.cmd_vel_pub.publish(twist)
+            rospy.loginfo(f"finish turn, final yaw: {math.degrees(self.current_yaw):.2f}")
+            return True
+
+    def run(self):
+        while not rospy.is_shutdown():
+            if self.execute_turn():
+                rospy.loginfo("Task finished")
+                break
+            self.rate.sleep()
+
+
+class DiscreteRobotController(Turn90Degrees):
+    """
+    Extends Turn90Degrees to allow discrete step-based control.
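+    Provides stand_still, move_forward, move_feedback and turn primitives; RealWorldEnv.step() maps its discrete actions onto these.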
+ """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) # initialize parent class + + def stand_still(self, duration=0.2): + twist = Twist() + twist.linear.x = 0.0 + twist.angular.z = 0.0 + self.cmd_vel_pub.publish(twist) + rospy.sleep(duration) # Maintain stand still for a short duration + rospy.loginfo("Stand still command executed.") + + def move_forward(self, distance=0.5, speed=0.2): + twist = Twist() + twist.linear.x = speed # Forward speed + twist.angular.z = 0.0 + duration = distance / twist.linear.x # Time to move forward the specified distance + + rospy.loginfo(f"Moving forward for {duration:.2f} seconds.") + end_time = rospy.Time.now() + rospy.Duration(duration) + + while rospy.Time.now() < end_time and not rospy.is_shutdown(): + self.cmd_vel_pub.publish(twist) + self.rate.sleep() + + # self.stand_still() # Stop after moving forward + rospy.loginfo("Move forward command executed.") + + # ---- NEW: feedback-controlled forward motion + + +def move_feedback(self, distance=0.25, speed=0.5, tol=0.02, timeout=None): + """ + 里程计闭环直线移动:支持正/负 distance。 + - distance: 目标路程(米)。>0 前进,<0 倒车。 + - speed: 名义线速度(m/s),可给正/负,最终会按正数取绝对值。 + - tol: 终止距离容差(米) + - timeout: 超时(秒);默认 max(3*|distance|/speed, 3.0) + """ + # 等待位姿 + while self.current_xy is None and not rospy.is_shutdown(): + rospy.loginfo_throttle(2.0, "Waiting for /odom...") + self.rate.sleep() + if rospy.is_shutdown(): + return + + # 方向与目标 + direction = 1.0 if distance >= 0.0 else -1.0 + target = abs(distance) + speed = abs(speed) if speed is not None else 0.5 + + start_xy = self.current_xy + start_time = rospy.Time.now() + + if timeout is None: + timeout = max(3.0, 3.0 * (target / max(speed, 0.05))) + + twist = Twist() + + # 简单 P 控制,越靠近越慢 + Kp = 1.5 + min_speed = 0.06 # 防止轮子停转 + + rospy.loginfo( + f"move_linear: target={target:.3f} m, dir={'forward' if direction>0 else 'backward'}, " + f"speed≈{speed:.2f} m/s, tol={tol:.3f} m" + ) + + try: + while not rospy.is_shutdown(): + # 超时 + if (rospy.Time.now() - start_time).to_sec() > timeout: + rospy.logwarn("move_linear timeout reached; stopping.") + break + + # 走过的距离(欧式距离,不区分前后,目标用 abs) + cx, cy = self.current_xy + sx, sy = start_xy + traveled = math.hypot(cx - sx, cy - sy) + remaining = target - traveled + + # 达标退出 + if remaining <= tol: + rospy.loginfo(f"Reached distance: traveled={traveled:.3f} m (tol {tol} m)") + break + + # 速度控制(带方向) + v = Kp * remaining + v = max(min(v, speed), min_speed) # [min_speed, speed] + twist.linear.x = direction * v # 关键:按方向加符号 + twist.angular.z = 0.0 + self.cmd_vel_pub.publish(twist) + + rospy.loginfo_throttle( + 1.0, f"traveled={traveled:.3f} m, remaining={remaining:.3f} m, v={twist.linear.x:.2f} m/s" + ) + self.rate.sleep() + + finally: + # 停车 + twist.linear.x = 0.0 + twist.angular.z = 0.0 + self.cmd_vel_pub.publish(twist) + + rospy.loginfo("Move linear command executed.") + + def turn(self, angle=15, speed=0.5): + self.turn_angle = math.radians(angle) # update angle + self.angular_speed = speed # Set positive angular speed for left turn + self.start_yaw = None # Reset start yaw to current position + self.turning = False # Reset turning flag + self.run() + rospy.loginfo("Turn left command executed.") + + +if __name__ == '__main__': + try: + control = DiscreteRobotController() + control.turn(15, 0.5) # left + control.turn(15, -0.5) # right + control.move_feedback(0.25, 0.5) + control.move_feedback(-0.25, 0.5) + + except rospy.ROSInterruptException: + pass diff --git a/internnav/env/utils/agilex_extensions/save_obs.py 
new file mode 100644
index 00000000..adb1d8bd
--- /dev/null
+++ b/internnav/env/utils/agilex_extensions/save_obs.py
@@ -0,0 +1,152 @@
+# save_obs.py
+import json
+import os
+import time
+from datetime import datetime
+from pathlib import Path
+
+import cv2
+import numpy as np
+
+
+def save_obs(
+    obs: dict,
+    outdir: str = "./captures",
+    prefix: str = None,
+    max_depth_m: float = 3.0,
+    save_rgb: bool = True,
+    save_depth_16bit: bool = True,
+    save_depth_vis: bool = True,
+):
+    """
+    save obs = {"rgb": HxWx3 uint8 (BGR), "depth": HxW float32 (m), "timestamp_s": float, "intrinsics": {...}}
+    """
+    Path(outdir).mkdir(parents=True, exist_ok=True)
+
+    ts = float(obs.get("timestamp_s", time.time()))
+    stamp = datetime.fromtimestamp(ts).strftime("%Y%m%d_%H%M%S_%f")
+    prefix = prefix or f"{stamp}"
+
+    rgb = obs.get("rgb", None)
+    depth_m = obs.get("depth", None)
+
+    # 1) save RGB (BGR, via cv2)
+    rgb_path = None
+    if save_rgb and rgb is not None:
+        rgb_path = os.path.join(outdir, f"{prefix}_rgb.jpg")
+        cv2.imwrite(rgb_path, rgb)
+
+    # 2) save 16-bit depth (unit: mm)
+    depth16_path = None
+    vis_path = None
+    if depth_m is not None and (save_depth_16bit or save_depth_vis):
+        d = np.nan_to_num(depth_m, nan=0.0, posinf=0.0, neginf=0.0)  # clear NaN/Inf
+        if save_depth_16bit:
+            depth_mm = np.clip(np.round(d * 1000.0), 0, 65535).astype(np.uint16)
+            depth16_path = os.path.join(outdir, f"{prefix}_depth_mm.png")
+            cv2.imwrite(depth16_path, depth_mm)
+
+        # 3) save depth vis
+        if save_depth_vis:
+            d_clip = np.clip(d, 0.0, max_depth_m)
+            # Brighten the near field: first normalize to 0–255, then invert and apply a colormap.
+            d_norm = (d_clip / max_depth_m * 255.0).astype(np.uint8)
+            depth_color = cv2.applyColorMap(255 - d_norm, cv2.COLORMAP_JET)
+            vis_path = os.path.join(outdir, f"{prefix}_depth_vis.png")
+            cv2.imwrite(vis_path, depth_color)
+
+    # 4) meta info
+    meta = {
+        "timestamp_s": ts,
+        "paths": {
+            "rgb": rgb_path,
+            "depth_mm": depth16_path,
+            "depth_vis": vis_path,
+        },
+        "intrinsics": obs.get("intrinsics", {}),
+        "notes": "depth_mm.png is a 16-bit PNG storing depth in millimeters; depth_vis.png is for visualization only.",
+    }
+    with open(os.path.join(outdir, f"{prefix}_meta.json"), "w") as f:
+        json.dump(meta, f, indent=2, ensure_ascii=False)
+
+    return meta
+
+
+# load_obs.py
+from glob import glob
+from typing import Dict, List, Optional, Tuple
+
+
+def _resolve(base: str, p: Optional[str]) -> Optional[str]:
+    if not p:
+        return None
+    return p if os.path.isabs(p) else os.path.abspath(os.path.join(base, p))
+
+
+def load_obs_from_meta(meta_path: str, nan_for_zeros: bool = False) -> Dict:
+    """
+    Load a meta.json written by save_obs() and rebuild the obs dict:
+        {
+            "rgb": uint8[H,W,3] (BGR),
+            "depth": float32[H,W] (meters) or None,
+            "timestamp_s": float,
+            "intrinsics": dict
+        }
+
+    Args:
+        meta_path: path to a *_meta.json file
+        nan_for_zeros: if True, set pixels with zero depth to NaN (convenient for downstream occlusion handling)
+    """
+    meta_path = os.path.abspath(meta_path)
+    base = os.path.dirname(os.path.dirname(meta_path))
+    with open(meta_path, "r") as f:
+        meta = json.load(f)
+
+    paths = meta.get("paths", {})
+    rgb_path = _resolve(base, paths.get("rgb"))
+    depth_mm_path = _resolve(base, paths.get("depth_mm"))
+
+    # read RGB (saved as BGR; OpenCV reads it back as BGR)
+    rgb = None
+    if rgb_path and os.path.exists(rgb_path):
+        rgb = cv2.imread(rgb_path, cv2.IMREAD_COLOR)  # uint8, HxWx3 (BGR)
+
+    # read depth (16-bit millimeter PNG → meters)
+    depth = None
+    if depth_mm_path and os.path.exists(depth_mm_path):
+        depth_mm = cv2.imread(depth_mm_path, cv2.IMREAD_UNCHANGED)  # uint16
+        if depth_mm is None:
+            raise RuntimeError(f"Failed to read depth image: {depth_mm_path}")
RuntimeError(f"Failed to read depth image: {depth_mm_path}") + if depth_mm.dtype != np.uint16: + raise ValueError(f"Depth image must be uint16 (mm). Got {depth_mm.dtype}") + depth = depth_mm.astype(np.float32) / 1000.0 # meters + if nan_for_zeros: + depth[depth_mm == 0] = np.nan + + # 尺寸一致性检查(若两者都有) + if rgb is not None and depth is not None and depth.shape != rgb.shape[:2]: + raise ValueError(f"Shape mismatch: rgb {rgb.shape[:2]} vs depth {depth.shape}. " "确保保存前已对齐(align 到 color)。") + + obs = { + "rgb": rgb, + "depth": depth, + "timestamp_s": float(meta.get("timestamp_s", 0.0)), + "intrinsics": meta.get("intrinsics", {}), + } + return obs + + +def load_all_obs_in_dir(captures_dir: str, pattern: str = "*_meta.json", sort: bool = True) -> List[Tuple[str, Dict]]: + """ + 批量读取目录下所有 meta,返回 [(meta_path, obs), ...] + """ + metas = glob(os.path.join(captures_dir, pattern)) + if sort: + metas.sort() + result = [] + for m in metas: + try: + result.append((m, load_obs_from_meta(m))) + except Exception as e: + print(f"[warn] skip {m}: {e}") + return result diff --git a/internnav/env/utils/agilex_extensions/stream.py b/internnav/env/utils/agilex_extensions/stream.py new file mode 100644 index 00000000..cfbad940 --- /dev/null +++ b/internnav/env/utils/agilex_extensions/stream.py @@ -0,0 +1,191 @@ +# stream_server.py +import threading +import time +from typing import Optional + +import cv2 +import numpy as np +from flask import Flask, Response, make_response, stream_with_context + +app = Flask(__name__) + +_env = None +_stop = threading.Event() # graceful stop for Ctrl+C + + +def set_env(env): + """set env from main to stream server""" + global _env + _env = env + + +_instruction = "" +_instruction_lock = threading.Lock() + + +def set_instruction(text: str) -> None: + """ + Set the instruction text that will be displayed on the viewer page. + Thread-safe; can be called from your main/control loop at any time. + """ + global _instruction + with _instruction_lock: + _instruction = str(text) if text is not None else "" + + +def get_instruction() -> str: + """ + Get the current instruction text (thread-safe). 
+ """ + with _instruction_lock: + return _instruction + + +def _encode_jpeg(frame_bgr: np.ndarray, quality: int = 80) -> Optional[bytes]: + ok, jpg = cv2.imencode(".jpg", frame_bgr, [cv2.IMWRITE_JPEG_QUALITY, quality]) + return jpg.tobytes() if ok else None + + +def _mjpeg_generator(jpeg_quality: int = 80, fps_limit: float = 30.0): + boundary = b"--frame" + min_interval = 1.0 / fps_limit if fps_limit > 0 else 0.0 + + last = 0.0 + while not _stop.is_set(): + if _env is None: + time.sleep(0.05) + continue + + obs = _env.get_observation() + if not obs or "rgb" not in obs: + time.sleep(0.01) + continue + + frame = obs["rgb"] # Expect BGR for OpenCV; convert if your source is RGB + if frame is None: + time.sleep(0.01) + continue + + # If your env returns RGB, uncomment the next line: + frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + + jpg = _encode_jpeg(frame, quality=jpeg_quality) + if jpg is None: + continue + + # multipart chunk + yield ( + boundary + b"\r\n" + b"Content-Type: image/jpeg\r\n" + b"Cache-Control: no-cache\r\n" + b"Pragma: no-cache\r\n\r\n" + jpg + b"\r\n" + ) + + # simple pacing (prevents 100% CPU + helps Ctrl+C responsiveness) + now = time.time() + sleep_t = min_interval - (now - last) + if sleep_t > 0: + time.sleep(sleep_t) + last = now + + +@app.route("/stream") +def stream(): + # stream_with_context ensures Flask doesn’t buffer the generator + gen = stream_with_context(_mjpeg_generator()) + resp = Response(gen, mimetype="multipart/x-mixed-replace; boundary=frame", direct_passthrough=True) + # extra headers to avoid caching + resp.headers["Cache-Control"] = "no-cache, no-store, must-revalidate" + resp.headers["Pragma"] = "no-cache" + resp.headers["Connection"] = "keep-alive" + return resp + + +@app.route("/instruction") +def instruction(): + text = get_instruction() + r = make_response(text) + r.headers["Content-Type"] = "text/plain; charset=utf-8" + r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate" + r.headers["Pragma"] = "no-cache" + return r + + +@app.route("/") +def index(): + # simple viewer page with instruction overlay + html = """ + + MJPEG Stream + + +
+        <img src="/stream" alt="stream">
+        <div>Instruction: <span id="instruction">(none)</span></div>
+        <script>
+            setInterval(async () => {
+                try {
+                    const resp = await fetch('/instruction', {cache: 'no-store'});
+                    const text = (await resp.text()).trim();
+                    document.getElementById('instruction').textContent = text || '(none)';
+                } catch (e) {}
+            }, 1000);
+        </script>
+ + """ + r = make_response(html) + r.headers["Cache-Control"] = "no-cache" + return r + + +def _handle_sigint(sig, frame): + _stop.set() + + +# start the HTTP server in a background thread (non-blocking) +def run_server(env, host, port): + # make env accessible to the server + set_env(env) + # IMPORTANT: disable reloader so it doesn't spawn another process + app.run(host="0.0.0.0", port=8080, threaded=True, use_reloader=False) + + +def run(host="0.0.0.0", port=8080, env=None): + server_thread = threading.Thread(target=run_server, args=(env, host, port), daemon=True) + server_thread.start() + time.sleep(0.3) # tiny delay to let the server bind the port + print("--- stream app is running on http://0.0.0.0:8080 ---") + + +if __name__ == "__main__": + # Example: plug your env here before run() + # set_env(MyEnv()) + run() diff --git a/internnav/projects/habitat_extensions/evaluator_single.py b/internnav/env/utils/habitat_extensions/evaluator_single.py similarity index 94% rename from internnav/projects/habitat_extensions/evaluator_single.py rename to internnav/env/utils/habitat_extensions/evaluator_single.py index da2a1574..24d95ea4 100644 --- a/internnav/projects/habitat_extensions/evaluator_single.py +++ b/internnav/env/utils/habitat_extensions/evaluator_single.py @@ -21,7 +21,6 @@ FogOfWarConfig, TopDownMapMeasurementConfig, ) -from habitat.tasks.nav.shortest_path_follower import ShortestPathFollower from habitat.utils.visualizations.utils import images_to_video, observations_to_image from habitat_baselines.config.default import get_config as get_habitat_config from omegaconf import OmegaConf @@ -38,7 +37,7 @@ split_and_clean, traj_to_actions, ) -from internnav.utils.dist import * +from internnav.utils.dist import get_rank, get_world_size, init_distributed_mode DEFAULT_IMAGE_TOKEN = "" @@ -113,7 +112,7 @@ def __init__( self.model = model self.processor = processor - prompt = f"You are an autonomous navigation assistant. Your task is to . Where should you go next to stay on track? Please output the next waypoint\'s coordinates in the image. Please output STOP when you have successfully completed the task." + prompt = "You are an autonomous navigation assistant. Your task is to . Where should you go next to stay on track? Please output the next waypoint\'s coordinates in the image. Please output STOP when you have successfully completed the task." 
answer = "" self.conversation = [{"from": "human", "value": prompt}, {"from": "gpt", "value": answer}] @@ -277,7 +276,7 @@ def run_single_eval(self): self.config.habitat.simulator.agents.main_agent.sim_sensors.rgb_sensor ) sucs, spls, oss, nes = [], [], [], [] - done_res = [] + if True: # fixme scenes_keys = list(sorted(self.scene_episode_dict.keys())) @@ -303,7 +302,7 @@ def run_single_eval(self): ) # episode.instruction.instruction_text if 'objectnav' not in self.config_path else episode.object_category print("episode start", episode_instruction) - episode_id = int(episode.episode_id) + # episode_id = int(episode.episode_id) env = self.env env.current_episode = episode observations = env.reset() @@ -316,7 +315,7 @@ def run_single_eval(self): transformation_matrix[:3, :3] = rotation_matrix transformation_matrix[:3, 3] = translation - agent = ShortestPathFollower(env.sim, 0.25, False) + # agent = ShortestPathFollower(env.sim, 0.25, False) os.makedirs(os.path.join(self.output_path, f'check_sim_{self.epoch}'), exist_ok=True) @@ -328,17 +327,17 @@ def run_single_eval(self): initial_height = env.sim.get_agent_state().position[1] rgb_list = [] - depth_list = [] + # depth_list = [] action_seq = [] - past_key_values = None + # past_key_values = None output_ids = None goal = None action = None - look_down_observations = None - look_down_id_list = [] + # look_down_observations = None + # look_down_id_list = [] messages = [] - last_action = None + # last_action = None local_actions = [] # begin evaluation main loop @@ -354,7 +353,7 @@ def run_single_eval(self): agent_state = env.sim.get_agent_state() height = agent_state.position[1] - initial_height # Habitat GPS makes west negative, so flip y camera_position = np.array([x, -y, self._camera_height + height]) - robot_xy = camera_position[:2] + # robot_xy = camera_position[:2] # tf_camera_to_episodic = self.xyz_yaw_to_tf_matrix(camera_position, camera_yaw) @ self.get_axis_align_matrix() tf_camera_to_episodic = ( self.xyz_yaw_pitch_to_tf_matrix(camera_position, camera_yaw, np.deg2rad(30)) @@ -362,7 +361,7 @@ def run_single_eval(self): ) image = Image.fromarray(rgb).convert('RGB') # raw observation image - image_size = image.size # 640*480 + # image_size = image.size # 640*480 save_raw_image = image.copy() if action == 5: @@ -378,7 +377,7 @@ def run_single_eval(self): target_width=224, ) look_down_depth = torch.as_tensor(np.ascontiguousarray(look_down_depth)).float() # [H, W] - ### depth clip to 5m + # depth clip to 5m look_down_depth[look_down_depth > 5.0] = 5.0 else: image = image.resize((self.args.resize_w, self.args.resize_h)) @@ -400,7 +399,7 @@ def run_single_eval(self): target_width=224, ) look_down_depth = torch.as_tensor(np.ascontiguousarray(look_down_depth)).float() # [H, W] - ### depth clip to 5m + # depth clip to 5m look_down_depth[look_down_depth > 5.0] = 5.0 env.step(4) @@ -440,7 +439,7 @@ def run_single_eval(self): assert action == 5 # last action is look down sources = [{"from": "human", "value": ""}, {"from": "gpt", "value": ""}] input_images += [look_down_image] - messages.append({'role': 'assistant', 'content': [{'type': 'text', 'text': llm_outputs}]}) + # messages.append({'role': 'assistant', 'content': [{'type': 'text', 'text': llm_outputs}]}) input_img_id = -1 prompt = random.choice(self.conjunctions) + DEFAULT_IMAGE_TOKEN @@ -498,12 +497,12 @@ def run_single_eval(self): with torch.no_grad(): traj_latents = self.model.generate_latents(output_ids, pixel_values, image_grid_thw) - image_dp = 
torch.tensor(np.array(look_down_image.resize((224, 224)))).to(torch.bfloat16) - pix_goal_image = copy.copy(image_dp) - images_dp = torch.stack([pix_goal_image, image_dp]).unsqueeze(0) - depth_dp = look_down_depth.unsqueeze(-1).to(torch.bfloat16) - pix_goal_depth = copy.copy(depth_dp) - depths_dp = torch.stack([pix_goal_depth, depth_dp]).unsqueeze(0) + # image_dp = torch.tensor(np.array(look_down_image.resize((224, 224)))).to(torch.bfloat16) + # pix_goal_image = copy.copy(image_dp) + # images_dp = torch.stack([pix_goal_image, image_dp]).unsqueeze(0) + # depth_dp = look_down_depth.unsqueeze(-1).to(torch.bfloat16) + # pix_goal_depth = copy.copy(depth_dp) + # depths_dp = torch.stack([pix_goal_depth, depth_dp]).unsqueeze(0) with torch.no_grad(): dp_actions = self.model.generate_traj(traj_latents) @@ -598,7 +597,7 @@ def run_single_eval(self): self.infer_success_cnt += 1 - metrics = env.get_metrics() + # metrics = env.get_metrics() if self.save_video: images_to_video(vis_frames, self.output_path, f"res_{self.infer_success_cnt}", fps=6, quality=9) self.infer_success = True diff --git a/internnav/projects/habitat_extensions/fonts/arial.ttf b/internnav/env/utils/habitat_extensions/fonts/arial.ttf similarity index 100% rename from internnav/projects/habitat_extensions/fonts/arial.ttf rename to internnav/env/utils/habitat_extensions/fonts/arial.ttf diff --git a/internnav/projects/habitat_extensions/maps.py b/internnav/env/utils/habitat_extensions/maps.py similarity index 100% rename from internnav/projects/habitat_extensions/maps.py rename to internnav/env/utils/habitat_extensions/maps.py diff --git a/internnav/projects/habitat_extensions/measures.py b/internnav/env/utils/habitat_extensions/measures.py similarity index 100% rename from internnav/projects/habitat_extensions/measures.py rename to internnav/env/utils/habitat_extensions/measures.py diff --git a/internnav/projects/internutopia_vln_extension/__init__.py b/internnav/env/utils/internutopia_extension/__init__.py similarity index 100% rename from internnav/projects/internutopia_vln_extension/__init__.py rename to internnav/env/utils/internutopia_extension/__init__.py diff --git a/internnav/env/utils/internutopia_extension/configs/__init__.py b/internnav/env/utils/internutopia_extension/configs/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/internnav/env/utils/internutopia_extension/configs/controllers/__init__.py b/internnav/env/utils/internutopia_extension/configs/controllers/__init__.py new file mode 100644 index 00000000..ec05c189 --- /dev/null +++ b/internnav/env/utils/internutopia_extension/configs/controllers/__init__.py @@ -0,0 +1,5 @@ +from .discrete_controller import DiscreteControllerCfg +from .flash_controller import VlnMoveByFlashControllerCfg +from .stand_still import StandStillControllerCfg + +__all__ = ['StandStillControllerCfg', 'VlnMoveByFlashControllerCfg', 'DiscreteControllerCfg'] diff --git a/internnav/projects/internutopia_vln_extension/configs/controllers/discrete_controller.py b/internnav/env/utils/internutopia_extension/configs/controllers/discrete_controller.py similarity index 100% rename from internnav/projects/internutopia_vln_extension/configs/controllers/discrete_controller.py rename to internnav/env/utils/internutopia_extension/configs/controllers/discrete_controller.py diff --git a/internnav/projects/internutopia_vln_extension/configs/controllers/flash_controller.py b/internnav/env/utils/internutopia_extension/configs/controllers/flash_controller.py similarity index 100% rename from 
internnav/projects/internutopia_vln_extension/configs/controllers/flash_controller.py rename to internnav/env/utils/internutopia_extension/configs/controllers/flash_controller.py diff --git a/internnav/projects/internutopia_vln_extension/configs/controllers/stand_still.py b/internnav/env/utils/internutopia_extension/configs/controllers/stand_still.py similarity index 100% rename from internnav/projects/internutopia_vln_extension/configs/controllers/stand_still.py rename to internnav/env/utils/internutopia_extension/configs/controllers/stand_still.py diff --git a/internnav/projects/internutopia_vln_extension/configs/metrics/__init__.py b/internnav/env/utils/internutopia_extension/configs/metrics/__init__.py similarity index 100% rename from internnav/projects/internutopia_vln_extension/configs/metrics/__init__.py rename to internnav/env/utils/internutopia_extension/configs/metrics/__init__.py diff --git a/internnav/projects/internutopia_vln_extension/configs/metrics/vln_pe_metrics.py b/internnav/env/utils/internutopia_extension/configs/metrics/vln_pe_metrics.py similarity index 99% rename from internnav/projects/internutopia_vln_extension/configs/metrics/vln_pe_metrics.py rename to internnav/env/utils/internutopia_extension/configs/metrics/vln_pe_metrics.py index 7bdcbb19..ab1372ab 100644 --- a/internnav/projects/internutopia_vln_extension/configs/metrics/vln_pe_metrics.py +++ b/internnav/env/utils/internutopia_extension/configs/metrics/vln_pe_metrics.py @@ -1,5 +1,6 @@ from internutopia.core.config.task import MetricCfg + class VLNPEMetricCfg(MetricCfg): type: str = 'VLNPEMetric' shortest_to_goal_distance: float diff --git a/internnav/projects/internutopia_vln_extension/configs/sensors/vln_camera.py b/internnav/env/utils/internutopia_extension/configs/sensors/vln_camera.py similarity index 100% rename from internnav/projects/internutopia_vln_extension/configs/sensors/vln_camera.py rename to internnav/env/utils/internutopia_extension/configs/sensors/vln_camera.py diff --git a/internnav/projects/internutopia_vln_extension/configs/tasks/vln_eval_task.py b/internnav/env/utils/internutopia_extension/configs/tasks/vln_eval_task.py similarity index 90% rename from internnav/projects/internutopia_vln_extension/configs/tasks/vln_eval_task.py rename to internnav/env/utils/internutopia_extension/configs/tasks/vln_eval_task.py index b1afdf04..59815073 100644 --- a/internnav/projects/internutopia_vln_extension/configs/tasks/vln_eval_task.py +++ b/internnav/env/utils/internutopia_extension/configs/tasks/vln_eval_task.py @@ -1,4 +1,4 @@ -from typing import List, Optional, Dict +from typing import Dict, List, Optional from internutopia.core.config.metric import MetricCfg from internutopia.core.config.task import TaskCfg @@ -15,4 +15,3 @@ class VLNEvalTaskCfg(TaskCfg): data: Dict robot_flash: Optional[bool] = False one_step_stand_still: Optional[bool] = False - diff --git a/internnav/projects/internutopia_vln_extension/controllers/__init__.py b/internnav/env/utils/internutopia_extension/controllers/__init__.py similarity index 99% rename from internnav/projects/internutopia_vln_extension/controllers/__init__.py rename to internnav/env/utils/internutopia_extension/controllers/__init__.py index 975904f4..10eaa68b 100644 --- a/internnav/projects/internutopia_vln_extension/controllers/__init__.py +++ b/internnav/env/utils/internutopia_extension/controllers/__init__.py @@ -1,6 +1,7 @@ # from .aliengo_vln_move_by_speed_controller import AliengoVlnMoveBySpeedController from .discrete_controller import 
DiscreteController -from .h1_vln_move_by_speed_controller import VlnMoveBySpeedController from .h1_vln_move_by_flash_controller import VlnMoveByFlashController +from .h1_vln_move_by_speed_controller import VlnMoveBySpeedController from .stand_still import StandStillController + # from .h1_vln_dp_conrtroller import VlnDpMoveBySpeedController diff --git a/internnav/projects/internutopia_vln_extension/controllers/discrete_controller.py b/internnav/env/utils/internutopia_extension/controllers/discrete_controller.py similarity index 100% rename from internnav/projects/internutopia_vln_extension/controllers/discrete_controller.py rename to internnav/env/utils/internutopia_extension/controllers/discrete_controller.py index 0721d72b..b588703b 100644 --- a/internnav/projects/internutopia_vln_extension/controllers/discrete_controller.py +++ b/internnav/env/utils/internutopia_extension/controllers/discrete_controller.py @@ -1,10 +1,10 @@ from typing import Any, Dict, List import numpy as np +from internutopia.core.robot.articulation import ArticulationAction from internutopia.core.robot.controller import BaseController from internutopia.core.robot.robot import BaseRobot from internutopia.core.scene.scene import IScene -from internutopia.core.robot.articulation import ArticulationAction from ..configs.controllers.discrete_controller import DiscreteControllerCfg diff --git a/internnav/projects/internutopia_vln_extension/controllers/h1_vln_move_by_flash_controller.py b/internnav/env/utils/internutopia_extension/controllers/h1_vln_move_by_flash_controller.py similarity index 88% rename from internnav/projects/internutopia_vln_extension/controllers/h1_vln_move_by_flash_controller.py rename to internnav/env/utils/internutopia_extension/controllers/h1_vln_move_by_flash_controller.py index 097c78eb..86e57c00 100644 --- a/internnav/projects/internutopia_vln_extension/controllers/h1_vln_move_by_flash_controller.py +++ b/internnav/env/utils/internutopia_extension/controllers/h1_vln_move_by_flash_controller.py @@ -1,19 +1,21 @@ +import math from typing import Any, Dict, List import numpy as np -import math +from internutopia.core.robot.articulation import ArticulationAction from internutopia.core.robot.controller import BaseController from internutopia.core.robot.robot import BaseRobot from internutopia.core.scene.scene import IScene -from internutopia.core.robot.articulation import ArticulationAction from ..configs.controllers.flash_controller import VlnMoveByFlashControllerCfg + @BaseController.register('VlnMoveByFlashController') class VlnMoveByFlashController(BaseController): # codespell:ignore """Discrete Controller, direct set robot world position to achieve teleport-type locomotion. - a genral controller adaptable to different type of robots. - """ + a general controller adaptable to different type of robots. + """ + def __init__(self, config: VlnMoveByFlashControllerCfg, robot: BaseRobot, scene: IScene) -> None: self._user_config = None self.current_steps = 0 @@ -37,8 +39,8 @@ def __init__(self, config: VlnMoveByFlashControllerCfg, robot: BaseRobot, scene: def get_new_position_and_rotation(self, robot_position, robot_rotation, action): """ Calculate robot new state by previous state and action. The move should be based on the controller - settings. - Caution: the rotation need to reset pitch and roll to prevent robot falling. This may due to no + settings. + Caution: the rotation need to reset pitch and roll to prevent robot falling. 
This may due to no adjustment during the whole path and some rotation accumulated Args: @@ -55,31 +57,35 @@ def get_new_position_and_rotation(self, robot_position, robot_rotation, action): both in world frame. """ from omni.isaac.core.utils.rotations import ( - quat_to_euler_angles, euler_angles_to_quat + euler_angles_to_quat, + quat_to_euler_angles, ) + _, _, yaw = quat_to_euler_angles(robot_rotation) - if action == 1: # forward + if action == 1: # forward dx = self.forward_distance * math.cos(yaw) dy = self.forward_distance * math.sin(yaw) - new_robot_position = robot_position + [dx,dy,0] + new_robot_position = robot_position + [dx, dy, 0] new_robot_rotation = robot_rotation - elif action == 2: #left + elif action == 2: # left new_robot_position = robot_position new_yaw = yaw + math.radians(self.rotation_angle) - new_robot_rotation = euler_angles_to_quat(np.array([0.0,0.0,new_yaw])) # using 0 to prevent the robot from falling - elif action == 3: #right + new_robot_rotation = euler_angles_to_quat( + np.array([0.0, 0.0, new_yaw]) + ) # using 0 to prevent the robot from falling + elif action == 3: # right new_robot_position = robot_position new_yaw = yaw - math.radians(self.rotation_angle) - new_robot_rotation = euler_angles_to_quat(np.array([0.0,0.0,new_yaw])) + new_robot_rotation = euler_angles_to_quat(np.array([0.0, 0.0, new_yaw])) else: new_robot_position = robot_position new_robot_rotation = robot_rotation - return new_robot_position,new_robot_rotation + return new_robot_position, new_robot_rotation def reset_robot_state(self, position, orientation): """Set robot state to the new position and orientation. - + Args: position, orientation: np.array, issac_robot.get_world_pose() """ @@ -93,25 +99,24 @@ def reset_robot_state(self, position, orientation): def forward(self, action: int) -> ArticulationAction: """Teleport robot by position, orientation and action - + Args: - action: int + action: int 0. discrete action (int): 0: stop, 1: forward, 2: left, 3: right Returns: ArticulationAction: joint signals to apply (nothing). """ # get robot new position - # positions, orientations = self.robot.isaac_robot.get_world_pose() - positions, orientations = self.robot.articulation.get_world_pose() + # positions, orientations = self.robot.isaac_robot.get_world_pose() + positions, orientations = self.robot.articulation.get_world_pose() new_robot_position, new_robot_rotation = self.get_new_position_and_rotation(positions, orientations, action) # set robot to new state - self.reset_robot_state(new_robot_position,new_robot_rotation) + self.reset_robot_state(new_robot_position, new_robot_rotation) # Dummy action to do nothing return ArticulationAction() - def action_to_control(self, action: List | np.ndarray) -> ArticulationAction: """Convert input action (in 1d array format) to joint signals to apply. 
diff --git a/internnav/projects/internutopia_vln_extension/controllers/h1_vln_move_by_speed_controller.py b/internnav/env/utils/internutopia_extension/controllers/h1_vln_move_by_speed_controller.py similarity index 100% rename from internnav/projects/internutopia_vln_extension/controllers/h1_vln_move_by_speed_controller.py rename to internnav/env/utils/internutopia_extension/controllers/h1_vln_move_by_speed_controller.py index 35739d13..072a1f27 100644 --- a/internnav/projects/internutopia_vln_extension/controllers/h1_vln_move_by_speed_controller.py +++ b/internnav/env/utils/internutopia_extension/controllers/h1_vln_move_by_speed_controller.py @@ -5,14 +5,14 @@ import numpy as np import torch import torch.nn.functional as F -from internutopia.core.robot.controller import BaseController -from internutopia.core.robot.robot import BaseRobot -from internutopia_extension.configs.controllers import H1MoveBySpeedControllerCfg +from internutopia.core.robot.articulation import ArticulationAction from internutopia.core.robot.articulation_subset import ArticulationSubset +from internutopia.core.robot.controller import BaseController from internutopia.core.robot.rigid_body import IRigidBody +from internutopia.core.robot.robot import BaseRobot from internutopia.core.scene.scene import IScene -from internutopia.core.robot.articulation import ArticulationAction from internutopia.core.sensor.sensor import BaseSensor +from internutopia_extension.configs.controllers import H1MoveBySpeedControllerCfg from .math import quat_apply_yaw diff --git a/internnav/projects/internutopia_vln_extension/controllers/math.py b/internnav/env/utils/internutopia_extension/controllers/math.py similarity index 100% rename from internnav/projects/internutopia_vln_extension/controllers/math.py rename to internnav/env/utils/internutopia_extension/controllers/math.py diff --git a/internnav/projects/internutopia_vln_extension/controllers/stand_still.py b/internnav/env/utils/internutopia_extension/controllers/stand_still.py similarity index 100% rename from internnav/projects/internutopia_vln_extension/controllers/stand_still.py rename to internnav/env/utils/internutopia_extension/controllers/stand_still.py index 343e0a97..d914f2cb 100644 --- a/internnav/projects/internutopia_vln_extension/controllers/stand_still.py +++ b/internnav/env/utils/internutopia_extension/controllers/stand_still.py @@ -1,10 +1,10 @@ from typing import List import numpy as np +from internutopia.core.robot.articulation import ArticulationAction from internutopia.core.robot.controller import BaseController from internutopia.core.robot.robot import BaseRobot from internutopia.core.scene.scene import IScene -from internutopia.core.robot.articulation import ArticulationAction from ..configs.controllers.stand_still import StandStillControllerCfg diff --git a/internnav/projects/internutopia_vln_extension/metrics/__init__.py b/internnav/env/utils/internutopia_extension/metrics/__init__.py similarity index 100% rename from internnav/projects/internutopia_vln_extension/metrics/__init__.py rename to internnav/env/utils/internutopia_extension/metrics/__init__.py diff --git a/internnav/projects/internutopia_vln_extension/metrics/vln_pe_metrics.py b/internnav/env/utils/internutopia_extension/metrics/vln_pe_metrics.py similarity index 86% rename from internnav/projects/internutopia_vln_extension/metrics/vln_pe_metrics.py rename to internnav/env/utils/internutopia_extension/metrics/vln_pe_metrics.py index 30e39027..a8f3ced9 100644 --- 
a/internnav/projects/internutopia_vln_extension/metrics/vln_pe_metrics.py +++ b/internnav/env/utils/internutopia_extension/metrics/vln_pe_metrics.py @@ -1,11 +1,8 @@ import numpy as np -from internnav.projects.internutopia_vln_extension.configs.metrics.vln_pe_metrics import VLNPEMetricCfg -from internnav.projects.internutopia_vln_extension.configs.tasks.vln_eval_task import VLNEvalTaskCfg from internutopia.core.task.metric import BaseMetric -from internnav.projects.internutopia_vln_extension.configs.metrics.vln_pe_metrics import ( - VLNPEMetricCfg, -) +from ..configs.metrics.vln_pe_metrics import VLNPEMetricCfg +from ..configs.tasks.vln_eval_task import VLNEvalTaskCfg XY_DISTANCE_CLOSE_THRESHHOLD = 1.0 @@ -65,17 +62,21 @@ def update(self, task_obs: dict): # update related to step self.fail_reason = obs['fail_reason'] if 'fail_reason' in obs else '' # update step count - self.sim_step += 1 + self.sim_step += 1 # calculate current path_length if self.prev_position is not None: # initialize, warm_up will change position - self.current_path_length += np.linalg.norm(current_position[:2] - self.prev_position[:2]) # total path length, only xy + self.current_path_length += np.linalg.norm( + current_position[:2] - self.prev_position[:2] + ) # total path length, only xy else: - self.pred_traj_list[0].append(current_position) + self.pred_traj_list[0].append(current_position) self.prev_position = current_position # current if obs['finish_action']: # add trajectory array - self.pred_traj_list[0].append(current_position) # trajectory array, consider calculation to complete trajectory + self.pred_traj_list[0].append( + current_position + ) # trajectory array, consider calculation to complete trajectory # calculate NE, every round needs self.ne = np.linalg.norm(current_position[:2] - self.goal_position[:2]) diff --git a/internnav/projects/internutopia_vln_extension/robots/__init__.py b/internnav/env/utils/internutopia_extension/robots/__init__.py similarity index 100% rename from internnav/projects/internutopia_vln_extension/robots/__init__.py rename to internnav/env/utils/internutopia_extension/robots/__init__.py diff --git a/internnav/projects/internutopia_vln_extension/robots/h1.py b/internnav/env/utils/internutopia_extension/robots/h1.py similarity index 99% rename from internnav/projects/internutopia_vln_extension/robots/h1.py rename to internnav/env/utils/internutopia_extension/robots/h1.py index 283d3d88..fe5bbd81 100644 --- a/internnav/projects/internutopia_vln_extension/robots/h1.py +++ b/internnav/env/utils/internutopia_extension/robots/h1.py @@ -1,8 +1,8 @@ import numpy as np from internutopia.core.config.robot import RobotCfg from internutopia.core.robot.robot import BaseRobot -from internutopia_extension.robots.h1 import H1Robot from internutopia.core.scene.scene import IScene +from internutopia_extension.robots.h1 import H1Robot @BaseRobot.register('VLNH1Robot') @@ -18,6 +18,7 @@ def post_reset(self): def apply_action(self, action: dict): import omni.isaac.core.utils.numpy.rotations as rot_utils + self.current_action = action ret = super().apply_action(action) if 'topdown_camera_500' in self.sensors: diff --git a/internnav/projects/internutopia_vln_extension/sensors/__init__.py b/internnav/env/utils/internutopia_extension/sensors/__init__.py similarity index 100% rename from internnav/projects/internutopia_vln_extension/sensors/__init__.py rename to internnav/env/utils/internutopia_extension/sensors/__init__.py diff --git a/internnav/projects/internutopia_vln_extension/sensors/vln_camera.py 
b/internnav/env/utils/internutopia_extension/sensors/vln_camera.py similarity index 83% rename from internnav/projects/internutopia_vln_extension/sensors/vln_camera.py rename to internnav/env/utils/internutopia_extension/sensors/vln_camera.py index 1296ea7d..238b9a8a 100644 --- a/internnav/projects/internutopia_vln_extension/sensors/vln_camera.py +++ b/internnav/env/utils/internutopia_extension/sensors/vln_camera.py @@ -1,11 +1,13 @@ from typing import Dict +import numpy as np from internutopia.core.robot.robot import BaseRobot from internutopia.core.scene.scene import IScene from internutopia.core.sensor.camera import ICamera from internutopia.core.sensor.sensor import BaseSensor -import numpy as np + from internnav.utils.common_log_util import common_logger as log + from ..configs.sensors.vln_camera import VLNCameraCfg @@ -24,11 +26,15 @@ def get_data(self) -> Dict: output_data = {} output_data['rgba'] = self._camera.get_rgba() if output_data['rgba'].shape[0] != self.config.resolution[1]: - output_data['rgba'] = np.random.randint(0, 256, (self.config.resolution[1], self.config.resolution[0], 4), dtype=np.uint8) + output_data['rgba'] = np.random.randint( + 0, 256, (self.config.resolution[1], self.config.resolution[0], 4), dtype=np.uint8 + ) log.error("rgba shape wrong, use random one!!!") output_data['depth'] = self._camera.get_distance_to_image_plane() if output_data['depth'].shape[0] != self.config.resolution[1]: - output_data['depth'] = np.random.uniform(0, 256, size=(self.config.resolution[1], self.config.resolution[0])).astype(np.float32) + output_data['depth'] = np.random.uniform( + 0, 256, size=(self.config.resolution[1], self.config.resolution[0]) + ).astype(np.float32) log.error("depth shape wrong, use random one!!!") return self._make_ordered(output_data) @@ -57,4 +63,4 @@ def set_world_pose(self, *args, **kwargs): self._camera.set_world_pose(*args, **kwargs) def get_world_pose(self): - return self._camera.get_world_pose() \ No newline at end of file + return self._camera.get_world_pose() diff --git a/internnav/projects/internutopia_vln_extension/tasks/__init__.py b/internnav/env/utils/internutopia_extension/tasks/__init__.py similarity index 100% rename from internnav/projects/internutopia_vln_extension/tasks/__init__.py rename to internnav/env/utils/internutopia_extension/tasks/__init__.py diff --git a/internnav/projects/internutopia_vln_extension/tasks/utils.py b/internnav/env/utils/internutopia_extension/tasks/utils.py similarity index 93% rename from internnav/projects/internutopia_vln_extension/tasks/utils.py rename to internnav/env/utils/internutopia_extension/tasks/utils.py index 89a20d22..0ca1b88f 100644 --- a/internnav/projects/internutopia_vln_extension/tasks/utils.py +++ b/internnav/env/utils/internutopia_extension/tasks/utils.py @@ -1,7 +1,8 @@ from internnav.evaluator.utils.common import check_robot_fall from internnav.evaluator.utils.stuck_checker import StuckChecker from internnav.utils.common_log_util import common_logger as log -from internnav.projects.internutopia_vln_extension.configs.tasks.vln_eval_task import VLNEvalTaskCfg + +from ..configs.tasks.vln_eval_task import VLNEvalTaskCfg def get_action_state(obs, action_name): @@ -11,7 +12,7 @@ def get_action_state(obs, action_name): class DoneChecker: - def __init__(self, offset, robot, config:VLNEvalTaskCfg): + def __init__(self, offset, robot, config: VLNEvalTaskCfg): self._offset = offset self.stuck_checker = StuckChecker(offset, robot.articulation) self.total_max_step = config.max_step diff --git 
a/internnav/projects/internutopia_vln_extension/tasks/vln_eval_task.py b/internnav/env/utils/internutopia_extension/tasks/vln_eval_task.py similarity index 92% rename from internnav/projects/internutopia_vln_extension/tasks/vln_eval_task.py rename to internnav/env/utils/internutopia_extension/tasks/vln_eval_task.py index 729817f9..0715060d 100644 --- a/internnav/projects/internutopia_vln_extension/tasks/vln_eval_task.py +++ b/internnav/env/utils/internutopia_extension/tasks/vln_eval_task.py @@ -1,8 +1,8 @@ -from internnav.projects.internutopia_vln_extension.configs.tasks.vln_eval_task import VLNEvalTaskCfg from internutopia.core.task import BaseTask from internnav.evaluator.utils.common import set_seed +from ..configs.tasks.vln_eval_task import VLNEvalTaskCfg from .utils import DoneChecker @@ -100,12 +100,14 @@ def get_rgb_depth(self): if 'pano_camera_0' in self.robot.sensors: camera = self.robot.sensors['pano_camera_0'] import omni.replicator.core as rep + if self.env_id == 0: rep.orchestrator.step(rt_subframes=2, delta_time=0.0, pause_timeline=False) cur_obs = camera.get_data() rgb_info = cur_obs['rgba'][..., :3] import numpy as np + from internnav.evaluator.utils.common import norm_depth depth_info = norm_depth(cur_obs['depth']) @@ -146,17 +148,24 @@ def get_observations(self): # add step self.step_count = self.step_count + 1 - assert action_name in ['stand_still', 'move_by_discrete', 'vln_move_by_speed', 'vln_dp_move_by_speed', 'move_by_flash', 'stop'], f"Got invalid action name {action_name}!!!" + assert action_name in [ + 'stand_still', + 'move_by_discrete', + 'vln_move_by_speed', + 'vln_dp_move_by_speed', + 'move_by_flash', + 'stop', + ], f"Got invalid action name {action_name}!!!" if action_name == 'stand_still': if self.warm_up_step > 1: self.step_count -= 1 self.warm_up_step -= 1 self.robot.current_action = None return {self.robot_name: obs} - else: + else: obs.update(self.get_rgb_depth()) if (not self.config.robot_flash) and (not self.config.one_step_stand_still): - self.warm_up_step = 50 # without this, possible issues: delay by get_rgb; break warm up + self.warm_up_step = 50 # without this, possible issues: delay by get_rgb; break warm up elif action_name == 'move_by_discrete': move_by_discrete_obs = self.robot.controllers['move_by_discrete'].get_obs() @@ -164,26 +173,26 @@ def get_observations(self): self.robot.current_action = None return {self.robot_name: obs} obs.update(self.get_rgb_depth()) - + elif action_name == 'vln_move_by_speed': move_by_speed_obs = self.robot.controllers['vln_move_by_speed'].get_obs() if not move_by_speed_obs['finished']: - return {self.robot_name:obs} # not finish + return {self.robot_name: obs} # not finish obs.update(self.get_rgb_depth()) elif action_name == 'vln_dp_move_by_speed': move_by_speed_obs = self.robot.controllers['vln_dp_move_by_speed'].get_obs() if not move_by_speed_obs['finished']: - return {self.robot_name:obs} # not finish - obs.update(self.get_rgb_depth()) - + return {self.robot_name: obs} # not finish + obs.update(self.get_rgb_depth()) + elif action_name == 'move_by_flash': obs.update(self.get_rgb_depth()) - + obs['finish_action'] = True self.robot.current_action = None # update when stop - dones, reason = self.done_checker.execute(obs, action_name, self.step_count) + dones, reason = self.done_checker.execute(obs, action_name, self.step_count) self._done = dones[0] if self._done: self.update_metrics({self.robot_name: obs}) diff --git a/internnav/env/vln_multi_env.py b/internnav/env/vln_multi_env.py deleted file mode 100644 index 
2c925ce1..00000000 --- a/internnav/env/vln_multi_env.py +++ /dev/null @@ -1,57 +0,0 @@ -from typing import Any, Dict, List - -from internutopia.core.config import Config, SimConfig -from internutopia.core.config.distribution import RayDistributionCfg -from internutopia.core.config.metric import MetricCfg -from internutopia.core.vec_env import Env - -from internnav.configs.evaluator import EnvCfg, TaskCfg -from internnav.env import base -from internnav.projects.internutopia_vln_extension.configs.tasks.vln_eval_task import ( - VLNEvalTaskCfg, -) -from internnav.projects.internutopia_vln_extension import import_extensions - - -@base.Env.register('vln_multi') -class VlnMultiEnv(base.Env): - def __init__(self, env_config: EnvCfg, task_config: TaskCfg): - super().__init__(env_config, task_config) - env_settings = self.env_config.env_settings - task_settings = self.task_config.task_settings - config = Config( - simulator=SimConfig(**env_settings), - env_num=task_settings['env_num'], - env_offset_size=task_settings['offset_size'], - task_configs=task_settings['episodes'], - ) - if 'distribution_config' in env_settings: - distribution_config=RayDistributionCfg(**env_settings['distribution_config']) - config = config.distribute(distribution_config) - import_extensions() - - self.env = Env(config) - - def reset(self, reset_index=None): - # print('Vln env reset') - return self.env.reset(reset_index) - - def step(self, action: List[Any]): - # print('CalvinEnv step') - return self.env.step(action) - - def is_running(self): - return True - - def close(self): - print('Vln Env close') - self.env.close() - - def render(self): - self.env.render() - - def get_observation(self) -> Dict[str, Any]: - return self.env.get_observations() - - def get_info(self) -> Dict[str, Any]: - pass diff --git a/internnav/evaluator/__init__.py b/internnav/evaluator/__init__.py index 960b8724..88393e50 100644 --- a/internnav/evaluator/__init__.py +++ b/internnav/evaluator/__init__.py @@ -1,9 +1,4 @@ from internnav.evaluator.base import Evaluator -from internnav.evaluator.vln_pe_evaluator import VlnPeEvaluator from internnav.evaluator.vln_multi_evaluator import VlnMultiEvaluator -__all__ = [ - 'Evaluator', - 'VlnPeEvaluator', - 'VlnMultiEvaluator' -] +__all__ = ['Evaluator', 'VlnMultiEvaluator'] diff --git a/internnav/evaluator/base.py b/internnav/evaluator/base.py index f397ae04..b51b5cdc 100644 --- a/internnav/evaluator/base.py +++ b/internnav/evaluator/base.py @@ -25,6 +25,8 @@ def register(cls, evaluator_type: str): """ def decorator(evaluator_class): + if evaluator_type in cls.evaluators: + raise ValueError(f"Evaluator {evaluator_type} already registered.") cls.evaluators[evaluator_type] = evaluator_class return decorator @@ -34,4 +36,4 @@ def init(cls, config: EvalCfg): """ Init a evaluator instance from a config. 
""" - return cls.evaluators[config.env.env_type](config) + return cls.evaluators[config.eval_type](config) diff --git a/internnav/evaluator/default_evaluator.py b/internnav/evaluator/default_evaluator.py new file mode 100644 index 00000000..f93b908b --- /dev/null +++ b/internnav/evaluator/default_evaluator.py @@ -0,0 +1,120 @@ +import argparse +import importlib.util +import sys + +from real_world_env import RealWorldEnv +from stream import run, set_instruction + +from internnav.utils.comm_utils.client import AgentClient + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--config", + type=str, + default='challenge/onsite_competition/configs/h1_internvla_n1_cfg.py', + help='eval config file path, e.g. scripts/eval/configs/h1_cma_cfg.py', + ) + parser.add_argument( + "--instruction", + type=str, + default='Go straight and pass the sofa and turn right into the hallway. Keep walking down, pass the kitchen and the bathroom, then enter the study room at the far end on the right with a desk, stop next to the white shelf.', + help='current instruction to follow', + ) + parser.add_argument("--uninteractive_mode", action='store_true', help="whether to confirm each step") + return parser.parse_args() + + +def load_eval_cfg(config_path, attr_name='eval_cfg'): + spec = importlib.util.spec_from_file_location("eval_config_module", config_path) + config_module = importlib.util.module_from_spec(spec) + sys.modules["eval_config_module"] = config_module + spec.loader.exec_module(config_module) + return getattr(config_module, attr_name) + + +def confirm(msg: str) -> bool: + """ + Ask user to confirm. Return True if user types 'y' (case-insensitive), + False for anything else (including empty input). + """ + try: + answer = input(f"{msg} [y/N]: ").strip().lower() + except (EOFError, KeyboardInterrupt): + print("\nCancelled.") + return False + return answer in ("", "y") + + +def get_instruction() -> int: + try: + import json + + instruction_lst = json.load(open("challenge/onsite_competition/instructions.json")) + print("Available instructions:") + for i, item in enumerate(instruction_lst): + print(f"{i}: {item['instruction_title']}") + answer = input("input instruction id: ").strip().lower() + answer = int(answer) + except (EOFError, KeyboardInterrupt): + print("\nCancelled.") + sys.exit() + return instruction_lst[answer]['instruction'][0] + + +def action_to_word(action: int) -> str: + action = max(0, min(3, action)) + wl = ["stand still", "move forward", "turn left", "turn right"] + return wl[action] + + +def main(): + args = parse_args() + print("--- Loading config from:", args.config, "---") + cfg = load_eval_cfg(args.config, attr_name='eval_cfg') + agent_cfg = cfg.agent + + # initialize user's agent + agent = AgentClient(agent_cfg) + + # initialize real world env + env = RealWorldEnv(fps=30, duration=0.1, distance=0.3, angle=15, move_speed=0.5, turn_speed=0.5) + env.reverse() # reverse move direction if using a rear camera + env.step(0) + obs = env.get_observation() + + # start stream + print("--- start running steam app ---") + run(env=env) + + while True: + instruction = get_instruction() + print("\nNew instruction:", instruction) + set_instruction(instruction) + + while True: + # print("get observation...") + # obs contains {rgb, depth, instruction} + obs = env.get_observation() + # print(obs) + obs["instruction"] = instruction + + print("agent step...") + # action is a integer in [0, 3], agent return [{'action': [int], 'ideal_flag': bool}] (same to internvla_n1 agent) + 
action = agent.step([obs])[0]['action'][0] + print("agent step success, action:", action) + + if args.uninteractive_mode or confirm(f"Execute this action [{action_to_word(action)}]?"): + print("env step...") + env.step(action) + print("env step success") + else: + print("Stop requested. Exiting loop.") + print("agent reset...") + agent.reset() + break + + +if __name__ == "__main__": + main() diff --git a/internnav/projects/habitat_extensions/evaluator.py b/internnav/evaluator/habitat_vln_evaluator.py similarity index 100% rename from internnav/projects/habitat_extensions/evaluator.py rename to internnav/evaluator/habitat_vln_evaluator.py diff --git a/internnav/evaluator/utils/eval.py b/internnav/evaluator/utils/eval.py index c370461b..75cc2198 100644 --- a/internnav/evaluator/utils/eval.py +++ b/internnav/evaluator/utils/eval.py @@ -3,14 +3,14 @@ from internutopia_extension.configs.sensors import RepCameraCfg from internnav.configs.evaluator import EvalCfg -from internnav.evaluator.utils.common import load_kujiale_scene_usd, load_scene_usd -from internnav.projects.dataloader.resumable import ResumablePathKeyDataloader -from internnav.projects.internutopia_vln_extension.configs.metrics.vln_pe_metrics import ( +from internnav.env.utils.internutopia_extension.configs.metrics.vln_pe_metrics import ( VLNPEMetricCfg, ) -from internnav.projects.internutopia_vln_extension.configs.tasks.vln_eval_task import ( +from internnav.env.utils.internutopia_extension.configs.tasks.vln_eval_task import ( VLNEvalTaskCfg, ) +from internnav.evaluator.utils.common import load_kujiale_scene_usd, load_scene_usd +from internnav.projects.dataloader.resumable import ResumablePathKeyDataloader def generate_episode(dataloader: ResumablePathKeyDataloader, config: EvalCfg): diff --git a/internnav/configs/evaluator/default_config.py b/internnav/evaluator/utils/vln_default_config.py similarity index 83% rename from internnav/configs/evaluator/default_config.py rename to internnav/evaluator/utils/vln_default_config.py index 1f3bc86b..71bb6967 100644 --- a/internnav/configs/evaluator/default_config.py +++ b/internnav/evaluator/utils/vln_default_config.py @@ -1,17 +1,12 @@ -import sys import os +import sys + import numpy as np -from pydantic import BaseModel from internutopia.core.config.distribution import RayDistributionCfg -from internnav.configs.model import cma_cfg, rdp_cfg, seq2seq_cfg, internvla_n1_cfg +from internutopia_extension.configs.controllers import H1MoveBySpeedControllerCfg +from internutopia_extension.configs.sensors import RepCameraCfg +from pydantic import BaseModel -from internnav.projects.internutopia_vln_extension.configs.controllers.stand_still import ( - StandStillControllerCfg, -) -from internnav.projects.internutopia_vln_extension.configs.robots.h1 import ( - vln_move_by_speed_cfg as h1_vln_move_by_speed_cfg, -) -from internnav.projects.internutopia_vln_extension.configs.sensors.vln_camera import VLNCameraCfg from internnav.configs.evaluator import ( ControllerCfg, EnvCfg, @@ -23,26 +18,51 @@ SensorCfg, TaskCfg, ) -from internnav.configs.model import cma_cfg, rdp_cfg, seq2seq_cfg -from internnav.projects.internutopia_vln_extension.configs.controllers.discrete_controller import ( +from internnav.configs.model import cma_cfg, internvla_n1_cfg, rdp_cfg, seq2seq_cfg +from internnav.env.utils.internutopia_extension.configs.controllers import ( DiscreteControllerCfg, -) -from internnav.projects.internutopia_vln_extension.configs.controllers.stand_still import ( StandStillControllerCfg, + 
VlnMoveByFlashControllerCfg, ) -from internnav.projects.internutopia_vln_extension.configs.metrics.vln_pe_metrics import ( +from internnav.env.utils.internutopia_extension.configs.metrics.vln_pe_metrics import ( VLNPEMetricCfg, ) -from internnav.projects.internutopia_vln_extension.configs.robots.h1 import ( - vln_move_by_speed_cfg as h1_vln_move_by_speed_cfg, vln_move_by_flash_cfg +from internnav.env.utils.internutopia_extension.configs.sensors.vln_camera import ( + VLNCameraCfg, ) -from internnav.projects.internutopia_vln_extension.configs.sensors.vln_camera import VLNCameraCfg -from internutopia_extension.configs.sensors import RepCameraCfg + +h1_vln_move_by_speed_cfg = H1MoveBySpeedControllerCfg( + name='vln_move_by_speed', + type='VlnMoveBySpeedController', + policy_weights_path='data/Embodiments/vln-pe/h1/policy/move_by_speed/h1_loco_jit_policy.pt', + joint_names=[ + 'left_hip_yaw_joint', + 'right_hip_yaw_joint', + 'torso_joint', + 'left_hip_roll_joint', + 'right_hip_roll_joint', + 'left_shoulder_pitch_joint', + 'right_shoulder_pitch_joint', + 'left_hip_pitch_joint', + 'right_hip_pitch_joint', + 'left_shoulder_roll_joint', + 'right_shoulder_roll_joint', + 'left_knee_joint', + 'right_knee_joint', + 'left_shoulder_yaw_joint', + 'right_shoulder_yaw_joint', + 'left_ankle_joint', + 'right_ankle_joint', + 'left_elbow_joint', + 'right_elbow_joint', + ], +) +vln_move_by_flash_cfg = VlnMoveByFlashControllerCfg(name='move_by_flash') cfg = EvalCfg( agent=None, env=EnvCfg( - env_type='vln_multi', + env_type='internutopia', env_settings={ 'physics_dt': 1 / 200, 'rendering_dt': 1 / 200, @@ -78,10 +98,8 @@ 'retry_list': [], }, ), - eval_settings={ - 'save_to_json': True, - 'vis_output': True - }, + eval_type='vln_multi', + eval_settings={'save_to_json': True, 'vis_output': True}, ) @@ -114,7 +132,7 @@ def check_nested_none(obj, path=''): if none_fields: error_msg = 'Evaluation config validation failed!\nNone values found in:\n' for field in none_fields: - error_msg += f' - {field}\n' + error_msg += f' - {field}\n' raise ValueError(error_msg) print('Evaluation config validation passed!') @@ -154,7 +172,7 @@ def update(base_dict, update_dict): try: validate_eval_config(base_dict) except ValueError as e: - print(f'❌ Configuration Error:\n{e}') + print(f'❌ Configuration Error: \n{e}') sys.exit(1) return base_dict @@ -165,7 +183,9 @@ def get_config(evaluator_cfg: EvalCfg): move_by_speed_cfg = h1_vln_move_by_speed_cfg.model_dump() robot_type = 'VLNH1Robot' robot_usd_path = evaluator_cfg.task.robot_usd_path - move_by_speed_cfg["policy_weights_path"] = os.path.dirname(robot_usd_path) + '/policy/move_by_speed/h1_loco_jit_policy.pt' + move_by_speed_cfg["policy_weights_path"] = ( + os.path.dirname(robot_usd_path) + '/policy/move_by_speed/h1_loco_jit_policy.pt' + ) camera_resolution = evaluator_cfg.task.camera_resolution robot_offset = np.array([0.0, 0.0, 1.05]) camera_prim_path = evaluator_cfg.task.camera_prim_path @@ -219,10 +239,7 @@ def get_config(evaluator_cfg: EvalCfg): # add the flash controller in, by flash flag. 
if evaluator_cfg.task.robot_flash: - robot.controllers.append( - ControllerCfg( - controller_settings=vln_move_by_flash_cfg.model_dump()) - ) + robot.controllers.append(ControllerCfg(controller_settings=vln_move_by_flash_cfg.model_dump())) if evaluator_cfg.task.robot_flash or evaluator_cfg.eval_settings.get('vis_output', True): topdown_camera = SensorCfg( @@ -257,7 +274,7 @@ def get_config(evaluator_cfg: EvalCfg): {'robot_offset': robot_offset, 'task_name': evaluator_cfg.task.task_name} ) - #switch scene + # switch scene if evaluator_cfg.task.scene.scene_type == 'mp3d': evaluator_cfg.task.scene = SceneCfg( scene_type='mp3d', diff --git a/internnav/evaluator/vln_multi_evaluator.py b/internnav/evaluator/vln_multi_evaluator.py index 823524ec..a825952b 100644 --- a/internnav/evaluator/vln_multi_evaluator.py +++ b/internnav/evaluator/vln_multi_evaluator.py @@ -2,6 +2,7 @@ from enum import Enum from pathlib import Path from time import time +from typing import Dict, List import numpy as np @@ -25,29 +26,6 @@ class runner_status_code(Enum): STOP = 4 -def transform_action_batch(actions, flash=False): - transformed_actions = [] - for action in actions: - if 'ideal_flag' in action.keys(): - ideal_flag = action['ideal_flag'] - if flash: - assert ideal_flag is True - else: - ideal_flag = False - if not ideal_flag: - transformed_actions.append({'h1': {'vln_dp_move_by_speed': action['action'][0]}}) - continue - a = action['action'] - if a == 0 or a == [0] or a == [[0]]: - transformed_actions.append({'h1': {'stop': []}}) - elif a == -1 or a == [-1] or a == [[-1]]: - transformed_actions.append({'h1': {'stand_still': []}}) - else: - move = f"move_by_{'discrete' if not flash else 'flash'}" - transformed_actions.append({'h1': {move: a}}) # discrete e.g. [3] - return transformed_actions - - @Evaluator.register('vln_multi') class VlnMultiEvaluator(Evaluator): def __init__(self, config: EvalCfg): @@ -61,7 +39,7 @@ def __init__(self, config: EvalCfg): progress_log_multi_util.init(self.task_name, self.dataloader.size) self.total_path_num = self.dataloader.size progress_log_multi_util.progress_logger_multi.info( - f'start eval dataset: {self.task_name}, total_path:{self.dataloader.size}' # noqa: E501 + f'start eval dataset: {self.task_name}, total_path: {self.dataloader.size}' # noqa: E501 ) # generate episode episodes = generate_episode(self.dataloader, config) @@ -130,6 +108,28 @@ def _obs_remove_robot_name(self, obs): ] return obs + def _transform_action_batch(self, actions: List[Dict], flash=False): + transformed_actions = [] + for action in actions: + if 'ideal_flag' in action.keys(): + ideal_flag = action['ideal_flag'] + if flash: + assert ideal_flag is True + else: + ideal_flag = False + if not ideal_flag: + transformed_actions.append({'h1': {'vln_dp_move_by_speed': action['action'][0]}}) + continue + a = action['action'] + if a == 0 or a == [0] or a == [[0]]: + transformed_actions.append({'h1': {'stop': []}}) + elif a == -1 or a == [-1] or a == [[-1]]: + transformed_actions.append({'h1': {'stand_still': []}}) + else: + move = f"move_by_{'discrete' if not flash else 'flash'}" + transformed_actions.append({'h1': {move: a}}) # discrete e.g. 
[3] + return transformed_actions + def get_action(self, obs, action): # process obs obs = np.array(obs) @@ -141,8 +141,8 @@ def get_action(self, obs, action): obs = self.remove_obs_attr(obs) if not np.logical_and.reduce(self.runner_status == runner_status_code.WARM_UP): action = self.agent.step(obs) - log.info(f'now action:{len(action)} ,{action}, fake_obs_index:{fake_obs_index}') - action = transform_action_batch(action, self.robot_flash) + log.info(f'now action: {len(action)}, {action}, fake_obs_index: {fake_obs_index}') + action = self._transform_action_batch(action, self.robot_flash) # change warm_up action = np.array(action) action[self.runner_status == runner_status_code.WARM_UP] = {'h1': {'stand_still': []}} diff --git a/internnav/evaluator/vln_pe_evaluator.py b/internnav/evaluator/vln_pe_evaluator.py deleted file mode 100644 index 4c4acf64..00000000 --- a/internnav/evaluator/vln_pe_evaluator.py +++ /dev/null @@ -1,302 +0,0 @@ -import sys -from enum import Enum -from pathlib import Path -from time import time - -import numpy as np - -from internnav.configs.evaluator import EvalCfg -from internnav.evaluator.base import Evaluator -from internnav.evaluator.utils.common import set_seed_model -from internnav.evaluator.utils.config import get_lmdb_path -from internnav.evaluator.utils.data_collector import DataCollector -from internnav.evaluator.utils.dataset import ResultLogger, split_data -from internnav.evaluator.utils.eval import generate_episode -from internnav.projects.dataloader.resumable import ResumablePathKeyDataloader -from internnav.utils import common_log_util, progress_log_multi_util -from internnav.utils.common_log_util import common_logger as log -from internnav.utils.visualize_util import VisualizeUtil - - -class runner_status_code(Enum): - NORMAL = 0 - WARM_UP = 1 - NOT_RESET = 3 - TERMINATED = 2 - STOP = 4 - - -def transform_action_batch(origin, flash=False): - transformed_actions = [] - for _, a in enumerate([*map(lambda a: a[0], origin)]): - if a == 0: - transformed_actions.append({'h1': {'stop': []}}) - elif a == -1: - transformed_actions.append({'h1': {'stand_still': []}}) - else: - action_name = f"move_by_{'discrete' if not flash else 'flash'}" - transformed_actions.append({'h1': {action_name: [a]}}) - return transformed_actions - - -@Evaluator.register('vln_pe') -class VlnPeEvaluator(Evaluator): - def __init__(self, config: EvalCfg): - self.task_name = config.task.task_name - if not Path(get_lmdb_path(self.task_name)).exists(): - split_data(config.dataset) - self.result_logger = ResultLogger(config.dataset) - common_log_util.init(self.task_name) - self.dataloader = ResumablePathKeyDataloader(config.dataset.dataset_type, **config.dataset.dataset_settings) - self.dataset_name = Path(config.dataset.dataset_settings['base_data_dir']).name - progress_log_multi_util.init(self.task_name, self.dataloader.size) - self.total_path_num = self.dataloader.size - progress_log_multi_util.progress_logger_multi.info( - f'start eval dataset: {self.task_name}, total_path:{self.dataloader.size}' # noqa: E501 - ) - self.vis_output = config.eval_settings['vis_output'] - self.visualize_util = VisualizeUtil(self.task_name, fps=6) - - # generate episode - episodes = generate_episode(self.dataloader, config) - if len(episodes) == 0: - log.info("No more episodes to evaluate. 
Episodes are saved in data/sample_episodes/") - sys.exit(0) - config.task.task_settings.update({'episodes': episodes}) - self.env_num = config.task.task_settings['env_num'] - self.proc_num = ( - config.env.env_settings['distribution_config']['proc_num'] - if 'distribution_config' in config.env.env_settings - else 1 - ) - # check env_num and proc_num - # priority: reduce env_num first then reduce proc_num - while self.env_num > 1 and self.proc_num * self.env_num > self.total_path_num: - self.env_num -= 1 - log.info(f'dataset size is too small! Change env_num to {self.env_num}.') - while self.proc_num > 1 and self.proc_num * self.env_num > self.total_path_num: - self.proc_num -= 1 - log.info(f'dataset size is too small! Change proc_num to {self.proc_num}.') - # update - config.task.task_settings['env_num'] = self.env_num - if 'distribution_config' in config.env.env_settings: - config.env.env_settings['distribution_config']['proc_num'] = self.proc_num - - config.agent.model_settings.update({'env_num': self.env_num, 'proc_num': self.proc_num}) - self.robot_name = config.task.robot_name - super().__init__(config) - set_seed_model(0) - self.data_collector = DataCollector(self.dataloader.lmdb_path) - self.robot_flash = config.task.robot_flash - self.save_to_json = config.eval_settings['save_to_json'] - - @property - def ignore_obs_attr(self): - return [ - 'finish_action', - 'current_pose', - 'render', - 'fail_reason', - 'metrics', - ] - - def remove_obs_attr(self, obs): - return [{k: v for k, v in ob.items() if k not in self.ignore_obs_attr} for ob in obs] - - def warm_up(self): - while True: - obs, _, _, _, _ = self.env.step( - action=[{self.robot_name: {'stand_still': []}} for _ in range(self.env_num * self.proc_num)] - ) - if obs[0][self.robot_name]['finish_action']: - print('get_obs') - break - return obs - - def now_path_key(self, info): - return info.data['path_key'] - - def _obs_remove_robot_name(self, obs): - obs = [ - *map( - lambda ob: ob[self.robot_name] if ob is not None else self.fake_obs, - obs, - ) - ] - return obs - - def get_action(self, obs, action): - # process obs - obs = np.array(obs) - fake_obs_index = np.logical_or( - self.runner_status == runner_status_code.WARM_UP, - self.runner_status == runner_status_code.TERMINATED, - ) - obs[fake_obs_index] = self.fake_obs - obs = self.remove_obs_attr(obs) - if not np.logical_and.reduce(self.runner_status == runner_status_code.WARM_UP): - action = self.agent.step(obs) - log.info(f'get {len(action)} actions :{action}') - action = transform_action_batch(action, self.robot_flash) - # change warm_up - action = np.array(action) - action[self.runner_status == runner_status_code.WARM_UP] = {'h1': {'stand_still': []}} - return obs, action - - def _need_reset(self, terminated_ls): - return np.logical_or.reduce( - np.logical_and( - terminated_ls, - (self.runner_status != runner_status_code.TERMINATED), - ) - ) - - def env_step(self, action): - start_time = time() - while True: - # Stop requires special handling and also requires 50 steps to be taken - self.runner_status[ - np.logical_and(self.runner_status == runner_status_code.NORMAL, action == {'h1': {'stop': []}}) - ] = runner_status_code.STOP - obs, reward, terminated, truncated, info = self.env.step(action=action.tolist()) - obs = self._obs_remove_robot_name(obs) - finish_status = np.logical_or( - np.array([ob['finish_action'] for ob in obs]), - np.array(terminated), - ) - - if ( - np.logical_and.reduce(np.array(finish_status)[self.runner_status == runner_status_code.NORMAL]) - and 
runner_status_code.NORMAL in self.runner_status - ) or np.logical_and.reduce(np.array(finish_status)): - self.runner_status[self.runner_status == runner_status_code.STOP] = runner_status_code.NORMAL - break - end_time = time() - duration = round(end_time - start_time, 2) - log.debug(f'env step duration: {duration}s') - - return obs, terminated - - def terminate_ops(self, obs_ls, reset_infos, terminated_ls): - finish_warmup_ls = (self.runner_status == runner_status_code.WARM_UP) & [ob['finish_action'] for ob in obs_ls] - if np.logical_or.reduce(finish_warmup_ls): - self.agent.reset(np.where(finish_warmup_ls)[0].tolist()) - self.runner_status[finish_warmup_ls] = runner_status_code.NORMAL - log.debug(f'env{np.where(finish_warmup_ls)[0].tolist()}: states switch to NORMAL.') - # if no need reset, return False - if not self._need_reset(terminated_ls): - return False, reset_infos - import json - - for env_id, terminated in enumerate(terminated_ls): - if terminated and self.runner_status[env_id] != runner_status_code.TERMINATED: - obs = obs_ls[env_id] - reset_info = reset_infos[env_id] - log.info(f"{self.now_path_key(reset_info)}: {json.dumps(obs['metrics'], indent=4)}") - self.data_collector.save_eval_result( - key=self.now_path_key(reset_info), - result=obs['metrics'][list(obs['metrics'].keys())[0]][0]['fail_reason'], - info=obs['metrics'][list(obs['metrics'].keys())[0]][0], - ) # save data to dataset - # log data - progress_log_multi_util.trace_end( - trajectory_id=self.now_path_key(reset_info), - step_count=obs['metrics'][list(obs['metrics'].keys())[0]][0]['steps'], - result=obs['metrics'][list(obs['metrics'].keys())[0]][0]['fail_reason'], - ) - if self.vis_output: - self.visualize_util.trace_end( - trajectory_id=self.now_path_key(reset_info), - result=obs['metrics'][list(obs['metrics'].keys())[0]][0]['fail_reason'], - ) - if self.save_to_json: - self.result_logger.write_now_result_json() - self.result_logger.write_now_result() - self.runner_status[env_id] = runner_status_code.NOT_RESET - log.debug(f'env{env_id}: states switch to NOT_RESET.') - - # need this status to reset - reset_env_ids = np.where(self.runner_status == runner_status_code.NOT_RESET)[0].tolist() - - if len(reset_env_ids) > 0: - log.debug(f'env{reset_env_ids}: start new episode!') - obs, new_reset_infos = self.env.reset(reset_env_ids) - self.runner_status[reset_env_ids] = runner_status_code.WARM_UP - log.debug(f'env{reset_env_ids}: states switch to WARM UP.') - # modify original reset_info - reset_infos = np.array(reset_infos) - reset_infos[reset_env_ids] = ( - new_reset_infos if len(new_reset_infos) > 0 else None - ) # If there is only one reset and no new_deset_infos, return an empty array - self.runner_status[ - np.vectorize(lambda x: x)(reset_infos) == None # noqa: E711 - ] = runner_status_code.TERMINATED - log.debug(f'env{np.vectorize(lambda x: x)(reset_infos) == None}: states switch to TERMINATED.') - reset_infos = reset_infos.tolist() - - if np.logical_and.reduce(self.runner_status == runner_status_code.TERMINATED): - print('finished') - return True, reset_infos - for reset_info in new_reset_infos: - if reset_info is None: - continue - progress_log_multi_util.trace_start( - trajectory_id=self.now_path_key(reset_info), - ) - if self.vis_output: - self.visualize_util.trace_start( - trajectory_id=self.now_path_key(reset_info), reference_path=reset_info.data['reference_path'] - ) - return False, reset_infos - - def eval(self): - print('--- VlnPeEvaluator start ---') - obs, reset_info = self.env.reset() - print('obs:', obs) 
- for info in reset_info: - if info is None: - continue - progress_log_multi_util.trace_start( - trajectory_id=self.now_path_key(info), - ) - if self.vis_output: - self.visualize_util.trace_start( - trajectory_id=self.now_path_key(info), reference_path=info.data['reference_path'] - ) - log.info('start new episode!') - - obs = self.warm_up() - self.fake_obs = obs[0][self.robot_name] - action = [{self.robot_name: {'stand_still': []}} for _ in range(self.env_num * self.proc_num)] - obs = self._obs_remove_robot_name(obs) - self.runner_status = np.full( - (self.env_num * self.proc_num), - runner_status_code.NORMAL, - runner_status_code, - ) - self.runner_status[[info is None for info in reset_info]] = runner_status_code.TERMINATED - - while self.env.is_running(): - - obs, action = self.get_action(obs, action) - obs, terminated = self.env_step(action) - env_term, reset_info = self.terminate_ops(obs, reset_info, terminated) - if env_term: - break - - # save step obs - if self.vis_output: - if self.config.task.task_settings['use_distributed']: - raise Exception('visualization not support distributed mode!') - - for ob, info, act in zip(obs, reset_info, action): - if info is None or 'rgb' not in ob or ob['fail_reason']: - continue - self.visualize_util.save_observation( - trajectory_id=self.now_path_key(info), obs=ob, action=act[self.robot_name] - ) - - self.env.close() - progress_log_multi_util.report() - - print('--- VlnPeEvaluator end ---') diff --git a/internnav/projects/internutopia_vln_extension/configs/robots/h1.py b/internnav/projects/internutopia_vln_extension/configs/robots/h1.py deleted file mode 100644 index 08931962..00000000 --- a/internnav/projects/internutopia_vln_extension/configs/robots/h1.py +++ /dev/null @@ -1,31 +0,0 @@ -from internutopia.macros import gm -from internutopia_extension.configs.controllers import H1MoveBySpeedControllerCfg -from internnav.projects.internutopia_vln_extension.configs.controllers.flash_controller import VlnMoveByFlashControllerCfg - -vln_move_by_speed_cfg = H1MoveBySpeedControllerCfg( - name='vln_move_by_speed', - type='VlnMoveBySpeedController', - policy_weights_path='data/Embodiments/vln-pe/h1/policy/move_by_speed/h1_loco_jit_policy.pt', - joint_names=[ - 'left_hip_yaw_joint', - 'right_hip_yaw_joint', - 'torso_joint', - 'left_hip_roll_joint', - 'right_hip_roll_joint', - 'left_shoulder_pitch_joint', - 'right_shoulder_pitch_joint', - 'left_hip_pitch_joint', - 'right_hip_pitch_joint', - 'left_shoulder_roll_joint', - 'right_shoulder_roll_joint', - 'left_knee_joint', - 'right_knee_joint', - 'left_shoulder_yaw_joint', - 'right_shoulder_yaw_joint', - 'left_ankle_joint', - 'right_ankle_joint', - 'left_elbow_joint', - 'right_elbow_joint', - ], -) -vln_move_by_flash_cfg = VlnMoveByFlashControllerCfg(name='move_by_flash') \ No newline at end of file diff --git a/scripts/demo/vln_gradio_backend.py b/scripts/demo/vln_gradio_backend.py index b6f89095..efbdbb38 100644 --- a/scripts/demo/vln_gradio_backend.py +++ b/scripts/demo/vln_gradio_backend.py @@ -16,8 +16,8 @@ from pydantic import BaseModel from transformers import AutoProcessor +from internnav.env.utils.habitat_extensions.evaluator_single import VLNEvaluator from internnav.model.basemodel.internvla_n1.internvla_n1 import InternVLAN1ForCausalLM -from internnav.projects.habitat_extensions.evaluator_single import VLNEvaluator from internnav.utils.dist import * PROJECT_ROOT_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) diff --git a/scripts/eval/configs/challenge_cfg.py 
b/scripts/eval/configs/challenge_cfg.py deleted file mode 100644 index fa569bd1..00000000 --- a/scripts/eval/configs/challenge_cfg.py +++ /dev/null @@ -1,49 +0,0 @@ -from internnav.configs.agent import AgentCfg -from internnav.configs.evaluator import ( - EnvCfg, - EvalCfg, - EvalDatasetCfg, - SceneCfg, - TaskCfg, -) - -eval_cfg = EvalCfg( - agent=AgentCfg( - server_port=8087, - model_name='rdp', - ckpt_path='checkpoints/r2r/fine_tuned/rdp', - model_settings={}, - ), - env=EnvCfg( - env_type='vln_pe', - env_settings={ - 'use_fabric': False, - 'headless': True, # display option: set to False will open isaac-sim interactive window - }, - ), - task=TaskCfg( - task_name='rdp_iros_test', - task_settings={ - 'env_num': 1, - 'use_distributed': False, # Ray distributed framework - 'proc_num': 8, - }, - scene=SceneCfg( - scene_type='mp3d', - scene_data_dir='data/scene_data/mp3d_pe', - ), - robot_name='h1', - robot_usd_path='data/Embodiments/vln-pe/h1/h1_vln_pointcloud.usd', - camera_resolution=[256, 256], # (W,H) - camera_prim_path='torso_link/h1_pano_camera_0', - ), - dataset=EvalDatasetCfg( - dataset_type="mp3d", - dataset_settings={ - 'base_data_dir': 'data/vln_pe/raw_data/r2r', - 'split_data_types': ['val_unseen', 'val_seen'], - 'filter_stairs': False, - }, - ), - eval_settings={'save_to_json': True, 'vis_output': True}, # save result to video under logs/ -) diff --git a/scripts/eval/configs/challenge_kujiale_cfg.py b/scripts/eval/configs/challenge_kujiale_cfg.py deleted file mode 100644 index d1bd95a9..00000000 --- a/scripts/eval/configs/challenge_kujiale_cfg.py +++ /dev/null @@ -1,49 +0,0 @@ -from internnav.configs.agent import AgentCfg -from internnav.configs.evaluator import ( - EnvCfg, - EvalCfg, - EvalDatasetCfg, - SceneCfg, - TaskCfg, -) - -eval_cfg = EvalCfg( - agent=AgentCfg( - server_port=8087, - model_name='rdp', - ckpt_path='checkpoints/r2r/fine_tuned/rdp', - model_settings={}, - ), - env=EnvCfg( - env_type='vln_pe', - env_settings={ - 'use_fabric': False, - 'headless': True, # display option: set to False will open isaac-sim interactive window - }, - ), - task=TaskCfg( - task_name='challenge_kujiale_eval', - task_settings={ - 'env_num': 1, - 'use_distributed': False, # Ray distributed framework - 'proc_num': 8, - }, - scene=SceneCfg( - scene_type='kujiale', - scene_data_dir='interiornav_data/scene_data', - ), - robot_name='h1', - robot_usd_path='data/Embodiments/vln-pe/h1/h1_vln_pointcloud.usd', - camera_resolution=[256, 256], # (W,H) - camera_prim_path='torso_link/h1_pano_camera_0', - ), - dataset=EvalDatasetCfg( - dataset_type="kujiale", - dataset_settings={ - 'base_data_dir': 'interiornav_data/raw_data', - 'split_data_types': ['val_unseen', 'val_seen'], - 'filter_stairs': False, - }, - ), - eval_settings={'save_to_json': True, 'vis_output': True}, # save result to video under logs/ -) diff --git a/scripts/eval/configs/challenge_mp3d_cfg.py b/scripts/eval/configs/challenge_mp3d_cfg.py deleted file mode 100644 index 74617534..00000000 --- a/scripts/eval/configs/challenge_mp3d_cfg.py +++ /dev/null @@ -1,49 +0,0 @@ -from internnav.configs.agent import AgentCfg -from internnav.configs.evaluator import ( - EnvCfg, - EvalCfg, - EvalDatasetCfg, - SceneCfg, - TaskCfg, -) - -eval_cfg = EvalCfg( - agent=AgentCfg( - server_port=8087, - model_name='cma', - ckpt_path='checkpoints/r2r/fine_tuned/cma', - model_settings={}, - ), - env=EnvCfg( - env_type='vln_pe', - env_settings={ - 'use_fabric': False, - 'headless': True, - }, - ), - task=TaskCfg( - task_name='challenge_mp3d_eval', - 
task_settings={ - 'env_num': 1, - 'use_distributed': False, - 'proc_num': 2, - }, - scene=SceneCfg( - scene_type='mp3d', - scene_data_dir='data/scene_data/mp3d_pe', - ), - robot_name='h1', - robot_usd_path='data/Embodiments/vln-pe/h1/h1_vln_pointcloud.usd', - camera_resolution=[256, 256], # (W,H) - camera_prim_path='torso_link/h1_pano_camera_0', - ), - dataset=EvalDatasetCfg( - dataset_type="mp3d", - dataset_settings={ - 'base_data_dir': 'data/vln_pe/raw_data/r2r', - 'split_data_types': ['val_unseen', 'val_seen'], - 'filter_stairs': False, - }, - ), - eval_settings={'save_to_json': False, 'vis_output': True}, -) diff --git a/scripts/eval/configs/h1_cma_cfg.py b/scripts/eval/configs/h1_cma_cfg.py index 7c987b75..6b27ee93 100644 --- a/scripts/eval/configs/h1_cma_cfg.py +++ b/scripts/eval/configs/h1_cma_cfg.py @@ -11,11 +11,11 @@ agent=AgentCfg( server_port=8087, model_name='cma', - ckpt_path='checkpoints/r2r/fine_tuned/cma', + ckpt_path='checkpoints/r2r/fine_tuned/cma_plus', model_settings={}, ), env=EnvCfg( - env_type='vln_pe', + env_type='internutopia', env_settings={ 'use_fabric': False, 'headless': True, @@ -34,10 +34,10 @@ ), robot_name='h1', robot_usd_path='data/Embodiments/vln-pe/h1/h1_vln_pointcloud.usd', - camera_resolution=[256,256], # (W,H) + camera_resolution=[256, 256], # (W,H) camera_prim_path='torso_link/h1_pano_camera_0', - vlnce=False, # vlnpe by default - obstacle_detection=False, # whether allow flash across obstacle + vlnce=False, # vlnpe by default + obstacle_detection=False, # whether allow flash across obstacle ), dataset=EvalDatasetCfg( dataset_type="mp3d", @@ -47,8 +47,5 @@ 'filter_stairs': False, }, ), - eval_settings={ - 'save_to_json': False, - 'vis_output': True - } + eval_settings={'save_to_json': False, 'vis_output': True}, ) diff --git a/scripts/eval/configs/h1_cma_cfg_kujiale.py b/scripts/eval/configs/h1_cma_cfg_kujiale.py index e004bb84..7f43e10e 100644 --- a/scripts/eval/configs/h1_cma_cfg_kujiale.py +++ b/scripts/eval/configs/h1_cma_cfg_kujiale.py @@ -15,7 +15,7 @@ model_settings={}, ), env=EnvCfg( - env_type='vln_pe', + env_type='internutopia', env_settings={ 'use_fabric': False, 'headless': True, @@ -34,7 +34,7 @@ ), robot_name='h1', robot_usd_path='data/Embodiments/vln-pe/h1/h1_vln_pointcloud.usd', - camera_resolution=[256,256], # (W,H) + camera_resolution=[256, 256], # (W,H) camera_prim_path='torso_link/h1_pano_camera_0', ), dataset=EvalDatasetCfg( diff --git a/scripts/eval/configs/h1_internvla_n1_async_cfg.py b/scripts/eval/configs/h1_internvla_n1_async_cfg.py index 6ce3e18b..9ee5b833 100644 --- a/scripts/eval/configs/h1_internvla_n1_async_cfg.py +++ b/scripts/eval/configs/h1_internvla_n1_async_cfg.py @@ -14,33 +14,32 @@ model_name='internvla_n1', ckpt_path='', model_settings={ - 'env_num': 1, 'sim_num': 1, + 'env_num': 1, + 'sim_num': 1, 'model_path': "checkpoints/InternVLA-N1", - 'camera_intrinsic': [ - [585.0, 0.0, 320.0], [0.0, 585.0, 240.0], [0.0, 0.0, 1.0] - ], - - 'width': 640, 'height': 480, 'hfov': 79, - 'resize_w': 384, 'resize_h': 384, + 'camera_intrinsic': [[585.0, 0.0, 320.0], [0.0, 585.0, 240.0], [0.0, 0.0, 1.0]], + 'width': 640, + 'height': 480, + 'hfov': 79, + 'resize_w': 384, + 'resize_h': 384, 'max_new_tokens': 1024, 'num_frames': 32, 'num_history': 8, 'num_future_steps': 4, - 'device': 'cuda:1', 'predict_step_nums': 32, - 'continuous_traj': True, - - 'infer_mode': 'partial_async', # You can choose "sync" or "partial_async", but for this model, "partial_async" is better. 
+ 'continuous_traj': True, + 'infer_mode': 'partial_async', # You can choose "sync" or "partial_async", but for this model, "partial_async" is better. # debug - 'vis_debug': True, # If vis_debug=True, you can get visualization results - 'vis_debug_path': './logs/test/vis_debug' + 'vis_debug': True, # If vis_debug=True, you can get visualization results + 'vis_debug_path': './logs/test/vis_debug', }, ), env=EnvCfg( - env_type='vln_multi', + env_type='internutopia', env_settings={ - 'use_fabric': False, # Please set use_fabric=False due to the render delay; + 'use_fabric': False, # Please set use_fabric=False due to the render delay; 'headless': True, }, ), @@ -48,27 +47,27 @@ task_name='test', task_settings={ 'env_num': 1, - 'use_distributed': False, # If the others setting in task_settings, please set use_distributed = False. + 'use_distributed': False, # If the others setting in task_settings, please set use_distributed = False. 'proc_num': 1, - # 'max_step': 1000, #If use flash mode,default 1000; descrete mode, set 50000 + # 'max_step': 1000, #If use flash mode,default 1000; descrete mode, set 50000 }, scene=SceneCfg( scene_type='mp3d', scene_data_dir='data/scene_data/mp3d_pe', ), robot_name='h1', - robot_flash=True, # If robot_flash is True, the mode is flash (set world_pose directly); else you choose physical mode. + robot_flash=True, # If robot_flash is True, the mode is flash (set world_pose directly); else you choose physical mode. robot_usd_path='data/Embodiments/vln-pe/h1/h1_internvla.usd', - camera_resolution=[640, 480], # (W,H) + camera_resolution=[640, 480], # (W,H) camera_prim_path='torso_link/h1_1_25_down_30', - one_step_stand_still = True, #For dual-system, please keep this param True. + one_step_stand_still=True, # For dual-system, please keep this param True. ), dataset=EvalDatasetCfg( dataset_type="mp3d", dataset_settings={ 'base_data_dir': 'data/vln_pe/raw_data/r2r', 'split_data_types': ['val_seen', 'val_unseen'], # 'val_seen' - 'filter_stairs': False, # For iros challenge, this is False; For results in the paper, this is True. + 'filter_stairs': False, # For iros challenge, this is False; For results in the paper, this is True. 
# 'selected_scans': ['zsNo4HB9uLZ'], # 'selected_scans': ['8194nk5LbLH', 'pLe4wQe7qrG'], }, diff --git a/scripts/eval/configs/h1_internvla_n1_cfg.py b/scripts/eval/configs/h1_internvla_n1_cfg.py index d6a3b0ab..76fb10eb 100644 --- a/scripts/eval/configs/h1_internvla_n1_cfg.py +++ b/scripts/eval/configs/h1_internvla_n1_cfg.py @@ -36,7 +36,7 @@ }, ), env=EnvCfg( - env_type='vln_multi', + env_type='internutopia', env_settings={ 'use_fabric': False, # Please set use_fabric=False due to the render delay; 'headless': True, diff --git a/scripts/eval/configs/h1_rdp_cfg.py b/scripts/eval/configs/h1_rdp_cfg.py index 55ae179b..a9ac26a1 100644 --- a/scripts/eval/configs/h1_rdp_cfg.py +++ b/scripts/eval/configs/h1_rdp_cfg.py @@ -15,7 +15,7 @@ model_settings={}, ), env=EnvCfg( - env_type='vln_pe', + env_type='internutopia', env_settings={ 'use_fabric': False, 'headless': True, @@ -34,7 +34,7 @@ ), robot_name='h1', robot_usd_path='data/Embodiments/vln-pe/h1/h1_vln_pointcloud.usd', - camera_resolution=[256,256], # (W,H) + camera_resolution=[256, 256], # (W,H) camera_prim_path='torso_link/h1_pano_camera_0', ), dataset=EvalDatasetCfg( diff --git a/scripts/eval/configs/h1_seq2seq_cfg.py b/scripts/eval/configs/h1_seq2seq_cfg.py index 45164873..2934e8e7 100644 --- a/scripts/eval/configs/h1_seq2seq_cfg.py +++ b/scripts/eval/configs/h1_seq2seq_cfg.py @@ -15,7 +15,7 @@ model_settings={}, ), env=EnvCfg( - env_type='vln_pe', + env_type='internutopia', env_settings={ 'use_fabric': False, 'headless': True, @@ -34,7 +34,7 @@ ), robot_name='h1', robot_usd_path='data/Embodiments/vln-pe/h1/h1_vln_pointcloud.usd', - camera_resolution=[256,256], # (W,H) + camera_resolution=[256, 256], # (W,H) camera_prim_path='torso_link/h1_pano_camera_0', ), dataset=EvalDatasetCfg( diff --git a/scripts/eval/eval.py b/scripts/eval/eval.py index 9b182952..7cb72a38 100644 --- a/scripts/eval/eval.py +++ b/scripts/eval/eval.py @@ -1,13 +1,16 @@ import sys + sys.path.append('.') -from internnav.configs.evaluator.default_config import get_config -from internnav.evaluator import Evaluator import argparse import importlib.util +from internnav.evaluator import Evaluator +from internnav.evaluator.utils.vln_default_config import get_config + # This file is the main file + def parse_args(): parser = argparse.ArgumentParser() parser.add_argument( @@ -18,6 +21,7 @@ def parse_args(): ) return parser.parse_args() + def load_eval_cfg(config_path, attr_name='eval_cfg'): spec = importlib.util.spec_from_file_location("eval_config_module", config_path) config_module = importlib.util.module_from_spec(spec) @@ -25,6 +29,7 @@ def load_eval_cfg(config_path, attr_name='eval_cfg'): spec.loader.exec_module(config_module) return getattr(config_module, attr_name) + def main(): args = parse_args() evaluator_cfg = load_eval_cfg(args.config, attr_name='eval_cfg') @@ -34,5 +39,6 @@ def main(): print(type(evaluator)) evaluator.eval() + if __name__ == '__main__': main() diff --git a/scripts/eval/eval_habitat.py b/scripts/eval/eval_habitat.py index 46d54370..2fc420e4 100644 --- a/scripts/eval/eval_habitat.py +++ b/scripts/eval/eval_habitat.py @@ -6,8 +6,8 @@ import torch from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration +from internnav.evaluator.habitat_vln_evaluator import VLNEvaluator from internnav.model.basemodel.internvla_n1.internvla_n1 import InternVLAN1ForCausalLM -from internnav.projects.habitat_extensions.evaluator import VLNEvaluator from internnav.utils.dist import * diff --git a/scripts/iros_challenge/eval_iros.py 
b/scripts/iros_challenge/eval_iros.py index 83c479d5..8924bf1b 100644 --- a/scripts/iros_challenge/eval_iros.py +++ b/scripts/iros_challenge/eval_iros.py @@ -5,7 +5,8 @@ import argparse import importlib.util -from internnav.configs.evaluator.default_config import get_config +from evaluator.utils.vln_default_config import get_config + from internnav.evaluator import Evaluator # This file is the main file diff --git a/scripts/iros_challenge/onsite_competition/sdk/test_agent.py b/scripts/iros_challenge/onsite_competition/sdk/test_agent.py index 1d6ac59f..deeeb798 100644 --- a/scripts/iros_challenge/onsite_competition/sdk/test_agent.py +++ b/scripts/iros_challenge/onsite_competition/sdk/test_agent.py @@ -2,10 +2,10 @@ import sys import numpy as np +from evaluator.utils.vln_default_config import get_config from save_obs import load_obs_from_meta from internnav.agent.utils.client import AgentClient -from internnav.configs.evaluator.default_config import get_config def load_eval_cfg(config_path, attr_name='eval_cfg'): diff --git a/setup.cfg b/setup.cfg index 4118ae4d..3aeaebe4 100644 --- a/setup.cfg +++ b/setup.cfg @@ -40,7 +40,9 @@ per-file-ignores=*/__init__.py:F401 # R505: Unnecessary elif after return statement # SIM102: Use a single if-statement instead of nested if-statements # SIM117: Merge with statements for context managers that have same scope. -ignore=E402,E501,W503,E203,D401,R504,R505,SIM102,SIM117 +# E711: comparison to None should be 'if cond is None:' +# E226: missing whitespace around arithmetic operator +ignore=E402,E501,W503,E203,D401,R504,R505,SIM102,SIM117,E711,E226 max-line-length = 120 max-complexity = 30 exclude=_*,.vscode,.git,docs/**,**/test/**,**/lcmtypes/**,*.ipynb,scripts/**,internnav/projects/** diff --git a/tests/function_test/e2e_test.py b/tests/function_test/e2e_test.py index 61074b3d..023508c2 100644 --- a/tests/function_test/e2e_test.py +++ b/tests/function_test/e2e_test.py @@ -58,9 +58,3 @@ def test_server(): def test_evaluator(): start_command = 'python ./tests/function_test/test_evaluator.py' common_body(start_command) - - -@pytest.mark.gpu -def test_challenge(): - start_command = 'python ./tests/function_test/test_challenge.py' - common_body(start_command) diff --git a/tests/function_test/test_challenge.py b/tests/function_test/test_challenge.py deleted file mode 100644 index 3d6b4bad..00000000 --- a/tests/function_test/test_challenge.py +++ /dev/null @@ -1,97 +0,0 @@ -''' -Test the evaluator eval logic without model involve. 
-The main progress: - Init => warm up => fake one action -''' - -import importlib.util -import sys -import time - -import numpy as np -from test_server import start_server, stop_server - -from internnav.configs.evaluator.default_config import get_config -from internnav.evaluator import Evaluator -from internnav.utils import progress_log_multi_util - - -def main(): - from enum import Enum - - class runner_status_code(Enum): - NORMAL = 0 - WARM_UP = 1 - NOT_RESET = 3 - TERMINATED = 2 - STOP = 4 - - def load_eval_cfg(config_path, attr_name='eval_cfg'): - spec = importlib.util.spec_from_file_location("eval_config_module", config_path) - config_module = importlib.util.module_from_spec(spec) - sys.modules["eval_config_module"] = config_module - spec.loader.exec_module(config_module) - return getattr(config_module, attr_name) - - evaluator_cfg = load_eval_cfg('scripts/eval/configs/challenge_cfg.py', attr_name='eval_cfg') - cfg = get_config(evaluator_cfg) - evaluator = Evaluator.init(cfg) - - print('--- VlnPeEvaluator start ---') - obs, reset_info = evaluator.env.reset() - for info in reset_info: - if info is None: - continue - progress_log_multi_util.trace_start( - trajectory_id=evaluator.now_path_key(info), - ) - - obs = evaluator.warm_up() - evaluator.fake_obs = obs[0][evaluator.robot_name] - action = [{evaluator.robot_name: {'stand_still': []}} for _ in range(evaluator.env_num * evaluator.proc_num)] - obs = evaluator._obs_remove_robot_name(obs) - evaluator.runner_status = np.full( - (evaluator.env_num * evaluator.proc_num), - runner_status_code.NORMAL, - runner_status_code, - ) - evaluator.runner_status[[info is None for info in reset_info]] = runner_status_code.TERMINATED - - while evaluator.env.is_running(): - obs, action = evaluator.get_action(obs, action) - obs, terminated = evaluator.env_step(action) - env_term, reset_info = evaluator.terminate_ops(obs, reset_info, terminated) - break - - evaluator.env.close() - - -def start_evaluator(): - from multiprocessing import get_context - - ctx = get_context("spawn") # Use 'spawn' to avoid issues on some platforms - p = ctx.Process(target=main) - p.start() - p.join() - assert p.exitcode == 0 - print("Evaluator process completed successfully.") - - -if __name__ == '__main__': - try: - proc = start_server() - time.sleep(3) - start_evaluator() - - except Exception as e: - print(f'exception is {e}') - import traceback - - traceback.print_exc() - sys.exit(1) - - except SystemExit as e: - print(f"Caught SystemExit from env.close(): code={e.code}", flush=True) - - finally: - stop_server(proc) diff --git a/tests/function_test/test_challenge_ray.py b/tests/function_test/test_challenge_ray.py deleted file mode 100644 index 2b35afb2..00000000 --- a/tests/function_test/test_challenge_ray.py +++ /dev/null @@ -1,106 +0,0 @@ -''' -Test the evaluator eval logic with ray, set proc_num = 4. 
-The main progress: - Init => warm up => one action -''' - -import importlib.util -import subprocess -import sys -import time - -import numpy as np - -from internnav.configs.evaluator.default_config import get_config -from internnav.evaluator import Evaluator -from internnav.utils import progress_log_multi_util - - -def main(): - from enum import Enum - - class runner_status_code(Enum): - NORMAL = 0 - WARM_UP = 1 - NOT_RESET = 3 - TERMINATED = 2 - STOP = 4 - - def load_eval_cfg(config_path, attr_name='eval_cfg'): - spec = importlib.util.spec_from_file_location("eval_config_module", config_path) - config_module = importlib.util.module_from_spec(spec) - sys.modules["eval_config_module"] = config_module - spec.loader.exec_module(config_module) - return getattr(config_module, attr_name) - - evaluator_cfg = load_eval_cfg('scripts/eval/configs/challenge_cfg.py', attr_name='eval_cfg') - evaluator_cfg.task.task_settings["use_distributed"] = True - evaluator_cfg.task.task_settings["proc_num"] = 4 - cfg = get_config(evaluator_cfg) - evaluator = Evaluator.init(cfg) - - print('--- VlnPeEvaluator start ---') - obs, reset_info = evaluator.env.reset() - for info in reset_info: - if info is None: - continue - progress_log_multi_util.trace_start( - trajectory_id=evaluator.now_path_key(info), - ) - - obs = evaluator.warm_up() - evaluator.fake_obs = obs[0][evaluator.robot_name] - action = [{evaluator.robot_name: {'stand_still': []}} for _ in range(evaluator.env_num * evaluator.proc_num)] - obs = evaluator._obs_remove_robot_name(obs) - evaluator.runner_status = np.full( - (evaluator.env_num * evaluator.proc_num), - runner_status_code.NORMAL, - runner_status_code, - ) - evaluator.runner_status[[info is None for info in reset_info]] = runner_status_code.TERMINATED - - while evaluator.env.is_running(): - obs, action = evaluator.get_action(obs, action) - obs, terminated = evaluator.env_step(action) - env_term, reset_info = evaluator.terminate_ops(obs, reset_info, terminated) - break - - evaluator.env.close() - - -def start_server(): - server_cmd = [ - sys.executable, - "internnav/agent/utils/server.py", - "--config", - "scripts/eval/configs/challenge_cfg.py", - ] - - proc = subprocess.Popen( - server_cmd, - stdout=None, - stderr=None, - ) - return proc - - -if __name__ == '__main__': - try: - proc = start_server() - time.sleep(3) - main() - except Exception as e: - print(f'exception is {e}') - import traceback - - traceback.print_exc() - sys.exit(1) - finally: - if proc and proc.poll() is None: - print("Shutting down server...") - proc.terminate() - try: - proc.wait(timeout=10) - except subprocess.TimeoutExpired: - print("Force killing server...") - proc.kill() diff --git a/tests/function_test/test_evaluator.py b/tests/function_test/test_evaluator.py index 8aab08c3..b6658e40 100644 --- a/tests/function_test/test_evaluator.py +++ b/tests/function_test/test_evaluator.py @@ -29,8 +29,8 @@ SceneCfg, TaskCfg, ) -from internnav.configs.evaluator.default_config import get_config from internnav.evaluator import Evaluator +from internnav.evaluator.utils.vln_default_config import get_config eval_cfg = EvalCfg( agent=AgentCfg( @@ -40,7 +40,7 @@ model_settings={}, ), env=EnvCfg( - env_type='vln_pe', + env_type='internutopia', env_settings={ 'use_fabric': False, 'headless': True, # display option: set to False will open isaac-sim interactive window @@ -51,7 +51,7 @@ task_settings={ 'env_num': 2, 'use_distributed': True, # Ray distributed framework - 'proc_num': 8, + 'proc_num': 4, }, scene=SceneCfg( scene_type='mp3d', diff 
--git a/tests/unit_test/test_evaluator_unit.py b/tests/unit_test/test_evaluator_unit.py deleted file mode 100644 index 829da0a7..00000000 --- a/tests/unit_test/test_evaluator_unit.py +++ /dev/null @@ -1,22 +0,0 @@ -import numpy as np -import pytest - -from internnav.evaluator.vln_pe_evaluator import transform_action_batch - - -@pytest.mark.slow -def test_transform_action_batch_discrete(): - origin = [(np.array([0]),), (np.array([-1]),), (np.array([3]),)] - out = transform_action_batch(origin, flash=False) - assert out == [ - {'h1': {'stop': []}}, - {'h1': {'stand_still': []}}, - {'h1': {'move_by_discrete': [3]}}, - ] - - -@pytest.mark.slow -def test_transform_action_batch_flash(): - origin = [(np.array([5]),)] - out = transform_action_batch(origin, flash=True) - assert out == [{'h1': {'move_by_flash': [5]}}]
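The unit tests removed above covered the module-level transform_action_batch, which this patch folds into VlnMultiEvaluator._transform_action_batch and switches to the dict-based action format ({'action': [...], 'ideal_flag': ...}). Below is a minimal, illustrative sketch of equivalent coverage for the relocated helper; it is not part of the patch, assumes internnav.evaluator.vln_multi_evaluator imports cleanly in a plain test environment, and relies on the fact that the method never reads self, so a bare object() stands in for the evaluator instance.

    import pytest

    from internnav.evaluator.vln_multi_evaluator import VlnMultiEvaluator


    @pytest.mark.slow
    def test_transform_action_batch_discrete():
        # Agents now emit [{'action': [a], 'ideal_flag': bool}, ...]; with ideal_flag=True the
        # evaluator maps 0 -> stop, -1 -> stand_still, anything else -> move_by_discrete.
        actions = [
            {'action': [0], 'ideal_flag': True},
            {'action': [-1], 'ideal_flag': True},
            {'action': [3], 'ideal_flag': True},
        ]
        out = VlnMultiEvaluator._transform_action_batch(object(), actions, flash=False)
        assert out == [
            {'h1': {'stop': []}},
            {'h1': {'stand_still': []}},
            {'h1': {'move_by_discrete': [3]}},
        ]


    @pytest.mark.slow
    def test_transform_action_batch_flash():
        # With flash=True the same discrete index is routed to move_by_flash instead.
        out = VlnMultiEvaluator._transform_action_batch(object(), [{'action': [5], 'ideal_flag': True}], flash=True)
        assert out == [{'h1': {'move_by_flash': [5]}}]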
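A second illustrative sketch, covering the reworked registry behaviour: registering a duplicate key now raises instead of silently overwriting, and Evaluator.init dispatches on cfg.eval_type rather than cfg.env.env_type. The 'my_eval' key and the classes below are hypothetical and exist only for this example.

    from internnav.evaluator.base import Evaluator


    @Evaluator.register('my_eval')  # hypothetical key, for illustration only
    class MyEvaluator(Evaluator):
        pass


    try:
        @Evaluator.register('my_eval')  # same key again now fails fast
        class MyEvaluatorAgain(Evaluator):
            pass
    except ValueError as exc:
        print(exc)  # Evaluator my_eval already registered.

    # Lookups go through the registry rather than the module namespace:
    # Evaluator.init(cfg) resolves cfg.eval_type (e.g. 'vln_multi'), while
    # cfg.env.env_type (e.g. 'internutopia') only selects the simulator backend.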