Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -132,8 +132,8 @@ data
kujiale_data
interiornav_data
images/
internutopia
internutopia_extension
# internutopia
# internutopia_extension
*.pyc

pre-commit*
Expand Down
2 changes: 2 additions & 0 deletions internnav/agent/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,8 @@ def register(cls, agent_type: str):
"""

def decorator(agent_class):
if agent_type in cls.agents:
raise ValueError(f"Agent {agent_type} already registered.")
cls.agents[agent_type] = agent_class

return decorator
Expand Down
17 changes: 12 additions & 5 deletions internnav/agent/cma_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,10 +5,9 @@
from gym import spaces

from internnav.agent.base import Agent
from internnav.agent.utils.common import batch_obs, set_seed_model
from internnav.configs.agent import AgentCfg
from internnav.configs.model.base_encoders import ModelCfg
from internnav.evaluator.utils.common import set_seed_model
from internnav.evaluator.utils.models import batch_obs
from internnav.model import get_config, get_policy


Expand All @@ -22,6 +21,7 @@ class CmaAgent(Agent):
)

def __init__(self, agent_config: AgentCfg):

super().__init__(agent_config)
self._model_settings = ModelCfg(**agent_config.model_settings)
model_settings = self._model_settings
Expand Down Expand Up @@ -119,13 +119,20 @@ def inference(self, obs):
dtype=torch.bool,
)
end = time.time()
print(f'CmaAgent step time: {round(end-start,4)}s')
print(f'CmaAgent step time: {round(end-start, 4)}s')
return actions.cpu().numpy().tolist()

def step(self, obs):
print('CmaPolicyAgent step')
start = time.time()
action = self.inference(obs)
end = time.time()
print(f'Time: {round(end-start,4)}s')
return action
print(f'Time: {round(end-start, 4)}s')

# convert from [[x],[y]] to [{'action': [x],'ideal_flag':True}, {'action': [y],'ideal_flag':True}]
actions = []
for a in action:
if not isinstance(a, list):
a = [a]
actions.append({'action': a, 'ideal_flag': True})
return actions
24 changes: 11 additions & 13 deletions internnav/agent/rdp_agent.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,13 @@
import random
import time

import numpy as np
import torch
from gym import spaces

from internnav.agent.base import Agent
from internnav.agent.utils.common import batch_obs, set_seed_model
from internnav.configs.agent import AgentCfg
from internnav.configs.model.base_encoders import ModelCfg
from internnav.evaluator.utils.models import batch_obs
from internnav.model import get_config, get_policy
from internnav.model.basemodel.LongCLIP.model import longclip
from internnav.model.basemodel.rdp.utils import (
Expand All @@ -25,17 +24,9 @@
extract_image_features,
extract_instruction_tokens,
)
from internnav.utils import common_log_util
from internnav.utils.common_log_util import common_logger as log


def set_random_seed(seed):
    """Seed every RNG source (Python, NumPy, Torch CPU and all CUDA devices)."""
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seed_fn(seed)


@Agent.register('rdp')
class RdpAgent(Agent):
observation_space = spaces.Box(
Expand All @@ -47,7 +38,7 @@ class RdpAgent(Agent):

def __init__(self, config: AgentCfg):
super().__init__(config)
set_random_seed(0)
set_seed_model(0)
self._model_settings = self.config.model_settings
self._model_settings = ModelCfg(**self._model_settings)
env_num = getattr(self._model_settings, 'env_num', 1)
Expand Down Expand Up @@ -348,5 +339,12 @@ def step(self, obs):
start = time.time()
action = self.inference(obs)
end = time.time()
print(f'总时间: {round(end-start,4)}s')
return action
print(f'总时间: {round(end-start, 4)}s')

# convert from [[a1],[a2]] to [{'action': [a1],'ideal_flag':True}, {'action': [a2],'ideal_flag':True}]
actions = []
for a in action:
if not isinstance(a, list):
a = [a]
actions.append({'action': a, 'ideal_flag': True})
return actions
Original file line number Diff line number Diff line change
@@ -1,11 +1,25 @@
from collections import defaultdict
from typing import DefaultDict, List, Optional

import numpy as np
import torch

from .tensor_dict import TensorDict


def set_seed_model(seed):
    """Seed the Python, NumPy and PyTorch RNGs for reproducible model runs.

    Args:
        seed: integer seed applied to every RNG source (Python ``random``,
            NumPy, Torch CPU, current CUDA device).

    NOTE(review): ``cudnn.deterministic`` is left ``False``, so cuDNN may
    still pick non-deterministic convolution algorithms — presumably a
    deliberate speed/reproducibility trade-off; confirm if bitwise
    determinism is required.
    """
    import random  # local import: 'random' is not a module-level dependency of this file

    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # Disable the cuDNN autotuner so kernel choice does not depend on runtime timing.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = False


def batch_obs(
observations,
device: Optional[torch.device] = None,
Expand Down
3 changes: 2 additions & 1 deletion internnav/configs/evaluator/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,11 +56,12 @@ class EvalDatasetCfg(BaseModel):


class EvalCfg(BaseModel):
eval_type: Optional[str] = None
eval_settings: Optional[Dict[str, Any]] = {}
agent: Optional[AgentCfg] = None
env: EnvCfg
task: TaskCfg
dataset: EvalDatasetCfg
eval_settings: Optional[Dict[str, Any]] = {}


__all__ = [
Expand Down
1 change: 0 additions & 1 deletion internnav/dist.py

This file was deleted.

5 changes: 2 additions & 3 deletions internnav/env/__init__.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
from internnav.env.base import Env
from internnav.env.vln_pe_env import VlnPeEnv
from internnav.env.vln_multi_env import VlnMultiEnv
from internnav.env.internutopia_env import InternutopiaEnv

__all__ = ['Env', 'VlnPeEnv', 'VlnMultiEnv']
__all__ = ['Env', 'InternutopiaEnv']
2 changes: 2 additions & 0 deletions internnav/env/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,8 @@ def register(cls, env_type: str):
"""

def decorator(env_class):
if env_type in cls.envs:
raise ValueError(f"Env {env_type} already registered.")
cls.envs[env_type] = env_class

return decorator
Expand Down
28 changes: 17 additions & 11 deletions internnav/env/vln_pe_env.py → internnav/env/internutopia_env.py
Original file line number Diff line number Diff line change
@@ -1,20 +1,24 @@
from typing import Any, Dict, List

from internutopia.core.config import Config, SimConfig
from internutopia.core.config.distribution import RayDistributionCfg
from internutopia.core.vec_env import Env

from internnav.configs.evaluator import EnvCfg, TaskCfg
from internnav.env import base
from internnav.projects.internutopia_vln_extension.configs.tasks.vln_eval_task import (
VLNEvalTaskCfg,
)
from internnav.projects.internutopia_vln_extension import import_extensions


@base.Env.register('vln_pe')
class VlnPeEnv(base.Env):
@base.Env.register('internutopia')
class InternutopiaEnv(base.Env):
def __init__(self, env_config: EnvCfg, task_config: TaskCfg):
try:
from internutopia.core.config import Config, SimConfig
from internutopia.core.config.distribution import RayDistributionCfg
from internutopia.core.vec_env import Env

from internnav.env.utils.internutopia_extension import import_extensions
except ImportError as e:
raise RuntimeError(
"InternUtopia modules could not be imported. "
"Make sure both repositories are installed and on PYTHONPATH."
) from e

super().__init__(env_config, task_config)
env_settings = self.env_config.env_settings
task_settings = self.task_config.task_settings
Expand All @@ -25,8 +29,10 @@ def __init__(self, env_config: EnvCfg, task_config: TaskCfg):
task_configs=task_settings['episodes'],
)
if 'distribution_config' in env_settings:
distribution_config=RayDistributionCfg(**env_settings['distribution_config'])
distribution_config = RayDistributionCfg(**env_settings['distribution_config'])
config = config.distribute(distribution_config)

# register all extensions
import_extensions()

self.env = Env(config)
Expand Down
85 changes: 85 additions & 0 deletions internnav/env/realworld_agilex_env.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,85 @@
import threading
import time

from internnav.env import Env
from internnav.env.utils.agilex_extensions.cam import AlignedRealSense
from internnav.env.utils.agilex_extensions.control import DiscreteRobotController


@Env.register('realworld')
class RealWorldEnv(Env):
    """Real-world environment: AgileX base control plus RealSense capture.

    A daemon thread continuously grabs camera frames at roughly ``fps`` Hz
    into ``self.latest_obs`` (guarded by a lock); ``get_observation``
    returns the newest frame and ``step`` executes one discrete motion
    primitive on the robot.
    """

    def __init__(
        self,
        fps: int = 30,
        duration: float = 0.5,
        distance: float = 0.25,
        angle: int = 15,
        turn_speed: float = 0.5,
        move_speed: float = 0.3,
    ):
        """Start the camera and the background capture thread.

        Args:
            fps: target camera capture rate (frames per second).
            duration: hold time for the stand-still action, in seconds.
            distance: forward travel per step, in meters; its sign is
                flipped by :meth:`reverse`.
            angle: turn amount per step (unit defined by
                ``DiscreteRobotController.turn`` — presumably degrees;
                TODO confirm).
            turn_speed: commanded turning speed (rad/s per the note below).
            move_speed: commanded forward speed (m/s per the note below).
        """

        self.node = DiscreteRobotController()
        self.cam = AlignedRealSense()
        self.latest_obs = None  # newest camera frame; None until first capture
        self.lock = threading.Lock()  # guards latest_obs across threads
        self.stop_flag = threading.Event()  # signals the capture thread to exit
        self.fps = fps

        # Start the camera
        self.cam.start()
        # Start the capture thread (daemon, so it dies with the process)
        self.thread = threading.Thread(target=self._capture_loop, daemon=True)
        self.thread.start()

        # control setting
        self.duration = duration
        self.distance = distance
        self.angle = angle
        self.turn_speed = turn_speed  # rad/s
        self.move_speed = move_speed  # m/s

    def reverse(self):
        # Flip forward/backward: the next action 1 moves in the opposite direction.
        self.distance = -self.distance

    def _capture_loop(self):
        """Continuously capture frames at ~self.fps, storing only the latest one."""
        interval = 1.0 / self.fps
        while not self.stop_flag.is_set():
            t0 = time.time()
            try:
                obs = self.cam.get_observation(timeout_ms=1000)
                with self.lock:
                    self.latest_obs = obs
            except Exception as e:
                # Best effort: report, back off briefly, and keep the loop alive.
                print("Camera capture failed:", e)
                time.sleep(0.05)
            # Sleep out the remainder of the frame interval to cap the capture rate.
            dt = time.time() - t0
            if dt < interval:
                time.sleep(interval - dt)

    def get_observation(self):
        """Return the most recently captured frame (None if nothing captured yet)."""
        with self.lock:
            return self.latest_obs

    def step(self, action: int):
        """
        Execute one discrete motion primitive on the robot (blocking).

        action:
            0: stand still
            1: move forward
            2: turn left
            3: turn right

        Any other value is silently ignored (no motion is commanded).
        """
        if action == 0:
            self.node.stand_still(self.duration)
        elif action == 1:
            self.node.move_feedback(self.distance, self.move_speed)
        elif action == 2:
            self.node.turn(self.angle, self.turn_speed)
        elif action == 3:
            self.node.turn(self.angle, -self.turn_speed)

    def close(self):
        # Stop the capture thread first, then release the camera hardware.
        self.stop_flag.set()
        self.thread.join(timeout=1.0)
        self.cam.stop()
Loading