From 860aa524b5823a778c80ee6a9f81402c6d84437d Mon Sep 17 00:00:00 2001 From: wangyukai Date: Wed, 29 Oct 2025 09:41:06 +0000 Subject: [PATCH 1/7] fix doc --- README.md | 5 ++--- setup.py | 3 +-- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 40919191..9e17af10 100644 --- a/README.md +++ b/README.md @@ -33,9 +33,9 @@ The toolbox supports the most comprehensive 6 datasets \& benchmarks and 10+ pop The toolbox supports the most advanced high-quality navigation dataset, InternData-N1, which includes 3k+ scenes and 830k VLN data covering diverse embodiments and scenes, and the first dual-system navigation foundation model with leading performance on all the benchmarks and zero-shot generalization capability in the real world, InternVLA-N1. -## πŸ”₯ +## πŸ”₯ News - [2025/10] Add a simple [inference-only demo](scripts/notebooks/inference_only_demo.ipynb) of InternVLA-N1. -- [2025/10] InternVLA-N1 [technique report](https://internrobotics.github.io/internvla-n1.github.io/static/pdfs/InternVLA_N1.pdf) is released. Please check our [homepage](https://internrobotics.github.io/internvla-n1.github.io/). +- [2025/10] InternVLA-N1 [technical report](https://internrobotics.github.io/internvla-n1.github.io/static/pdfs/InternVLA_N1.pdf) is released. Please check our [homepage](https://internrobotics.github.io/internvla-n1.github.io/). - [2025/09] Real-world deployment code of InternVLA-N1 is released. - [2025/07] We are hosting πŸ†IROS 2025 Grand Challenge, stay tuned at [official website](https://internrobotics.shlab.org.cn/challenge/2025/). - [2025/07] InternNav v0.1.1 released. @@ -181,7 +181,6 @@ Please refer to the [documentation](https://internrobotics.github.io/user_guide/ **NOTE:** -- The detailed benchmark results of other baselines will be updated in the next few days. - VLN-CE RxR benchmark and StreamVLN will be supported soon. 
## πŸ”§ Customization diff --git a/setup.py b/setup.py index 2256e165..9916d889 100644 --- a/setup.py +++ b/setup.py @@ -52,12 +52,11 @@ def parse_readme(readme: str) -> str: setuptools.setup( name='internnav', - version='0.0.1', + version='0.1.2', packages=setuptools.find_packages(), author='OpenRobotLab', author_email='OpenRobotLab@pjlab.org.cn', license='Apache 2.0', - # readme='README.md', description='InternNav: A benchmark evaluation framework for navigation tasks', long_description=long_description, long_description_content_type='text/markdown', From 83d6cc3f4ec66247ba631c8fa79ee382dc980681 Mon Sep 17 00:00:00 2001 From: wangyukai Date: Wed, 29 Oct 2025 09:58:55 +0000 Subject: [PATCH 2/7] update docker image version --- scripts/iros_challenge/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/iros_challenge/README.md b/scripts/iros_challenge/README.md index 8c30c5f9..b43d1b05 100644 --- a/scripts/iros_challenge/README.md +++ b/scripts/iros_challenge/README.md @@ -78,7 +78,7 @@ $ docker run --name internnav -it --rm --gpus all --network host \ -v ${HOME}/docker/isaac-sim/data:/root/.local/share/ov/data:rw \ -v ${HOME}/docker/isaac-sim/documents:/root/Documents:rw \ -v ${PWD}/data/scene_data/mp3d_pe:/isaac-sim/Matterport3D/data/v1/scans:ro \ - crpi-mdum1jboc8276vb5.cn-beijing.personal.cr.aliyuncs.com/iros-challenge/internnav:v1.0 + crpi-mdum1jboc8276vb5.cn-beijing.personal.cr.aliyuncs.com/iros-challenge/internnav:v1.2 ``` ### Download the starter dataset (val_seen + val_unseen splits) From a93f568984e47118258e2d2e3abba35bf294884f Mon Sep 17 00:00:00 2001 From: wangyukai Date: Wed, 29 Oct 2025 12:21:05 +0000 Subject: [PATCH 3/7] update bash script for evaluation --- scripts/eval/bash/start_eval.sh | 4 +- scripts/eval/bash/start_eval_iros.sh | 108 --------------------------- 2 files changed, 2 insertions(+), 110 deletions(-) delete mode 100644 scripts/eval/bash/start_eval_iros.sh diff --git a/scripts/eval/bash/start_eval.sh b/scripts/eval/bash/start_eval.sh index 1f472a8a..ce41df13 100755 --- a/scripts/eval/bash/start_eval.sh +++ b/scripts/eval/bash/start_eval.sh @@ -29,14 +29,14 @@ mkdir -p logs SERVER_LOG="logs/${CONFIG_PREFIX}_server.log" EVAL_LOG="logs/${CONFIG_PREFIX}_eval.log" -processes=$(ps -ef | grep 'internnav/agent/utils/server.py' | grep -v grep | awk '{print $2}') +processes=$(ps -ef | grep 'scripts/eval/start_server.py' | grep -v grep | awk '{print $2}') if [ -n "$processes" ]; then for pid in $processes; do kill -9 $pid echo "kill: $pid" done fi -python internnav/agent/utils/server.py --config $CONFIG > "$SERVER_LOG" 2>&1 & +python scripts/eval/start_server.py --config $CONFIG > "$SERVER_LOG" 2>&1 & RETRY_LIMIT=5 diff --git a/scripts/eval/bash/start_eval_iros.sh b/scripts/eval/bash/start_eval_iros.sh deleted file mode 100644 index 12d819dd..00000000 --- a/scripts/eval/bash/start_eval_iros.sh +++ /dev/null @@ -1,108 +0,0 @@ -#!/bin/bash -# source /root/miniconda3/etc/profile.d/conda.sh -# conda activate internutopia - -source /root/miniconda3/etc/profile.d/conda.sh -conda activate internutopia - -CONFIG=scripts/eval/configs/challenge_cfg.py -SPLIT="" - -export CUDA_VISIBLE_DEVICES=0,1 - -while [[ $# -gt 0 ]]; do - case $1 in - --config) - CONFIG="$2" - shift 2 - ;; - --split) - SPLIT="$2" - shift 2 - ;; - *) - echo "Unknown argument: $1" - exit 1 - ;; - esac -done - -# Extract the prefix from the config filename -CONFIG_BASENAME=$(basename "$CONFIG" .py) -CONFIG_PREFIX=$(echo "$CONFIG_BASENAME" | sed 's/_cfg$//') - -# Create the logs 
directory if it doesn't exist -mkdir -p logs - -# Set the log file paths -SERVER_LOG="logs/${CONFIG_PREFIX}_server.log" -EVAL_LOG="logs/${CONFIG_PREFIX}_eval.log" - -processes=$(ps -ef | grep 'internnav/agent/utils/server.py' | grep -v grep | awk '{print $2}') -if [ -n "$processes" ]; then - for pid in $processes; do - kill -9 $pid - echo "kill: $pid" - done -fi -python internnav/agent/utils/server.py --config scripts/eval/configs/challenge_kujiale_cfg.py > "$SERVER_LOG" 2>&1 & - - -RETRY_LIMIT=5 -MONITOR_INTERVAL=60 -DEADLOCK_THRESHOLD=$((5 * 60)) - -START_COMMAND_KUJIALE="python -u scripts/eval/eval_iros.py --config $CONFIG --default_config scripts/eval/configs/challenge_kujiale_cfg.py --split $SPLIT" -START_COMMAND_MP3D="python -u scripts/eval/eval_iros.py --config $CONFIG --default_config scripts/eval/configs/challenge_mp3d_cfg.py --split $SPLIT" -LOG_FILE="$EVAL_LOG" - -pid=0 - -retry_count=0 - -rm eval_stdout.log -rm eval_stderr.log - -start_process() { - echo "Starting process..." - $START_COMMAND_MP3D > >(ansi2txt >> eval_stdout.log) 2> >(ansi2txt >> eval_stderr.log) - $START_COMMAND_KUJIALE > >(ansi2txt >> eval_stdout.log) 2> >(ansi2txt >> eval_stderr.log) -} - - -check_process() { - if ! kill -0 $pid > /dev/null 2>&1; then - echo "Process $pid has exited." - return 1 - fi - return 0 -} - - -check_log_update() { - if [ ! -e "$LOG_FILE" ]; then - return 1 - fi - last_update=$(stat -c %Y "$LOG_FILE") - current_time=$(date +%s) - - delta=$(( current_time - last_update )) - - if [ $delta -ge $DEADLOCK_THRESHOLD ]; then - echo "Log file has not been updated for $((DEADLOCK_THRESHOLD / 60)) minutes." - return 1 - fi - - return 0 -} - -start_process - -# terminate server process -processes=$(ps -ef | grep 'internnav/agent/utils/server.py' | grep -v grep | awk '{print $2}') -if [ -n "$processes" ]; then - for pid in $processes; do - kill -9 $pid - echo "kill: $pid" - done -fi From a40b210b6c2553968ae3187cb99d4716274c34b5 Mon Sep 17 00:00:00 2001 From: wangyukai Date: Wed, 29 Oct 2025 12:38:02 +0000 Subject: [PATCH 4/7] fix typo in filename lerobot --- scripts/dataset_converters/{vlnce2erobot.py => vlnce2lerobot.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename scripts/dataset_converters/{vlnce2erobot.py => vlnce2lerobot.py} (100%) diff --git a/scripts/dataset_converters/vlnce2erobot.py b/scripts/dataset_converters/vlnce2lerobot.py similarity index 100% rename from scripts/dataset_converters/vlnce2erobot.py rename to scripts/dataset_converters/vlnce2lerobot.py From 72f4c32f6a665ba5c8421b1f509b0663bbfe604c Mon Sep 17 00:00:00 2001 From: wangyukai Date: Wed, 29 Oct 2025 12:51:19 +0000 Subject: [PATCH 5/7] fix pre commit in vlnce2lerobot --- scripts/dataset_converters/vlnce2lerobot.py | 210 ++++++++++---------- 1 file changed, 101 insertions(+), 109 deletions(-) diff --git a/scripts/dataset_converters/vlnce2lerobot.py b/scripts/dataset_converters/vlnce2lerobot.py index 3403714d..01354ea2 100644 --- a/scripts/dataset_converters/vlnce2lerobot.py +++ b/scripts/dataset_converters/vlnce2lerobot.py @@ -1,49 +1,47 @@ #!/usr/bin/env python +import glob import json -import shutil import os -import glob -import tqdm -import numpy as np +import shutil +from concurrent.futures import ThreadPoolExecutor, as_completed +from pathlib import Path +from typing import Any, Dict, Iterator, Tuple + import cv2 -import tyro import datasets -import logging -import threading -import sys -import datetime - -from pathlib import Path -from loguru import logger -from concurrent.futures import 
ThreadPoolExecutor, as_completed -from typing import Iterator, Dict, Any, List, Tuple, Generator +import numpy as np +import torch +import torchvision +import tqdm from datasets import concatenate_datasets - -from lerobot.common.datasets.utils import embed_images, hf_transform_to_torch -from lerobot.common.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata -from lerobot.common.datasets.compute_stats import aggregate_stats +from lerobot.common.datasets.compute_stats import ( + aggregate_stats, + auto_downsample_height_width, + get_feature_stats, + sample_indices, +) +from lerobot.common.datasets.lerobot_dataset import ( + LeRobotDataset, + LeRobotDatasetMetadata, +) from lerobot.common.datasets.utils import ( check_timestamps_sync, + embed_images, get_episode_data_index, + hf_transform_to_torch, validate_episode_buffer, validate_frame, write_episode, write_episode_stats, write_info, ) -from lerobot.common.datasets.compute_stats import auto_downsample_height_width, get_feature_stats, sample_indices from lerobot.common.datasets.video_utils import get_safe_default_codec - -import open3d as o3d -import numpy as np -from plyfile import PlyData - -import torch -import torchvision +from loguru import logger LEROBOT_HOME = Path(os.environ.get("LEROBOT_HOME", "/shared/smartbot_new/liuyu/")) + def sample_images(input): if type(input) is str: video_path = input @@ -76,17 +74,20 @@ def sample_images(input): return images + def compute_episode_stats(episode_data: dict[str, list[str] | np.ndarray], features: dict) -> dict: """calculate episode statistics""" ep_stats = {} for key, data in episode_data.items(): if key not in features: # skip non-feature data continue - + if features[key]["dtype"] == "string": continue elif features[key]["dtype"] in ["image", "video"]: - if isinstance(data, (str, list)) and all(isinstance(item, str) for item in (data if isinstance(data, list) else [data])): + if isinstance(data, (str, list)) and all( + isinstance(item, str) for item in (data if isinstance(data, list) else [data]) + ): # string path, skip stats calculation continue # ensure data is in the correct shape @@ -99,7 +100,7 @@ def compute_episode_stats(episode_data: dict[str, list[str] | np.ndarray], featu # for non-image/video data, ensure it's a 2D array [N, D] ep_ft_array = np.array(data) if ep_ft_array.ndim == 1: - if key == "episode_index": + if key == "episode_index": ep_ft_array = ep_ft_array.reshape(-1, 1) else: feature_shape = features[key]["shape"] @@ -107,7 +108,7 @@ def compute_episode_stats(episode_data: dict[str, list[str] | np.ndarray], featu ep_ft_array = ep_ft_array.reshape(-1, np.prod(feature_shape)) else: ep_ft_array = ep_ft_array.reshape(-1, 1) - + axes_to_reduce = (0,) # calculate stats on the first dimension keepdims = True @@ -116,16 +117,14 @@ def compute_episode_stats(episode_data: dict[str, list[str] | np.ndarray], featu if features[key]["dtype"] in ["image", "video"]: value_norm = 1.0 if "depth" in key else 255.0 - ep_stats[key] = { - k: v if k == "count" else np.squeeze(v / value_norm) - for k, v in ep_stats[key].items() - } + ep_stats[key] = {k: v if k == "count" else np.squeeze(v / value_norm) for k, v in ep_stats[key].items()} except Exception as e: - logger.warning(f"Failed to calculate stats for feature {key}: {e}") + logger.warning(f"Failed to calculate stats for feature {key}: {e}") continue return ep_stats + class NavDatasetMetadata(LeRobotDatasetMetadata): def get_data_file_path(self, ep_index: int) -> Path: chunk = self.get_episode_chunk(ep_index) @@ -133,9 
+132,9 @@ def get_data_file_path(self, ep_index: int) -> Path: def get_video_file_path(self, ep_index: int, key: str) -> Path: chunk = self.get_episode_chunk(ep_index) - video_key = key.split(".")[-1] + video_key = key.split(".")[-1] return Path("videos") / f"chunk-{chunk:03d}" / video_key - + def save_episode( self, episode_index: int, @@ -172,6 +171,7 @@ def save_episode( self.stats = aggregate_stats([self.stats, episode_stats]) if self.stats else episode_stats write_episode_stats(episode_index, episode_stats, self.root) + class NavDataset(LeRobotDataset): @classmethod def create( @@ -294,7 +294,6 @@ def save_episode(self, files: dict) -> None: video_path = self.root / self.meta.get_video_file_path(episode_index, key) video_path.parent.mkdir(parents=True, exist_ok=True) - source_dir = Path(source_path) if source_dir.exists(): @@ -307,7 +306,7 @@ def save_episode(self, files: dict) -> None: ep_stats = compute_episode_stats(episode_buffer, self.features) self._save_episode_table(episode_buffer, episode_index) - + self.meta.save_episode(episode_index, episode_length, episode_tasks, ep_stats) ep_data_index = get_episode_data_index(self.meta.episodes, [episode_index]) @@ -337,26 +336,19 @@ def _save_episode_table(self, episode_buffer: dict, episode_index: int) -> None: def get_streamvln_features() -> Dict[str, Dict]: """ define the feature structure of StreamVLN dataset - + Args: img_size: image size (height, width) - + Returns: feature definition dictionary """ return { - "observation.images.rgb": { - "dtype": "image", - "shape": (480, 640, 3), - "names": ["height", "width", "channel"] - }, - "action": { - "dtype": "int64", - "shape": (1,), - "names": ["action_index"] - }, + "observation.images.rgb": {"dtype": "image", "shape": (480, 640, 3), "names": ["height", "width", "channel"]}, + "action": {"dtype": "int64", "shape": (1,), "names": ["action_index"]}, } + def load_streamvln_episode( ann: Dict[str, Any], dataset_name: str, @@ -365,20 +357,20 @@ def load_streamvln_episode( ) -> Iterator[Dict[str, Any]]: """ load StreamVLN episode data, return an iterator in LeRobot format - + Args: ann: single annotation dictionary dataset_name: dataset name (EnvDrop/R2R/RxR) data_dir: data root directory img_size: output image size (height, width) - + Yields: a dictionary of LeRobot format data for each frame """ try: ann_id = ann["id"] video_path = ann["video"] - + # parse scene ID and episode ID parts = video_path.split("/")[-1].split("_") scene_id = parts[0] @@ -386,57 +378,53 @@ def load_streamvln_episode( # fix path parsing logic # original format: "video": "images/17DRP5sb8fy_envdrop_111702" # actual path: images/17DRP5sb8fy/rgb - + # src_image_dir = data_dir / dataset_name / "images" / "rgb" /scene_id - + # build source image directory src_image_dir = data_dir / dataset_name / video_path / "rgb" - + # get all image files - image_files = sorted( - glob.glob(str(src_image_dir / "*.jpg")) - - ) + image_files = sorted(glob.glob(str(src_image_dir / "*.jpg"))) if not image_files: logger.warning(f"No image files found in {src_image_dir}") return - + # get actions and instructions actions = np.array(ann.get("actions", []), dtype=np.int64) instructions = ann.get("instructions", []) instruction = json.dumps({"instruction": instructions[0]}) if instructions else "Navigation task" - + # build file path mapping - files = { - "observation.images.rgb": str(src_image_dir) - } - + files = {"observation.images.rgb": str(src_image_dir)} + for frame_idx, img_path in enumerate(image_files): img = cv2.imread(img_path) 
if img is None: continue - + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - - action_value = -1 + + action_value = -1 if frame_idx < len(actions): action_value = actions[frame_idx] - + action = np.array([action_value], dtype=np.int64) - + yield { 'observation': { 'images.rgb': img, }, 'action': action, 'language_instruction': instruction, - 'files': files + 'files': files, } - + except Exception as e: logger.error(f"Failed to load episode {ann_id}: {str(e)}", exc_info=True) return - + + def process_episode( ann: Dict[str, Any], dataset_name: str, @@ -450,10 +438,10 @@ def process_episode( video_path = ann["video"] parts = video_path.split("/")[-1].split("_") scene_id = parts[0] - ep_id = parts[-1] if len(parts) > 2 else "000000" + ep_id = parts[-1] if len(parts) > 2 else "000000" output_path = LEROBOT_HOME / repo_name / dataset_name.lower() / scene_id / ep_id - + if output_path.exists(): return (episode_id, True, "Skipped, already exists") @@ -473,22 +461,22 @@ def process_episode( episode_iterator = load_streamvln_episode(ann, dataset_name, data_dir) frame_count = 0 files = {} - + for step_data in episode_iterator: if frame_count == 0: files = step_data.pop('files', {}) else: step_data.pop('files', {}) - + dataset.add_frame( frame={ "observation.images.rgb": step_data["observation"]["images.rgb"], "action": step_data["action"], }, - task=step_data["language_instruction"] + task=step_data["language_instruction"], ) frame_count += 1 - + if frame_count > 0: dataset.save_episode(files=files) message = f"Successfully processed: {episode_id}, {frame_count} frames" @@ -496,12 +484,13 @@ def process_episode( else: message = f"No frames were processed, skipping: {episode_id}" return (episode_id, False, message) - + except Exception as e: message = f"Failed to process episode: {str(e)}" logger.error(message, exc_info=True) return (ann.get('id', 'unknown'), False, message) + def process_dataset( dataset_name: str, data_dir: Path, @@ -509,13 +498,12 @@ def process_dataset( # img_size: Tuple[int, int], push_to_hub: bool, num_threads: int = 10, - start_idx: int = 0, - end_idx: int | None = None - + start_idx: int = 0, + end_idx: int | None = None, ) -> Tuple[int, int]: """ process the entire dataset - + Args: dataset_name: dataset name data_dir: data root directory @@ -523,7 +511,7 @@ def process_dataset( img_size: image size push_to_hub: whether to push to Hub num_threads: number of threads - + Returns: (total_episodes, success_episodes) """ @@ -532,19 +520,19 @@ def process_dataset( if not ann_file.exists(): logger.error(f"Annotation file not found: {ann_file}") return 0, 0 - + with open(ann_file, "r") as f: annotations = json.load(f) - + total = len(annotations) end_idx = end_idx if end_idx is not None else total - selected_anns = annotations[start_idx:end_idx] + selected_anns = annotations[start_idx:end_idx] selected_count = len(selected_anns) - + if selected_count == 0: logger.warning(f"No episodes found in the index range [{start_idx}, {end_idx})") return 0, 0 - + logger.info( f"Start processing dataset: {dataset_name} " f"(Total episodes: {total}, processing range: [{start_idx}, {end_idx}), actual processing: {selected_count})" @@ -564,18 +552,21 @@ def process_dataset( ): ann['id'] for ann in selected_anns } - - progress_bar = tqdm.tqdm(as_completed(futures), total=len(selected_anns), desc=f"倄理 {dataset_name} [{start_idx}:{end_idx}]") + + progress_bar = tqdm.tqdm( + as_completed(futures), total=len(selected_anns), desc=f"倄理 {dataset_name} [{start_idx}:{end_idx}]" + ) for future in progress_bar: 
_, success, message = future.result() if success: success_count += 1 - progress_bar.set_postfix_str(f"Success: {success_count}/{selected_count} " - f"({success_count/selected_count:.1%})" + progress_bar.set_postfix_str( + f"Success: {success_count}/{selected_count} " f"({success_count/selected_count:.1%})" ) - + return selected_count, success_count + def main( data_dir: str, repo_name: str = "nav_S1", @@ -586,11 +577,11 @@ def main( num_threads: int = 10, start_index: int = None, end_index: int = None, - datasets: str = None + datasets: str = None, ): """ main function - + Args: data_dir: data root directory repo_name: output dataset name @@ -603,9 +594,9 @@ def main( data_path = Path(data_dir) if not data_path.exists(): raise ValueError(f"Data directory does not exist: {data_dir}") - + # datasets_to_process = ["R2R", "EnvDrop", "RxR"] - + total_episodes = 0 success_episodes = 0 dataset_name = datasets @@ -619,11 +610,11 @@ def main( push_to_hub=push_to_hub, num_threads=num_threads, start_idx=start_index, - end_idx=end_index + end_idx=end_index, ) total_episodes += total success_episodes += success - + logger.info("=" * 50) logger.info("Conversion completed!") logger.info(f"Total episodes: {total_episodes}") @@ -631,8 +622,10 @@ def main( logger.info(f"Failed: {total_episodes - success_episodes}") logger.info("=" * 50) + if __name__ == "__main__": import argparse + parser = argparse.ArgumentParser(description="Convert StreamVLN dataset to LeRobot format") parser.add_argument("--data_dir", type=str, default="/path/to/streamvln", help="StreamVLN data root directory") parser.add_argument("--repo_name", type=str, default="vln_ce_lerobot", help="Output dataset name") @@ -641,11 +634,10 @@ def main( parser.add_argument("--num_threads", type=int, default=10, help="Number of threads") parser.add_argument("--start_index", type=int, default=0, help="Start index (inclusive)") parser.add_argument("--end_index", type=int, default=2000, help="End index (exclusive)") - parser.add_argument("--datasets", type=str, default="RxR", - help="List of datasets to process") - + parser.add_argument("--datasets", type=str, default="RxR", help="List of datasets to process") + args = parser.parse_args() - + main( data_dir=args.data_dir, repo_name=args.repo_name, @@ -654,5 +646,5 @@ def main( num_threads=args.num_threads, start_index=args.start_index, end_index=args.end_index, - datasets=args.datasets - ) \ No newline at end of file + datasets=args.datasets, + ) From 55e5be4ab95244b23628917940b4af874c3fc8d4 Mon Sep 17 00:00:00 2001 From: wangyukai Date: Thu, 30 Oct 2025 03:32:30 +0000 Subject: [PATCH 6/7] fix internnav version --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 9916d889..af5e76b5 100644 --- a/setup.py +++ b/setup.py @@ -52,7 +52,7 @@ def parse_readme(readme: str) -> str: setuptools.setup( name='internnav', - version='0.1.2', + version='0.0.1', packages=setuptools.find_packages(), author='OpenRobotLab', author_email='OpenRobotLab@pjlab.org.cn', From 3df82f25974801b95be58b5cae9ad39e08c82b26 Mon Sep 17 00:00:00 2001 From: wangyukai Date: Thu, 30 Oct 2025 03:37:02 +0000 Subject: [PATCH 7/7] update author and email --- setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index af5e76b5..ac0a1870 100644 --- a/setup.py +++ b/setup.py @@ -54,8 +54,8 @@ def parse_readme(readme: str) -> str: name='internnav', version='0.0.1', packages=setuptools.find_packages(), - author='OpenRobotLab', - 
author_email='OpenRobotLab@pjlab.org.cn', + author='Intern Robotics', + author_email='embodiedai@pjlab.org.cn', license='Apache 2.0', description='InternNav: A benchmark evaluation framework for navigation tasks', long_description=long_description,
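
A minimal sketch of launching the evaluation server by hand, following the updated launch path in scripts/eval/bash/start_eval.sh from PATCH 3/7; the config file below is a placeholder, not one shipped in this patch series:

```bash
# Sketch only: mirrors the server-launch lines of scripts/eval/bash/start_eval.sh.
# The config path is a placeholder; substitute a real config under scripts/eval/configs/.
CONFIG=scripts/eval/configs/your_cfg.py
mkdir -p logs
python scripts/eval/start_server.py --config "$CONFIG" > logs/your_cfg_server.log 2>&1 &
```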
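Likewise, a minimal invocation of the renamed converter scripts/dataset_converters/vlnce2lerobot.py, using only the flags defined in its argparse block; all paths are placeholders, and LEROBOT_HOME (which the script reads to choose its output root) is assumed to be set by the user:

```bash
# Sketch only: flags match the argparse definitions in vlnce2lerobot.py.
# Paths are placeholders; LEROBOT_HOME selects where converted episodes are written.
export LEROBOT_HOME=/path/to/lerobot_output
python scripts/dataset_converters/vlnce2lerobot.py \
    --data_dir /path/to/streamvln \
    --repo_name vln_ce_lerobot \
    --datasets R2R \
    --num_threads 10 \
    --start_index 0 \
    --end_index 2000
```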