diff --git a/.gitignore b/.gitignore
index 483e65f..07de83c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,4 +10,46 @@ __MACOSX/
 *.webm
 *.jpg
 *.svo
-*.png
\ No newline at end of file
+*.png
+
+# Testing
+.pytest_cache/
+.coverage
+htmlcov/
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.tox/
+.nox/
+
+# Claude
+.claude/*
+
+# Poetry
+dist/
+poetry.lock
+
+# Virtual environments
+venv/
+.venv/
+ENV/
+env/
+.env
+
+# IDE
+.vscode/
+*.swp
+*.swo
+*~
+
+# Mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Jupyter Notebook
+.ipynb_checkpoints/
+
+# pyenv
+.python-version
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..077d441
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,175 @@
+[tool.poetry]
+name = "humanoid-teleoperation"
+version = "0.1.0"
+description = "A Python project for humanoid robot teleoperation"
+authors = ["Your Name <you@example.com>"]
+readme = "README.md"
+license = "MIT"
+packages = [
+    { include = "act" },
+    { include = "teleop" },
+    { include = "scripts" }
+]
+
+[tool.poetry.dependencies]
+python = ">=3.8.1,<3.12"
+aiohttp = "^3.9.5"
+aiohttp-cors = "^0.7.0"
+aiortc = "^1.8.0"
+av = "^11.0.0"
+# dex-retargeting = "0.1.1"  # Requires Python <3.11
+dynamixel-sdk = "^3.7.31"
+einops = "^0.8.0"
+h5py = "^3.9.0"
+ipython = "^8.12.3"
+matplotlib = "^3.7.5"
+numpy = ">=1.23.0,<2.0.0"
+opencv-contrib-python = "^4.9.0.80"
+opencv-python = "^4.9.0.80"
+packaging = "^24.1"
+pandas = "^2.0.3"
+params-proto = "^2.12.1"
+pytransform3d = "^3.4.0"
+PyYAML = "^6.0.1"
+scikit-learn = "^1.3.2"
+scipy = "^1.10.1"
+seaborn = "^0.13.2"
+setuptools = "^69.5.1"
+torch = "^2.3.0"
+torchvision = "^0.18.0"
+tqdm = "^4.66.4"
+vuer = "^0.0.32rc7"
+wandb = "^0.17.3"
+
+[tool.poetry.group.dev.dependencies]
+pytest = "^8.0.0"
+pytest-cov = "^5.0.0"
+pytest-mock = "^3.14.0"
+black = "^24.0.0"
+flake8 = "^7.0.0"
+mypy = "^1.8.0"
+isort = "^5.13.0"
+toml = "^0.10.2"
+
+[tool.poetry.scripts]
+# Poetry scripts must point at a callable ("module:function"); a bare module name fails at install time.
+test = "pytest:main"
+tests = "pytest:main"
+
+[tool.pytest.ini_options]
+minversion = "8.0"
+addopts = [
+    "-ra",
+    "--strict-markers",
+    "--strict-config",
+    "--cov=act",
+    "--cov=teleop",
+    "--cov=scripts",
+    "--cov-branch",
+    "--cov-report=html:htmlcov",
+    "--cov-report=xml:coverage.xml",
+    "--cov-report=term-missing",
+    "--cov-fail-under=80",
+    "-vv",
+    "--tb=short",
+    "--maxfail=1",
+]
+testpaths = ["tests"]
+python_files = ["test_*.py", "*_test.py"]
+python_classes = ["Test*"]
+python_functions = ["test_*"]
+markers = [
+    "unit: marks tests as unit tests (fast, isolated)",
+    "integration: marks tests as integration tests (may require external resources)",
+    "slow: marks tests as slow running",
+]
+filterwarnings = [
+    "error",
+    "ignore::UserWarning",
+    "ignore::DeprecationWarning",
+]
+
+[tool.coverage.run]
+source = ["act", "teleop", "scripts"]
+branch = true
+omit = [
+    "*/tests/*",
+    "*/test_*",
+    "*/__pycache__/*",
+    "*/site-packages/*",
+    "*/dist-packages/*",
+    "*/venv/*",
+    "*/.venv/*",
+    "*/migrations/*",
+    "*/__init__.py",
+]
+
+[tool.coverage.report]
+exclude_lines = [
+    "pragma: no cover",
+    "def __repr__",
+    "def __str__",
+    "raise AssertionError",
+    "raise NotImplementedError",
+    "if __name__ == .__main__.:",
+    "if TYPE_CHECKING:",
+    "class .*\\bProtocol\\):",
+    "@(abc\\.)?abstractmethod",
+]
+precision = 2
+show_missing = true
+skip_covered = false
+fail_under = 80
+
+[tool.coverage.html]
+directory = "htmlcov"
+
+[tool.coverage.xml]
+output = "coverage.xml"
+
+[tool.black]
+line-length = 88
+target-version = ['py38', 'py39', 'py310', 'py311']
+include = '\.pyi?$'
+extend-exclude = '''
+/(
+  # directories
+  \.eggs
+  | \.git
+  | \.hg
+  | \.mypy_cache
+  | \.tox
+  | \.venv
+  | build
+  | dist
+  | assets
+  | img
+)/
+'''
+
+[tool.isort]
+profile = "black"
+line_length = 88
+multi_line_output = 3
+include_trailing_comma = true
+force_grid_wrap = 0
+use_parentheses = true
+ensure_newline_before_comments = true
+
+[tool.mypy]
+python_version = "3.8"
+warn_return_any = true
+warn_unused_configs = true
+disallow_untyped_defs = false
+ignore_missing_imports = true
+exclude = [
+    "tests/",
+    "build/",
+    "dist/",
+    "assets/",
+    "img/",
+]
+
+[build-system]
+requires = ["poetry-core>=1.0.0"]
+build-backend = "poetry.core.masonry.api"
\ No newline at end of file
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000..1043d44
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,250 @@
+"""
+Shared pytest fixtures and configuration for all tests.
+"""
+import json
+import os
+import shutil
+import sys
+import tempfile
+from pathlib import Path
+from typing import Dict, Generator, Any
+from unittest.mock import MagicMock, Mock
+
+import numpy as np
+import pytest
+import torch
+import yaml
+
+
+@pytest.fixture
+def temp_dir() -> Generator[Path, None, None]:
+    """Create a temporary directory for test files."""
+    temp_path = tempfile.mkdtemp()
+    yield Path(temp_path)
+    shutil.rmtree(temp_path)
+
+
+@pytest.fixture
+def sample_config() -> Dict[str, Any]:
+    """Provide a sample configuration dictionary."""
+    return {
+        "model": {
+            "name": "test_model",
+            "hidden_dim": 256,
+            "num_layers": 3,
+            "dropout": 0.1,
+        },
+        "training": {
+            "batch_size": 32,
+            "learning_rate": 1e-4,
+            "epochs": 100,
+            "device": "cpu",
+        },
+        "data": {
+            "train_path": "/path/to/train",
+            "val_path": "/path/to/val",
+            "test_path": "/path/to/test",
+        },
+    }
+
+
+@pytest.fixture
+def sample_yaml_config(temp_dir: Path, sample_config: Dict[str, Any]) -> Path:
+    """Create a sample YAML configuration file."""
+    config_path = temp_dir / "config.yaml"
+    with open(config_path, "w") as f:
+        yaml.dump(sample_config, f)
+    return config_path
+
+
+@pytest.fixture
+def sample_json_config(temp_dir: Path, sample_config: Dict[str, Any]) -> Path:
+    """Create a sample JSON configuration file."""
+    config_path = temp_dir / "config.json"
+    with open(config_path, "w") as f:
+        json.dump(sample_config, f)
+    return config_path
+
+
+@pytest.fixture
+def sample_numpy_array() -> np.ndarray:
+    """Provide a sample numpy array for testing."""
+    return np.random.randn(10, 20, 3).astype(np.float32)
+
+
+@pytest.fixture
+def sample_torch_tensor() -> torch.Tensor:
+    """Provide a sample PyTorch tensor for testing."""
+    return torch.randn(8, 3, 224, 224)
+
+
+@pytest.fixture
+def mock_model() -> MagicMock:
+    """Provide a mock PyTorch model."""
+    model = MagicMock()
+    model.forward = MagicMock(return_value=torch.randn(8, 10))
+    model.parameters = MagicMock(return_value=[torch.randn(10, 10)])
+    model.train = MagicMock()
+    model.eval = MagicMock()
+    model.to = MagicMock(return_value=model)
+    return model
+
+
+@pytest.fixture
+def mock_dataset() -> MagicMock:
+    """Provide a mock PyTorch dataset."""
+    dataset = MagicMock()
+    dataset.__len__ = MagicMock(return_value=100)
+    dataset.__getitem__ = MagicMock(
+        return_value=(torch.randn(3, 224, 224), torch.tensor(1))
+    )
+    return dataset
+
+
+@pytest.fixture
+def sample_h5_data(temp_dir: Path) -> Path:
+    """Create a sample HDF5 file with test data."""
+    h5py = pytest.importorskip("h5py")
+    h5_path = temp_dir / "test_data.h5"
+
+    with h5py.File(h5_path, "w") as f:
+        # Create sample datasets
+        f.create_dataset("observations", data=np.random.randn(100, 10))
+        f.create_dataset("actions", data=np.random.randn(100, 5))
+        f.create_dataset("rewards", data=np.random.randn(100))
+
+        # Create groups with nested data
+        grp = f.create_group("metadata")
+        grp.attrs["version"] = "1.0"
+        grp.attrs["description"] = "Test HDF5 file"
+
+    return h5_path
+
+
+@pytest.fixture
+def mock_robot_state() -> Dict[str, Any]:
+    """Provide a mock robot state dictionary."""
+    return {
+        "joint_positions": np.random.randn(19).tolist(),
+        "joint_velocities": np.random.randn(19).tolist(),
+        "joint_torques": np.random.randn(19).tolist(),
+        "gripper_position": [0.5, 0.5],
+        "timestamp": 1234567890.123,
+        "is_active": True,
+    }
+
+
+@pytest.fixture
+def mock_camera_image() -> np.ndarray:
+    """Provide a mock camera image."""
+    return np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
+
+
+@pytest.fixture
+def mock_dynamixel_driver() -> MagicMock:
+    """Provide a mock Dynamixel driver."""
+    driver = MagicMock()
+    driver.connect = MagicMock(return_value=True)
+    driver.disconnect = MagicMock()
+    driver.read_position = MagicMock(return_value=2048)
+    driver.write_position = MagicMock(return_value=True)
+    driver.set_torque_enable = MagicMock(return_value=True)
+    return driver
+
+
+@pytest.fixture
+def env_vars() -> Generator[Dict[str, str], None, None]:
+    """Temporarily set environment variables for testing."""
+    original_env = os.environ.copy()
+    test_env = {
+        "TEST_VAR": "test_value",
+        "MODEL_PATH": "/test/model/path",
+        "DATA_DIR": "/test/data",
+    }
+
+    os.environ.update(test_env)
+    yield test_env
+
+    # Restore original environment
+    os.environ.clear()
+    os.environ.update(original_env)
+
+
+@pytest.fixture
+def mock_websocket() -> MagicMock:
+    """Provide a mock WebSocket connection."""
+    ws = MagicMock()
+    ws.send = MagicMock()
+    ws.recv = MagicMock(return_value='{"type": "test", "data": "test_data"}')
+    ws.close = MagicMock()
+    ws.closed = False
+    return ws
+
+
+@pytest.fixture
+def sample_episode_data() -> Dict[str, Any]:
+    """Provide sample episode data for testing."""
+    return {
+        "observations": [np.random.randn(10).tolist() for _ in range(50)],
+        "actions": [np.random.randn(5).tolist() for _ in range(50)],
+        "rewards": np.random.randn(50).tolist(),
+        "done": [False] * 49 + [True],
+        "info": {
+            "episode_length": 50,
+            "total_reward": 42.0,
+            "success": True,
+        },
+    }
+
+
+@pytest.fixture(autouse=True)
+def reset_torch_seed():
+    """Reset PyTorch random seed for reproducible tests."""
+    torch.manual_seed(42)
+    np.random.seed(42)
+    yield
+    # No cleanup needed
+
+
+@pytest.fixture
+def mock_wandb(monkeypatch):
+    """Mock wandb for tests that use it."""
+    mock_wandb_module = MagicMock()
+    mock_wandb_module.init = MagicMock()
+    mock_wandb_module.log = MagicMock()
+    mock_wandb_module.finish = MagicMock()
+    mock_wandb_module.config = {}
+    # Swap the module in sys.modules so "import wandb" resolves to the mock.
+    monkeypatch.setitem(sys.modules, "wandb", mock_wandb_module)
+    return mock_wandb_module
+
+
+# Markers for different test types
+def pytest_configure(config):
+    """Configure pytest with custom markers."""
+    config.addinivalue_line(
+        "markers", "unit: mark test as a unit test"
+    )
+    config.addinivalue_line(
+        "markers", "integration: mark test as an integration test"
+    )
+    config.addinivalue_line(
+        "markers", "slow: mark test as slow running"
+    )
+
+
+# Skip slow tests by default unless --runslow is passed
+def pytest_addoption(parser):
+    """Add custom command line options."""
+    parser.addoption(
+        "--runslow", action="store_true", default=False, help="run slow tests"
+    )
+
+
+def pytest_collection_modifyitems(config, items):
+    """Modify test collection to skip slow tests by default."""
+    if not config.getoption("--runslow"):
+        skip_slow = pytest.mark.skip(reason="need --runslow option to run")
+        for item in items:
+            if "slow" in item.keywords:
+                item.add_marker(skip_slow)
\ No newline at end of file
diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_infrastructure_validation.py b/tests/test_infrastructure_validation.py
new file mode 100644
index 0000000..ce993bb
--- /dev/null
+++ b/tests/test_infrastructure_validation.py
@@ -0,0 +1,181 @@
+"""
+Validation tests to ensure the testing infrastructure is properly set up.
+"""
+import sys
+from pathlib import Path
+
+import pytest
+
+
+class TestInfrastructureValidation:
+    """Tests to validate the testing infrastructure setup."""
+
+    @pytest.mark.unit
+    def test_pytest_is_installed(self):
+        """Test that pytest is properly installed."""
+        assert "pytest" in sys.modules
+
+    @pytest.mark.unit
+    def test_project_structure_exists(self):
+        """Test that the basic project structure exists."""
+        workspace_root = Path(__file__).parent.parent
+
+        # Check main directories
+        assert workspace_root.exists()
+        assert (workspace_root / "act").exists()
+        assert (workspace_root / "teleop").exists()
+        assert (workspace_root / "scripts").exists()
+        assert (workspace_root / "tests").exists()
+
+        # Check test structure
+        assert (workspace_root / "tests" / "unit").exists()
+        assert (workspace_root / "tests" / "integration").exists()
+        assert (workspace_root / "tests" / "conftest.py").exists()
+
+    @pytest.mark.unit
+    def test_fixtures_are_available(self, temp_dir, sample_config, mock_model):
+        """Test that conftest fixtures are available."""
+        # Test temp_dir fixture
+        assert temp_dir.exists()
+        assert temp_dir.is_dir()
+
+        # Test sample_config fixture
+        assert isinstance(sample_config, dict)
+        assert "model" in sample_config
+        assert "training" in sample_config
+
+        # Test mock_model fixture
+        assert hasattr(mock_model, "forward")
+        assert hasattr(mock_model, "parameters")
+
+    @pytest.mark.unit
+    def test_coverage_is_configured(self):
+        """Test that coverage is properly configured."""
+        try:
+            import coverage
+            assert coverage.__version__
+        except ImportError:
+            pytest.fail("Coverage module not installed")
+
+    @pytest.mark.unit
+    def test_markers_are_defined(self, request):
+        """Test that custom markers are properly defined."""
+        markers = request.config.getini("markers")
+        markers_str = str(markers)
+        assert "unit:" in markers_str
+        assert "integration:" in markers_str
+        assert "slow:" in markers_str
+
+    @pytest.mark.integration
+    def test_integration_marker_works(self):
+        """Test that integration marker works correctly."""
+        assert True
+
+    @pytest.mark.slow
+    def test_slow_marker_works(self):
+        """Test that slow marker works correctly."""
+        # This test should be skipped by default unless --runslow is passed
+        import time
+        time.sleep(0.1)
+        assert True
+
+    @pytest.mark.unit
+    def test_numpy_fixtures(self, sample_numpy_array):
+        """Test numpy-related fixtures."""
+        import numpy as np
+
+        assert isinstance(sample_numpy_array, np.ndarray)
+        assert sample_numpy_array.shape == (10, 20, 3)
+        assert sample_numpy_array.dtype == np.float32
+
+    @pytest.mark.unit
+    def test_torch_fixtures(self, sample_torch_tensor, mock_dataset):
+        """Test PyTorch-related fixtures."""
+        import torch
+
+        assert isinstance(sample_torch_tensor, torch.Tensor)
+        assert sample_torch_tensor.shape == (8, 3, 224, 224)
+
+        # Test mock dataset
+        assert len(mock_dataset) == 100
+        data, label = mock_dataset[0]
+        assert isinstance(data, torch.Tensor)
+        assert isinstance(label, torch.Tensor)
+
+    @pytest.mark.unit
+    def test_file_fixtures(self, sample_yaml_config, sample_json_config):
+        """Test file creation fixtures."""
+        assert sample_yaml_config.exists()
+        assert sample_yaml_config.suffix == ".yaml"
+
+        assert sample_json_config.exists()
+        assert sample_json_config.suffix == ".json"
+
+    @pytest.mark.unit
+    def test_environment_fixture(self, env_vars):
+        """Test environment variable fixture."""
+        import os
+
+        assert os.environ.get("TEST_VAR") == "test_value"
+        assert os.environ.get("MODEL_PATH") == "/test/model/path"
+        assert os.environ.get("DATA_DIR") == "/test/data"
+
+    @pytest.mark.unit
+    @pytest.mark.parametrize("value,expected", [
+        (1, 1),
+        (2, 4),
+        (3, 9),
+        (4, 16),
+    ])
+    def test_parametrize_works(self, value, expected):
+        """Test that pytest parametrize decorator works."""
+        assert value ** 2 == expected
+
+    @pytest.mark.unit
+    def test_mock_fixtures(self, mock_dynamixel_driver, mock_websocket):
+        """Test mock fixtures are properly configured."""
+        # Test Dynamixel driver mock
+        assert mock_dynamixel_driver.connect() is True
+        assert mock_dynamixel_driver.read_position() == 2048
+
+        # Test WebSocket mock
+        assert mock_websocket.closed is False
+        data = mock_websocket.recv()
+        assert isinstance(data, str)
+        assert "test" in data
+
+
+@pytest.mark.unit
+class TestPoetryIntegration:
+    """Tests to validate Poetry integration."""
+
+    def test_pyproject_toml_exists(self):
+        """Test that pyproject.toml exists."""
+        pyproject_path = Path(__file__).parent.parent / "pyproject.toml"
+        assert pyproject_path.exists()
+
+    def test_pyproject_toml_has_poetry_config(self):
+        """Test that pyproject.toml has Poetry configuration."""
+        import toml
+
+        pyproject_path = Path(__file__).parent.parent / "pyproject.toml"
+        config = toml.load(pyproject_path)
+
+        assert "tool" in config
+        assert "poetry" in config["tool"]
+        assert "dependencies" in config["tool"]["poetry"]
+        assert "group" in config["tool"]["poetry"]
+        assert "dev" in config["tool"]["poetry"]["group"]
+
+    def test_test_scripts_are_defined(self):
+        """Test that test scripts are defined in pyproject.toml."""
+        import toml
+
+        pyproject_path = Path(__file__).parent.parent / "pyproject.toml"
+        config = toml.load(pyproject_path)
+
+        assert "scripts" in config["tool"]["poetry"]
+        assert "test" in config["tool"]["poetry"]["scripts"]
+        assert "tests" in config["tool"]["poetry"]["scripts"]
+        assert config["tool"]["poetry"]["scripts"]["test"] == "pytest:main"
+        assert config["tool"]["poetry"]["scripts"]["tests"] == "pytest:main"
\ No newline at end of file
diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py
new file mode 100644
index 0000000..e69de29
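
For reference, a minimal sketch of how the fixtures and markers introduced above would be used from a test module. This file is not part of the diff: the path tests/unit/test_example.py and both test functions are hypothetical, while the fixture names, markers, and --runslow behavior come from tests/conftest.py and pyproject.toml as added in this change.

# tests/unit/test_example.py -- hypothetical usage sketch, not part of the diff
import pytest


@pytest.mark.unit
def test_mock_robot_state_has_19_joints(mock_robot_state):
    # mock_robot_state is defined in tests/conftest.py above
    assert len(mock_robot_state["joint_positions"]) == 19
    assert mock_robot_state["is_active"] is True


@pytest.mark.slow
def test_expensive_computation():
    # Deselected by pytest_collection_modifyitems unless pytest is run with --runslow
    assert sum(range(1000)) == 499500

With the dev dependencies installed (poetry install), the suite runs via poetry run pytest; adding -m unit selects only the unit-marked tests, and --runslow opts in to the slow ones.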