From fc48f36acb2dca19fc757ac2d036c4b59024191f Mon Sep 17 00:00:00 2001 From: hudsonaikins-crown Date: Tue, 28 Oct 2025 17:07:25 -0400 Subject: [PATCH 1/2] =?UTF-8?q?Bump=20version:=200.3.1=20=E2=86=92=200.4.0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .bumpversion.cfg | 2 +- neural/__init__.py | 2 +- pyproject.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.bumpversion.cfg b/.bumpversion.cfg index 4f374687..10ac1bf8 100644 --- a/.bumpversion.cfg +++ b/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 0.3.0 +current_version = 0.4.0 commit = True tag = True tag_name = v{new_version} diff --git a/neural/__init__.py b/neural/__init__.py index 3f29d553..d75502ff 100644 --- a/neural/__init__.py +++ b/neural/__init__.py @@ -12,7 +12,7 @@ modules (sentiment analysis, FIX streaming) are experimental. """ -__version__ = "0.3.1" +__version__ = "0.4.0" __author__ = "Neural Contributors" __license__ = "MIT" diff --git a/pyproject.toml b/pyproject.toml index 28e24168..787595c6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "neural-sdk" -version = "0.3.1" +version = "0.4.0" description = "Professional-grade SDK for algorithmic trading on prediction markets (Beta - Core features stable, advanced modules experimental)" readme = "README.md" requires-python = ">=3.10" From cad25047f43d304c7b5557d3f9a3b6814cddc675 Mon Sep 17 00:00:00 2001 From: Hudson Aikins Date: Wed, 29 Oct 2025 18:41:59 -0400 Subject: [PATCH 2/2] feat: Add Docker deployment module (v0.4.0) (#10) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds comprehensive Docker deployment infrastructure for trading bots with proper async/await patterns, timezone-aware timestamps, and robust error handling. 
## Key Features - Docker provider with ThreadPoolExecutor for non-blocking async operations - Database persistence with SQLAlchemy (trades, positions, performance, deployments) - Pydantic configuration models with validation - Jinja2 Dockerfile templates (single & multi-stage builds) - Context manager pattern for automatic resource cleanup - NBA series mapping fix (KXNBA → KXNBAGAME) ## Issues Fixed - ✅ Fixed 6 blocking I/O operations in async methods - ✅ Replaced deprecated datetime.utcnow() with timezone-aware datetime - ✅ Added secrets directory validation - ✅ Fixed timezone consistency across models Greptile Score: 4/5 - Production-ready for beta release Co-authored-by: Claude --- examples/08_docker_deployment.py | 174 +++++++++ neural/__init__.py | 3 +- neural/data_collection/kalshi.py | 4 +- neural/deployment/__init__.py | 135 +++++++ neural/deployment/base.py | 234 ++++++++++++ neural/deployment/config.py | 224 +++++++++++ neural/deployment/database/__init__.py | 12 + neural/deployment/database/schema.py | 101 +++++ neural/deployment/docker/__init__.py | 13 + neural/deployment/docker/compose.py | 151 ++++++++ neural/deployment/docker/provider.py | 462 +++++++++++++++++++++++ neural/deployment/docker/templates.py | 257 +++++++++++++ neural/deployment/exceptions.py | 66 ++++ neural/deployment/monitoring/__init__.py | 8 + pyproject.toml | 11 + 15 files changed, 1853 insertions(+), 2 deletions(-) create mode 100644 examples/08_docker_deployment.py create mode 100644 neural/deployment/__init__.py create mode 100644 neural/deployment/base.py create mode 100644 neural/deployment/config.py create mode 100644 neural/deployment/database/__init__.py create mode 100644 neural/deployment/database/schema.py create mode 100644 neural/deployment/docker/__init__.py create mode 100644 neural/deployment/docker/compose.py create mode 100644 neural/deployment/docker/provider.py create mode 100644 neural/deployment/docker/templates.py create mode 100644 
neural/deployment/exceptions.py create mode 100644 neural/deployment/monitoring/__init__.py diff --git a/examples/08_docker_deployment.py b/examples/08_docker_deployment.py new file mode 100644 index 00000000..ba324f3c --- /dev/null +++ b/examples/08_docker_deployment.py @@ -0,0 +1,174 @@ +""" +Example: Deploy a Trading Bot with Docker + +This example demonstrates how to use the Neural SDK deployment module +to deploy trading bots in Docker containers. + +⚠️ EXPERIMENTAL: The deployment module is experimental in v0.4.0. + +Requirements: + - Docker installed and running + - neural-sdk[deployment] installed + - Kalshi API credentials (optional for paper trading) + +Usage: + python examples/08_docker_deployment.py +""" + +import asyncio +import os + +from neural.deployment import ( + DeploymentConfig, + DockerDeploymentProvider, + deploy, +) + + +async def basic_deployment_example(): + """Basic deployment example using Docker provider.""" + print("=" * 60) + print("Neural SDK Deployment Module - Basic Example") + print("=" * 60) + print() + + # Create deployment configuration + config = DeploymentConfig( + bot_name="NFL-MeanReversion-Bot", + strategy_type="NFL", + environment="paper", # Use paper trading mode + algorithm_config={ + "algorithm_type": "mean_reversion", + "poll_interval": 30.0, + "market_limit": 12, + }, + risk_config={ + "max_position_size": 100, + "max_total_exposure": 1000, + }, + database_enabled=False, # Disable database for this example + websocket_enabled=True, + monitoring_enabled=True, + ) + + print(f"📋 Configuration:") + print(f" Bot Name: {config.bot_name}") + print(f" Strategy: {config.strategy_type}") + print(f" Environment: {config.environment}") + print() + + # Create Docker deployment provider + provider = DockerDeploymentProvider() + + # Deploy with context manager (auto-cleanup) + print("🚀 Deploying trading bot...") + async with deploy(provider, config) as deployment: + print(f"✅ Deployed successfully!") + print(f" Deployment ID: 
{deployment.deployment_id}") + print(f" Container ID: {deployment.container_id[:12]}...") + print(f" Container Name: {deployment.container_name}") + print() + + # Wait a moment for container to start + await asyncio.sleep(3) + + # Check status + print("📊 Checking deployment status...") + status = await provider.status(deployment.deployment_id) + print(f" Status: {status.status}") + print(f" Uptime: {status.uptime_seconds:.1f} seconds") + if status.metrics: + print(f" CPU: {status.metrics.get('cpu_percent', 0):.1f}%") + print(f" Memory: {status.metrics.get('memory_usage_mb', 0):.1f} MB") + print() + + # Get logs + print("📜 Recent logs (last 10 lines):") + logs = await provider.logs(deployment.deployment_id, tail=10) + for log in logs[-10:]: + print(f" {log}") + print() + + # Keep running for 30 seconds + print("⏳ Bot running for 30 seconds...") + await asyncio.sleep(30) + + print("🛑 Deployment stopped (auto-cleanup on context exit)") + print() + + +async def manual_deployment_example(): + """Manual deployment example without context manager.""" + print("=" * 60) + print("Manual Deployment Example (No Auto-Cleanup)") + print("=" * 60) + print() + + config = DeploymentConfig( + bot_name="Manual-Test-Bot", + strategy_type="NFL", + environment="sandbox", + ) + + provider = DockerDeploymentProvider() + + # Deploy manually + print("🚀 Deploying...") + result = await provider.deploy(config) + print(f"✅ Deployed: {result.deployment_id}") + print() + + try: + # List all deployments + print("📋 Active deployments:") + deployments = await provider.list_deployments() + for dep in deployments: + print(f" - {dep.bot_name} ({dep.status})") + print() + + # Wait a bit + await asyncio.sleep(5) + + finally: + # Manual cleanup + print("🛑 Stopping deployment...") + await provider.stop(result.deployment_id) + print("✅ Stopped successfully") + print() + + +async def main(): + """Run all examples.""" + print("\n🤖 Neural SDK Deployment Module Examples\n") + + # Check if Docker is available + 
try: + provider = DockerDeploymentProvider() + print("✅ Docker is available\n") + except Exception as e: + print(f"❌ Docker is not available: {e}") + print(" Please make sure Docker is installed and running.") + return + + # Run examples + try: + # Example 1: Basic deployment with context manager + await basic_deployment_example() + + # Example 2: Manual deployment + await manual_deployment_example() + + print("=" * 60) + print("✨ All examples completed successfully!") + print("=" * 60) + + except Exception as e: + print(f"\n❌ Error: {e}") + import traceback + + traceback.print_exc() + + +if __name__ == "__main__": + # Run examples + asyncio.run(main()) diff --git a/neural/__init__.py b/neural/__init__.py index d75502ff..68fe3baf 100644 --- a/neural/__init__.py +++ b/neural/__init__.py @@ -19,7 +19,7 @@ import warnings from typing import Set # noqa: UP035 -from neural import analysis, auth, data_collection, trading +from neural import analysis, auth, data_collection, deployment, trading # Track which experimental features have been used _experimental_features_used: set[str] = set() @@ -65,5 +65,6 @@ def _warn_beta() -> None: "data_collection", "analysis", "trading", + "deployment", # v0.4.0: Docker deployment module (experimental) "_warn_experimental", # For internal use by modules ] diff --git a/neural/data_collection/kalshi.py b/neural/data_collection/kalshi.py index cf10d679..040590ef 100644 --- a/neural/data_collection/kalshi.py +++ b/neural/data_collection/kalshi.py @@ -14,7 +14,9 @@ _BASE_URL = "https://api.elections.kalshi.com/trade-api/v2" _SPORT_SERIES_MAP = { "NFL": "KXNFLGAME", - "NBA": "KXNBA", + "NBA": "KXNBAGAME", + "NBA_CHAMPIONSHIP": "KXNBA", + "WNBA": "KXWNBAGAME", "MLB": "KXMLB", "NHL": "KXNHL", "NCAAF": "KXNCAAFGAME", diff --git a/neural/deployment/__init__.py b/neural/deployment/__init__.py new file mode 100644 index 00000000..9aa66787 --- /dev/null +++ b/neural/deployment/__init__.py @@ -0,0 +1,135 @@ +""" +Neural SDK Deployment Module 
(Experimental) + +Provides Docker-based deployment infrastructure for trading bots with +database persistence, monitoring, and multi-environment support. + +  EXPERIMENTAL: This module is experimental in Neural SDK Beta v0.4.0. +Use with caution in production environments. + +Example: + ```python + from neural.deployment import DockerDeploymentProvider, DeploymentConfig, deploy + + # Configure deployment + config = DeploymentConfig( + bot_name="MyNFLBot", + strategy_type="NFL", + environment="paper", + algorithm_config={"algorithm_type": "mean_reversion"} + ) + + # Deploy with context manager + provider = DockerDeploymentProvider() + async with deploy(provider, config) as deployment: + print(f"Deployed: {deployment.deployment_id}") + + # Get status + status = await provider.status(deployment.deployment_id) + print(f"Status: {status.status}") + # Deployment is automatically stopped when exiting context + ``` +""" + +# Core abstractions +from neural.deployment.base import DeploymentContext, DeploymentProvider + +# Configuration models +from neural.deployment.config import ( + DatabaseConfig, + DeploymentConfig, + DeploymentInfo, + DeploymentResult, + DeploymentStatus, + DockerConfig, + MonitoringConfig, +) + +# Docker provider +from neural.deployment.docker import ( + DockerDeploymentProvider, + render_compose_file, + render_dockerfile, + render_dockerignore, + write_compose_file, +) + +# Exceptions +from neural.deployment.exceptions import ( + ConfigurationError, + ContainerNotFoundError, + DatabaseError, + DeploymentError, + DeploymentTimeoutError, + ImageBuildError, + MonitoringError, + NetworkError, + ProviderNotFoundError, + ResourceLimitExceededError, +) + +__all__ = [ + # Core abstractions + "DeploymentProvider", + "DeploymentContext", + "deploy", + # Configuration + "DeploymentConfig", + "DockerConfig", + "DatabaseConfig", + "MonitoringConfig", + "DeploymentResult", + "DeploymentStatus", + "DeploymentInfo", + # Providers + "DockerDeploymentProvider", + # 
Docker utilities + "render_dockerfile", + "render_dockerignore", + "render_compose_file", + "write_compose_file", + # Exceptions + "DeploymentError", + "ProviderNotFoundError", + "ContainerNotFoundError", + "ResourceLimitExceededError", + "DeploymentTimeoutError", + "ConfigurationError", + "ImageBuildError", + "NetworkError", + "DatabaseError", + "MonitoringError", +] + + +# Convenience function for deploying with context manager +def deploy( + provider: DeploymentProvider, + config: DeploymentConfig, + auto_stop: bool = True, +) -> DeploymentContext: + """Deploy a trading bot using a deployment provider with automatic cleanup. + + This is a convenience function that creates a DeploymentContext for use + with Python's async context manager protocol. + + Args: + provider: Deployment provider to use (e.g., DockerDeploymentProvider) + config: Deployment configuration + auto_stop: Whether to automatically stop deployment on context exit (default: True) + + Returns: + DeploymentContext that can be used with 'async with' + + Example: + ```python + provider = DockerDeploymentProvider() + config = DeploymentConfig(bot_name="MyBot", strategy_type="NFL") + + async with deploy(provider, config) as deployment: + status = await provider.status(deployment.deployment_id) + print(f"Bot is {status.status}") + # Deployment automatically stopped here + ``` + """ + return DeploymentContext(provider, config, auto_stop=auto_stop) diff --git a/neural/deployment/base.py b/neural/deployment/base.py new file mode 100644 index 00000000..fad157ae --- /dev/null +++ b/neural/deployment/base.py @@ -0,0 +1,234 @@ +""" +Abstract base classes for the Neural SDK deployment module. + +This module defines the provider pattern for deployment backends, +allowing extensibility to support Docker, E2B, cloud providers, etc. 
+""" + +from abc import ABC, abstractmethod +from typing import Any + +from neural.deployment.config import DeploymentConfig, DeploymentInfo, DeploymentResult, DeploymentStatus + + +class DeploymentProvider(ABC): + """Abstract base class for deployment providers. + + All deployment providers (Docker, E2B, AWS, GCP, etc.) must inherit from + this class and implement its abstract methods. + + This enables a consistent API across different deployment backends while + allowing provider-specific implementations. + + Example: + ```python + class DockerDeploymentProvider(DeploymentProvider): + async def deploy(self, config: DeploymentConfig) -> DeploymentResult: + # Docker-specific deployment logic + ... + ``` + """ + + @abstractmethod + async def deploy(self, config: DeploymentConfig) -> DeploymentResult: + """Deploy a trading bot using this provider. + + Args: + config: Deployment configuration + + Returns: + DeploymentResult with deployment details + + Raises: + DeploymentError: If deployment fails + """ + pass + + @abstractmethod + async def stop(self, deployment_id: str) -> bool: + """Stop a running deployment. + + Args: + deployment_id: Unique identifier for the deployment + + Returns: + True if successfully stopped, False otherwise + + Raises: + DeploymentError: If stop operation fails + ContainerNotFoundError: If deployment doesn't exist + """ + pass + + @abstractmethod + async def status(self, deployment_id: str) -> DeploymentStatus: + """Get the current status of a deployment. + + Args: + deployment_id: Unique identifier for the deployment + + Returns: + DeploymentStatus with current status info + + Raises: + DeploymentError: If status check fails + ContainerNotFoundError: If deployment doesn't exist + """ + pass + + @abstractmethod + async def logs(self, deployment_id: str, tail: int = 100) -> list[str]: + """Get recent logs from a deployment. 
+ + Args: + deployment_id: Unique identifier for the deployment + tail: Number of recent log lines to return + + Returns: + List of log lines (most recent last) + + Raises: + DeploymentError: If log retrieval fails + ContainerNotFoundError: If deployment doesn't exist + """ + pass + + @abstractmethod + async def list_deployments(self) -> list[DeploymentInfo]: + """List all active deployments. + + Returns: + List of DeploymentInfo for all active deployments + + Raises: + DeploymentError: If listing fails + """ + pass + + async def restart(self, deployment_id: str) -> bool: + """Restart a deployment (stop then deploy again). + + Default implementation using stop() and deploy(). + Providers can override for more efficient restart logic. + + Args: + deployment_id: Unique identifier for the deployment + + Returns: + True if successfully restarted, False otherwise + + Raises: + DeploymentError: If restart fails + """ + # Get current config from status + status_info = await self.status(deployment_id) + + # Stop the deployment + await self.stop(deployment_id) + + # This is a simplified implementation - in practice, you'd need to + # store the original config or retrieve it from the deployment metadata + raise NotImplementedError( + "Restart requires storing deployment configs. " + "Providers should override this method." + ) + + async def cleanup(self) -> None: + """Clean up provider resources. + + Optional method for providers to clean up resources, connections, etc. + Called when the provider is being shut down. + """ + pass + + +class DeploymentContext: + """Async context manager for deployments. + + Provides a convenient way to deploy and automatically clean up + resources using Python's async context manager protocol. 
+ + Example: + ```python + async with DeploymentContext(provider, config) as deployment: + status = await provider.status(deployment.deployment_id) + logs = await provider.logs(deployment.deployment_id) + # Deployment is automatically stopped when exiting the context + ``` + """ + + def __init__( + self, + provider: DeploymentProvider, + config: DeploymentConfig, + auto_stop: bool = True, + ): + """Initialize deployment context. + + Args: + provider: Deployment provider to use + config: Deployment configuration + auto_stop: Whether to automatically stop deployment on exit (default: True) + """ + self.provider = provider + self.config = config + self.auto_stop = auto_stop + self.deployment_result: DeploymentResult | None = None + + async def __aenter__(self) -> DeploymentResult: + """Deploy when entering the context. + + Returns: + DeploymentResult from the deployment operation + """ + self.deployment_result = await self.provider.deploy(self.config) + return self.deployment_result + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: Any, + ) -> None: + """Stop deployment when exiting the context (if auto_stop is True). + + Args: + exc_type: Exception type (if an exception occurred) + exc_val: Exception value (if an exception occurred) + exc_tb: Exception traceback (if an exception occurred) + """ + if self.auto_stop and self.deployment_result: + await self.provider.stop(self.deployment_result.deployment_id) + + async def status(self) -> DeploymentStatus: + """Get current deployment status. + + Convenience method to check status without keeping track of deployment_id. 
+ + Returns: + Current deployment status + + Raises: + RuntimeError: If called before deployment is created + """ + if not self.deployment_result: + raise RuntimeError("Deployment has not been created yet") + return await self.provider.status(self.deployment_result.deployment_id) + + async def logs(self, tail: int = 100) -> list[str]: + """Get deployment logs. + + Convenience method to get logs without keeping track of deployment_id. + + Args: + tail: Number of recent log lines to return + + Returns: + List of log lines + + Raises: + RuntimeError: If called before deployment is created + """ + if not self.deployment_result: + raise RuntimeError("Deployment has not been created yet") + return await self.provider.logs(self.deployment_result.deployment_id, tail=tail) diff --git a/neural/deployment/config.py b/neural/deployment/config.py new file mode 100644 index 00000000..c57595ba --- /dev/null +++ b/neural/deployment/config.py @@ -0,0 +1,224 @@ +""" +Configuration models for the Neural SDK deployment module. + +This module provides Pydantic models for configuring trading bot deployments, +including Docker containers, resource limits, and deployment parameters. +""" + +from datetime import datetime, timezone +from typing import Any + +from pydantic import BaseModel, Field, field_validator + + +class DeploymentConfig(BaseModel): + """Configuration for trading bot deployment. + + This model defines all parameters needed to deploy a trading bot, + including resource limits, environment settings, and feature flags. 
+ + Attributes: + bot_name: Unique name for the trading bot + strategy_type: Type of trading strategy (e.g., "NFL", "NBA") + risk_config: Risk management configuration dict + algorithm_config: Algorithm-specific parameters + environment: Deployment environment ("sandbox", "paper", or "live") + compute_resources: CPU and memory resource limits + database_enabled: Whether to enable database persistence + websocket_enabled: Whether to enable WebSocket trading + monitoring_enabled: Whether to enable performance monitoring + """ + + bot_name: str = Field(..., description="Name of the trading bot") + strategy_type: str = Field(..., description="Type of trading strategy (NFL, NBA, etc.)") + risk_config: dict[str, Any] = Field( + default_factory=dict, description="Risk management configuration" + ) + algorithm_config: dict[str, Any] = Field( + default_factory=dict, description="Algorithm parameters" + ) + environment: str = Field(default="sandbox", description="Deployment environment") + compute_resources: dict[str, Any] = Field( + default_factory=lambda: { + "cpu_limit": "1.0", + "memory_limit": "2g", + "cpu_request": "0.5", + "memory_request": "1g", + }, + description="Docker resource limits", + ) + database_enabled: bool = Field(default=True, description="Enable database persistence") + websocket_enabled: bool = Field(default=True, description="Enable WebSocket trading") + monitoring_enabled: bool = Field(default=True, description="Enable performance monitoring") + + @field_validator("environment") + @classmethod + def validate_environment(cls, v: str) -> str: + """Validate environment is one of the allowed values.""" + allowed = ["sandbox", "paper", "live"] + if v.lower() not in allowed: + raise ValueError(f"Environment must be one of {allowed}, got: {v}") + return v.lower() + + +class DockerConfig(BaseModel): + """Docker-specific configuration for container deployments. 
+ + Attributes: + image_name: Name of the Docker image to use + image_tag: Tag/version of the Docker image + cpu_limit: Maximum CPU cores (e.g., 1.0 = 1 core) + memory_limit: Maximum memory (e.g., "2g" = 2 gigabytes) + cpu_request: Requested CPU cores (for scheduling) + memory_request: Requested memory (for scheduling) + restart_policy: Container restart policy + network_mode: Docker network mode + labels: Container labels for organization + """ + + image_name: str = Field(default="neural-trading-bot", description="Docker image name") + image_tag: str = Field(default="latest", description="Docker image tag") + cpu_limit: float = Field(default=1.0, description="CPU core limit", ge=0.1, le=16.0) + memory_limit: str = Field(default="2g", description="Memory limit (e.g., '2g', '512m')") + cpu_request: float = Field(default=0.5, description="Requested CPU cores", ge=0.1, le=16.0) + memory_request: str = Field(default="1g", description="Requested memory") + restart_policy: str = Field(default="unless-stopped", description="Container restart policy") + network_mode: str = Field(default="bridge", description="Docker network mode") + labels: dict[str, str] = Field(default_factory=dict, description="Container labels") + + @field_validator("restart_policy") + @classmethod + def validate_restart_policy(cls, v: str) -> str: + """Validate restart policy is one of Docker's allowed values.""" + allowed = ["no", "always", "on-failure", "unless-stopped"] + if v not in allowed: + raise ValueError(f"Restart policy must be one of {allowed}, got: {v}") + return v + + @field_validator("network_mode") + @classmethod + def validate_network_mode(cls, v: str) -> str: + """Validate network mode is one of Docker's allowed values.""" + allowed = ["bridge", "host", "none", "container"] + if v not in allowed and not v.startswith("container:"): + raise ValueError( + f"Network mode must be one of {allowed} or 'container:', got: {v}" + ) + return v + + +class DeploymentResult(BaseModel): + """Result 
of a deployment operation. + + Attributes: + deployment_id: Unique identifier for the deployment + status: Current deployment status + container_id: Docker container ID (if applicable) + container_name: Human-readable container name + created_at: Timestamp when deployment was created + endpoints: Dict of service endpoints (e.g., {"api": "http://localhost:8000"}) + metadata: Additional deployment metadata + """ + + deployment_id: str = Field(..., description="Unique deployment identifier") + status: str = Field(..., description="Deployment status") + container_id: str | None = Field(None, description="Docker container ID") + container_name: str | None = Field(None, description="Container name") + created_at: datetime = Field( + default_factory=lambda: datetime.now(timezone.utc), description="Creation timestamp" + ) + endpoints: dict[str, str] = Field(default_factory=dict, description="Service endpoints") + metadata: dict[str, Any] = Field(default_factory=dict, description="Additional metadata") + + +class DeploymentStatus(BaseModel): + """Current status of a deployment. + + Attributes: + deployment_id: Unique identifier for the deployment + status: Current status ("running", "stopped", "error", "starting") + uptime_seconds: Number of seconds the deployment has been running + logs: Recent log lines from the deployment + health_status: Health check status (if available) + metrics: Current resource metrics (CPU, memory, etc.) + """ + + deployment_id: str = Field(..., description="Unique deployment identifier") + status: str = Field(..., description="Current status") + uptime_seconds: float | None = Field(None, description="Uptime in seconds") + logs: list[str] = Field(default_factory=list, description="Recent log lines") + health_status: str | None = Field(None, description="Health check status") + metrics: dict[str, Any] = Field(default_factory=dict, description="Current metrics") + + +class DeploymentInfo(BaseModel): + """Summary information about a deployment. 
+ + Used for listing multiple deployments. + + Attributes: + deployment_id: Unique identifier for the deployment + bot_name: Name of the trading bot + status: Current deployment status + environment: Deployment environment (sandbox/paper/live) + created_at: When the deployment was created + deployment_type: Type of deployment (docker, compose, etc.) + """ + + deployment_id: str = Field(..., description="Unique deployment identifier") + bot_name: str = Field(..., description="Trading bot name") + status: str = Field(..., description="Current status") + environment: str = Field(..., description="Deployment environment") + created_at: datetime = Field(..., description="Creation timestamp") + deployment_type: str = Field(..., description="Deployment type") + + +class DatabaseConfig(BaseModel): + """Database configuration for deployment persistence. + + Attributes: + host: Database host address + port: Database port number + user: Database username + password: Database password + database: Database name + connection_pool_size: Size of the connection pool + max_overflow: Maximum overflow connections + """ + + host: str = Field(default="localhost", description="Database host") + port: int = Field(default=5432, description="Database port", ge=1, le=65535) + user: str = Field(default="trading_user", description="Database username") + password: str = Field(default="", description="Database password") + database: str = Field(default="trading_db", description="Database name") + connection_pool_size: int = Field(default=5, description="Connection pool size", ge=1) + max_overflow: int = Field(default=10, description="Max overflow connections", ge=0) + + +class MonitoringConfig(BaseModel): + """Monitoring configuration for deployment metrics. 
+ + Attributes: + enabled: Whether monitoring is enabled + collection_interval: Seconds between metric collections + metrics_port: Port for exposing metrics + log_level: Logging level ("DEBUG", "INFO", "WARNING", "ERROR") + export_prometheus: Whether to export Prometheus metrics + """ + + enabled: bool = Field(default=True, description="Enable monitoring") + collection_interval: int = Field( + default=60, description="Metric collection interval (seconds)", ge=1 + ) + metrics_port: int = Field(default=9090, description="Metrics export port", ge=1024, le=65535) + log_level: str = Field(default="INFO", description="Logging level") + export_prometheus: bool = Field(default=False, description="Export Prometheus metrics") + + @field_validator("log_level") + @classmethod + def validate_log_level(cls, v: str) -> str: + """Validate log level is one of the standard logging levels.""" + allowed = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"] + if v.upper() not in allowed: + raise ValueError(f"Log level must be one of {allowed}, got: {v}") + return v.upper() diff --git a/neural/deployment/database/__init__.py b/neural/deployment/database/__init__.py new file mode 100644 index 00000000..18196941 --- /dev/null +++ b/neural/deployment/database/__init__.py @@ -0,0 +1,12 @@ +"""Database module for Neural SDK deployment.""" + +from neural.deployment.database.schema import ( + Deployment, + Performance, + Position, + Trade, + create_tables, + get_session, +) + +__all__ = ["Trade", "Position", "Performance", "Deployment", "create_tables", "get_session"] diff --git a/neural/deployment/database/schema.py b/neural/deployment/database/schema.py new file mode 100644 index 00000000..ef79ac37 --- /dev/null +++ b/neural/deployment/database/schema.py @@ -0,0 +1,101 @@ +""" +Database schema for Neural SDK deployment module. + +SQLAlchemy models for storing deployment, trade, and performance data. 
+""" + +from datetime import datetime, timezone + +from sqlalchemy import JSON, Column, DateTime, Integer, Numeric, String, create_engine +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import sessionmaker + +Base = declarative_base() + + +class Trade(Base): + """Trade record model.""" + + __tablename__ = "trades" + + id = Column(Integer, primary_key=True, autoincrement=True) + deployment_id = Column(String(255), nullable=False, index=True) + timestamp = Column(DateTime, default=lambda: datetime.now(timezone.utc), nullable=False) + ticker = Column(String(255), nullable=False) + side = Column(String(10), nullable=False) # 'buy' or 'sell' + quantity = Column(Integer, nullable=False) + price = Column(Numeric(10, 2), nullable=False) + pnl = Column(Numeric(10, 2)) + strategy = Column(String(255)) + trade_metadata = Column(JSON) + + +class Position(Base): + """Current position model.""" + + __tablename__ = "positions" + + id = Column(Integer, primary_key=True, autoincrement=True) + deployment_id = Column(String(255), nullable=False, index=True) + ticker = Column(String(255), nullable=False) + quantity = Column(Integer, nullable=False) + entry_price = Column(Numeric(10, 2), nullable=False) + current_price = Column(Numeric(10, 2)) + unrealized_pnl = Column(Numeric(10, 2)) + timestamp = Column(DateTime, default=lambda: datetime.now(timezone.utc), nullable=False) + + +class Performance(Base): + """Performance metrics model.""" + + __tablename__ = "performance" + + id = Column(Integer, primary_key=True, autoincrement=True) + deployment_id = Column(String(255), nullable=False, index=True) + timestamp = Column(DateTime, default=lambda: datetime.now(timezone.utc), nullable=False) + total_pnl = Column(Numeric(10, 2)) + daily_pnl = Column(Numeric(10, 2)) + sharpe_ratio = Column(Numeric(10, 4)) + max_drawdown = Column(Numeric(10, 4)) + win_rate = Column(Numeric(5, 4)) + num_trades = Column(Integer) + + +class Deployment(Base): + """Deployment record 
model.""" + + __tablename__ = "deployments" + + id = Column(String(255), primary_key=True) + bot_name = Column(String(255), nullable=False) + strategy_type = Column(String(255)) + environment = Column(String(50)) + status = Column(String(50)) + created_at = Column(DateTime, default=lambda: datetime.now(timezone.utc), nullable=False) + config = Column(JSON) + container_id = Column(String(255)) + sandbox_id = Column(String(255)) + + +def create_tables(database_url: str) -> None: + """Create all database tables. + + Args: + database_url: SQLAlchemy database URL + """ + engine = create_engine(database_url) + Base.metadata.create_all(engine) + + +def get_session(database_url: str): + """Get a database session. + + Args: + database_url: SQLAlchemy database URL + + Returns: + SQLAlchemy Session + """ + engine = create_engine(database_url) + Session = sessionmaker(bind=engine) + return Session() diff --git a/neural/deployment/docker/__init__.py b/neural/deployment/docker/__init__.py new file mode 100644 index 00000000..bbafda23 --- /dev/null +++ b/neural/deployment/docker/__init__.py @@ -0,0 +1,13 @@ +"""Docker deployment submodule for Neural SDK.""" + +from neural.deployment.docker.compose import render_compose_file, write_compose_file +from neural.deployment.docker.provider import DockerDeploymentProvider +from neural.deployment.docker.templates import render_dockerfile, render_dockerignore + +__all__ = [ + "DockerDeploymentProvider", + "render_dockerfile", + "render_dockerignore", + "render_compose_file", + "write_compose_file", +] diff --git a/neural/deployment/docker/compose.py b/neural/deployment/docker/compose.py new file mode 100644 index 00000000..bcfc2f66 --- /dev/null +++ b/neural/deployment/docker/compose.py @@ -0,0 +1,151 @@ +""" +Docker Compose orchestration for the Neural SDK deployment module. + +This module provides utilities for generating and managing Docker Compose +configurations for multi-service trading bot deployments. 
+""" + +from pathlib import Path +from typing import Any + +from jinja2 import Template + +# Docker Compose template for trading bot stack +COMPOSE_TEMPLATE = """version: '3.8' + +services: + trading-bot: + build: {{ build_context }} + container_name: {{ container_name }} + environment: + - ALGORITHM_TYPE={{ algorithm_type }} + - ENVIRONMENT={{ environment }} + - BOT_NAME={{ bot_name }} + - KALSHI_API_KEY_ID=${KALSHI_API_KEY_ID} + - KALSHI_PRIVATE_KEY_PATH=/secrets/private_key.pem + {% if database_enabled %}- DB_HOST=postgres + - DB_PORT=5432 + - DB_USER=trading_user + - DB_PASSWORD=trading_pass + - DB_NAME=trading_db{% endif %} + volumes: + - ./secrets:/secrets:ro + - {{ bot_name|lower|replace(' ', '-') }}_logs:/tmp + {% if database_enabled %}depends_on: + - postgres{% endif %} + networks: + - trading-network + restart: unless-stopped + {% if cpu_limit or memory_limit %}deploy: + resources: + limits: + {% if cpu_limit %}cpus: '{{ cpu_limit }}'{% endif %} + {% if memory_limit %}memory: {{ memory_limit }}{% endif %} + {% endif %} + +{% if database_enabled %} postgres: + image: postgres:15-alpine + container_name: {{ bot_name|lower|replace(' ', '-') }}-postgres + environment: + - POSTGRES_DB=trading_db + - POSTGRES_USER=trading_user + - POSTGRES_PASSWORD=trading_pass + volumes: + - postgres_data:/var/lib/postgresql/data + networks: + - trading-network + restart: unless-stopped +{% endif %} + +{% if monitoring_enabled %} prometheus: + image: prom/prometheus:latest + container_name: {{ bot_name|lower|replace(' ', '-') }}-prometheus + volumes: + - ./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml:ro + - prometheus_data:/prometheus + ports: + - "9090:9090" + networks: + - trading-network + restart: unless-stopped +{% endif %} + +volumes: + {{ bot_name|lower|replace(' ', '-') }}_logs: + {% if database_enabled %}postgres_data:{% endif %} + {% if monitoring_enabled %}prometheus_data:{% endif %} + +networks: + trading-network: + driver: bridge +""" + + +def 
render_compose_file( + bot_name: str, + algorithm_type: str = "mean_reversion", + environment: str = "sandbox", + build_context: str = ".", + container_name: str | None = None, + database_enabled: bool = True, + monitoring_enabled: bool = False, + cpu_limit: str | None = None, + memory_limit: str | None = None, +) -> str: + """Render a Docker Compose file from template. + + Args: + bot_name: Name of the trading bot + algorithm_type: Trading algorithm type + environment: Deployment environment + build_context: Docker build context path + container_name: Custom container name (auto-generated if None) + database_enabled: Include PostgreSQL service + monitoring_enabled: Include Prometheus monitoring + cpu_limit: CPU limit (e.g., "1.0") + memory_limit: Memory limit (e.g., "2g") + + Returns: + Rendered docker-compose.yml as string + """ + if not container_name: + container_name = f"{bot_name.lower().replace(' ', '-')}-trading-bot" + + template = Template(COMPOSE_TEMPLATE) + + return template.render( + bot_name=bot_name, + algorithm_type=algorithm_type, + environment=environment, + build_context=build_context, + container_name=container_name, + database_enabled=database_enabled, + monitoring_enabled=monitoring_enabled, + cpu_limit=cpu_limit, + memory_limit=memory_limit, + ) + + +def write_compose_file( + output_path: Path, + bot_name: str, + **kwargs: Any, +) -> Path: + """Generate and write a Docker Compose file. 
"""
Docker deployment provider for the Neural SDK.

This module implements the Docker-based deployment provider for running
trading bots in containers.
"""

import asyncio
import concurrent.futures
import json
import logging
import os
import subprocess
import uuid
from datetime import datetime, timezone
from pathlib import Path
from typing import Any

import docker
from docker.errors import DockerException, NotFound

from neural.deployment.base import DeploymentProvider
from neural.deployment.config import (
    DeploymentConfig,
    DeploymentInfo,
    DeploymentResult,
    DeploymentStatus,
    DockerConfig,
)
from neural.deployment.docker.compose import write_compose_file
from neural.deployment.docker.templates import render_dockerfile, render_dockerignore
from neural.deployment.exceptions import (
    ConfigurationError,
    ContainerNotFoundError,
    DeploymentError,
    ImageBuildError,
    ResourceLimitExceededError,
)

logger = logging.getLogger(__name__)


def _utcnow() -> datetime:
    """Current timezone-aware UTC timestamp.

    All deployment bookkeeping uses aware UTC datetimes, matching the
    database schema defaults (naive datetime.now() mixed with aware values
    raises TypeError on subtraction).
    """
    return datetime.now(timezone.utc)


class DockerDeploymentProvider(DeploymentProvider):
    """Docker-based deployment provider.

    This provider deploys trading bots as Docker containers,
    supporting both individual containers and Docker Compose stacks.

    All blocking Docker SDK calls are dispatched to a ThreadPoolExecutor so
    the asyncio event loop is never blocked. Call :meth:`cleanup` when done
    to shut the executor down.

    Example:
        ```python
        from neural.deployment import DockerDeploymentProvider, DeploymentConfig

        provider = DockerDeploymentProvider()
        config = DeploymentConfig(
            bot_name="MyBot",
            strategy_type="NFL",
            environment="paper"
        )

        result = await provider.deploy(config)
        print(f"Deployed: {result.deployment_id}")
        ```
    """

    def __init__(
        self,
        docker_client: docker.DockerClient | None = None,
        project_root: Path | None = None,
    ):
        """Initialize Docker deployment provider.

        Args:
            docker_client: Docker client (created automatically if None)
            project_root: Root directory for project files (defaults to cwd)

        Raises:
            DeploymentError: If the Docker daemon cannot be reached
        """
        try:
            self.docker_client = docker_client or docker.from_env()
        except DockerException as e:
            raise DeploymentError(f"Failed to connect to Docker: {e}") from e

        self.project_root = Path(project_root or os.getcwd())
        # deployment_id -> bookkeeping dict (container id/name, config, times)
        self.active_deployments: dict[str, dict[str, Any]] = {}
        # Offloads blocking docker-py calls; shut down via cleanup().
        self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=4)

    async def deploy(self, config: DeploymentConfig) -> DeploymentResult:
        """Deploy a trading bot to a Docker container.

        Args:
            config: Deployment configuration

        Returns:
            DeploymentResult with deployment details

        Raises:
            DeploymentError: If deployment fails
            ImageBuildError: If Docker image build fails
            ConfigurationError: If the secrets directory is missing/invalid
        """
        deployment_id = str(uuid.uuid4())
        container_name = self._generate_container_name(config.bot_name, deployment_id)

        logger.info("Starting deployment %s for bot: %s", deployment_id, config.bot_name)

        try:
            image_tag = await self._build_image(config, deployment_id)
            env_vars = self._prepare_env_vars(config)
            container_config = self._create_container_config(
                config, image_tag, container_name, env_vars
            )

            # Create and start container off-loop: docker-py is blocking.
            loop = asyncio.get_running_loop()
            container = await loop.run_in_executor(
                self._executor, lambda: self.docker_client.containers.create(**container_config)
            )
            await loop.run_in_executor(self._executor, container.start)

            deployment_info = {
                "container_id": container.id,
                "container_name": container_name,
                "config": config.model_dump(),
                "status": "running",
                "created_at": _utcnow(),
                "image_tag": image_tag,
            }
            self.active_deployments[deployment_id] = deployment_info

            logger.info("Deployment %s started successfully", deployment_id)

            return DeploymentResult(
                deployment_id=deployment_id,
                status="running",
                container_id=container.id,
                container_name=container_name,
                created_at=deployment_info["created_at"],
                endpoints={},
                metadata={"image_tag": image_tag},
            )

        except DockerException as e:
            logger.error("Docker deployment failed: %s", e)
            raise DeploymentError(f"Failed to deploy container: {e}") from e
        except DeploymentError:
            # Preserve specific subclasses (ImageBuildError, ConfigurationError)
            # instead of re-wrapping them into a generic DeploymentError below.
            raise
        except Exception as e:
            logger.error("Unexpected deployment error: %s", e)
            raise DeploymentError(f"Deployment failed: {e}") from e

    async def stop(self, deployment_id: str) -> bool:
        """Stop and remove a running deployment.

        Args:
            deployment_id: Unique identifier for the deployment

        Returns:
            True if successfully stopped

        Raises:
            ContainerNotFoundError: If deployment doesn't exist
            DeploymentError: If Docker fails to stop/remove the container
        """
        if deployment_id not in self.active_deployments:
            raise ContainerNotFoundError(f"Deployment not found: {deployment_id}")

        deployment = self.active_deployments[deployment_id]

        try:
            loop = asyncio.get_running_loop()
            container = await loop.run_in_executor(
                self._executor, self.docker_client.containers.get, deployment["container_id"]
            )
            await loop.run_in_executor(self._executor, lambda: container.stop(timeout=30))
            await loop.run_in_executor(self._executor, container.remove)

            del self.active_deployments[deployment_id]
            logger.info("Stopped deployment: %s", deployment_id)
            return True

        except NotFound:
            # Container vanished outside our control; drop our record anyway.
            del self.active_deployments[deployment_id]
            raise ContainerNotFoundError(f"Container not found: {deployment_id}")
        except DockerException as e:
            logger.error("Failed to stop deployment %s: %s", deployment_id, e)
            raise DeploymentError(f"Failed to stop deployment: {e}") from e

    async def status(self, deployment_id: str) -> DeploymentStatus:
        """Get current deployment status.

        Args:
            deployment_id: Unique identifier for the deployment

        Returns:
            DeploymentStatus with current status info

        Raises:
            ContainerNotFoundError: If deployment doesn't exist
        """
        if deployment_id not in self.active_deployments:
            raise ContainerNotFoundError(f"Deployment not found: {deployment_id}")

        deployment = self.active_deployments[deployment_id]

        try:
            loop = asyncio.get_running_loop()
            container = await loop.run_in_executor(
                self._executor, self.docker_client.containers.get, deployment["container_id"]
            )
            await loop.run_in_executor(self._executor, container.reload)

            # Both sides are timezone-aware UTC (see _utcnow).
            uptime = (_utcnow() - deployment["created_at"]).total_seconds()

            # Container stats are a blocking call as well.
            stats = await loop.run_in_executor(
                self._executor, lambda: container.stats(stream=False)
            )

            return DeploymentStatus(
                deployment_id=deployment_id,
                status=container.status,
                uptime_seconds=uptime,
                logs=[],  # Use logs() method for full logs
                health_status=container.attrs.get("State", {}).get("Health", {}).get("Status"),
                metrics=self._extract_metrics(stats),
            )

        except NotFound:
            raise ContainerNotFoundError(f"Container not found: {deployment_id}")
        except DockerException as e:
            raise DeploymentError(f"Failed to get status: {e}") from e

    async def logs(self, deployment_id: str, tail: int = 100) -> list[str]:
        """Get recent logs from deployment.

        Args:
            deployment_id: Unique identifier for the deployment
            tail: Number of recent log lines to return

        Returns:
            List of log lines (timestamped)

        Raises:
            ContainerNotFoundError: If deployment doesn't exist
        """
        if deployment_id not in self.active_deployments:
            raise ContainerNotFoundError(f"Deployment not found: {deployment_id}")

        deployment = self.active_deployments[deployment_id]

        try:
            loop = asyncio.get_running_loop()
            container = await loop.run_in_executor(
                self._executor, self.docker_client.containers.get, deployment["container_id"]
            )
            logs = await loop.run_in_executor(
                self._executor, lambda: container.logs(tail=tail, timestamps=True).decode("utf-8")
            )
            return logs.strip().split("\n") if logs else []

        except NotFound:
            raise ContainerNotFoundError(f"Container not found: {deployment_id}")
        except DockerException as e:
            raise DeploymentError(f"Failed to get logs: {e}") from e

    async def list_deployments(self) -> list[DeploymentInfo]:
        """List all active deployments.

        Returns:
            List of DeploymentInfo for all active deployments; deployments
            whose containers were removed externally are silently skipped.
        """
        deployments = []
        loop = asyncio.get_running_loop()

        for dep_id, deployment in self.active_deployments.items():
            try:
                container = await loop.run_in_executor(
                    self._executor, self.docker_client.containers.get, deployment["container_id"]
                )
                await loop.run_in_executor(self._executor, container.reload)

                deployments.append(
                    DeploymentInfo(
                        deployment_id=dep_id,
                        bot_name=deployment["config"]["bot_name"],
                        status=container.status,
                        environment=deployment["config"]["environment"],
                        created_at=deployment["created_at"],
                        deployment_type="docker",
                    )
                )
            except NotFound:
                # Container was removed externally
                continue

        return deployments

    # Private helper methods

    def _generate_container_name(self, bot_name: str, deployment_id: str) -> str:
        """Generate a unique, Docker-safe container name."""
        safe_name = bot_name.lower().replace(" ", "-").replace("_", "-")
        short_id = deployment_id[:8]
        return f"neural-bot-{safe_name}-{short_id}"

    async def _build_image(self, config: DeploymentConfig, deployment_id: str) -> str:
        """Build the Docker image for a deployment and return its tag.

        Raises:
            ImageBuildError: If the Docker build fails
        """
        image_tag = f"neural-trading-bot:{config.environment}-{deployment_id[:8]}"

        try:
            dockerfile_content = render_dockerfile(
                algorithm_type=config.algorithm_config.get("algorithm_type", "mean_reversion"),
                environment=config.environment,
                bot_name=config.bot_name,
                database_enabled=config.database_enabled,
                websocket_enabled=config.websocket_enabled,
                monitoring_enabled=config.monitoring_enabled,
            )

            # Write Dockerfile to a per-deployment build directory.
            build_dir = self.project_root / "build" / deployment_id
            build_dir.mkdir(parents=True, exist_ok=True)

            dockerfile_path = build_dir / "Dockerfile"
            dockerfile_path.write_text(dockerfile_content)

            # Building can take minutes; keep it off the event loop.
            logger.info("Building Docker image: %s", image_tag)
            loop = asyncio.get_running_loop()
            await loop.run_in_executor(
                self._executor,
                lambda: self.docker_client.images.build(
                    path=str(self.project_root),
                    dockerfile=str(dockerfile_path),
                    tag=image_tag,
                    rm=True,
                ),
            )

            logger.info("Image built successfully: %s", image_tag)
            return image_tag

        except DockerException as e:
            raise ImageBuildError(f"Failed to build Docker image: {e}") from e

    def _prepare_env_vars(self, config: DeploymentConfig) -> dict[str, str]:
        """Prepare environment variables for the container."""
        return {
            "BOT_NAME": config.bot_name,
            "STRATEGY_TYPE": config.strategy_type,
            "ENVIRONMENT": config.environment,
            "DATABASE_ENABLED": str(config.database_enabled).lower(),
            "WEBSOCKET_ENABLED": str(config.websocket_enabled).lower(),
            "MONITORING_ENABLED": str(config.monitoring_enabled).lower(),
            "KALSHI_API_KEY_ID": os.getenv("KALSHI_API_KEY_ID", ""),
            "RISK_CONFIG": json.dumps(config.risk_config),
            "ALGORITHM_CONFIG": json.dumps(config.algorithm_config),
        }

    def _create_container_config(
        self,
        config: DeploymentConfig,
        image_tag: str,
        container_name: str,
        env_vars: dict[str, str],
    ) -> dict[str, Any]:
        """Create the kwargs dict for docker_client.containers.create().

        Raises:
            ConfigurationError: If the secrets directory is missing or invalid
        """
        # Fail fast with actionable guidance if credentials are absent.
        secrets_path = self.project_root / "secrets"
        if not secrets_path.exists():
            raise ConfigurationError(
                f"Secrets directory not found: {secrets_path}\n"
                f"Please create the secrets directory with your Kalshi API credentials:\n"
                f"  mkdir -p {secrets_path}\n"
                f"  echo 'KALSHI_API_KEY=your_key' > {secrets_path}/.env"
            )

        if not secrets_path.is_dir():
            raise ConfigurationError(f"Secrets path exists but is not a directory: {secrets_path}")

        container_config: dict[str, Any] = {
            "image": image_tag,
            "name": container_name,
            "environment": env_vars,
            "volumes": {
                str(secrets_path): {"bind": "/secrets", "mode": "ro"},
                f"{container_name}_data": {"bind": "/app/data", "mode": "rw"},
            },
            "restart_policy": {"Name": "unless-stopped"},
            "labels": {
                "app": "neural-trading-bot",
                "bot_name": config.bot_name,
                "strategy_type": config.strategy_type,
                "environment": config.environment,
            },
            "detach": True,
        }

        # Optional resource limits (docker expects nano-CPUs / mem strings).
        resources = config.compute_resources
        if resources:
            if "cpu_limit" in resources:
                container_config["nano_cpus"] = int(float(resources["cpu_limit"]) * 1e9)
            if "memory_limit" in resources:
                container_config["mem_limit"] = resources["memory_limit"]

        return container_config

    def _extract_metrics(self, stats: dict[str, Any]) -> dict[str, Any]:
        """Extract key CPU/memory metrics from a container stats payload."""
        try:
            memory_stats = stats.get("memory_stats", {})
            return {
                "cpu_percent": self._calculate_cpu_percent(stats),
                "memory_usage_mb": memory_stats.get("usage", 0) / (1024 * 1024),
                "memory_limit_mb": memory_stats.get("limit", 0) / (1024 * 1024),
            }
        except Exception as e:
            logger.warning("Failed to extract metrics: %s", e)
            return {}

    def _calculate_cpu_percent(self, stats: dict[str, Any]) -> float:
        """Calculate CPU percentage from Docker stats deltas (0.0 on any gap)."""
        try:
            cpu_stats = stats.get("cpu_stats", {})
            precpu_stats = stats.get("precpu_stats", {})

            cpu_delta = cpu_stats.get("cpu_usage", {}).get("total_usage", 0) - precpu_stats.get(
                "cpu_usage", {}
            ).get("total_usage", 0)
            system_delta = cpu_stats.get("system_cpu_usage", 0) - precpu_stats.get(
                "system_cpu_usage", 0
            )

            if system_delta > 0 and cpu_delta > 0:
                num_cpus = len(cpu_stats.get("cpu_usage", {}).get("percpu_usage", [1]))
                return (cpu_delta / system_delta) * num_cpus * 100.0

            return 0.0
        except Exception:
            return 0.0

    async def cleanup(self) -> None:
        """Cleanup resources including the thread pool executor.

        This should be called when the provider is no longer needed
        to ensure proper shutdown of background threads.

        Example:
            ```python
            provider = DockerDeploymentProvider()
            try:
                # Use provider...
                pass
            finally:
                await provider.cleanup()
            ```
        """
        logger.info("Shutting down deployment provider executor")
        self._executor.shutdown(wait=True)
"""
Dockerfile templates for the Neural SDK deployment module.

This module provides Jinja2-based Dockerfile templates for building
trading bot Docker images with various configurations.
"""

from jinja2 import Template

# Base Dockerfile template for trading bots
DOCKERFILE_TEMPLATE = """FROM python:{{ python_version }}-slim

# Install system dependencies
RUN apt-get update && apt-get install -y \\
    gcc \\
    postgresql-client \\
    curl \\
    jq \\
    vim \\
    htop \\
    && rm -rf /var/lib/apt/lists/*

# Create trading user
RUN useradd --create-home --shell /bin/bash trading

# Set working directory
WORKDIR /app

# Copy requirements first for better caching
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Install Neural SDK{% if install_neural_sdk %}
RUN pip install --no-cache-dir neural-sdk{% if neural_sdk_version %}=={{ neural_sdk_version }}{% endif %}{% if neural_sdk_extras %}[{{ neural_sdk_extras|join(',') }}]{% endif %}
{% endif %}

# Copy application code
COPY src/ ./src/
{% if include_examples %}COPY examples/ ./examples/
{% endif %}

# Create directories for algorithm injection
# Note: /app/logs removed - use /tmp/monitoring.log for writable logs in sandboxes
RUN mkdir -p /app/algorithms /app/data

{% if entrypoint_script %}# Copy entrypoint script
COPY {{ entrypoint_script }} /app/entrypoint.sh
RUN chmod +x /app/entrypoint.sh
{% endif %}

# Set ownership
RUN chown -R trading:trading /app

# Switch to non-root user
USER trading

# Environment variables for algorithm configuration
ENV ALGORITHM_TYPE={{ algorithm_type }}
ENV ENVIRONMENT={{ environment }}
ENV BOT_NAME={{ bot_name }}
ENV DATABASE_ENABLED={{ database_enabled|lower }}
ENV WEBSOCKET_ENABLED={{ websocket_enabled|lower }}
ENV MONITORING_ENABLED={{ monitoring_enabled|lower }}

{% if healthcheck_enabled %}# Health check
# Uses curl (installed above) rather than `python -c "import requests; ..."`:
# requests is not installed by this template, and requests.get() would not
# fail on a non-2xx response anyway. `curl -f` does both correctly.
HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
    CMD curl -f http://localhost:8000/health || exit 1
{% endif %}

# Default entrypoint
{% if entrypoint_script %}ENTRYPOINT ["/app/entrypoint.sh"]
{% else %}CMD ["python", "-m", "{{ main_module }}"]
{% endif %}
"""

# Multi-stage Dockerfile template (optimized for production)
MULTISTAGE_DOCKERFILE_TEMPLATE = """# Stage 1: Builder
FROM python:{{ python_version }}-slim as builder

# Install build dependencies
RUN apt-get update && apt-get install -y \\
    gcc \\
    && rm -rf /var/lib/apt/lists/*

WORKDIR /build

# Install Python dependencies
COPY requirements.txt .
RUN pip install --user --no-cache-dir -r requirements.txt

# Install Neural SDK
RUN pip install --user --no-cache-dir neural-sdk{% if neural_sdk_version %}=={{ neural_sdk_version }}{% endif %}{% if neural_sdk_extras %}[{{ neural_sdk_extras|join(',') }}]{% endif %}

# Stage 2: Runtime
FROM python:{{ python_version }}-slim

# Install runtime dependencies only
RUN apt-get update && apt-get install -y \\
    postgresql-client \\
    curl \\
    jq \\
    && rm -rf /var/lib/apt/lists/*

# Create trading user
RUN useradd --create-home --shell /bin/bash trading

WORKDIR /app

# Copy Python packages from builder
COPY --from=builder /root/.local /home/trading/.local

# Copy application code
COPY --chown=trading:trading src/ ./src/
{% if include_examples %}COPY --chown=trading:trading examples/ ./examples/
{% endif %}

# Create directories
RUN mkdir -p /app/algorithms /app/data && chown -R trading:trading /app

{% if entrypoint_script %}# Copy entrypoint
COPY --chown=trading:trading {{ entrypoint_script }} /app/entrypoint.sh
RUN chmod +x /app/entrypoint.sh
{% endif %}

# Switch to non-root user
USER trading

# Update PATH
ENV PATH=/home/trading/.local/bin:$PATH

# Environment variables
ENV ALGORITHM_TYPE={{ algorithm_type }}
ENV ENVIRONMENT={{ environment }}
ENV BOT_NAME={{ bot_name }}
ENV DATABASE_ENABLED={{ database_enabled|lower }}
ENV WEBSOCKET_ENABLED={{ websocket_enabled|lower }}
ENV MONITORING_ENABLED={{ monitoring_enabled|lower }}

{% if healthcheck_enabled %}# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
    CMD curl -f http://localhost:8000/health || exit 1
{% endif %}

# Entrypoint
{% if entrypoint_script %}ENTRYPOINT ["/app/entrypoint.sh"]
{% else %}CMD ["python", "-m", "{{ main_module }}"]
{% endif %}
"""

# .dockerignore template
DOCKERIGNORE_TEMPLATE = """__pycache__
*.pyc
*.pyo
*.pyd
.Python
env
venv
.venv
pip-log.txt
pip-delete-this-directory.txt
.tox
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.log
.git
.gitignore
.mypy_cache
.pytest_cache
.hypothesis
secrets/
logs/
*.sqlite3
*.db
.env
.env.*
*.md
docs/
tests/
htmlcov/
dist/
build/
*.egg-info/
"""

# Compile each template once at import time; the template strings never
# change between renders.
_DOCKERFILE_TEMPLATE = Template(DOCKERFILE_TEMPLATE)
_MULTISTAGE_TEMPLATE = Template(MULTISTAGE_DOCKERFILE_TEMPLATE)


def render_dockerfile(
    python_version: str = "3.11",
    algorithm_type: str = "mean_reversion",
    environment: str = "sandbox",
    bot_name: str = "Neural-Bot",
    database_enabled: bool = False,
    websocket_enabled: bool = True,
    monitoring_enabled: bool = True,
    install_neural_sdk: bool = True,
    neural_sdk_version: str | None = None,
    neural_sdk_extras: list[str] | None = None,
    include_examples: bool = False,
    entrypoint_script: str | None = None,
    main_module: str = "src.main",
    healthcheck_enabled: bool = False,
    multistage: bool = False,
) -> str:
    """Render a Dockerfile from template.

    Args:
        python_version: Python version (e.g., "3.11", "3.10")
        algorithm_type: Trading algorithm type
        environment: Deployment environment (sandbox/paper/live)
        bot_name: Name of the trading bot
        database_enabled: Enable database persistence
        websocket_enabled: Enable WebSocket trading
        monitoring_enabled: Enable monitoring
        install_neural_sdk: Whether to install neural-sdk package
        neural_sdk_version: Specific version to install (None = latest)
        neural_sdk_extras: Extra dependencies to install (e.g., ["deployment"])
        include_examples: Include examples directory in image
        entrypoint_script: Path to entrypoint script (relative to build context)
        main_module: Main Python module to run (if no entrypoint)
        healthcheck_enabled: Enable Docker healthcheck
        multistage: Use multi-stage build for smaller images

    Returns:
        Rendered Dockerfile as string
    """
    template = _MULTISTAGE_TEMPLATE if multistage else _DOCKERFILE_TEMPLATE

    return template.render(
        python_version=python_version,
        algorithm_type=algorithm_type,
        environment=environment,
        bot_name=bot_name,
        database_enabled=database_enabled,
        websocket_enabled=websocket_enabled,
        monitoring_enabled=monitoring_enabled,
        install_neural_sdk=install_neural_sdk,
        neural_sdk_version=neural_sdk_version,
        neural_sdk_extras=neural_sdk_extras or [],
        include_examples=include_examples,
        entrypoint_script=entrypoint_script,
        main_module=main_module,
        healthcheck_enabled=healthcheck_enabled,
    )


def render_dockerignore() -> str:
    """Render a .dockerignore file.

    Returns:
        Rendered .dockerignore content as string
    """
    return DOCKERIGNORE_TEMPLATE.strip()
"""
Custom exceptions for the Neural SDK deployment module.

Every deployment-specific error derives from :class:`DeploymentError`,
so callers can catch the base class to handle any deployment failure.
"""


class DeploymentError(Exception):
    """Base exception for all deployment-related errors."""


class ProviderNotFoundError(DeploymentError):
    """A requested deployment provider is unavailable or not installed."""


class ContainerNotFoundError(DeploymentError):
    """The referenced Docker container could not be located."""


class ResourceLimitExceededError(DeploymentError):
    """A resource limit (CPU, memory, etc.) was exceeded."""


class DeploymentTimeoutError(DeploymentError):
    """A deployment operation did not complete within its time budget."""


class ConfigurationError(DeploymentError):
    """The supplied deployment configuration is invalid."""


class ImageBuildError(DeploymentError):
    """Building the Docker image failed."""


class NetworkError(DeploymentError):
    """A network-related deployment operation failed."""


class DatabaseError(DeploymentError):
    """A database operation failed during deployment."""


class MonitoringError(DeploymentError):
    """Monitoring setup or metrics collection failed."""