From b5da21f2d07a3c3a80b95454462fde23ec329644 Mon Sep 17 00:00:00 2001 From: hudsonaikins-crown Date: Fri, 5 Sep 2025 22:43:26 -0400 Subject: [PATCH 1/3] Improve Kalshi REST adapter: add pagination, nested markets, case-insensitive filtering - Add _paginate_events for cursor-based pagination on /events - Update get_game_markets to use events with nested markets, flatten, fallback - Update get_nfl_markets and get_cfb_markets with robust filtering and week support - Update get_events to support with_nested_markets and cursor - Remove unused KalshiAuth import - Normalize string matching for case insensitivity --- .../data_sources/kalshi/rest_adapter.py | 579 ++++++++++++++++++ 1 file changed, 579 insertions(+) create mode 100644 neural_sdk/data_sources/kalshi/rest_adapter.py diff --git a/neural_sdk/data_sources/kalshi/rest_adapter.py b/neural_sdk/data_sources/kalshi/rest_adapter.py new file mode 100644 index 00000000..326891a8 --- /dev/null +++ b/neural_sdk/data_sources/kalshi/rest_adapter.py @@ -0,0 +1,579 @@ +""" +Kalshi REST API Adapter + +Adapts existing Kalshi client to the unified REST data source framework. +""" + +from typing import Dict, Any, Optional, List +from datetime import datetime +import logging + +from ..base.rest_source import RESTDataSource +from ..base.auth_strategies import RSASignatureAuth +from neural_sdk.data_pipeline.data_sources.kalshi.client import KalshiClient + +logger = logging.getLogger(__name__) + + +class KalshiRESTAdapter(RESTDataSource): + """ + REST adapter for Kalshi API. + + Provides unified interface for Kalshi market data while + leveraging existing authentication and client implementations. + """ + + def __init__(self, config: Optional[Any] = None): + """ + Initialize Kalshi REST adapter. 
+ + Args: + config: KalshiConfig object or None to use environment + """ + # Use existing Kalshi client for compatibility + self.kalshi_client = KalshiClient(config) + self.config = self.kalshi_client.config + + # Create RSA auth strategy + auth_strategy = RSASignatureAuth( + api_key_id=self.config.api_key_id, + private_key_str=self.config.private_key + ) + + # Initialize base class + super().__init__( + base_url=self.config.api_base_url, + name="KalshiREST", + auth_strategy=auth_strategy, + timeout=30, + cache_ttl=10, # Short cache for market data + rate_limit=30, # Kalshi allows ~30 requests/second + max_retries=3 + ) + + logger.info("Kalshi REST adapter initialized") + + async def validate_response(self, response) -> bool: + """ + Validate Kalshi API response. + + Args: + response: HTTP response object + + Returns: + True if valid, False otherwise + """ + if response.status_code == 200: + return True + + if response.status_code == 401: + logger.error("Kalshi authentication failed") + elif response.status_code == 429: + logger.warning("Kalshi rate limit exceeded") + elif response.status_code >= 500: + logger.error(f"Kalshi server error: {response.status_code}") + + return False + + async def transform_response(self, data: Any, endpoint: str) -> Dict: + """ + Transform Kalshi response to standardized format. + + Args: + data: Raw Kalshi response + endpoint: The endpoint that was called + + Returns: + Standardized response + """ + return { + "source": "kalshi", + "endpoint": endpoint, + "data": data, + "timestamp": datetime.utcnow().isoformat(), + "metadata": { + "environment": self.config.environment, + "api_version": "v2" + } + } + + # Market Data Methods + + async def get_markets( + self, + limit: int = 100, + status: Optional[str] = None, + ticker: Optional[str] = None, + series_ticker: Optional[str] = None, + **kwargs + ) -> Dict: + """ + Get markets from Kalshi. 
+ + Args: + limit: Maximum number of markets to return + status: Market status filter + ticker: Specific market ticker + series_ticker: Series ticker filter + **kwargs: Additional filters + + Returns: + Markets data + """ + params = { + "limit": limit, + **kwargs + } + + if status: + params["status"] = status + if ticker: + params["ticker"] = ticker + if series_ticker: + params["series_ticker"] = series_ticker + + return await self.fetch("/markets", params=params) + + async def get_market(self, ticker: str) -> Dict: + """ + Get single market by ticker. + + Args: + ticker: Market ticker + + Returns: + Market data + """ + return await self.fetch(f"/markets/{ticker}") + + async def get_market_orderbook(self, ticker: str, depth: int = 10) -> Dict: + """ + Get market orderbook. + + Args: + ticker: Market ticker + depth: Orderbook depth + + Returns: + Orderbook data + """ + return await self.fetch( + f"/markets/{ticker}/orderbook", + params={"depth": depth} + ) + + async def get_market_history( + self, + ticker: str, + start_ts: Optional[int] = None, + end_ts: Optional[int] = None, + limit: int = 100 + ) -> Dict: + """ + Get market price history. + + Args: + ticker: Market ticker + start_ts: Start timestamp + end_ts: End timestamp + limit: Maximum results + + Returns: + Price history data + """ + params = {"limit": limit} + + if start_ts: + params["start_ts"] = start_ts + if end_ts: + params["end_ts"] = end_ts + + return await self.fetch(f"/markets/{ticker}/history", params=params) + + # Series Methods + + async def get_series(self, series_ticker: str) -> Dict: + """ + Get series information. + + Args: + series_ticker: Series ticker + + Returns: + Series data + """ + return await self.fetch(f"/series/{series_ticker}") + + # Event Methods + + async def get_events( + self, + limit: int = 100, + status: Optional[str] = None, + series_ticker: Optional[str] = None, + with_nested_markets: bool = False, + cursor: Optional[str] = None, + **kwargs + ) -> Dict: + """ + Get events. 
+ + Args: + limit: Maximum number of events + status: Event status filter + series_ticker: Series ticker filter + with_nested_markets: Include market details + cursor: Pagination cursor + **kwargs: Additional filters + + Returns: + Events data + """ + params = { + "limit": limit, + "with_nested_markets": with_nested_markets, + **kwargs + } + + if status: + params["status"] = status + if series_ticker: + params["series_ticker"] = series_ticker + if cursor: + params["cursor"] = cursor + + return await self.fetch("/events", params=params) + + async def get_event(self, event_ticker: str) -> Dict: + """ + Get single event. + + Args: + event_ticker: Event ticker + + Returns: + Event data + """ + return await self.fetch(f"/events/{event_ticker}") + + # Internal helpers + + async def _paginate_events( + self, + status: Optional[str] = None, + series_ticker: Optional[str] = None, + with_nested_markets: bool = True, + limit_per_page: int = 200, + max_pages: int = 5, + **kwargs + ) -> List[Dict[str, Any]]: + """ + Fetch events with cursor pagination. 
+ + Args: + status: Optional event status (e.g., 'open') + series_ticker: Optional series filter + with_nested_markets: Include nested markets if supported + limit_per_page: Page size per request + max_pages: Maximum number of pages to fetch + **kwargs: Additional filters + + Returns: + List of events + """ + events: List[Dict[str, Any]] = [] + cursor: Optional[str] = None + + for _ in range(max_pages): + resp = await self.get_events( + limit=limit_per_page, + status=status, + series_ticker=series_ticker, + with_nested_markets=with_nested_markets, + cursor=cursor, + **kwargs, + ) + + data = resp.get("data", {}) or {} + page_events = [] + if isinstance(data, dict): + page_events = data.get("events", []) or data.get("data", {}).get("events", []) + cursor = data.get("cursor") or data.get("next_cursor") or data.get("nextCursor") + elif isinstance(data, list): + page_events = data + cursor = None + + if not page_events: + break + + events.extend(page_events) + if not cursor: + break + + return events + + # Specialized Methods for Sports + + async def get_game_markets(self, sport: Optional[str] = None) -> Dict: + """ + Get game/sports betting markets. 
+ + Args: + sport: Optional sport filter ("soccer", "nfl", "bundesliga", "epl") + + Returns: + Game markets + """ + # Prefer events with nested markets and paginate + events = await self._paginate_events( + status="open", + with_nested_markets=True, + limit_per_page=200, + max_pages=5, + ) + + # Flatten markets from events (if present) and include event context + flattened: List[Dict[str, Any]] = [] + for ev in events: + ev_title = (ev.get("title") or "") + ev_title_lower = ev_title.lower() + ev_series = ev.get("series_ticker") or ev.get("series") + ev_markets = ev.get("markets") or [] + for m in ev_markets: + item = dict(m) + item.setdefault("event_ticker", ev.get("ticker")) + item.setdefault("event_title", ev_title) + if ev_series: + item.setdefault("event_series_ticker", ev_series) + flattened.append(item) + + # If no nested markets were returned, fall back to markets scan + markets: List[Dict[str, Any]] + if flattened: + markets = flattened + else: + fallback = await self.get_markets(status="open", limit=500) + data = fallback.get("data", {}) or {} + markets = data.get("markets", []) if isinstance(data, dict) else [] + + # Case-insensitive keywords + keyword_list = ["vs", "winner", "win", "beat", "defeat", "match", "game"] + game_markets: List[Dict[str, Any]] = [] + + for market in markets: + title = market.get("title") or market.get("event_title") or "" + title_lower = title.lower() + + if any(kw in title_lower for kw in keyword_list): + # Simple categorization by hints in title; best-effort only + sport_label = "Other" + league_label = "Various" + tl = title_lower + if any(k in tl for k in ["bayern", "dortmund", "hamburg", "munich"]): + sport_label = "Bundesliga" + league_label = "German Bundesliga" + elif any(k in tl for k in ["liverpool", "chelsea", "manchester", "arsenal", "tottenham"]): + sport_label = "EPL" + league_label = "English Premier League" + elif "nfl" in tl or "football" in tl: + sport_label = "NFL" + league_label = "National Football League" + 
+ # Optional filter by requested sport + if sport: + s = sport.lower() + if s == "soccer" and sport_label in ["Bundesliga", "EPL"]: + pass + elif s != sport_label.lower(): + continue + + market["sport"] = sport_label + market["league"] = league_label + game_markets.append(market) + + # Return standardized response + return await self.transform_response({"markets": game_markets}, "/events:game_markets") + + async def get_soccer_markets(self) -> Dict: + """ + Get soccer/football betting markets. + + Returns: + Soccer markets (Bundesliga, EPL, etc.) + """ + return await self.get_game_markets(sport="soccer") + + async def get_nfl_markets(self, week: Optional[int] = None) -> Dict: + """ + Get NFL-related markets. + Uses events with nested markets when available. + + Args: + week: NFL week number (for filtering) + + Returns: + NFL-related markets + """ + # Fetch events first, with nested markets + events = await self._paginate_events( + status="open", + with_nested_markets=True, + limit_per_page=200, + max_pages=5, + ) + + markets: List[Dict[str, Any]] = [] + for ev in events: + ev_title = (ev.get("title") or "") + ev_markets = ev.get("markets") or [] + for m in ev_markets: + item = dict(m) + item.setdefault("event_title", ev_title) + markets.append(item) + + # Fallback if no nested markets + if not markets: + fallback = await self.get_markets(status="open", limit=500) + data = fallback.get("data", {}) or {} + markets = data.get("markets", []) if isinstance(data, dict) else [] + + nfl_markets: List[Dict[str, Any]] = [] + for market in markets: + title = (market.get("title") or market.get("event_title") or "").lower() + ticker = (market.get("ticker") or "").lower() + if "nfl" in title or "football" in title or "nfl" in ticker: + if week: + if f"week {week}" in title: + market["sport"] = "NFL" + market["league"] = "National Football League" + nfl_markets.append(market) + else: + market["sport"] = "NFL" + market["league"] = "National Football League" + 
nfl_markets.append(market) + + return await self.transform_response({"markets": nfl_markets}, "/events:nfl_markets") + + async def get_cfb_markets(self, week: Optional[int] = None) -> Dict: + """ + Get college football markets. + Uses events with nested markets when available. + + Args: + week: College football week number + + Returns: + CFB markets + """ + # Fetch events first, with nested markets + events = await self._paginate_events( + status="open", + with_nested_markets=True, + limit_per_page=200, + max_pages=5, + ) + + markets: List[Dict[str, Any]] = [] + for ev in events: + ev_title = (ev.get("title") or "") + ev_markets = ev.get("markets") or [] + for m in ev_markets: + item = dict(m) + item.setdefault("event_title", ev_title) + markets.append(item) + + # Fallback if no nested markets + if not markets: + fallback = await self.get_markets(status="open", limit=500) + data = fallback.get("data", {}) or {} + markets = data.get("markets", []) if isinstance(data, dict) else [] + + cfb_keywords = ["cfb", "college football", "ncaa football", "bowl"] + cfb_markets: List[Dict[str, Any]] = [] + for market in markets: + title = (market.get("title") or market.get("event_title") or "").lower() + if any(kw in title for kw in cfb_keywords): + if week: + if f"week {week}" in title: + market["sport"] = "CFB" + market["league"] = "College Football" + cfb_markets.append(market) + else: + market["sport"] = "CFB" + market["league"] = "College Football" + cfb_markets.append(market) + + return await self.transform_response({"markets": cfb_markets}, "/events:cfb_markets") + + # Batch Operations + + async def get_multiple_markets(self, tickers: List[str]) -> Dict: + """ + Get multiple markets in parallel. 
+ + Args: + tickers: List of market tickers + + Returns: + Dictionary of market data by ticker + """ + requests = [ + {"endpoint": f"/markets/{ticker}"} + for ticker in tickers + ] + + results = await self.batch_fetch(requests) + + # Map results to tickers + market_data = {} + for ticker, result in zip(tickers, results): + if not isinstance(result, Exception): + market_data[ticker] = result + else: + logger.error(f"Failed to fetch market {ticker}: {result}") + market_data[ticker] = None + + return { + "source": "kalshi", + "data": market_data, + "timestamp": datetime.utcnow().isoformat() + } + + # Portfolio Methods (if authenticated) + + async def get_portfolio(self) -> Dict: + """ + Get user portfolio. + + Returns: + Portfolio data + """ + return await self.fetch("/portfolio") + + async def get_positions(self, limit: int = 100) -> Dict: + """ + Get user positions. + + Args: + limit: Maximum number of positions + + Returns: + Positions data + """ + return await self.fetch("/positions", params={"limit": limit}) + + # Health Check + + async def health_check(self) -> bool: + """ + Check Kalshi API health. 
+ + Returns: + True if healthy, False otherwise + """ + try: + result = await self.fetch("/markets", params={"limit": 1}) + return "data" in result + except Exception as e: + logger.error(f"Kalshi health check failed: {e}") + return False From 11a31e6f928345f0e54c07fccd85e3a027dc89af Mon Sep 17 00:00:00 2001 From: hudsonaikins-crown Date: Fri, 5 Sep 2025 22:44:11 -0400 Subject: [PATCH 2/3] Commit all pending changes: Kalshi adapter improvements, new data sources, scripts, and docs updates --- README.md | 422 +++++++++++++++- docs/kalshi_docs.txt | 151 ++++++ examples/nfl_websocket_streaming.py | 293 ----------- examples/rest_data_sources_demo.py | 288 +++++++++++ neural_sdk/__init__.py | 7 - neural_sdk/core/client.py | 46 +- neural_sdk/data_pipeline/__init__.py | 8 +- neural_sdk/data_pipeline/config/settings.py | 13 +- .../data_pipeline/data_sources/kalshi/auth.py | 8 +- .../data_sources/kalshi/cfb_discovery.py | 307 ++++++++++++ .../data_sources/kalshi/client.py | 48 +- .../data_sources/kalshi/nfl_discovery.py | 260 ++++++++++ neural_sdk/data_pipeline/sports_config.py | 16 +- .../data_pipeline/streaming/__init__.py | 8 - .../data_pipeline/streaming/handlers.py | 184 ------- .../data_pipeline/streaming/websocket.py | 411 ---------------- neural_sdk/data_sources/__init__.py | 43 ++ neural_sdk/data_sources/base/__init__.py | 28 ++ .../data_sources/base/auth_strategies.py | 275 +++++++++++ neural_sdk/data_sources/base/cache.py | 339 +++++++++++++ neural_sdk/data_sources/base/rate_limiter.py | 338 +++++++++++++ neural_sdk/data_sources/base/rest_source.py | 356 ++++++++++++++ neural_sdk/data_sources/espn/rest_adapter.py | 406 ++++++++++++++++ neural_sdk/data_sources/weather/__init__.py | 15 + neural_sdk/data_sources/weather/models.py | 264 ++++++++++ .../data_sources/weather/rest_adapter.py | 444 +++++++++++++++++ neural_sdk/streaming/__init__.py | 18 - neural_sdk/streaming/handlers.py | 354 -------------- neural_sdk/streaming/market_stream.py | 256 ---------- 
neural_sdk/streaming/websocket.py | 377 -------------- scripts/college_football_discovery.json | 10 + scripts/discover_college_football.py | 248 ++++++++++ scripts/discover_todays_cfb_games.py | 192 ++++++++ scripts/stream_chiefs_chargers_rest.py | 391 +++++++++++++++ scripts/stream_college_football_rest.py | 458 ++++++++++++++++++ scripts/test_cfb_dates.py | 40 ++ 36 files changed, 5345 insertions(+), 1977 deletions(-) create mode 100644 docs/kalshi_docs.txt delete mode 100644 examples/nfl_websocket_streaming.py create mode 100644 examples/rest_data_sources_demo.py create mode 100644 neural_sdk/data_pipeline/data_sources/kalshi/cfb_discovery.py create mode 100644 neural_sdk/data_pipeline/data_sources/kalshi/nfl_discovery.py delete mode 100644 neural_sdk/data_pipeline/streaming/__init__.py delete mode 100644 neural_sdk/data_pipeline/streaming/handlers.py delete mode 100644 neural_sdk/data_pipeline/streaming/websocket.py create mode 100644 neural_sdk/data_sources/__init__.py create mode 100644 neural_sdk/data_sources/base/__init__.py create mode 100644 neural_sdk/data_sources/base/auth_strategies.py create mode 100644 neural_sdk/data_sources/base/cache.py create mode 100644 neural_sdk/data_sources/base/rate_limiter.py create mode 100644 neural_sdk/data_sources/base/rest_source.py create mode 100644 neural_sdk/data_sources/espn/rest_adapter.py create mode 100644 neural_sdk/data_sources/weather/__init__.py create mode 100644 neural_sdk/data_sources/weather/models.py create mode 100644 neural_sdk/data_sources/weather/rest_adapter.py delete mode 100644 neural_sdk/streaming/__init__.py delete mode 100644 neural_sdk/streaming/handlers.py delete mode 100644 neural_sdk/streaming/market_stream.py delete mode 100644 neural_sdk/streaming/websocket.py create mode 100644 scripts/college_football_discovery.json create mode 100644 scripts/discover_college_football.py create mode 100644 scripts/discover_todays_cfb_games.py create mode 100644 scripts/stream_chiefs_chargers_rest.py 
create mode 100644 scripts/stream_college_football_rest.py create mode 100644 scripts/test_cfb_dates.py diff --git a/README.md b/README.md index a20429c9..437814fc 100644 --- a/README.md +++ b/README.md @@ -271,19 +271,429 @@ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file - Past performance doesn't guarantee future results - The SDK is not financial advice +--- + +# ๐Ÿ—บ๏ธ **STRATEGIC ROADMAP: The Future of Neural SDK** + +Neural SDK is evolving through three strategic phases designed to serve different market segments while building sustainable revenue: + +## **๐ŸŽฏ Our Vision: Local โ†’ Cloud โ†’ AI** + +``` +Phase 1: INNOVATORS (Months 1-6) +โ”œโ”€โ”€ Advanced Local SDK with Custom Data Sources +โ”œโ”€โ”€ Enhanced Developer Tools & CLI +โ””โ”€โ”€ Community-Driven Strategy Marketplace + +Phase 2: EARLY ADOPTERS (Months 4-12) +โ”œโ”€โ”€ Cloud Web Application +โ”œโ”€โ”€ Managed Compute & Auto-Scaling +โ””โ”€โ”€ Team Collaboration & Enterprise Features + +Phase 3: EARLY/LATE MAJORITY (Months 8-24) +โ”œโ”€โ”€ AI Strategy Generation +โ”œโ”€โ”€ Natural Language Trading +โ””โ”€โ”€ Automated Portfolio Management +``` + +--- + +## ๐Ÿ“… **PHASE 1: LOCAL SDK POWERHOUSE** +*Timeline: Months 1-6 | Target: 100-200 technical users* + +### **๐ŸŽฏ Customer Profile: Innovators** +- Quantitative developers and algorithmic traders +- Prediction market enthusiasts and researchers +- Users who want full control over their infrastructure +- Developers contributing to open-source ecosystem + +### **๐Ÿš€ Key Features Roadmap** + +#### **Sprint 1-3: Custom Data Source Framework (Months 1-2)** +```python +from neural_sdk.data_sources import DataSourceBuilder + +# Build custom ESPN scraper in 10 lines +espn_source = DataSourceBuilder() +espn_source.add_endpoint("game_stats", scrape_espn_stats) +espn_source.add_transformer("normalize_team_names") +sdk.register_data_source(espn_source) +``` + +**Features Delivered:** +- โœ… **DataSourceBuilder API** - Create 
custom data integrations +- โœ… **ESPN Sports Connector** - Real-time sports data +- โœ… **Twitter/X Sentiment Feed** - Social sentiment analysis +- โœ… **Weather API Integration** - Weather impact on sports +- โœ… **News Sentiment Feeds** - Reuters, Bloomberg integration + +#### **Sprint 4-6: Neural CLI Power Tools (Months 2-3)** +```bash +# Professional developer workflow +neural init my-strategy --template momentum +neural data-source add --type twitter --config sentiment.yaml +neural backtest --period 2024-01-01:2024-12-31 --metrics sharpe,drawdown +neural optimize --method kelly --risk-budget 0.02 +neural deploy --target local --monitor --alerts slack +``` + +**Features Delivered:** +- โœ… **Professional CLI** - Full workflow automation +- โœ… **VS Code Extension** - Integrated development environment +- โœ… **Strategy Templates** - Pre-built starting points +- โœ… **Advanced Backtesting** - Walk-forward, Monte Carlo +- โœ… **Local Monitoring** - Real-time performance tracking + +#### **Sprint 7-9: Community & Extensibility (Months 3-4)** +- โœ… **GitHub-style Strategy Repository** - Share and discover strategies +- โœ… **Performance Leaderboards** - Community rankings +- โœ… **Plugin Architecture** - Extensible optimization methods +- โœ… **Code Review System** - Community-driven quality + +#### **Sprint 10-12: Advanced Analytics (Months 4-6)** +- โœ… **Multi-timeframe Analysis** - Comprehensive backtesting +- โœ… **Risk Attribution** - Understand strategy risks +- โœ… **Correlation Analysis** - Cross-asset relationships +- โœ… **Custom Risk Metrics** - User-defined measurements + +### **๐Ÿ’ฐ Phase 1 Monetization** +- **๐Ÿ†“ Free Open Source Core** - Build community and adoption +- **๐Ÿ“Š Premium Data Sources** - $29/month for verified feeds +- **๐ŸŽ“ Professional Support** - $199/month for expert help +- **๐Ÿ† Training & Certification** - $500 Neural SDK certification + +**Revenue Target**: $15K MRR from innovator segment + +--- + +## ๐ŸŒ **PHASE 2: CLOUD 
WEB APPLICATION** +*Timeline: Months 4-12 | Target: 500-1,000 users* + +### **๐ŸŽฏ Customer Profile: Early Adopters** +- Progressive trading firms and sophisticated individuals +- Users wanting professional features without DevOps complexity +- Teams needing collaboration and shared strategies +- Businesses requiring enterprise-grade security and support + +### **๐Ÿš€ Key Features Roadmap** + +#### **Sprint 13-18: Core Web Platform (Months 4-6)** +```typescript +// React-based strategy builder +const StrategyBuilder = () => ( + + + + + +) +``` + +**Features Delivered:** +- โœ… **Strategy Builder Web Interface** - No-code portfolio construction +- โœ… **Visual Backtesting Dashboard** - Interactive performance charts +- โœ… **One-Click Cloud Deployment** - Managed container execution +- โœ… **Team Workspaces** - Collaborative strategy development + +#### **Sprint 19-24: Professional Features (Months 6-9)** +- โœ… **Advanced Analytics Dashboard** - Real-time P&L with attribution +- โœ… **API Management Console** - Keys, usage, rate limits, webhooks +- โœ… **Enterprise Integration Hub** - SSO, audit logs, compliance +- โœ… **Custom Alerting System** - Email, Slack, webhook notifications + +#### **Sprint 25-30: Marketplace & Automation (Months 9-12)** +- โœ… **Strategy Marketplace** - Buy/sell community strategies +- โœ… **Automated Portfolio Management** - Set-and-forget allocation +- โœ… **Performance Attribution** - Understand what's driving returns +- โœ… **Advanced Risk Management** - Dynamic position sizing + +### **๐Ÿ’ฐ Phase 2 Monetization** +- **๐Ÿฅ‰ Starter Plan** - $49/month: Web access, limited compute +- **๐Ÿฅˆ Professional Plan** - $199/month: Full features, teams +- **๐Ÿฅ‡ Enterprise Plan** - $999/month: SSO, compliance, support +- **โšก Compute Credits** - $0.10/CPU-hour for strategy execution +- **๐Ÿช Marketplace Commission** - 30% on strategy sales + +**Revenue Target**: $100K MRR from early adopter segment + +--- + +## ๐Ÿค– **PHASE 3: AI STRATEGY 
PLATFORM** +*Timeline: Months 8-24 | Target: 2,000-10,000 users* + +### **๐ŸŽฏ Customer Profile: Early/Late Majority** +- Traditional traders entering prediction markets +- Financial advisors and institutional investors +- Users wanting AI-powered automation +- Mass market seeking simple, effective solutions + +### **๐Ÿš€ Key Features Roadmap** + +#### **Sprint 31-40: AI Strategy Generation Core (Months 8-12)** +```python +# Natural language strategy creation +user_prompt = """ +Create a conservative NFL strategy that: +- Only bets on favorites with 70%+ win probability +- Never risks more than 15% on any single game +- Focuses on primetime games for better data +""" + +ai_strategy = neural_ai.generate_strategy( + prompt=user_prompt, + risk_profile="conservative", + market_context=current_market_data +) + +# AI automatically generates, backtests, and deploys +``` + +**Features Delivered:** +- โœ… **Natural Language Strategy Creation** - Describe strategies in plain English +- โœ… **Market Pattern Recognition AI** - Identify profitable inefficiencies +- โœ… **Personalized Risk Assessment** - AI-driven risk profiling +- โœ… **Automated Strategy Optimization** - Continuous improvement + +#### **Sprint 41-50: Advanced AI Features (Months 12-18)** +- โœ… **Continuous Strategy Evolution** - AI monitors and improves 24/7 +- โœ… **AI Trading Assistant** - Intelligent recommendations and insights +- โœ… **Multi-Market Optimization** - Cross-market arbitrage detection +- โœ… **Behavioral Bias Correction** - AI identifies and corrects mistakes + +#### **Sprint 51-60: Enterprise AI Platform (Months 18-24)** +- โœ… **Custom AI Model Training** - Enterprise-specific optimization +- โœ… **White-Label AI Platform** - Branded AI strategy generation +- โœ… **Institutional Features** - Large-scale portfolio optimization +- โœ… **Regulatory AI Compliance** - Automated compliance monitoring + +### **๐Ÿ’ฐ Phase 3 Monetization** +- **๐Ÿค– AI Basic** - $99/month: 10 AI generations, basic 
optimization +- **๐Ÿš€ AI Pro** - $299/month: Unlimited generations, continuous optimization +- **โญ AI Premium** - $599/month: Custom model training, advanced features +- **๐Ÿข AI Enterprise** - $2999/month: Custom models, white-label, API access +- **๐Ÿ’Ž AI Custom** - $10K+/month: Dedicated infrastructure, professional services + +**Revenue Target**: $500K MRR from majority market segments + +--- + +# ๐Ÿƒโ€โ™‚๏ธ **FEATURE SPRINT METHODOLOGY** + +## **Our Development Philosophy** +> *"Move fast, ship often, validate continuously"* + +### **๐Ÿ”„ 2-Week Sprint Cycle** +``` +Week 1: Development & Testing +โ”œโ”€โ”€ Monday: Sprint planning & design +โ”œโ”€โ”€ Tuesday-Thursday: Core implementation +โ”œโ”€โ”€ Friday: Code review & testing + +Week 2: Validation & Deployment +โ”œโ”€โ”€ Monday-Tuesday: Documentation & polish +โ”œโ”€โ”€ Wednesday: Beta user testing +โ”œโ”€โ”€ Thursday: Feedback incorporation +โ”œโ”€โ”€ Friday: Production deployment & demo +``` + +### **๐Ÿ“‹ Sprint Template** +Each feature follows this proven structure: + +```markdown +## Sprint N: [Feature Name] + +### ๐ŸŽฏ Success Criteria +- [ ] Measurable outcome 1 +- [ ] Measurable outcome 2 +- [ ] User validation metric + +### ๐Ÿ‘ฅ User Story +As a [user type], I want [capability] so that [benefit] + +### ๐Ÿงช Validation Plan +- A/B test with N% of users +- Success metric: X% improvement in Y +- Feedback collection method + +### ๐Ÿ“Š Dependencies +- Requires: [Previous sprint features] +- Blocks: [Future sprint features] +- Integration: [External services] +``` + +### **๐ŸŽš๏ธ Feature Flags & Gradual Rollout** +```python +from neural_sdk.features import feature_flag + +@feature_flag("ai_strategy_generation", rollout_percentage=10) +def generate_ai_strategy(user_prompt): + # New AI feature available to 10% of users + return ai_engine.generate(user_prompt) +``` + +## **๐Ÿ“ˆ Success Metrics by Phase** + +### **Phase 1 (Local SDK) - Developer Adoption** +- **Daily Active Developers**: SDK usage 
frequency +- **Strategy Creation Rate**: New strategies per week +- **Community Engagement**: GitHub stars, contributions, discussions +- **Data Source Integrations**: Custom integrations built by community + +### **Phase 2 (Web App) - User Growth & Engagement** +- **Monthly Active Users (MAU)**: Platform usage +- **Strategy Deployment Rate**: Web strategies per user +- **Collaboration Activity**: Team workspace usage +- **Compute Utilization**: Cloud resource consumption + +### **Phase 3 (AI Platform) - AI Adoption & Revenue** +- **AI Strategy Generations**: Natural language requests +- **Strategy Success Rate**: AI-generated strategy performance +- **User Retention**: Monthly subscription renewals +- **Enterprise Conversions**: Free โ†’ paid upgrades + +--- + +# ๐Ÿ“ **CHANGELOG & PROGRESS TRACKING** + +## **Automatic Progress Documentation** +Every completed sprint automatically generates changelog entries: + +```markdown +## v1.2.0 - Sprint 15 Complete โœ… +### ๐Ÿ†• New Features +- **Custom Data Source Builder**: Create ESPN, Twitter, weather integrations in 10 lines +- **Advanced CLI Tools**: `neural optimize --method kelly` command +- **Performance Boost**: 3x faster backtesting with parallel execution + +### ๐Ÿ“Š Impact Metrics +- 45% increase in strategy creation rate +- 12 new community data source integrations +- 89% user satisfaction score from beta testers + +### ๐Ÿ› Bug Fixes +- Fixed WebSocket reconnection issues in high-volume trading +- Resolved portfolio optimization edge case with <5 assets + +### ๐Ÿ“š Community Highlights +- @quantdev123 created amazing momentum strategy template +- @sports_algo contributed weather impact analysis +- 15 new contributors joined this sprint + +### โญ๏ธ Next Sprint Preview +Sprint 16 focuses on **Strategy Sharing Platform** - GitHub-style repository for community strategies +``` + +## **Community Feedback Integration** +```python +# Embedded feedback collection +from neural_sdk.feedback import collect_feedback + 
+@collect_feedback("strategy_builder") +def create_strategy_ui(): + # Feature usage automatically tracked + # User satisfaction surveyed weekly + # Performance metrics measured continuously +``` + +--- + +# ๐Ÿค **COMMUNITY ENGAGEMENT** + +## **๐Ÿ—ณ๏ธ Feature Request Democracy** +Community drives our roadmap through: + +- **๐Ÿ“Š Monthly Feature Polls**: Vote on next sprint priorities +- **๐Ÿ’ก GitHub Discussions**: Propose and debate new features +- **๐Ÿงช Beta Testing Program**: Early access for feedback +- **๐Ÿ† Community Challenges**: Monthly trading competitions + +## **๐ŸŽ“ Developer Success Program** + +### **Learning Path** +1. **๐Ÿ“š Neural SDK Fundamentals** (Free) +2. **โšก Advanced Strategy Development** ($199) +3. **๐Ÿค– AI-Powered Trading** ($399) +4. **๐Ÿ† Neural SDK Certification** ($500) + +### **Community Support** +- **๐Ÿ’ฌ Discord Server**: Real-time help and discussions +- **๐Ÿ“– Comprehensive Documentation**: Step-by-step guides +- **๐ŸŽฅ Video Tutorials**: Visual learning for all skill levels +- **๐Ÿ“… Weekly Office Hours**: Live Q&A with the team + +--- + +# ๐Ÿ’ฐ **REVENUE TRAJECTORY** + +## **Financial Milestones** +``` +Year 1 (Months 1-12): +โ”œโ”€โ”€ Phase 1: $15K MRR (Innovators) +โ”œโ”€โ”€ Phase 2: $100K MRR (Early Adopters) +โ””โ”€โ”€ Total: $115K MRR + +Year 2 (Months 13-24): +โ”œโ”€โ”€ Phase 1: $25K MRR (Mature innovators) +โ”œโ”€โ”€ Phase 2: $200K MRR (Scaled web users) +โ”œโ”€โ”€ Phase 3: $500K MRR (AI features launch) +โ””โ”€โ”€ Total: $725K MRR + +Year 3+: +โ”œโ”€โ”€ Enterprise AI: $1M+ MRR +โ”œโ”€โ”€ Consumer AI: $2M+ MRR +โ”œโ”€โ”€ Marketplace: $500K+ MRR +โ””โ”€โ”€ Total: $3.5M+ MRR +``` + +## **Customer Journey Progression** +``` +Developer (Free SDK) + โ†“ (Needs more data sources) +Professional User ($49-199/month) + โ†“ (Wants team collaboration) +Enterprise Customer ($999+/month) + โ†“ (Needs AI automation) +AI Platform User ($299-2999/month) +``` + +--- + +# ๐ŸŽฏ **GET INVOLVED** + +## **For Developers** +```bash +git 
clone https://github.com/IntelIP/Neural-Trading-Platform.git +cd Neural-Trading-Platform +pip install -e ".[dev]" +neural init my-first-strategy +``` + +## **For Traders** +- ๐Ÿ“ˆ **[Join Beta Program](https://neural-sdk.dev/beta)** - Early access to new features +- ๐Ÿ’ฌ **[Discord Community](https://discord.gg/neural-sdk)** - Connect with other traders +- ๐Ÿ“š **[Documentation](https://docs.neural-sdk.dev)** - Learn the platform + +## **For Enterprises** +- ๐Ÿ“ž **[Schedule Demo](https://calendly.com/neural-sdk/demo)** - See enterprise features +- ๐Ÿ’ผ **[Partnership Inquiry](mailto:partnerships@neural-sdk.dev)** - Integration opportunities +- ๐Ÿข **[Custom Solutions](mailto:enterprise@neural-sdk.dev)** - Tailored implementations + +--- + ## ๐Ÿ”— Links -- **Documentation**: [https://kalshi-trading-sdk.readthedocs.io/](https://kalshi-trading-sdk.readthedocs.io/) -- **PyPI Package**: [https://pypi.org/project/kalshi-trading-sdk/](https://pypi.org/project/kalshi-trading-sdk/) -- **Issues**: [https://github.com/kalshi/kalshi-trading-sdk/issues](https://github.com/kalshi/kalshi-trading-sdk/issues) -- **Discussions**: [https://github.com/kalshi/kalshi-trading-sdk/discussions](https://github.com/kalshi/kalshi-trading-sdk/discussions) +- **๐Ÿ  Website**: [https://neural-sdk.dev](https://neural-sdk.dev) +- **๐Ÿ“š Documentation**: [https://docs.neural-sdk.dev](https://docs.neural-sdk.dev) +- **๐Ÿ“ฆ PyPI Package**: [https://pypi.org/project/neural-sdk/](https://pypi.org/project/neural-sdk/) +- **๐Ÿ› Issues**: [https://github.com/IntelIP/Neural-Trading-Platform/issues](https://github.com/IntelIP/Neural-Trading-Platform/issues) +- **๐Ÿ’ฌ Discussions**: [https://github.com/IntelIP/Neural-Trading-Platform/discussions](https://github.com/IntelIP/Neural-Trading-Platform/discussions) ## ๐Ÿ† Acknowledgments - Kalshi for providing the trading platform -- Contributors and beta testers +- Our amazing community of contributors and beta testers - Open source community for excellent 
libraries --- -**Built for algorithmic trading โ€ข Trade responsibly โ€ข Performance matters** +**๐Ÿง  Built for algorithmic trading โ€ข ๐Ÿš€ Ship fast, validate often โ€ข ๐Ÿ“ˆ Performance matters** diff --git a/docs/kalshi_docs.txt b/docs/kalshi_docs.txt new file mode 100644 index 00000000..dc88c3a1 --- /dev/null +++ b/docs/kalshi_docs.txt @@ -0,0 +1,151 @@ +# API Documentation + +## Docs + +- [Create API Key](https://docs.kalshi.com/api-reference/api-keys/create-api-key.md): Endpoint for creating a new API key with a user-provided public key. This endpoint allows users with Premier or Market Maker API usage levels to create API keys by providing their own RSA public key. The platform will use this public key to verify signatures on API requests. +- [Delete API Key](https://docs.kalshi.com/api-reference/api-keys/delete-api-key.md): Endpoint for deleting an existing API key. This endpoint permanently deletes an API key. Once deleted, the key can no longer be used for authentication. This action cannot be undone. +- [Generate API Key](https://docs.kalshi.com/api-reference/api-keys/generate-api-key.md): Endpoint for generating a new API key with an automatically created key pair. This endpoint generates both a public and private RSA key pair. The public key is stored on the platform, while the private key is returned to the user and must be stored securely. The private key cannot be retrieved again. +- [Get API Keys](https://docs.kalshi.com/api-reference/api-keys/get-api-keys.md): Endpoint for retrieving all API keys associated with the authenticated user. API keys allow programmatic access to the platform without requiring username/password authentication. Each key has a unique identifier and name. +- [Create Market In Multivariate Event Collection](https://docs.kalshi.com/api-reference/collection/create-market-in-multivariate-event-collection.md): Endpoint for looking up an individual market in a multivariate event collection. 
This endpoint must be hit at least once before trading or looking up a market. +- [Get Multivariate Event Collection](https://docs.kalshi.com/api-reference/collection/get-multivariate-event-collection.md): Endpoint for getting data about a multivariate event collection by its ticker. +- [Get Multivariate Event Collection Lookup History](https://docs.kalshi.com/api-reference/collection/get-multivariate-event-collection-lookup-history.md): Endpoint for retrieving which markets in an event collection were recently looked up. +- [Get Multivariate Event Collections](https://docs.kalshi.com/api-reference/collection/get-multivariate-event-collections.md): Endpoint for getting data about multivariate event collections. +- [Lookup Tickers For Market In Multivariate Event Collection](https://docs.kalshi.com/api-reference/collection/lookup-tickers-for-market-in-multivariate-event-collection.md): Endpoint for looking up an individual market in a multivariate event collection. If CreateMarketInMultivariateEventCollection has never been hit with that variable combination before, this will return a 404. +- [Accept Quote](https://docs.kalshi.com/api-reference/communications/accept-quote.md): Endpoint for accepting a quote. This will require the quoter to confirm +- [Confirm Quote](https://docs.kalshi.com/api-reference/communications/confirm-quote.md): Endpoint for confirming a quote. This will start a timer for order execution +- [Create Quote](https://docs.kalshi.com/api-reference/communications/create-quote.md): Endpoint for creating a quote in response to an RFQ +- [Create RFQ](https://docs.kalshi.com/api-reference/communications/create-rfq.md): Endpoint for creating a new RFQ +- [Delete Quote](https://docs.kalshi.com/api-reference/communications/delete-quote.md): Endpoint for deleting a quote, which means it can no longer be accepted. 
+- [Delete RFQ](https://docs.kalshi.com/api-reference/communications/delete-rfq.md): Endpoint for deleting an RFQ by ID +- [Get Communications ID](https://docs.kalshi.com/api-reference/communications/get-communications-id.md): Endpoint for getting the communications ID of the logged-in user. +- [Get Quote](https://docs.kalshi.com/api-reference/communications/get-quote.md): Endpoint for getting a particular quote +- [Get Quotes](https://docs.kalshi.com/api-reference/communications/get-quotes.md): Endpoint for getting quotes +- [Get RFQ](https://docs.kalshi.com/api-reference/communications/get-rfq.md): Endpoint for getting a single RFQ by id +- [Get RFQs](https://docs.kalshi.com/api-reference/communications/get-rfqs.md): Endpoint for getting RFQs +- [Get Exchange Announcements](https://docs.kalshi.com/api-reference/get-exchange-announcements.md): Endpoint for getting all exchange-wide announcements. +- [Get Exchange Schedule](https://docs.kalshi.com/api-reference/get-exchange-schedule.md): Endpoint for getting the exchange schedule. +- [Get Exchange Status](https://docs.kalshi.com/api-reference/get-exchange-status.md): Endpoint for getting the exchange status. +- [Get User Data Timestamp](https://docs.kalshi.com/api-reference/get-user-data-timestamp.md): There is typically a short delay before exchange events are reflected in the API endpoints. Whenever possible, combine API responses to PUT/POST/DELETE requests with websocket data to obtain the most accurate view of the exchange state. This endpoint provides an approximate indication of when the data from the following endpoints was last validated: GetBalance, GetOrder(s), GetFills, GetPositions +- [Get Event](https://docs.kalshi.com/api-reference/market/get-event.md): Endpoint for getting data about an event by its ticker. An event represents a real-world occurrence that can be traded on, such as an election, sports game, or economic indicator release. 
Events contain one or more markets where users can place trades on different outcomes. +- [Get Event Metadata](https://docs.kalshi.com/api-reference/market/get-event-metadata.md): Endpoint for getting metadata about an event by its ticker. Returns only the metadata information for an event. +- [Get Events](https://docs.kalshi.com/api-reference/market/get-events.md): Endpoint for getting data about all events. An event represents a real-world occurrence that can be traded on, such as an election, sports game, or economic indicator release. Events contain one or more markets where users can place trades on different outcomes. This endpoint returns a paginated response. Use the 'limit' parameter to control page size (1-200, defaults to 100). The response includes a 'cursor' field - pass this value in the 'cursor' parameter of your next request to get the next page. An empty cursor indicates no more pages are available. +- [Get Market](https://docs.kalshi.com/api-reference/market/get-market.md): Endpoint for getting data about a specific market by its ticker. A market represents a specific binary outcome within an event that users can trade on (e.g., "Will candidate X win?"). Markets have yes/no positions, current prices, volume, and settlement rules. +- [Get Market Candlesticks](https://docs.kalshi.com/api-reference/market/get-market-candlesticks.md): Endpoint for getting historical candlestick data for a specific market. Candlesticks provide OHLC (Open, High, Low, Close) price data aggregated over specific time intervals. Each candlestick represents the price movement during that period, including opening and closing prices, as well as the highest and lowest prices reached. The period_interval determines the time length of each candlestick and must be one of: 1 (1 minute), 60 (1 hour), or 1440 (1 day). The start_ts and end_ts parameters define the time range for the data. 
+- [Get Market Order Book](https://docs.kalshi.com/api-reference/market/get-market-order-book.md): Endpoint for getting the current order book for a specific market. The order book shows all active bid orders for both yes and no sides of a binary market. It returns yes bids and no bids only (no asks are returned). This is because in binary markets, a bid for yes at price X is equivalent to an ask for no at price (100-X). For example, a yes bid at 7ยข is the same as a no ask at 93ยข, with identical contract sizes. Each side shows price levels with their corresponding quantities and order counts, organized from best to worst prices. +- [Get Markets](https://docs.kalshi.com/api-reference/market/get-markets.md): Endpoint for listing and discovering markets on Kalshi. A market represents a specific binary outcome within an event that users can trade on (e.g., "Will candidate X win?"). Markets have yes/no positions, current prices, volume, and settlement rules. This endpoint returns a paginated response. Use the 'limit' parameter to control page size (1-1000, defaults to 100). The response includes a 'cursor' field - pass this value in the 'cursor' parameter of your next request to get the next page. An empty cursor indicates no more pages are available. +- [Get Series](https://docs.kalshi.com/api-reference/market/get-series.md): Endpoint for getting data about a specific series by its ticker. A series represents a template for recurring events that follow the same format and rules (e.g., "Monthly Jobs Report", "Weekly Initial Jobless Claims", "Daily Weather in NYC"). Series define the structure, settlement sources, and metadata that will be applied to each recurring event instance within that series. +- [Get Series List](https://docs.kalshi.com/api-reference/market/get-series-list.md): Endpoint for getting data about multiple series with specified filters. 
A series represents a template for recurring events that follow the same format and rules (e.g., "Monthly Jobs Report", "Weekly Initial Jobless Claims", "Daily Weather in NYC"). This endpoint allows you to browse and discover available series templates by category. +- [Get Trades](https://docs.kalshi.com/api-reference/market/get-trades.md): Endpoint for getting all trades for all markets. A trade represents a completed transaction between two users on a specific market. Each trade includes the market ticker, price, quantity, and timestamp information. This endpoint returns a paginated response. Use the 'limit' parameter to control page size (1-1000, defaults to 100). The response includes a 'cursor' field - pass this value in the 'cursor' parameter of your next request to get the next page. An empty cursor indicates no more pages are available. +- [Get Milestone](https://docs.kalshi.com/api-reference/milestone/get-milestone.md): Endpoint for getting data about a specific milestone by its ID. +- [Get Milestones](https://docs.kalshi.com/api-reference/milestone/get-milestones.md): Endpoint for getting data about milestones with optional filtering. +- [Amend Order](https://docs.kalshi.com/api-reference/portfolio/amend-order.md): Endpoint for amending the max number of fillable contracts and/or price in an existing order. Max fillable contracts is `remaining_count` + `fill_count`. +- [Batch Cancel Orders](https://docs.kalshi.com/api-reference/portfolio/batch-cancel-orders.md): Endpoint for cancelling up to 20 orders at once. Available to members with advanced access only. +- [Batch Create Orders](https://docs.kalshi.com/api-reference/portfolio/batch-create-orders.md): Endpoint for submitting a batch of orders. Each order in the batch is counted against the total rate limit for order operations. Consequently, the size of the batch is capped by the current per-second rate-limit configuration applicable to the user. 
At the moment of writing, the limit is 20 orders per batch. Available to members with advanced access only.
+- [Cancel Order](https://docs.kalshi.com/api-reference/portfolio/cancel-order.md): Endpoint for canceling orders. The value for the orderId should match the id field of the order you want to decrease. Commonly, DELETE-type endpoints return 204 status with no body content on success. But we can't completely delete the order, as it may be partially filled already. Instead, the DeleteOrder endpoint reduces the order completely, essentially zeroing the remaining resting contracts on it. The zeroed order is returned on the response payload as a form of validation for the client.
+- [Create Order](https://docs.kalshi.com/api-reference/portfolio/create-order.md): Endpoint for submitting orders in a market.
+- [Create Order Group](https://docs.kalshi.com/api-reference/portfolio/create-order-group.md): Creates a new order group with a contracts limit. When the limit is hit, all orders in the group are cancelled and no new orders can be placed until reset.
+- [Decrease Order](https://docs.kalshi.com/api-reference/portfolio/decrease-order.md): Endpoint for decreasing the number of contracts in an existing order. This is the only kind of edit available on order quantity. Cancelling an order is equivalent to decreasing an order amount to zero.
+- [Delete Order Group](https://docs.kalshi.com/api-reference/portfolio/delete-order-group.md): Deletes an order group and cancels all orders within it. This permanently removes the group.
+- [Get Balance](https://docs.kalshi.com/api-reference/portfolio/get-balance.md): Endpoint for getting the balance of a member. The balance value is returned in cents.
+- [Get Fills](https://docs.kalshi.com/api-reference/portfolio/get-fills.md): Endpoint for getting all fills for the member. A fill is when a trade you have is matched.
+- [Get Order](https://docs.kalshi.com/api-reference/portfolio/get-order.md): Endpoint for getting a single order. 
+- [Get Order Group](https://docs.kalshi.com/api-reference/portfolio/get-order-group.md): Retrieves details for a single order group including all order IDs and auto-cancel status. +- [Get Order Groups](https://docs.kalshi.com/api-reference/portfolio/get-order-groups.md): Retrieves all order groups for the authenticated user. +- [Get Orders](https://docs.kalshi.com/api-reference/portfolio/get-orders.md): Endpoint for getting all orders for the member. +- [Get Portfolio Resting Order Total Value](https://docs.kalshi.com/api-reference/portfolio/get-portfolio-resting-order-total-value.md): Endpoint for getting the total value, in cents, of resting orders. This endpoint is only intended for use by FCM members (rare). Note: If you're uncertain about this endpoint, it likely does not apply to you. +- [Get Positions](https://docs.kalshi.com/api-reference/portfolio/get-positions.md): Endpoint for getting all market positions for the member. +- [Get Queue Position for Order](https://docs.kalshi.com/api-reference/portfolio/get-queue-position-for-order.md): Endpoint for getting an order's queue position in the order book. This represents the amount of orders that need to be matched before this order receives a partial or full match. Queue position is determined using a price-time priority. +- [Get Queue Positions for Orders](https://docs.kalshi.com/api-reference/portfolio/get-queue-positions-for-orders.md): Endpoint for getting queue positions for all resting orders. Queue position represents the number of contracts that need to be matched before an order receives a partial or full match, determined using price-time priority. +- [Get Settlements](https://docs.kalshi.com/api-reference/portfolio/get-settlements.md): Endpoint for getting the member's settlements historical track. 
+- [Reset Order Group](https://docs.kalshi.com/api-reference/portfolio/reset-order-group.md): Resets the order group's matched contracts counter to zero, allowing new orders to be placed again after the limit was hit. +- [Get Structured Target](https://docs.kalshi.com/api-reference/structured_target/get-structured-target.md): Endpoint for getting data about a specific structured target by its ID. +- [Get Structured Targets](https://docs.kalshi.com/api-reference/structured_target/get-structured-targets.md): Endpoint for getting data about structured targets. +- [Market & Event Lifecycle](https://docs.kalshi.com/api-reference/websockets/market-&-event-lifecycle.md): Market state changes and event creation notifications. + +**Requirements:** +- Market specification optional (omit to receive all events) +- Event creation notifications + +**Use case:** Tracking market lifecycle including creation, de(activation), close date changes, determination, settlement + +- [Market Positions](https://docs.kalshi.com/api-reference/websockets/market-positions.md): Real-time updates of your positions in markets. Requires authentication. + +**Requirements:** +- Authentication required +- Market specification optional (omit to receive all positions) +- Updates sent when your position changes due to trades, settlements, etc. + +**Monetary Values:** +All monetary values (position_cost, realized_pnl, fees_paid) are returned in centi-cents (1/10,000th of a dollar). +To convert to dollars, divide by 10,000. + +**Use case:** Portfolio tracking, position monitoring, P&L calculations + +- [Market Ticker](https://docs.kalshi.com/api-reference/websockets/market-ticker.md): Market price, volume, and open interest updates. 
+ +**Requirements:** +- Market specification optional (omit to receive all markets) +- Updates sent whenever any ticker field changes + +**Use case:** Displaying current market prices and statistics + +- [Multivariate Lookups](https://docs.kalshi.com/api-reference/websockets/multivariate-lookups.md): Multivariate collection lookup notifications. + +**Requirements:** +- Market specification optional + +**Use case:** Tracking multivariate market relationships + +- [Orderbook Updates](https://docs.kalshi.com/api-reference/websockets/orderbook-updates.md): Real-time orderbook price level changes. Provides incremental updates to maintain a live orderbook. + +**Requirements:** +- Market specification required: + - Use `market_ticker` (string) for a single market + - Use `market_tickers` (array of strings) for multiple markets +- Sends `orderbook_snapshot` first, then incremental `orderbook_delta` updates + +**Use case:** Building and maintaining a real-time orderbook + +- [Public Trades](https://docs.kalshi.com/api-reference/websockets/public-trades.md): Public trade notifications when trades occur. + +**Requirements:** +- Market specification optional (omit to receive all trades) +- Updates sent immediately after trade execution + +**Use case:** Trade feed, volume analysis + +- [User Fills](https://docs.kalshi.com/api-reference/websockets/user-fills.md): Your order fill notifications. Requires authentication. + +**Requirements:** +- Authentication required +- Market specification ignored (always sends all your fills) +- Updates sent immediately when your orders are filled + +**Use case:** Tracking your trading activity + +- [WebSocket Connection](https://docs.kalshi.com/api-reference/websockets/websocket-connection.md): Main WebSocket connection endpoint. All communication happens through this single connection. +Use the subscribe command to subscribe to specific data channels. 
For more information, see the [Getting Started](https://docs.kalshi.com/getting_started/websocket_connection) guide. + +- [API Changelog](https://docs.kalshi.com/changelog/index.md): Stay updated with API changes and version history +- [Connectivity](https://docs.kalshi.com/fix/connectivity.md): Connection setup and endpoints for Kalshi FIX API +- [Drop Copy Session](https://docs.kalshi.com/fix/drop-copy.md): Recover missed execution reports and query historical order events +- [Error Handling](https://docs.kalshi.com/fix/error-handling.md): Understanding and handling errors in the FIX protocol +- [FIX API Overview](https://docs.kalshi.com/fix/index.md): Financial Information eXchange (FIX) protocol implementation for Kalshi +- [Market Settlement](https://docs.kalshi.com/fix/market-settlement.md): Settlement reports for market outcomes and position resolution +- [Order Entry Messages](https://docs.kalshi.com/fix/order-entry.md): Submit, modify, and cancel orders through FIX messages +- [Order Group Messages](https://docs.kalshi.com/fix/order-groups.md): Manage order groups for automatic position management +- [RFQ Messages](https://docs.kalshi.com/fix/rfq-messages.md): Request for Quote functionality for market makers +- [Session Management](https://docs.kalshi.com/fix/session-management.md): Managing FIX sessions including logon, logout, and message sequencing +- [API Keys](https://docs.kalshi.com/getting_started/api_keys.md): API Key usage +- [Test In The Demo Environment](https://docs.kalshi.com/getting_started/demo_env.md): Set up and test with Kalshi's demo environment +- [Making Your First Request](https://docs.kalshi.com/getting_started/making_your_first_request.md): Start trading with Kalshi API in under 5 minutes +- [Orderbook Responses](https://docs.kalshi.com/getting_started/orderbook_responses.md): Understanding Kalshi orderbook structure and binary prediction market mechanics +- [Understanding 
Pagination](https://docs.kalshi.com/getting_started/pagination.md): Learn how to navigate through large datasets using cursor-based pagination +- [Quick Start: Authenticated Requests](https://docs.kalshi.com/getting_started/quick_start_authenticated_requests.md): Three simple steps to make your first authenticated API request to Kalshi +- [Quick Start: Create your first order](https://docs.kalshi.com/getting_started/quick_start_create_order.md): Learn how to find markets, place orders, check status, and cancel orders on Kalshi +- [Quick Start: Market Data](https://docs.kalshi.com/getting_started/quick_start_market_data.md): Learn how to access real-time market data without authentication +- [Quick Start: WebSockets](https://docs.kalshi.com/getting_started/quick_start_websockets.md): Learn how to establish and maintain a WebSocket connection to stream real-time market data +- [Rate Limits and Tiers](https://docs.kalshi.com/getting_started/rate_limits.md): Understanding API rate limits and access tiers +- [Subpenny Pricing](https://docs.kalshi.com/getting_started/subpenny_pricing.md): Understanding Kalshi subpenny pricing. +- [Kalshi Glossary](https://docs.kalshi.com/getting_started/terms.md): Core terminology used in the Kalshi exchange +- [Introduction](https://docs.kalshi.com/welcome/index.md): Welcome to the Kalshi API documentation \ No newline at end of file diff --git a/examples/nfl_websocket_streaming.py b/examples/nfl_websocket_streaming.py deleted file mode 100644 index c53467af..00000000 --- a/examples/nfl_websocket_streaming.py +++ /dev/null @@ -1,293 +0,0 @@ -#!/usr/bin/env python3 -""" -NFL WebSocket Streaming Example - -This example demonstrates how to use the Neural SDK's WebSocket functionality -to stream live NFL market data and handle real-time price updates. 
- -Features demonstrated: -- Creating WebSocket connections -- Subscribing to NFL markets -- Handling market data events -- Game-specific streaming -- Team-specific filtering -""" - -import asyncio -import logging -from typing import Dict, Any - -from neural_sdk import NeuralSDK - -# Setup logging -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - - -async def basic_websocket_example(): - """Basic WebSocket streaming example.""" - - logger.info("=== Basic WebSocket Example ===") - - # Initialize SDK - sdk = NeuralSDK.from_env() - - # Create WebSocket client - websocket = sdk.create_websocket() - - # Set up event handlers - @websocket.on_market_data - async def handle_market_updates(market_data: Dict[str, Any]): - """Handle real-time market data updates.""" - ticker = market_data.get('market_ticker', 'Unknown') - yes_price = market_data.get('yes_price', 0) - volume = market_data.get('volume', 0) - - logger.info(f"๐Ÿ“Š {ticker}: ${yes_price:.4f} (Volume: {volume})") - - @websocket.on_trade - async def handle_trades(trade_data: Dict[str, Any]): - """Handle trade executions.""" - ticker = trade_data.get('market_ticker', 'Unknown') - size = trade_data.get('size', 0) - price = trade_data.get('price', 0) - - logger.info(f"๐Ÿ’ฐ Trade: {ticker} - {size} contracts @ ${price:.4f}") - - @websocket.on_connection - async def handle_connection_events(event_data: Dict[str, Any]): - """Handle connection status changes.""" - status = event_data.get('status', 'unknown') - logger.info(f"๐Ÿ”Œ Connection: {status}") - - try: - # Connect to WebSocket - await websocket.connect() - - # Subscribe to NFL markets (pattern matching) - await websocket.subscribe_markets(['KXNFLGAME*']) - - # Stream for 30 seconds - logger.info("๐ŸŽฎ Streaming NFL markets for 30 seconds...") - await asyncio.sleep(30) - - # Get status - status = websocket.get_status() - logger.info(f"๐Ÿ“ˆ WebSocket Status: {status}") - - finally: - await websocket.disconnect() - - -async def 
nfl_game_streaming_example(): - """NFL game-specific streaming example.""" - - logger.info("=== NFL Game Streaming Example ===") - - # Initialize SDK - sdk = NeuralSDK.from_env() - - # Create NFL-specific stream - nfl_stream = sdk.create_nfl_stream() - - try: - # Connect - await nfl_stream.connect() - - # Subscribe to specific game (you'd get this from current NFL schedule) - game_id = "25SEP04DALPHI" # Eagles vs Cowboys example - await nfl_stream.subscribe_to_game(game_id) - - # Stream for 60 seconds - logger.info(f"๐Ÿˆ Streaming NFL game {game_id} for 60 seconds...") - await asyncio.sleep(60) - - # Get game summary - summary = nfl_stream.get_game_summary(game_id) - if summary: - logger.info("๐ŸŽฏ Game Summary:") - logger.info(f" Teams: {summary['away_team']} @ {summary['home_team']}") - logger.info(f" Markets: {summary['markets_count']}") - logger.info(f" Win Probability: {summary.get('win_probability', 'N/A')}") - - # Get active games - active_games = nfl_stream.get_active_games() - logger.info(f"๐Ÿ“‹ Active Games: {active_games}") - - finally: - await nfl_stream.disconnect() - - -async def team_specific_streaming_example(): - """Team-specific streaming example.""" - - logger.info("=== Team Streaming Example ===") - - # Initialize SDK - sdk = NeuralSDK.from_env() - - # Create NFL stream - nfl_stream = sdk.create_nfl_stream() - - # Track team-specific data - eagles_data = {} - - @nfl_stream.websocket.on_market_data - async def track_eagles_markets(market_data: Dict[str, Any]): - """Track Eagles-specific market data.""" - ticker = market_data.get('market_ticker', '') - - if 'PHI' in ticker.upper(): - eagles_data[ticker] = { - 'yes_price': market_data.get('yes_price'), - 'volume': market_data.get('volume', 0), - 'timestamp': market_data.get('timestamp') - } - - logger.info(f"๐Ÿฆ… Eagles Market: {ticker} = ${market_data.get('yes_price', 0):.4f}") - - try: - # Connect - await nfl_stream.connect() - - # Subscribe to Eagles markets - await 
nfl_stream.subscribe_to_team("PHI") - - # Stream for 45 seconds - logger.info("๐Ÿฆ… Streaming Eagles markets for 45 seconds...") - await asyncio.sleep(45) - - # Show Eagles market summary - logger.info(f"๐Ÿ“Š Eagles Markets Tracked: {len(eagles_data)}") - for ticker, data in eagles_data.items(): - logger.info(f" {ticker}: ${data['yes_price']:.4f} (Vol: {data['volume']})") - - finally: - await nfl_stream.disconnect() - - -async def sdk_integrated_streaming_example(): - """Example using SDK's integrated streaming methods.""" - - logger.info("=== SDK Integrated Streaming Example ===") - - # Initialize SDK - sdk = NeuralSDK.from_env() - - # Use SDK's built-in streaming event handlers - @sdk.on_market_data - async def handle_sdk_market_data(market_data): - """Handle market data through SDK.""" - ticker = market_data.get('market_ticker', 'Unknown') - yes_price = market_data.get('yes_price', 0) - - # Only log significant price movements - if yes_price < 0.1 or yes_price > 0.9: - logger.info(f"๐Ÿšจ Extreme Price: {ticker} = ${yes_price:.4f}") - - @sdk.on_trade - async def handle_sdk_trades(trade_data): - """Handle trades through SDK.""" - ticker = trade_data.get('market_ticker', 'Unknown') - size = trade_data.get('size', 0) - - if size > 100: # Large trades - logger.info(f"๐Ÿ‹ Large Trade: {ticker} - {size} contracts") - - try: - # Start streaming using SDK's convenience method - await sdk.start_streaming(['KXNFLGAME*']) - - # Stream for 30 seconds - logger.info("โšก Using SDK integrated streaming for 30 seconds...") - await asyncio.sleep(30) - - finally: - await sdk.stop_streaming() - - -async def price_alert_example(): - """Example with price alerts and filtering.""" - - logger.info("=== Price Alert Example ===") - - # Initialize SDK - sdk = NeuralSDK.from_env() - websocket = sdk.create_websocket() - - # Track price history for alerts - price_history = {} - alert_threshold = 0.05 # 5% price movement - - @websocket.on_market_data - async def 
price_alert_handler(market_data: Dict[str, Any]): - """Handle price alerts.""" - ticker = market_data.get('market_ticker', '') - yes_price = market_data.get('yes_price') - - if not yes_price or 'NFL' not in ticker.upper(): - return - - # Check for significant price movement - if ticker in price_history: - prev_price = price_history[ticker] - price_change = abs(yes_price - prev_price) - - if price_change >= alert_threshold: - direction = "๐Ÿ“ˆ UP" if yes_price > prev_price else "๐Ÿ“‰ DOWN" - logger.warning( - f"{direction} ALERT: {ticker} moved {price_change:.3f} " - f"({prev_price:.3f} โ†’ {yes_price:.3f})" - ) - - price_history[ticker] = yes_price - - try: - # Connect and subscribe - await websocket.connect() - await websocket.subscribe_markets(['KXNFLGAME*']) - - # Stream with alerts for 60 seconds - logger.info("๐Ÿšจ Monitoring for price alerts for 60 seconds...") - await asyncio.sleep(60) - - logger.info(f"๐Ÿ“Š Tracked {len(price_history)} NFL markets") - - finally: - await websocket.disconnect() - - -async def main(): - """Run all examples.""" - - print("=" * 60) - print("Neural SDK WebSocket Streaming Examples") - print("=" * 60) - - try: - # Run examples sequentially - await basic_websocket_example() - await asyncio.sleep(2) - - await nfl_game_streaming_example() - await asyncio.sleep(2) - - await team_specific_streaming_example() - await asyncio.sleep(2) - - await sdk_integrated_streaming_example() - await asyncio.sleep(2) - - await price_alert_example() - - print("\nโœ… All examples completed successfully!") - - except KeyboardInterrupt: - print("\nโน๏ธ Examples interrupted by user") - except Exception as e: - print(f"\nโŒ Error running examples: {e}") - logger.error(f"Example error: {e}", exc_info=True) - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/examples/rest_data_sources_demo.py b/examples/rest_data_sources_demo.py new file mode 100644 index 00000000..b5c26fd8 --- /dev/null +++ b/examples/rest_data_sources_demo.py @@ -0,0 
+1,288 @@ +#!/usr/bin/env python3 +""" +REST Data Sources Demo + +Demonstrates the unified REST API data source framework with +Kalshi, ESPN, and Weather integrations. +""" + +import asyncio +import logging +from datetime import datetime +import os +from pathlib import Path + +# Add parent directory to path for imports +import sys +sys.path.insert(0, str(Path(__file__).parent.parent)) + +from neural_sdk.data_sources.kalshi.rest_adapter import KalshiRESTAdapter +from neural_sdk.data_sources.espn.rest_adapter import ESPNRESTAdapter +from neural_sdk.data_sources.weather.rest_adapter import WeatherRESTAdapter + +# Setup logging +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' +) +logger = logging.getLogger(__name__) + + +async def demo_kalshi_rest(): + """Demonstrate Kalshi REST API adapter.""" + print("\n" + "="*60) + print("๐ŸŽฒ KALSHI REST API DEMO") + print("="*60) + + try: + # Initialize Kalshi adapter + kalshi = KalshiRESTAdapter() + + # Connect + await kalshi.connect() + + # Get NFL markets + print("\n๐Ÿ“Š Fetching NFL markets...") + nfl_markets = await kalshi.get_nfl_markets() + + if "data" in nfl_markets and "markets" in nfl_markets["data"]: + markets = nfl_markets["data"]["markets"][:5] # Show first 5 + print(f"Found {len(nfl_markets['data']['markets'])} NFL markets") + + for market in markets: + print(f"\n โ€ข {market.get('title', 'Unknown')}") + print(f" Ticker: {market.get('ticker')}") + print(f" Yes Price: ${market.get('yes_bid', 0):.2f}") + print(f" Status: {market.get('status')}") + else: + print("No NFL markets found") + + # Get stats + stats = kalshi.get_stats() + print(f"\n๐Ÿ“ˆ Kalshi Stats:") + print(f" โ€ข Total Requests: {stats['total_requests']}") + print(f" โ€ข Cache Hit Rate: {stats['cache_hit_rate']:.1%}") + print(f" โ€ข Average Latency: {stats['average_latency']:.3f}s") + + # Disconnect + await kalshi.disconnect() + + except Exception as e: + logger.error(f"Kalshi demo error: {e}") + 
print(f"โŒ Kalshi demo failed: {e}") + + +async def demo_espn_rest(): + """Demonstrate ESPN REST API adapter.""" + print("\n" + "="*60) + print("๐Ÿˆ ESPN REST API DEMO") + print("="*60) + + try: + # Initialize ESPN adapter + espn = ESPNRESTAdapter() + + # Connect + await espn.connect() + + # Get NFL scoreboard + print("\n๐Ÿ“‹ Fetching NFL scoreboard...") + scoreboard = await espn.get_nfl_games() + + if "games" in scoreboard: + games = scoreboard["games"][:3] # Show first 3 + print(f"Found {len(scoreboard['games'])} NFL games") + + for game in games: + print(f"\n โ€ข {game.get('name', 'Unknown')}") + print(f" Status: {game.get('status')}") + + home = game.get("home_team", {}) + away = game.get("away_team", {}) + + if home and away: + print(f" {away.get('name', 'Away')}: {away.get('score', 0)}") + print(f" {home.get('name', 'Home')}: {home.get('score', 0)}") + else: + print("No games found") + + # Get stats + stats = espn.get_stats() + print(f"\n๐Ÿ“ˆ ESPN Stats:") + print(f" โ€ข Total Requests: {stats['total_requests']}") + print(f" โ€ข Cache Hit Rate: {stats['cache_hit_rate']:.1%}") + + # Disconnect + await espn.disconnect() + + except Exception as e: + logger.error(f"ESPN demo error: {e}") + print(f"โŒ ESPN demo failed: {e}") + + +async def demo_weather_rest(): + """Demonstrate Weather REST API adapter.""" + print("\n" + "="*60) + print("๐ŸŒค๏ธ WEATHER REST API DEMO") + print("="*60) + + try: + # Initialize Weather adapter + weather = WeatherRESTAdapter() + + # Check if API key is configured + if not weather.api_key: + print("โš ๏ธ No OpenWeatherMap API key configured") + print("Set OPENWEATHER_API_KEY environment variable to enable weather features") + return + + # Connect + await weather.connect() + + # Get weather for NFL stadiums + print("\n๐ŸŸ๏ธ Fetching weather for outdoor NFL stadiums...") + + # Demo with a few stadiums + stadiums = ["Lambeau Field", "Arrowhead Stadium", "Soldier Field"] + + for stadium_name in stadiums: + print(f"\n โ€ข 
{stadium_name}") + + result = await weather.get_nfl_stadium_weather(stadium_name) + + if "parsed" in result: + w = result["parsed"] + print(f" Team: {result.get('team', 'Unknown')}") + print(f" Temperature: {w['temperature']['actual']:.1f}ยฐF") + print(f" Wind: {w['wind']['speed']:.1f} mph") + print(f" Conditions: {w['conditions']['description']}") + + if "impact" in result: + impact = result["impact"] + print(f" Impact: {impact['summary']}") + + # Show betting considerations + if impact.get("betting_considerations", {}).get("recommendations"): + print(" Betting notes:") + for rec in impact["betting_considerations"]["recommendations"]: + print(f" - {rec}") + else: + print(f" โŒ Could not get weather data") + + # Get stats + stats = weather.get_stats() + print(f"\n๐Ÿ“ˆ Weather Stats:") + print(f" โ€ข Total Requests: {stats['total_requests']}") + print(f" โ€ข Cache Hit Rate: {stats['cache_hit_rate']:.1%}") + + # Disconnect + await weather.disconnect() + + except Exception as e: + logger.error(f"Weather demo error: {e}") + print(f"โŒ Weather demo failed: {e}") + + +async def demo_unified_data(): + """Demonstrate unified data fetching from multiple sources.""" + print("\n" + "="*60) + print("๐Ÿ”„ UNIFIED DATA FETCHING DEMO") + print("="*60) + + try: + # Initialize all adapters + kalshi = KalshiRESTAdapter() + espn = ESPNRESTAdapter() + weather = WeatherRESTAdapter() + + # Connect all + await asyncio.gather( + kalshi.connect(), + espn.connect(), + weather.connect() if weather.api_key else asyncio.sleep(0) + ) + + print("\n๐Ÿ“Š Fetching data from all sources concurrently...") + + # Fetch from all sources in parallel + results = await asyncio.gather( + kalshi.get_nfl_markets(), + espn.get_nfl_games(), + weather.get_nfl_stadium_weather("Lambeau Field") if weather.api_key else asyncio.sleep(0), + return_exceptions=True + ) + + kalshi_data, espn_data, weather_data = results + + # Process results + print("\nโœ… Data fetched from all sources:") + + if not 
isinstance(kalshi_data, Exception) and "data" in kalshi_data: + print(f" โ€ข Kalshi: {len(kalshi_data['data'].get('markets', []))} markets") + + if not isinstance(espn_data, Exception) and "games" in espn_data: + print(f" โ€ข ESPN: {len(espn_data['games'])} games") + + if weather.api_key and not isinstance(weather_data, Exception) and "parsed" in weather_data: + w = weather_data["parsed"] + print(f" โ€ข Weather: {w['temperature']['actual']:.1f}ยฐF at Lambeau Field") + + # Show combined stats + print("\n๐Ÿ“ˆ Combined Statistics:") + total_requests = 0 + total_cache_hits = 0 + + for adapter in [kalshi, espn, weather]: + stats = adapter.get_stats() + total_requests += stats['total_requests'] + total_cache_hits += stats['cache_hits'] + + print(f" โ€ข Total API Requests: {total_requests}") + print(f" โ€ข Total Cache Hits: {total_cache_hits}") + print(f" โ€ข Overall Cache Rate: {total_cache_hits/total_requests:.1%}" if total_requests > 0 else "N/A") + + # Disconnect all + await asyncio.gather( + kalshi.disconnect(), + espn.disconnect(), + weather.disconnect() if weather.api_key else asyncio.sleep(0) + ) + + except Exception as e: + logger.error(f"Unified demo error: {e}") + print(f"โŒ Unified demo failed: {e}") + + +async def main(): + """Run all REST data source demos.""" + print("\n" + "="*60) + print("๐Ÿš€ NEURAL SDK - REST DATA SOURCES DEMO") + print("="*60) + print("\nThis demo shows the unified REST API framework with:") + print(" โ€ข Kalshi market data") + print(" โ€ข ESPN sports data") + print(" โ€ข Weather impact analysis") + print(" โ€ข Unified data fetching") + + # Run individual demos + await demo_kalshi_rest() + await demo_espn_rest() + await demo_weather_rest() + + # Run unified demo + await demo_unified_data() + + print("\n" + "="*60) + print("โœ… REST DATA SOURCES DEMO COMPLETE") + print("="*60) + print("\nKey Features Demonstrated:") + print(" โœ“ Unified REST interface for all sources") + print(" โœ“ Automatic authentication handling") + print(" โœ“ 
Rate limiting and caching") + print(" โœ“ Concurrent data fetching") + print(" โœ“ Weather impact analysis for betting") + print(" โœ“ Error handling and retries") + + +if __name__ == "__main__": + asyncio.run(main()) \ No newline at end of file diff --git a/neural_sdk/__init__.py b/neural_sdk/__init__.py index dc2553cf..604d5e70 100644 --- a/neural_sdk/__init__.py +++ b/neural_sdk/__init__.py @@ -54,9 +54,6 @@ def my_strategy(market_data): from .strategies import BaseStrategy, StrategySignal from .utils import setup_logging -# Import streaming functionality -from .streaming import NeuralWebSocket, NFLMarketStream, MarketStream - # Convenience imports for common use cases __all__ = [ "NeuralSDK", @@ -79,8 +76,4 @@ def my_strategy(market_data): "StrategySignal", # Utils "setup_logging", - # Streaming functionality - "NeuralWebSocket", - "NFLMarketStream", - "MarketStream", ] diff --git a/neural_sdk/core/client.py b/neural_sdk/core/client.py index 8528b2b3..cd7fc92e 100644 --- a/neural_sdk/core/client.py +++ b/neural_sdk/core/client.py @@ -1221,52 +1221,30 @@ async def health_check(self) -> Dict[str, Any]: return health - # WebSocket Methods + # Streaming Methods - def create_websocket(self): + def create_rest_stream(self, poll_interval: float = 2.0): """ - Create a WebSocket client for real-time market data streaming. + Create a REST API market streamer for real-time data. - Returns: - NeuralWebSocket: WebSocket client instance - - Example: - ```python - sdk = NeuralSDK.from_env() - websocket = sdk.create_websocket() - - @websocket.on_market_data - async def handle_price_update(market_data): - print(f"Price: {market_data['yes_price']}") - - await websocket.connect() - await websocket.subscribe_markets(['NFL-*']) - ``` - """ - from ..streaming.websocket import NeuralWebSocket - return NeuralWebSocket(self.config) - - def create_nfl_stream(self): - """ - Create an NFL-specific market stream. 
+ Args: + poll_interval: Seconds between polls (default 2.0) Returns: - NFLMarketStream: NFL market streaming client + RESTMarketStream: REST-based market streaming client Example: ```python sdk = NeuralSDK.from_env() - nfl_stream = sdk.create_nfl_stream() - - await nfl_stream.connect() - await nfl_stream.subscribe_to_game("25SEP04DALPHI") + streamer = sdk.create_rest_stream(poll_interval=1.5) - game_summary = nfl_stream.get_game_summary("25SEP04DALPHI") - print(f"Win probability: {game_summary['win_probability']}") + # Stream markets + markets = ['KXNFLGAME-25SEP05KCLAC-KC'] + await streamer.stream_markets(markets, duration=300) ``` """ - from ..streaming.market_stream import NFLMarketStream - return NFLMarketStream(self.config) + from ..streaming.rest_stream import RESTMarketStream + return RESTMarketStream(poll_interval=poll_interval) async def start_streaming(self, markets: List[str] = None): """ diff --git a/neural_sdk/data_pipeline/__init__.py b/neural_sdk/data_pipeline/__init__.py index b005354d..dfc04853 100644 --- a/neural_sdk/data_pipeline/__init__.py +++ b/neural_sdk/data_pipeline/__init__.py @@ -1,14 +1,13 @@ """ -Kalshi WebSocket Infrastructure +Kalshi API Infrastructure -A Python library for streaming real-time market data from Kalshi prediction markets. +A Python library for accessing market data from Kalshi prediction markets. 
""" # Public API re-exports (aligned with current package layout) from .config.settings import KalshiConfig, get_config # type: ignore from .data_sources.kalshi.auth import KalshiAuth # type: ignore from .data_sources.kalshi.client import KalshiClient # type: ignore -from .streaming import KalshiWebSocket, MessageHandler, DefaultMessageHandler # type: ignore from .utils import setup_logging # type: ignore __version__ = "1.0.1" @@ -18,8 +17,5 @@ 'get_config', 'KalshiAuth', 'KalshiClient', - 'KalshiWebSocket', - 'MessageHandler', - 'DefaultMessageHandler', 'setup_logging' ] diff --git a/neural_sdk/data_pipeline/config/settings.py b/neural_sdk/data_pipeline/config/settings.py index 7ecb97b6..70e9d06d 100644 --- a/neural_sdk/data_pipeline/config/settings.py +++ b/neural_sdk/data_pipeline/config/settings.py @@ -1,5 +1,5 @@ """ -Kalshi WebSocket Infrastructure - Configuration Settings +Kalshi API Configuration Settings """ import os @@ -21,7 +21,7 @@ @dataclass class KalshiConfig: - """Configuration for Kalshi WebSocket infrastructure""" + """Configuration for Kalshi API""" # API Credentials api_key_id: str @@ -32,14 +32,12 @@ class KalshiConfig: # API URLs api_base_url: str - ws_url: str # Connection Settings - heartbeat_interval: int = 30 # seconds reconnect_attempts: int = 5 reconnect_delay: int = 5 # seconds - # Subscription Settings + # Request Settings max_subscriptions: int = 100 batch_size: int = 10 # for batch operations @@ -70,14 +68,11 @@ def get_config() -> KalshiConfig: # Set URLs based on environment if environment == 'prod': api_base_url = 'https://api.elections.kalshi.com/trade-api/v2/' - ws_url = 'wss://api.elections.kalshi.com/trade-api/ws/v2' else: # demo api_base_url = 'https://demo-api.kalshi.co/trade-api/v2/' - ws_url = 'wss://demo-api.kalshi.co/trade-api/ws/v2' # Override with explicit URL if provided api_base_url = os.getenv('KALSHI_API_BASE', api_base_url) - ws_url = os.getenv('KALSHI_WS_URL', ws_url) # Get credentials api_key_id = 
os.getenv('KALSHI_API_KEY_ID') @@ -128,8 +123,6 @@ def get_config() -> KalshiConfig: private_key=private_key, environment=environment, api_base_url=api_base_url, - ws_url=ws_url, - heartbeat_interval=int(os.getenv('KALSHI_HEARTBEAT_INTERVAL', '30')), reconnect_attempts=int(os.getenv('KALSHI_RECONNECT_ATTEMPTS', '5')), reconnect_delay=int(os.getenv('KALSHI_RECONNECT_DELAY', '5')), max_subscriptions=int(os.getenv('KALSHI_MAX_SUBSCRIPTIONS', '100')), diff --git a/neural_sdk/data_pipeline/data_sources/kalshi/auth.py b/neural_sdk/data_pipeline/data_sources/kalshi/auth.py index cf9bd944..4980537e 100644 --- a/neural_sdk/data_pipeline/data_sources/kalshi/auth.py +++ b/neural_sdk/data_pipeline/data_sources/kalshi/auth.py @@ -37,7 +37,6 @@ def __init__(self, config: Optional[KalshiConfig] = None): environment = os.getenv("KALSHI_ENVIRONMENT", "prod") api_base_url = "https://api.elections.kalshi.com/trade-api/v2" if environment == "prod" else "https://demo-api.kalshi.co/trade-api/v2" - ws_url = "wss://api.elections.kalshi.com/trade-api/ws/v2" if environment == "prod" else "wss://demo-api.kalshi.co/trade-api/ws/v2" # Load private key from file private_key_file = os.getenv("KALSHI_PRIVATE_KEY_FILE", "./keys/kalshi_prod_private.key") @@ -55,8 +54,7 @@ def __init__(self, config: Optional[KalshiConfig] = None): api_key_id=os.getenv("KALSHI_API_KEY_ID"), private_key=private_key, environment=environment, - api_base_url=api_base_url, - ws_url=ws_url + api_base_url=api_base_url ) self.config = config @@ -142,8 +140,8 @@ def get_websocket_headers(self) -> Dict[str, str]: Returns: Dictionary of WebSocket authentication headers """ - # WebSocket connections use GET method - return self.get_auth_headers('GET', '/trade-api/ws/v2') + # WebSocket connections use GET method with root path + return self.get_auth_headers('GET', '/') def validate_timestamp(self, timestamp: str, max_age_seconds: int = 30) -> bool: """ diff --git a/neural_sdk/data_pipeline/data_sources/kalshi/cfb_discovery.py 
b/neural_sdk/data_pipeline/data_sources/kalshi/cfb_discovery.py new file mode 100644 index 00000000..9124747a --- /dev/null +++ b/neural_sdk/data_pipeline/data_sources/kalshi/cfb_discovery.py @@ -0,0 +1,307 @@ +""" +College Football Market Discovery Service +Discovers college football markets using the Kalshi Events and Markets API +""" + +import logging +from typing import List, Dict, Any, Optional +from datetime import datetime, timedelta, date + +from .client import KalshiClient + +logger = logging.getLogger(__name__) + + +class CFBMarketDiscovery: + """Service for discovering College Football markets through the Events โ†’ Markets hierarchy""" + + # Major conferences + CONFERENCES = { + 'SEC': ['Alabama', 'Georgia', 'LSU', 'Florida', 'Tennessee', 'Auburn', 'Texas A&M', + 'Mississippi', 'South Carolina', 'Arkansas', 'Kentucky', 'Missouri', 'Vanderbilt'], + 'Big Ten': ['Ohio State', 'Michigan', 'Penn State', 'Wisconsin', 'Iowa', 'Minnesota', + 'Michigan State', 'Indiana', 'Northwestern', 'Illinois', 'Purdue', 'Nebraska', + 'Maryland', 'Rutgers'], + 'ACC': ['Clemson', 'Florida State', 'Miami', 'North Carolina', 'NC State', 'Virginia Tech', + 'Virginia', 'Louisville', 'Pittsburgh', 'Syracuse', 'Boston College', 'Wake Forest', + 'Duke', 'Georgia Tech'], + 'Big 12': ['Oklahoma', 'Texas', 'Oklahoma State', 'Baylor', 'TCU', 'Kansas State', + 'Iowa State', 'West Virginia', 'Kansas', 'Texas Tech'], + 'Pac-12': ['USC', 'UCLA', 'Oregon', 'Washington', 'Stanford', 'California', 'Utah', + 'Arizona State', 'Arizona', 'Colorado', 'Washington State', 'Oregon State'], + 'Independent': ['Notre Dame', 'BYU', 'Army', 'Navy', 'Air Force'] + } + + def __init__(self, client: Optional[KalshiClient] = None): + """ + Initialize CFB market discovery service + + Args: + client: Optional KalshiClient instance + """ + self.client = client or KalshiClient() + self.cfb_game_series = "KXNCAAFGAME" + self.cfb_championship_series = "KXNCAAF" + + def get_all_cfb_events(self, status: str = "open") 
-> List[Dict[str, Any]]: + """ + Get all College Football events + + Args: + status: Event status filter (open, closed, settled) + + Returns: + List of CFB events + """ + try: + logger.info(f"Fetching CFB events with status: {status}") + + all_events = [] + cursor = None + + while True: + params = { + 'limit': 200, + 'status': status, + 'series_ticker': self.cfb_game_series, + 'with_nested_markets': True + } + + if cursor: + params['cursor'] = cursor + + response = self.client.get('/events', params=params) + events = response.get('events', []) + all_events.extend(events) + + cursor = response.get('cursor') + if not cursor or len(events) < 200: + break + + logger.info(f"Found {len(all_events)} CFB events") + return all_events + + except Exception as e: + logger.error(f"Error fetching CFB events: {e}") + return [] + + def get_events_by_date(self, target_date: Optional[date] = None, status: str = "open") -> List[Dict[str, Any]]: + """ + Get CFB events for a specific date + + Args: + target_date: Date to filter events (None for today) + status: Event status filter + + Returns: + List of events for the specified date + """ + if target_date is None: + target_date = datetime.now().date() + + # Get all events + all_events = self.get_all_cfb_events(status) + + # Filter by date + date_events = [] + next_day = target_date + timedelta(days=1) + + for event in all_events: + # Check expected expiration time + exp_time_str = event.get('expected_expiration_time') + if exp_time_str: + try: + exp_time = datetime.fromisoformat(exp_time_str.replace('Z', '+00:00')) + event_date = exp_time.date() + + # Games typically expire shortly after they end + # So check if the expiration is on target date or next day + if target_date <= event_date <= next_day: + date_events.append(event) + except Exception as e: + logger.debug(f"Error parsing date for event: {e}") + + logger.info(f"Found {len(date_events)} events for {target_date}") + return date_events + + def get_team_events(self, team_name: 
str, status: str = "open") -> List[Dict[str, Any]]: + """ + Get all events for a specific team + + Args: + team_name: Team name to search for + status: Event status filter + + Returns: + List of events involving the team + """ + all_events = self.get_all_cfb_events(status) + + team_events = [] + team_upper = team_name.upper() + + for event in all_events: + title = event.get('title', '') + + # Check if team is in the title + if team_upper in title.upper(): + team_events.append(event) + logger.info(f"Found team event: {event.get('ticker')} - {title}") + + logger.info(f"Found {len(team_events)} events for team {team_name}") + return team_events + + def get_conference_events(self, conference: str, status: str = "open") -> List[Dict[str, Any]]: + """ + Get all events for teams in a specific conference + + Args: + conference: Conference name (SEC, Big Ten, ACC, etc.) + status: Event status filter + + Returns: + List of events for conference teams + """ + if conference not in self.CONFERENCES: + logger.warning(f"Unknown conference: {conference}") + return [] + + conference_teams = self.CONFERENCES[conference] + all_events = self.get_all_cfb_events(status) + + conference_events = [] + + for event in all_events: + title = event.get('title', '').upper() + + # Check if any conference team is in the title + for team in conference_teams: + if team.upper() in title: + conference_events.append(event) + break + + logger.info(f"Found {len(conference_events)} events for {conference} conference") + return conference_events + + def get_game_markets(self, home_team: str, away_team: str, status: str = "open") -> List[str]: + """ + Get market tickers for a specific game + + Args: + home_team: Home team name or abbreviation + away_team: Away team name or abbreviation + status: Market status filter + + Returns: + List of market tickers for the game + """ + logger.info(f"Getting markets for {away_team} @ {home_team}") + + # Get all events + all_events = self.get_all_cfb_events(status) + + # 
Find matching game + home_upper = home_team.upper() + away_upper = away_team.upper() + + for event in all_events: + title = event.get('title', '').upper() + + # Check if both teams are in the title + if home_upper in title and away_upper in title: + # Extract market tickers from nested markets + markets = event.get('markets', []) + tickers = [m.get('ticker') for m in markets if m.get('ticker')] + + logger.info(f"Found {len(tickers)} markets for {away_team} @ {home_team}") + return tickers + + logger.info(f"No markets found for {away_team} @ {home_team}") + return [] + + def get_events_with_markets(self, status: str = "open") -> Dict[str, List[str]]: + """ + Get all events with their associated market tickers + + Args: + status: Event status filter + + Returns: + Dictionary mapping event titles to market tickers + """ + events = self.get_all_cfb_events(status) + + event_markets = {} + + for event in events: + title = event.get('title', 'Unknown') + markets = event.get('markets', []) + + if markets: + tickers = [m.get('ticker') for m in markets if m.get('ticker')] + event_markets[title] = tickers + + return event_markets + + def get_championship_markets(self) -> List[Dict[str, Any]]: + """ + Get College Football Championship/Playoff markets + + Returns: + List of championship markets + """ + try: + logger.info("Fetching CFB Championship markets") + + params = { + 'series_ticker': self.cfb_championship_series, + 'status': 'open', + 'limit': 200 + } + + response = self.client.get('/markets', params=params) + markets = response.get('markets', []) + + logger.info(f"Found {len(markets)} championship markets") + return markets + + except Exception as e: + logger.error(f"Error fetching championship markets: {e}") + return [] + + def format_game_info(self, event: Dict[str, Any]) -> Dict[str, Any]: + """ + Format event information for display + + Args: + event: Event data + + Returns: + Formatted game information + """ + markets = event.get('markets', []) + + # Extract team 
names from markets + teams = set() + for market in markets: + if market.get('yes_sub_title'): + teams.add(market['yes_sub_title']) + + # Parse date + exp_time_str = event.get('expected_expiration_time') + game_date = None + if exp_time_str: + try: + exp_time = datetime.fromisoformat(exp_time_str.replace('Z', '+00:00')) + game_date = exp_time.date() + except: + pass + + return { + 'title': event.get('title', 'Unknown Game'), + 'ticker': event.get('ticker'), + 'teams': list(teams), + 'date': game_date, + 'market_count': len(markets), + 'markets': [m.get('ticker') for m in markets if m.get('ticker')] + } \ No newline at end of file diff --git a/neural_sdk/data_pipeline/data_sources/kalshi/client.py b/neural_sdk/data_pipeline/data_sources/kalshi/client.py index 1cf9966b..b4ad6a55 100644 --- a/neural_sdk/data_pipeline/data_sources/kalshi/client.py +++ b/neural_sdk/data_pipeline/data_sources/kalshi/client.py @@ -42,20 +42,25 @@ def __init__(self, config: Optional[KalshiConfig] = None, auth: Optional[KalshiA environment = os.getenv("KALSHI_ENVIRONMENT", "prod") # FORCE PRODUCTION ENDPOINTS - NO DEMO ALLOWED api_base_url = "https://api.elections.kalshi.com/trade-api/v2" - ws_url = "wss://api.elections.kalshi.com/trade-api/ws/v2" # Log which endpoint we're using print(f"๐Ÿญ Kalshi Client: Forcing production endpoints") print(f"๐Ÿ”— API: {api_base_url}") - print(f"๐Ÿ”Œ WebSocket: {ws_url}") print(f"๐ŸŒ Environment variable: {environment}") + # Load private key from file if not in environment + private_key = os.getenv("KALSHI_PRIVATE_KEY") + if not private_key: + private_key_file = os.getenv("KALSHI_PRIVATE_KEY_FILE") + if private_key_file: + with open(private_key_file, 'r') as f: + private_key = f.read() + config = KalshiConfig( api_key_id=os.getenv("KALSHI_API_KEY_ID"), - private_key=os.getenv("KALSHI_PRIVATE_KEY"), + private_key=private_key, environment=environment, - api_base_url=api_base_url, - ws_url=ws_url + api_base_url=api_base_url ) self.config = config @@ -225,7 
+230,9 @@ def get_events( self, limit: int = 100, cursor: Optional[str] = None, - status: Optional[str] = None + status: Optional[str] = None, + series_ticker: Optional[str] = None, + with_nested_markets: bool = False ) -> Dict[str, Any]: """ Get events from the API @@ -234,6 +241,8 @@ def get_events( limit: Number of events to retrieve cursor: Pagination cursor status: Filter by event status + series_ticker: Filter by series ticker + with_nested_markets: Include nested market data in response Returns: Events response @@ -243,9 +252,36 @@ def get_events( params['cursor'] = cursor if status: params['status'] = status + if series_ticker: + params['series_ticker'] = series_ticker + if with_nested_markets: + params['with_nested_markets'] = True return self.get('/events', params=params) + def get_events_with_markets( + self, + series_ticker: Optional[str] = None, + status: str = 'open' + ) -> List[Dict[str, Any]]: + """ + Get events with their nested markets + + Args: + series_ticker: Filter by series ticker (e.g., 'KXNFLGAME') + status: Event status filter + + Returns: + List of events with nested markets + """ + response = self.get_events( + limit=200, + status=status, + series_ticker=series_ticker, + with_nested_markets=True + ) + return response.get('events', []) + def get_series( self, limit: int = 100, diff --git a/neural_sdk/data_pipeline/data_sources/kalshi/nfl_discovery.py b/neural_sdk/data_pipeline/data_sources/kalshi/nfl_discovery.py new file mode 100644 index 00000000..43817df9 --- /dev/null +++ b/neural_sdk/data_pipeline/data_sources/kalshi/nfl_discovery.py @@ -0,0 +1,260 @@ +""" +NFL Market Discovery Service +Discovers actual NFL markets using the Kalshi Events and Markets API +""" + +import logging +from typing import List, Dict, Any, Optional +from datetime import datetime + +from .client import KalshiClient + +logger = logging.getLogger(__name__) + + +class NFLMarketDiscovery: + """Service for discovering NFL markets through the Events โ†’ Markets 
hierarchy""" + + def __init__(self, client: Optional[KalshiClient] = None): + """ + Initialize NFL market discovery service + + Args: + client: Optional KalshiClient instance + """ + self.client = client or KalshiClient() + self.nfl_series_ticker = "KXNFLGAME" + + def get_all_nfl_events(self, status: str = "open") -> List[Dict[str, Any]]: + """ + Get all NFL events + + Args: + status: Event status filter (open, closed, settled) + + Returns: + List of NFL events + """ + try: + logger.info(f"Fetching NFL events with status: {status}") + + all_events = [] + cursor = None + + while True: + params = { + 'limit': 200, + 'status': status, + 'series_ticker': self.nfl_series_ticker, + 'with_nested_markets': True # Include markets in response + } + + if cursor: + params['cursor'] = cursor + + response = self.client.get('/events', params=params) + events = response.get('events', []) + all_events.extend(events) + + cursor = response.get('cursor') + if not cursor: + break + + logger.info(f"Found {len(all_events)} NFL events") + return all_events + + except Exception as e: + logger.error(f"Failed to get NFL events: {e}") + return [] + + def get_team_game_events(self, team_code: str, status: str = "open") -> List[Dict[str, Any]]: + """ + Get events for a specific NFL team + + Args: + team_code: Team code (e.g., 'KC', 'LAC', 'PHI') + status: Event status filter + + Returns: + List of events involving the team + """ + all_events = self.get_all_nfl_events(status) + team_events = [] + + team_code_upper = team_code.upper() + + for event in all_events: + event_ticker = event.get('event_ticker', '') + title = event.get('title', '') + + # Check if team is in event ticker or title + if team_code_upper in event_ticker.upper() or team_code_upper in title.upper(): + team_events.append(event) + logger.info(f"Found team event: {event_ticker} - {title}") + + logger.info(f"Found {len(team_events)} events for team {team_code}") + return team_events + + def get_event_markets(self, event_ticker: 
str) -> List[str]: + """ + Get all market tickers for a specific event + + Args: + event_ticker: Event ticker to get markets for + + Returns: + List of market tickers + """ + try: + logger.info(f"Fetching markets for event: {event_ticker}") + + response = self.client.get_markets( + event_ticker=event_ticker, + status='open', + limit=200 + ) + + markets = response.get('markets', []) + market_tickers = [market.get('ticker') for market in markets if market.get('ticker')] + + logger.info(f"Found {len(market_tickers)} markets for event {event_ticker}") + for ticker in market_tickers[:5]: # Log first 5 + logger.debug(f" - {ticker}") + + return market_tickers + + except Exception as e: + logger.error(f"Failed to get markets for event {event_ticker}: {e}") + return [] + + def get_team_markets(self, team_code: str, status: str = "open") -> List[str]: + """ + Complete flow to get all market tickers for a team + + Args: + team_code: Team code (e.g., 'KC', 'LAC') + status: Status filter for events/markets + + Returns: + List of exact market tickers + """ + logger.info(f"Getting all markets for team: {team_code}") + + # Step 1: Get events for the team + team_events = self.get_team_game_events(team_code, status) + + if not team_events: + logger.warning(f"No events found for team {team_code}") + return [] + + # Step 2: Get markets for each event + all_market_tickers = [] + + for event in team_events: + event_ticker = event.get('event_ticker') + + # Check if markets are already included (with_nested_markets=True) + if 'markets' in event: + markets = event.get('markets', []) + tickers = [m.get('ticker') for m in markets if m.get('ticker')] + all_market_tickers.extend(tickers) + logger.info(f"Event {event_ticker} has {len(tickers)} nested markets") + else: + # Fetch markets separately + tickers = self.get_event_markets(event_ticker) + all_market_tickers.extend(tickers) + + # Remove duplicates + unique_tickers = list(set(all_market_tickers)) + + logger.info(f"Found 
{len(unique_tickers)} unique markets for team {team_code}") + return unique_tickers + + def get_game_markets(self, home_team: str, away_team: str, status: str = "open") -> List[str]: + """ + Get markets for a specific game between two teams + + Args: + home_team: Home team code + away_team: Away team code + status: Market status filter + + Returns: + List of market tickers for the game + """ + logger.info(f"Getting markets for {away_team} @ {home_team}") + + # Get markets for both teams + home_markets = set(self.get_team_markets(home_team, status)) + away_markets = set(self.get_team_markets(away_team, status)) + + # Find intersection (markets that involve both teams) + game_markets = list(home_markets & away_markets) + + if not game_markets: + logger.warning(f"No specific game markets found for {away_team} @ {home_team}") + # Return union instead if no intersection + game_markets = list(home_markets | away_markets) + + logger.info(f"Found {len(game_markets)} markets for {away_team} @ {home_team}") + return game_markets + + def discover_all_nfl_markets(self, status: str = "open") -> Dict[str, List[str]]: + """ + Discover all NFL markets organized by event + + Args: + status: Market status filter + + Returns: + Dictionary mapping event_ticker to list of market tickers + """ + logger.info("Discovering all NFL markets...") + + events = self.get_all_nfl_events(status) + markets_by_event = {} + + for event in events: + event_ticker = event.get('event_ticker') + + if 'markets' in event: + # Markets included in response + markets = event.get('markets', []) + tickers = [m.get('ticker') for m in markets if m.get('ticker')] + else: + # Fetch markets separately + tickers = self.get_event_markets(event_ticker) + + if tickers: + markets_by_event[event_ticker] = tickers + + total_markets = sum(len(tickers) for tickers in markets_by_event.values()) + logger.info(f"Discovered {total_markets} total markets across {len(markets_by_event)} events") + + return markets_by_event + + def 
search_markets_by_pattern(self, pattern: str) -> List[str]: + """ + Search for markets containing a pattern (for backwards compatibility) + Note: This fetches all markets first, so it's less efficient than team-specific methods + + Args: + pattern: Pattern to search for in market tickers + + Returns: + List of matching market tickers + """ + logger.info(f"Searching for markets matching pattern: {pattern}") + + all_markets = self.discover_all_nfl_markets() + matching_tickers = [] + + pattern_upper = pattern.upper() + + for event_ticker, market_tickers in all_markets.items(): + for ticker in market_tickers: + if pattern_upper in ticker.upper(): + matching_tickers.append(ticker) + + logger.info(f"Found {len(matching_tickers)} markets matching pattern {pattern}") + return matching_tickers \ No newline at end of file diff --git a/neural_sdk/data_pipeline/sports_config.py b/neural_sdk/data_pipeline/sports_config.py index c42a623a..577a6710 100644 --- a/neural_sdk/data_pipeline/sports_config.py +++ b/neural_sdk/data_pipeline/sports_config.py @@ -4,13 +4,14 @@ """ from dataclasses import dataclass -from typing import Dict, List +from typing import Dict, List, Optional from enum import Enum class Sport(Enum): """Supported sports""" NFL = "nfl" + NCAAF = "ncaaf" CFP = "cfp" @@ -22,6 +23,7 @@ class SportConfig: series_ticker: str season_active: bool = True market_types: List[str] = None + championship_ticker: Optional[str] = None def __post_init__(self): if self.market_types is None: @@ -35,7 +37,17 @@ def __post_init__(self): display_name="NFL", series_ticker="KXNFLGAME", season_active=True, - market_types=["game_winner", "spread", "total", "player_props"] + market_types=["game_winner", "spread", "total", "player_props"], + championship_ticker="KXSB" # Super Bowl + ), + + Sport.NCAAF: SportConfig( + name="ncaaf", + display_name="NCAA Football", + series_ticker="KXNCAAFGAME", # Individual games + season_active=True, + market_types=["game_winner", "spread", "total"], + 
championship_ticker="KXNCAAF" # Championship winner ), Sport.CFP: SportConfig( diff --git a/neural_sdk/data_pipeline/streaming/__init__.py b/neural_sdk/data_pipeline/streaming/__init__.py deleted file mode 100644 index f11cb00c..00000000 --- a/neural_sdk/data_pipeline/streaming/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -""" -Kalshi WebSocket Infrastructure - Streaming Module -""" - -from .websocket import KalshiWebSocket -from .handlers import MessageHandler, DefaultMessageHandler - -__all__ = ['KalshiWebSocket', 'MessageHandler', 'DefaultMessageHandler'] \ No newline at end of file diff --git a/neural_sdk/data_pipeline/streaming/handlers.py b/neural_sdk/data_pipeline/streaming/handlers.py deleted file mode 100644 index 16ae2c7d..00000000 --- a/neural_sdk/data_pipeline/streaming/handlers.py +++ /dev/null @@ -1,184 +0,0 @@ -""" -Kalshi WebSocket Infrastructure - Message Handlers -Process different types of WebSocket messages -""" - -import logging -from abc import ABC, abstractmethod -from typing import Dict, Any, Optional, Callable -from datetime import datetime - -logger = logging.getLogger(__name__) - - -class MessageHandler(ABC): - """Abstract base class for WebSocket message handlers""" - - @abstractmethod - async def handle_message(self, message: Dict[str, Any]) -> None: - """ - Handle a WebSocket message - - Args: - message: Parsed JSON message from WebSocket - """ - pass - - -class DefaultMessageHandler(MessageHandler): - """Default message handler that logs and processes standard message types""" - - def __init__(self): - """Initialize the default message handler""" - self.ticker_callback: Optional[Callable] = None - self.trade_callback: Optional[Callable] = None - self.orderbook_callback: Optional[Callable] = None - self.error_callback: Optional[Callable] = None - - # Statistics - self.message_count = 0 - self.last_message_time = None - - async def handle_message(self, message: Dict[str, Any]) -> None: - """ - Handle incoming WebSocket message - - Args: - 
message: Parsed JSON message - """ - self.message_count += 1 - self.last_message_time = datetime.now() - - # Get message type - msg_type = message.get('type') - - if msg_type == 'ticker': - await self._handle_ticker(message) - elif msg_type == 'trade': - await self._handle_trade(message) - elif msg_type == 'orderbook_delta': - await self._handle_orderbook(message) - elif msg_type == 'subscribed': - await self._handle_subscribed(message) - elif msg_type == 'error': - await self._handle_error(message) - else: - logger.debug(f"Received message type: {msg_type}") - - async def _handle_ticker(self, message: Dict[str, Any]) -> None: - """Handle ticker update message""" - data = message.get('msg', {}) - market_ticker = data.get('market_ticker') - - # Convert prices from centi-cents to dollars - price_data = { - 'market_ticker': market_ticker, - 'yes_price': self._convert_price(data.get('yes_ask')), - 'no_price': self._convert_price(data.get('no_ask')), - 'yes_bid': self._convert_price(data.get('yes_bid')), - 'no_bid': self._convert_price(data.get('no_bid')), - 'volume': data.get('volume'), - 'open_interest': data.get('open_interest'), - 'timestamp': datetime.now().isoformat() - } - - logger.info(f"Ticker update for {market_ticker}: Yes=${price_data['yes_price']:.4f}, No=${price_data['no_price']:.4f}") - - if self.ticker_callback: - await self.ticker_callback(price_data) - - async def _handle_trade(self, message: Dict[str, Any]) -> None: - """Handle trade execution message""" - data = message.get('msg', {}) - market_ticker = data.get('market_ticker') - - trade_data = { - 'market_ticker': market_ticker, - 'trade_id': data.get('trade_id'), - 'price': self._convert_price(data.get('yes_price') or data.get('no_price')), - 'size': data.get('count'), - 'side': data.get('taker_side'), - 'timestamp': data.get('ts_millis') - } - - logger.info(f"Trade on {market_ticker}: {trade_data['size']} @ ${trade_data['price']:.4f}") - - if self.trade_callback: - await 
self.trade_callback(trade_data) - - async def _handle_orderbook(self, message: Dict[str, Any]) -> None: - """Handle orderbook update message""" - data = message.get('msg', {}) - market_ticker = data.get('market_ticker') - - orderbook_data = { - 'market_ticker': market_ticker, - 'yes_bids': self._convert_orderbook_levels(data.get('yes_bids', [])), - 'yes_asks': self._convert_orderbook_levels(data.get('yes_asks', [])), - 'no_bids': self._convert_orderbook_levels(data.get('no_bids', [])), - 'no_asks': self._convert_orderbook_levels(data.get('no_asks', [])), - 'timestamp': datetime.now().isoformat() - } - - logger.debug(f"Orderbook update for {market_ticker}") - - if self.orderbook_callback: - await self.orderbook_callback(orderbook_data) - - async def _handle_subscribed(self, message: Dict[str, Any]) -> None: - """Handle subscription confirmation""" - data = message.get('msg', {}) - markets = data.get('markets', []) - channels = data.get('channels', []) - - logger.info(f"Subscription confirmed for {len(markets)} markets on channels: {channels}") - - async def _handle_error(self, message: Dict[str, Any]) -> None: - """Handle error message""" - error = message.get('error', {}) - code = error.get('code') - msg = error.get('message') - - logger.error(f"WebSocket error {code}: {msg}") - - if self.error_callback: - await self.error_callback(error) - - def _convert_price(self, centi_cents: Optional[int]) -> Optional[float]: - """Convert price from centi-cents to dollars""" - if centi_cents is None: - return None - return round(centi_cents / 10000, 4) - - def _convert_orderbook_levels(self, levels: list) -> list: - """Convert orderbook price levels from centi-cents""" - converted = [] - for level in levels: - converted.append([ - self._convert_price(level[0]), # price - level[1] # quantity - ]) - return converted - - def set_ticker_callback(self, callback: Callable) -> None: - """Set callback for ticker updates""" - self.ticker_callback = callback - - def 
set_trade_callback(self, callback: Callable) -> None: - """Set callback for trade updates""" - self.trade_callback = callback - - def set_orderbook_callback(self, callback: Callable) -> None: - """Set callback for orderbook updates""" - self.orderbook_callback = callback - - def set_error_callback(self, callback: Callable) -> None: - """Set callback for errors""" - self.error_callback = callback - - def get_stats(self) -> Dict[str, Any]: - """Get handler statistics""" - return { - 'message_count': self.message_count, - 'last_message_time': self.last_message_time.isoformat() if self.last_message_time else None - } \ No newline at end of file diff --git a/neural_sdk/data_pipeline/streaming/websocket.py b/neural_sdk/data_pipeline/streaming/websocket.py deleted file mode 100644 index fd7aa15f..00000000 --- a/neural_sdk/data_pipeline/streaming/websocket.py +++ /dev/null @@ -1,411 +0,0 @@ -""" -Kalshi WebSocket Infrastructure - WebSocket Client -Handles real-time market data streaming from Kalshi -With circuit breaker protection, flow control, and automatic reconnection -""" - -import asyncio -import json -import logging -import time -from typing import List, Optional, Dict, Any - -import websockets -from websockets.client import WebSocketClientProtocol - -from ..config import KalshiConfig -from ..data_sources.kalshi.auth import KalshiAuth -from .handlers import MessageHandler, DefaultMessageHandler -from ..reliability.circuit_breaker import CircuitBreaker, CircuitBreakerConfig, CircuitOpenException -from ..reliability.flow_controller import CreditBasedFlowController -from ..reliability.rate_limiter import TokenBucket - -logger = logging.getLogger(__name__) - - -class KalshiWebSocket: - """WebSocket client for Kalshi market data streaming""" - - def __init__( - self, - config: Optional[KalshiConfig] = None, - auth: Optional[KalshiAuth] = None, - message_handler: Optional[MessageHandler] = None, - enable_circuit_breaker: bool = True, - auto_reconnect: bool = True, - 
enable_flow_control: bool = True - ): - """ - Initialize WebSocket client - - Args: - config: Optional KalshiConfig instance - auth: Optional KalshiAuth instance - message_handler: Optional custom message handler - enable_circuit_breaker: Enable circuit breaker protection - auto_reconnect: Enable automatic reconnection - """ - if config is None: - from ..config import get_config - config = get_config() - - self.config = config - self.auth = auth or KalshiAuth(config) - self.message_handler = message_handler or DefaultMessageHandler() - - self.ws: Optional[WebSocketClientProtocol] = None - self.subscribed_markets: set = set() - self.running = False - self._heartbeat_task = None - self._receive_task = None - - # Circuit breaker configuration - self.enable_circuit_breaker = enable_circuit_breaker - self.auto_reconnect = auto_reconnect - - if enable_circuit_breaker: - breaker_config = CircuitBreakerConfig( - name="kalshi_websocket", - failure_threshold=3, # 3 failures trigger open - success_threshold=2, # 2 successes to close - timeout=30.0, - half_open_interval=10.0, - window_size=60 - ) - self.circuit_breaker = CircuitBreaker(breaker_config) - else: - self.circuit_breaker = None - - # Connection stats - self.connection_attempts = 0 - self.last_connection_time: Optional[float] = None - self.reconnect_task: Optional[asyncio.Task] = None - - # Flow control and rate limiting - self.enable_flow_control = enable_flow_control - if enable_flow_control: - self.flow_controller = CreditBasedFlowController( - initial_credits=1000, - max_credits=5000, - refill_rate=100, # 100 credits/second - window_size=500 - ) - # Set flow control callbacks - self.flow_controller.on_pause = self._handle_flow_pause - self.flow_controller.on_resume = self._handle_flow_resume - else: - self.flow_controller = None - - # Subscription rate limiter (10 subscriptions per second max) - self.subscription_limiter = TokenBucket( - capacity=10, - refill_rate=1.0, # 1 subscription per second - burst_size=10 
- ) - - async def connect(self) -> None: - """Connect to Kalshi WebSocket with circuit breaker protection""" - if self.enable_circuit_breaker: - try: - await self.circuit_breaker.call(self._connect_internal) - except CircuitOpenException: - logger.warning("Circuit breaker is open, connection attempt blocked") - if self.auto_reconnect: - await self._schedule_reconnect() - raise - else: - await self._connect_internal() - - async def _connect_internal(self) -> None: - """Internal connection logic""" - try: - self.connection_attempts += 1 - start_time = time.time() - - # Get authentication headers - headers = self.auth.get_websocket_headers() - - # Connect with authentication - # Use additional_headers instead of extra_headers for compatibility - try: - self.ws = await websockets.connect( - self.config.ws_url, - additional_headers=headers - ) - except TypeError: - # Fallback if additional_headers is not supported - self.ws = await websockets.connect( - self.config.ws_url - ) - - self.running = True - self.last_connection_time = time.time() - connection_time = self.last_connection_time - start_time - - logger.info( - f"Connected to Kalshi WebSocket at {self.config.ws_url} " - f"(attempt {self.connection_attempts}, {connection_time:.2f}s)" - ) - - # Reset connection attempts on success - self.connection_attempts = 0 - - # Re-subscribe to markets if we had subscriptions - if self.subscribed_markets: - markets = list(self.subscribed_markets) - self.subscribed_markets.clear() - await self.subscribe_markets(markets) - - # Start heartbeat and receive tasks - self._heartbeat_task = asyncio.create_task(self._heartbeat()) - self._receive_task = asyncio.create_task(self._receive_messages()) - - except Exception as e: - logger.error(f"Failed to connect to WebSocket: {e}") - - # Schedule reconnect if enabled - if self.auto_reconnect: - await self._schedule_reconnect() - - raise - - async def disconnect(self) -> None: - """Disconnect from WebSocket""" - self.running = False - - # 
Cancel tasks - if self._heartbeat_task: - self._heartbeat_task.cancel() - if self._receive_task: - self._receive_task.cancel() - - # Close WebSocket - if self.ws: - await self.ws.close() - self.ws = None - - logger.info("Disconnected from Kalshi WebSocket") - - async def subscribe_markets(self, tickers: List[str]) -> None: - """ - Subscribe to market tickers for real-time updates - - Args: - tickers: List of market tickers to subscribe to - """ - if not self.ws: - raise RuntimeError("WebSocket not connected") - - # Apply subscription rate limiting - for ticker in tickers: - if not self.subscription_limiter.try_consume(1): - # Wait for rate limit - await self.subscription_limiter.consume(1, timeout=5.0) - - # Build subscription message - message = { - "id": 1, - "cmd": "subscribe", - "params": { - "channels": ["ticker", "orderbook_delta", "trade"], - "market_tickers": tickers - } - } - - # Send subscription - await self.ws.send(json.dumps(message)) - - # Track subscriptions - self.subscribed_markets.update(tickers) - - logger.info(f"Subscribed to markets: {tickers}") - - async def unsubscribe_markets(self, tickers: List[str]) -> None: - """ - Unsubscribe from market tickers - - Args: - tickers: List of market tickers to unsubscribe from - """ - if not self.ws: - raise RuntimeError("WebSocket not connected") - - # Build unsubscribe message - message = { - "id": 2, - "cmd": "unsubscribe", - "params": { - "channels": ["ticker", "orderbook_delta", "trade"], - "market_tickers": tickers - } - } - - # Send unsubscribe - await self.ws.send(json.dumps(message)) - - # Update tracked subscriptions - for ticker in tickers: - self.subscribed_markets.discard(ticker) - - logger.info(f"Unsubscribed from markets: {tickers}") - - async def _heartbeat(self) -> None: - """Send periodic heartbeat to keep connection alive""" - while self.running: - try: - await asyncio.sleep(self.config.heartbeat_interval) - - if self.ws: - # Send ping - pong_waiter = await self.ws.ping() - await 
asyncio.wait_for(pong_waiter, timeout=10) - logger.debug("Heartbeat sent and pong received") - - # Record successful heartbeat if circuit breaker enabled - if self.circuit_breaker: - self.circuit_breaker.metrics.successful_calls += 1 - - except asyncio.CancelledError: - break - except Exception as e: - logger.warning(f"Heartbeat failed: {e}") - - # Record failure if circuit breaker enabled - if self.circuit_breaker: - await self.circuit_breaker._on_failure() - - # Connection might be lost, trigger reconnect - if self.running: - if self.auto_reconnect: - await self._schedule_reconnect() - else: - await self._reconnect() - - async def _receive_messages(self) -> None: - """Receive and process messages from WebSocket""" - while self.running: - try: - if not self.ws: - await asyncio.sleep(1) - continue - - # Receive message - message = await self.ws.recv() - - # Apply flow control if enabled - if self.flow_controller: - # Check if we have credits to process - if not self.flow_controller.consume_credits(1): - logger.warning("Message dropped due to flow control") - continue - - # Track processing start time - start_time = time.time() - - # Parse JSON - data = json.loads(message) - - # Process message through handler - await self.message_handler.handle_message(data) - - # Update flow control metrics - if self.flow_controller: - processing_time_ms = (time.time() - start_time) * 1000 - self.flow_controller.release_credits(1) - self.flow_controller.update_metrics(processing_time_ms) - - except asyncio.CancelledError: - break - except websockets.exceptions.ConnectionClosed: - logger.warning("WebSocket connection closed") - if self.running: - await self._reconnect() - except json.JSONDecodeError as e: - logger.error(f"Failed to parse message: {e}") - except Exception as e: - logger.error(f"Error receiving message: {e}") - - async def _reconnect(self) -> None: - """Reconnect to WebSocket with exponential backoff""" - if not self.running: - return - - for attempt in 
range(self.config.reconnect_attempts): - try: - logger.info(f"Reconnection attempt {attempt + 1}/{self.config.reconnect_attempts}") - - # Disconnect cleanly - if self.ws: - await self.ws.close() - self.ws = None - - # Wait with exponential backoff - delay = self.config.reconnect_delay * (2 ** attempt) - await asyncio.sleep(delay) - - # Reconnect - await self.connect() - - # Resubscribe to markets - if self.subscribed_markets: - await self.subscribe_markets(list(self.subscribed_markets)) - - logger.info("Reconnection successful") - return - - except Exception as e: - logger.error(f"Reconnection attempt {attempt + 1} failed: {e}") - - logger.error("All reconnection attempts failed") - self.running = False - - async def _schedule_reconnect(self): - """Schedule reconnection with exponential backoff""" - if self.reconnect_task and not self.reconnect_task.done(): - return # Already scheduled - - self.reconnect_task = asyncio.create_task(self._reconnect()) - - async def run_forever(self) -> None: - """Run the WebSocket client until stopped""" - try: - await self.connect() - - # Wait until stopped - while self.running: - await asyncio.sleep(1) - - finally: - await self.disconnect() - - async def _handle_flow_pause(self): - """Handle flow control pause event""" - logger.warning("Flow control PAUSED - too many messages") - # Could send a pause message to server if protocol supports it - - async def _handle_flow_resume(self): - """Handle flow control resume event""" - logger.info("Flow control RESUMED") - # Could send a resume message to server if protocol supports it - - def get_flow_stats(self) -> Optional[Dict[str, Any]]: - """Get flow control statistics""" - if self.flow_controller: - return self.flow_controller.get_stats() - return None - - def get_rate_limit_stats(self) -> Dict[str, Any]: - """Get rate limiter statistics""" - return self.subscription_limiter.get_stats() - - def convert_price(self, centi_cents: int) -> float: - """ - Convert price from centi-cents to 
dollars - - Args: - centi_cents: Price in centi-cents (1/10000 of a dollar) - - Returns: - Price in dollars - """ - return round(centi_cents / 10000, self.config.price_precision) diff --git a/neural_sdk/data_sources/__init__.py b/neural_sdk/data_sources/__init__.py new file mode 100644 index 00000000..730b0c17 --- /dev/null +++ b/neural_sdk/data_sources/__init__.py @@ -0,0 +1,43 @@ +""" +Neural SDK Data Sources + +Unified data source framework for REST APIs and WebSocket streams. +""" + +# Base infrastructure +from .base import ( + RESTDataSource, + AuthStrategy, + APIKeyAuth, + BearerTokenAuth, + RSASignatureAuth, + NoAuth, + RateLimiter, + ResponseCache +) + +# REST Adapters +from .kalshi.rest_adapter import KalshiRESTAdapter +from .espn.rest_adapter import ESPNRESTAdapter +from .weather import WeatherRESTAdapter, WeatherData, WeatherImpact + +__all__ = [ + # Base classes + 'RESTDataSource', + 'AuthStrategy', + 'APIKeyAuth', + 'BearerTokenAuth', + 'RSASignatureAuth', + 'NoAuth', + 'RateLimiter', + 'ResponseCache', + + # REST Adapters + 'KalshiRESTAdapter', + 'ESPNRESTAdapter', + 'WeatherRESTAdapter', + + # Data models + 'WeatherData', + 'WeatherImpact' +] \ No newline at end of file diff --git a/neural_sdk/data_sources/base/__init__.py b/neural_sdk/data_sources/base/__init__.py new file mode 100644 index 00000000..f7f2e380 --- /dev/null +++ b/neural_sdk/data_sources/base/__init__.py @@ -0,0 +1,28 @@ +""" +Base Data Source Infrastructure + +Provides abstract base classes and utilities for building +data source integrations with both REST API and WebSocket support. 
+""" + +from .rest_source import RESTDataSource +from .auth_strategies import ( + AuthStrategy, + APIKeyAuth, + BearerTokenAuth, + RSASignatureAuth, + NoAuth +) +from .rate_limiter import RateLimiter +from .cache import ResponseCache + +__all__ = [ + 'RESTDataSource', + 'AuthStrategy', + 'APIKeyAuth', + 'BearerTokenAuth', + 'RSASignatureAuth', + 'NoAuth', + 'RateLimiter', + 'ResponseCache' +] \ No newline at end of file diff --git a/neural_sdk/data_sources/base/auth_strategies.py b/neural_sdk/data_sources/base/auth_strategies.py new file mode 100644 index 00000000..dac51214 --- /dev/null +++ b/neural_sdk/data_sources/base/auth_strategies.py @@ -0,0 +1,275 @@ +""" +Authentication Strategies for REST Data Sources + +Provides various authentication methods for different APIs. +""" + +from abc import ABC, abstractmethod +from typing import Dict, Optional +import base64 +import hashlib +import hmac +from datetime import datetime +from cryptography.hazmat.primitives import hashes, serialization +from cryptography.hazmat.primitives.asymmetric import padding + + +class AuthStrategy(ABC): + """Abstract base class for authentication strategies.""" + + @abstractmethod + async def get_headers(self, method: str = "GET", path: str = "/") -> Dict[str, str]: + """ + Get authentication headers for request. + + Args: + method: HTTP method + path: Request path + + Returns: + Dictionary of headers + """ + pass + + +class NoAuth(AuthStrategy): + """No authentication required.""" + + async def get_headers(self, method: str = "GET", path: str = "/") -> Dict[str, str]: + """Return empty headers for no auth.""" + return {} + + +class APIKeyAuth(AuthStrategy): + """API key authentication in header or query parameter.""" + + def __init__(self, api_key: str, header_name: str = "X-API-Key", in_header: bool = True): + """ + Initialize API key authentication. 
+ + Args: + api_key: The API key + header_name: Name of the header field + in_header: If True, put in header; if False, put in query params + """ + self.api_key = api_key + self.header_name = header_name + self.in_header = in_header + + async def get_headers(self, method: str = "GET", path: str = "/") -> Dict[str, str]: + """Get API key headers.""" + if self.in_header: + return {self.header_name: self.api_key} + return {} + + def get_params(self) -> Dict[str, str]: + """Get API key as query parameter.""" + if not self.in_header: + return {"api_key": self.api_key} + return {} + + +class BearerTokenAuth(AuthStrategy): + """Bearer token authentication (OAuth 2.0 style).""" + + def __init__(self, token: str, prefix: str = "Bearer"): + """ + Initialize bearer token authentication. + + Args: + token: The bearer token + prefix: Token prefix (usually "Bearer") + """ + self.token = token + self.prefix = prefix + + async def get_headers(self, method: str = "GET", path: str = "/") -> Dict[str, str]: + """Get bearer token headers.""" + return {"Authorization": f"{self.prefix} {self.token}"} + + +class BasicAuth(AuthStrategy): + """HTTP Basic authentication.""" + + def __init__(self, username: str, password: str): + """ + Initialize basic authentication. + + Args: + username: Username + password: Password + """ + self.username = username + self.password = password + + async def get_headers(self, method: str = "GET", path: str = "/") -> Dict[str, str]: + """Get basic auth headers.""" + credentials = f"{self.username}:{self.password}" + encoded = base64.b64encode(credentials.encode()).decode() + return {"Authorization": f"Basic {encoded}"} + + +class HMACAuth(AuthStrategy): + """HMAC-based authentication.""" + + def __init__(self, api_key: str, secret_key: str): + """ + Initialize HMAC authentication. 
+ + Args: + api_key: API key + secret_key: Secret key for HMAC + """ + self.api_key = api_key + self.secret_key = secret_key + + async def get_headers(self, method: str = "GET", path: str = "/") -> Dict[str, str]: + """Get HMAC auth headers.""" + timestamp = str(int(datetime.utcnow().timestamp())) + message = f"{method}{path}{timestamp}" + + signature = hmac.new( + self.secret_key.encode(), + message.encode(), + hashlib.sha256 + ).hexdigest() + + return { + "X-API-Key": self.api_key, + "X-Timestamp": timestamp, + "X-Signature": signature + } + + +class RSASignatureAuth(AuthStrategy): + """ + RSA-PSS signature authentication (Kalshi-style). + + This is specifically designed for APIs that require + RSA-PSS signatures like Kalshi's API. + """ + + def __init__(self, api_key_id: str, private_key_str: str): + """ + Initialize RSA signature authentication. + + Args: + api_key_id: API key identifier + private_key_str: PEM-encoded private key string + """ + self.api_key_id = api_key_id + + # Load private key + self.private_key = serialization.load_pem_private_key( + private_key_str.encode(), + password=None + ) + + async def get_headers(self, method: str = "GET", path: str = "/") -> Dict[str, str]: + """ + Get RSA signature headers. + + This follows Kalshi's signature scheme: + 1. Create message from timestamp + method + path + 2. Sign with RSA-PSS + 3. 
Include in headers + """ + timestamp_ms = str(int(datetime.utcnow().timestamp() * 1000)) + message = f"{timestamp_ms}{method}{path}" + + # Sign message with RSA-PSS + signature = self.private_key.sign( + message.encode(), + padding.PSS( + mgf=padding.MGF1(hashes.SHA256()), + salt_length=padding.PSS.MAX_LENGTH + ), + hashes.SHA256() + ) + + # Encode signature as base64 + signature_b64 = base64.b64encode(signature).decode() + + return { + "KALSHI-ACCESS-KEY": self.api_key_id, + "KALSHI-ACCESS-SIGNATURE": signature_b64, + "KALSHI-ACCESS-TIMESTAMP": timestamp_ms + } + + +class OAuth2Auth(AuthStrategy): + """ + OAuth 2.0 authentication with automatic token refresh. + """ + + def __init__( + self, + client_id: str, + client_secret: str, + token_url: str, + scope: Optional[str] = None + ): + """ + Initialize OAuth 2.0 authentication. + + Args: + client_id: OAuth client ID + client_secret: OAuth client secret + token_url: Token endpoint URL + scope: Optional scope string + """ + self.client_id = client_id + self.client_secret = client_secret + self.token_url = token_url + self.scope = scope + self.access_token = None + self.token_expiry = None + + async def refresh_token(self): + """Refresh the access token.""" + import httpx + + async with httpx.AsyncClient() as client: + data = { + "grant_type": "client_credentials", + "client_id": self.client_id, + "client_secret": self.client_secret + } + + if self.scope: + data["scope"] = self.scope + + response = await client.post(self.token_url, data=data) + response.raise_for_status() + + token_data = response.json() + self.access_token = token_data["access_token"] + + # Calculate expiry time + expires_in = token_data.get("expires_in", 3600) + self.token_expiry = datetime.utcnow() + timedelta(seconds=expires_in - 60) + + async def get_headers(self, method: str = "GET", path: str = "/") -> Dict[str, str]: + """Get OAuth 2.0 headers with automatic refresh.""" + # Refresh token if needed + if not self.access_token or datetime.utcnow() 
>= self.token_expiry: + await self.refresh_token() + + return {"Authorization": f"Bearer {self.access_token}"} + + +class CustomHeaderAuth(AuthStrategy): + """Custom header-based authentication.""" + + def __init__(self, headers: Dict[str, str]): + """ + Initialize custom header authentication. + + Args: + headers: Dictionary of custom headers + """ + self.headers = headers + + async def get_headers(self, method: str = "GET", path: str = "/") -> Dict[str, str]: + """Return custom headers.""" + return self.headers.copy() \ No newline at end of file diff --git a/neural_sdk/data_sources/base/cache.py b/neural_sdk/data_sources/base/cache.py new file mode 100644 index 00000000..c693cd03 --- /dev/null +++ b/neural_sdk/data_sources/base/cache.py @@ -0,0 +1,339 @@ +""" +Response Caching for REST Data Sources + +Provides caching functionality to reduce API calls and improve performance. +""" + +import time +import hashlib +import json +from typing import Any, Optional, Dict +from collections import OrderedDict +import logging + +logger = logging.getLogger(__name__) + + +class ResponseCache: + """ + LRU cache for API responses with TTL support. + + Features: + - Least Recently Used (LRU) eviction + - Time-to-live (TTL) for entries + - Size limits + - Statistics tracking + """ + + def __init__( + self, + ttl: int = 60, + max_size: int = 1000, + name: str = "ResponseCache" + ): + """ + Initialize response cache. 

        Args:
            ttl: Time-to-live in seconds
            max_size: Maximum number of entries
            name: Name for logging
        """
        self.ttl = ttl
        self.max_size = max_size
        self.name = name

        # Use OrderedDict for LRU behavior
        self.cache: OrderedDict[str, Dict[str, Any]] = OrderedDict()

        # Statistics
        self.stats = {
            'hits': 0,
            'misses': 0,
            'evictions': 0,
            'expired': 0
        }

    def _is_expired(self, entry: Dict[str, Any]) -> bool:
        """Check if cache entry is expired."""
        # A non-positive TTL disables expiration entirely.
        if self.ttl <= 0:
            return False

        age = time.time() - entry['timestamp']
        return age > self.ttl

    def get(self, key: str) -> Optional[Any]:
        """
        Get value from cache.

        Args:
            key: Cache key

        Returns:
            Cached value or None if not found/expired

        Note:
            A cached value of ``None`` is indistinguishable from a miss.
        """
        if key not in self.cache:
            self.stats['misses'] += 1
            return None

        entry = self.cache[key]

        # Check expiration
        if self._is_expired(entry):
            # An expired entry counts as both 'expired' and a miss.
            self.stats['expired'] += 1
            self.stats['misses'] += 1
            del self.cache[key]
            return None

        # Move to end (most recently used)
        self.cache.move_to_end(key)
        self.stats['hits'] += 1

        return entry['value']

    def set(self, key: str, value: Any) -> None:
        """
        Set value in cache.

        Args:
            key: Cache key
            value: Value to cache
        """
        # Remove if already exists (to update position)
        if key in self.cache:
            del self.cache[key]

        # Add to end (most recently used)
        self.cache[key] = {
            'value': value,
            'timestamp': time.time()
        }

        # Evict oldest if over size limit
        while len(self.cache) > self.max_size:
            oldest = next(iter(self.cache))
            del self.cache[oldest]
            self.stats['evictions'] += 1

    def delete(self, key: str) -> bool:
        """
        Delete entry from cache.

        Args:
            key: Cache key

        Returns:
            True if deleted, False if not found
        """
        if key in self.cache:
            del self.cache[key]
            return True
        return False

    def clear(self) -> None:
        """Clear all cache entries."""
        # Statistics are intentionally preserved across clears.
        self.cache.clear()

    def cleanup_expired(self) -> int:
        """
        Remove all expired entries.

        Returns:
            Number of entries removed
        """
        if self.ttl <= 0:
            return 0

        # Collect first, then delete: avoids mutating while iterating.
        expired_keys = [
            key for key, entry in self.cache.items()
            if self._is_expired(entry)
        ]

        for key in expired_keys:
            del self.cache[key]
            self.stats['expired'] += 1

        return len(expired_keys)

    def get_stats(self) -> Dict:
        """Get cache statistics."""
        total_requests = self.stats['hits'] + self.stats['misses']
        hit_rate = (
            self.stats['hits'] / total_requests
            if total_requests > 0 else 0
        )

        return {
            'name': self.name,
            'size': len(self.cache),
            'max_size': self.max_size,
            'ttl': self.ttl,
            'hits': self.stats['hits'],
            'misses': self.stats['misses'],
            'hit_rate': hit_rate,
            'evictions': self.stats['evictions'],
            'expired': self.stats['expired']
        }

    def __len__(self) -> int:
        """Get number of cached entries."""
        return len(self.cache)

    def __contains__(self, key: str) -> bool:
        """Check if key exists and is not expired."""
        if key not in self.cache:
            return False

        # NOTE: membership testing has a side effect — an expired entry
        # is evicted here (but not counted in the 'expired' stat).
        if self._is_expired(self.cache[key]):
            del self.cache[key]
            return False

        return True


class MultiLevelCache:
    """
    Multi-level cache with memory and optional persistent storage.

    Provides a two-level cache system:
    - L1: Fast in-memory cache
    - L2: Optional persistent cache (Redis, disk, etc.)
    """

    def __init__(
        self,
        memory_ttl: int = 60,
        memory_size: int = 1000,
        persistent_cache: Optional[Any] = None,
        name: str = "MultiLevelCache"
    ):
        """
        Initialize multi-level cache.

        Args:
            memory_ttl: TTL for memory cache
            memory_size: Max size for memory cache
            persistent_cache: Optional persistent cache backend
            name: Name for logging
        """
        self.memory_cache = ResponseCache(
            ttl=memory_ttl,
            max_size=memory_size,
            name=f"{name}_L1"
        )
        # NOTE(review): the backend is duck-typed; it is expected to expose
        # async get/set/delete — confirm against the concrete backends used.
        self.persistent_cache = persistent_cache
        self.name = name

    async def get(self, key: str) -> Optional[Any]:
        """
        Get value from cache (checks both levels).

        Args:
            key: Cache key

        Returns:
            Cached value or None
        """
        # Check L1 (memory)
        value = self.memory_cache.get(key)
        if value is not None:
            return value

        # Check L2 (persistent) if available
        if self.persistent_cache:
            try:
                value = await self.persistent_cache.get(key)
                if value is not None:
                    # Promote to L1
                    self.memory_cache.set(key, value)
                    return value
            except Exception as e:
                # L2 failures degrade to a miss rather than propagating.
                logger.error(f"{self.name}: Error reading from L2 cache: {e}")

        return None

    async def set(self, key: str, value: Any) -> None:
        """
        Set value in cache (both levels).

        Args:
            key: Cache key
            value: Value to cache
        """
        # Set in L1 (memory)
        self.memory_cache.set(key, value)

        # Set in L2 (persistent) if available
        if self.persistent_cache:
            try:
                await self.persistent_cache.set(key, value)
            except Exception as e:
                # Best-effort: L1 keeps the value even if L2 write fails.
                logger.error(f"{self.name}: Error writing to L2 cache: {e}")

    async def delete(self, key: str) -> bool:
        """
        Delete from both cache levels.

        Args:
            key: Cache key

        Returns:
            True if deleted from at least one level
        """
        deleted_l1 = self.memory_cache.delete(key)
        deleted_l2 = False

        if self.persistent_cache:
            try:
                deleted_l2 = await self.persistent_cache.delete(key)
            except Exception as e:
                logger.error(f"{self.name}: Error deleting from L2 cache: {e}")

        return deleted_l1 or deleted_l2

    def get_stats(self) -> Dict:
        """Get statistics for both cache levels."""
        stats = {
            'name': self.name,
            'L1': self.memory_cache.get_stats()
        }

        if self.persistent_cache and hasattr(self.persistent_cache, 'get_stats'):
            stats['L2'] = self.persistent_cache.get_stats()

        return stats


def make_cache_key(*args, **kwargs) -> str:
    """
    Generate a cache key from arguments.

    Args:
        *args: Positional arguments
        **kwargs: Keyword arguments

    Returns:
        Cache key string
    """
    # Create a unique string from all arguments
    key_parts = []

    # Add positional arguments
    for arg in args:
        if isinstance(arg, (dict, list)):
            # sort_keys makes equivalent dicts hash identically.
            key_parts.append(json.dumps(arg, sort_keys=True))
        else:
            key_parts.append(str(arg))

    # Add keyword arguments
    for k, v in sorted(kwargs.items()):
        if isinstance(v, (dict, list)):
            key_parts.append(f"{k}={json.dumps(v, sort_keys=True)}")
        else:
            key_parts.append(f"{k}={v}")

    # Create hash of the key for consistent length.
    # md5 is used only for key compaction, not for security.
    key_str = "|".join(key_parts)
    key_hash = hashlib.md5(key_str.encode()).hexdigest()

    return key_hash
\ No newline at end of file
diff --git a/neural_sdk/data_sources/base/rate_limiter.py b/neural_sdk/data_sources/base/rate_limiter.py
new file mode 100644
index 00000000..db365ff8
--- /dev/null
+++ b/neural_sdk/data_sources/base/rate_limiter.py
@@ -0,0 +1,338 @@
"""
Rate Limiting for REST Data Sources

Provides rate limiting functionality to prevent API throttling.
+""" + +import asyncio +import time +from typing import Optional, Dict +from collections import deque +from datetime import datetime, timedelta +import logging + +logger = logging.getLogger(__name__) + + +class RateLimiter: + """ + Token bucket rate limiter for API requests. + + Implements a token bucket algorithm that allows bursts + while maintaining an average rate limit. + """ + + def __init__( + self, + requests_per_second: float = 10, + burst_size: Optional[int] = None, + name: str = "RateLimiter" + ): + """ + Initialize rate limiter. + + Args: + requests_per_second: Maximum average requests per second + burst_size: Maximum burst size (defaults to requests_per_second) + name: Name for logging + """ + self.requests_per_second = requests_per_second + self.burst_size = burst_size or int(requests_per_second) + self.name = name + + # Token bucket + self.tokens = float(self.burst_size) + self.max_tokens = float(self.burst_size) + self.refill_rate = requests_per_second + self.last_refill = time.monotonic() + + # Lock for thread safety + self.lock = asyncio.Lock() + + # Statistics + self.stats = { + 'requests': 0, + 'throttled': 0, + 'total_wait_time': 0 + } + + def _refill_tokens(self): + """Refill tokens based on elapsed time.""" + now = time.monotonic() + elapsed = now - self.last_refill + + # Add tokens based on elapsed time + tokens_to_add = elapsed * self.refill_rate + self.tokens = min(self.tokens + tokens_to_add, self.max_tokens) + self.last_refill = now + + async def acquire(self, tokens: float = 1.0) -> float: + """ + Acquire tokens, waiting if necessary. 
+ + Args: + tokens: Number of tokens to acquire + + Returns: + Time waited in seconds + """ + async with self.lock: + start_time = time.monotonic() + self.stats['requests'] += 1 + + # Refill tokens + self._refill_tokens() + + # Wait if not enough tokens + wait_time = 0 + if self.tokens < tokens: + # Calculate wait time + tokens_needed = tokens - self.tokens + wait_time = tokens_needed / self.refill_rate + + logger.debug(f"{self.name}: Rate limited, waiting {wait_time:.2f}s") + self.stats['throttled'] += 1 + self.stats['total_wait_time'] += wait_time + + await asyncio.sleep(wait_time) + + # Refill again after waiting + self._refill_tokens() + + # Consume tokens + self.tokens -= tokens + + actual_wait = time.monotonic() - start_time + return actual_wait + + async def try_acquire(self, tokens: float = 1.0) -> bool: + """ + Try to acquire tokens without waiting. + + Args: + tokens: Number of tokens to acquire + + Returns: + True if acquired, False if would need to wait + """ + async with self.lock: + self._refill_tokens() + + if self.tokens >= tokens: + self.tokens -= tokens + self.stats['requests'] += 1 + return True + + return False + + def get_stats(self) -> Dict: + """Get rate limiter statistics.""" + return { + 'name': self.name, + 'requests': self.stats['requests'], + 'throttled': self.stats['throttled'], + 'throttle_rate': ( + self.stats['throttled'] / self.stats['requests'] + if self.stats['requests'] > 0 else 0 + ), + 'total_wait_time': self.stats['total_wait_time'], + 'average_wait_time': ( + self.stats['total_wait_time'] / self.stats['throttled'] + if self.stats['throttled'] > 0 else 0 + ), + 'current_tokens': self.tokens, + 'max_tokens': self.max_tokens + } + + def reset(self): + """Reset the rate limiter to full capacity.""" + self.tokens = self.max_tokens + self.last_refill = time.monotonic() + + +class HierarchicalRateLimiter: + """ + Hierarchical rate limiter for multiple tiers of limits. 
+ + Useful for APIs with multiple rate limits (e.g., per second, per minute, per hour). + """ + + def __init__(self, name: str = "HierarchicalRateLimiter"): + """ + Initialize hierarchical rate limiter. + + Args: + name: Name for logging + """ + self.name = name + self.limiters: Dict[str, RateLimiter] = {} + + def add_limit( + self, + tier: str, + requests: int, + period_seconds: float, + burst_size: Optional[int] = None + ): + """ + Add a rate limit tier. + + Args: + tier: Name of the tier (e.g., "second", "minute", "hour") + requests: Number of requests allowed + period_seconds: Period in seconds + burst_size: Optional burst size + """ + rate = requests / period_seconds + self.limiters[tier] = RateLimiter( + requests_per_second=rate, + burst_size=burst_size, + name=f"{self.name}_{tier}" + ) + + async def acquire(self, tokens: float = 1.0) -> float: + """ + Acquire tokens from all tiers. + + Args: + tokens: Number of tokens to acquire + + Returns: + Total time waited + """ + total_wait = 0 + + # Acquire from all limiters + for tier, limiter in self.limiters.items(): + wait_time = await limiter.acquire(tokens) + total_wait = max(total_wait, wait_time) + + return total_wait + + async def try_acquire(self, tokens: float = 1.0) -> bool: + """ + Try to acquire tokens from all tiers without waiting. 
+ + Args: + tokens: Number of tokens to acquire + + Returns: + True if acquired from all tiers, False otherwise + """ + # Check all limiters first + for limiter in self.limiters.values(): + async with limiter.lock: + limiter._refill_tokens() + if limiter.tokens < tokens: + return False + + # If all have tokens, acquire from all + for limiter in self.limiters.values(): + await limiter.try_acquire(tokens) + + return True + + def get_stats(self) -> Dict: + """Get statistics for all tiers.""" + return { + tier: limiter.get_stats() + for tier, limiter in self.limiters.items() + } + + +class AdaptiveRateLimiter(RateLimiter): + """ + Adaptive rate limiter that adjusts based on API responses. + + Automatically reduces rate when encountering 429 errors + and gradually increases back to normal. + """ + + def __init__( + self, + initial_rate: float = 10, + min_rate: float = 1, + max_rate: float = 100, + name: str = "AdaptiveRateLimiter" + ): + """ + Initialize adaptive rate limiter. + + Args: + initial_rate: Initial requests per second + min_rate: Minimum requests per second + max_rate: Maximum requests per second + name: Name for logging + """ + super().__init__(requests_per_second=initial_rate, name=name) + + self.min_rate = min_rate + self.max_rate = max_rate + self.initial_rate = initial_rate + + # Adaptation parameters + self.backoff_factor = 0.5 # Reduce rate by 50% on error + self.recovery_factor = 1.1 # Increase rate by 10% on success + self.success_streak = 0 + self.recovery_threshold = 10 # Successes before increasing rate + + async def on_success(self): + """Called on successful request.""" + self.success_streak += 1 + + # Gradually increase rate after consistent success + if self.success_streak >= self.recovery_threshold: + new_rate = min( + self.requests_per_second * self.recovery_factor, + self.max_rate + ) + + if new_rate > self.requests_per_second: + logger.info( + f"{self.name}: Increasing rate from " + f"{self.requests_per_second:.1f} to {new_rate:.1f} rps" 
+ ) + self.requests_per_second = new_rate + self.refill_rate = new_rate + self.success_streak = 0 + + async def on_rate_limit(self, retry_after: Optional[float] = None): + """ + Called when rate limited by the API. + + Args: + retry_after: Optional retry-after header value in seconds + """ + self.success_streak = 0 + + if retry_after: + # Use retry-after to calculate new rate + new_rate = max(1.0 / retry_after, self.min_rate) + else: + # Reduce rate by backoff factor + new_rate = max( + self.requests_per_second * self.backoff_factor, + self.min_rate + ) + + if new_rate < self.requests_per_second: + logger.warning( + f"{self.name}: Reducing rate from " + f"{self.requests_per_second:.1f} to {new_rate:.1f} rps" + ) + self.requests_per_second = new_rate + self.refill_rate = new_rate + + # Also reduce current tokens to prevent burst + self.tokens = min(self.tokens, new_rate) + self.max_tokens = new_rate + self.burst_size = int(new_rate) + + def reset_to_initial(self): + """Reset rate to initial value.""" + self.requests_per_second = self.initial_rate + self.refill_rate = self.initial_rate + self.burst_size = int(self.initial_rate) + self.max_tokens = float(self.burst_size) + self.tokens = float(self.burst_size) + self.success_streak = 0 \ No newline at end of file diff --git a/neural_sdk/data_sources/base/rest_source.py b/neural_sdk/data_sources/base/rest_source.py new file mode 100644 index 00000000..661e1b78 --- /dev/null +++ b/neural_sdk/data_sources/base/rest_source.py @@ -0,0 +1,356 @@ +""" +REST Data Source Base Class + +Abstract base class for all REST API data sources in Neural SDK. +Provides common functionality for authentication, rate limiting, +caching, and error handling. 
+""" + +from abc import ABC, abstractmethod +from typing import Dict, Any, Optional, List, Union +import asyncio +import logging +from datetime import datetime, timedelta +import httpx +from tenacity import ( + retry, + stop_after_attempt, + wait_exponential, + retry_if_exception_type +) + +from .rate_limiter import RateLimiter +from .cache import ResponseCache +from .auth_strategies import AuthStrategy, NoAuth + +logger = logging.getLogger(__name__) + + +class RESTDataSource(ABC): + """ + Abstract base class for REST API data sources. + + Provides: + - Automatic authentication + - Rate limiting + - Response caching + - Retry logic with exponential backoff + - Error handling + - Response transformation + """ + + def __init__( + self, + base_url: str, + name: str = None, + auth_strategy: AuthStrategy = None, + timeout: int = 30, + cache_ttl: int = 60, + rate_limit: int = 10, # requests per second + max_retries: int = 3 + ): + """ + Initialize REST data source. + + Args: + base_url: Base URL for the API + name: Name of the data source + auth_strategy: Authentication strategy to use + timeout: Request timeout in seconds + cache_ttl: Cache time-to-live in seconds + rate_limit: Maximum requests per second + max_retries: Maximum number of retry attempts + """ + self.base_url = base_url.rstrip('/') + self.name = name or self.__class__.__name__ + self.auth_strategy = auth_strategy or NoAuth() + self.timeout = timeout + self.max_retries = max_retries + + # Initialize components + self.cache = ResponseCache(ttl=cache_ttl) + self.rate_limiter = RateLimiter(requests_per_second=rate_limit) + self.session: Optional[httpx.AsyncClient] = None + + # Statistics + self.stats = { + 'requests': 0, + 'cache_hits': 0, + 'errors': 0, + 'total_latency': 0 + } + + async def __aenter__(self): + """Async context manager entry.""" + await self.connect() + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + """Async context manager exit.""" + await self.disconnect() + + 
async def connect(self): + """Initialize HTTP session.""" + if not self.session: + self.session = httpx.AsyncClient( + base_url=self.base_url, + timeout=self.timeout + ) + logger.info(f"{self.name}: Connected to {self.base_url}") + + async def disconnect(self): + """Close HTTP session.""" + if self.session: + await self.session.aclose() + self.session = None + logger.info(f"{self.name}: Disconnected") + + @abstractmethod + async def transform_response(self, data: Any, endpoint: str) -> Dict: + """ + Transform API response to standardized format. + + Args: + data: Raw response data + endpoint: The endpoint that was called + + Returns: + Standardized response dictionary + """ + pass + + @abstractmethod + async def validate_response(self, response: httpx.Response) -> bool: + """ + Validate that the response is successful. + + Args: + response: HTTP response object + + Returns: + True if response is valid, False otherwise + """ + pass + + def _get_cache_key(self, method: str, endpoint: str, params: Dict = None) -> str: + """Generate cache key for request.""" + param_str = "&".join(f"{k}={v}" for k, v in sorted((params or {}).items())) + return f"{self.name}:{method}:{endpoint}:{param_str}" + + @retry( + stop=stop_after_attempt(3), + wait=wait_exponential(multiplier=1, min=2, max=10), + retry=retry_if_exception_type((httpx.TimeoutException, httpx.NetworkError)) + ) + async def _make_request( + self, + method: str, + endpoint: str, + params: Optional[Dict] = None, + json_data: Optional[Dict] = None, + use_cache: bool = True + ) -> Any: + """ + Make HTTP request with retry logic. 
+ + Args: + method: HTTP method + endpoint: API endpoint + params: Query parameters + json_data: JSON body data + use_cache: Whether to use caching + + Returns: + Response data + """ + # Check cache for GET requests + cache_key = self._get_cache_key(method, endpoint, params) + if use_cache and method == "GET": + cached = self.cache.get(cache_key) + if cached is not None: + self.stats['cache_hits'] += 1 + logger.debug(f"{self.name}: Cache hit for {endpoint}") + return cached + + # Apply rate limiting + await self.rate_limiter.acquire() + + # Get authentication headers + auth_headers = await self.auth_strategy.get_headers(method, endpoint) + + # Make request + start_time = datetime.utcnow() + self.stats['requests'] += 1 + + try: + response = await self.session.request( + method=method, + url=endpoint, + params=params, + json=json_data, + headers=auth_headers + ) + + # Calculate latency + latency = (datetime.utcnow() - start_time).total_seconds() + self.stats['total_latency'] += latency + + # Validate response + if not await self.validate_response(response): + raise ValueError(f"Invalid response from {endpoint}: {response.status_code}") + + # Parse response + data = response.json() if response.content else {} + + # Cache successful GET responses + if use_cache and method == "GET": + self.cache.set(cache_key, data) + + return data + + except Exception as e: + self.stats['errors'] += 1 + logger.error(f"{self.name}: Error fetching {endpoint}: {e}") + raise + + async def fetch( + self, + endpoint: str, + params: Optional[Dict] = None, + use_cache: bool = True, + transform: bool = True + ) -> Dict: + """ + Fetch data from REST endpoint. 
+ + Args: + endpoint: API endpoint (relative to base_url) + params: Query parameters + use_cache: Whether to use caching + transform: Whether to transform response + + Returns: + Transformed response data + """ + if not self.session: + await self.connect() + + # Ensure endpoint starts with / + if not endpoint.startswith('/'): + endpoint = '/' + endpoint + + # Make request + data = await self._make_request( + method="GET", + endpoint=endpoint, + params=params, + use_cache=use_cache + ) + + # Transform response if requested + if transform: + return await self.transform_response(data, endpoint) + + return data + + async def post( + self, + endpoint: str, + json_data: Optional[Dict] = None, + params: Optional[Dict] = None, + transform: bool = True + ) -> Dict: + """ + Post data to REST endpoint. + + Args: + endpoint: API endpoint + json_data: JSON body data + params: Query parameters + transform: Whether to transform response + + Returns: + Response data + """ + if not self.session: + await self.connect() + + if not endpoint.startswith('/'): + endpoint = '/' + endpoint + + data = await self._make_request( + method="POST", + endpoint=endpoint, + params=params, + json_data=json_data, + use_cache=False + ) + + if transform: + return await self.transform_response(data, endpoint) + + return data + + async def batch_fetch( + self, + requests: List[Dict[str, Any]], + max_concurrent: int = 5 + ) -> List[Dict]: + """ + Fetch multiple endpoints concurrently. 
+ + Args: + requests: List of request configurations + max_concurrent: Maximum concurrent requests + + Returns: + List of responses + """ + semaphore = asyncio.Semaphore(max_concurrent) + + async def fetch_with_semaphore(request): + async with semaphore: + return await self.fetch( + endpoint=request['endpoint'], + params=request.get('params'), + use_cache=request.get('use_cache', True) + ) + + tasks = [fetch_with_semaphore(req) for req in requests] + return await asyncio.gather(*tasks, return_exceptions=True) + + def get_stats(self) -> Dict: + """Get data source statistics.""" + avg_latency = ( + self.stats['total_latency'] / self.stats['requests'] + if self.stats['requests'] > 0 else 0 + ) + + cache_hit_rate = ( + self.stats['cache_hits'] / self.stats['requests'] + if self.stats['requests'] > 0 else 0 + ) + + return { + 'name': self.name, + 'total_requests': self.stats['requests'], + 'cache_hits': self.stats['cache_hits'], + 'cache_hit_rate': cache_hit_rate, + 'errors': self.stats['errors'], + 'average_latency': avg_latency + } + + async def health_check(self) -> bool: + """ + Check if the data source is healthy. + + Returns: + True if healthy, False otherwise + """ + try: + # Make a simple request to check connectivity + await self.fetch("/", use_cache=False) + return True + except Exception as e: + logger.error(f"{self.name}: Health check failed: {e}") + return False \ No newline at end of file diff --git a/neural_sdk/data_sources/espn/rest_adapter.py b/neural_sdk/data_sources/espn/rest_adapter.py new file mode 100644 index 00000000..3fc8c258 --- /dev/null +++ b/neural_sdk/data_sources/espn/rest_adapter.py @@ -0,0 +1,406 @@ +""" +ESPN REST API Adapter + +Provides unified interface for ESPN sports data. 
+""" + +from typing import Dict, Any, Optional, List +from datetime import datetime, date +import logging + +from ..base.rest_source import RESTDataSource +from ..base.auth_strategies import NoAuth +from neural_sdk.data_pipeline.data_sources.espn.client import ESPNClient +from neural_sdk.data_pipeline.data_sources.espn.processor import PlayByPlayProcessor + +logger = logging.getLogger(__name__) + + +class ESPNRESTAdapter(RESTDataSource): + """ + REST adapter for ESPN API. + + Provides sports data including scores, stats, play-by-play, + and team information through a unified interface. + """ + + # Sport path mappings + SPORT_PATHS = { + "nfl": "football/nfl", + "nba": "basketball/nba", + "cfb": "football/college-football", + "college-football": "football/college-football", + "mlb": "baseball/mlb", + "nhl": "hockey/nhl" + } + + def __init__(self): + """Initialize ESPN REST adapter.""" + # ESPN doesn't require authentication + super().__init__( + base_url="http://site.api.espn.com/apis/site/v2/sports", + name="ESPNREST", + auth_strategy=NoAuth(), + timeout=30, + cache_ttl=30, # Cache for 30 seconds + rate_limit=10, # ESPN is less strict on rate limits + max_retries=3 + ) + + # Use existing processor for play-by-play + self.processor = PlayByPlayProcessor() + + logger.info("ESPN REST adapter initialized") + + async def validate_response(self, response) -> bool: + """ + Validate ESPN API response. + + Args: + response: HTTP response object + + Returns: + True if valid, False otherwise + """ + if response.status_code == 200: + return True + + if response.status_code == 404: + logger.warning("ESPN resource not found") + elif response.status_code == 429: + logger.warning("ESPN rate limit exceeded") + elif response.status_code >= 500: + logger.error(f"ESPN server error: {response.status_code}") + + return False + + async def transform_response(self, data: Any, endpoint: str) -> Dict: + """ + Transform ESPN response to standardized format. 

        Args:
            data: Raw ESPN response
            endpoint: The endpoint that was called

        Returns:
            Standardized response
        """
        return {
            "source": "espn",
            "endpoint": endpoint,
            "data": data,
            "timestamp": datetime.utcnow().isoformat(),
            "metadata": {
                "sport": self._extract_sport_from_endpoint(endpoint)
            }
        }

    def _extract_sport_from_endpoint(self, endpoint: str) -> Optional[str]:
        """Extract sport from endpoint path.

        First match wins: "cfb" and "college-football" map to the same
        path, so the alias listed first in SPORT_PATHS is returned.
        """
        for sport, path in self.SPORT_PATHS.items():
            if path in endpoint:
                return sport
        return None

    def _get_sport_path(self, sport: str) -> str:
        """Get API path for sport (unknown sports pass through lowercased)."""
        return self.SPORT_PATHS.get(sport.lower(), sport.lower())

    # Scoreboard Methods

    async def get_scoreboard(
        self,
        sport: str,
        date_str: Optional[str] = None,
        week: Optional[int] = None,
        groups: Optional[str] = None
    ) -> Dict:
        """
        Get scoreboard for a sport.

        Args:
            sport: Sport name (nfl, nba, cfb, etc.)
            date_str: Date in YYYYMMDD format
            week: Week number (for NFL/CFB)
            groups: Conference/division filter

        Returns:
            Scoreboard data
        """
        sport_path = self._get_sport_path(sport)
        endpoint = f"/{sport_path}/scoreboard"

        # NOTE(review): truthiness checks skip week=0 / empty strings —
        # confirm 0 is never a valid week before relying on this.
        params = {}
        if date_str:
            params["dates"] = date_str
        if week:
            params["week"] = week
        if groups:
            params["groups"] = groups

        return await self.fetch(endpoint, params=params)

    async def get_game_summary(self, sport: str, game_id: str) -> Dict:
        """
        Get detailed game summary.

        Args:
            sport: Sport name
            game_id: ESPN game ID

        Returns:
            Game summary data
        """
        sport_path = self._get_sport_path(sport)
        endpoint = f"/{sport_path}/summary"

        return await self.fetch(endpoint, params={"event": game_id})

    # Play-by-Play Methods

    async def get_play_by_play(self, sport: str, game_id: str) -> Dict:
        """
        Get play-by-play data for a game.

        Args:
            sport: Sport name
            game_id: ESPN game ID

        Returns:
            Play-by-play data with processed events
        """
        # Get raw play-by-play
        result = await self.get_game_summary(sport, game_id)

        # Process with existing processor
        # NOTE(review): assumes the summary payload exposes "drives" at the
        # top level of "data" — confirm against the actual ESPN schema.
        if "data" in result and "drives" in result["data"]:
            processed = self.processor.process_game(result["data"])
            result["processed"] = processed

        return result

    # Team Methods

    async def get_teams(self, sport: str, limit: int = 100) -> Dict:
        """
        Get teams for a sport.

        Args:
            sport: Sport name
            limit: Maximum number of teams

        Returns:
            Teams data
        """
        sport_path = self._get_sport_path(sport)
        endpoint = f"/{sport_path}/teams"

        return await self.fetch(endpoint, params={"limit": limit})

    async def get_team(self, sport: str, team_id: str) -> Dict:
        """
        Get single team information.

        Args:
            sport: Sport name
            team_id: ESPN team ID

        Returns:
            Team data
        """
        sport_path = self._get_sport_path(sport)
        endpoint = f"/{sport_path}/teams/{team_id}"

        return await self.fetch(endpoint)

    async def get_team_roster(self, sport: str, team_id: str) -> Dict:
        """
        Get team roster.

        Args:
            sport: Sport name
            team_id: ESPN team ID

        Returns:
            Roster data
        """
        sport_path = self._get_sport_path(sport)
        endpoint = f"/{sport_path}/teams/{team_id}/roster"

        return await self.fetch(endpoint)

    # NFL-Specific Methods

    async def get_nfl_games(self, week: Optional[int] = None) -> Dict:
        """
        Get NFL games.

        Args:
            week: NFL week number

        Returns:
            NFL games data, with a flattened "games" list added
        """
        params = {}
        if week:
            params["week"] = week

        # **params forwards "week" as a keyword to get_scoreboard.
        result = await self.get_scoreboard("nfl", **params)

        # Extract game information into a flat, adapter-friendly shape.
        if "data" in result and "events" in result["data"]:
            games = []
            for event in result["data"]["events"]:
                game_info = {
                    "id": event.get("id"),
                    "name": event.get("name"),
                    "short_name": event.get("shortName"),
                    "date": event.get("date"),
                    "status": event.get("status", {}).get("type", {}).get("name"),
                    "completed": event.get("status", {}).get("type", {}).get("completed", False)
                }

                # Add competition details (first competition only)
                if "competitions" in event and event["competitions"]:
                    comp = event["competitions"][0]
                    game_info["venue"] = comp.get("venue", {}).get("fullName")
                    game_info["attendance"] = comp.get("attendance")

                    # Add competitor information
                    if "competitors" in comp:
                        for competitor in comp["competitors"]:
                            team_type = "home" if competitor.get("homeAway") == "home" else "away"
                            game_info[f"{team_type}_team"] = {
                                "id": competitor.get("id"),
                                "name": competitor.get("team", {}).get("displayName"),
                                "abbreviation": competitor.get("team", {}).get("abbreviation"),
                                "score": competitor.get("score"),
                                # First record entry is the overall summary.
                                "record": competitor.get("records", [{}])[0].get("summary") if competitor.get("records") else None
                            }

                games.append(game_info)

            result["games"] = games

        return result

    async def get_cfb_games(self, week: Optional[int] = None, group: Optional[str] = None) -> Dict:
        """
        Get college football games.

        Args:
            week: CFB week number
            group: Conference filter (e.g., "80" for Big Ten)

        Returns:
            CFB games data, with a flattened "games" list added
        """
        params = {}
        if week:
            params["week"] = week
        if group:
            params["groups"] = group

        result = await self.get_scoreboard("cfb", **params)

        # Process similar to NFL games
        if "data" in result and "events" in result["data"]:
            games = []
            for event in result["data"]["events"]:
                game_info = {
                    "id": event.get("id"),
                    "name": event.get("name"),
                    "short_name": event.get("shortName"),
                    "date": event.get("date"),
                    "status": event.get("status", {}).get("type", {}).get("name")
                }

                # Add teams and scores
                if "competitions" in event and event["competitions"]:
                    comp = event["competitions"][0]
                    if "competitors" in comp:
                        for competitor in comp["competitors"]:
                            team_type = "home" if competitor.get("homeAway") == "home" else "away"
                            game_info[f"{team_type}_team"] = {
                                "name": competitor.get("team", {}).get("displayName"),
                                "score": competitor.get("score"),
                                "rank": competitor.get("curatedRank", {}).get("current")
                            }

                games.append(game_info)

            result["games"] = games

        return result

    # Batch Operations

    async def get_multiple_games(self, sport: str, game_ids: List[str]) -> Dict:
        """
        Get multiple games in parallel.

        Args:
            sport: Sport name
            game_ids: List of game IDs

        Returns:
            Dictionary of game data by ID (None for failed fetches)
        """
        sport_path = self._get_sport_path(sport)

        requests = [
            {"endpoint": f"/{sport_path}/summary", "params": {"event": game_id}}
            for game_id in game_ids
        ]

        results = await self.batch_fetch(requests)

        # Map results to game IDs; batch_fetch preserves input order.
        game_data = {}
        for game_id, result in zip(game_ids, results):
            if not isinstance(result, Exception):
                game_data[game_id] = result
            else:
                logger.error(f"Failed to fetch game {game_id}: {result}")
                game_data[game_id] = None

        return {
            "source": "espn",
            "sport": sport,
            "data": game_data,
            "timestamp": datetime.utcnow().isoformat()
        }

    # Odds Methods

    async def get_odds(self, sport: str, game_id: str) -> Dict:
        """
        Get betting odds for a game.

        Args:
            sport: Sport name
            game_id: ESPN game ID

        Returns:
            Odds data
        """
        sport_path = self._get_sport_path(sport)
        endpoint = f"/{sport_path}/odds"

        return await self.fetch(endpoint, params={"event": game_id})

    # Health Check

    async def health_check(self) -> bool:
        """
        Check ESPN API health.

        Overrides the base "/" probe with a real scoreboard request.

        Returns:
            True if healthy, False otherwise
        """
        try:
            result = await self.get_scoreboard("nfl")
            return "data" in result
        except Exception as e:
            logger.error(f"ESPN health check failed: {e}")
            return False
\ No newline at end of file
diff --git a/neural_sdk/data_sources/weather/__init__.py b/neural_sdk/data_sources/weather/__init__.py
new file mode 100644
index 00000000..420475d8
--- /dev/null
+++ b/neural_sdk/data_sources/weather/__init__.py
@@ -0,0 +1,15 @@
"""
Weather Data Integration

Provides weather data for sports impact analysis.
+""" + +from .rest_adapter import WeatherRESTAdapter +from .models import WeatherData, WeatherCondition, WeatherImpact + +__all__ = [ + 'WeatherRESTAdapter', + 'WeatherData', + 'WeatherCondition', + 'WeatherImpact' +] \ No newline at end of file diff --git a/neural_sdk/data_sources/weather/models.py b/neural_sdk/data_sources/weather/models.py new file mode 100644 index 00000000..ad915894 --- /dev/null +++ b/neural_sdk/data_sources/weather/models.py @@ -0,0 +1,264 @@ +""" +Weather Data Models + +Data structures for weather information and sports impact analysis. +""" + +from dataclasses import dataclass +from typing import Optional, Dict, Any +from datetime import datetime +from enum import Enum + + +class WeatherCondition(Enum): + """Weather condition categories.""" + CLEAR = "clear" + CLOUDY = "cloudy" + RAIN = "rain" + SNOW = "snow" + FOG = "fog" + WIND = "wind" + STORM = "storm" + EXTREME = "extreme" + + +@dataclass +class WeatherData: + """Weather data for a location.""" + + # Location + latitude: float + longitude: float + + # Current conditions (required) + temperature: float # Fahrenheit + feels_like: float + humidity: float # Percentage + wind_speed: float # MPH + wind_direction: float # Degrees + + # Optional location info + city: Optional[str] = None + venue: Optional[str] = None + wind_gust: Optional[float] = None + + # Precipitation + precipitation: float = 0.0 # Inches per hour + rain: float = 0.0 + snow: float = 0.0 + + # Conditions + condition: WeatherCondition = WeatherCondition.CLEAR + description: str = "" + visibility: float = 10.0 # Miles + pressure: float = 1013.0 # mb + cloud_cover: float = 0.0 # Percentage + + # Timestamps + timestamp: datetime = None + sunrise: Optional[datetime] = None + sunset: Optional[datetime] = None + + def __post_init__(self): + """Initialize timestamp if not provided.""" + if self.timestamp is None: + self.timestamp = datetime.utcnow() + + @property + def is_outdoor_friendly(self) -> bool: + """Check if weather is 
suitable for outdoor sports.""" + return ( + self.condition not in [WeatherCondition.STORM, WeatherCondition.EXTREME] and + self.wind_speed < 30 and + self.precipitation < 0.5 and + self.visibility > 0.5 + ) + + @property + def has_precipitation(self) -> bool: + """Check if there's active precipitation.""" + return self.precipitation > 0 or self.rain > 0 or self.snow > 0 + + def to_dict(self) -> Dict[str, Any]: + """Convert to dictionary.""" + return { + "location": { + "latitude": self.latitude, + "longitude": self.longitude, + "city": self.city, + "venue": self.venue + }, + "temperature": { + "actual": self.temperature, + "feels_like": self.feels_like + }, + "wind": { + "speed": self.wind_speed, + "direction": self.wind_direction, + "gust": self.wind_gust + }, + "precipitation": { + "total": self.precipitation, + "rain": self.rain, + "snow": self.snow + }, + "conditions": { + "main": self.condition.value, + "description": self.description, + "humidity": self.humidity, + "visibility": self.visibility, + "pressure": self.pressure, + "cloud_cover": self.cloud_cover + }, + "outdoor_friendly": self.is_outdoor_friendly, + "has_precipitation": self.has_precipitation, + "timestamp": self.timestamp.isoformat() if self.timestamp else None + } + + +@dataclass +class WeatherImpact: + """ + Weather impact analysis for sports betting. + + Analyzes how weather conditions might affect game outcomes. 
+ """ + + weather_data: WeatherData + sport: str + is_outdoor: bool = True + + # Impact scores (0-100, higher = more impact) + passing_impact: float = 0.0 + rushing_impact: float = 0.0 + kicking_impact: float = 0.0 + scoring_impact: float = 0.0 + home_advantage_impact: float = 0.0 + + # Overall impact + total_impact: float = 0.0 + impact_summary: str = "" + + def calculate_football_impact(self): + """Calculate weather impact for football games.""" + weather = self.weather_data + + # Wind impact on passing and kicking + if weather.wind_speed > 20: + self.passing_impact = min(weather.wind_speed * 2, 80) + self.kicking_impact = min(weather.wind_speed * 2.5, 90) + elif weather.wind_speed > 10: + self.passing_impact = weather.wind_speed * 1.5 + self.kicking_impact = weather.wind_speed * 2 + + # Precipitation impact + if weather.has_precipitation: + precip_factor = min(weather.precipitation * 50, 70) + self.passing_impact += precip_factor * 0.7 + self.rushing_impact -= precip_factor * 0.3 # Rush advantage in rain + self.kicking_impact += precip_factor * 0.5 + + # Snow has different impact + if weather.snow > 0: + snow_factor = min(weather.snow * 30, 80) + self.scoring_impact += snow_factor + self.passing_impact += snow_factor * 0.3 + + # Temperature impact + if weather.temperature < 32: + cold_factor = (32 - weather.temperature) * 1.5 + self.passing_impact += cold_factor * 0.3 + self.kicking_impact += cold_factor * 0.4 + elif weather.temperature > 90: + heat_factor = (weather.temperature - 90) * 2 + self.scoring_impact -= heat_factor * 0.2 # More scoring in heat + + # Visibility impact + if weather.visibility < 1: + vis_factor = (1 - weather.visibility) * 50 + self.passing_impact += vis_factor + + # Home team advantage in bad weather + if not weather.is_outdoor_friendly: + self.home_advantage_impact = 20 + (self.total_impact * 0.3) + + # Calculate total impact + self.total_impact = ( + self.passing_impact * 0.3 + + self.rushing_impact * 0.2 + + self.kicking_impact * 0.2 
+ + self.scoring_impact * 0.2 + + self.home_advantage_impact * 0.1 + ) + + # Generate summary + self._generate_summary() + + def _generate_summary(self): + """Generate human-readable impact summary.""" + if self.total_impact < 10: + self.impact_summary = "Minimal weather impact expected" + elif self.total_impact < 25: + self.impact_summary = "Slight weather impact on gameplay" + elif self.total_impact < 50: + self.impact_summary = "Moderate weather impact - expect adjusted play calling" + elif self.total_impact < 75: + self.impact_summary = "Significant weather impact - favors running game and defense" + else: + self.impact_summary = "Extreme weather impact - major gameplay disruption expected" + + # Add specific concerns + concerns = [] + if self.passing_impact > 40: + concerns.append("passing game affected") + if self.kicking_impact > 40: + concerns.append("field goals risky") + if self.weather_data.wind_speed > 20: + concerns.append(f"high winds ({self.weather_data.wind_speed:.0f} mph)") + if self.weather_data.has_precipitation: + concerns.append("wet conditions") + + if concerns: + self.impact_summary += f" ({', '.join(concerns)})" + + def to_dict(self) -> Dict[str, Any]: + """Convert to dictionary.""" + return { + "weather": self.weather_data.to_dict(), + "sport": self.sport, + "is_outdoor": self.is_outdoor, + "impacts": { + "passing": round(self.passing_impact, 1), + "rushing": round(self.rushing_impact, 1), + "kicking": round(self.kicking_impact, 1), + "scoring": round(self.scoring_impact, 1), + "home_advantage": round(self.home_advantage_impact, 1), + "total": round(self.total_impact, 1) + }, + "summary": self.impact_summary, + "betting_considerations": self._get_betting_considerations() + } + + def _get_betting_considerations(self) -> Dict[str, Any]: + """Get betting-specific considerations.""" + considerations = { + "favor_under": self.scoring_impact > 30, + "favor_home": self.home_advantage_impact > 15, + "favor_running": self.rushing_impact < 
self.passing_impact, + "avoid_player_props_passing": self.passing_impact > 40, + "avoid_field_goal_props": self.kicking_impact > 50 + } + + # Recommendations + recs = [] + if considerations["favor_under"]: + recs.append("Consider UNDER on total points") + if considerations["favor_home"]: + recs.append("Home team has weather advantage") + if considerations["favor_running"]: + recs.append("Running backs may exceed expectations") + if considerations["avoid_player_props_passing"]: + recs.append("Avoid passing yards props") + + considerations["recommendations"] = recs + return considerations \ No newline at end of file diff --git a/neural_sdk/data_sources/weather/rest_adapter.py b/neural_sdk/data_sources/weather/rest_adapter.py new file mode 100644 index 00000000..4530894b --- /dev/null +++ b/neural_sdk/data_sources/weather/rest_adapter.py @@ -0,0 +1,444 @@ +""" +Weather REST API Adapter + +Integrates OpenWeatherMap API for weather data and sports impact analysis. +""" + +import os +from typing import Dict, Any, Optional, List, Tuple +from datetime import datetime, timedelta +import logging + +from ..base.rest_source import RESTDataSource +from ..base.auth_strategies import APIKeyAuth +from .models import WeatherData, WeatherCondition, WeatherImpact + +logger = logging.getLogger(__name__) + + +class WeatherRESTAdapter(RESTDataSource): + """ + REST adapter for OpenWeatherMap API. + + Provides weather data for sports venues and impact analysis + for prediction market trading. 
    """

    # Outdoor NFL venues only (domed/retractable stadiums are intentionally
    # absent). Maps stadium name -> (latitude, longitude, home team(s)).
    # NOTE(review): "Heinz Field" and "Acrisure Stadium" are the same
    # Pittsburgh venue (identical coordinates, same team) under its old and
    # new names. Keeping both lets callers look up either name, but
    # get_all_nfl_stadium_weather() will fetch that venue twice — confirm
    # the duplication is intended.
    NFL_STADIUMS = {
        "Highmark Stadium": (42.7738, -78.7870, "Buffalo Bills"),  # Buffalo
        "Gillette Stadium": (42.0909, -71.2643, "New England Patriots"),
        "MetLife Stadium": (40.8135, -74.0745, "NY Giants/Jets"),  # shared venue
        "M&T Bank Stadium": (39.2780, -76.6227, "Baltimore Ravens"),
        "Paycor Stadium": (39.0954, -84.5160, "Cincinnati Bengals"),
        "Cleveland Browns Stadium": (41.5061, -81.6995, "Cleveland Browns"),
        "Heinz Field": (40.4468, -80.0158, "Pittsburgh Steelers"),
        "TIAA Bank Field": (30.3239, -81.6373, "Jacksonville Jaguars"),
        "Nissan Stadium": (36.1665, -86.7713, "Tennessee Titans"),
        "Empower Field": (39.7439, -105.0201, "Denver Broncos"),
        "Arrowhead Stadium": (39.0489, -94.4839, "Kansas City Chiefs"),
        "Lambeau Field": (44.5013, -88.0622, "Green Bay Packers"),
        "Soldier Field": (41.8623, -87.6167, "Chicago Bears"),
        "Bank of America Stadium": (35.2258, -80.8528, "Carolina Panthers"),
        "Raymond James Stadium": (27.9759, -82.5033, "Tampa Bay Buccaneers"),
        "Lumen Field": (47.5952, -122.3316, "Seattle Seahawks"),
        "Levi's Stadium": (37.4033, -121.9694, "San Francisco 49ers"),
        "Lincoln Financial Field": (39.9008, -75.1675, "Philadelphia Eagles"),
        "FedExField": (38.9076, -76.8645, "Washington Commanders"),
        "Acrisure Stadium": (40.4468, -80.0158, "Pittsburgh Steelers")
    }

    def __init__(self, api_key: Optional[str] = None):
        """
        Initialize Weather REST adapter.

        Args:
            api_key: OpenWeatherMap API key (or from OPENWEATHER_API_KEY env)
        """
        # Explicit argument wins; otherwise fall back to the environment.
        self.api_key = api_key or os.getenv("OPENWEATHER_API_KEY")
        # Missing key is non-fatal: adapter methods return {"error": ...}.
        if not self.api_key:
            logger.warning("No OpenWeatherMap API key provided. 
Weather features limited.") + + # Use API key auth in query parameters + auth_strategy = APIKeyAuth( + api_key=self.api_key, + header_name="appid", + in_header=False + ) if self.api_key else None + + super().__init__( + base_url="https://api.openweathermap.org/data/2.5", + name="WeatherREST", + auth_strategy=auth_strategy, + timeout=10, + cache_ttl=600, # Cache weather for 10 minutes + rate_limit=60, # 60 calls per minute for free tier + max_retries=3 + ) + + logger.info("Weather REST adapter initialized") + + async def validate_response(self, response) -> bool: + """ + Validate OpenWeatherMap API response. + + Args: + response: HTTP response object + + Returns: + True if valid, False otherwise + """ + if response.status_code == 200: + return True + + if response.status_code == 401: + logger.error("OpenWeatherMap authentication failed - check API key") + elif response.status_code == 404: + logger.warning("Location not found") + elif response.status_code == 429: + logger.warning("OpenWeatherMap rate limit exceeded") + + return False + + async def transform_response(self, data: Any, endpoint: str) -> Dict: + """ + Transform weather response to standardized format. 

        Args:
            data: Raw OpenWeatherMap response
            endpoint: The endpoint that was called

        Returns:
            Standardized response
        """
        return {
            "source": "openweathermap",
            "endpoint": endpoint,
            "data": data,
            "timestamp": datetime.utcnow().isoformat(),
            "metadata": {
                "units": "imperial"  # We use imperial units for US sports
            }
        }

    def _parse_weather_condition(self, weather_id: int) -> WeatherCondition:
        """Parse OpenWeatherMap weather ID to condition enum.

        The thresholds follow OpenWeatherMap's condition-code ranges
        (2xx thunderstorm, 3xx drizzle, 5xx rain, 6xx snow, 7xx atmosphere
        /fog, 800 clear, 80x clouds) — presumably; confirm against the OWM
        "weather conditions" documentation.
        """
        if weather_id < 300:
            return WeatherCondition.STORM
        elif weather_id < 600:
            return WeatherCondition.RAIN
        elif weather_id < 700:
            return WeatherCondition.SNOW
        elif weather_id < 800:
            return WeatherCondition.FOG
        elif weather_id == 800:
            return WeatherCondition.CLEAR
        elif weather_id < 900:
            return WeatherCondition.CLOUDY
        else:
            return WeatherCondition.EXTREME

    def _parse_weather_response(self, data: Dict) -> WeatherData:
        """Parse an OpenWeatherMap payload into a WeatherData record.

        Converts metric fields (mm precipitation, metre visibility) to the
        imperial units WeatherData documents (inches, miles).
        """
        main = data.get("main", {})
        wind = data.get("wind", {})
        # NOTE(review): assumes "weather" is absent or a non-empty list; an
        # explicit empty list would raise IndexError — confirm upstream.
        weather = data.get("weather", [{}])[0]
        coord = data.get("coord", {})
        sys = data.get("sys", {})

        # Parse precipitation (OWM "1h" fields are mm over the last hour)
        rain = data.get("rain", {})
        snow = data.get("snow", {})
        precipitation = rain.get("1h", 0) + snow.get("1h", 0)

        return WeatherData(
            latitude=coord.get("lat", 0),
            longitude=coord.get("lon", 0),
            city=data.get("name"),
            temperature=main.get("temp", 0),
            feels_like=main.get("feels_like", 0),
            humidity=main.get("humidity", 0),
            wind_speed=wind.get("speed", 0),
            wind_direction=wind.get("deg", 0),
            wind_gust=wind.get("gust"),
            precipitation=precipitation / 25.4 if precipitation else 0,  # Convert mm to inches
            rain=rain.get("1h", 0) / 25.4 if rain.get("1h") else 0,
            snow=snow.get("1h", 0) / 25.4 if snow.get("1h") else 0,
            condition=self._parse_weather_condition(weather.get("id", 800)),
            description=weather.get("description", ""),
            visibility=data.get("visibility", 10000) / 1609.34,  # Convert meters to
miles + pressure=main.get("pressure", 1013), + cloud_cover=data.get("clouds", {}).get("all", 0), + timestamp=datetime.fromtimestamp(data.get("dt", 0)), + sunrise=datetime.fromtimestamp(sys.get("sunrise", 0)) if sys.get("sunrise") else None, + sunset=datetime.fromtimestamp(sys.get("sunset", 0)) if sys.get("sunset") else None + ) + + # Weather Data Methods + + async def get_weather_by_coords( + self, + lat: float, + lon: float, + venue: Optional[str] = None + ) -> Dict: + """ + Get current weather by coordinates. + + Args: + lat: Latitude + lon: Longitude + venue: Optional venue name + + Returns: + Weather data + """ + if not self.api_key: + return {"error": "No API key configured"} + + params = { + "lat": lat, + "lon": lon, + "units": "imperial", + "appid": self.api_key + } + + result = await self.fetch("/weather", params=params, use_cache=True) + + if "data" in result: + weather_data = self._parse_weather_response(result["data"]) + if venue: + weather_data.venue = venue + + result["parsed"] = weather_data.to_dict() + + return result + + async def get_weather_by_city(self, city: str, state: Optional[str] = None) -> Dict: + """ + Get current weather by city name. + + Args: + city: City name + state: State code (US only) + + Returns: + Weather data + """ + if not self.api_key: + return {"error": "No API key configured"} + + query = f"{city},{state},US" if state else city + + params = { + "q": query, + "units": "imperial", + "appid": self.api_key + } + + result = await self.fetch("/weather", params=params, use_cache=True) + + if "data" in result: + weather_data = self._parse_weather_response(result["data"]) + result["parsed"] = weather_data.to_dict() + + return result + + async def get_forecast( + self, + lat: float, + lon: float, + hours: int = 24 + ) -> Dict: + """ + Get weather forecast. 
+ + Args: + lat: Latitude + lon: Longitude + hours: Hours to forecast (max 120) + + Returns: + Forecast data + """ + if not self.api_key: + return {"error": "No API key configured"} + + params = { + "lat": lat, + "lon": lon, + "units": "imperial", + "cnt": min(hours // 3, 40), # API returns 3-hour intervals + "appid": self.api_key + } + + result = await self.fetch("/forecast", params=params, use_cache=True) + + if "data" in result and "list" in result["data"]: + forecasts = [] + for item in result["data"]["list"]: + weather_data = self._parse_weather_response(item) + forecasts.append(weather_data.to_dict()) + + result["parsed"] = forecasts + + return result + + # NFL Stadium Weather Methods + + async def get_nfl_stadium_weather(self, stadium_name: str) -> Dict: + """ + Get weather for NFL stadium. + + Args: + stadium_name: Name of NFL stadium + + Returns: + Weather data with impact analysis + """ + if stadium_name not in self.NFL_STADIUMS: + return {"error": f"Stadium '{stadium_name}' not found or is indoor"} + + lat, lon, team = self.NFL_STADIUMS[stadium_name] + + # Get current weather + result = await self.get_weather_by_coords(lat, lon, venue=stadium_name) + + if "parsed" in result: + # Add weather impact analysis + weather_data = self._parse_weather_response(result["data"]) + weather_data.venue = stadium_name + + impact = WeatherImpact( + weather_data=weather_data, + sport="NFL", + is_outdoor=True + ) + impact.calculate_football_impact() + + result["impact"] = impact.to_dict() + result["team"] = team + + return result + + async def get_all_nfl_stadium_weather(self) -> Dict: + """ + Get weather for all outdoor NFL stadiums. 
+ + Returns: + Dictionary of weather data by stadium + """ + stadium_weather = {} + + for stadium_name in self.NFL_STADIUMS: + try: + weather = await self.get_nfl_stadium_weather(stadium_name) + stadium_weather[stadium_name] = weather + except Exception as e: + logger.error(f"Failed to get weather for {stadium_name}: {e}") + stadium_weather[stadium_name] = {"error": str(e)} + + return { + "source": "openweathermap", + "sport": "NFL", + "stadiums": stadium_weather, + "timestamp": datetime.utcnow().isoformat() + } + + # Game Weather Analysis + + async def analyze_game_weather( + self, + lat: float, + lon: float, + game_time: datetime, + sport: str = "football", + venue: Optional[str] = None + ) -> Dict: + """ + Analyze weather impact for a specific game. + + Args: + lat: Venue latitude + lon: Venue longitude + game_time: Game start time + sport: Sport type + venue: Venue name + + Returns: + Weather analysis with betting impact + """ + # Get forecast if game is in future + if game_time > datetime.utcnow(): + hours_until = (game_time - datetime.utcnow()).total_seconds() / 3600 + forecast = await self.get_forecast(lat, lon, min(int(hours_until), 120)) + + # Find closest forecast to game time + if "parsed" in forecast and forecast["parsed"]: + # Use first forecast as approximation + weather_dict = forecast["parsed"][0] + else: + return {"error": "Could not get forecast for game time"} + else: + # Get current weather for past/current games + result = await self.get_weather_by_coords(lat, lon, venue) + if "parsed" in result: + weather_dict = result["parsed"] + else: + return {"error": "Could not get weather data"} + + # Create weather data object + weather_data = WeatherData( + latitude=lat, + longitude=lon, + venue=venue, + temperature=weather_dict["temperature"]["actual"], + feels_like=weather_dict["temperature"]["feels_like"], + humidity=weather_dict["conditions"]["humidity"], + wind_speed=weather_dict["wind"]["speed"], + 
wind_direction=weather_dict["wind"]["direction"], + wind_gust=weather_dict["wind"]["gust"], + precipitation=weather_dict["precipitation"]["total"], + rain=weather_dict["precipitation"]["rain"], + snow=weather_dict["precipitation"]["snow"] + ) + + # Calculate impact + impact = WeatherImpact( + weather_data=weather_data, + sport=sport, + is_outdoor=True + ) + + if sport.lower() in ["football", "nfl", "cfb"]: + impact.calculate_football_impact() + + return { + "source": "openweathermap", + "venue": venue, + "game_time": game_time.isoformat(), + "weather": weather_data.to_dict(), + "impact": impact.to_dict(), + "timestamp": datetime.utcnow().isoformat() + } + + # Health Check + + async def health_check(self) -> bool: + """ + Check OpenWeatherMap API health. + + Returns: + True if healthy, False otherwise + """ + if not self.api_key: + logger.warning("No API key configured for weather service") + return False + + try: + # Test with a known location (New York) + result = await self.get_weather_by_coords(40.7128, -74.0060) + return "data" in result and "error" not in result + except Exception as e: + logger.error(f"Weather API health check failed: {e}") + return False \ No newline at end of file diff --git a/neural_sdk/streaming/__init__.py b/neural_sdk/streaming/__init__.py deleted file mode 100644 index e3ef9eb3..00000000 --- a/neural_sdk/streaming/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -""" -Neural SDK Streaming Module - -Real-time market data streaming and WebSocket functionality. -This module provides user-friendly wrappers around the data pipeline -WebSocket infrastructure. 
-""" - -from .websocket import NeuralWebSocket -from .market_stream import MarketStream, NFLMarketStream -from .handlers import StreamEventHandler - -__all__ = [ - "NeuralWebSocket", - "MarketStream", - "NFLMarketStream", - "StreamEventHandler", -] diff --git a/neural_sdk/streaming/handlers.py b/neural_sdk/streaming/handlers.py deleted file mode 100644 index e831049c..00000000 --- a/neural_sdk/streaming/handlers.py +++ /dev/null @@ -1,354 +0,0 @@ -""" -Neural SDK Stream Event Handlers - -Event handler utilities and decorators for WebSocket streaming. -""" - -import logging -from typing import Any, Callable, Dict, List -from enum import Enum - -logger = logging.getLogger(__name__) - - -class StreamEventType(Enum): - """Types of streaming events.""" - MARKET_DATA = "market_data" - TRADE = "trade" - CONNECTION = "connection" - ERROR = "error" - PRICE_ALERT = "price_alert" - VOLUME_ALERT = "volume_alert" - - -class StreamEventHandler: - """ - Event handler registry for streaming events. - - Provides a centralized way to manage and dispatch streaming events - with filtering, priority, and error handling capabilities. - """ - - def __init__(self): - """Initialize event handler registry.""" - self.handlers: Dict[StreamEventType, List[Dict]] = { - event_type: [] for event_type in StreamEventType - } - self.global_handlers: List[Callable] = [] - - # Event filtering - self.market_filters: Dict[str, List[Callable]] = {} # ticker -> handlers - self.team_filters: Dict[str, List[Callable]] = {} # team -> handlers - - # Statistics - self.event_count: Dict[StreamEventType, int] = { - event_type: 0 for event_type in StreamEventType - } - - def register_handler( - self, - event_type: StreamEventType, - handler: Callable, - priority: int = 0, - filters: Dict[str, Any] = None - ) -> None: - """ - Register an event handler. 
- - Args: - event_type: Type of event to handle - handler: Handler function - priority: Handler priority (higher = earlier execution) - filters: Event filters (e.g., {'ticker': 'NFL-*'}) - """ - handler_info = { - 'handler': handler, - 'priority': priority, - 'filters': filters or {}, - 'name': handler.__name__ - } - - self.handlers[event_type].append(handler_info) - - # Sort by priority (descending) - self.handlers[event_type].sort(key=lambda x: x['priority'], reverse=True) - - logger.info(f"Registered {event_type.value} handler: {handler.__name__}") - - def register_market_handler(self, ticker: str, handler: Callable) -> None: - """ - Register handler for specific market ticker. - - Args: - ticker: Market ticker (supports wildcards like 'NFL-*') - handler: Handler function - """ - if ticker not in self.market_filters: - self.market_filters[ticker] = [] - - self.market_filters[ticker].append(handler) - logger.info(f"Registered market handler for {ticker}: {handler.__name__}") - - def register_team_handler(self, team_code: str, handler: Callable) -> None: - """ - Register handler for specific team. - - Args: - team_code: Team code (e.g., 'PHI', 'KC') - handler: Handler function - """ - team_code = team_code.upper() - if team_code not in self.team_filters: - self.team_filters[team_code] = [] - - self.team_filters[team_code].append(handler) - logger.info(f"Registered team handler for {team_code}: {handler.__name__}") - - def register_global_handler(self, handler: Callable) -> None: - """ - Register global handler that receives all events. - - Args: - handler: Handler function - """ - self.global_handlers.append(handler) - logger.info(f"Registered global handler: {handler.__name__}") - - async def dispatch_event( - self, - event_type: StreamEventType, - event_data: Dict[str, Any] - ) -> None: - """ - Dispatch event to registered handlers. 
- - Args: - event_type: Type of event - event_data: Event data dictionary - """ - self.event_count[event_type] += 1 - - # Dispatch to global handlers first - for handler in self.global_handlers: - try: - await self._call_handler(handler, event_type, event_data) - except Exception as e: - logger.error(f"Error in global handler {handler.__name__}: {e}") - - # Dispatch to specific event type handlers - for handler_info in self.handlers[event_type]: - if self._should_handle_event(handler_info, event_data): - try: - await self._call_handler( - handler_info['handler'], - event_type, - event_data - ) - except Exception as e: - logger.error( - f"Error in {event_type.value} handler " - f"{handler_info['name']}: {e}" - ) - - # Dispatch to filtered handlers - await self._dispatch_filtered_events(event_type, event_data) - - async def _dispatch_filtered_events( - self, - event_type: StreamEventType, - event_data: Dict[str, Any] - ) -> None: - """Dispatch events to filtered handlers.""" - - # Market-specific handlers - if event_type == StreamEventType.MARKET_DATA: - ticker = event_data.get('market_ticker', '') - - for filter_ticker, handlers in self.market_filters.items(): - if self._matches_ticker_filter(ticker, filter_ticker): - for handler in handlers: - try: - await self._call_handler(handler, event_type, event_data) - except Exception as e: - logger.error(f"Error in market handler {handler.__name__}: {e}") - - # Team-specific handlers - team_code = self._extract_team_from_event(event_data) - if team_code: - for filter_team, handlers in self.team_filters.items(): - if team_code.upper() == filter_team: - for handler in handlers: - try: - await self._call_handler(handler, event_type, event_data) - except Exception as e: - logger.error(f"Error in team handler {handler.__name__}: {e}") - - def _should_handle_event(self, handler_info: Dict, event_data: Dict) -> bool: - """Check if handler should process this event based on filters.""" - filters = handler_info.get('filters', {}) - 
- if not filters: - return True - - # Check ticker filter - if 'ticker' in filters: - ticker = event_data.get('market_ticker', '') - if not self._matches_ticker_filter(ticker, filters['ticker']): - return False - - # Check team filter - if 'team' in filters: - team = self._extract_team_from_event(event_data) - if not team or team.upper() != filters['team'].upper(): - return False - - # Check price range filter - if 'price_range' in filters: - yes_price = event_data.get('yes_price') - if yes_price is not None: - min_price, max_price = filters['price_range'] - if not (min_price <= yes_price <= max_price): - return False - - return True - - def _matches_ticker_filter(self, ticker: str, filter_pattern: str) -> bool: - """Check if ticker matches filter pattern.""" - if filter_pattern.endswith('*'): - prefix = filter_pattern[:-1] - return ticker.startswith(prefix) - return ticker == filter_pattern - - def _extract_team_from_event(self, event_data: Dict) -> str: - """Extract team code from event data.""" - ticker = event_data.get('market_ticker', '') - - # Simple extraction - would need more sophisticated logic - if 'NFL' in ticker: - parts = ticker.split('-') - if len(parts) >= 4: - return parts[-1][:3] - - return '' - - async def _call_handler( - self, - handler: Callable, - event_type: StreamEventType, - event_data: Dict[str, Any] - ) -> None: - """Call handler function with proper error handling.""" - try: - # Check if handler expects event_type parameter - import inspect - sig = inspect.signature(handler) - - if len(sig.parameters) == 1: - # Handler expects only event_data - await handler(event_data) - elif len(sig.parameters) == 2: - # Handler expects event_type and event_data - await handler(event_type, event_data) - else: - # Default to event_data only - await handler(event_data) - - except Exception as e: - logger.error(f"Handler {handler.__name__} failed: {e}") - raise - - def get_statistics(self) -> Dict[str, Any]: - """Get event handler statistics.""" - return 
{ - 'event_counts': dict(self.event_count), - 'handler_counts': { - event_type.value: len(handlers) - for event_type, handlers in self.handlers.items() - }, - 'market_filters': len(self.market_filters), - 'team_filters': len(self.team_filters), - 'global_handlers': len(self.global_handlers) - } - - def clear_handlers(self, event_type: StreamEventType = None) -> None: - """ - Clear registered handlers. - - Args: - event_type: Specific event type to clear, or None for all - """ - if event_type: - self.handlers[event_type].clear() - logger.info(f"Cleared {event_type.value} handlers") - else: - for handlers in self.handlers.values(): - handlers.clear() - self.market_filters.clear() - self.team_filters.clear() - self.global_handlers.clear() - logger.info("Cleared all handlers") - - -# Convenience decorators -def market_data_handler( - ticker_filter: str = None, - team_filter: str = None, - price_range: tuple = None -): - """ - Decorator for market data handlers with filtering. - - Args: - ticker_filter: Ticker pattern to filter (e.g., 'NFL-*') - team_filter: Team code to filter (e.g., 'PHI') - price_range: Price range tuple (min, max) - - Example: - ```python - @market_data_handler(ticker_filter='NFL-*', price_range=(0.0, 0.3)) - async def handle_nfl_oversold(market_data): - print(f"NFL oversold: {market_data['market_ticker']}") - ``` - """ - def decorator(func): - func._stream_handler_type = StreamEventType.MARKET_DATA - func._stream_filters = { - 'ticker': ticker_filter, - 'team': team_filter, - 'price_range': price_range - } - # Remove None values - func._stream_filters = {k: v for k, v in func._stream_filters.items() if v is not None} - return func - - return decorator - - -def trade_handler(ticker_filter: str = None): - """ - Decorator for trade execution handlers. 
- - Args: - ticker_filter: Ticker pattern to filter - """ - def decorator(func): - func._stream_handler_type = StreamEventType.TRADE - func._stream_filters = {'ticker': ticker_filter} if ticker_filter else {} - return func - - return decorator - - -def price_alert_handler(threshold: float = 0.05): - """ - Decorator for price alert handlers. - - Args: - threshold: Minimum price change to trigger alert - """ - def decorator(func): - func._stream_handler_type = StreamEventType.PRICE_ALERT - func._stream_filters = {'threshold': threshold} - return func - - return decorator diff --git a/neural_sdk/streaming/market_stream.py b/neural_sdk/streaming/market_stream.py deleted file mode 100644 index 4074507f..00000000 --- a/neural_sdk/streaming/market_stream.py +++ /dev/null @@ -1,256 +0,0 @@ -""" -Neural SDK Market Streaming Utilities - -Specialized streaming classes for different market types and sports. -""" - -import logging -from typing import Dict, List, Optional, Any -from datetime import datetime - -from .websocket import NeuralWebSocket -from ..core.config import SDKConfig -from ..core.exceptions import SDKError - -logger = logging.getLogger(__name__) - - -class MarketStream: - """ - Base class for market-specific streaming functionality. - - Provides common functionality for streaming different types of markets. - """ - - def __init__(self, config: SDKConfig): - """ - Initialize market stream. 
- - Args: - config: SDK configuration object - """ - self.config = config - self.websocket = NeuralWebSocket(config) - - # Market tracking - self.active_markets: Dict[str, Dict] = {} - self.price_history: Dict[str, List[Dict]] = {} - - # Set up default handlers - self._setup_default_handlers() - - def _setup_default_handlers(self): - """Set up default market data handlers.""" - - @self.websocket.on_market_data - async def track_market_data(market_data: Dict[str, Any]): - """Track market data updates.""" - ticker = market_data.get('market_ticker') - if ticker: - # Update active markets - self.active_markets[ticker] = market_data - - # Add to price history - if ticker not in self.price_history: - self.price_history[ticker] = [] - - self.price_history[ticker].append({ - 'timestamp': datetime.now(), - 'yes_price': market_data.get('yes_price'), - 'no_price': market_data.get('no_price'), - 'volume': market_data.get('volume', 0) - }) - - # Keep only last 100 price points - if len(self.price_history[ticker]) > 100: - self.price_history[ticker].pop(0) - - async def connect(self): - """Connect to WebSocket.""" - await self.websocket.connect() - - async def disconnect(self): - """Disconnect from WebSocket.""" - await self.websocket.disconnect() - - def get_market_data(self, ticker: str) -> Optional[Dict]: - """Get current market data for a ticker.""" - return self.active_markets.get(ticker) - - def get_price_history(self, ticker: str) -> List[Dict]: - """Get price history for a ticker.""" - return self.price_history.get(ticker, []) - - def get_active_markets(self) -> List[str]: - """Get list of active market tickers.""" - return list(self.active_markets.keys()) - - -class NFLMarketStream(MarketStream): - """ - Specialized streaming class for NFL markets. - - Provides NFL-specific functionality like game tracking, - team filtering, and game state analysis. 
- """ - - def __init__(self, config: SDKConfig): - """Initialize NFL market stream.""" - super().__init__(config) - - # NFL-specific tracking - self.games: Dict[str, Dict] = {} - self.teams: Dict[str, List[str]] = {} # team -> list of market tickers - - # Set up NFL-specific handlers - self._setup_nfl_handlers() - - def _setup_nfl_handlers(self): - """Set up NFL-specific market data handlers.""" - - @self.websocket.on_market_data - async def track_nfl_games(market_data: Dict[str, Any]): - """Track NFL game data.""" - ticker = market_data.get('market_ticker', '') - - if 'NFL' in ticker.upper() or 'KXNFL' in ticker.upper(): - game_id = self._extract_game_id(ticker) - if game_id: - if game_id not in self.games: - self.games[game_id] = { - 'markets': {}, - 'home_team': self._extract_home_team(ticker), - 'away_team': self._extract_away_team(ticker), - 'last_update': datetime.now() - } - - self.games[game_id]['markets'][ticker] = market_data - self.games[game_id]['last_update'] = datetime.now() - - # Track by teams - home_team = self.games[game_id]['home_team'] - away_team = self.games[game_id]['away_team'] - - for team in [home_team, away_team]: - if team and team != 'UNK': - if team not in self.teams: - self.teams[team] = [] - if ticker not in self.teams[team]: - self.teams[team].append(ticker) - - async def subscribe_to_game(self, game_id: str): - """ - Subscribe to all markets for a specific NFL game. - - Args: - game_id: Game identifier (e.g., "25SEP04DALPHI") - """ - await self.websocket.subscribe_nfl_game(game_id) - logger.info(f"๐Ÿˆ Subscribed to NFL game: {game_id}") - - async def subscribe_to_team(self, team_code: str): - """ - Subscribe to all markets for a specific NFL team. 
- - Args: - team_code: Team code (e.g., "PHI", "KC", "SF") - """ - await self.websocket.subscribe_nfl_team(team_code) - logger.info(f"๐Ÿˆ Subscribed to NFL team: {team_code}") - - async def subscribe_to_all_nfl(self): - """Subscribe to all active NFL markets.""" - try: - # This would need to be implemented to get all NFL markets - # For now, we'll use a pattern-based subscription - await self.websocket.subscribe_markets(['KXNFLGAME*']) # Pattern matching - logger.info("๐Ÿˆ Subscribed to all NFL markets") - except Exception as e: - logger.error(f"Failed to subscribe to all NFL markets: {e}") - raise SDKError(f"NFL subscription failed: {e}") from e - - def get_game_data(self, game_id: str) -> Optional[Dict]: - """Get data for a specific game.""" - return self.games.get(game_id) - - def get_team_markets(self, team_code: str) -> List[str]: - """Get market tickers for a specific team.""" - return self.teams.get(team_code.upper(), []) - - def get_active_games(self) -> List[str]: - """Get list of active game IDs.""" - return list(self.games.keys()) - - def get_game_win_probability(self, game_id: str) -> Optional[float]: - """ - Calculate win probability for home team in a game. - - Args: - game_id: Game identifier - - Returns: - Win probability (0.0 to 1.0) or None if not available - """ - game_data = self.games.get(game_id) - if not game_data: - return None - - # Look for winner markets - for ticker, market_data in game_data['markets'].items(): - if 'WINNER' in ticker.upper(): - yes_price = market_data.get('yes_price') - if yes_price is not None: - return yes_price - - return None - - def get_game_summary(self, game_id: str) -> Optional[Dict]: - """ - Get comprehensive summary for a game. 
- - Args: - game_id: Game identifier - - Returns: - Game summary dictionary or None if game not found - """ - game_data = self.games.get(game_id) - if not game_data: - return None - - win_prob = self.get_game_win_probability(game_id) - - return { - 'game_id': game_id, - 'home_team': game_data['home_team'], - 'away_team': game_data['away_team'], - 'markets_count': len(game_data['markets']), - 'win_probability': win_prob, - 'last_update': game_data['last_update'].isoformat() - } - - def _extract_game_id(self, ticker: str) -> Optional[str]: - """Extract game identifier from ticker.""" - if "KXNFLGAME" in ticker: - parts = ticker.split("-") - if len(parts) >= 3: - return f"{parts[1]}-{parts[2]}" - return None - - def _extract_home_team(self, ticker: str) -> str: - """Extract home team from ticker (simplified).""" - if "KXNFLGAME" in ticker: - parts = ticker.split("-") - if len(parts) >= 4: - return parts[-1][:3] - return "UNK" - - def _extract_away_team(self, ticker: str) -> str: - """Extract away team from ticker (simplified).""" - if "KXNFLGAME" in ticker: - parts = ticker.split("-") - if len(parts) >= 4: - game_part = parts[2] - if len(game_part) > 6: - return game_part[-6:-3] - return "UNK" diff --git a/neural_sdk/streaming/websocket.py b/neural_sdk/streaming/websocket.py deleted file mode 100644 index 3c6b0e68..00000000 --- a/neural_sdk/streaming/websocket.py +++ /dev/null @@ -1,377 +0,0 @@ -""" -Neural SDK WebSocket Client - -User-friendly wrapper around the data pipeline WebSocket infrastructure. -Provides a clean API for real-time market data streaming. 
-""" - -import asyncio -import logging -from typing import Any, Callable, Dict, List, Optional - -from ..core.config import SDKConfig -from ..core.exceptions import ConnectionError, SDKError - -# Import from data pipeline -try: - from neural_sdk.data_pipeline.streaming.websocket import KalshiWebSocket - from neural_sdk.data_pipeline.streaming.handlers import DefaultMessageHandler - from neural_sdk.data_pipeline.data_sources.kalshi.market_discovery import ( - KalshiMarketDiscovery, - SportMarket - ) - from neural_sdk.data_pipeline.sports_config import Sport -except ImportError as e: - raise SDKError( - f"Data pipeline not available: {e}. " - "Make sure data pipeline is properly installed." - ) from e - -logger = logging.getLogger(__name__) - - -class NeuralWebSocket: - """ - Neural SDK WebSocket client for real-time market data streaming. - - This class provides a user-friendly interface to the underlying - data pipeline WebSocket infrastructure. - - Example: - ```python - from neural_sdk import NeuralSDK - - sdk = NeuralSDK.from_env() - websocket = sdk.create_websocket() - - @websocket.on_market_data - async def handle_price_update(market_data): - print(f"Price update: {market_data['ticker']} = {market_data['yes_price']}") - - await websocket.connect() - await websocket.subscribe_markets(['NFL-*']) - await websocket.run_forever() - ``` - """ - - def __init__(self, config: SDKConfig): - """ - Initialize Neural WebSocket client. 
- - Args: - config: SDK configuration object - """ - self.config = config - self._ws_client: Optional[KalshiWebSocket] = None - self._market_discovery: Optional[KalshiMarketDiscovery] = None - - # Event handlers - self._market_data_handlers: List[Callable] = [] - self._trade_handlers: List[Callable] = [] - self._connection_handlers: List[Callable] = [] - self._error_handlers: List[Callable] = [] - - # Connection state - self._connected = False - self._subscribed_markets: set = set() - - logger.info("Neural WebSocket client initialized") - - async def connect(self) -> None: - """ - Connect to the WebSocket server. - - Raises: - ConnectionError: If connection fails - SDKError: If configuration is invalid - """ - try: - # Initialize data pipeline WebSocket client - self._ws_client = KalshiWebSocket() - self._market_discovery = KalshiMarketDiscovery() - - # Set up message handler - handler = DefaultMessageHandler() - handler.set_ticker_callback(self._handle_market_data) - handler.set_trade_callback(self._handle_trade_data) - handler.set_error_callback(self._handle_error) - - self._ws_client.message_handler = handler - - # Connect - await self._ws_client.connect() - self._connected = True - - logger.info("โœ… Neural WebSocket connected successfully") - - # Notify connection handlers - for handler in self._connection_handlers: - try: - await handler({"status": "connected"}) - except Exception as e: - logger.error(f"Error in connection handler: {e}") - - except Exception as e: - logger.error(f"Failed to connect WebSocket: {e}") - raise ConnectionError(f"WebSocket connection failed: {e}") from e - - async def disconnect(self) -> None: - """Disconnect from the WebSocket server.""" - if self._ws_client: - await self._ws_client.disconnect() - self._connected = False - logger.info("Neural WebSocket disconnected") - - # Notify connection handlers - for handler in self._connection_handlers: - try: - await handler({"status": "disconnected"}) - except Exception as e: - 
logger.error(f"Error in connection handler: {e}") - - async def subscribe_markets(self, tickers: List[str]) -> None: - """ - Subscribe to market tickers for real-time updates. - - Args: - tickers: List of market tickers to subscribe to - - Raises: - ConnectionError: If not connected - SDKError: If subscription fails - """ - if not self._connected or not self._ws_client: - raise ConnectionError("WebSocket not connected. Call connect() first.") - - try: - await self._ws_client.subscribe_markets(tickers) - self._subscribed_markets.update(tickers) - - logger.info(f"โœ… Subscribed to {len(tickers)} markets") - - except Exception as e: - logger.error(f"Failed to subscribe to markets: {e}") - raise SDKError(f"Market subscription failed: {e}") from e - - async def subscribe_nfl_game(self, game_id: str) -> None: - """ - Subscribe to all markets for a specific NFL game using proper series discovery. - - Args: - game_id: Game identifier (e.g., "25SEP04DALPHI") - - Raises: - SDKError: If game markets not found or subscription fails - """ - try: - # Import here to avoid circular imports - import sys - from pathlib import Path - - project_root = Path(__file__).parent.parent.parent - if str(project_root) not in sys.path: - sys.path.insert(0, str(project_root)) - - from sports_market_discovery import SportsMarketDiscovery - - discovery = SportsMarketDiscovery() - - try: - # Get all NFL markets using proper series discovery - nfl_markets = discovery.find_nfl_markets(status='open') - - # Filter for specific game - game_tickers = [] - for series_ticker, markets in nfl_markets.items(): - for market in markets: - ticker = market.get('ticker', '') - if game_id.upper() in ticker.upper(): - game_tickers.append(ticker) - - if not game_tickers: - raise SDKError(f"No markets found for game: {game_id}") - - await self.subscribe_markets(game_tickers) - logger.info(f"๐Ÿˆ Subscribed to {len(game_tickers)} markets for NFL game: {game_id}") - - finally: - discovery.close() - - except Exception as 
e: - logger.error(f"Failed to subscribe to NFL game {game_id}: {e}") - raise SDKError(f"NFL game subscription failed: {e}") from e - - async def subscribe_nfl_team(self, team_code: str) -> None: - """ - Subscribe to all markets for a specific NFL team using proper series discovery. - - Args: - team_code: Team code (e.g., "PHI", "KC", "SF") - - Raises: - SDKError: If team markets not found or subscription fails - """ - try: - # Import here to avoid circular imports - import sys - from pathlib import Path - - project_root = Path(__file__).parent.parent.parent - if str(project_root) not in sys.path: - sys.path.insert(0, str(project_root)) - - from sports_market_discovery import SportsMarketDiscovery - - discovery = SportsMarketDiscovery() - - try: - team_markets = discovery.find_team_markets(team_code) - - if not team_markets: - raise SDKError(f"No markets found for team: {team_code}") - - # Extract tickers from market data - market_tickers = [market.get('ticker') for market in team_markets if market.get('ticker')] - - if not market_tickers: - raise SDKError(f"No valid tickers found for team: {team_code}") - - await self.subscribe_markets(market_tickers) - logger.info(f"๐Ÿˆ Subscribed to {len(market_tickers)} markets for NFL team: {team_code}") - - finally: - discovery.close() - - except Exception as e: - logger.error(f"Failed to subscribe to NFL team {team_code}: {e}") - raise SDKError(f"NFL team subscription failed: {e}") from e - - def on_market_data(self, func: Callable) -> Callable: - """ - Decorator to register market data handler. 
- - Args: - func: Handler function that processes market data updates - - Returns: - Decorated handler function - - Example: - ```python - @websocket.on_market_data - async def handle_price_update(market_data): - print(f"Price: {market_data['yes_price']}") - ``` - """ - self._market_data_handlers.append(func) - logger.info(f"Registered market data handler: {func.__name__}") - return func - - def on_trade(self, func: Callable) -> Callable: - """ - Decorator to register trade execution handler. - - Args: - func: Handler function that processes trade executions - - Returns: - Decorated handler function - """ - self._trade_handlers.append(func) - logger.info(f"Registered trade handler: {func.__name__}") - return func - - def on_connection(self, func: Callable) -> Callable: - """ - Decorator to register connection status handler. - - Args: - func: Handler function that processes connection events - - Returns: - Decorated handler function - """ - self._connection_handlers.append(func) - logger.info(f"Registered connection handler: {func.__name__}") - return func - - def on_error(self, func: Callable) -> Callable: - """ - Decorator to register error handler. 
- - Args: - func: Handler function that processes errors - - Returns: - Decorated handler function - """ - self._error_handlers.append(func) - logger.info(f"Registered error handler: {func.__name__}") - return func - - async def _handle_market_data(self, market_data: Dict[str, Any]) -> None: - """Handle incoming market data updates.""" - # Call all registered market data handlers - for handler in self._market_data_handlers: - try: - await handler(market_data) - except Exception as e: - logger.error(f"Error in market data handler {handler.__name__}: {e}") - - async def _handle_trade_data(self, trade_data: Dict[str, Any]) -> None: - """Handle incoming trade execution data.""" - # Call all registered trade handlers - for handler in self._trade_handlers: - try: - await handler(trade_data) - except Exception as e: - logger.error(f"Error in trade handler {handler.__name__}: {e}") - - async def _handle_error(self, error_data: Dict[str, Any]) -> None: - """Handle WebSocket errors.""" - # Call all registered error handlers - for handler in self._error_handlers: - try: - await handler(error_data) - except Exception as e: - logger.error(f"Error in error handler {handler.__name__}: {e}") - - async def run_forever(self) -> None: - """ - Run the WebSocket client until stopped. - - This method will block until the WebSocket is disconnected - or an error occurs. - """ - if not self._connected: - raise ConnectionError("WebSocket not connected. Call connect() first.") - - try: - logger.info("๐Ÿš€ Neural WebSocket running... 
(Press Ctrl+C to stop)") - - while self._connected: - await asyncio.sleep(1) - - except KeyboardInterrupt: - logger.info("Received shutdown signal") - finally: - await self.disconnect() - - def get_subscribed_markets(self) -> List[str]: - """Get list of currently subscribed markets.""" - return list(self._subscribed_markets) - - def is_connected(self) -> bool: - """Check if WebSocket is connected.""" - return self._connected - - def get_status(self) -> Dict[str, Any]: - """Get WebSocket client status.""" - return { - "connected": self._connected, - "subscribed_markets": len(self._subscribed_markets), - "market_data_handlers": len(self._market_data_handlers), - "trade_handlers": len(self._trade_handlers), - "connection_handlers": len(self._connection_handlers), - "error_handlers": len(self._error_handlers) - } diff --git a/scripts/college_football_discovery.json b/scripts/college_football_discovery.json new file mode 100644 index 00000000..b2f1fa70 --- /dev/null +++ b/scripts/college_football_discovery.json @@ -0,0 +1,10 @@ +{ + "series_count": 4297, + "events": [ + { + "ticker": null, + "title": "College Football Championship Winner?" + } + ], + "markets": [] +} \ No newline at end of file diff --git a/scripts/discover_college_football.py b/scripts/discover_college_football.py new file mode 100644 index 00000000..91e76de7 --- /dev/null +++ b/scripts/discover_college_football.py @@ -0,0 +1,248 @@ +#!/usr/bin/env python3 +""" +Discover College Football Markets +================================== +Script to discover available college football markets on Kalshi. + +This script will search for college football games and markets +by trying different series tickers and search patterns. 
+""" + +import asyncio +import sys +from pathlib import Path +from typing import List, Dict, Any +import json + +# Add parent directory to path +sys.path.insert(0, str(Path(__file__).parent.parent)) + +from neural_sdk.data_pipeline.data_sources.kalshi.client import KalshiClient + + +def discover_series(): + """Discover all available series on Kalshi""" + print("\n๐Ÿ” Discovering all series on Kalshi...") + print("=" * 60) + + client = KalshiClient() + + # Get all series + try: + response = client.get('/series') + series_list = response.get('series', []) + + print(f"Found {len(series_list)} total series\n") + + # Filter for potential college football series + college_keywords = ['NCAA', 'CFB', 'CFP', 'COLLEGE', 'FOOTBALL', 'BOWL'] + college_series = [] + + for series in series_list: + ticker = series.get('ticker', '') + title = series.get('title', '') + + # Check if any keyword matches + for keyword in college_keywords: + if keyword in ticker.upper() or keyword in title.upper(): + college_series.append(series) + break + + if college_series: + print("๐Ÿ“š Potential College Football Series:") + print("-" * 40) + for series in college_series: + print(f"Ticker: {series.get('ticker')}") + print(f"Title: {series.get('title')}") + print(f"Category: {series.get('category')}") + print() + else: + print("No college football specific series found.") + print("\nSearching all series for football content...") + + # Broader search + football_series = [] + for series in series_list: + ticker = series.get('ticker', '') + title = series.get('title', '') + if 'FOOTBALL' in title.upper() or 'GAME' in ticker.upper(): + football_series.append(series) + + if football_series: + print("\n๐Ÿˆ All Football-Related Series:") + print("-" * 40) + for series in football_series[:10]: # Limit to first 10 + print(f"Ticker: {series.get('ticker')}") + print(f"Title: {series.get('title')}") + print(f"Category: {series.get('category')}") + print() + + return series_list + + except Exception as e: + 
print(f"Error fetching series: {e}") + return [] + + +def discover_events_by_series(series_ticker: str): + """Discover events for a specific series""" + print(f"\n๐ŸŽฏ Discovering events for series: {series_ticker}") + print("-" * 40) + + client = KalshiClient() + + try: + params = { + 'series_ticker': series_ticker, + 'status': 'open', + 'limit': 100, + 'with_nested_markets': True + } + + response = client.get('/events', params=params) + events = response.get('events', []) + + if events: + print(f"Found {len(events)} events:") + for event in events[:5]: # Show first 5 + print(f"\nEvent: {event.get('ticker')}") + print(f"Title: {event.get('title')}") + print(f"Category: {event.get('category')}") + + # Show nested markets if available + markets = event.get('markets', []) + if markets: + print(f"Markets ({len(markets)}):") + for market in markets: + print(f" - {market.get('ticker')}: {market.get('yes_sub_title')} vs {market.get('no_sub_title')}") + else: + print("No events found for this series") + + return events + + except Exception as e: + print(f"Error fetching events: {e}") + return [] + + +def search_markets_by_keyword(keyword: str): + """Search markets by keyword""" + print(f"\n๐Ÿ”Ž Searching markets with keyword: '{keyword}'") + print("-" * 40) + + client = KalshiClient() + + try: + # Get all markets (paginated) + all_markets = [] + cursor = None + + for _ in range(5): # Limit pagination + params = { + 'limit': 100, + 'status': 'open' + } + if cursor: + params['cursor'] = cursor + + response = client.get('/markets', params=params) + markets = response.get('markets', []) + + # Filter by keyword + for market in markets: + title = market.get('title', '') + subtitle = market.get('subtitle', '') + ticker = market.get('ticker', '') + + if keyword.upper() in title.upper() or keyword.upper() in subtitle.upper() or keyword.upper() in ticker.upper(): + all_markets.append(market) + + cursor = response.get('cursor') + if not cursor: + break + + if all_markets: + 
print(f"Found {len(all_markets)} markets containing '{keyword}':") + for market in all_markets[:10]: # Show first 10 + print(f"\nTicker: {market.get('ticker')}") + print(f"Title: {market.get('title')}") + print(f"Event: {market.get('event_ticker')}") + print(f"YES: {market.get('yes_sub_title')} | NO: {market.get('no_sub_title')}") + + # Show current prices + yes_ask = market.get('yes_ask') + if yes_ask: + print(f"Price: ${yes_ask/100:.2f}") + else: + print(f"No markets found containing '{keyword}'") + + return all_markets + + except Exception as e: + print(f"Error searching markets: {e}") + return [] + + +def main(): + """Main discovery function""" + print("\n" + "=" * 60) + print("๐Ÿˆ COLLEGE FOOTBALL MARKET DISCOVERY") + print("=" * 60) + + # 1. Discover all series + series_list = discover_series() + + # 2. Try specific college football series tickers + potential_tickers = [ + 'KXCFP', # College Football Playoff + 'KXCFPGAME', # CFP Games + 'KXNCAAF', # NCAA Football + 'KXCFB', # College Football + 'KXBOWL', # Bowl Games + ] + + print("\n๐Ÿ“‹ Trying potential college football series tickers...") + print("=" * 60) + + found_events = [] + for ticker in potential_tickers: + events = discover_events_by_series(ticker) + if events: + found_events.extend(events) + + # 3. 
Search by keywords + print("\n๐Ÿ” Searching by college football keywords...") + print("=" * 60) + + keywords = ['NCAA', 'College Football', 'Bowl', 'CFP', 'Alabama', 'Georgia', 'Michigan', 'Ohio State'] + + found_markets = [] + for keyword in keywords: + markets = search_markets_by_keyword(keyword) + if markets: + found_markets.extend(markets) + + # Summary + print("\n" + "=" * 60) + print("๐Ÿ“Š DISCOVERY SUMMARY") + print("=" * 60) + print(f"Total Series Found: {len(series_list)}") + print(f"College Football Events Found: {len(found_events)}") + print(f"College Football Markets Found: {len(found_markets)}") + + # Save results + results = { + 'series_count': len(series_list), + 'events': [{'ticker': e.get('ticker'), 'title': e.get('title')} for e in found_events[:5]], + 'markets': [{'ticker': m.get('ticker'), 'title': m.get('title')} for m in found_markets[:5]] + } + + output_file = Path(__file__).parent / 'college_football_discovery.json' + with open(output_file, 'w') as f: + json.dump(results, f, indent=2) + + print(f"\n๐Ÿ’พ Results saved to: {output_file}") + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/scripts/discover_todays_cfb_games.py b/scripts/discover_todays_cfb_games.py new file mode 100644 index 00000000..0a579214 --- /dev/null +++ b/scripts/discover_todays_cfb_games.py @@ -0,0 +1,192 @@ +#!/usr/bin/env python3 +""" +Discover Today's College Football Games +======================================== +Script to find college football games happening today on Kalshi. 
+""" + +import sys +from pathlib import Path +from datetime import datetime, timedelta +from typing import List, Dict, Any + +# Add parent directory to path +sys.path.insert(0, str(Path(__file__).parent.parent)) + +from neural_sdk.data_pipeline.data_sources.kalshi.client import KalshiClient + + +def discover_cfb_game_series(): + """Try to find college football game series""" + print("\n๐Ÿˆ Searching for College Football Game Series...") + print("=" * 60) + + client = KalshiClient() + + # Try different possible series tickers + potential_tickers = [ + 'KXNCAAFGAME', # Most likely for individual games + 'KXNCAAGAME', # Alternative + 'KXCFBGAME', # Alternative + 'KXCOLLEGEGAME', # Alternative + ] + + for ticker in potential_tickers: + print(f"\nTrying series ticker: {ticker}") + + try: + params = { + 'series_ticker': ticker, + 'status': 'open', + 'limit': 100, + 'with_nested_markets': True + } + + response = client.get('/events', params=params) + events = response.get('events', []) + + if events: + print(f"โœ… Found {len(events)} events for {ticker}!") + return ticker, events + else: + print(f" No events for {ticker}") + + except Exception as e: + print(f" Error: {e}") + + return None, [] + + +def get_todays_games(events: List[Dict[str, Any]]) -> List[Dict[str, Any]]: + """Filter events for games happening today""" + today = datetime.now().date() + tomorrow = today + timedelta(days=1) + + todays_games = [] + + for event in events: + # Check expected expiration time or close time + exp_time_str = event.get('expected_expiration_time') or event.get('close_time') + if exp_time_str: + try: + exp_time = datetime.fromisoformat(exp_time_str.replace('Z', '+00:00')) + if today <= exp_time.date() <= tomorrow: + todays_games.append(event) + except: + pass + + return todays_games + + +def display_game_markets(event: Dict[str, Any]): + """Display markets for a game""" + print(f"\n๐ŸŽฏ {event.get('title', 'Unknown Game')}") + print(f" Event: {event.get('ticker', 'N/A')}") + 
print(f" Category: {event.get('category', 'N/A')}") + + markets = event.get('markets', []) + if markets: + print(f" Markets ({len(markets)}):") + for market in markets: + ticker = market.get('ticker') + yes_team = market.get('yes_sub_title', '') + no_team = market.get('no_sub_title', '') + + # Get prices if available + yes_price = market.get('yes_ask') + if yes_price: + price_str = f" - Current: ${yes_price/100:.2f}" + else: + price_str = "" + + print(f" โ€ข {ticker}: {yes_team} to win{price_str}") + + +def main(): + """Main function to discover today's college football games""" + print("\n" + "=" * 60) + print("๐Ÿˆ TODAY'S COLLEGE FOOTBALL GAMES ON KALSHI") + print("=" * 60) + print(f"๐Ÿ“… Date: {datetime.now().strftime('%A, %B %d, %Y')}") + + # Step 1: Find the college football game series + series_ticker, events = discover_cfb_game_series() + + if not series_ticker: + print("\nโŒ Could not find college football game series") + print("\nTrying to search all open events for college teams...") + + # Fallback: Search all events + client = KalshiClient() + + # Get events with college football keywords + college_teams = ['Alabama', 'Georgia', 'Michigan', 'Ohio State', 'Texas', + 'Oklahoma', 'LSU', 'Clemson', 'Florida', 'Auburn', + 'Penn State', 'Oregon', 'USC', 'Notre Dame'] + + found_events = [] + + for team in college_teams[:5]: # Check first 5 teams + print(f"\nSearching for {team}...") + + try: + # Get all open events + params = { + 'status': 'open', + 'limit': 200, + 'with_nested_markets': True + } + + response = client.get('/events', params=params) + all_events = response.get('events', []) + + # Filter for team + for event in all_events: + title = event.get('title', '') + if team.upper() in title.upper(): + found_events.append(event) + print(f" โœ“ Found: {title}") + + except Exception as e: + print(f" Error: {e}") + + events = found_events + + if not events: + print("\nโŒ No college football events found") + return + + # Step 2: Filter for today's games + 
print(f"\n๐Ÿ“Š Total events found: {len(events)}") + + todays_games = get_todays_games(events) + + if todays_games: + print(f"\n๐ŸŽฎ Games Today/Tomorrow: {len(todays_games)}") + print("=" * 60) + + for game in todays_games: + display_game_markets(game) + else: + print("\n๐Ÿ“… No games scheduled for today") + print("\n๐Ÿ” All available games:") + print("=" * 60) + + # Show all games + for i, event in enumerate(events[:10], 1): # Show first 10 + print(f"\n{i}. {event.get('title', 'Unknown')}") + + # Parse date from expected expiration + exp_time_str = event.get('expected_expiration_time') + if exp_time_str: + try: + exp_time = datetime.fromisoformat(exp_time_str.replace('Z', '+00:00')) + print(f" Date: {exp_time.strftime('%B %d, %Y')}") + except: + pass + + display_game_markets(event) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/scripts/stream_chiefs_chargers_rest.py b/scripts/stream_chiefs_chargers_rest.py new file mode 100644 index 00000000..693a8b4d --- /dev/null +++ b/scripts/stream_chiefs_chargers_rest.py @@ -0,0 +1,391 @@ +#!/usr/bin/env python3 +""" +Chiefs vs Chargers REST API Market Streaming +============================================ +Stream live market data using REST API polling for Kansas City Chiefs vs Los Angeles Chargers game. + +This script demonstrates how to use the Neural SDK to: +- Discover game markets for Chiefs vs Chargers +- Poll market data using REST API +- Display real-time price updates and changes +- Track volume and trading activity + +Usage: + python scripts/stream_chiefs_chargers_rest.py [--duration SECONDS] [--interval SECONDS] + + Or make executable and run directly: + chmod +x scripts/stream_chiefs_chargers_rest.py + ./scripts/stream_chiefs_chargers_rest.py + +Requirements: + - Neural SDK installed (from project root: pip install -e .) 
+ - .env file with Kalshi credentials: + KALSHI_API_KEY_ID=your_key_id + KALSHI_PRIVATE_KEY_FILE=path/to/private.key +""" + +import asyncio +import logging +import sys +from datetime import datetime +from pathlib import Path +from typing import Dict, Any, Optional, List +import argparse +import os + +# Add parent directory to path to import neural_sdk +sys.path.insert(0, str(Path(__file__).parent.parent)) + +from dotenv import load_dotenv +from neural_sdk.streaming.rest_stream import RESTMarketStream, MarketSnapshot +from neural_sdk.data_pipeline.data_sources.kalshi.nfl_discovery import NFLMarketDiscovery +from neural_sdk.core.exceptions import ConfigurationError + +# Setup logging +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s | %(levelname)s | %(message)s', + datefmt='%H:%M:%S' +) +logger = logging.getLogger(__name__) + + +class ChiefsChargersRESTStream: + """Stream real-time market data for Chiefs vs Chargers game using REST API.""" + + def __init__(self, poll_interval: float = 2.0, debug: bool = False): + """ + Initialize the REST streaming client. + + Args: + poll_interval: Seconds between polls + debug: Enable debug logging if True + """ + # Load environment variables + load_dotenv() + + # Validate credentials + if not os.getenv('KALSHI_API_KEY_ID'): + raise ConfigurationError( + "Missing KALSHI_API_KEY_ID in environment. " + "Please add it to your .env file." 
+ ) + + # Set logging level + if debug: + logging.getLogger().setLevel(logging.DEBUG) + + # Initialize REST streamer + self.streamer = RESTMarketStream(poll_interval=poll_interval) + self.discovery = NFLMarketDiscovery() + + # Market tracking + self.market_data = {} + self.price_alerts = [] + self.session_start = None + + # Team identifiers + self.chiefs_identifiers = ['KC', 'KANSAS', 'CHIEFS'] + self.chargers_identifiers = ['LAC', 'LA', 'CHARGERS', 'LOS ANGELES'] + + logger.info(f"โœ… REST streaming client initialized (poll interval: {poll_interval}s)") + + def setup_handlers(self): + """Configure event handlers for market updates.""" + + async def handle_market_update(data: Dict[str, Any]): + """Process market updates.""" + ticker = data.get('ticker') + snapshot: MarketSnapshot = data.get('snapshot') + changes = data.get('changes', []) + + if not snapshot: + return + + # Update tracking + self.market_data[ticker] = snapshot + + # Display update if there are changes + if changes: + self.display_market_update(ticker, snapshot, changes) + + async def handle_price_change(data: Dict[str, Any]): + """Process significant price changes.""" + ticker = data.get('ticker') + field = data.get('field') + old_price = data.get('old_price') + new_price = data.get('new_price') + change_percent = data.get('change_percent') + + # Alert on significant changes (> 2%) + if change_percent and abs(change_percent) > 2: + self.display_price_alert(ticker, field, old_price, new_price, change_percent) + + async def handle_error(data: Dict[str, Any]): + """Handle errors.""" + ticker = data.get('ticker') + error = data.get('error') + logger.error(f"Error for {ticker}: {error}") + + # Set handlers + self.streamer.on_market_update = handle_market_update + self.streamer.on_price_change = handle_price_change + self.streamer.on_error = handle_error + + def display_market_update(self, ticker: str, snapshot: MarketSnapshot, changes: List[Any]): + """Display formatted market update.""" + market_type = 
self.get_market_type(ticker) + + # Show initial snapshot or updates with changes + if ticker not in self.market_data or changes: + print(f"\n{'='*60}") + print(f"๐Ÿ“Š {market_type}") + print(f" Ticker: {ticker}") + print(f" Time: {snapshot.timestamp.strftime('%H:%M:%S')}") + + # Display prices + if snapshot.yes_price and snapshot.no_price: + print(f" YES: ${snapshot.yes_price:.4f} | NO: ${snapshot.no_price:.4f}") + + # Show implied probability + implied_prob = snapshot.implied_probability * 100 + print(f" Implied Probability: {implied_prob:.1f}%") + elif snapshot.yes_price: + print(f" YES Price: ${snapshot.yes_price:.4f}") + elif snapshot.no_price: + print(f" NO Price: ${snapshot.no_price:.4f}") + else: + print(f" No active prices available") + + # Display bid/ask spread if available + if snapshot.spread: + print(f" Spread: ${snapshot.spread:.4f}") + + # Display volume + print(f" Volume: {snapshot.volume:,} contracts") + + # Show changes if any + if changes: + price_changes = [c for c in changes if 'price' in c.field] + for change in price_changes: + direction = "๐Ÿ“ˆ" if change.new_value > change.old_value else "๐Ÿ“‰" + field_name = change.field.replace('_', ' ').title() + print(f" {direction} {field_name}: ${change.old_value:.4f} โ†’ ${change.new_value:.4f}") + + def display_price_alert(self, ticker: str, field: str, old_price: float, new_price: float, change_percent: float): + """Display price alert for significant changes.""" + market_type = self.get_market_type(ticker) + direction = "๐Ÿšจ๐Ÿ“ˆ" if new_price > old_price else "๐Ÿšจ๐Ÿ“‰" + + print(f"\n{direction} PRICE ALERT @ {datetime.now().strftime('%H:%M:%S')}") + print(f" Market: {market_type}") + print(f" {field.replace('_', ' ').title()}: ${old_price:.4f} โ†’ ${new_price:.4f}") + print(f" Change: {change_percent:+.2f}%") + + self.price_alerts.append({ + 'time': datetime.now(), + 'ticker': ticker, + 'change': change_percent + }) + + def get_market_type(self, ticker: str) -> str: + """Determine human-readable 
market type from ticker.""" + ticker_upper = ticker.upper() + + if 'KXNFLGAME' in ticker_upper: + if 'KC' in ticker_upper: + return "๐Ÿˆ Chiefs to Win" + elif 'LAC' in ticker_upper: + return "๐Ÿˆ Chargers to Win" + else: + return "๐Ÿˆ Game Outcome" + elif 'SPREAD' in ticker_upper: + return "๐Ÿ“ Point Spread" + elif 'TOTAL' in ticker_upper or 'O/U' in ticker_upper: + return "๐Ÿ“Š Total Points (Over/Under)" + else: + return "๐ŸŽฏ Game Market" + + async def discover_markets(self) -> List[str]: + """ + Discover available Chiefs vs Chargers markets. + + Returns: + List of market tickers to stream + """ + print("\n๐Ÿ” Discovering Chiefs vs Chargers markets...") + print("="*60) + + try: + # Get markets for both teams + kc_markets = self.discovery.get_team_markets('KC', status='open') + lac_markets = self.discovery.get_team_markets('LAC', status='open') + + # Find game markets (intersection of both teams) + game_markets = self.discovery.get_game_markets('LAC', 'KC', status='open') + + # Use game markets if found, otherwise combine team markets + if game_markets: + markets_to_stream = game_markets + print(f" โœ“ Found {len(markets_to_stream)} game-specific markets") + else: + # Combine and deduplicate + markets_to_stream = list(set(kc_markets + lac_markets)) + print(f" โœ“ Found {len(kc_markets)} KC + {len(lac_markets)} LAC markets") + + if markets_to_stream: + print("\n๐Ÿ“‹ Markets to stream:") + for i, ticker in enumerate(markets_to_stream[:10], 1): + print(f" {i}. {ticker}") + if len(markets_to_stream) > 10: + print(f" ... and {len(markets_to_stream) - 10} more") + else: + print(" โš ๏ธ No active markets found") + + return markets_to_stream + + except Exception as e: + logger.error(f"Market discovery error: {e}") + return [] + + async def stream_markets(self, duration_seconds: int = 300): + """ + Stream market data for specified duration. 
+ + Args: + duration_seconds: Duration to stream in seconds (default 5 minutes) + """ + self.session_start = datetime.now() + + print(f"\n๐Ÿš€ Starting REST API stream for {duration_seconds} seconds...") + print("="*60) + + # Set up event handlers + self.setup_handlers() + + try: + # Discover markets + markets = await self.discover_markets() + + if not markets: + print("\nโš ๏ธ No markets to stream") + print("This is normal if there are no games scheduled.") + return + + # Start streaming + print(f"\n๐Ÿ“ก Streaming {len(markets)} markets...") + print("="*60) + print("๐ŸŽฎ LIVE MARKET DATA (via REST API)") + print("="*60) + print("Press Ctrl+C to stop early\n") + + # Stream markets + await self.streamer.stream_markets( + tickers=markets, + duration=duration_seconds + ) + + except KeyboardInterrupt: + print("\nโš ๏ธ Stream interrupted by user") + except Exception as e: + logger.error(f"โŒ Streaming error: {e}") + finally: + # Display session summary + self.display_summary() + + def display_summary(self): + """Display streaming session summary.""" + if not self.session_start: + return + + duration = (datetime.now() - self.session_start).total_seconds() + + print("\n" + "="*60) + print("๐Ÿ“Š STREAMING SESSION SUMMARY") + print("="*60) + print(f"โฑ๏ธ Duration: {duration:.0f} seconds") + print(f"๐Ÿ“ˆ Markets Tracked: {len(self.market_data)}") + print(f"๐Ÿ”„ Polls: {self.streamer.polls_count}") + print(f"๐Ÿ“ Changes Detected: {self.streamer.changes_count}") + print(f"๐Ÿšจ Price Alerts: {len(self.price_alerts)}") + print(f"โŒ Errors: {self.streamer.errors_count}") + + if self.market_data: + print("\n๐ŸŽฏ Final Market Prices:") + print("-"*40) + + for ticker, snapshot in sorted(self.market_data.items()): + market_type = self.get_market_type(ticker) + if snapshot.yes_price: + print(f"{market_type:30} ${snapshot.yes_price:.4f} ({snapshot.volume:,} vol)") + + if self.price_alerts: + print("\n๐Ÿšจ Significant Price Movements:") + print("-"*40) + for alert in 
self.price_alerts[:5]: + print(f" {alert['time'].strftime('%H:%M:%S')} - {alert['ticker']}: {alert['change']:+.2f}%") + + print("\nโœ… Stream completed successfully!") + + +async def main(): + """Main function to run the streaming client.""" + # Parse command line arguments + parser = argparse.ArgumentParser( + description='Stream Chiefs vs Chargers market data via REST API' + ) + parser.add_argument( + '--duration', + type=int, + default=300, + help='Stream duration in seconds (default: 300)' + ) + parser.add_argument( + '--interval', + type=float, + default=2.0, + help='Poll interval in seconds (default: 2.0)' + ) + parser.add_argument( + '--debug', + action='store_true', + help='Enable debug logging' + ) + + args = parser.parse_args() + + # Display header + print("\n" + "="*60) + print("๐Ÿˆ CHIEFS VS CHARGERS REST API STREAMING") + print("="*60) + print(f"๐Ÿ“… Date: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}") + print(f"โฑ๏ธ Duration: {args.duration} seconds") + print(f"๐Ÿ”„ Poll Interval: {args.interval} seconds") + print("="*60) + + try: + # Create and run streaming client + streamer = ChiefsChargersRESTStream( + poll_interval=args.interval, + debug=args.debug + ) + await streamer.stream_markets(duration_seconds=args.duration) + except ConfigurationError as e: + print(f"\nโŒ Configuration Error: {e}") + print("\n๐Ÿ“ Please ensure you have a .env file with:") + print(" KALSHI_API_KEY_ID=your_key_id") + print(" KALSHI_PRIVATE_KEY_FILE=path/to/private.key") + sys.exit(1) + except KeyboardInterrupt: + print("\n\n๐Ÿ‘‹ Stream stopped by user") + except Exception as e: + print(f"\nโŒ Error: {e}") + if args.debug: + import traceback + traceback.print_exc() + sys.exit(1) + + +if __name__ == "__main__": + # Run the async main function + asyncio.run(main()) \ No newline at end of file diff --git a/scripts/stream_college_football_rest.py b/scripts/stream_college_football_rest.py new file mode 100644 index 00000000..72abf0b2 --- /dev/null +++ 
b/scripts/stream_college_football_rest.py @@ -0,0 +1,458 @@ +#!/usr/bin/env python3 +""" +College Football REST API Market Streaming +========================================== +Stream live market data using REST API polling for College Football games. + +This script demonstrates how to use the Neural SDK to: +- Discover college football game markets +- Filter by date, team, or conference +- Poll market data using REST API +- Display real-time price updates and changes +- Track volume and trading activity + +Usage: + python scripts/stream_college_football_rest.py [--date DATE] [--team TEAM] [--conference CONF] [--duration SECONDS] + +Examples: + # Stream today's games + python scripts/stream_college_football_rest.py + + # Stream specific team + python scripts/stream_college_football_rest.py --team "Ohio State" + + # Stream SEC games + python scripts/stream_college_football_rest.py --conference SEC + + # Stream specific date + python scripts/stream_college_football_rest.py --date 2025-09-13 +""" + +import asyncio +import logging +import sys +from datetime import datetime, date +from pathlib import Path +from typing import Dict, Any, Optional, List +import argparse +import os + +# Add parent directory to path +sys.path.insert(0, str(Path(__file__).parent.parent)) + +from dotenv import load_dotenv +from neural_sdk.streaming.rest_stream import RESTMarketStream, MarketSnapshot +from neural_sdk.data_pipeline.data_sources.kalshi.cfb_discovery import CFBMarketDiscovery +from neural_sdk.core.exceptions import ConfigurationError + +# Setup logging +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s | %(levelname)s | %(message)s', + datefmt='%H:%M:%S' +) +logger = logging.getLogger(__name__) + + +class CollegeFootballRESTStream: + """Stream real-time market data for College Football games using REST API.""" + + def __init__(self, poll_interval: float = 2.0, debug: bool = False): + """ + Initialize the REST streaming client. 
+ + Args: + poll_interval: Seconds between polls + debug: Enable debug logging if True + """ + # Load environment variables + load_dotenv() + + # Validate credentials + if not os.getenv('KALSHI_API_KEY_ID'): + raise ConfigurationError( + "Missing KALSHI_API_KEY_ID in environment. " + "Please add it to your .env file." + ) + + # Set logging level + if debug: + logging.getLogger().setLevel(logging.DEBUG) + + # Initialize components + self.streamer = RESTMarketStream(poll_interval=poll_interval) + self.discovery = CFBMarketDiscovery() + + # Market tracking + self.market_data = {} + self.price_alerts = [] + self.session_start = None + + logger.info(f"โœ… College Football streaming client initialized (poll interval: {poll_interval}s)") + + def setup_handlers(self): + """Configure event handlers for market updates.""" + + async def handle_market_update(data: Dict[str, Any]): + """Process market updates.""" + ticker = data.get('ticker') + snapshot: MarketSnapshot = data.get('snapshot') + changes = data.get('changes', []) + + if not snapshot: + return + + # Update tracking + self.market_data[ticker] = snapshot + + # Display update if there are changes or initial snapshot + if ticker not in self.market_data or changes: + self.display_market_update(ticker, snapshot, changes) + + async def handle_price_change(data: Dict[str, Any]): + """Process significant price changes.""" + ticker = data.get('ticker') + field = data.get('field') + old_price = data.get('old_price') + new_price = data.get('new_price') + change_percent = data.get('change_percent') + + # Alert on significant changes (> 2%) + if change_percent and abs(change_percent) > 2: + self.display_price_alert(ticker, field, old_price, new_price, change_percent) + + async def handle_error(data: Dict[str, Any]): + """Handle errors.""" + ticker = data.get('ticker') + error = data.get('error') + logger.error(f"Error for {ticker}: {error}") + + # Set handlers + self.streamer.on_market_update = handle_market_update + 
self.streamer.on_price_change = handle_price_change + self.streamer.on_error = handle_error + + def display_market_update(self, ticker: str, snapshot: MarketSnapshot, changes: List[Any]): + """Display formatted market update.""" + # Extract team names from ticker (format: KXNCAAFGAME-DATE-TEAM1TEAM2-TEAM) + parts = ticker.split('-') + if len(parts) >= 3: + teams = parts[-2] # Team codes + winner = parts[-1] # Which team this market is for + market_type = f"๐Ÿˆ {winner} to Win" + else: + market_type = "๐Ÿˆ College Football Market" + + # Show initial snapshot or updates with changes + if ticker not in self.market_data or changes: + print(f"\n{'='*60}") + print(f"๐Ÿ“Š {market_type}") + print(f" Ticker: {ticker}") + print(f" Time: {snapshot.timestamp.strftime('%H:%M:%S')}") + + # Display prices + if snapshot.yes_price and snapshot.no_price: + print(f" YES: ${snapshot.yes_price:.4f} | NO: ${snapshot.no_price:.4f}") + + # Show implied probability + implied_prob = snapshot.implied_probability * 100 + print(f" Implied Probability: {implied_prob:.1f}%") + elif snapshot.yes_price: + print(f" YES Price: ${snapshot.yes_price:.4f}") + elif snapshot.no_price: + print(f" NO Price: ${snapshot.no_price:.4f}") + else: + print(f" No active prices available") + + # Display bid/ask spread if available + if snapshot.spread: + print(f" Spread: ${snapshot.spread:.4f}") + + # Display volume + print(f" Volume: {snapshot.volume:,} contracts") + + # Show changes if any + if changes: + price_changes = [c for c in changes if 'price' in c.field] + for change in price_changes: + direction = "๐Ÿ“ˆ" if change.new_value > change.old_value else "๐Ÿ“‰" + field_name = change.field.replace('_', ' ').title() + print(f" {direction} {field_name}: ${change.old_value:.4f} โ†’ ${change.new_value:.4f}") + + def display_price_alert(self, ticker: str, field: str, old_price: float, new_price: float, change_percent: float): + """Display price alert for significant changes.""" + direction = "๐Ÿšจ๐Ÿ“ˆ" if 
new_price > old_price else "๐Ÿšจ๐Ÿ“‰" + + print(f"\n{direction} PRICE ALERT @ {datetime.now().strftime('%H:%M:%S')}") + print(f" Market: {ticker}") + print(f" {field.replace('_', ' ').title()}: ${old_price:.4f} โ†’ ${new_price:.4f}") + print(f" Change: {change_percent:+.2f}%") + + self.price_alerts.append({ + 'time': datetime.now(), + 'ticker': ticker, + 'change': change_percent + }) + + async def discover_markets( + self, + target_date: Optional[date] = None, + team: Optional[str] = None, + conference: Optional[str] = None + ) -> List[str]: + """ + Discover available college football markets. + + Args: + target_date: Date to filter games (None for today) + team: Specific team to filter + conference: Conference to filter + + Returns: + List of market tickers to stream + """ + print("\n๐Ÿ” Discovering College Football markets...") + print("="*60) + + try: + markets_to_stream = [] + + if team: + # Get markets for specific team + print(f" Searching for {team} games...") + events = self.discovery.get_team_events(team, status='open') + elif conference: + # Get markets for conference + print(f" Searching for {conference} games...") + events = self.discovery.get_conference_events(conference, status='open') + elif target_date: + # Get markets for specific date + print(f" Searching for games on {target_date}...") + events = self.discovery.get_events_by_date(target_date, status='open') + else: + # Get today's games + print(f" Searching for today's games...") + events = self.discovery.get_events_by_date(datetime.now().date(), status='open') + + # Extract market tickers from events + for event in events: + markets = event.get('markets', []) + for market in markets: + ticker = market.get('ticker') + if ticker: + markets_to_stream.append(ticker) + + # Remove duplicates + markets_to_stream = list(set(markets_to_stream)) + + if markets_to_stream: + print(f" โœ“ Found {len(markets_to_stream)} markets") + print("\n๐Ÿ“‹ Markets to stream:") + for i, ticker in 
enumerate(markets_to_stream[:10], 1): + print(f" {i}. {ticker}") + if len(markets_to_stream) > 10: + print(f" ... and {len(markets_to_stream) - 10} more") + else: + print(" โš ๏ธ No active markets found") + + # Show some available games + print("\n๐Ÿ“… Available games:") + all_events = self.discovery.get_all_cfb_events(status='open') + for i, event in enumerate(all_events[:5], 1): + info = self.discovery.format_game_info(event) + print(f" {i}. {info['title']} ({info['date']})") + + return markets_to_stream + + except Exception as e: + logger.error(f"Market discovery error: {e}") + return [] + + async def stream_markets( + self, + duration_seconds: int = 300, + target_date: Optional[date] = None, + team: Optional[str] = None, + conference: Optional[str] = None + ): + """ + Stream market data for specified duration. + + Args: + duration_seconds: Duration to stream in seconds + target_date: Date to filter games + team: Specific team to filter + conference: Conference to filter + """ + self.session_start = datetime.now() + + print(f"\n๐Ÿš€ Starting REST API stream for {duration_seconds} seconds...") + print("="*60) + + # Set up event handlers + self.setup_handlers() + + try: + # Discover markets + markets = await self.discover_markets(target_date, team, conference) + + if not markets: + print("\nโš ๏ธ No markets to stream") + return + + # Start streaming + print(f"\n๐Ÿ“ก Streaming {len(markets)} markets...") + print("="*60) + print("๐ŸŽฎ LIVE MARKET DATA (via REST API)") + print("="*60) + print("Press Ctrl+C to stop early\n") + + # Stream markets + await self.streamer.stream_markets( + tickers=markets, + duration=duration_seconds + ) + + except KeyboardInterrupt: + print("\nโš ๏ธ Stream interrupted by user") + except Exception as e: + logger.error(f"โŒ Streaming error: {e}") + finally: + # Display session summary + self.display_summary() + + def display_summary(self): + """Display streaming session summary.""" + if not self.session_start: + return + + duration = 
(datetime.now() - self.session_start).total_seconds() + + print("\n" + "="*60) + print("๐Ÿ“Š STREAMING SESSION SUMMARY") + print("="*60) + print(f"โฑ๏ธ Duration: {duration:.0f} seconds") + print(f"๐Ÿ“ˆ Markets Tracked: {len(self.market_data)}") + print(f"๐Ÿ”„ Polls: {self.streamer.polls_count}") + print(f"๐Ÿ“ Changes Detected: {self.streamer.changes_count}") + print(f"๐Ÿšจ Price Alerts: {len(self.price_alerts)}") + print(f"โŒ Errors: {self.streamer.errors_count}") + + if self.market_data: + print("\n๐ŸŽฏ Final Market Prices:") + print("-"*40) + + for ticker, snapshot in sorted(self.market_data.items()): + if snapshot.yes_price: + print(f"{ticker:40} ${snapshot.yes_price:.4f} ({snapshot.volume:,} vol)") + + if self.price_alerts: + print("\n๐Ÿšจ Significant Price Movements:") + print("-"*40) + for alert in self.price_alerts[:5]: + print(f" {alert['time'].strftime('%H:%M:%S')} - {alert['ticker']}: {alert['change']:+.2f}%") + + print("\nโœ… Stream completed successfully!") + + +async def main(): + """Main function to run the streaming client.""" + # Parse command line arguments + parser = argparse.ArgumentParser( + description='Stream College Football market data via REST API' + ) + parser.add_argument( + '--date', + type=str, + help='Date to filter games (YYYY-MM-DD format)' + ) + parser.add_argument( + '--team', + type=str, + help='Specific team to stream' + ) + parser.add_argument( + '--conference', + type=str, + choices=['SEC', 'Big Ten', 'ACC', 'Big 12', 'Pac-12', 'Independent'], + help='Conference to stream' + ) + parser.add_argument( + '--duration', + type=int, + default=300, + help='Stream duration in seconds (default: 300)' + ) + parser.add_argument( + '--interval', + type=float, + default=2.0, + help='Poll interval in seconds (default: 2.0)' + ) + parser.add_argument( + '--debug', + action='store_true', + help='Enable debug logging' + ) + + args = parser.parse_args() + + # Parse date if provided + target_date = None + if args.date: + try: + target_date = 
datetime.strptime(args.date, '%Y-%m-%d').date() + except ValueError: + print(f"โŒ Invalid date format: {args.date}. Use YYYY-MM-DD") + sys.exit(1) + + # Display header + print("\n" + "="*60) + print("๐Ÿˆ COLLEGE FOOTBALL REST API STREAMING") + print("="*60) + print(f"๐Ÿ“… Date: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}") + + if args.team: + print(f"๐ŸŽฏ Team: {args.team}") + elif args.conference: + print(f"๐Ÿ† Conference: {args.conference}") + elif target_date: + print(f"๐Ÿ“… Games on: {target_date}") + else: + print(f"๐Ÿ“… Today's Games") + + print(f"โฑ๏ธ Duration: {args.duration} seconds") + print(f"๐Ÿ”„ Poll Interval: {args.interval} seconds") + print("="*60) + + try: + # Create and run streaming client + streamer = CollegeFootballRESTStream( + poll_interval=args.interval, + debug=args.debug + ) + await streamer.stream_markets( + duration_seconds=args.duration, + target_date=target_date, + team=args.team, + conference=args.conference + ) + except ConfigurationError as e: + print(f"\nโŒ Configuration Error: {e}") + print("\n๐Ÿ“ Please ensure you have a .env file with:") + print(" KALSHI_API_KEY_ID=your_key_id") + print(" KALSHI_PRIVATE_KEY_FILE=path/to/private.key") + sys.exit(1) + except KeyboardInterrupt: + print("\n\n๐Ÿ‘‹ Stream stopped by user") + except Exception as e: + print(f"\nโŒ Error: {e}") + if args.debug: + import traceback + traceback.print_exc() + sys.exit(1) + + +if __name__ == "__main__": + # Run the async main function + asyncio.run(main()) \ No newline at end of file diff --git a/scripts/test_cfb_dates.py b/scripts/test_cfb_dates.py new file mode 100644 index 00000000..9365b998 --- /dev/null +++ b/scripts/test_cfb_dates.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 +"""Test CFB date extraction""" + +import sys +from pathlib import Path +from datetime import datetime + +sys.path.insert(0, str(Path(__file__).parent.parent)) + +from neural_sdk.data_pipeline.data_sources.kalshi.cfb_discovery import CFBMarketDiscovery + +discovery = 
CFBMarketDiscovery() +events = discovery.get_all_cfb_events() + +print(f"Found {len(events)} total events\n") + +# Check first few events +for i, event in enumerate(events[:3]): + print(f"Event {i+1}:") + print(f" Title: {event.get('title')}") + print(f" Ticker: {event.get('ticker', 'NO TICKER')}") + + # Check dates + for field in ['expected_expiration_time', 'close_time', 'expiration_time']: + value = event.get(field) + if value: + print(f" {field}: {value}") + try: + dt = datetime.fromisoformat(value.replace('Z', '+00:00')) + print(f" -> Date: {dt.date()}") + except: + pass + + # Check markets + markets = event.get('markets', []) + if markets and len(markets) > 0: + print(f" Markets: {len(markets)}") + print(f" First market ticker: {markets[0].get('ticker')}") + + print() \ No newline at end of file From 7932ef3933004872ef920c91368f84b3122c0e78 Mon Sep 17 00:00:00 2001 From: hudsonaikins-crown Date: Fri, 5 Sep 2025 22:45:50 -0400 Subject: [PATCH 3/3] Add unit tests and enhance documentation for Kalshi REST adapter - Add comprehensive unit tests for edge cases: empty responses, network failures, pagination - Enhance docstrings with example usage for new parameters - Test coverage for _paginate_events, get_game_markets, get_nfl_markets, get_cfb_markets, get_events, health_check --- .../data_sources/kalshi/rest_adapter.py | 87 +++++++++---- tests/unit/test_kalshi_rest_adapter.py | 122 ++++++++++++++++++ 2 files changed, 182 insertions(+), 27 deletions(-) create mode 100644 tests/unit/test_kalshi_rest_adapter.py diff --git a/neural_sdk/data_sources/kalshi/rest_adapter.py b/neural_sdk/data_sources/kalshi/rest_adapter.py index 326891a8..16db2c5f 100644 --- a/neural_sdk/data_sources/kalshi/rest_adapter.py +++ b/neural_sdk/data_sources/kalshi/rest_adapter.py @@ -216,18 +216,25 @@ async def get_events( **kwargs ) -> Dict: """ - Get events. - + Get events from Kalshi. 
+ Args: - limit: Maximum number of events - status: Event status filter - series_ticker: Series ticker filter - with_nested_markets: Include market details - cursor: Pagination cursor - **kwargs: Additional filters - + limit: Maximum number of events to return (default: 100) + status: Filter events by status (e.g., 'open', 'closed') + series_ticker: Filter by series ticker + with_nested_markets: Include nested market details in response (default: False) + cursor: Pagination cursor for fetching next page + **kwargs: Additional query parameters + Returns: - Events data + Dict containing events data with standardized format + + Examples: + # Get first 50 open events + events = await adapter.get_events(limit=50, status='open') + + # Get events with nested markets for pagination + events = await adapter.get_events(with_nested_markets=True, cursor='abc123') """ params = { "limit": limit, @@ -316,13 +323,23 @@ async def _paginate_events( async def get_game_markets(self, sport: Optional[str] = None) -> Dict: """ - Get game/sports betting markets. - + Get game/sports betting markets from Kalshi. + + Uses events with nested markets for accurate discovery, with fallback to markets endpoint. + Args: - sport: Optional sport filter ("soccer", "nfl", "bundesliga", "epl") - + sport: Optional sport filter for categorization + ("soccer", "nfl", "bundesliga", "epl", "cfb") + Returns: - Game markets + Dict with standardized response containing markets array + + Examples: + # Get all game markets + markets = await adapter.get_game_markets() + + # Get only NFL markets + nfl_markets = await adapter.get_game_markets(sport='nfl') """ # Prefer events with nested markets and paginate events = await self._paginate_events( @@ -405,14 +422,22 @@ async def get_soccer_markets(self) -> Dict: async def get_nfl_markets(self, week: Optional[int] = None) -> Dict: """ - Get NFL-related markets. - Uses events with nested markets when available. - + Get NFL-related markets with optional week filtering. 
+ + Uses events with nested markets for comprehensive discovery. + Args: - week: NFL week number (for filtering) - + week: Optional NFL week number to filter markets (e.g., 1, 2) + Returns: - NFL-related markets + Dict with NFL markets, tagged with sport='NFL' and league='National Football League' + + Examples: + # Get all NFL markets + nfl_markets = await adapter.get_nfl_markets() + + # Get NFL markets for week 1 + week1_markets = await adapter.get_nfl_markets(week=1) """ # Fetch events first, with nested markets events = await self._paginate_events( @@ -456,14 +481,22 @@ async def get_nfl_markets(self, week: Optional[int] = None) -> Dict: async def get_cfb_markets(self, week: Optional[int] = None) -> Dict: """ - Get college football markets. - Uses events with nested markets when available. - + Get college football markets with optional week filtering. + + Uses events with nested markets for comprehensive discovery. + Args: - week: College football week number - + week: Optional college football week number to filter markets + Returns: - CFB markets + Dict with CFB markets, tagged with sport='CFB' and league='College Football' + + Examples: + # Get all CFB markets + cfb_markets = await adapter.get_cfb_markets() + + # Get CFB markets for week 1 + week1_markets = await adapter.get_cfb_markets(week=1) """ # Fetch events first, with nested markets events = await self._paginate_events( diff --git a/tests/unit/test_kalshi_rest_adapter.py b/tests/unit/test_kalshi_rest_adapter.py new file mode 100644 index 00000000..8877830f --- /dev/null +++ b/tests/unit/test_kalshi_rest_adapter.py @@ -0,0 +1,122 @@ +import pytest +from unittest.mock import AsyncMock, MagicMock, patch +import asyncio +from neural_sdk.data_sources.kalshi.rest_adapter import KalshiRESTAdapter + + +class TestKalshiRESTAdapter: + @pytest.fixture + def adapter(self): + """Create adapter with mocked dependencies""" + with patch('neural_sdk.data_sources.kalshi.rest_adapter.KalshiClient') as mock_client, \ + 
patch('neural_sdk.data_sources.kalshi.rest_adapter.RSASignatureAuth') as mock_auth, \ + patch('neural_sdk.data_sources.kalshi.rest_adapter.RESTDataSource.__init__', return_value=None): + + mock_config = MagicMock() + mock_config.api_key_id = "test_key" + mock_config.private_key = "test_private" + mock_config.api_base_url = "https://api.test.com" + mock_config.environment = "test" + + mock_client.return_value.config = mock_config + + adapter = KalshiRESTAdapter() + adapter.kalshi_client = mock_client.return_value + adapter.config = mock_config + adapter.fetch = AsyncMock() + adapter.transform_response = AsyncMock() + + return adapter + + @pytest.mark.asyncio + async def test_paginate_events_empty_response(self, adapter): + """Test pagination with empty response""" + adapter.get_events = AsyncMock(return_value={"data": {"events": []}}) + + result = await adapter._paginate_events() + + assert result == [] + assert adapter.get_events.call_count == 1 + + @pytest.mark.asyncio + async def test_paginate_events_with_cursor(self, adapter): + """Test pagination with cursor""" + responses = [ + {"data": {"events": [{"id": 1}], "cursor": "cursor1"}}, + {"data": {"events": [{"id": 2}], "cursor": "cursor2"}}, + {"data": {"events": [{"id": 3}]}} # No cursor, stops + ] + adapter.get_events = AsyncMock(side_effect=responses) + + result = await adapter._paginate_events() + + assert len(result) == 3 + assert adapter.get_events.call_count == 3 + + @pytest.mark.asyncio + async def test_paginate_events_network_failure(self, adapter): + """Test pagination handles network failure""" + adapter.get_events = AsyncMock(side_effect=Exception("Network error")) + + with pytest.raises(Exception, match="Network error"): + await adapter._paginate_events() + + @pytest.mark.asyncio + async def test_get_game_markets_fallback(self, adapter): + """Test get_game_markets falls back to markets endpoint""" + # Mock empty events + adapter._paginate_events = AsyncMock(return_value=[]) + adapter.get_markets = 
AsyncMock(return_value={"data": {"markets": [{"title": "Test Game"}]}}) + adapter.transform_response = AsyncMock(return_value={"data": {"markets": [{"title": "Test Game"}]}}) + + result = await adapter.get_game_markets() + + adapter.get_markets.assert_called_once_with(status="open", limit=500) + adapter.transform_response.assert_called_once() + + @pytest.mark.asyncio + async def test_get_nfl_markets_with_week_filter(self, adapter): + """Test NFL markets with week filter""" + events = [{ + "title": "Week 1 Game", + "markets": [{"title": "NFL Game Winner", "ticker": "NFL123"}] + }] + adapter._paginate_events = AsyncMock(return_value=events) + adapter.transform_response = AsyncMock(return_value={"data": {"markets": []}}) + + result = await adapter.get_nfl_markets(week=1) + + adapter._paginate_events.assert_called_once() + adapter.transform_response.assert_called_once() + + @pytest.mark.asyncio + async def test_get_events_with_nested_markets(self, adapter): + """Test get_events with nested markets parameter""" + adapter.fetch = AsyncMock(return_value={"data": {"events": []}}) + + result = await adapter.get_events(with_nested_markets=True, cursor="test_cursor") + + adapter.fetch.assert_called_once_with("/events", params={ + "limit": 100, + "with_nested_markets": True, + "cursor": "test_cursor" + }) + + @pytest.mark.asyncio + async def test_health_check_success(self, adapter): + """Test health check success""" + adapter.fetch = AsyncMock(return_value={"data": {"markets": []}}) + + result = await adapter.health_check() + + assert result is True + adapter.fetch.assert_called_once_with("/markets", params={"limit": 1}) + + @pytest.mark.asyncio + async def test_health_check_failure(self, adapter): + """Test health check failure""" + adapter.fetch = AsyncMock(return_value={}) + + result = await adapter.health_check() + + assert result is False \ No newline at end of file